more fixes to the backup contract (#281)
This PR irons out the last issues with the backup contract and the Restic implementation. I verified that it works by backing up files to a local folder and to Backblaze on my server.
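For illustration, the kind of setup exercised by that check (one local repository plus one Backblaze S3 repository) would look roughly like the examples added to the docs in this diff; the service name, paths and secret locations below are placeholders, not part of the PR:

```nix
# Minimal sketch based on the documentation changes below; all names are illustrative.
shb.restic.instances.myservice = {
  enable = true;
  user = "myservice";
  passphraseFile = "<path/to/passphrase>";
  repositories = [
    {
      # Local folder repository.
      path = "/srv/backups/myservice";
      timerConfig.OnCalendar = "00:00:00";
    }
    {
      # Backblaze B2 reached over the S3 protocol.
      path = "s3:s3.us-west-000.backblazeb2.com/<mybucket>";
      timerConfig.OnCalendar = "00:00:00";
      secrets = {
        AWS_ACCESS_KEY_ID.source = "<path/to/access_key_id>";
        AWS_SECRET_ACCESS_KEY.source = "<path/to/secret_access_key>";
      };
    }
  ];
};
```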
This commit is contained in:
parent 10dea06ec1
commit f8fdf2f704
19 changed files with 326 additions and 348 deletions
@@ -9,7 +9,7 @@ rec {
 # - resultPath is the location the config file should have on the filesystem.
 # - generator is a function taking two arguments name and value and returning path in the nix
 # nix store where the
-replaceSecrets = { userConfig, resultPath, generator }:
+replaceSecrets = { userConfig, resultPath, generator, user ? null, permissions ? "u=r,g=r,o=" }:
 let
 configWithTemplates = withReplacements userConfig;
 
@@ -20,6 +20,7 @@ rec {
 replaceSecretsScript {
 file = nonSecretConfigFile;
 inherit resultPath replacements;
+inherit user permissions;
 };
 
 replaceSecretsFormatAdapter = format: format.generate;
@@ -30,7 +31,7 @@ rec {
 resultPath = newPath;
 };
 
-replaceSecretsScript = { file, resultPath, replacements }:
+replaceSecretsScript = { file, resultPath, replacements, user ? null, permissions ? "u=r,g=r,o=" }:
 let
 templatePath = resultPath + ".template";
 sedPatterns = lib.strings.concatStringsSep " " (lib.attrsets.mapAttrsToList (from: to: "-e \"s|${from}|${to}|\"") replacements);
@@ -44,7 +45,12 @@ rec {
 mkdir -p $(dirname ${templatePath})
 ln -fs ${file} ${templatePath}
 rm -f ${resultPath}
+touch ${resultPath}
+'' + (lib.optionalString (user != null) ''
+chown ${user} ${resultPath}
+'') + ''
 ${sedCmd} ${templatePath} > ${resultPath}
+chmod ${permissions} ${resultPath}
 '';
 
 secretFileType = lib.types.submodule {
@@ -237,13 +243,14 @@ rec {
 pkgs.runCommand "nix-flake-tests-success" { } "echo > $out";
 
 
-genConfigOutOfBandSystemd = { config, configLocation, generator }:
+genConfigOutOfBandSystemd = { config, configLocation, generator, user ? null, permissions ? "u=r,g=r,o=" }:
 {
 loadCredentials = getLoadCredentials "source" config;
 preStart = lib.mkBefore (replaceSecrets {
 userConfig = updateToLoadCredentials "source" "$CREDENTIALS_DIRECTORY" config;
 resultPath = configLocation;
 inherit generator;
+inherit user permissions;
 });
 };
 
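The new `user` and `permissions` arguments threaded through `replaceSecrets`, `replaceSecretsScript` and `genConfigOutOfBandSystemd` above let the generated config file be chowned and chmodded before it is filled in. A minimal sketch of a call site, assuming the library is in scope as `shblib` and using placeholder paths:

```nix
# Hypothetical call site; only `user` and `permissions` are new in this diff,
# the rest mirrors the signature shown above.
shblib.replaceSecrets {
  userConfig = cfg.settings;                     # attrset that may contain secret placeholders
  resultPath = "/var/lib/myservice/config.ini";  # where the rendered file should land
  generator = shblib.replaceSecretsFormatAdapter (pkgs.formats.ini { });
  user = "myservice";          # the generated file is chowned to this user (skipped when null)
  permissions = "u=r,g=r,o=";  # and chmodded to this mode (the default shown above)
}
```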
@@ -106,6 +106,7 @@ in
 '';
 readOnly = true;
 default = {
+user = "lldap";
 sourceDirectories = [
 "/var/lib/lldap"
 ];
@@ -139,10 +140,7 @@ in
 group = "lldap";
 isSystemUser = true;
 };
-users.groups.lldap = {
-members = [ "backup" ];
-};
+users.groups.lldap = {};
 
 services.lldap = {
 enable = true;
@@ -15,22 +15,9 @@ let
 
 user = lib.mkOption {
 description = ''
-Unix user doing the backups.
-
-For Restic, the same user must be used for all instances.
+Unix user doing the backups. Must be the user owning the files to be backed up.
 '';
 type = lib.types.str;
-default = cfg.user;
-};
-
-group = lib.mkOption {
-description = ''
-Unix group doing the backups.
-
-For Restic, the same group must be used for all instances.
-'';
-type = lib.types.str;
-default = cfg.group;
 };
 
 sourceDirectories = lib.mkOption {
@@ -125,18 +112,6 @@ let
 in
 {
 options.shb.restic = {
-user = lib.mkOption {
-description = "Unix user doing the backups.";
-type = lib.types.str;
-default = "backup";
-};
-
-group = lib.mkOption {
-description = "Unix group doing the backups.";
-type = lib.types.str;
-default = "backup";
-};
-
 instances = lib.mkOption {
 description = "Each instance is a backup setting";
 default = {};
@@ -176,41 +151,24 @@ in
 enabledInstances = lib.attrsets.filterAttrs (k: i: i.enable) cfg.instances;
 in lib.mkMerge [
 {
-assertions = [
-{
-assertion = lib.all (x: x.user == cfg.user) (lib.mapAttrsToList (n: v: v)cfg.instances);
-message = "All Restic instances must have the same user as 'shb.restic.user'.";
-}
-{
-assertion = lib.all (x: x.group == cfg.group) (lib.mapAttrsToList (n: v: v) cfg.instances);
-message = "All Restic instances must have the same group as 'shb.restic.group'.";
-}
+environment.systemPackages = lib.optionals (enabledInstances != {}) [ pkgs.restic ];
+systemd.tmpfiles.rules =
+let
+mkRepositorySettings = name: instance: repository: lib.optionals (lib.hasPrefix "/" repository.path) [
+"d '${repository.path}' 0750 ${instance.user} root - -"
 ];
 
-users.users = {
-${cfg.user} = {
-name = cfg.user;
-group = cfg.group;
-home = lib.mkForce "/var/lib/${cfg.user}";
-createHome = true;
-isSystemUser = true;
-extraGroups = [ "keys" ];
-};
-};
-users.groups = {
-${cfg.group} = {
-name = cfg.group;
-};
-};
-}
-{
-environment.systemPackages = lib.optionals (enabledInstances != {}) [ pkgs.restic ];
+mkSettings = name: instance: builtins.map (mkRepositorySettings name instance) instance.repositories;
+in
+lib.flatten (lib.attrsets.mapAttrsToList mkSettings enabledInstances);
 
 services.restic.backups =
 let
 mkRepositorySettings = name: instance: repository: {
 "${name}_${repoSlugName repository.path}" = {
-inherit (cfg) user;
+inherit (instance) user;
 
 repository = repository.path;
 
 paths = instance.sourceDirectories;
@@ -250,12 +208,13 @@ in
 Nice = cfg.performance.niceness;
 IOSchedulingClass = cfg.performance.ioSchedulingClass;
 IOSchedulingPriority = cfg.performance.ioPriority;
+BindReadOnlyPaths = instance.sourceDirectories;
 };
 }
 (lib.attrsets.optionalAttrs (repository.secrets != {})
 {
 serviceConfig.EnvironmentFile = [
-"/run/secrets/restic/${serviceName}"
+"/run/secrets_restic/${serviceName}"
 ];
 after = [ "${serviceName}-pre.service" ];
 requires = [ "${serviceName}-pre.service" ];
@@ -266,8 +225,9 @@ in
 (let
 script = shblib.genConfigOutOfBandSystemd {
 config = repository.secrets;
-configLocation = "/run/secrets/restic/${serviceName}";
+configLocation = "/run/secrets_restic/${serviceName}";
 generator = name: v: pkgs.writeText "template" (lib.generators.toINIWithGlobalSection {} { globalSection = v; });
+user = instance.user;
 };
 in
 {
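With the global `shb.restic.user`/`shb.restic.group` options and their assertions removed above, each instance now carries its own `user`, which feeds both the generated `services.restic.backups.<name>.user` and the tmpfiles rule that pre-creates local repository directories. A rough sketch of the resulting wiring, with illustrative names:

```nix
# Assumed instance named "myservice" with one local repository.
shb.restic.instances.myservice = {
  enable = true;
  user = "myservice";
  sourceDirectories = [ "/var/lib/myservice" ];
  repositories = [ { path = "/srv/backups/myservice"; timerConfig.OnCalendar = "daily"; } ];
};
# ...which the block translates, roughly, into:
#   systemd.tmpfiles.rules = [ "d '/srv/backups/myservice' 0750 myservice root - -" ];
#   services.restic.backups."myservice_<repo slug>".user = "myservice";
#   services.restic.backups."myservice_<repo slug>".paths = [ "/var/lib/myservice" ];
```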
@@ -18,18 +18,18 @@ Integration tests are defined in [`/test/blocks/restic.nix`](@REPO@/test/blocks/
 
 The following snippet shows how to configure
 the backup of 1 folder to 1 repository.
-Assumptions:
-- 1 hard drive pool is used for backup and is mounted on `/srv/pool1`.
+We assume that the folder is used by the `myservice` service and is owned by a user of the same name.
 
 ```nix
-shb.restic.instances.myfolder = {
+shb.restic.instances.myservice = {
 enable = true;
 
+user = "myservice";
+
 passphraseFile = "<path/to/passphrase>";
 
 repositories = [{
-path = "/srv/pool1/backups/myfolder";
+path = "/srv/backups/myservice";
 timerConfig = {
 OnCalendar = "00:00:00";
 RandomizedDelaySec = "3h";
@@ -47,17 +47,9 @@ shb.restic.instances.myfolder = {
 keep_weekly = 4;
 keep_monthly = 6;
 };
-
-consistency = {
-repository = "2 weeks";
-archives = "1 month";
-};
 };
 ```
 
-To be secure, the `passphraseFile` must contain a secret that is deployed out of band, otherwise it will be world-readable in the nix store.
-To achieve that, I recommend [sops](usage.html#usage-secrets) although other methods work great too.
-
 ### One folder backed up to S3 {#blocks-restic-usage-remote}
 
 Here we will only highlight the differences with the previous configuration.
@@ -65,7 +57,7 @@ Here we will only highlight the differences with the previous configuration.
 This assumes you have access to such a remote S3 store, for example by using [Backblaze](https://www.backblaze.com/).
 
 ```diff
-shb.backup.instances.myfolder = {
+shb.backup.instances.myservice = {
 
 repositories = [{
 - path = "/srv/pool1/backups/myfolder";
@@ -83,6 +75,48 @@ This assumes you have access to such a remote S3 store, for example by using [Ba
 }
 ```
 
+### Secrets {#blocks-restic-secrets}
+
+To be secure, the secrets should be deployed out of band, otherwise they will be world-readable in the nix store.
+
+To achieve that, I recommend [sops](usage.html#usage-secrets) although other methods work great too.
+The code to backup to Backblaze with secrets stored in Sops would look like so:
+
+```nix
+shb.restic.instances.myfolder.passphraseFile = config.sops.secrets."myservice/backup/passphrase".path;
+shb.restic.instances.myfolder.repositories = [
+{
+path = "s3:s3.us-west-000.backblazeb2.com/<mybucket>";
+secrets = {
+AWS_ACCESS_KEY_ID.source = config.sops.secrets."backup/b2/access_key_id".path;
+AWS_SECRET_ACCESS_KEY.source = config.sops.secrets."backup/b2/secret_access_key".path;
+};
+}
+];
+
+sops.secrets."myservice/backup/passphrase" = {
+sopsFile = ./secrets.yaml;
+mode = "0400";
+owner = "myservice";
+group = "myservice";
+};
+sops.secrets."backup/b2/access_key_id" = {
+sopsFile = ./secrets.yaml;
+mode = "0400";
+owner = "myservice";
+group = "myservice";
+};
+sops.secrets."backup/b2/secret_access_key" = {
+sopsFile = ./secrets.yaml;
+mode = "0400";
+owner = "myservice";
+group = "myservice";
+};
+```
+
+Pay attention that the owner must be the `myservice` user, the one owning the files to be backed up.
+A `secrets` contract is in progress that will allow one to not care about such details.
+
 ### Multiple directories to multiple destinations {#blocks-restic-usage-multiple}
 
 The following snippet shows how to configure backup of any number of folders to 3 repositories,
@@ -151,11 +185,6 @@ backupcfg = repositories: name: sourceDirectories {
 keep_monthly = 6;
 };
 
-consistency = {
-repository = "2 weeks";
-archives = "1 month";
-};
-
 environmentFile = true;
 };
 ```
@@ -6,13 +6,6 @@ lib.types.submodule {
 user = lib.mkOption {
 description = "Unix user doing the backups.";
 type = lib.types.str;
-default = "backup";
-};
-
-group = lib.mkOption {
-description = "Unix group doing the backups.";
-type = lib.types.str;
-default = "backup";
 };
 
 sourceDirectories = lib.mkOption {
@@ -26,18 +19,6 @@ lib.types.submodule {
 default = [];
 };
 
-retention = lib.mkOption {
-description = "Backup files retention.";
-type = lib.types.attrsOf (lib.types.oneOf [ lib.types.int lib.types.nonEmptyStr ]);
-default = {
-keep_within = "1d";
-keep_hourly = 24;
-keep_daily = 7;
-keep_weekly = 4;
-keep_monthly = 6;
-};
-};
-
 hooks = lib.mkOption {
 description = "Hooks to run around the backup.";
 default = {};
@@ -4,6 +4,11 @@ This NixOS contract represents a backup job
 that will backup one or more files or directories
 at a regular schedule.
 
+It is a contract between a service that has files to be backed up
+and a service that backs up files.
+All options in this contract should be set by the former.
+The latter will then use the values of those options to know what to backup.
+
 ## Contract Reference {#backup-contract-options}
 
 These are all the options that are expected to exist for this contract to be respected.
@@ -16,41 +21,85 @@ source: @OPTIONS_JSON@
 
 ## Usage {#backup-contract-usage}
 
-A service that can be backed up will provide a `backup` option, like for the [Vaultwarden service][vaultwarden-service-backup].
-What this option defines is an implementation detail of that service
-but it will at least define what directories to backup
+A service that can be backed up will provide a `backup` option.
+What this option defines is, from the user perspective - that is _you_ - an implementation detail
+but it will at least define what directories to backup,
+the user to backup with
 and possibly hooks to run before or after the backup job runs.
 
-[vaultwarden-service-backup]: services-vaultwarden.html#services-vaultwarden-options-shb.vaultwarden.backup
+Here is an example module defining such a `backup` option:
 
 ```nix
-shb.<service>.backup
-```
-
-Let's assume a module implementing this contract is available under the `shb.<backup_impl>` variable.
-Then, to actually backup the service, one would write:
-
-```nix
-shb.<backup_impl>.instances."<service>" = shb.<service>.backup // {
-enable = true;
-
-# Options specific to backup_impl
+{
+options = {
+myservice.backup = lib.mkOption {
+type = contracts.backup;
+readOnly = true;
+default = {
+user = "myservice";
+sourceDirectories = [
+"/var/lib/myservice"
+];
+};
+};
+};
 };
 ```
 
-Then, for extra caution, a second backup could be made using another module `shb.<backup_impl_2>`:
+As you can see, NixOS modules are a bit abused to make contracts work.
+Default values are set as well as the `readOnly` attribute to ensure those values stay as defined.
+
+Now, on the other side we have a service that uses this `backup` option and actually backs up files.
+Let's assume such a module is available under the `backupservice` option
+and that one can create multiple backup instances under `backupservice.instances`.
+Then, to actually backup the `myservice` service, one would write:
 
 ```nix
-shb.<backup_impl_2>.instances."<service>" = shb.<service>.backup // {
+backupservice.instances.myservice = myservice.backup // {
 enable = true;
 
-# Options specific to backup_impl_2
+repository = {
+path = "/srv/backup/myservice";
+};
+
+# ... Other options specific to backupservice like scheduling.
 };
 ```
 
+It is advised to backup files to different locations, to improve redundancy.
+Thanks to using contracts, this can be done easily either with the same `backupservice`:
+
+```nix
+backupservice.instances.myservice_2 = myservice.backup // {
+enable = true;
+
+repository = {
+path = "<remote path>";
+};
+};
+```
+
+Or with another module `backupservice_2`!
+
 ## Provided Implementations {#backup-contract-impl}
 
+An implementation here is a service that understands the `backup` contract
+and will backup the files accordingly.
+
 One implementation is provided out of the box:
 - [Restic block](blocks-restic.html).
 
 A second one based on `borgbackup` is in progress.
+
+## Services Providing `backup` Option {#backup-contract-services}
+
+- <!-- [ -->Audiobookshelf<!-- ](services-audiobookshelf.html). --> (no manual yet)
+- <!-- [ -->Deluge<!--](services-deluge.html). --> (no manual yet)
+- <!-- [ -->Grocy<!--](services-grocy.html). --> (no manual yet)
+- <!-- [ -->Hledger<!--](services-hledger.html). --> (no manual yet)
+- <!-- [ -->Home Assistant<!--](services-home-assistant.html). --> (no manual yet)
+- <!-- [ -->Jellyfin<!--](services-jellyfin.html). --> (no manual yet)
+- <!-- [ -->LLDAP<!--](blocks-ldap.html). --> (no manual yet)
+- [Nextcloud](services-nextcloud.html#services-nextcloud-server-usage-backup).
+- [Vaultwarden](services-vaultwarden.html#services-vaultwarden-backup).
+- <!-- [ -->*arr<!--](services-arr.html). --> (no manual yet)
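The contract docs above only describe the consuming side in prose. As a hedged sketch (not the actual module code), an implementation like the Restic block can be pictured as mapping each instance's contract fields onto `services.restic.backups`; `backupservice.instances` is the hypothetical option name used in the docs:

```nix
{ config, lib, ... }: {
  # Sketch only: shows how the contract fields (user, sourceDirectories, hooks)
  # could be consumed by an implementation; everything else is illustrative.
  config.services.restic.backups = lib.mapAttrs (name: instance: {
    inherit (instance) user;                 # contract: Unix user owning the files
    paths = instance.sourceDirectories;      # contract: directories to back up
    backupPrepareCommand =
      lib.concatStringsSep "\n" (instance.hooks.before_backup or [ ]);
  }) config.backupservice.instances;
}
```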
@@ -277,20 +277,6 @@ let
 ];
 };
 
-backup = name: {
-systemd.tmpfiles.rules = [
-"d '${config.shb.arr.${name}.dataDir}' 0750 ${config.services.${name}.user} ${config.services.${name}.group} - -"
-];
-users.groups.${name} = {
-members = [ "backup" ];
-};
-systemd.services.${name}.serviceConfig = {
-# Setup permissions needed for backups, as the backup user is member of the jellyfin group.
-UMask = lib.mkForce "0027";
-StateDirectoryMode = lib.mkForce "0750";
-};
-};
-
 appOption = name: c: lib.nameValuePair name (lib.mkOption {
 description = "Configuration for ${name}";
 default = {};
@@ -347,6 +333,7 @@ let
 '';
 readOnly = true;
 default = {
+user = name;
 sourceDirectories = [
 cfg.${name}.dataDir
 ];
@@ -386,7 +373,6 @@ in
 
 shb.nginx.vhosts = [ (vhosts {} cfg') ];
 }))
-(lib.mkIf cfg.radarr.enable (backup "radarr"))
 
 (lib.mkIf cfg.sonarr.enable (
 let
@@ -416,7 +402,6 @@ in
 
 shb.nginx.vhosts = [ (vhosts {} cfg') ];
 }))
-(lib.mkIf cfg.sonarr.enable (backup "sonarr"))
 
 (lib.mkIf cfg.bazarr.enable (
 let
@@ -443,7 +428,6 @@ in
 
 shb.nginx.vhosts = [ (vhosts {} cfg') ];
 }))
-(lib.mkIf cfg.bazarr.enable (backup "bazarr"))
 
 (lib.mkIf cfg.readarr.enable (
 let
@@ -465,7 +449,6 @@ in
 
 shb.nginx.vhosts = [ (vhosts {} cfg') ];
 }))
-(lib.mkIf cfg.readarr.enable (backup "readarr"))
 
 (lib.mkIf cfg.lidarr.enable (
 let
@@ -492,7 +475,6 @@ in
 
 shb.nginx.vhosts = [ (vhosts {} cfg') ];
 }))
-(lib.mkIf cfg.lidarr.enable (backup "lidarr"))
 
 (lib.mkIf cfg.jackett.enable (
 let
@@ -503,6 +485,7 @@ in
 enable = true;
 dataDir = "/var/lib/jackett";
 };
+# TODO: avoid implicitly relying on the media group
 users.users.jackett = {
 extraGroups = [ "media" ];
 };
@@ -516,6 +499,5 @@ in
 extraBypassResources = [ "^/dl.*" ];
 } cfg') ];
 }))
-(lib.mkIf cfg.jackett.enable (backup "jackett"))
 ];
 }
@@ -100,6 +100,7 @@ in
 '';
 readOnly = true;
 default = {
+user = "audiobookshelf";
 sourceDirectories = [
 "/var/lib/audiobookshelf"
 ];
@@ -162,17 +163,6 @@ in
 ];
 }
 ];
 
-# We want audiobookshelf to create files in the media group and to make those files group readable.
-users.users.audiobookshelf = {
-extraGroups = [ "media" ];
-};
-systemd.services.audiobookshelfd.serviceConfig.Group = lib.mkForce "media";
-systemd.services.audiobookshelfd.serviceConfig.UMask = lib.mkForce "0027";
-
-# We backup the whole audiobookshelf directory and set permissions for the backup user accordingly.
-users.groups.audiobookshelf.members = [ "backup" ];
-users.groups.media.members = [ "backup" ];
 } {
 systemd.services.audiobookshelfd.serviceConfig = cfg.extraServiceConfig;
 }]);
@@ -251,6 +251,7 @@ in
 '';
 readOnly = true;
 default = {
+user = "deluge";
 sourceDirectories = [
 cfg.dataDir
 ];
@@ -373,17 +374,6 @@ in
 inherit (cfg) authEndpoint;
 }))
 ];
 
-# We want deluge to create files in the media group and to make those files group readable.
-users.users.deluge = {
-extraGroups = [ "media" ];
-};
-systemd.services.deluged.serviceConfig.Group = lib.mkForce "media";
-systemd.services.deluged.serviceConfig.UMask = lib.mkForce "0027";
-
-# We backup the whole deluge directory and set permissions for the backup user accordingly.
-users.groups.deluge.members = [ "backup" ];
-users.groups.media.members = [ "backup" ];
 } {
 systemd.services.deluged.serviceConfig = cfg.extraServiceConfig;
 } (lib.mkIf (config.shb.deluge.prometheusScraperPasswordFile != null) {
@@ -80,6 +80,7 @@ in
 '';
 readOnly = true;
 default = {
+user = "grocy";
 sourceDirectories = [
 cfg.dataDir
 ];
@@ -115,10 +116,6 @@ in
 sslCertificate = lib.mkIf (!(isNull cfg.ssl)) cfg.ssl.paths.cert;
 sslCertificateKey = lib.mkIf (!(isNull cfg.ssl)) cfg.ssl.paths.key;
 };
 
-# We backup the whole grocy directory and set permissions for the backup user accordingly.
-users.groups.grocy.members = [ "backup" ];
-users.groups.media.members = [ "backup" ];
 } {
 systemd.services.grocyd.serviceConfig = cfg.extraServiceConfig;
 }]);
@@ -72,6 +72,7 @@ in
 '';
 readOnly = true;
 default = {
+user = "hledger";
 sourceDirectories = [
 cfg.dataDir
 ];
@@ -154,6 +154,7 @@ in
 '';
 readOnly = true;
 default = {
+user = "hass";
 # No need for backup hooks as we use an hourly automation job in home assistant directly with a cron job.
 sourceDirectories = [
 "/var/lib/hass/backups"
@@ -322,22 +323,5 @@ in
 "f ${config.services.home-assistant.configDir}/scenes.yaml 0755 hass hass"
 "f ${config.services.home-assistant.configDir}/scripts.yaml 0755 hass hass"
 ];
 
-# Adds the "backup" user to the "hass" group.
-users.groups.hass = {
-members = [ "backup" ];
-};
-
-# This allows the "backup" user, member of the "backup" group, to access what's inside the home
-# folder, which is needed for accessing the "backups" folder. It allows to read (r), enter the
-# directory (x) but not modify what's inside.
-users.users.hass.homeMode = "0750";
-
-systemd.services.home-assistant.serviceConfig = {
-# This allows all members of the "hass" group to read files, list directories and enter
-# directories created by the home-assistant service. This is needed for the "backup" user,
-# member of the "hass" group, to backup what is inside the "backup/" folder.
-UMask = lib.mkForce "0027";
-};
 };
 }
@@ -138,6 +138,7 @@ in
 '';
 readOnly = true;
 default = {
+user = "jellyfin";
 sourceDirectories = [
 "/var/lib/jellyfin"
 ];
@@ -153,16 +154,6 @@ in
 allowedUDPPorts = [ 1900 7359 ];
 };
 
-users.groups = {
-media = {
-name = "media";
-members = [ "jellyfin" ];
-};
-jellyfin = {
-members = [ "backup" ];
-};
-};
-
 services.nginx.enable = true;
 
 # Take advice from https://jellyfin.org/docs/general/networking/nginx/ and https://nixos.wiki/wiki/Plex
@@ -432,13 +423,5 @@ in
 redirect_uris = [ "https://${cfg.subdomain}.${cfg.domain}/sso/OID/r/${cfg.sso.provider}" ];
 }
 ];
 
-# For backup
-
-systemd.services.jellyfin.serviceConfig = {
-# Setup permissions needed for backups, as the backup user is member of the jellyfin group.
-UMask = lib.mkForce "0027";
-StateDirectoryMode = lib.mkForce "0750";
-};
 };
 }
@@ -515,6 +515,7 @@ in
 '';
 readOnly = true;
 default = {
+user = "nextcloud";
 sourceDirectories = [
 cfg.dataDir
 ];
@@ -568,12 +569,6 @@ in
 };
 };
 
-# users.groups = {
-# nextcloud = {
-# members = [ "backup" ];
-# };
-# };
-
 # LDAP is manually configured through
 # https://github.com/lldap/lldap/blob/main/example_configs/nextcloud.md, see also
 # https://docs.nextcloud.com/server/latest/admin_manual/configuration_user/user_auth_ldap.html
@@ -708,10 +703,6 @@ in
 
 services.postgresql.settings = lib.mkIf (! (isNull cfg.postgresSettings)) cfg.postgresSettings;
 
-systemd.services.phpfpm-nextcloud.serviceConfig = {
-# Setup permissions needed for backups, as the backup user is member of the jellyfin group.
-UMask = lib.mkForce "0027";
-};
 systemd.services.phpfpm-nextcloud.preStart = ''
 mkdir -p /var/log/xdebug; chown -R nextcloud: /var/log/xdebug
 '';
@@ -275,9 +275,19 @@ shb.nextcloud.postgresSettings = {
 };
 ```
 
-### Backup the Nextcloud data {#services-nextcloud-server-usage-backup}
+### Backup {#services-nextcloud-server-usage-backup}
 
-TODO
+Backing up Nextcloud using the [Restic block](blocks-restic.html) is done like so:
+
+```nix
+shb.restic.instances."nextcloud" = config.shb.nextcloud.backup // {
+enable = true;
+};
+```
+
+The name `"nextcloud"` in the `instances` can be anything.
+The `config.shb.nextcloud.backup` option specifies what directories to backup.
+You can define any number of Restic instances to backup Nextcloud multiple times.
 
 ### Enable Preview Generator App {#services-nextcloud-server-usage-previewgenerator}
 
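Since the new docs state that any number of Restic instances can reuse `config.shb.nextcloud.backup`, a second instance pointed at another repository is just one more merge of the same attrset. A small sketch, with placeholder secret and repository values:

```nix
# Illustrative second instance; only the instance name and repository differ.
shb.restic.instances."nextcloud-offsite" = config.shb.nextcloud.backup // {
  enable = true;
  passphraseFile = "<path/to/passphrase>";
  repositories = [ { path = "s3:s3.example.com/<bucket>"; timerConfig.OnCalendar = "daily"; } ];
};
```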
@@ -132,6 +132,7 @@ in
 '';
 readOnly = true;
 default = {
+user = "vaultwarden";
 sourceDirectories = [
 dataFolder
 ];
@@ -224,17 +225,6 @@ in
 passwordFile = builtins.toString cfg.databasePasswordFile;
 }
 ];
 
-systemd.services.vaultwarden.serviceConfig.UMask = lib.mkForce "0027";
-# systemd.services.vaultwarden.serviceConfig.Group = lib.mkForce "media";
-users.users.vaultwarden = {
-extraGroups = [ "media" ];
-};
-
-users.groups.vaultwarden = {
-members = [ "backup" ];
-};
-
 # TODO: make this work.
 # It does not work because it leads to infinite recursion.
 # ${cfg.mount}.path = dataFolder;
@@ -8,20 +8,15 @@ let
 base = testLib.base [
 ../../modules/blocks/restic.nix
 ];
-in
-{
-backupAndRestore = pkgs.testers.runNixOSTest {
-name = "restic_backupAndRestore";
+commonTest = user: pkgs.testers.runNixOSTest {
+name = "restic_backupAndRestore_${user}";
 
 nodes.machine = {
 imports = ( testLib.baseImports pkgs' ) ++ [
 ../../modules/blocks/restic.nix
 ];
 
-shb.restic = {
-user = "root";
-group = "root";
-};
 shb.restic.instances."testinstance" = {
 enable = true;
 
@@ -32,6 +27,8 @@ in
 "/opt/files/B"
 ];
 
+user = user;
+
 repositories = [
 {
 path = "/opt/repos/A";
@@ -42,8 +39,8 @@ in
 # Those are not needed by the repository but are still included
 # so we can test them in the hooks section.
 secrets = {
-A.source = pkgs.writeText "A" "secretA";
-B.source = pkgs.writeText "B" "secretB";
+A.source = "/run/secrets/A";
+B.source = "/run/secrets/B";
 };
 }
 {
@@ -58,7 +55,7 @@ in
 hooks.before_backup = [''
 echo $RUNTIME_DIRECTORY
 if [ "$RUNTIME_DIRECTORY" = /run/restic-backups-testinstance_opt_repos_A ]; then
-if ! [ -f /run/secrets/restic/restic-backups-testinstance_opt_repos_A ]; then
+if ! [ -f /run/secrets_restic/restic-backups-testinstance_opt_repos_A ]; then
 exit 10
 fi
 if [ -z "$A" ] || ! [ "$A" = "secretA" ]; then
@@ -66,7 +63,7 @@ in
 exit 11
 fi
 if [ -z "$B" ] || ! [ "$B" = "secretB" ]; then
-echo "A:$A"
+echo "B:$B"
 exit 12
 fi
 fi
@@ -77,9 +74,7 @@ in
 extraPythonPackages = p: [ p.dictdiffer ];
 skipTypeCheck = true;
 
-testScript = { nodes, ... }: let
-instanceCfg = nodes.machine.shb.restic.instances."testinstance";
-in ''
+testScript = ''
 from dictdiffer import diff
 
 def list_files(dir):
@@ -102,19 +97,31 @@ in
 if len(result) > 0:
 raise Exception("Unexpected files:", result)
 
+with subtest("Create secrets"):
+print(machine.succeed("""
+mkdir -p /run/secrets/
+
+echo secretA > /run/secrets/A
+echo secretB > /run/secrets/B
+
+chown root:keys -R /run/secrets
+find /run/secrets -type d -exec chmod u=rwx,g=rx,o=x '{}' ';'
+find /run/secrets -type f -exec chmod u=r,g=r,o= '{}' ';'
+ls -l /run/secrets
+"""))
+
 with subtest("Create initial content"):
 machine.succeed("""
 mkdir -p /opt/files/A
 mkdir -p /opt/files/B
-mkdir -p /opt/repos/A
-mkdir -p /opt/repos/B
 
 echo repoA_fileA_1 > /opt/files/A/fileA
 echo repoA_fileB_1 > /opt/files/A/fileB
 echo repoB_fileA_1 > /opt/files/B/fileA
 echo repoB_fileB_1 > /opt/files/B/fileB
 
-# chown :backup -R /opt/files
+chown ${user}: -R /opt/files
+chmod go-rwx -R /opt/files
 """)
 
 assert_files("/opt/files", {
@@ -176,5 +183,10 @@ in
 '/opt/files/A/fileB': 'repoA_fileB_2',
 })
 '';
 
 };
+in
+{
+backupAndRestoreRoot = commonTest "root";
+backupAndRestoreUser = commonTest "nobody";
 }
@@ -56,15 +56,8 @@ in
 
 testRadarr = {
 expected = {
-systemd.services.radarr = {
-serviceConfig = {
-StateDirectoryMode = "0750";
-UMask = "0027";
-};
-};
-systemd.tmpfiles.rules = [
-"d '/var/lib/radarr' 0750 radarr radarr - -"
-];
+users = {};
+systemd.services.radarr = {};
 shb.nginx.vhosts = [
 {
 autheliaRules = [
@@ -90,7 +83,6 @@ in
 ssl = null;
 }
 ];
-users.groups.radarr.members = [ "backup" ];
 services.nginx.enable = true;
 services.bazarr = {};
 services.jackett = {};
@@ -122,15 +114,8 @@ in
 
 testRadarrWithBackup = {
 expected = {
-systemd.services.radarr = {
-serviceConfig = {
-StateDirectoryMode = "0750";
-UMask = "0027";
-};
-};
-systemd.tmpfiles.rules = [
-"d '/var/lib/radarr' 0750 radarr radarr - -"
-];
+users = {};
+systemd.services.radarr = {};
 shb.nginx.vhosts = [
 {
 autheliaRules = [
@@ -156,7 +141,6 @@ in
 ssl = null;
 }
 ];
-users.groups.radarr.members = [ "backup" ];
 services.nginx.enable = true;
 services.bazarr = {};
 services.jackett = {};
@@ -87,6 +87,25 @@ let
 authEndpoint = "https://${config.shb.authelia.subdomain}.${config.shb.authelia.domain}";
 };
 };
+
+backup = { config, ... }: {
+imports = [
+../../modules/blocks/restic.nix
+];
+shb.restic.instances."testinstance" = config.shb.vaultwarden.backup // {
+enable = true;
+passphraseFile = pkgs.writeText "passphrase" "PassPhrase";
+repositories = [
+{
+path = "/opt/repos/A";
+timerConfig = {
+OnCalendar = "00:00:00";
+RandomizedDelaySec = "5h";
+};
+}
+];
+};
+};
 in
 {
 basic = pkgs.testers.runNixOSTest {
@@ -168,4 +187,25 @@ in
 '';
 };
 };
+
+backup = pkgs.testers.runNixOSTest {
+name = "vaultwarden_backup";
+
+nodes.server = { config, ... }: {
+imports = [
+base
+basic
+backup
+];
+};
+
+nodes.client = {};
+
+testScript = commonTestScript.override {
+extraScript = { proto_fqdn, ... }: ''
+with subtest("backup"):
+server.succeed("systemctl start restic-backups-testinstance_opt_repos_A")
+'';
+};
+};
 }