add flake with some modules

parent eb8776eb46
commit 8761dc2e9d

8 changed files with 1379 additions and 0 deletions
flake.lock (new file, 79 lines)
@@ -0,0 +1,79 @@
{
  "nodes": {
    "nixpkgs": {
      "locked": {
        "lastModified": 1687412861,
        "narHash": "sha256-Z/g0wbL68C+mSGerYS2quv9FXQ1RRP082cAC0Bh4vcs=",
        "owner": "nixos",
        "repo": "nixpkgs",
        "rev": "e603dc5f061ca1d8a19b3ede6a8cf9c9fcba6cdc",
        "type": "github"
      },
      "original": {
        "owner": "nixos",
        "ref": "nixos-unstable",
        "repo": "nixpkgs",
        "type": "github"
      }
    },
    "nixpkgs-stable": {
      "locked": {
        "lastModified": 1687031877,
        "narHash": "sha256-yMFcVeI+kZ6KD2QBrFPNsvBrLq2Gt//D0baHByMrjFY=",
        "owner": "NixOS",
        "repo": "nixpkgs",
        "rev": "e2e2059d19668dab1744301b8b0e821e3aae9c99",
        "type": "github"
      },
      "original": {
        "owner": "NixOS",
        "ref": "release-23.05",
        "repo": "nixpkgs",
        "type": "github"
      }
    },
    "nixpkgs_2": {
      "locked": {
        "lastModified": 1686979235,
        "narHash": "sha256-gBlBtk+KrezFkfMrZw6uwTuA7YWtbFciiS14mEoTCo0=",
        "owner": "NixOS",
        "repo": "nixpkgs",
        "rev": "7cc30fd5372ddafb3373c318507d9932bd74aafe",
        "type": "github"
      },
      "original": {
        "owner": "NixOS",
        "ref": "nixpkgs-unstable",
        "repo": "nixpkgs",
        "type": "github"
      }
    },
    "root": {
      "inputs": {
        "nixpkgs": "nixpkgs",
        "sops-nix": "sops-nix"
      }
    },
    "sops-nix": {
      "inputs": {
        "nixpkgs": "nixpkgs_2",
        "nixpkgs-stable": "nixpkgs-stable"
      },
      "locked": {
        "lastModified": 1687398569,
        "narHash": "sha256-e/umuIKFcFtZtWeX369Hbdt9r+GQ48moDmlTcyHWL28=",
        "owner": "Mic92",
        "repo": "sops-nix",
        "rev": "2ff6973350682f8d16371f8c071a304b8067f192",
        "type": "github"
      },
      "original": {
        "owner": "Mic92",
        "repo": "sops-nix",
        "type": "github"
      }
    }
  },
  "root": "root",
  "version": 7
}
flake.nix (new file, 22 lines)
@@ -0,0 +1,22 @@
{
  description = "SelfHostBlocks module";

  inputs = {
    nixpkgs.url = "github:nixos/nixpkgs/nixos-unstable";
    sops-nix.url = "github:Mic92/sops-nix";
  };

  outputs = inputs@{ self, nixpkgs, sops-nix, ... }: {
    nixosModules.default = { config, ... }: {
      imports = [
        modules/backup.nix
        modules/jellyfin.nix
        modules/haproxy.nix
        modules/home-assistant.nix
        modules/nextcloud-server.nix
      ];
    };

    # templates.default = {}; Would be nice to have a template
  };
}
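As a sketch of how this flake could be consumed by a downstream machine (everything here is hypothetical and not part of this commit: the flake owner, the "myserver" host name, and configuration.nix are placeholders), a consumer flake would import nixosModules.default:

  {
    inputs = {
      nixpkgs.url = "github:nixos/nixpkgs/nixos-unstable";
      # OWNER is a placeholder; point this at wherever the repo is hosted.
      selfhostblocks.url = "github:OWNER/selfhostblocks";
    };

    outputs = { self, nixpkgs, selfhostblocks, ... }: {
      nixosConfigurations.myserver = nixpkgs.lib.nixosSystem {
        system = "x86_64-linux";
        modules = [
          # Pulls in all the shb.* modules added by this commit.
          selfhostblocks.nixosModules.default
          ./configuration.nix
        ];
      };
    };
  }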
modules/backup.nix (new file, 269 lines)
@@ -0,0 +1,269 @@
{ config, pkgs, lib, ... }:

let
  cfg = config.shb.backup;

  instanceOptions = {
    backend = lib.mkOption {
      description = "What program to use to make the backups.";
      type = lib.types.enum [ "borgmatic" "restic" ];
      example = "borgmatic";
    };

    keySopsFile = lib.mkOption {
      description = "Sops file that holds this instance's Borgmatic repository key and passphrase.";
      type = lib.types.path;
      example = "secrets/backup.yaml";
    };

    sourceDirectories = lib.mkOption {
      description = "Borgmatic source directories.";
      type = lib.types.nonEmptyListOf lib.types.str;
    };

    excludePatterns = lib.mkOption {
      description = "Borgmatic exclude patterns.";
      type = lib.types.listOf lib.types.str;
      default = [];
    };

    repositories = lib.mkOption {
      description = lib.mdDoc "Repositories to back this instance to.";
      type = lib.types.nonEmptyListOf lib.types.str;
    };

    retention = lib.mkOption {
      description = "Retention options.";
      type = lib.types.attrsOf (lib.types.oneOf [ lib.types.int lib.types.nonEmptyStr ]);
      default = {
        keep_within = "1d";
        keep_hourly = 24;
        keep_daily = 7;
        keep_weekly = 4;
        keep_monthly = 6;
      };
    };

    consistency = lib.mkOption {
      description = "Consistency frequency options. Only applicable for borgmatic.";
      type = lib.types.attrsOf lib.types.nonEmptyStr;
      default = {};
      example = {
        repository = "2 weeks";
        archives = "1 month";
      };
    };

    hooks = lib.mkOption {
      description = "Borgmatic hooks.";
      default = {};
      type = lib.types.submodule {
        options = {
          before_backup = lib.mkOption {
            description = "Hooks to run before backup.";
            type = lib.types.listOf lib.types.str;
            default = [];
          };

          after_backup = lib.mkOption {
            description = "Hooks to run after backup.";
            type = lib.types.listOf lib.types.str;
            default = [];
          };
        };
      };
    };

    environmentFile = lib.mkOption {
      type = lib.types.bool;
      description = "Add environment file to be read by the systemd service.";
      default = false;
      example = true;
    };
  };
in
{
  options.shb.backup = {
    onlyOnAC = lib.mkOption {
      description = lib.mdDoc "Run backups only if AC power is plugged in.";
      default = true;
      example = false;
      type = lib.types.bool;
    };

    user = lib.mkOption {
      description = lib.mdDoc "Unix user doing the backups.";
      type = lib.types.str;
      default = "backup";
    };

    group = lib.mkOption {
      description = lib.mdDoc "Unix group doing the backups.";
      type = lib.types.str;
      default = "backup";
    };

    instances = lib.mkOption {
      description = lib.mdDoc "Each instance is a backup setting.";
      default = {};
      type = lib.types.attrsOf (lib.types.submodule {
        options = instanceOptions;
      });
    };

    borgServer = lib.mkOption {
      description = lib.mdDoc "Add borgbackup package so external backups can use this server as a remote.";
      default = false;
      example = true;
      type = lib.types.bool;
    };
  };

  config = lib.mkIf (cfg.instances != {}) (
    let
      borgmaticInstances = lib.attrsets.filterAttrs (k: i: i.backend == "borgmatic") cfg.instances;
      resticInstances = lib.attrsets.filterAttrs (k: i: i.backend == "restic") cfg.instances;
    in
    {
      users.users = {
        ${cfg.user} = {
          name = cfg.user;
          group = cfg.group;
          home = "/var/lib/backup";
          createHome = true;
          isSystemUser = true;
        };
      };
      users.groups = {
        ${cfg.group} = {
          name = cfg.group;
        };
      };

      sops.secrets =
        let
          repoSlugName = name: builtins.replaceStrings ["/"] ["_"] (lib.strings.removePrefix "/" name);

          mkSopsSecret = name: instance: (
            [
              {
                "${instance.backend}/${name}/passphrase" = {
                  sopsFile = instance.keySopsFile;
                  mode = "0440";
                  owner = cfg.user;
                  group = cfg.group;
                };
              }
            ] ++ lib.optional ((lib.filter (lib.strings.hasPrefix "s3") instance.repositories) != []) {
              "${instance.backend}/${name}/environmentfile" = {
                sopsFile = instance.keySopsFile;
                mode = "0440";
                owner = cfg.user;
                group = cfg.group;
              };
            }
          );
        in
        lib.mkMerge (lib.flatten (lib.attrsets.mapAttrsToList mkSopsSecret cfg.instances));

      systemd.timers.borgmatic = lib.mkIf (borgmaticInstances != {}) {
        timerConfig = {
          OnCalendar = "hourly";
        };
      };

      systemd.services.borgmatic = lib.mkIf (borgmaticInstances != {}) {
        serviceConfig = {
          User = cfg.user;
          Group = cfg.group;
          ExecStartPre = ""; # Do not sleep before starting.
          ExecStart = [ "" "${pkgs.borgmatic}/bin/borgmatic --verbosity -1 --syslog-verbosity 1" ];
          # For borgmatic, since we have only one service, we need to merge all environmentFile
          # from all instances.
          EnvironmentFile = lib.mapAttrsToList (name: value: value.environmentFile) cfg.instances;
        };
      };

      systemd.packages = lib.mkIf (borgmaticInstances != {}) [ pkgs.borgmatic ];
      environment.systemPackages = (
        lib.optionals cfg.borgServer [ pkgs.borgbackup ]
        ++ lib.optionals (borgmaticInstances != {}) [ pkgs.borgbackup pkgs.borgmatic ]
        ++ lib.optionals (resticInstances != {}) [ pkgs.restic ]
      );

      services.restic.backups =
        let
          repoSlugName = name: builtins.replaceStrings ["/" ":"] ["_" "_"] (lib.strings.removePrefix "/" name);

          mkRepositorySettings = name: instance: repository: {
            "${name}_${repoSlugName repository}" = {
              inherit (cfg) user;
              inherit repository;

              paths = instance.sourceDirectories;

              passwordFile = "/run/secrets/${instance.backend}/${name}/passphrase";

              initialize = true;

              timerConfig = {
                OnCalendar = "hourly";
                RandomizedDelaySec = "5m";
              };

              pruneOpts = lib.mapAttrsToList (name: value:
                "--${builtins.replaceStrings ["_"] ["-"] name} ${builtins.toString value}"
              ) instance.retention;

              backupPrepareCommand = lib.strings.concatStringsSep "\n" instance.hooks.before_backup;

              backupCleanupCommand = lib.strings.concatStringsSep "\n" instance.hooks.after_backup;
            } // lib.attrsets.optionalAttrs (instance.environmentFile) {
              environmentFile = "/run/secrets/${instance.backend}/${name}/environmentfile";
            };
          };

          mkSettings = name: instance: builtins.map (mkRepositorySettings name instance) instance.repositories;
        in
        lib.mkMerge (lib.flatten (lib.attrsets.mapAttrsToList mkSettings resticInstances));

      environment.etc =
        let
          mkSettings = name: instance: {
            "borgmatic.d/${name}.yaml".text = lib.generators.toYAML {} {
              location =
                {
                  source_directories = instance.sourceDirectories;
                  repositories = instance.repositories;
                }
                // (lib.attrsets.optionalAttrs (builtins.length instance.excludePatterns > 0) {
                  exclude_patterns = instance.excludePatterns;
                });

              storage = {
                encryption_passcommand = "cat /run/secrets/borgmatic/${name}/passphrase";
              };

              retention = instance.retention;
              consistency.checks =
                let
                  mkCheck = name: frequency: {
                    inherit name frequency;
                  };
                in
                lib.attrsets.mapAttrsToList mkCheck instance.consistency;

              # hooks = lib.mkMerge [
              #   lib.optionalAttrs (builtins.length instance.hooks.before_backup > 0) {
              #     inherit (instance.hooks) before_backup;
              #   }
              #   lib.optionalAttrs (builtins.length instance.hooks.after_backup > 0) {
              #     inherit (instance.hooks) after_backup;
              #   }
              # ];
            };
          };
        in
        lib.mkMerge (lib.attrsets.mapAttrsToList mkSettings borgmaticInstances);
    });
}
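For illustration, a minimal sketch of how a consumer might declare one instance of this backup contract (the instance name, source directory, repository path, and sops file below are all hypothetical):

  shb.backup.instances.mydata = {
    backend = "restic";                    # or "borgmatic"
    keySopsFile = ./secrets/backup.yaml;   # hypothetical sops file holding the passphrase
    sourceDirectories = [ "/var/lib/mydata" ];
    repositories = [ "/srv/backups/mydata" ];
    # retention, consistency, hooks and environmentFile keep their defaults.
  };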
modules/haproxy-configcreator.nix (new file, 492 lines)
@@ -0,0 +1,492 @@
{ lib
, pkgs
}:

with builtins;
with lib;
with lib.attrsets;
with lib.lists;
with lib.strings;
let
  getAttrWithDefault = name: default: attrset:
    if isAttrs attrset && hasAttr name attrset then
      getAttr name attrset
    else
      default;

  recursiveMerge = attrList:
    let f = attrPath:
      zipAttrsWith (n: values:
        if all isList values then
          concatLists values
        else if all isAttrs values then
          f (attrPath ++ [n]) values
        else
          last values
      );
    in f [] attrList;

  augmentedContent = fieldName: rules: parent: set:
    let
      print = {rule = k: parent: v:
        assert assertMsg (isString v || isInt v) "cannot print key '${fieldName}.${k}' of type '${typeOf v}', should be string or int instead";
        "${k} ${toString v}";};

      matchingRule = k: v: findFirst (rule: rule.match k parent v) print rules;

      augment = parent: k: v:
        let
          match = matchingRule k v;
          rule = if hasAttr "rule" match then match.rule else null;
          rules = if hasAttr "rules" match then match.rules else null;
          indent = map (x: if hasAttr "indent" match then match.indent + x else x);
          headerFn = if hasAttr "header" match then match.header else null;
          header = optional (headerFn != null) (headerFn k);
          trailer = optional (headerFn != null) "";
          content = header ++ indent (augmentedContent "${fieldName}.${k}" rules (parent ++ [k]) v) ++ trailer;
        in
        if rule != null
        then rule k parent v
        else
          assert assertMsg (isAttrs v) "attempt to apply rules on key '${toString k}' which is a '${typeOf v}' but should be a set:\n${toString v}";
          if hasAttr "order" match then
            {
              inherit (match) order;
              inherit content;
            }
          else
            content;

      augmented = mapAttrsToList (augment parent) (
        assert assertMsg (isAttrs set) "attempt to apply rules on field ${fieldName} having type '${typeOf set}':\n${toString set}";
        set
      );

      sortAugmented = sort (a: b:
        (isAttrs a && hasAttr "order" a)
        && (isAttrs b && hasAttr "order" b)
        && a.order < b.order
      );

      onlyContent = (x: if isAttrs x && hasAttr "content" x then x.content else x);
    in
    flatten (map onlyContent (sortAugmented augmented));

  updateByPath = path: fn: set:
    if hasAttrByPath path set then
      recursiveUpdate set (setAttrByPath path (fn (getAttrFromPath path set)))
    else
      set;

  schema =
    let
      mkRule =
        { redirect ? false
        , scheme ? "https"
        , code ? null
        , condition ? null
        }:
        concatStringsRecursive " " [
          (optional redirect "redirect")
          "scheme" scheme
          (optional (code != null) "code ${toString code}")
          (optional (condition != null) "if ${condition}")
        ];

      mkBind =
        { addr
        , ssl ? false
        , crt ? null
        }:
        concatStringsRecursive " " [
          "bind"
          addr
          (optional ssl "ssl")
          (optional (crt != null) "crt ${crt}")
        ];

      mkServer =
        { name
        , address
        , balance ? null
        , check ? null
        , httpcheck ? null
        , forwardfor ? true
        , resolvers ? null
        }:
        [
          "mode http"
          (optional forwardfor "option forwardfor")
          (optional (httpcheck != null) "option httpchk ${httpcheck}")
          (optional (balance != null) "balance ${balance}")
          (concatStringsRecursive " " [
            "server"
            name
            address
            (optionals (check != null)
              (if isBool check
               then (if check then ["check"] else [])
               else mapAttrsToList (k: v: "${k} ${v}") check))
            (optional (resolvers != null) "resolvers ${resolvers}")
          ])
        ];

      # Lua's import system requires the import path to be something like:
      #
      #   /nix/store/123-name/<package>/<file.lua>
      #
      # Then the lua-prepend-path can be:
      #
      #   /nix/store/123-name/?/<file.lua>
      #
      # Then when lua code imports <package>, it will search in the
      # prepend paths and replace the question mark with the <package>
      # name to get a match.
      #
      # But the config.source is actually without the <package> name:
      #
      #   /nix/store/123-name/<file.lua>
      #
      # This requires us to create a new directory structure and we're
      # using a linkFarm for this.
      createPluginLinks = configs:
        let
          mkLink = name: config: {
            inherit name;
            path = config.source;
          };
        in
        pkgs.linkFarm "haproxyplugins" (mapAttrsToList mkLink configs);

      mkPlugin = links: name:
        { luapaths ? []
        , cpaths ? []
        , load ? null
        , ...
        }:
        {
          lua-prepend-path =
            let
              f = ext: type: path:
                {
                  inherit type;
                  path =
                    if path == "." then
                      "${links}/${name}/?.${ext}"
                    else
                      "${links}/${name}/${path}/?.${ext}";
                };
            in
            map (f "lua" "path") (toList luapaths)
            ++ map (f "so" "cpath") (toList cpaths);
        } // optionalAttrs (load != null) {
          lua-load = ["${links}/${name}/${load}"];
        };

      # Takes plugins as an attrset of name to {init, load, source},
      # transforms them to a [attrset] with fields lua-prepend-path
      # and optionally lua-load then returns a list of lines with all
      # lua-prepend-path first and all lua-load afterwards.
      mkPlugins = v:
        let
          f = recursiveMerge (mapAttrsToList (mkPlugin (createPluginLinks v)) v);
          lua-prepend-path = map ({path, type}: "lua-prepend-path ${path} ${type}") (getAttrWithDefault "lua-prepend-path" [] f);
          lua-load = map (x: "lua-load ${x}") (getAttrWithDefault "lua-load" [] f);
        in
        lua-prepend-path ++ lua-load;
    in [
      {
        match = k: parent: v: k == "defaults";
        order = 2;
        indent = "  ";
        header = k: k;
        rules = [
          {
            match = k: parent: v: k == "timeout";
            rule = k: parent: v: mapAttrsToList (k1: v1: "${k} ${k1} ${v1}") v;
          }
        ];
      }
      {
        match = k: parent: v: k == "global";
        order = 1;
        indent = "  ";
        header = k: k;
        rules = [
          {
            match = k: parent: v: k == "plugins";
            rule = k: parent: v: mkPlugins v;
          }
          {
            match = k: parent: v: k == "setenv";
            rule = k: parent: v: mapAttrsToList (k: v: "setenv ${k} ${v}") v;
          }
        ];
      }
      {
        match = k: parent: v: k == "resolvers";
        order = 3;
        rules = [
          {
            match = k: parent: v: true;
            header = k: "resolvers " + k;
            indent = "  ";
            rules = [
              {
                match = k: parent: v: k == "nameservers";
                rule = k: parent: v: mapAttrsToList (k1: v1: "nameserver ${k1} ${v1}") v;
              }
            ];
          }
        ];
      }
      {
        match = k: parent: v: k == "frontend";
        order = 4;
        rules = [
          {
            match = k: parent: v: true;
            header = k: "frontend " + k;
            indent = "  ";
            rules = [
              {
                match = k: parent: v: k == "rules";
                rule = k: parent: v: map mkRule v;
              }
              {
                match = k: parent: v: k == "bind" && isAttrs v;
                rule = k: parent: v: mkBind v;
              }
              {
                match = k: parent: v: k == "use_backend";
                rule = k: parent: v:
                  let
                    use = name: value: "use_backend ${name} ${toString value}";
                  in
                  if isList v then
                    map (v: use v.name v.value) v
                  else
                    use v.name v.value;
              }
              {
                match = k: parent: v: true;
                rule = k: parent: v:
                  let
                    l = prefix: v:
                      if isAttrs v then
                        mapAttrsToList (k: v: l "${prefix} ${k}" v) v
                      else if isList v then
                        map (l prefix) v
                      else if isBool v then
                        optional v prefix
                      else
                        assert assertMsg (isString v) "value for field ${k} should be a string, bool, attr or list, got: ${typeOf v}";
                        "${prefix} ${v}";
                  in
                  l k v;
              }
            ];
          }
        ];
      }
      {
        match = k: parent: v: k == "backend";
        order = 5;
        rules = [
          {
            match = k: parent: v: true;
            header = k: "backend " + k;
            indent = "  ";
            rules = [
              {
                match = k: parent: v: k == "options";
                rule = k: parent: v: v;
              }
              {
                match = k: parent: v: k == "servers";
                rule = k: parent: v: map mkServer v;
              }
            ];
          }
        ];
      }
    ];

  concatStringsRecursive = sep: strings:
    concatStringsSep sep (flatten strings);

  assertHasAttr = name: attrPath: v:
    assertMsg
      (hasAttrByPath attrPath v)
      "no ${last attrPath} defined in config for site ${name}.${concatStringsSep "." (init attrPath)}, found attr names: ${toString (attrNames (getAttrFromPath (init attrPath) v))}";

  # Takes a function producing a [nameValuePair], applies it to
  # all name-value pair in the given set and merges the resulting
  # [[nameValuePair]].
  mapAttrsFlatten = f: set: listToAttrs (concatLists (mapAttrsToList f set));

  mapIfIsAttrs = f: value:
    if isAttrs value
    then f value
    else value;

  flattenAttrs = sep: cond: set:
    let
      recurse = mapIfIsAttrs (mapAttrsFlatten (
        n: v: let
          result = recurse v;
        in
        if isAttrs result && cond n v
        then mapAttrsToList (n2: v2: nameValuePair "${n}${sep}${n2}" v2) result
        else [(nameValuePair n result)]
      ));
    in recurse set;
in
{
  inherit updateByPath recursiveMerge;

  default =
    { user
    , group
    , certPath
    , plugins ? {}
    , globalEnvs ? {}
    , stats ? null
    , debug ? false
    , sites ? {}
    , globals ? {}
    , defaults ? {}
    , resolvers ? {}
    }: {
      global = {
        # Silence a warning issued by haproxy. Using 2048
        # instead of the default 1024 makes the connection stronger.
        "tune.ssl.default-dh-param" = 2048;

        maxconn = 20000;

        inherit user group;

        log = "/dev/log local0 info";

        inherit plugins;

        setenv = globalEnvs;
      } // globals;

      defaults = {
        log = "global";
        option = "httplog";

        timeout = {
          connect = "10s";
          client = "15s";
          server = "30s";
          queue = "100s";
        };
      } // defaults;

      frontend = {
        http-to-https = {
          mode = "http";
          bind = "*:80";
          rules = [
            {
              redirect = true;
              scheme = "https";
              code = 301;
              condition = "!{ ssl_fc }";
            }
          ];
          backend = {};
        };

        https = (
          let
            r = (
              [{
                mode = "http";
                bind = {
                  addr = "*:443";
                  ssl = true;
                  crt = certPath;
                };

                http-request = {
                  set-header = [
                    "X-Forwarded-Port %[dst_port]"
                    "X-Forwarded-For %[src]"
                  ];
                  add-header = [
                    "X-Forwarded-Proto https"
                  ];
                };

                http-response = {
                  set-header = [
                    ''Strict-Transport-Security "max-age=15552000; includeSubDomains; preload;"''
                  ];
                };
              }]
              ++ (mapAttrsToList (name: config:
                assert assertHasAttr name ["frontend"] config;
                (updateByPath ["frontend" "use_backend"] (x: [(nameValuePair name x)]) config).frontend
              ) sites)
              ++ (mapAttrsToList (name: config:
                if (hasAttr "debugHeaders" config && (getAttr "debugHeaders" config) != null) then {
                  option = "httplog";
                  http-request = {
                    capture = "req.hdrs len 512 if ${config.debugHeaders}";
                  };
                  log-format = ''"%ci:%cp [%tr] %ft [[%hr]] %hs %{+Q}r"'';
                } else {}
              ) sites)
            );
          in
          recursiveMerge r
        )
        // optionalAttrs (debug) {
          log-format = ''"%ci:%cp [%tr] %ft %b/%s %TR/%Tw/%Tc/%Tr/%Ta %ST %B %CC %CS %tsc %ac/%fc/%bc/%sc/%rc %sq/%bq %hr %hs %{+Q}r %sslv %sslc %[ssl_fc_cipherlist_str]"'';
        };
      } // optionalAttrs (stats != null)
        (let
          stats_ = {
            enable = true;
            port = 8404;
            uri = "/stats";
            refresh = "10s";
            prometheusUri = null;
            hide-version = false;
          } // stats;
        in
        {
          stats = {
            bind = "localhost:${toString stats_.port}";
            mode = "http";
            stats = {
              enable = stats_.enable;
              hide-version = stats_.hide-version;
              uri = stats_.uri;
              refresh = stats_.refresh;
            };
          } // optionalAttrs (stats_.prometheusUri != null) {
            http-request = [
              "use-service prometheus-exporter if { path ${stats_.prometheusUri} }"
            ];
          };
        });

      backend =
        mapAttrs' (name: config:
          assert assertMsg (hasAttr "backend" config) "no backend defined in config for site ${name}, found attr names: ${toString (attrNames config)}";
          nameValuePair name config.backend)
          sites;

      inherit resolvers;
    };

  render = config:
    concatStringsSep "\n" (augmentedContent "" schema [] config);
}
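This file is called with pkgs.callPackage and returns default and render, so a rough usage sketch, mirroring the call site in modules/haproxy.nix below (the user, group, and certificate path are illustrative values), is:

  let
    configcreator = pkgs.callPackage ./haproxy-configcreator.nix {};
  in
  configcreator.render (configcreator.default {
    user = "haproxy";
    group = "haproxy";
    certPath = "/var/lib/acme/example.com/full.pem";  # hypothetical cert path
    sites = {};  # normally populated by the shb.* service modules
  })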
modules/haproxy.nix (new file, 110 lines)
@@ -0,0 +1,110 @@
{ config, pkgs, lib, ... }:

let
  cfg = config.shb.reverseproxy;
in
{
  options.shb.reverseproxy = {
    sopsFile = lib.mkOption {
      type = lib.types.path;
      description = "Sops file location";
      example = "secrets/haproxy.yaml";
    };

    domain = lib.mkOption {
      description = lib.mdDoc "Domain to serve sites under.";
      type = lib.types.str;
    };

    adminEmail = lib.mkOption {
      description = lib.mdDoc "Admin email in case certificate retrieval goes wrong.";
      type = lib.types.str;
    };

    sites = lib.mkOption {
      description = lib.mdDoc "Sites to serve through the reverse proxy.";
      type = lib.types.anything;
      default = {};
      example = {
        homeassistant = {
          frontend = {
            acl = {
              acl_homeassistant = "hdr_beg(host) ha.";
            };
            use_backend = "if acl_homeassistant";
          };
          backend = {
            servers = [
              {
                name = "homeassistant1";
                address = "127.0.0.1:8123";
                forwardfor = false;
                balance = "roundrobin";
                check = {
                  inter = "5s";
                  downinter = "15s";
                  fall = "3";
                  rise = "3";
                };
                httpcheck = "GET /";
              }
            ];
          };
        };
      };
    };
  };

  config = lib.mkIf (cfg.sites != {}) {
    networking.firewall.allowedTCPPorts = [ 80 443 ];

    security.acme = {
      acceptTerms = true;
      certs."${cfg.domain}" = {
        extraDomainNames = ["*.${cfg.domain}"];
      };
      defaults = {
        email = cfg.adminEmail;
        dnsProvider = "linode";
        dnsResolver = "8.8.8.8";
        group = config.services.haproxy.user;
        # For example, to use Linode to prove the dns challenge,
        # the content of the file should be the following, with
        # XXX replaced by your Linode API token.
        #   LINODE_HTTP_TIMEOUT=10
        #   LINODE_POLLING_INTERVAL=10
        #   LINODE_PROPAGATION_TIMEOUT=240
        #   LINODE_TOKEN=XXX
        credentialsFile = "/run/secrets/linode";
        enableDebugLogs = false;
      };
    };
    sops.secrets.linode = {
      inherit (cfg) sopsFile;
      restartUnits = [ "acme-${cfg.domain}.service" ];
    };

    services.haproxy.enable = true;

    services.haproxy.config = let
      configcreator = pkgs.callPackage ./haproxy-configcreator.nix {};
    in configcreator.render (configcreator.default {
      inherit (config.services.haproxy) user group;

      certPath = "/var/lib/acme/${cfg.domain}/full.pem";

      stats = {
        port = 8404;
        uri = "/stats";
        refresh = "10s";
        prometheusUri = "/metrics";
      };

      defaults = {
        default-server = "init-addr last,none";
      };

      inherit (cfg) sites;
    });
  };
}
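A minimal sketch of the consumer-facing side of this module (domain, email, and sops file below are hypothetical; the sites attrset is normally filled in by the other shb.* modules in this commit):

  shb.reverseproxy = {
    domain = "example.com";             # sites are served under *.example.com
    adminEmail = "admin@example.com";   # used for ACME registration
    sopsFile = ./secrets/haproxy.yaml;  # hypothetical sops file holding the linode token
  };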
modules/home-assistant.nix (new file, 172 lines)
@@ -0,0 +1,172 @@
{ config, pkgs, lib, ... }:

let
  cfg = config.shb.home-assistant;
in
{
  options.shb.home-assistant = {
    enable = lib.mkEnableOption "selfhostblocks.home-assistant";

    subdomain = lib.mkOption {
      type = lib.types.str;
      description = "Subdomain under which home-assistant will be served.";
      example = "ha";
    };

    sopsFile = lib.mkOption {
      type = lib.types.path;
      description = "Sops file location";
      example = "secrets/homeassistant.yaml";
    };

    backupCfg = lib.mkOption {
      type = lib.types.anything;
      description = "Backup configuration for home-assistant";
      default = {};
      example = {
        backend = "restic";
        repositories = [];
      };
    };
  };

  config = lib.mkIf cfg.enable {
    services.home-assistant = {
      enable = true;
      # Find them at https://github.com/NixOS/nixpkgs/blob/master/pkgs/servers/home-assistant/component-packages.nix
      extraComponents = [
        # Components required to complete the onboarding
        "met"
        "radio_browser"
      ];
      configDir = "/var/lib/hass";
      # If you can't find a component in component-packages.nix, you can add them manually with something similar to:
      # extraPackages = python3Packages: [
      #   (python3Packages.simplisafe-python.overrideAttrs (old: rec {
      #     pname = "simplisafe-python";
      #     version = "5b003a9fa1abd00f0e9a0b99d3ee57c4c7c16bda";
      #     format = "pyproject";
      #
      #     src = pkgs.fetchFromGitHub {
      #       owner = "bachya";
      #       repo = pname;
      #       rev = "${version}";
      #       hash = "sha256-Ij2e0QGYLjENi/yhFBQ+8qWEJp86cgwC9E27PQ5xNno=";
      #     };
      #   }))
      # ];
      config = {
        # Includes dependencies for a basic setup
        # https://www.home-assistant.io/integrations/default_config/
        default_config = {};
        http = {
          use_x_forwarded_for = "true";
          trusted_proxies = "127.0.0.1";
        };
        logger.default = "info";
        homeassistant = {
          country = "!secret country";
          latitude = "!secret latitude_home";
          longitude = "!secret longitude_home";
          time_zone = "America/Los_Angeles";
        };
        "automation ui" = "!include automations.yaml";
        "scene ui" = "!include scenes.yaml";
        "script ui" = "!include scripts.yaml";

        "automation manual" = [
          {
            alias = "Create Backup on Schedule";
            trigger = [
              {
                platform = "time_pattern";
                minutes = "5";
              }
            ];
            action = [
              {
                service = "shell_command.delete_backups";
                data = {};
              }
              {
                service = "backup.create";
                data = {};
              }
            ];
            mode = "single";
          }
        ];

        shell_command = {
          delete_backups = "find ${config.services.home-assistant.configDir}/backups -type f -delete";
        };
      };
    };

    sops.secrets."home-assistant" = {
      inherit (cfg) sopsFile;
      mode = "0440";
      owner = "hass";
      group = "hass";
      path = "${config.services.home-assistant.configDir}/secrets.yaml";
      restartUnits = [ "home-assistant.service" ];
    };

    systemd.tmpfiles.rules = [
      "f ${config.services.home-assistant.configDir}/automations.yaml 0755 hass hass"
      "f ${config.services.home-assistant.configDir}/scenes.yaml 0755 hass hass"
      "f ${config.services.home-assistant.configDir}/scripts.yaml 0755 hass hass"
    ];

    shb.reverseproxy.sites.homeassistant = {
      frontend = {
        acl = {
          acl_homeassistant = "hdr_beg(host) ${cfg.subdomain}.";
        };
        use_backend = "if acl_homeassistant";
      };
      backend = {
        servers = [
          {
            name = "homeassistant1";
            address = "127.0.0.1:8123";
            forwardfor = false;
            balance = "roundrobin";
            check = {
              inter = "5s";
              downinter = "15s";
              fall = "3";
              rise = "3";
            };
            httpcheck = "GET /";
          }
        ];
      };
    };

    shb.backup.instances.home-assistant = lib.mkIf (cfg.backupCfg != {}) (
      cfg.backupCfg
      // {
        sourceDirectories = [
          "${config.services.home-assistant.configDir}/backups"
        ];

        # No need for backup hooks as we use an hourly automation job in home assistant directly with a cron job.
      }
    );

    users.groups = {
      hass = {
        members = [ "backup" ];
      };
    };

    systemd.services.home-assistant.serviceConfig = {
      # Setup permissions needed for backups, as the backup user is member of the hass group.
      UMask = lib.mkForce "0027";
      StateDirectory = "hass";
      StateDirectoryMode = lib.mkForce "0750";
      SupplementaryGroups = [ config.users.groups.keys.name ];
    };
  };
}
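A usage sketch for this module (subdomain, sops file, and repository path are hypothetical). Since backupCfg is merged into shb.backup.instances.home-assistant, it should carry the options the backup instance requires besides sourceDirectories:

  shb.home-assistant = {
    enable = true;
    subdomain = "ha";                          # served as ha.<shb.reverseproxy.domain>
    sopsFile = ./secrets/homeassistant.yaml;   # hypothetical sops file
    backupCfg = {
      backend = "restic";
      keySopsFile = ./secrets/backup.yaml;     # hypothetical
      repositories = [ "/srv/backups/ha" ];    # hypothetical
    };
  };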
modules/jellyfin.nix (new file, 73 lines)
@@ -0,0 +1,73 @@
{ config, lib, pkgs, ... }:

let
  cfg = config.shb.jellyfin;
in
{
  options.shb.jellyfin = {
    enable = lib.mkEnableOption "shb jellyfin";
  };

  config = lib.mkIf cfg.enable {
    services.jellyfin.enable = true;

    networking.firewall = {
      # from https://jellyfin.org/docs/general/networking/index.html, for auto-discovery
      allowedUDPPorts = [ 1900 7359 ];
    };

    users.groups = {
      media = {
        name = "media";
        members = [ "jellyfin" ];
      };
      jellyfin = {
        members = [ "backup" ];
      };
    };

    shb.reverseproxy.sites.jellyfin = {
      frontend = {
        acl = {
          acl_jellyfin = "hdr_beg(host) jellyfin.";
          acl_jellyfin_network_allowed = "src 127.0.0.1";
          acl_jellyfin_restricted_page = "path_beg /metrics";
        };
        http-request = {
          deny = "if acl_jellyfin acl_jellyfin_restricted_page !acl_jellyfin_network_allowed";
        };
        use_backend = "if acl_jellyfin";
      };
      # TODO: enable /metrics and block from outside https://jellyfin.org/docs/general/networking/monitoring/#prometheus-metrics
      backend = {
        servers = [
          {
            name = "jellyfin1";
            address = "127.0.0.1:8091";
            forwardfor = false;
            balance = "roundrobin";
            check = {
              inter = "5s";
              downinter = "15s";
              fall = "3";
              rise = "3";
            };
            httpcheck = "GET /health";
          }
        ];
      };
    };

    shb.backup.instances.jellyfin = {
      sourceDirectories = [
        "/var/lib/jellyfin"
      ];
    };

    systemd.services.jellyfin.serviceConfig = {
      # Setup permissions needed for backups, as the backup user is member of the jellyfin group.
      UMask = lib.mkForce "0027";
      StateDirectoryMode = lib.mkForce "0750";
    };
  };
}
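A consumer sketch: the module pre-fills only sourceDirectories for the backup instance, and backend, keySopsFile, and repositories have no defaults in backup.nix, so presumably the consumer supplies them alongside the enable flag (values below are hypothetical):

  shb.jellyfin.enable = true;

  shb.backup.instances.jellyfin = {
    backend = "restic";
    keySopsFile = ./secrets/backup.yaml;        # hypothetical
    repositories = [ "/srv/backups/jellyfin" ]; # hypothetical
  };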
modules/nextcloud-server.nix (new file, 162 lines)
@@ -0,0 +1,162 @@
{ config, pkgs, lib, ... }:

let
  cfg = config.shb.nextcloud;
in
{
  options.shb.nextcloud = {
    enable = lib.mkEnableOption "selfhostblocks.nextcloud-server";

    fqdn = lib.mkOption {
      type = lib.types.str;
      description = "Fully qualified domain under which nextcloud will be served.";
      example = "nextcloud.domain.com";
    };

    sopsFile = lib.mkOption {
      type = lib.types.path;
      description = "Sops file location";
      example = "secrets/nextcloud.yaml";
    };
  };

  config = lib.mkIf cfg.enable {
    users.users = {
      nextcloud = {
        name = "nextcloud";
        group = "nextcloud";
        home = "/srv/data/nextcloud";
        isSystemUser = true;
      };
    };

    users.groups = {
      nextcloud = {
        members = [ "backup" ];
      };
    };

    services.nextcloud = {
      enable = true;
      package = pkgs.nextcloud26;

      # Enable php-fpm and nginx which will be behind the shb haproxy instance.
      hostName = cfg.fqdn;

      config = {
        dbtype = "pgsql";
        adminuser = "root";
        adminpassFile = "/run/secrets/nextcloud/adminpass";
        # Not using dbpassFile as we're using socket authentication.
        defaultPhoneRegion = "US";
        trustedProxies = [ "127.0.0.1" ];
      };
      database.createLocally = true;

      # Enable caching using redis https://nixos.wiki/wiki/Nextcloud#Caching.
      configureRedis = true;
      caching.apcu = false;
      # https://docs.nextcloud.com/server/26/admin_manual/configuration_server/caching_configuration.html
      caching.redis = true;

      # Adds appropriate nginx rewrite rules.
      webfinger = true;

      extraOptions = {
        "overwrite.cli.url" = "https://" + cfg.fqdn;
        "overwritehost" = cfg.fqdn;
        "overwriteprotocol" = "https";
        "overwritecondaddr" = "^127\\.0\\.0\\.1$";
      };

      phpOptions = {
        # The OPcache interned strings buffer is nearly full with 8, bump to 16.
        "opcache.interned_strings_buffer" = "16";
      };
    };

    # Secret needed for services.nextcloud.config.adminpassFile.
    sops.secrets."nextcloud/adminpass" = {
      inherit (cfg) sopsFile;
      mode = "0440";
      owner = "nextcloud";
      group = "nextcloud";
    };

    # The following changes the listen address for nginx and puts haproxy in front. See
    # https://nixos.wiki/wiki/Nextcloud#Change_default_listening_port
    #
    # It's a bit of a waste in resources to have nginx behind haproxy but the config for nginx is
    # complex enough that I find it better to re-use the one from nixpkgs instead of trying to copy
    # it over to haproxy. At least for now.
    services.nginx.virtualHosts.${cfg.fqdn}.listen = [ { addr = "127.0.0.1"; port = 8080; } ];
    shb.reverseproxy.sites.nextcloud = {
      frontend = {
        acl = {
          acl_nextcloud = "hdr_beg(host) n.";
          # well_known = "path_beg /.well-known";
          # caldav-endpoint = "path_beg /.well-known/caldav";
          # carddav-endpoint = "path_beg /.well-known/carddav";
          # webfinger-endpoint = "path_beg /.well-known/webfinger";
          # nodeinfo-endpoint = "path_beg /.well-known/nodeinfo";
        };
        http-request.set-header = {
          "X-Forwarded-Host" = "%[req.hdr(host)]";
          "X-Forwarded-Port" = "%[dst_port]";
        };
        # http-request = [
        #   "redirect code 301 location /remote.php/dav if acl_nextcloud caldav-endpoint"
        #   "redirect code 301 location /remote.php/dav if acl_nextcloud carddav-endpoint"
        #   "redirect code 301 location /public.php?service=webfinger if acl_nextcloud webfinger-endpoint"
        #   "redirect code 301 location /public.php?service=nodeinfo if acl_nextcloud nodeinfo-endpoint"
        # ];
        # http-response = {
        #   set-header = {
        #     # These headers are from https://github.com/NixOS/nixpkgs/blob/d3bb401dcfc5a46ce51cdfb5762e70cc75d082d2/nixos/modules/services/web-apps/nextcloud.nix#L1167-L1173
        #     X-Content-Type-Options = "nosniff";
        #     X-XSS-Protection = "\"1; mode=block\"";
        #     X-Robots-Tag = "\"noindex, nofollow\"";
        #     X-Download-Options = "noopen";
        #     X-Permitted-Cross-Domain-Policies = "none";
        #     X-Frame-Options = "sameorigin";
        #     Referrer-Policy = "no-referrer";
        #   };
        # };
        use_backend = "if acl_nextcloud";
      };
      backend = {
        servers = [
          {
            name = "nextcloud1";
            address =
              let
                addrs = config.services.nginx.virtualHosts.${cfg.fqdn}.listen;
              in
              builtins.map (c: "${c.addr}:${builtins.toString c.port}") addrs;
            forwardfor = true;
            balance = "roundrobin";
            check = {
              inter = "5s";
              downinter = "15s";
              fall = "3";
              rise = "3";
            };
            httpcheck = "GET /";
          }
        ];
      };
    };

    systemd.services.phpfpm-nextcloud.serviceConfig = {
      # Setup permissions needed for backups, as the backup user is member of the nextcloud group.
      UMask = lib.mkForce "0027";
    };

    # Sets up backup for Nextcloud.
    shb.backup.instances.nextcloud = {
      sourceDirectories = [
        config.services.nextcloud.datadir
      ];
    };
  };
}
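A consumer sketch for this module (the domain is hypothetical; note the frontend acl above hard-codes the "n." host prefix, so the fqdn is chosen to match it):

  shb.nextcloud = {
    enable = true;
    fqdn = "n.example.com";                # matches the hard-coded "hdr_beg(host) n." acl
    sopsFile = ./secrets/nextcloud.yaml;   # hypothetical sops file holding nextcloud/adminpass
  };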