
remove _disnix folder

ibizaman 2023-10-14 17:21:12 -07:00
parent 9bf187766a
commit 21e4f837e9
57 changed files with 0 additions and 5491 deletions


@ -1,39 +0,0 @@
{ distribution ? null
, services ? null
, system ? builtins.currentSystem
, pkgs ? import <nixpkgs> { inherit system; }
, utils ? null
, secret ? null
}:
let
callPackage = pkgs.lib.callPackageWith (pkgs // customPkgs);
customPkgs = rec {
mkPostgresDB = callPackage ./postgresdb {};
mkHaproxyService = callPackage ./haproxy/unit.nix {inherit utils;};
CaddyConfig = callPackage ./caddy/config.nix {inherit utils;};
CaddyService = callPackage ./caddy/unit.nix {inherit utils;};
CaddySiteConfig = callPackage ./caddy/siteconfig.nix {inherit utils;};
mkCaddySiteConfig = callPackage ./caddy/mksiteconfig.nix {inherit CaddySiteConfig;};
mkNginxService = callPackage ./nginx/unit.nix {inherit utils;};
mkPHPFPMService = callPackage ./php-fpm/unit.nix {inherit utils;};
mkKeycloakService = callPackage ./keycloak/unit.nix {inherit utils;};
mkOauth2Proxy = callPackage ./oauth2-proxy/unit.nix {inherit utils;};
mkKeycloakHaproxyService = callPackage ./keycloak-haproxy/unit.nix {inherit utils;};
mkKeycloakCliService = callPackage ./keycloak-cli-config/unit.nix {inherit utils;};
keycloak = callPackage ./keycloak {inherit utils customPkgs;};
ttrss = callPackage ./ttrss {inherit utils customPkgs;};
vaultwarden = callPackage ./vaultwarden {inherit utils customPkgs secret;};
};
in
customPkgs


@ -1,24 +0,0 @@
{ CaddySiteConfig
}:
{ CaddyConfig
, CaddyService
, name
, port
, siteName
, siteRoot
, phpFpmSiteSocket ? ""
}:
rec {
inherit name;
caddySocket = "${CaddyService.runtimeDirectory}/${siteName}.sock";
pkg = CaddySiteConfig rec {
inherit (CaddyConfig) siteConfigDir;
inherit phpFpmSiteSocket;
portBinding = port;
bindService = siteName;
siteSocket = caddySocket;
serviceRoot = siteRoot;
};
type = "fileset";
}


@ -1,49 +0,0 @@
{ stdenv
, pkgs
, utils
}:
{ siteConfigDir
, portBinding
, bindService
, serviceRoot ? "/usr/share/webapps/${bindService}"
, siteSocket ? null
, phpFpmSiteSocket ? null
, logLevel ? "WARN"
}:
let
content =
[
"root * ${serviceRoot}"
"file_server"
]
++ (
if siteSocket != null && siteSocket != ""
then [
"bind unix/${siteSocket}"
]
else []
)
++ (
if phpFpmSiteSocket != null && phpFpmSiteSocket != ""
then [
"php_fastcgi unix/${phpFpmSiteSocket}"
]
else []
);
in
utils.mkConfigFile {
name = "${bindService}.config";
dir = siteConfigDir;
content = ''
:${builtins.toString portBinding} {
${builtins.concatStringsSep "\n " content}
log {
output stderr
level ${logLevel}
}
}
'';
}


@ -1,80 +0,0 @@
{ stdenv
, pkgs
, utils
}:
{ user ? "http"
, group ? "http"
, siteConfigDir
}:
{...}:
let
config = pkgs.writeTextDir "Caddyfile" ''
{
# Disable auto https
http_port 10001
https_port 10002
}
import ${siteConfigDir}/*
'';
in
utils.systemd.mkService rec {
name = "caddy";
content = ''
[Unit]
Description=Caddy webserver
Documentation=https://caddyserver.com/docs/
After=network.target network-online.target
Wants=network-online.target systemd-networkd-wait-online.target
StartLimitInterval=14400
StartLimitBurst=10
[Service]
Type=notify
User=${user}
Group=${group}
ExecStart=${pkgs.caddy}/bin/caddy run --environ --config ${config}
ExecReload=${pkgs.caddy}/bin/caddy reload --config ${config}
# Restart=on-abnormal
RuntimeDirectory=caddy
# KillMode=mixed
# KillSignal=SIGQUIT
TimeoutStopSec=5s
LimitNOFILE=1048576
LimitNPROC=512
# PrivateDevices=true
LockPersonality=true
NoNewPrivileges=true
PrivateDevices=true
PrivateTmp=true
ProtectClock=true
ProtectControlGroups=true
ProtectHome=true
ProtectHostname=true
ProtectKernelLogs=true
ProtectKernelModules=true
ProtectKernelTunables=true
ProtectSystem=full
RestrictAddressFamilies=AF_INET AF_INET6 AF_NETLINK AF_UNIX
RestrictNamespaces=true
RestrictRealtime=true
RestrictSUIDSGID=true
# CapabilityBoundingSet=CAP_NET_BIND_SERVICE
AmbientCapabilities=CAP_NET_BIND_SERVICE
# ProtectSystem=strict
# ReadWritePaths=/var/lib/caddy /var/log/caddy
[Install]
WantedBy=multi-user.target
'';
}


@ -1,10 +0,0 @@
{
herculesCI = {...}: {
onPush.default = {
outputs = {...}: {
unit = (import ./default.nix {}).tests.unit;
integration = (import ./default.nix {}).tests.integration;
};
};
};
}


@ -1,34 +0,0 @@
{ pkgs ? import (builtins.fetchGit {
# Descriptive name to make the store path easier to identify
name = "nixos-21.11-2023-03-15";
url = "https://github.com/nixos/nixpkgs/";
# Commit hash for the 21.11 release tag
# `git ls-remote https://github.com/nixos/nixpkgs refs/tags/21.11`
ref = "refs/tags/21.11";
rev = "506445d88e183bce80e47fc612c710eb592045ed";
}) {}
}:
let
utils = pkgs.callPackage ./utils.nix {};
in
with builtins;
with pkgs.lib.attrsets;
with pkgs.lib.lists;
with pkgs.lib.strings;
rec {
customPkgs = import ./all-packages.nix;
tests = pkgs.callPackage ./tests { inherit utils; };
runtests =
let
onlytests = filterAttrs (name: value: name != "override" && name != "overrideDerivation") tests;
failingtests = filterAttrs (name: value: length value > 0) onlytests;
formatFailure = failure: toString failure; # TODO: make this more pretty
formattedFailureGroups = mapAttrsToList (name: failures: "${name}:\n${concatMapStringsSep "\n" formatFailure failures}") failingtests;
in
if length formattedFailureGroups == 0 then
"no failing test"
else
concatStringsSep "\n" formattedFailureGroups;
}


@ -1,18 +0,0 @@
# This example uses YAML anchors, which allow reuse of multiple keys
# without having to repeat yourself.
# Also see https://github.com/Mic92/dotfiles/blob/master/nixos/.sops.yaml
# for a more complex example.
keys:
- &me age1nj0ulq6863y9tdk0pkwjx4ltuyjpx6gftwy27mk3gkwja6k325esgaerlr
- &machine1 age16yraj9xdpjqazwakcy4fs9gcxu75el3yefpzudhv7zu9pn6jsvtqeee23r
creation_rules:
- path_regex: secrets/[^/]+\.yaml$
key_groups:
- age:
- *me
- path_regex: secrets/machine1/[^/]+\.yaml$
key_groups:
- age:
- *me
- *machine1


@ -1,36 +0,0 @@
# Vaultwarden setup
This folder contains an example configuration for setting up
Vaultwarden on Linode. Before deploying to Linode, you can
test the deployment locally with VirtualBox.
First, [setup NixOS on a Linode instance](/docs/tutorials/linode.md).
When that's done, explore the files in this folder.
To try it out locally, follow [deploy to staging](/docs/tutorials/deploystaging.md).
Set the deployment arguments with the `set-args` subcommand:
```bash
nixops set-args --network dev \
--arg domain '"dev.mydomain.com"' \
--arg sopsKeyFile '"$HOME/.config/sops/age/keys.txt"'
```
You can use the `info` subcommand to print the values of the arguments:
```bash
nixops info --network dev
```
The TL;DR version, assuming you're in the directory where this file is located:
```bash
export NIXOPS_DEPLOYMENT=vaultwarden-staging
export DISNIXOS_USE_NIXOPS=1
nixops create ./network-virtualbox.nix -d vaultwarden-staging
nixops deploy --network dev
nixops reboot
disnixos-env -s services.nix -n dev/nixops.nix -d distribution.nix
```


@ -1,52 +0,0 @@
{
domain ? "dev.mydomain.com",
sopsKeyFile ? "",
}:
{
network = {
storage.legacy = {};
};
machine1 = { system, pkgs, lib, ... }:
with lib;
let
utils = pkgs.lib.callPackageWith pkgs ./../../../../utils.nix { };
base = ((import ./../network.nix).machine1 {
inherit system pkgs lib;
inherit domain utils;
secret = x: x;
});
vbox = (import ./../network.nix).virtualbox;
mkPortMapping = {name, host, guest, protocol ? "tcp"}:
["--natpf1" "${name},${protocol},,${toString host},,${toString guest}"];
in
recursiveUpdate base {
imports = [
<sops-nix/modules/sops>
];
deployment.targetEnv = "virtualbox";
deployment.virtualbox = {
memorySize = 1024;
vcpu = 2;
headless = true;
vmFlags = concatMap mkPortMapping vbox.portMappings;
};
# This will add the secrets file to the nix store
# You can avoid this by using a string with the full path instead, e.g.
# sops.defaultSopsFile = "/root/.sops/secrets/example.yaml";
sops.defaultSopsFile = ../secrets/linode.yaml;
# This will automatically import SSH keys as age keys
sops.age.sshKeyPaths = [ "/etc/ssh/ssh_host_ed25519_key" ];
# This is using an age key that is expected to already be in the filesystem
sops.age.keyFile = /. + sopsKeyFile;
# This will generate a new key if the key specified above does not exist
sops.age.generateKey = true;
# This is the actual specification of the secrets.
sops.secrets.linode = {};
};
}


@ -1,23 +0,0 @@
{ infrastructure
, pkgs ? import <nixpkgs> {}
}:
with infrastructure;
let
customPkgs = (pkgs.callPackage (./../../..) {}).customPkgs {
inherit pkgs;
};
keycloak = customPkgs.keycloak {};
vaultwarden = customPkgs.vaultwarden {};
in
{
HaproxyService = [ machine1 ];
KeycloakService = [ machine1 ];
KeycloakCliService = [ machine1 ];
KeycloakHaproxyService = [ machine1 ];
}
// keycloak.distribute [ machine1 ]
// vaultwarden.distribute [ machine1 ]


@ -1,160 +0,0 @@
{ hostname
, userName
, userPackages
, systemPackages
, address
, gateway
, sshPublicKey
, allowedTCPPorts
}:
{
imports =
[ # Include the results of the hardware scan.
./machine1-hardware-configuration.nix
];
# Use the GRUB 2 boot loader.
boot.loader.grub.enable = true;
boot.loader.grub.version = 2;
# boot.loader.grub.efiSupport = true;
# boot.loader.grub.efiInstallAsRemovable = true;
# boot.loader.efi.efiSysMountPoint = "/boot/efi";
# Define on which hard drive you want to install Grub.
# boot.loader.grub.device = "/dev/sda"; # or "nodev" for efi only
networking.hostName = hostname; # Define your hostname.
# Pick only one of the below networking options.
# networking.wireless.enable = true; # Enables wireless support via wpa_supplicant.
networking.networkmanager.enable = true; # Easiest to use and most distros use this by default.
networking.usePredictableInterfaceNames = false;
networking.enableIPv6 = false;
# Set your time zone.
# time.timeZone = "Europe/Amsterdam";
# Configure network proxy if necessary
# networking.proxy.default = "http://user:password@proxy:port/";
# networking.proxy.noProxy = "127.0.0.1,localhost,internal.domain";
# Select internationalisation properties.
# i18n.defaultLocale = "en_US.UTF-8";
# console = {
# font = "Lat2-Terminus16";
# keyMap = "us";
# useXkbConfig = true; # use xkbOptions in tty.
# };
# Enable the X11 windowing system.
# services.xserver.enable = true;
# Configure keymap in X11
# services.xserver.layout = "us";
# services.xserver.xkbOptions = {
# "eurosign:e";
# "caps:escape" # map caps to escape.
# };
# Enable CUPS to print documents.
# services.printing.enable = true;
# Enable sound.
# sound.enable = true;
# hardware.pulseaudio.enable = true;
# Enable touchpad support (enabled default in most desktopManager).
# services.xserver.libinput.enable = true;
# Define a user account. Don't forget to set a password with passwd.
users.users.${userName} = {
isNormalUser = true;
extraGroups = [ "wheel" "networkmanager" ]; # Enable sudo for the user.
packages = userPackages;
openssh.authorizedKeys.keys = [ sshPublicKey ];
};
# List packages installed in system profile. To search, run:
# $ nix search wget
environment.systemPackages = systemPackages;
# Some programs need SUID wrappers, can be configured further or are
# started in user sessions.
# programs.mtr.enable = true;
# programs.gnupg.agent = {
# enable = true;
# enableSSHSupport = true;
# };
# List services that you want to enable:
# Enable the OpenSSH daemon.
services.openssh = {
enable = true;
permitRootLogin = "yes";
passwordAuthentication = false;
};
nix.trustedUsers = [
"deployer"
];
users.groups.deployer = {};
users.users.deployer = {
isSystemUser = true;
group = "deployer";
extraGroups = [ "wheel" ]; # Enable sudo for the user.
openssh.authorizedKeys.keys = [ sshPublicKey ];
};
users.users."root" = {
openssh.authorizedKeys.keys = [ sshPublicKey ];
};
security.sudo.wheelNeedsPassword = false;
services.longview = {
enable = true;
apiKeyFile = "/var/lib/longview/apiKeyFile";
apacheStatusUrl = "";
nginxStatusUrl = "";
mysqlUser = "";
mysqlPassword = "";
};
# Open ports in the firewall.
networking.firewall.allowedTCPPorts = allowedTCPPorts;
# networking.firewall.allowedUDPPorts = [ ... ];
# Or disable the firewall altogether.
# networking.firewall.enable = false;
networking.domain = "members.linode.com";
networking.search = [ "members.linode.com" ];
networking.resolvconf.extraOptions = [ "rotate" ];
networking.nameservers = [
"173.230.145.5"
"173.230.147.5"
"173.230.155.5"
"173.255.212.5"
"173.255.219.5"
"173.255.241.5"
"173.255.243.5"
"173.255.244.5"
"74.207.241.5"
"74.207.242.5"
];
# Copy the NixOS configuration file and link it from the resulting system
# (/run/current-system/configuration.nix). This is useful in case you
# accidentally delete configuration.nix.
# system.copySystemConfiguration = true;
# This value determines the NixOS release from which the default
# settings for stateful data, like file locations and database versions
# on your system were taken. It's perfectly fine and recommended to leave
# this value at the release version of the first install of this system.
# Before changing this value read the documentation for this option
# (e.g. man configuration.nix or on https://nixos.org/nixos/options.html).
system.stateVersion = "22.05"; # Did you read the comment?
}


@ -1,57 +0,0 @@
# Do not modify this file! It was generated by nixos-generate-config
# and may be overwritten by future invocations. Please make changes
# to /etc/nixos/configuration.nix instead.
{ config, lib, pkgs, modulesPath, ... }:
{
imports =
[ (modulesPath + "/profiles/qemu-guest.nix")
];
boot.initrd.availableKernelModules = [ "virtio_pci" "virtio_scsi" "ahci" "sd_mod" ];
boot.initrd.kernelModules = [ ];
boot.kernelModules = [ ];
boot.extraModulePackages = [ ];
fileSystems."/" =
{ device = "/dev/disk/by-label/nixos";
fsType = "ext4";
};
swapDevices =
[ { device = "/dev/disk/by-label/swap"; }
];
boot.kernelParams = [ "console=ttyS0,19200n8" ];
boot.loader.grub.extraConfig = ''
serial --speed=19200 --unit=0 --word=8 --parity=no --stop=1;
terminal_input serial;
terminal_output serial
'';
boot.loader.grub.forceInstall = true;
boot.loader.grub.device = "nodev";
boot.loader.timeout = 10;
# Enables DHCP on each ethernet and wireless interface. In case of scripted networking
# (the default) this is the recommended approach. When using systemd-networkd it's
# still possible to use this option, but it's recommended to use it in conjunction
# with explicit per-interface declarations with `networking.interfaces.<interface>.useDHCP`.
networking.useDHCP = lib.mkDefault false;
# networking.interfaces.eth0.useDHCP = true;
networking.interfaces.eth0 = {
ipv4 = {
addresses = [
{
address = "45.79.76.142";
prefixLength = 24;
}
];
};
};
networking.defaultGateway = {
address = "45.79.76.1";
interface = "eth0";
};
hardware.cpu.amd.updateMicrocode = lib.mkDefault config.hardware.enableRedistributableFirmware;
}


@ -1,221 +0,0 @@
rec {
machine1 = { system
, pkgs
, lib
, utils
, domain
, secret
, ... }:
let
customPkgs = (pkgs.callPackage (./../../..) {}).customPkgs {
inherit system pkgs utils secret;
};
vaultwarden = customPkgs.vaultwarden {};
keycloak = customPkgs.keycloak {};
httpUser = "http";
httpGroup = "http";
httpRoot = "/usr/share/webapps";
phpfpmUser = "phpfpm";
phpfpmGroup = "phpfpm";
phpfpmRoot = "/run/php-fpm";
keycloakUser = "keycloak";
keycloakGroup = "keycloak";
caddyHttpPort = 10001;
caddyHttpsPort = 10002;
keycloaksecretsdir = "/run/keys/keycloakcliconfig";
keycloakusers = [ "me" "friend" ];
in
rec {
users.groups = {
http = {
name = httpGroup;
};
phpfpm = {
name = phpfpmGroup;
};
keycloak = {
name = keycloakGroup;
};
keycloakcli = {
name = "keycloakcli";
};
"${vaultwarden.group}" = {
name = "${vaultwarden.group}";
};
};
users.users = {
http = {
name = httpUser;
group = httpGroup;
home = httpRoot;
isSystemUser = true;
};
phpfpm = {
name = phpfpmUser;
group = phpfpmGroup;
home = phpfpmRoot;
isSystemUser = true;
};
keycloak = {
name = keycloakUser;
group = keycloakGroup;
# home ?
isSystemUser = true;
};
keycloakcli = {
name = "keycloakcli";
group = "keycloakcli";
extraGroups = [ "keys" ];
isSystemUser = true;
};
"${vaultwarden.user}" = {
name = vaultwarden.user;
group = vaultwarden.group;
extraGroups = [ "keys" ];
isSystemUser = true;
};
};
# deployment.keys = {
# keycloakdbpassword.text = ''
# KC_DB_PASSWORD="${secret "${domain}/keycloakdbpassword"}"
# '';
# keycloakinitialadmin.text = ''
# KEYCLOAK_ADMIN_PASSWORD="${secret "${domain}/${keycloak.subdomain}/admin"}"
# '';
# # This convention is for keycloak-cli-config
# "keycloak.password" = {
# destDir = keycloaksecretsdir;
# user = "keycloakcli";
# text = secret "${domain}/${keycloak.subdomain}/admin";
# };
# "keycloakusers" =
# let
# e = str: lib.strings.escape [''\''] (lib.strings.escape [''"''] str);
# in
# {
# user = "keycloakcli";
# text = lib.concatMapStringsSep "\n"
# (name: "KEYCLOAK_USERS_${lib.strings.toUpper name}_PASSWORD=${e (secret "${domain}/${keycloak.subdomain}/${name}")}")
# keycloakusers;
# };
# }
# // vaultwarden.deployKeys domain;
security.acme = {
acceptTerms = true;
certs = {
"${domain}" = {
extraDomainNames = ["*.${domain}"];
};
};
defaults = {
group = httpGroup;
email = "ibizapeanut@gmail.com";
dnsProvider = "linode";
dnsResolver = "8.8.8.8";
# For example, to use Linode to solve the DNS challenge,
# the content of the file should be the following, with
# XXX replaced by your Linode API token.
# LINODE_HTTP_TIMEOUT=10
# LINODE_POLLING_INTERVAL=10
# LINODE_PROPAGATION_TIMEOUT=240
# LINODE_TOKEN=XXX
credentialsFile = "/run/secrets/linode";
enableDebugLogs = true;
};
};
services = {
openssh = {
enable = true;
};
disnix = {
enable = true;
# useWebServiceInterface = true;
};
dnsmasq = {
enable = true;
servers = [ "192.168.50.15" "192.168.50.1" ];
extraConfig =
let
subdomains = [
"machine1"
keycloak.subdomain
vaultwarden.subdomain
];
inherit domain;
in (lib.concatMapStrings
(subdomain: "address=/${subdomain}.${domain}/127.0.0.1\naddress=/${subdomain}/127.0.0.1\n")
subdomains)
;
};
# tomcat.enable = false;
postgresql = {
enable = true;
package = pkgs.postgresql_14;
port = 5432;
enableTCPIP = true;
authentication = pkgs.lib.mkOverride 10 ''
local all all trust
host all all 127.0.0.1/32 trust
host all all ::1/128 trust
'';
};
};
dysnomia = {
enable = true;
enableLegacyModules = false;
extraContainerProperties = {
system = {
inherit domain;
};
postgresql-database = {
service_name = "postgresql.service";
port = builtins.toString services.postgresql.port;
};
keycloaksecrets = {
rootdir = keycloaksecretsdir;
};
};
};
networking.firewall.allowedTCPPorts = [ services.postgresql.port ] ++ virtualbox.guestPorts;
};
virtualbox = rec {
portMappings = [
{ name = "ssh";
host = 22;
guest = 22;
}
{ name = "dns";
host = 53;
guest = 53;
}
{ name = "https";
host = 443;
guest = 443;
}
];
hostPorts = map (x: x.host) portMappings;
guestPorts = map (x: x.guest) portMappings;
};
}


@ -1,40 +0,0 @@
let
hostname = "machine1";
domain = "mydomain.com";
in
{
machine1 = { system, pkgs, lib, ... }:
let
utils = pkgs.lib.callPackageWith pkgs ./utils.nix { };
base = ((import ./network.nix).machine1 {
inherit system pkgs lib;
inherit domain utils;
});
vbox = (import ./network.nix).virtualbox;
in
lib.recursiveUpdate base rec {
deployment.targetHost = hostname;
imports = [
(import ./machines/machine1-configuration.nix {
inherit hostname;
userName = "me";
userPackages = with pkgs; [];
systemPackages = with pkgs; [
curl
inetutils
mtr
sysstat
tmux
vim
];
address = "45.79.76.142";
gateway = "45.79.76.1";
sshPublicKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIB/UeAaMECJNLMZ23vLb3A3XT7OJDcpj2OWgXzt8+GLU me@laptop";
allowedTCPPorts = vbox.guestPorts;
})
];
};
}


@ -1,21 +0,0 @@
linode: ENC[AES256_GCM,data:Rg/k/gmBJ8iBP9KW8Zom7gGecNG404v9oQ85MuXPB+fjKowmm36YJ61tiVhUADPsFMWezulJG3RpvpoqZLPU+8cCX1KPsfJgUN77MlQRjjdraqVMq/opcEXfwIs3g76y9hDvbTMVIGpKCVE8hl7N5XTRPkQPSpracj+papL0bdFLhmsDgGi/vmaH7zs9K6gwYQv/mzz2oy6oZh8NoAlF,iv:2Z4NLAQmf/m5oemdM7Z+MAAyVUBVZoA4Zia/bqcW8u0=,tag:WeVnU4swvvQdA7QU9Ax4Xg==,type:str]
sops:
kms: []
gcp_kms: []
azure_kv: []
hc_vault: []
age:
- recipient: age1nj0ulq6863y9tdk0pkwjx4ltuyjpx6gftwy27mk3gkwja6k325esgaerlr
enc: |
-----BEGIN AGE ENCRYPTED FILE-----
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSAweERHTXJWU1BWNXJJUVNM
WUZObm82a0JHSWM2cWdBQTI2ckRvMnRGMDIwCm9TZWtTTUlaTTRuYVcvd1J3TnVF
dUN0NFdtaTZWL2IraE5BcE43WWdXcmMKLS0tIDFyQ3FGT1F4dkVtU0U5R2FNNlRa
dXB5OEx1clZiMktxdkFVUVpWOUtleU0Kb4E+x2cxcOayFigQDo9dv3e/si9a19YJ
mw2PUTb1Tm3PQ/ZXW6R6y5CfzFf7FhBTpRas84sPDg9MrOrWLygUgw==
-----END AGE ENCRYPTED FILE-----
lastmodified: "2023-04-09T05:03:49Z"
mac: ENC[AES256_GCM,data:t03zcu+puxqs6bqb+7MJDY67UXJVN4RP48VSY2aBYtzRp4tcZ0zKpqGDJ2y9sFkLkUu5Mvha68g5Bd6uylQFwVyks2HQyyT0UFsQWJfpk9WGXElmJ+BeU4m51QFLK17rAFGSqvlHjbo3U47IgySlr4vViIyikOxY/UAUI0r2jsQ=,iv:Q5QgjDszLKbcqN415YLdqMGLa0b3Cy4WbXOtkT4HKBs=,tag:Cgpox1V73/UU4kg+dLgyWA==,type:str]
pgp: []
unencrypted_suffix: _unencrypted
version: 3.7.3


@ -1,198 +0,0 @@
{ system, pkgs, distribution, invDistribution }:
let
utils = pkgs.lib.callPackageWith pkgs ./utils.nix { };
customPkgs = (pkgs.callPackage (./../../..) {}).customPkgs {
inherit system pkgs utils;
};
getTarget = name: builtins.elemAt (builtins.getAttr name distribution) 0;
getDomain = name: (getTarget name).containers.system.domain;
realm = "myrealm";
smtp = utils.recursiveMerge [
{
from = "vaultwarden@${realm}.com";
fromName = "vaultwarden";
port = 587;
authMechanism = "Login";
}
vaultwarden.smtp
];
keycloak = customPkgs.keycloak {};
KeycloakService = customPkgs.mkKeycloakService {
name = "KeycloakService";
subdomain = keycloak.subdomain;
# TODO: Get these from infrastructure.nix
user = "keycloak";
group = "keycloak";
postgresServiceName = (getTarget "KeycloakPostgresDB").containers.postgresql-database.service_name;
initialAdminUsername = "admin";
keys = {
dbPassword = "keycloakdbpassword";
initialAdminPassword = "keycloakinitialadmin";
};
# logLevel = "DEBUG,org.hibernate:info,org.keycloak.authentication:debug,org.keycloak:info,org.postgresql:info,freemarker:info";
logLevel = "INFO";
hostname = "${keycloak.subdomain}.${getDomain "KeycloakService"}";
listenPort = 8080;
dbType = "postgres";
dbDatabase = keycloak.database.name;
dbUsername = keycloak.database.username;
dbHost = {KeycloakPostgresDB}: KeycloakPostgresDB.target.properties.hostname;
dbPort = (getTarget "KeycloakPostgresDB").containers.postgresql-database.port;
KeycloakPostgresDB = keycloak.db;
};
KeycloakCliService = customPkgs.mkKeycloakCliService rec {
name = "KeycloakCliService";
keycloakServiceName = "keycloak.service";
keycloakSecretsDir = (getTarget name).containers.keycloaksecrets.rootdir;
keycloakUrl = "https://${keycloak.subdomain}.${(getDomain "KeycloakService")}";
keycloakUser = KeycloakService.initialAdminUsername;
keys = {
userpasswords = "keycloakusers";
};
dependsOn = {
inherit KeycloakService HaproxyService;
};
config = (utils.recursiveMerge [
rec {
inherit realm;
domain = getDomain name;
roles = {
user = [];
admin = ["user"];
};
users = {
me = {
email = "me@${domain}";
firstName = "Me";
lastName = "Me";
roles = ["admin"];
initialPassword = true;
};
friend = {
email = "friend@${domain}";
firstName = "Friend";
lastName = "Friend";
roles = ["user"];
initialPassword = true;
};
};
}
vaultwarden.keycloakCliConfig
]);
};
KeycloakHaproxyService = customPkgs.mkKeycloakHaproxyService {
name = "KeycloakHaproxyService";
domain = "https://${keycloak.subdomain}.${getDomain "KeycloakService"}";
realms = [realm];
inherit KeycloakService;
};
vaultwarden = customPkgs.vaultwarden {
subdomain = "vaultwarden";
ingress = 18005;
sso.realm = realm;
sso.userRole = "user";
sso.adminRole = "admin";
inherit smtp;
inherit distribution HaproxyService KeycloakService KeycloakCliService;
};
HaproxyService = customPkgs.mkHaproxyService {
name = "HaproxyService";
user = "http";
group = "http";
dependsOn = {
inherit KeycloakHaproxyService;
};
config = {...}:
let
domain = getDomain "HaproxyService";
in {
certPath = "/var/lib/acme/${domain}/full.pem";
stats = {
port = 8404;
uri = "/stats";
refresh = "10s";
prometheusUri = "/metrics";
};
defaults = {
default-server = "init-addr last,none";
};
resolvers = {
default = {
nameservers = {
ns1 = "127.0.0.1:53";
};
};
};
sites = {
vaultwarden = vaultwarden.haproxy distribution.VaultwardenService;
keycloak = {
frontend = {
capture = [
"request header origin len 128"
];
acl = {
acl_keycloak = "hdr_beg(host) ${keycloak.subdomain}.";
acl_keycloak_authorized_origin = "capture.req.hdr(0) -m end .${domain}";
};
use_backend = "if acl_keycloak";
http-response = {
add-header = map (x: x + " if acl_keycloak_authorized_origin") [
"Access-Control-Allow-Origin %[capture.req.hdr(0)]"
"Access-Control-Allow-Methods GET,\\ HEAD,\\ OPTIONS,\\ POST,\\ PUT"
"Access-Control-Allow-Credentials true"
"Access-Control-Allow-Headers Origin,\\ Accept,\\ X-Requested-With,\\ Content-Type,\\ Access-Control-Request-Method,\\ Access-Control-Request-Headers,\\ Authorization"
];
};
};
backend = {
servers = [
{
name = "keycloak1";
address = "127.0.0.1:8080"; # TODO: should use the hostname
resolvers = "default";
}
];
cookie = "JSESSIONID prefix";
};
};
};
};
};
in
with pkgs.lib.attrsets;
rec {
inherit KeycloakService KeycloakCliService KeycloakHaproxyService;
inherit HaproxyService;
}
// keycloak.services
// vaultwarden.services


@ -1,17 +0,0 @@
### Deploy to prod
Please read the [deploy to staging](/deploystaging.md) document first, as all
the commands are very similar. I only show a summary of the commands, with
staging variables replaced by prod ones.
```bash
export NIXOPS_DEPLOYMENT=prod
export DISNIXOS_USE_NIXOPS=1
nixops create ./network-prod.nix -d prod
nixops deploy --option extra-builtins-file $(pwd)/extra-builtins.nix
nixops reboot
disnixos-env -s services.nix -n network-prod.nix -d distribution.nix
```


@ -1,52 +0,0 @@
# Deploy to staging environment
Instead of deploying to prod machines, you'll deploy to VMs running on
your computer with VirtualBox. This is tremendously helpful for
testing.
```bash
export NIXOPS_DEPLOYMENT=vboxtest
export DISNIXOS_USE_NIXOPS=1
nixops create ./network-virtualbox.nix -d vboxtest
nixops deploy --option extra-builtins-file $(pwd)/extra-builtins.nix
nixops reboot
disnixos-env -s services.nix -n network-virtualbox.nix -d distribution.nix
```
For the `nixops deploy` step to start, you'll need to generate all
necessary passwords. The easiest approach is to run the command, see which
password it fails on, generate that password, then re-issue the command.
It's okay if the `nixops deploy` command fails to activate the new
configuration on first run because of the `virtualbox.service`. If
that happens, continue with the `nixops reboot` command. The service
will activate itself after the reboot.
Rebooting after deploying is needed anyway so that systemd picks up the
`/etc/systemd-mutable` path through the `SYSTEMD_UNIT_PATH`
environment variable.
The `extra-builtins-file` option allows us to use the password store (`pass`) as the
secrets manager. You'll probably see errors about missing passwords
when running this for the first time. To fix those, generate the
missing passwords with `pass`.
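For example, the secrets in `network.nix` follow a `<domain>/<subdomain>/<name>` naming convention, so a missing Keycloak admin password could be generated with something like the sketch below; the exact path to use is the one reported in the error message.
```bash
# Hypothetical secret path; replace it with the path nixops complains about.
pass generate dev.mydomain.com/keycloak/admin 20
```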
## Handle host reboot
After restarting the computer running the VMs, do `nixops start` and
continue from the `nixops deploy ...` step.
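In practice this is just a subset of the commands above, roughly:
```bash
nixops start
nixops deploy --option extra-builtins-file $(pwd)/extra-builtins.nix
nixops reboot
disnixos-env -s services.nix -n network-virtualbox.nix -d distribution.nix
```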
## Cleanup
To start from scratch, run `nixops destroy` and start at the `nixops
deploy ...` step. This can be useful after fiddling with creating
directories. You could do this on prod too but... it's probably not a
good idea.
Also, you'll need to add the `--no-upgrade` option when running
`disnixos-env` for the first time. Otherwise, disnix will try to
deactivate services, but since the machine is clean there are no
services to deactivate and it will fail.
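So a first deployment to a fresh VM would look roughly like this:
```bash
disnixos-env -s services.nix -n network-virtualbox.nix -d distribution.nix --no-upgrade
```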


@ -1,37 +0,0 @@
# Integration Tests
Integration tests configure real virtual machines and run tests on
those to assert some properties.
You can find all integration tests under the [tests/integration](/tests/integration) directory.
## Run integration tests
```console
nix-build -A tests.integration.all
```
To run the "simple" integration test for keycloak, execute:
```console
nix-build -A tests.integration.keycloak.simple
```
## Write integration tests
To create an integration test for disnix, you'll need:
- a tarball,
- a manifest build,
- and a test definition with a test script written in Python.
The [NixOS official
documentation](https://nixos.org/manual/nixos/stable/index.html#sec-nixos-tests)
has a complete reference of the Python functions available in the
test script.
To iterate on the test script, by far the easiest way is to use the interactive mode through the `.driverInteractive` attribute, like so:
```console
nix-build -A tests.integration.keycloak.simple.driverInteractive
./result/bin/nixos-test-driver
```


@ -1,6 +0,0 @@
# Deploy to Linode
To deploy on a [Linode](linode.com) server, you first need to follow
[this
guide](https://www.linode.com/docs/guides/install-nixos-on-linode) to
install NixOS.


@ -1,24 +0,0 @@
# Unit Tests
Unit tests are used in Self Host Blocks to check that parsing
configurations produces the expected result.
You can find all unit tests under the [tests/unit](/tests/unit) directory.
To run the unit tests, do:
```bash
nix-instantiate --eval --strict . -A tests.unit
```
If all tests pass, you'll see the following output:
```
{ }
```
Otherwise, you'll see one attribute for each failing test. For example, you can dig into the first failing haproxy test with:
```
nix-instantiate --eval --strict . -A tests.unit.haproxy.0
```


@ -1,3 +0,0 @@
{ exec, ... }: {
pass = name: exec [./nix-pass.sh name];
}


@ -1,482 +0,0 @@
{ lib
, pkgs
, utils
}:
with builtins;
with lib;
with lib.attrsets;
with lib.lists;
with lib.strings;
with utils;
let
getAttrWithDefault = name: default: attrset:
if isAttrs attrset && hasAttr name attrset then
getAttr name attrset
else
default;
augmentedContent = fieldName: rules: parent: set:
let
print = {rule = k: parent: v:
assert assertMsg (isString v || isInt v) "cannot print key '${fieldName}.${k}' of type '${typeOf v}', should be string or int instead";
"${k} ${toString v}";};
matchingRule = k: v: findFirst (rule: rule.match k parent v) print rules;
augment = parent: k: v:
let
match = matchingRule k v;
rule = if hasAttr "rule" match then match.rule else null;
rules = if hasAttr "rules" match then match.rules else null;
indent = map (x: if hasAttr "indent" match then match.indent + x else x);
headerFn = if hasAttr "header" match then match.header else null;
header = optional (headerFn != null) (headerFn k);
trailer = optional (headerFn != null) "";
content = header ++ indent (augmentedContent "${fieldName}.${k}" rules (parent ++ [k]) v) ++ trailer;
in
if rule != null
then rule k parent v
else
assert assertMsg (isAttrs v) "attempt to apply rules on key '${toString k}' which is a '${typeOf v}' but should be a set:\n${toString v}";
if hasAttr "order" match then
{
inherit (match) order;
inherit content;
}
else
content;
augmented = mapAttrsToList (augment parent) (
assert assertMsg (isAttrs set) "attempt to apply rules on field ${fieldName} having type '${typeOf set}':\n${toString set}";
set
);
sortAugmented = sort (a: b:
(isAttrs a && hasAttr "order" a)
&& (isAttrs b && hasAttr "order" b)
&& a.order < b.order
);
onlyContent = (x: if isAttrs x && hasAttr "content" x then x.content else x);
in
flatten (map onlyContent (sortAugmented augmented));
updateByPath = path: fn: set:
if hasAttrByPath path set then
recursiveUpdate set (setAttrByPath path (fn (getAttrFromPath path set)))
else
set;
schema =
let
mkRule =
{ redirect ? false
, scheme ? "https"
, code ? null
, condition ? null
}:
concatStringsRecursive " " [
(optional redirect "redirect")
"scheme" scheme
(optional (code != null) "code ${toString code}")
(optional (condition != null) "if ${condition}")
];
mkBind =
{ addr
, ssl ? false
, crt ? null
}:
concatStringsRecursive " " [
"bind"
addr
(optional ssl "ssl")
(optional (crt != null) "crt ${crt}")
];
mkServer =
{ name
, address
, balance ? null
, check ? null
, httpcheck ? null
, forwardfor ? true
, resolvers ? null
}:
[
"mode http"
(optional forwardfor "option forwardfor")
(optional (httpcheck != null) "option httpchk ${httpcheck}")
(optional (balance != null) "balance ${balance}")
(concatStringsRecursive " " [
"server"
name
address
(optionals (check != null) (if
isBool check
then (if check then ["check"] else [])
else mapAttrsToList (k: v: "${k} ${v}") check))
(optional (resolvers != null) "resolvers ${resolvers}")
])
];
# Lua's import system requires the import path to be something like:
#
# /nix/store/123-name/<package>/<file.lua>
#
# Then the lua-prepend-path can be:
#
# /nix/store/123-name/?/<file.lua>
#
# Then when lua code imports <package>, it will search in the
# prepend paths and replace the question mark with the <package>
# name to get a match.
#
# But the config.source is actually without the <package> name:
#
# /nix/store/123-name/<file.lua>
#
# This requires us to create a new directory structure and we're
# using a linkFarm for this.
createPluginLinks = configs:
let
mkLink = name: config: {
inherit name;
path = config.source;
};
in
pkgs.linkFarm "haproxyplugins" (mapAttrsToList mkLink configs);
mkPlugin = links: name:
{ luapaths ? []
, cpaths ? []
, load ? null
, ...
}:
{
lua-prepend-path =
let
f = ext: type: path:
{
inherit type;
path =
if path == "." then
"${links}/${name}/?.${ext}"
else
"${links}/${name}/${path}/?.${ext}";
};
in
map (f "lua" "path") (toList luapaths)
++ map (f "so" "cpath") (toList cpaths);
} // optionalAttrs (load != null) {
lua-load = ["${links}/${name}/${load}"];
};
# Takes plugins as an attrset of name to {init, load, source},
# transforms them to a [attrset] with fields lua-prepend-path
# and optionally lua-load then returns a list of lines with all
# lua-prepend-path first and all lua-load afterwards.
mkPlugins = v:
let
f = recursiveMerge (mapAttrsToList (mkPlugin (createPluginLinks v)) v);
lua-prepend-path = map ({path, type}: "lua-prepend-path ${path} ${type}") (getAttrWithDefault "lua-prepend-path" [] f);
lua-load = map (x: "lua-load ${x}") (getAttrWithDefault "lua-load" [] f);
in
lua-prepend-path ++ lua-load;
in [
{
match = k: parent: v: k == "defaults";
order = 2;
indent = " ";
header = k: k;
rules = [
{
match = k: parent: v: k == "timeout";
rule = k: parent: v: mapAttrsToList (k1: v1: "${k} ${k1} ${v1}") v;
}
];
}
{
match = k: parent: v: k == "global";
order = 1;
indent = " ";
header = k: k;
rules = [
{
match = k: parent: v: k == "plugins";
rule = k: parent: v: mkPlugins v;
}
{
match = k: parent: v: k == "setenv";
rule = k: parent: v: mapAttrsToList (k: v: "setenv ${k} ${v}" ) v;
}
];
}
{
match = k: parent: v: k == "resolvers";
order = 3;
rules = [
{
match = k: parent: v: true;
header = k: "resolvers " + k;
indent = " ";
rules = [
{
match = k: parent: v: k == "nameservers";
rule = k: parent: v: mapAttrsToList (k1: v1: "nameserver ${k1} ${v1}") v;
}
];
}
];
}
{
match = k: parent: v: k == "frontend";
order = 4;
rules = [
{
match = k: parent: v: true;
header = k: "frontend " + k;
indent = " ";
rules = [
{
match = k: parent: v: k == "rules";
rule = k: parent: v: map mkRule v;
}
{
match = k: parent: v: k == "bind" && isAttrs v;
rule = k: parent: v: mkBind v;
}
{
match = k: parent: v: k == "use_backend";
rule = k: parent: v:
let
use = name: value: "use_backend ${name} ${toString value}";
in
if isList v then
map (v: use v.name v.value) v
else
use v.name v.value;
}
{
match = k: parent: v: true ;
rule = k: parent: v:
let
l = prefix: v:
if isAttrs v then
mapAttrsToList (k: v: l "${prefix} ${k}" v) v
else if isList v then
map (l prefix) v
else if isBool v then
optional v prefix
else
assert assertMsg (isString v) "value for field ${k} should be a string, bool, attr or list, got: ${typeOf v}";
"${prefix} ${v}";
in
l k v;
}
];
}
];
}
{
match = k: parent: v: k == "backend";
order = 5;
rules = [
{
match = k: parent: v: true;
header = k: "backend " + k;
indent = " ";
rules = [
{
match = k: parent: v: k == "options";
rule = k: parent: v: v;
}
{
match = k: parent: v: k == "servers";
rule = k: parent: v: map mkServer v;
}
];
}
];
}
];
concatStringsRecursive = sep: strings:
concatStringsSep sep (flatten strings);
assertHasAttr = name: attrPath: v:
assertMsg
(hasAttrByPath attrPath v)
"no ${last attrPath} defined in config for site ${name}.${concatStringsSep "." (init attrPath)}, found attr names: ${toString (attrNames (getAttrFromPath (init attrPath) v))}";
# Takes a function producing a [nameValuePair], applies it to
# all name-value pair in the given set and merges the resulting
# [[nameValuePair]].
mapAttrsFlatten = f: set: listToAttrs (concatLists (mapAttrsToList f set));
mapIfIsAttrs = f: value:
if isAttrs value
then f value
else value;
flattenAttrs = sep: cond: set:
let
recurse = mapIfIsAttrs (mapAttrsFlatten (
n: v: let
result = recurse v;
in
if isAttrs result && cond n v
then mapAttrsToList (n2: v2: nameValuePair "${n}${sep}${n2}" v2) result
else [(nameValuePair n result)]
));
in recurse set;
in
{
inherit updateByPath recursiveMerge;
default =
{ user
, group
, certPath
, plugins ? {}
, globalEnvs ? {}
, stats ? null
, debug ? false
, sites ? {}
, globals ? {}
, defaults ? {}
, resolvers ? {}
}: {
global = {
# Silence a warning issued by haproxy. Using 2048-bit
# DH parameters instead of the default 1024-bit makes the TLS key exchange stronger.
"tune.ssl.default-dh-param" = 2048;
maxconn = 20000;
inherit user group;
log = "/dev/log local0 info";
inherit plugins;
setenv = globalEnvs;
} // globals;
defaults = {
log = "global";
option = "httplog";
timeout = {
connect = "10s";
client = "15s";
server = "30s";
queue = "100s";
};
} // defaults;
frontend = {
http-to-https = {
mode = "http";
bind = "*:80";
rules = [
{
redirect = true;
scheme = "https";
code = 301;
condition = "!{ ssl_fc }";
}
];
backend = {};
};
https = (
let
r = (
[{
mode = "http";
bind = {
addr = "*:443";
ssl = true;
crt = certPath;
};
http-request = {
set-header = [
"X-Forwarded-Port %[dst_port]"
"X-Forwarded-For %[src]"
];
add-header = [
"X-Forwarded-Proto https"
];
};
http-response = {
set-header = [
''Strict-Transport-Security "max-age=15552000; includeSubDomains; preload;"''
];
};
}]
++ (mapAttrsToList (name: config:
assert assertHasAttr name ["frontend"] config;
(updateByPath ["frontend" "use_backend"] (x: [(nameValuePair name x)]) config).frontend
) sites)
++ (mapAttrsToList (name: config:
if (hasAttr "debugHeaders" config && (getAttr "debugHeaders" config) != null) then {
option = "httplog";
http-request = {
capture = "req.hdrs len 512 if ${config.debugHeaders}";
};
log-format = ''"%ci:%cp [%tr] %ft [[%hr]] %hs %{+Q}r"'';
} else {}
) sites)
);
in
recursiveMerge r
)
// optionalAttrs (debug) {
log-format = ''"%ci:%cp [%tr] %ft %b/%s %TR/%Tw/%Tc/%Tr/%Ta %ST %B %CC %CS %tsc %ac/%fc/%bc/%sc/%rc %sq/%bq %hr %hs %{+Q}r %sslv %sslc %[ssl_fc_cipherlist_str]"'';
};
} // optionalAttrs (stats != null)
(let
stats_ = {
enable = true;
port = 8404;
uri = "/stats";
refresh = "10s";
prometheusUri = null;
hide-version = false;
} // stats;
in
{
stats = {
bind = "localhost:${toString stats_.port}";
mode = "http";
stats = {
enable = stats_.enable;
hide-version = stats_.hide-version;
uri = stats_.uri;
refresh = stats_.refresh;
};
} // optionalAttrs (stats_.prometheusUri != null) {
http-request = [
"use-service prometheus-exporter if { path ${stats_.prometheusUri} }"
];
};
});
backend =
mapAttrs' (name: config:
assert assertMsg (hasAttr "backend" config) "no backend defined in config for site ${name}, found attr names: ${toString (attrNames config)}";
nameValuePair name config.backend)
sites;
inherit resolvers;
};
render = config:
concatStringsSep "\n" (augmentedContent "" schema [] config);
}


@ -1,91 +0,0 @@
{ pkgs
, utils
}:
{ name
, user
, group
, config
, pidfile ? "/run/haproxy/haproxy.pid"
, socket ? "/run/haproxy/haproxy.sock"
, dependsOn ? {}
}:
let
configcreator = pkgs.callPackage ./configcreator.nix {inherit utils;};
content = configcreator.render (configcreator.default (config dependsOn // {inherit user group;}));
configfile = pkgs.writeText "haproxy.cfg" content;
in
{
inherit name;
inherit user group;
pkg = dependsOn: utils.systemd.mkService {
name = "haproxy";
content = ''
[Unit]
Description=HAProxy Load Balancer
Documentation=https://www.haproxy.com/documentation/hapee/latest/onepage/
After=network.target network-online.target
Wants=network-online.target systemd-networkd-wait-online.target
${utils.unitDepends "After" dependsOn}
${utils.unitDepends "Wants" dependsOn}
StartLimitInterval=14400
StartLimitBurst=10
[Service]
Environment="CONFIG=${configfile}" "PIDFILE=${pidfile}" "EXTRAOPTS=-S ${socket}"
ExecStart=${pkgs.haproxy}/bin/haproxy -Ws -f $CONFIG -p $PIDFILE $EXTRAOPTS
ExecReload=${pkgs.haproxy}/bin/haproxy -Ws -f $CONFIG -c -q $EXTRAOPTS
ExecReload=${pkgs.coreutils}/bin/kill -USR2 $MAINPID
KillMode=mixed
Restart=always
SuccessExitStatus=143
Type=notify
# Restart=on-abnormal
RuntimeDirectory=haproxy
# KillMode=mixed
# KillSignal=SIGQUIT
TimeoutStopSec=5s
LimitNOFILE=1048576
LimitNPROC=512
PrivateDevices=true
LockPersonality=true
NoNewPrivileges=true
PrivateDevices=true
PrivateTmp=true
ProtectClock=true
ProtectControlGroups=true
ProtectHome=true
ProtectHostname=true
ProtectKernelLogs=true
ProtectKernelModules=true
ProtectKernelTunables=true
ProtectSystem=full
RestrictAddressFamilies=AF_INET AF_INET6 AF_NETLINK AF_UNIX
RestrictNamespaces=true
RestrictRealtime=true
RestrictSUIDSGID=true
# CapabilityBoundingSet=CAP_NET_BIND_SERVICE
# AmbientCapabilities=CAP_NET_BIND_SERVICE
# ProtectSystem=strict
# ReadWritePaths=/var/lib/haproxy /var/log/haproxy
[Install]
WantedBy=multi-user.target
'';
};
inherit dependsOn;
type = "systemd-unit";
}


@ -1,7 +0,0 @@
# Jellyfin
This package installs Jellyfin.
## TODOs
Review all settings for Jellyfin, for example the Prometheus metrics: https://jellyfin.org/docs/general/networking/monitoring/


@ -1,210 +0,0 @@
{ stdenv
, pkgs
, lib
}:
{ realm
, domain
, roles ? {}
, clients ? {}
, users ? {}
, groups ? []
}:
with builtins;
with (pkgs.lib.attrsets);
let
mkRole = k: v:
let
iscomposite = (length v) > 0;
in {
name = k;
composite = iscomposite;
} // optionalAttrs iscomposite {
composites = {
realm = v;
};
};
mkClientRole =
let
roles = config: config.roles or [];
c = v:
{
name = v;
clientRole = true;
};
in k: config: map c (roles config);
mkGroup = name: {
inherit name;
path = "/${name}";
attributes = {};
realmRoles = [];
clientRoles = {};
subGroups = [];
};
mkClient = k: config:
let
url = "https://${k}.${domain}";
in
{
clientId = k;
rootUrl = url;
clientAuthenticatorType = "client-secret";
redirectUris = ["${url}/oauth2/callback"];
webOrigins = [url];
authorizationServicesEnabled = true;
serviceAccountsEnabled = true;
protocol = "openid-connect";
publicClient = false;
protocolMappers = [
{
name = "Client ID";
protocol = "openid-connect";
protocolMapper = "oidc-usersessionmodel-note-mapper";
consentRequired = false;
config = {
"user.session.note" = "clientId";
"id.token.claim" = "true";
"access.token.claim" = "true";
"claim.name" = "clientId";
"jsonType.label" = "String";
};
}
{
name = "Client Host";
protocol = "openid-connect";
protocolMapper = "oidc-usersessionmodel-note-mapper";
consentRequired = false;
config = {
"user.session.note" = "clientHost";
"id.token.claim" = "true";
"access.token.claim" = "true";
"claim.name" = "clientHost";
"jsonType.label" = "String";
};
}
{
name = "Client IP Address";
protocol = "openid-connect";
protocolMapper = "oidc-usersessionmodel-note-mapper";
consentRequired = false;
config = {
"user.session.note" = "clientAddress";
"id.token.claim" = "true";
"access.token.claim" = "true";
"claim.name" = "clientAddress";
"jsonType.label" = "String";
};
}
{
name = "Audience";
protocol = "openid-connect";
protocolMapper = "oidc-audience-mapper";
config = {
"included.client.audience" = k;
"id.token.claim" = "false";
"access.token.claim" = "true";
"included.custom.audience" = k;
};
}
{
name = "Group";
protocol = "openid-connect";
protocolMapper = "oidc-group-membership-mapper";
config = {
"full.path" = "true";
"id.token.claim" = "true";
"access.token.claim" = "true";
"claim.name" = "groups";
"userinfo.token.claim" = "true";
};
}
];
authorizationSettings = {
policyEnforcementMode = "ENFORCING";
resources =
let
mkResource = name: uris: {
inherit name;
type = "urn:${k}:resources:${name}";
ownerManagedAccess = false;
inherit uris;
};
in
mapAttrsToList mkResource (config.resourcesUris or {});
policies =
let
mkPolicyRole = role: {
id = role;
required = true;
};
mkPolicy = name: roles: {
name = "${concatStringsSep "," roles} has access";
type = "role";
logic = "POSITIVE";
decisionStrategy = "UNANIMOUS";
config = {
roles = toJSON (map mkPolicyRole roles);
};
};
mkPermission = name: roles: resources: {
name = "${concatStringsSep "," roles} has access to ${concatStringsSep "," resources}";
type = "resource";
logic = "POSITIVE";
decisionStrategy = "UNANIMOUS";
config = {
resources = toJSON resources;
applyPolicies = toJSON (map (r: "${concatStringsSep "," roles} has access") roles);
};
};
in
(mapAttrsToList (name: {roles, ...}: mkPolicy name roles) (config.access or {}))
++ (mapAttrsToList (name: {roles, resources}: mkPermission name roles resources) (config.access or {}));
};
};
mkUser = k: config:
{
username = k;
enabled = true;
emailVerified = true;
inherit (config) email firstName lastName;
} // optionalAttrs (config ? "groups") {
inherit (config) groups;
} // optionalAttrs (config ? "roles") {
realmRoles = config.roles;
} // optionalAttrs (config ? "initialPassword") {
credentials = [
{
type = "password";
userLabel = "initial";
value = "$(keycloak.users.${k}.password)";
}
];
};
in
{
inherit realm;
id = realm;
enabled = true;
clients = mapAttrsToList mkClient clients;
roles = {
realm = mapAttrsToList mkRole roles;
client = mapAttrs mkClientRole clients;
};
groups = map mkGroup groups;
users = mapAttrsToList mkUser users;
}

View file

@ -1,114 +0,0 @@
{ stdenv
, pkgs
, lib
, utils
}:
{ name
, config
, keycloakServiceName
, keycloakSecretsDir
, keycloakAvailabilityTimeout ? "120s"
, keycloakUrl
, keycloakUser
, keys
, debug ? false
, dependsOn ? {}
}:
# https://github.com/adorsys/keycloak-config-cli
# The password must be given through a file named "keycloak.password" under keycloakSecretsDir.
let
configcreator = pkgs.callPackage ./configcreator.nix {};
configfile = pkgs.writeText "keycloakcliconfig.json" (builtins.toJSON (configcreator config));
envs = lib.concatMapStrings (x: "\nEnvironment=" + x) ([
"SPRING_CONFIG_IMPORT=configtree:${keycloakSecretsDir}/"
"KEYCLOAK_URL=${keycloakUrl}"
"KEYCLOAK_USER=${keycloakUser}"
"KEYCLOAK_AVAILABILITYCHECK_ENABLED=true"
"KEYCLOAK_AVAILABILITYCHECK_TIMEOUT=${keycloakAvailabilityTimeout}"
"IMPORT_VARSUBSTITUTION_ENABLED=true"
"IMPORT_FILES_LOCATIONS=${configfile}"
] ++ (if !debug then [] else [
"DEBUG=true"
"LOGGING_LEVEL_ROOT=debug"
"LOGGING_LEVEL_HTTP=debug"
"LOGGING_LEVEL_REALMCONFIG=debug"
"LOGGING_LEVEL_KEYCLOAKCONFIGCLI=debug"
]));
keycloak-cli-config = pkgs.stdenv.mkDerivation rec {
pname = "keycloak-cli-config";
version = "5.3.1";
keycloakVersion = "18.0.2";
src = pkgs.fetchurl {
url = "https://github.com/adorsys/keycloak-config-cli/releases/download/v${version}/keycloak-config-cli-${keycloakVersion}.jar";
sha256 = "sha256-vC0d0g5TFddetpBwRDMokloTCr7ibFK//Yuvh+m77RA=";
};
buildInputs = [ pkgs.makeWrapper pkgs.jre ];
phases = [ "installPhase" ];
installPhase = ''
mkdir -p $out/bin
cp $src $out/bin/keycloak-cli-config.jar
'';
};
in
{
inherit name;
pkg = {...}: utils.systemd.mkService rec {
name = "keycloak-cli-config";
content = ''
[Unit]
Description=Keycloak Realm Config
After=${keycloakServiceName}
Wants=${keycloakServiceName}
After=${utils.keyServiceDependencies keys}
Wants=${utils.keyServiceDependencies keys}
[Service]
User=keycloakcli
Group=keycloakcli
${utils.keyEnvironmentFile keys.userpasswords}
Type=oneshot${envs}
ExecStart=${pkgs.jre}/bin/java -jar ${keycloak-cli-config}/bin/keycloak-cli-config.jar
RuntimeDirectory=keycloak-cli-config
PrivateDevices=true
LockPersonality=true
NoNewPrivileges=true
PrivateDevices=true
PrivateTmp=true
ProtectClock=true
ProtectControlGroups=true
ProtectHome=true
ProtectHostname=true
ProtectKernelLogs=true
ProtectKernelModules=true
ProtectKernelTunables=true
ProtectSystem=full
RestrictAddressFamilies=AF_INET AF_INET6 AF_NETLINK AF_UNIX
RestrictNamespaces=true
RestrictRealtime=true
RestrictSUIDSGID=true
'';
};
inherit dependsOn;
type = "systemd-unit";
}

View file

@ -1,95 +0,0 @@
{ stdenv
, pkgs
, utils
}:
{ name ? "keycloak-haproxy"
, domain
, realms ? []
, every ? "10m"
, KeycloakService
}:
rec {
inherit name;
stateDir = "keycloak-public-keys";
downloadDir = "/var/lib/keycloak-public-keys";
systemdUnitFile = "keycloak-haproxy.service";
pkg =
with pkgs.lib;
let
bin = pkgs.writeShellApplication {
name = "get-realms.sh";
runtimeInputs = [ pkgs.coreutils pkgs.curl pkgs.jq ];
text = ''
set -euxo pipefail
realms="$1"
for realm in $realms; do
curl "${domain}/realms/$realm" | jq --raw-output .public_key > "${downloadDir}/$realm.pem"
done
'';
};
in
{ KeycloakService
, ...
}: utils.systemd.mkService rec {
name = "keycloak-haproxy";
content = ''
[Unit]
Description=Get Keycloak realms for Haproxy
[Service]
ExecStart=${bin}/bin/get-realms.sh ${concatStringsSep " " realms}
DynamicUser=true
CapabilityBoundingSet=
AmbientCapabilities=
StateDirectory=${stateDir}
PrivateUsers=yes
NoNewPrivileges=yes
ProtectSystem=strict
ProtectHome=yes
PrivateTmp=yes
PrivateDevices=yes
ProtectHostname=yes
ProtectClock=yes
ProtectKernelTunables=yes
ProtectKernelModules=yes
ProtectKernelLogs=yes
ProtectControlGroups=yes
RestrictAddressFamilies=AF_UNIX AF_INET AF_INET6
RestrictNamespaces=yes
LockPersonality=yes
MemoryDenyWriteExecute=yes
RestrictRealtime=yes
RestrictSUIDSGID=yes
RemoveIPC=yes
SystemCallFilter=@system-service
SystemCallFilter=~@privileged @resources
SystemCallArchitectures=native
'';
timer = ''
[Unit]
Description=Run ${name}
After=network.target ${KeycloakService.systemdUnitFile}
[Timer]
OnUnitActiveSec=${every}
[Install]
WantedBy=timers.target
'';
};
dependsOn = {
inherit KeycloakService;
};
type = "systemd-unit";
}


@ -1,35 +0,0 @@
{ customPkgs
, pkgs
, utils
}:
{ serviceName ? "Keycloak"
, subdomain ? "keycloak"
, database ?
{
name = subdomain;
username = "keycloak";
# TODO: use passwordFile
password = "keycloak";
}
}:
rec {
inherit subdomain;
inherit database;
db = customPkgs.mkPostgresDB {
name = "KeycloakPostgresDB";
database = database.name;
username = database.username;
# TODO: use passwordFile
password = database.password;
};
services = {
${db.name} = db;
};
distribute = on: {
${db.name} = on;
};
}


@ -1,161 +0,0 @@
{ stdenv
, pkgs
, lib
, utils
}:
{ name
, user ? "keycloak"
, group ? "keycloak"
, dbType ? "postgres"
, postgresServiceName
, initialAdminUsername ? null
, keys
, listenPort ? 8080
, logLevel ? "INFO"
, metricsEnabled ? false
, hostname
, subdomain
, dbUsername ? "keycloak"
, dbHost ? x: "localhost"
, dbPort ? "5432"
, dbDatabase ? "keycloak"
, KeycloakPostgresDB
}:
assert lib.assertOneOf "dbType" dbType ["postgres"];
let
keycloak = pkgs.keycloak.override {
# This is needed for keycloak to build with the correct driver.
confFile = pkgs.writeText "keycloak.conf" ''
db=${dbType}
'';
};
in
{
inherit name;
inherit initialAdminUsername;
inherit hostname subdomain listenPort;
systemdUnitFile = "${name}.service";
pkg = { KeycloakPostgresDB }:
let
configFile = pkgs.writeText "keycloak.conf" ''
# The password of the database user is given by an environment variable.
db=${dbType}
db-username=${dbUsername}
db-url-host=${dbHost {inherit KeycloakPostgresDB;}}
db-url-port=${dbPort}
db-url-database=${dbDatabase}
# db-url-properties= # Would be used for ssl, see https://github.com/NixOS/nixpkgs/blob/master/nixos/modules/services/web-apps/keycloak.nix#L491
# Observability
# If the server should expose metrics and healthcheck endpoints.
metrics-enabled=${if metricsEnabled then "true" else "false"}
# HTTP
http-host=127.0.0.1
http-port=${builtins.toString listenPort}
# The file path to a server certificate or certificate chain in PEM format.
#https-certificate-file=''${kc.home.dir}conf/server.crt.pem
# The file path to a private key in PEM format.
#https-certificate-key-file=''${kc.home.dir}conf/server.key.pem
# The proxy address forwarding mode if the server is behind a reverse proxy.
# https://www.keycloak.org/server/reverseproxy
proxy=edge
# Do not attach route to cookies and rely on the session affinity capabilities from reverse proxy
#spi-sticky-session-encoder-infinispan-should-attach-route=false
# Hostname for the Keycloak server.
hostname=${hostname}
spi-x509cert-lookup-provider=haproxy
log-level=${logLevel}
'';
in
with lib.attrsets;
utils.systemd.mkService rec {
name = "keycloak";
content = ''
[Unit]
Description=Keycloak server
After=network-online.target
Wants=network-online.target systemd-networkd-wait-online.service ${postgresServiceName}
After=${utils.keyServiceDependencies keys}
Wants=${utils.keyServiceDependencies keys}
[Service]
User=${user}
Group=${group}
${utils.keyEnvironmentFile keys.dbPassword}
${if initialAdminUsername != null then "Environment=KEYCLOAK_ADMIN="+initialAdminUsername else ""}
${if hasAttr "initialAdminPassword" keys then utils.keyEnvironmentFile keys.initialAdminPassword else ""}
Environment=PATH=${pkgs.coreutils}/bin
Environment=KC_HOME_DIR="/run/keycloak"
# build is run upstream in the pkgs.keycloak definition; we add
# the --optimized flag to avoid running build on startup
ExecStart=${keycloak}/bin/kc.sh -cf ${configFile} start --optimized
# ReadWritePaths=/var/lib/keycloak
# ReadWritePaths=/var/log/keycloak
# ReadWritePaths=/usr/share/java/keycloak/lib/quarkus
RuntimeDirectory=keycloak
DynamicUser=true
# Disable timeout logic and wait until process is stopped
TimeoutStopSec=0
TimeoutStartSec=10min
# SIGTERM signal is used to stop the Java process
KillSignal=SIGTERM
# Send the signal only to the JVM rather than its control group
KillMode=process
# Java process is never killed
SendSIGKILL=no
# When a JVM receives a SIGTERM signal it exits with code 143
SuccessExitStatus=143
# Hardening options
# CapabilityBoundingSet=
# AmbientCapabilities=CAP_NET_BIND_SERVICE
# NoNewPrivileges=true
# Fails with:
# Failed to set up mount namespacing: /run/systemd/unit-root/var/lib/keycloak: No such file or directory
# ProtectHome=true
# ProtectSystem=strict
# ProtectKernelTunables=true
# ProtectKernelModules=true
# ProtectControlGroups=true
# PrivateTmp=true
# PrivateDevices=true
# LockPersonality=true
[Install]
WantedBy=multi-user.target
'';
};
dependsOn = {
inherit KeycloakPostgresDB;
};
type = "systemd-unit";
}


@ -1,18 +0,0 @@
let
hercules-ci-agent =
builtins.fetchTarball "https://github.com/hercules-ci/hercules-ci-agent/archive/stable.tar.gz";
in
{
network.description = "Hercules CI agents";
agent = {
imports = [
(hercules-ci-agent + "/module.nix")
];
services.hercules-ci-agent.enable = true;
services.hercules-ci-agent.concurrentTasks = 4; # Number of jobs to run
deployment.keys."cluster-join-token.key".keyFile = ./cluster-join-token.key;
deployment.keys."binary-caches.json".keyFile = ./binary-caches.json;
};
}


@ -1,252 +0,0 @@
{ stdenv
, pkgs
, utils
}:
{ name
, siteName
, user ? "http"
, group ? "http"
, pidFile ? "/run/nginx/nginx.pid"
, runtimeDirectory
, config ? {}
, dependsOn ? {}
}:
let
nginxSocket = "${runtimeDirectory}/${config.siteName}.sock";
listen =
if nginxSocket != null then
"unix:${nginxSocket}"
else
config.port;
fastcgi =
if config.phpFpmSiteSocket == null then
""
else
''
location ~ \.php$ {
fastcgi_split_path_info ^(.+\.php)(/.+)$;
fastcgi_pass unix:${config.phpFpmSiteSocket};
fastcgi_index index.php;
fastcgi_param GATEWAY_INTERFACE CGI/1.1;
fastcgi_param SERVER_SOFTWARE nginx;
fastcgi_param QUERY_STRING $query_string;
fastcgi_param REQUEST_METHOD $request_method;
fastcgi_param CONTENT_TYPE $content_type;
fastcgi_param CONTENT_LENGTH $content_length;
fastcgi_param SCRIPT_FILENAME ${config.siteRoot}$fastcgi_script_name;
# fastcgi_param SCRIPT_NAME $fastcgi_script_name;
fastcgi_param REQUEST_URI $request_uri;
fastcgi_param DOCUMENT_URI $document_uri;
fastcgi_param DOCUMENT_ROOT ${config.siteRoot};
fastcgi_param SERVER_PROTOCOL $server_protocol;
fastcgi_param REMOTE_ADDR $remote_addr;
fastcgi_param REMOTE_PORT $remote_port;
fastcgi_param SERVER_ADDR $server_addr;
fastcgi_param SERVER_PORT $server_port;
fastcgi_param SERVER_NAME $server_name;
}
'';
mkConfig =
{ port
, siteName
, siteRoot ? "/usr/share/webapps/${siteName}"
, siteSocket ? null
, phpFpmSiteSocket ? null
, logLevel ? "WARN"
}: ''
error_log syslog:server=unix:/dev/log,tag=nginx${siteName},nohostname,severity=error;
worker_processes 5;
worker_rlimit_nofile 8192;
events {
worker_connections 4096;
}
http {
access_log syslog:server=unix:/dev/log,tag=nginx${siteName},nohostname,severity=info combined;
server {
listen ${listen};
root ${siteRoot};
index index.php index.html;
location / {
try_files $uri $uri/ =404;
}
${fastcgi}
}
types {
text/html html htm shtml;
text/css css;
text/xml xml;
image/gif gif;
image/jpeg jpeg jpg;
application/x-javascript js;
application/atom+xml atom;
application/rss+xml rss;
text/mathml mml;
text/plain txt;
text/vnd.sun.j2me.app-descriptor jad;
text/vnd.wap.wml wml;
text/x-component htc;
image/png png;
image/tiff tif tiff;
image/vnd.wap.wbmp wbmp;
image/x-icon ico;
image/x-jng jng;
image/x-ms-bmp bmp;
image/svg+xml svg svgz;
image/webp webp;
application/java-archive jar war ear;
application/mac-binhex40 hqx;
application/msword doc;
application/pdf pdf;
application/postscript ps eps ai;
application/rtf rtf;
application/vnd.ms-excel xls;
application/vnd.ms-powerpoint ppt;
application/vnd.wap.wmlc wmlc;
application/vnd.google-earth.kml+xml kml;
application/vnd.google-earth.kmz kmz;
application/x-7z-compressed 7z;
application/x-cocoa cco;
application/x-java-archive-diff jardiff;
application/x-java-jnlp-file jnlp;
application/x-makeself run;
application/x-perl pl pm;
application/x-pilot prc pdb;
application/x-rar-compressed rar;
application/x-redhat-package-manager rpm;
application/x-sea sea;
application/x-shockwave-flash swf;
application/x-stuffit sit;
application/x-tcl tcl tk;
application/x-x509-ca-cert der pem crt;
application/x-xpinstall xpi;
application/xhtml+xml xhtml;
application/zip zip;
application/octet-stream bin exe dll;
application/octet-stream deb;
application/octet-stream dmg;
application/octet-stream eot;
application/octet-stream iso img;
application/octet-stream msi msp msm;
audio/midi mid midi kar;
audio/mpeg mp3;
audio/ogg ogg;
audio/x-m4a m4a;
audio/x-realaudio ra;
video/3gpp 3gpp 3gp;
video/mp4 mp4;
video/mpeg mpeg mpg;
video/quicktime mov;
video/webm webm;
video/x-flv flv;
video/x-m4v m4v;
video/x-mng mng;
video/x-ms-asf asx asf;
video/x-ms-wmv wmv;
video/x-msvideo avi;
}
default_type application/octet-stream;
gzip_types text/plain text/xml text/css
text/comma-separated-values
text/javascript application/x-javascript
application/atom+xml;
}
'';
configFile = pkgs.writeText "nginx.conf" (mkConfig config);
in
{
inherit name;
inherit runtimeDirectory nginxSocket;
inherit user group;
pkg = utils.systemd.mkService rec {
name = "nginx-${siteName}";
content = ''
[Unit]
Description=Nginx webserver
After=network.target network-online.target
Wants=network-online.target systemd-networkd-wait-online.target
${utils.unitDepends "After" dependsOn}
${utils.unitDepends "Wants" dependsOn}
StartLimitInterval=14400
StartLimitBurst=10
[Service]
Type=forking
User=${user}
Group=${group}
PIDFile=${pidFile}
ExecStart=${pkgs.nginx}/bin/nginx -c ${configFile} -g 'pid ${pidFile};'
ExecReload=${pkgs.nginx}/bin/nginx -s reload
KillMode=mixed
# Nginx verifies it can open a file under here even when configured
# to write elsewhere.
LogsDirectory=nginx
CacheDirectory=nginx
RuntimeDirectory=nginx
# Restart=on-abnormal
# KillSignal=SIGQUIT
TimeoutStopSec=5s
LimitNOFILE=1048576
LimitNPROC=512
LockPersonality=true
NoNewPrivileges=true
PrivateDevices=true
PrivateTmp=true
ProtectClock=true
ProtectControlGroups=true
ProtectHome=true
ProtectHostname=true
ProtectKernelLogs=true
ProtectKernelModules=true
ProtectKernelTunables=true
ProtectSystem=full
RestrictAddressFamilies=AF_INET AF_INET6 AF_NETLINK AF_UNIX
RestrictNamespaces=true
RestrictRealtime=true
RestrictSUIDSGID=true
# CapabilityBoundingSet=CAP_NET_BIND_SERVICE
AmbientCapabilities=CAP_NET_BIND_SERVICE
# ProtectSystem=strict
# ReadWritePaths=/var/lib/nginx /var/log/nginx
[Install]
WantedBy=multi-user.target
'';
};
inherit dependsOn;
type = "systemd-unit";
}

View file

@ -1,10 +0,0 @@
#!/usr/bin/env bash
# nix-pass.sh
set -euo pipefail
f=$(mktemp)
trap 'rm -f "$f"' EXIT
pass show "$1" | head -c -1 > "$f"
nix-instantiate --eval -E "builtins.readFile $f"

View file

@ -1,158 +0,0 @@
{ stdenv
, pkgs
, utils
}:
{ name
, serviceName
, domain
, keycloakSubdomain ? "keycloak"
, keycloakDomain ? domain
, realm
, allowed_roles ? []
, skip_auth_routes ? []
, api_routes ? []
, ingress
, egress
, metricsPort
, keys
, distribution
, KeycloakService
, KeycloakCliService
, HaproxyService
, debug ? true
}:
with builtins;
with pkgs.lib.lists;
with pkgs.lib.strings;
rec {
inherit name;
pkg =
{ KeycloakService
, KeycloakCliService
, HaproxyService
}:
let
config = pkgs.writeText "${serviceName}.cfg" (''
provider = "keycloak-oidc"
provider_display_name="Keycloak"
http_address = "${ingress}"
upstreams = ${builtins.toJSON egress}
metrics_address = "127.0.0.1:${toString metricsPort}"
client_id = "${serviceName}"
scope="openid"
redirect_url = "https://${serviceName}.${domain}/oauth2/callback"
oidc_issuer_url = "https://${keycloakSubdomain}.${keycloakDomain}/realms/${realm}"
email_domains = [ "*" ]
allowed_roles = ${builtins.toJSON allowed_roles}
skip_auth_routes = ${builtins.toJSON skip_auth_routes}
api_routes = ${builtins.toJSON api_routes}
reverse_proxy = "true"
# trusted_ips = "@"
skip_provider_button = "true"
# pass_authorization_header = true
# pass_access_token = true
# pass_user_headers = true
# set_authorization_header = true
# set_xauthrequest = true
'' + (if !debug then "" else ''
auth_logging = "true"
request_logging = "true"
''));
exec = pkgs.writeShellApplication {
name = "oauth2proxy-wrapper";
runtimeInputs = with pkgs; [curl coreutils];
text = ''
while ! curl --silent ${KeycloakService.hostname}:${builtins.toString KeycloakService.listenPort} > /dev/null; do
echo "Waiting for port ${builtins.toString KeycloakService.listenPort} to open..."
sleep 10
done
sleep 2
'';
};
oauth2-proxy =
let
version = "f93166229fe9b57f7d54fb0a9c42939f3f30340f";
src = pkgs.fetchFromGitHub {
owner = "ibizaman";
repo = "oauth2-proxy";
rev = version;
sha256 = "sha256-RI34N+YmUqAanuJOGUA+rUTS1TpUoy8rw6EFGeLh5L0=";
# sha256 = pkgs.lib.fakeSha256;
};
in
# Reuse the kopia expression from nixpkgs purely as a buildGoModule
# skeleton: buildGoModule is swapped out so that the oauth2-proxy fork's
# src, version and vendorSha256 defined above are what actually get built.
(pkgs.callPackage "${pkgs.path}/pkgs/tools/backup/kopia" {
buildGoModule = args: pkgs.buildGo118Module (args // {
vendorSha256 = "sha256-2WUd2RxeOal0lpp/TuGSyfP1ppvG/Vd3bgsSsNO8ejo=";
inherit src version;
});
});
oauth2proxyBin = "${oauth2-proxy}/bin/oauth2-proxy";
in utils.systemd.mkService rec {
name = "oauth2proxy-${serviceName}";
content = ''
[Unit]
Description=Oauth2 proxy for ${serviceName}
After=${KeycloakService.systemdUnitFile}
Wants=${KeycloakService.systemdUnitFile}
After=${utils.keyServiceDependencies keys}
Wants=${utils.keyServiceDependencies keys}
[Service]
ExecStartPre=${exec}/bin/oauth2proxy-wrapper
TimeoutStartSec=8m
ExecStart=${oauth2proxyBin} --config ${config}
DynamicUser=true
RuntimeDirectory=oauth2proxy-${serviceName}
${utils.keyEnvironmentFiles keys}
CapabilityBoundingSet=
AmbientCapabilities=
PrivateUsers=yes
NoNewPrivileges=yes
ProtectSystem=strict
ProtectHome=yes
PrivateTmp=yes
PrivateDevices=yes
ProtectHostname=yes
ProtectClock=yes
ProtectKernelTunables=yes
ProtectKernelModules=yes
ProtectKernelLogs=yes
ProtectControlGroups=yes
RestrictAddressFamilies=AF_UNIX AF_INET AF_INET6
RestrictNamespaces=yes
LockPersonality=yes
MemoryDenyWriteExecute=yes
RestrictRealtime=yes
RestrictSUIDSGID=yes
RemoveIPC=yes
SystemCallFilter=@system-service
SystemCallFilter=~@privileged @resources
SystemCallArchitectures=native
[Install]
WantedBy=multi-user.target
'';
};
dependsOn = {
inherit HaproxyService KeycloakService KeycloakCliService;
};
type = "systemd-unit";
}

View file

@ -1,46 +0,0 @@
{ pkgs
, siteName
, logLevel ? "notice"
, siteRoot ? "/usr/share/webapps/${siteName}"
, user
, group
, siteSocket
, allowedClients ? "127.0.0.1"
, socketUser
, socketGroup
, statusPath ? "/status"
, maxChildren ? 5
, startServers ? 2
, minSpareServers ? 1
, maxSpareServers ? 3
}: pkgs.writeText "php-fpm-${siteName}.conf" ''
[global]
error_log = syslog
syslog.ident = php-fpm
log_level = ${logLevel}
[${siteName}]
user = ${user}
group = ${group}
listen = ${siteSocket}
listen.allowed_clients = ${allowedClients}
listen.owner = ${socketUser}
listen.group = ${socketGroup}
env[PATH] = /usr/local/bin:/usr/bin:/bin
env[TMP] = /tmp
chdir = ${siteRoot}
pm = dynamic
pm.max_children = ${builtins.toString maxChildren}
pm.start_servers = ${builtins.toString startServers}
pm.min_spare_servers = ${builtins.toString minSpareServers}
pm.max_spare_servers = ${builtins.toString maxSpareServers}
catch_workers_output = yes
pm.status_path = ${statusPath}
''

View file

@ -1,95 +0,0 @@
{ lib
, pkgs
, siteName
, prependFile ? null
, extensions ? [
# "bcmath"
# "curl"
# "gd"
# "gmp"
# "iconv"
# "imagick"
# "intl"
# "ldap"
# "pdo_pgsql"
# "pdo_sqlite"
# "pgsql"
# "soap"
# "sqlite3"
# "zip"
]
, zend_extensions ? [
# "opcache"
]
}:
let
concatWithPrefix = prefix: content:
lib.strings.concatMapStrings
(x: prefix + x + "\n")
content;
in
pkgs.writeText "php-${siteName}.ini" ''
[PHP]
engine = On
short_open_tag = Off
precision = 14
output_buffering = 4096
zlib.output_compression = Off
implicit_flush = Off
serialize_precision = -1
zend.enable_gc = On
zend.exception_ignore_args = On
expose_php = Off
max_execution_time = 30 ; seconds
max_input_time = 60
memory_limit = 1024M
error_reporting = E_ALL & ~E_DEPRECATED & ~E_STRICT
display_errors = Off
display_startup_errors = Off
log_errors = On
log_errors_max_len = 1024
ignore_repeated_errors = On
ignore_repeated_source = On
report_memleaks = On
error_log = syslog
syslog.ident = php
cgi.fix_pathinfo=1
post_max_size = 8M
auto_prepend_file = "${if prependFile == null then "" else prependFile}"
auto_append_file =
extension_dir = "/usr/lib/php/modules/"
${concatWithPrefix "extension=" extensions}
${concatWithPrefix "zend_extension=" zend_extensions}
[CLI Server]
cli_server.color = On
; [PostgreSQL]
; pgsql.allow_persistent = On
; pgsql.auto_reset_persistent = Off
; pgsql.max_persistent = -1
; pgsql.max_links = -1
; pgsql.ignore_notice = 0
; pgsql.log_notice = 0
; [Session]
; session.save_handler = redis
; session.save_path = "unix:///run/redis/redis.sock?database=1"
; session.use_strict_mode = 1
; session.use_cookies = 1
; session.use_only_cookies = 1
; [opcache]
; opcache.enable=1
; opcache.memory_consumption=128
; opcache.interned_strings_buffer=16
; opcache.max_accelerated_files=20000
''
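As a quick illustration of the concatWithPrefix helper above, here is a self-contained sketch; the extension names are illustrative and are not this file's defaults (which are all commented out):
let
  lib = (import <nixpkgs> {}).lib;
  concatWithPrefix = prefix: content:
    lib.strings.concatMapStrings (x: prefix + x + "\n") content;
in
concatWithPrefix "extension=" [ "pdo_pgsql" "gd" ]
# => "extension=pdo_pgsql\nextension=gd\n"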

View file

@ -1,86 +0,0 @@
{ stdenv
, pkgs
, utils
}:
{ name
, siteName
, user
, group
, socketUser
, socketGroup
, runtimeDirectory ? "/run/${siteName}"
, phpIniConfig ? {}
, siteConfig ? {}
, extensions ? []
, zend_extensions ? []
, dependsOn ? {}
}:
let
# Merge the overrides into the generator arguments (not onto the resulting
# derivation), so that e.g. phpIniConfig.prependFile really ends up in the
# generated php.ini.
phpIniFile = pkgs.callPackage (import ./php-ini.nix) ({
inherit siteName;
inherit extensions zend_extensions;
} // phpIniConfig);
siteSocket = "${runtimeDirectory}/${siteName}.sock";
siteConfigFile = pkgs.callPackage (import ./php-fpm.nix) ({
inherit siteName;
inherit user group;
inherit siteSocket socketUser socketGroup;
} // siteConfig);
in
# This service runs as root, each pool runs as a user.
{
inherit name;
inherit user group;
inherit socketUser socketGroup;
inherit siteSocket;
pkg = utils.systemd.mkService rec {
name = "php-fpm-${siteName}";
content = ''
[Unit]
Description=The PHP FastCGI Process Manager
After=network.target
[Service]
Type=notify
PIDFile=/run/${siteName}/php-fpm.pid
ExecStart=${pkgs.php}/bin/php-fpm --nodaemonize --fpm-config ${siteConfigFile} --php-ini ${phpIniFile}
ExecReload=/bin/kill -USR2 $MAINPID
# Kept commented out on purpose, as a reminder not to re-enable them:
# these directories are handled through tmpfiles.d.
#
# RuntimeDirectory=${siteName}
# StateDirectory=${siteName}
LockPersonality=true
NoNewPrivileges=true
PrivateDevices=true
PrivateTmp=true
ProtectClock=true
ProtectControlGroups=true
ProtectHome=true
ProtectHostname=true
ProtectKernelLogs=true
ProtectKernelModules=true
ProtectKernelTunables=true
ProtectSystem=full
RestrictAddressFamilies=AF_INET AF_INET6 AF_NETLINK AF_UNIX
RestrictNamespaces=true
RestrictRealtime=true
RestrictSUIDSGID=true
[Install]
WantedBy=multi-user.target
'';
};
inherit dependsOn;
type = "systemd-unit";
}
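A minimal usage sketch for this module, with hypothetical site values; the point is that extra attributes in phpIniConfig and siteConfig are merged into the php-ini.nix and php-fpm.nix arguments and so override the generated settings:
customPkgs.mkPHPFPMService {
  name = "ExamplePHPFPMService";            # hypothetical
  siteName = "example";                     # hypothetical
  user = "example"; group = "example";
  socketUser = "http"; socketGroup = "http";
  runtimeDirectory = "/run/example";
  phpIniConfig = { prependFile = ./normalize-headers.php; };  # hypothetical path; becomes auto_prepend_file
  siteConfig = { maxChildren = 10; };                         # overrides the pool default of 5
}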

View file

@ -1,48 +0,0 @@
{ stdenv
, pkgs
, lib
}:
{ name
, database
, username
, password ? null
, passwordFile ? null
, dependsOn ? {}
}:
assert lib.assertMsg (
(password == null && passwordFile != null)
|| (password != null && passwordFile == null)
) "set either password or passwordFile";
# From https://github.com/svanderburg/dysnomia/blob/master/dysnomia-modules/postgresql-database.in
# and https://github.com/svanderburg/dysnomia/blob/master/tests/deployment/postgresql-database.nix
#
# On activation, an initial dump can be restored. If the mutable component
# contains a sub folder named postgresql-databases/, then the dump files stored
# inside get imported.
# TODO: https://stackoverflow.com/a/69480184/1013628
{
inherit name;
inherit database username password passwordFile;
pkg = stdenv.mkDerivation {
name = database;
# Note: only an inline password is supported by the template below; when
# passwordFile is used instead, the dump still needs a way to read the
# secret at activation time (hence the `TODO: use passwordFile` comments
# at the call sites).
src = pkgs.writeTextDir "${database}.sql" ''
CREATE USER "${username}" WITH PASSWORD '${password}';
GRANT ALL PRIVILEGES ON DATABASE "${database}" TO "${username}";
'';
buildCommand = ''
mkdir -p $out/postgresql-databases
cp $src/*.sql $out/postgresql-databases
'';
};
inherit dependsOn;
type = "postgresql-database";
}
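For reference, a usage sketch with purely illustrative credentials, showing the fileset this module produces and that dysnomia imports on activation:
customPkgs.mkPostgresDB {
  name = "ExamplePostgresDB";        # hypothetical
  database = "example";
  username = "example";
  password = "not-a-real-password";  # illustrative; see the passwordFile note above
}
# The resulting store path contains postgresql-databases/example.sql with the
# CREATE USER and GRANT statements from the template above, which the dysnomia
# postgresql-database module restores when the component is activated.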

View file

@ -1,8 +0,0 @@
{ pkgs
, utils
}:
{
unit = pkgs.callPackage ./unit { inherit utils; };
integration = pkgs.callPackage ./integration { inherit utils; };
}

View file

@ -1,68 +0,0 @@
{ nixpkgs, pkgs }:
let
generateManifestSrc =
{name, tarball}:
pkgs.stdenv.mkDerivation {
name = "${name}-manifest-src";
buildCommand =
''
mkdir -p $out
cd $out
tar xfvj ${tarball}/tarballs/*.tar.bz2 --strip-components=1
'';
};
disnixos = import "${pkgs.disnixos}/share/disnixos/testing.nix" {
inherit nixpkgs;
};
# We need this function because, for a reason that eludes me, the
# one defined in disnixos fails when the name attribute is not correctly
# set in the call to simpleTest. The only difference between this
# function and the one in disnixos is the additional `inherit name`
# line.
customDisnixTest = system:
{name, manifest, tarball, networkFile, externalNetworkFile ? false, testScript, dysnomiaStateDir ? "", postActivateTimeout ? 1}:
let
manifestSrc = generateManifestSrc {
inherit name tarball;
};
network = if externalNetworkFile then import networkFile else import "${manifestSrc}/${networkFile}";
in
with import "${nixpkgs}/nixos/lib/testing-python.nix" { inherit system; };
simpleTest {
nodes = network;
inherit name;
testScript = import "${pkgs.disnixos}/share/disnixos/generate-testscript.nix" {
inherit network testScript dysnomiaStateDir postActivateTimeout;
inherit (pkgs) disnix daemon socat libxml2;
inherit (pkgs.lib) concatMapStrings;
manifestFile = "${manifest}/manifest.xml";
};
};
in
{
inherit (disnixos) sourceTarball;
genBuilds = systems: config:
pkgs.lib.genAttrs systems (system:
let
pkgs = import nixpkgs { inherit system; };
disnixos = import "${pkgs.disnixos}/share/disnixos/testing.nix" {
inherit nixpkgs system;
};
in
disnixos.buildManifest config
);
disnixTest = currentSystem: manifest: config:
customDisnixTest currentSystem (config // {
manifest = builtins.getAttr currentSystem manifest;
});
}

View file

@ -1,8 +0,0 @@
{ pkgs
, utils
}:
rec {
all = [keycloak];
keycloak = pkgs.callPackage ./keycloak.nix {};
}

View file

@ -1,58 +0,0 @@
# Run tests with nix-build -A tests.integration.keycloak
{ nixpkgs ? <nixpkgs>
, systems ? [ "i686-linux" "x86_64-linux" ]
}:
let
pkgs = import nixpkgs {};
version = "1.0";
disnixos = pkgs.callPackage ./common.nix { inherit nixpkgs; };
in
rec {
tarball = disnixos.sourceTarball {
name = "testproject-zip";
inherit version;
src = ../../.;
officialRelease = false;
};
builds = {
simple = disnixos.genBuilds systems {
name = "test-project-manifest";
inherit version;
inherit tarball;
servicesFile = "tests/integration/keycloak/services.nix";
networkFile = "tests/integration/keycloak/network.nix";
distributionFile = "tests/integration/keycloak/distribution.nix";
};
};
tests = {
simple = disnixos.disnixTest builtins.currentSystem builds.simple {
name = "test-project-test";
inherit tarball;
networkFile = "tests/integration/keycloak/network.nix";
# dysnomiaStateDir = /var/state/dysnomia;
testScript =
''
def assert_service_started(machine, name):
code, log = machine.systemctl("status " + name)
if code != 0:
raise Exception(name + " could not be started:\n---\n" + log + "---\n")
def assert_database_exists(machine, name):
if machine.succeed("""psql -XtA -U postgres -h localhost -c "SELECT 1 FROM pg_database WHERE datname='{}'" """.format(name)) != '1\n':
raise Exception("could not find database '{}' in postgresql".format(name))
with subtest("check postgres service started"):
assert_service_started(test1, "postgresql.service")
with subtest("check db is created"):
assert_database_exists(test1, "keycloak")
'';
};
};
}.tests

View file

@ -1,7 +0,0 @@
{ infrastructure }:
with infrastructure;
{
KeycloakPostgresDB = [ test1 ];
KeycloakService = [ test1 ];
}

View file

@ -1,76 +0,0 @@
rec {
test1 = { system
, pkgs
, lib
, ... }:
let
domain = "local";
utils = pkgs.lib.callPackageWith pkgs ../../../utils.nix { };
customPkgs = import ../../../all-packages.nix {
inherit system pkgs utils;
};
in
rec {
users.groups = {
keycloak = {
name = "keycloak";
};
};
users.users = {
keycloak = {
name = "keycloak";
group = "keycloak";
isSystemUser = true;
};
};
# Normally, you'd provision the deploy target with secrets.
systemd.tmpfiles.rules = [
# Type Path Mode User Group Age Argument...
''d /run/keys 0755 root root - -''
''f+ /run/keys/keycloakinitialadmin 0755 root root - KEYCLOAK_ADMIN_PASSWORD="KEYCLOAK_ADMIN_PASSWORD"''
];
services = {
openssh = {
enable = true;
};
disnix = {
enable = true;
# useWebServiceInterface = true;
};
postgresql = {
enable = true;
package = pkgs.postgresql_14;
port = 5432;
enableTCPIP = true;
authentication = pkgs.lib.mkOverride 10 ''
local all all trust
host all all 127.0.0.1/32 trust
host all all ::1/128 trust
'';
};
};
dysnomia = {
enable = true;
enableLegacyModules = false;
extraContainerProperties = {
system = {
inherit domain;
};
postgresql-database = {
service_name = "postgresql.service";
port = builtins.toString services.postgresql.port;
};
};
};
networking.firewall.allowedTCPPorts = [ services.postgresql.port ];
};
}

View file

@ -1,47 +0,0 @@
{ system, pkgs, distribution, invDistribution }:
let
utils = pkgs.lib.callPackageWith pkgs ../../../utils.nix { };
customPkgs = import ../../../all-packages.nix {
inherit system pkgs utils;
};
in
with utils;
rec {
KeycloakPostgresDB = customPkgs.mkPostgresDB {
name = "KeycloakPostgresDB";
database = "keycloak";
username = "keycloak";
# TODO: use passwordFile
password = "keycloak";
};
KeycloakService = customPkgs.mkKeycloakService {
name = "KeycloakService";
subdomain = "keycloak";
# Get these from infrastructure.nix
user = "keycloak";
group = "keycloak";
postgresServiceName = (getTarget distribution "KeycloakPostgresDB").containers.postgresql-database.service_name;
initialAdminUsername = "admin";
keys = {
dbPassword = "keycloakdbpassword";
initialAdminPassword = "keycloakinitialadmin";
};
logLevel = "INFO";
hostname = "keycloak.${getDomain distribution "KeycloakService"}";
dbType = "postgres";
dbDatabase = KeycloakPostgresDB.database;
dbUsername = KeycloakPostgresDB.username;
dbHost = {KeycloakPostgresDB}: KeycloakPostgresDB.target.properties.hostname;
dbPort = (getTarget distribution "KeycloakPostgresDB").containers.postgresql-database.port;
inherit KeycloakPostgresDB;
};
}

View file

@ -1,9 +0,0 @@
{ pkgs
, utils
}:
{
haproxy = pkgs.callPackage ./haproxy.nix { inherit utils; };
keycloak = pkgs.callPackage ./keycloak.nix {};
keycloak-cli-config = pkgs.callPackage ./keycloak-cli-config.nix {};
}

View file

@ -1,562 +0,0 @@
# to run these tests:
# nix-instantiate --eval --strict . -A tests.haproxy
{ lib
, stdenv
, pkgs
, utils
}:
let
configcreator = pkgs.callPackage ./../../haproxy/configcreator.nix { inherit utils; };
mksiteconfig = pkgs.callPackage ./../../haproxy/siteconfig.nix {};
diff = testResult:
with builtins;
with lib.strings;
if isString testResult.expected && isString testResult.result then
let
# Taken from nixpkgs master
commonPrefixLength = a: b:
let
m = lib.min (stringLength a) (stringLength b);
go = i: if i >= m then m else if substring i 1 a == substring i 1 b then go (i + 1) else i;
in go 0;
# Taken from nixpkgs master
commonSuffixLength = a: b:
let
m = lib.min (stringLength a) (stringLength b);
go = i: if i >= m then m else if substring (stringLength a - i - 1) 1 a == substring (stringLength b - i - 1) 1 b then go (i + 1) else i;
in go 0;
p = commonPrefixLength testResult.expected testResult.result;
s = commonSuffixLength testResult.expected testResult.result;
expectedSuffixLen = stringLength testResult.expected - s - p;
resultSuffixLen = stringLength testResult.result - s - p;
expectedDiff = substring p expectedSuffixLen testResult.expected;
resultDiff = substring p resultSuffixLen testResult.result;
omitted = len: if len == 0 then "" else "[... ${toString len} omitted]";
in
{inherit (testResult) name;
commonPrefix = substring 0 p testResult.expected;
commonSuffix = substring (stringLength testResult.expected - s) s testResult.expected;
expected = "${omitted p}${expectedDiff}${omitted s}";
result = "${omitted p}${resultDiff}${omitted s}";
allExpected = testResult.expected;
allResult = testResult.result;
}
else testResult;
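# A worked example of the helper above, with illustrative strings:
# diff { name = "t"; expected = "abcXdef"; result = "abcYdef"; } yields
# commonPrefix = "abc", commonSuffix = "def",
# expected = "[... 3 omitted]X[... 3 omitted]" and
# result = "[... 3 omitted]Y[... 3 omitted]".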
runTests = x: map diff (lib.runTests x);
in
with lib.attrsets;
runTests {
testDiffSame = {
expr = "abdef";
expected = "abdef";
};
testUpdateByPath1 = {
expr = configcreator.updateByPath ["a"] (x: x+1) {
a = 1;
b = 1;
};
expected = {
a = 2;
b = 1;
};
};
testUpdateByPath2 = {
expr = configcreator.updateByPath ["a" "a"] (x: x+1) {
a = {
a = 1;
b = 1;
};
b = 1;
};
expected = {
a = {
a = 2;
b = 1;
};
b = 1;
};
};
testUpdateByPath3 = {
expr = configcreator.updateByPath ["a" "a" "a"] (x: x+1) {
a = {
a = {
a = 1;
b = 1;
};
b = 1;
};
b = 1;
};
expected = {
a = {
a = {
a = 2;
b = 1;
};
b = 1;
};
b = 1;
};
};
testRecursiveMerge1 = {
expr = configcreator.recursiveMerge [
{a = 1;}
{b = 2;}
];
expected = {
a = 1;
b = 2;
};
};
testRecursiveMerge2 = {
expr = configcreator.recursiveMerge [
{a = {a = 1; b = 2;};}
{a = {a = 2;};}
];
expected = {
a = {a = 2; b = 2;};
};
};
testFlattenAttrs1 = {
expr = configcreator.flattenAttrs {
a = 1;
b = 2;
};
expected = {
a = 1;
b = 2;
};
};
testFlattenAttrs2 = {
expr = configcreator.flattenAttrs {
a = {
a = 1;
b = {
c = 3;
d = 4;
};
};
b = 2;
};
expected = {
"a.a" = 1;
"a.b.c" = 3;
"a.b.d" = 4;
b = 2;
};
};
testHaproxyConfigDefaultRender = {
expr = configcreator.render (configcreator.default {
user = "me";
group = "mygroup";
certPath = "/cert/path";
plugins = {
zone = {
luapaths = "lib";
source = pkgs.writeText "one.lua" "a binary";
};
two = {
load = "right/two.lua";
luapaths = ".";
cpaths = "right";
source = pkgs.writeText "two.lua" "a binary";
};
};
globalEnvs = {
ABC = "hello";
};
stats = null;
debug = false;
});
expected = ''
global
group mygroup
log /dev/log local0 info
maxconn 20000
lua-prepend-path /nix/store/ybcka9g095hp8s1hnm2ncfh1hp56v9yq-haproxyplugins/two/?.lua path
lua-prepend-path /nix/store/ybcka9g095hp8s1hnm2ncfh1hp56v9yq-haproxyplugins/two/right/?.so cpath
lua-prepend-path /nix/store/ybcka9g095hp8s1hnm2ncfh1hp56v9yq-haproxyplugins/zone/lib/?.lua path
lua-load /nix/store/ybcka9g095hp8s1hnm2ncfh1hp56v9yq-haproxyplugins/two/right/two.lua
setenv ABC hello
tune.ssl.default-dh-param 2048
user me
defaults
log global
option httplog
timeout client 15s
timeout connect 10s
timeout queue 100s
timeout server 30s
frontend http-to-https
bind *:80
mode http
redirect scheme https code 301 if !{ ssl_fc }
frontend https
bind *:443 ssl crt /cert/path
http-request add-header X-Forwarded-Proto https
http-request set-header X-Forwarded-Port %[dst_port]
http-request set-header X-Forwarded-For %[src]
http-response set-header Strict-Transport-Security "max-age=15552000; includeSubDomains; preload;"
mode http
'';
};
testHaproxyConfigDefaultRenderWithStatsAndDebug = {
expr = configcreator.render (configcreator.default {
user = "me";
group = "mygroup";
certPath = "/cert/path";
stats = {
port = 8405;
uri = "/stats";
refresh = "10s";
prometheusUri = "/prom/etheus";
hide-version = true;
};
debug = true;
});
expected = ''
global
group mygroup
log /dev/log local0 info
maxconn 20000
tune.ssl.default-dh-param 2048
user me
defaults
log global
option httplog
timeout client 15s
timeout connect 10s
timeout queue 100s
timeout server 30s
frontend http-to-https
bind *:80
mode http
redirect scheme https code 301 if !{ ssl_fc }
frontend https
bind *:443 ssl crt /cert/path
http-request add-header X-Forwarded-Proto https
http-request set-header X-Forwarded-Port %[dst_port]
http-request set-header X-Forwarded-For %[src]
http-response set-header Strict-Transport-Security "max-age=15552000; includeSubDomains; preload;"
log-format "%ci:%cp [%tr] %ft %b/%s %TR/%Tw/%Tc/%Tr/%Ta %ST %B %CC %CS %tsc %ac/%fc/%bc/%sc/%rc %sq/%bq %hr %hs %{+Q}r %sslv %sslc %[ssl_fc_cipherlist_str]"
mode http
frontend stats
bind localhost:8405
http-request use-service prometheus-exporter if { path /prom/etheus }
mode http
stats enable
stats hide-version
stats refresh 10s
stats uri /stats
'';
};
testRenderHaproxyConfigWithSite = {
expr = configcreator.render (configcreator.default {
user = "me";
group = "mygroup";
certPath = "/cert/path";
stats = null;
debug = false;
sites = {
siteName = {
frontend = {
capture = [
"request header origin len 128"
];
acl = {
acl_siteName = "hdr_beg(host) siteName.";
acl_siteName_path = "path_beg /siteName";
};
http-response = {
add-header = [
"Access-Control-Allow-Origin1 $[capture]"
"Access-Control-Allow-Origin2 $[capture]"
];
};
use_backend = "if acl_siteName OR acl_siteName_path";
};
backend = {
servers = [
{
name = "serviceName1";
address = "serviceSocket";
}
];
options = [
"cookie JSESSIONID prefix"
];
};
};
};
});
expected = ''
global
group mygroup
log /dev/log local0 info
maxconn 20000
tune.ssl.default-dh-param 2048
user me
defaults
log global
option httplog
timeout client 15s
timeout connect 10s
timeout queue 100s
timeout server 30s
frontend http-to-https
bind *:80
mode http
redirect scheme https code 301 if !{ ssl_fc }
frontend https
acl acl_siteName hdr_beg(host) siteName.
acl acl_siteName_path path_beg /siteName
bind *:443 ssl crt /cert/path
capture request header origin len 128
http-request add-header X-Forwarded-Proto https
http-request set-header X-Forwarded-Port %[dst_port]
http-request set-header X-Forwarded-For %[src]
http-response add-header Access-Control-Allow-Origin1 $[capture]
http-response add-header Access-Control-Allow-Origin2 $[capture]
http-response set-header Strict-Transport-Security "max-age=15552000; includeSubDomains; preload;"
mode http
use_backend siteName if acl_siteName OR acl_siteName_path
backend siteName
cookie JSESSIONID prefix
mode http
option forwardfor
server serviceName1 serviceSocket
'';
};
testRenderHaproxyConfigWith2Sites = {
expr = configcreator.render (configcreator.default {
user = "me";
group = "mygroup";
certPath = "/cert/path";
stats = null;
debug = false;
sites = {
siteName = {
frontend = {
capture = [
"request header origin len 128"
];
acl = {
acl_siteName = "hdr_beg(host) siteName.";
acl_siteName_path = "path_beg /siteName";
};
http-response = {
add-header = [
"Access-Control-Allow-Origin1 $[capture]"
"Access-Control-Allow-Origin2 $[capture]"
];
};
use_backend = "if acl_siteName OR acl_siteName_path";
};
backend = {
servers = [
{
name = "serviceName1";
address = "serviceSocket";
}
];
options = [
"cookie JSESSIONID prefix"
];
};
};
siteName2 = {
frontend = {
capture = [
"request header origin len 128"
];
acl = {
acl_siteName2 = "hdr_beg(host) siteName2.";
acl_siteName2_path = "path_beg /siteName2";
};
http-response = {
add-header = [
"Access-Control-Allow-Origin3 $[capture]"
"Access-Control-Allow-Origin4 $[capture]"
];
};
use_backend = "if acl_siteName2 OR acl_siteName2_path";
};
backend = {
servers = [
{
name = "serviceName2";
address = "serviceSocket";
}
];
options = [
"cookie JSESSIONID prefix"
];
};
};
};
});
expected = ''
global
group mygroup
log /dev/log local0 info
maxconn 20000
tune.ssl.default-dh-param 2048
user me
defaults
log global
option httplog
timeout client 15s
timeout connect 10s
timeout queue 100s
timeout server 30s
frontend http-to-https
bind *:80
mode http
redirect scheme https code 301 if !{ ssl_fc }
frontend https
acl acl_siteName hdr_beg(host) siteName.
acl acl_siteName2 hdr_beg(host) siteName2.
acl acl_siteName2_path path_beg /siteName2
acl acl_siteName_path path_beg /siteName
bind *:443 ssl crt /cert/path
capture request header origin len 128
capture request header origin len 128
http-request add-header X-Forwarded-Proto https
http-request set-header X-Forwarded-Port %[dst_port]
http-request set-header X-Forwarded-For %[src]
http-response add-header Access-Control-Allow-Origin1 $[capture]
http-response add-header Access-Control-Allow-Origin2 $[capture]
http-response add-header Access-Control-Allow-Origin3 $[capture]
http-response add-header Access-Control-Allow-Origin4 $[capture]
http-response set-header Strict-Transport-Security "max-age=15552000; includeSubDomains; preload;"
mode http
use_backend siteName if acl_siteName OR acl_siteName_path
use_backend siteName2 if acl_siteName2 OR acl_siteName2_path
backend siteName
cookie JSESSIONID prefix
mode http
option forwardfor
server serviceName1 serviceSocket
backend siteName2
cookie JSESSIONID prefix
mode http
option forwardfor
server serviceName2 serviceSocket
'';
};
testRenderHaproxyConfigWithSiteDebugHeaders = {
expr = configcreator.render (configcreator.default {
user = "me";
group = "mygroup";
certPath = "/cert/path";
stats = null;
debug = false;
sites = {
siteName = {
frontend = {
capture = [
"request header origin len 128"
];
acl = {
acl_siteName = "hdr_beg(host) siteName.";
acl_siteName_path = "path_beg /siteName";
};
http-response = {
add-header = [
"Access-Control-Allow-Origin1 $[capture]"
"Access-Control-Allow-Origin2 $[capture]"
];
};
use_backend = "if acl_siteName OR acl_siteName_path";
};
backend = {
servers = [
{
name = "serviceName1";
address = "serviceSocket";
}
];
options = [
"cookie JSESSIONID prefix"
];
};
debugHeaders = "acl_siteName";
};
};
});
expected = ''
global
group mygroup
log /dev/log local0 info
maxconn 20000
tune.ssl.default-dh-param 2048
user me
defaults
log global
option httplog
timeout client 15s
timeout connect 10s
timeout queue 100s
timeout server 30s
frontend http-to-https
bind *:80
mode http
redirect scheme https code 301 if !{ ssl_fc }
frontend https
acl acl_siteName hdr_beg(host) siteName.
acl acl_siteName_path path_beg /siteName
bind *:443 ssl crt /cert/path
capture request header origin len 128
http-request add-header X-Forwarded-Proto https
http-request capture req.hdrs len 512 if acl_siteName
http-request set-header X-Forwarded-Port %[dst_port]
http-request set-header X-Forwarded-For %[src]
http-response add-header Access-Control-Allow-Origin1 $[capture]
http-response add-header Access-Control-Allow-Origin2 $[capture]
http-response set-header Strict-Transport-Security "max-age=15552000; includeSubDomains; preload;"
log-format "%ci:%cp [%tr] %ft [[%hr]] %hs %{+Q}r"
mode http
option httplog
use_backend siteName if acl_siteName OR acl_siteName_path
backend siteName
cookie JSESSIONID prefix
mode http
option forwardfor
server serviceName1 serviceSocket
'';
};
}

View file

@ -1,343 +0,0 @@
# to run these tests:
# nix-instantiate --eval --strict . -A tests.keycloak-cli-config
{ lib
, stdenv
, pkgs
}:
let
configcreator = pkgs.callPackage ./../../keycloak-cli-config/configcreator.nix {};
default_config = {
realm = "myrealm";
domain = "mydomain.com";
};
keep_fields = fields:
lib.filterAttrs (n: v: lib.any (n_: n_ == n) fields);
in
lib.runTests {
testDefault = {
expr = configcreator default_config;
expected = {
id = "myrealm";
realm = "myrealm";
enabled = true;
clients = [];
roles = {
realm = [];
client = {};
};
groups = [];
users = [];
};
};
testUsers = {
expr = (configcreator (default_config // {
users = {
me = {
email = "me@mydomain.com";
firstName = "me";
lastName = "stillme";
};
};
})).users;
expected = [
{
username = "me";
enabled = true;
email = "me@mydomain.com";
emailVerified = true;
firstName = "me";
lastName = "stillme";
}
];
};
testUsersWithGroups = {
expr = (configcreator (default_config // {
users = {
me = {
email = "me@mydomain.com";
firstName = "me";
lastName = "stillme";
groups = [ "MyGroup" ];
};
};
})).users;
expected = [
{
username = "me";
enabled = true;
email = "me@mydomain.com";
emailVerified = true;
firstName = "me";
lastName = "stillme";
groups = [ "MyGroup" ];
}
];
};
testUsersWithRoles = {
expr = (configcreator (default_config // {
users = {
me = {
email = "me@mydomain.com";
firstName = "me";
lastName = "stillme";
roles = [ "MyRole" ];
};
};
})).users;
expected = [
{
username = "me";
enabled = true;
email = "me@mydomain.com";
emailVerified = true;
firstName = "me";
lastName = "stillme";
realmRoles = [ "MyRole" ];
}
];
};
testUsersWithInitialPassword = {
expr = (configcreator (default_config // {
users = {
me = {
email = "me@mydomain.com";
firstName = "me";
lastName = "stillme";
initialPassword = true;
};
};
})).users;
expected = [
{
username = "me";
enabled = true;
email = "me@mydomain.com";
emailVerified = true;
firstName = "me";
lastName = "stillme";
credentials = [
{
type = "password";
userLabel = "initial";
value = "$(keycloak.users.me.password)";
}
];
}
];
};
testGroups = {
expr = (configcreator (default_config // {
groups = [ "MyGroup" ];
})).groups;
expected = [
{
name = "MyGroup";
path = "/MyGroup";
attributes = {};
realmRoles = [];
clientRoles = {};
subGroups = [];
}
];
};
testRealmRoles = {
expr = (configcreator (default_config // {
roles = {
A = [ "B" ];
B = [ ];
};
})).roles;
expected = {
client = {};
realm = [
{
name = "A";
composite = true;
composites = {
realm = [ "B" ];
};
}
{
name = "B";
composite = false;
}
];
};
};
testClientRoles = {
expr = (configcreator (default_config // {
clients = {
clientA = {
roles = [ "cA" ];
};
};
})).roles;
expected = {
client = {
clientA = [
{
name = "cA";
clientRole = true;
}
];
};
realm = [];
};
};
testClient = {
expr = map (keep_fields [
"clientId"
"rootUrl"
"redirectUris"
"webOrigins"
"authorizationSettings"
]) (configcreator (default_config // {
clients = {
clientA = {};
};
})).clients;
expected = [
{
clientId = "clientA";
rootUrl = "https://clientA.mydomain.com";
redirectUris = ["https://clientA.mydomain.com/oauth2/callback"];
webOrigins = ["https://clientA.mydomain.com"];
authorizationSettings = {
policyEnforcementMode = "ENFORCING";
resources = [];
policies = [];
};
}
];
};
testClientAuthorization = with builtins; {
expr = (head (configcreator (default_config // {
clients = {
clientA = {
resourcesUris = {
adminPath = ["/admin/*"];
userPath = ["/*"];
};
access = {
admin = {
roles = [ "admin" ];
resources = [ "adminPath" ];
};
user = {
roles = [ "user" ];
resources = [ "userPath" ];
};
};
};
};
})).clients).authorizationSettings;
expected = {
policyEnforcementMode = "ENFORCING";
resources = [
{
name = "adminPath";
type = "urn:clientA:resources:adminPath";
ownerManagedAccess = false;
uris = ["/admin/*"];
}
{
name = "userPath";
type = "urn:clientA:resources:userPath";
ownerManagedAccess = false;
uris = ["/*"];
}
];
policies = [
{
name = "admin has access";
type = "role";
logic = "POSITIVE";
decisionStrategy = "UNANIMOUS";
config = {
roles = ''[{"id":"admin","required":true}]'';
};
}
{
name = "user has access";
type = "role";
logic = "POSITIVE";
decisionStrategy = "UNANIMOUS";
config = {
roles = ''[{"id":"user","required":true}]'';
};
}
{
name = "admin has access to adminPath";
type = "resource";
logic = "POSITIVE";
decisionStrategy = "UNANIMOUS";
config = {
resources = ''["adminPath"]'';
applyPolicies = ''["admin has access"]'';
};
}
{
name = "user has access to userPath";
type = "resource";
logic = "POSITIVE";
decisionStrategy = "UNANIMOUS";
config = {
resources = ''["userPath"]'';
applyPolicies = ''["user has access"]'';
};
}
];
};
};
testClientAudience =
let
audienceProtocolMapper = config:
with builtins;
let
protocolMappers = (head config.clients).protocolMappers;
protocolMapperByName = name: protocolMappers: head (filter (x: x.name == name) protocolMappers);
in
protocolMapperByName "Audience" protocolMappers;
in
{
expr = audienceProtocolMapper (configcreator (default_config // {
clients = {
clientA = {};
};
}));
expected = {
name = "Audience";
protocol = "openid-connect";
protocolMapper = "oidc-audience-mapper";
config = {
"included.client.audience" = "clientA";
"id.token.claim" = "false";
"access.token.claim" = "true";
"included.custom.audience" = "clientA";
};
};
};
}

View file

@ -1,295 +0,0 @@
# to run these tests:
# nix-instantiate --eval --strict . -A tests.keycloak
{ lib
, stdenv
, pkgs
}:
let
configcreator = pkgs.callPackage ./../../keycloak-cli-config/configcreator.nix {};
# Taken from https://github.com/NixOS/nixpkgs/blob/master/lib/attrsets.nix
updateManyAttrsByPath =
with builtins;
with lib.lists;
let
# When recursing into attributes, instead of updating the `path` of each
# update using `tail`, which needs to allocate an entirely new list,
# we just pass a prefix length to use and make sure to only look at the
# path without the prefix length, so that we can reuse the original list
# entries.
go = prefixLength: hasValue: value: updates:
let
# Splits updates into ones on this level (split.right)
# And ones on levels further down (split.wrong)
split = partition (el: length el.path == prefixLength) updates;
# Groups updates on further down levels into the attributes they modify
nested = groupBy (el: elemAt el.path prefixLength) split.wrong;
# Applies only nested modification to the input value
withNestedMods =
# Return the value directly if we don't have any nested modifications
if split.wrong == [] then
if hasValue then value
else
# Throw an error if there is no value. This `head` call here is
# safe, but only in this branch since `go` could only be called
# with `hasValue == false` for nested updates, in which case
# it's also always called with at least one update
let updatePath = (head split.right).path; in
throw
( "updateManyAttrsByPath: Path '${showAttrPath updatePath}' does "
+ "not exist in the given value, but the first update to this "
+ "path tries to access the existing value.")
else
# If there are nested modifications, try to apply them to the value
if ! hasValue then
# But if we don't have a value, just use an empty attribute set
# as the value, but simplify the code a bit
mapAttrs (name: go (prefixLength + 1) false null) nested
else if isAttrs value then
# If we do have a value and it's an attribute set, override it
# with the nested modifications
value //
mapAttrs (name: go (prefixLength + 1) (value ? ${name}) value.${name}) nested
else
# However if it's not an attribute set, we can't apply the nested
# modifications, throw an error
let updatePath = (head split.wrong).path; in
throw
( "updateManyAttrsByPath: Path '${showAttrPath updatePath}' needs to "
+ "be updated, but path '${showAttrPath (take prefixLength updatePath)}' "
+ "of the given value is not an attribute set, so we can't "
+ "update an attribute inside of it.");
# We get the final result by applying all the updates on this level
# after having applied all the nested updates
# We use foldl instead of foldl' so that in case of multiple updates,
# intermediate values aren't evaluated if not needed
in foldl (acc: el: el.update acc) withNestedMods split.right;
in updates: value: go 0 true value updates;
in
with lib.attrsets;
lib.runTests {
testConfigEmpty = {
expr = configcreator {
realm = "myrealm";
domain = "domain.com";
};
expected = {
id = "myrealm";
realm = "myrealm";
enabled = true;
clients = [];
groups = [];
roles = {
client = {};
realm = [];
};
users = [];
};
};
testConfigRole = {
expr = configcreator {
realm = "myrealm";
domain = "domain.com";
roles = {
user = [];
admin = ["user"];
};
};
expected = {
id = "myrealm";
realm = "myrealm";
enabled = true;
clients = [];
groups = [];
roles = {
realm = [
{
name = "admin";
composite = true;
composites = {
realm = ["user"];
};
}
{
name = "user";
composite = false;
}
];
client = {};
};
users = [];
};
};
testConfigClient = {
expr =
let
c = configcreator {
realm = "myrealm";
domain = "domain.com";
clients = {
myclient = {};
myclient2 = {
roles = ["uma"];
};
};
};
in
updateManyAttrsByPath [
{
path = [ "clients" ];
# We don't care about the value of the protocolMappers
# field because its value is hardcoded.
update = clients: map (filterAttrs (n: v: n != "protocolMappers")) clients;
}
] c;
expected = {
id = "myrealm";
realm = "myrealm";
enabled = true;
clients = [
{
clientId = "myclient";
rootUrl = "https://myclient.domain.com";
clientAuthenticatorType = "client-secret";
redirectUris = [
"https://myclient.domain.com/oauth2/callback"
];
webOrigins = [
"https://myclient.domain.com"
];
authorizationServicesEnabled = true;
serviceAccountsEnabled = true;
protocol = "openid-connect";
publicClient = false;
authorizationSettings = {
policyEnforcementMode = "ENFORCING";
resources = [];
policies = [];
};
}
{
clientId = "myclient2";
rootUrl = "https://myclient2.domain.com";
clientAuthenticatorType = "client-secret";
redirectUris = [
"https://myclient2.domain.com/oauth2/callback"
];
webOrigins = [
"https://myclient2.domain.com"
];
authorizationServicesEnabled = true;
serviceAccountsEnabled = true;
protocol = "openid-connect";
publicClient = false;
authorizationSettings = {
policyEnforcementMode = "ENFORCING";
resources = [];
policies = [];
};
}
];
groups = [];
roles = {
client = {
myclient = [];
myclient2 = [
{
name = "uma";
clientRole = true;
}
];
};
realm = [];
};
users = [];
};
};
testConfigUser = {
expr = configcreator {
realm = "myrealm";
domain = "domain.com";
users = {
me = {
email = "me@me.com";
firstName = null;
lastName = "Me";
realmRoles = [ "role" ];
};
};
};
expected = {
id = "myrealm";
realm = "myrealm";
enabled = true;
clients = [];
groups = [];
roles = {
client = {};
realm = [];
};
users = [
{
enabled = true;
username = "me";
email = "me@me.com";
emailVerified = true;
firstName = null;
lastName = "Me";
}
];
};
};
testConfigUserInitialPassword = {
expr = configcreator {
realm = "myrealm";
domain = "domain.com";
users = {
me = {
email = "me@me.com";
firstName = null;
lastName = "Me";
initialPassword = true;
};
};
};
expected = {
id = "myrealm";
realm = "myrealm";
enabled = true;
clients = [];
groups = [];
roles = {
client = {};
realm = [];
};
users = [
{
enabled = true;
username = "me";
email = "me@me.com";
emailVerified = true;
firstName = null;
lastName = "Me";
credentials = [
{
type = "password";
userLabel = "initial";
value = "$(keycloak.users.me.password)";
}
];
}
];
};
};
}

View file

@ -1,121 +0,0 @@
{ stdenv
, pkgs
, lib
}:
{ documentRoot
, name ? "ttrss"
, serviceName ? "ttrss"
, subdomain ? "ttrss"
, user ? "http"
, group ? "http"
, domain
, lock_directory
, cache_directory
, feed_icons_directory
, db_host
, db_port
, db_username
, db_database
, db_password
# , domain
# , smtp_host
# , smtp_login
# , smtp_password
# , feedback_url ? ""
, auth_remote_post_logout_url ? null
, enabled_plugins ? [ "auth_remote" "note" ]
, dependsOn ? {}
}:
let
asTtrssConfig = attrs: builtins.concatStringsSep "\n" (
["<?php" ""]
++ lib.attrsets.mapAttrsToList wrapPutenv attrs
++ [""] # Needs a newline at the end
);
wrapPutenv = key: value: "putenv('TTRSS_${lib.toUpper key}=${value}');";
config = self_url_path: db: {
db_type = "pgsql";
db_host = db_host db;
db_port = builtins.toString db_port;
db_user = db_username;
db_name = db_database;
db_pass = db_password;
self_url_path = self_url_path;
single_user_mode = "false";
simple_update_mode = "false";
php_executable = "${pkgs.php}/bin/php";
lock_directory = "${lock_directory}";
cache_dir = "${cache_directory}";
icons_dir = "${feed_icons_directory}";
icons_url = "feed-icons";
auth_auto_create = "true";
auth_auto_login = "false";
enable_registration = "false";
force_article_purge = "0";
sphinx_server = "localhost:9312";
sphinx_index = "ttrss, delta";
session_check_address = "true";
session_cookie_lifetime = "0";
session_expire_time = "86400";
smtp_from_name = "Tiny Tiny RSS";
# smtp_from_address = "noreply@${domain}";
# inherit smtp_host smtp_login smtp_password;
# inherit feedback_url;
digest_enable = "true";
digest_email_limit = "10";
digest_subject = "[tt-rss] New headlines for last 24 hours";
daemon_sends_digest = "true";
check_for_new_version = "false";
plugins = builtins.concatStringsSep ", " enabled_plugins;
log_destination = "syslog";
} // (
if auth_remote_post_logout_url != null then {
allow_remote_user_auth = "false";
auth_remote_post_logout_url = auth_remote_post_logout_url;
} else {}
);
in
{
name = serviceName;
pkg = {
db
}: stdenv.mkDerivation rec {
inherit name;
src = pkgs.tt-rss;
buildCommand =
let
configFile = pkgs.writeText "config.php" (asTtrssConfig (config "https://${subdomain}.${domain}/" db));
dr = dirOf documentRoot;
in
''
mkdir -p $out/${name}
cp -ra $src/* $out/${name}
cp ${configFile} $out/${name}/config.php
echo "${dr}" > $out/.dysnomia-targetdir
echo "${user}:${group}" > $out/.dysnomia-filesetowner
cat > $out/.dysnomia-fileset <<FILESET
symlink $out/${name}
target ${dr}
FILESET
'';
};
inherit dependsOn;
type = "fileset";
}
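A quick sketch of what the config rendering above produces, shown for a single illustrative key (db_type):
let
  lib = (import <nixpkgs> {}).lib;
  wrapPutenv = key: value: "putenv('TTRSS_${lib.toUpper key}=${value}');";
in
wrapPutenv "db_type" "pgsql"
# => "putenv('TTRSS_DB_TYPE=pgsql');"
# asTtrssConfig { db_type = "pgsql"; } accordingly renders:
#   <?php
#
#   putenv('TTRSS_DB_TYPE=pgsql');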

View file

@ -1,52 +0,0 @@
{ stdenv
, pkgs
}:
{ name
, user
, binDir
, dependsOn ? {}
}:
{
inherit name;
pkg =
{ db
, config
}:
stdenv.mkDerivation {
name = "dbupgrade";
src = pkgs.writeTextDir "wrapper" ''
#!/bin/bash -e
sudo -u ${user} bash <<HERE
case "$1" in
activate)
${pkgs.php}/bin/php ${binDir}/update.php --update-schema=force-yes
;;
lock)
if [ -f /tmp/wrapper.lock ]
then
exit 1
else
echo "1" > /tmp/wrapper.lock
fi
;;
unlock)
rm -f /tmp/wrapper.lock
;;
esac
HERE
'';
installPhase = ''
mkdir -p $out/bin
cp $src/wrapper $out/bin
chmod +x $out/bin/*
'';
};
inherit dependsOn;
type = "wrapper";
}

View file

@ -1,216 +0,0 @@
{ customPkgs
, pkgs
, utils
}:
{ serviceName ? "Ttrss"
, siteName ? "ttrss"
, subdomain ? "ttrss"
, domain ? ""
, ingress ? 18010
, user ? "ttrss"
, group ? "ttrss"
, documentRoot ? "/usr/share/webapps/ttrss"
, postgresDatabase ? "ttrss"
, postgresUser ? "ttrss"
, postgresPasswordLocation ? "ttrss"
, smtp ? {}
, sso ? {}
, distribution ? {}
, configPkg ? pkgs.callPackage (import ./config.nix) {}
, normalizeHeaderPkg ? pkgs.callPackage (import ./normalize-headers.nix) {}
, updateServicePkg ? pkgs.callPackage (import ./update.nix) {inherit utils;}
, dbupgradePkg ? pkgs.callPackage (import ./dbupgrade.nix) {}
}:
with pkgs.lib.attrsets;
let
rtdir = "/run/ttrss";
lock_directory = "${rtdir}/lock";
cache_directory = "${rtdir}/cache";
persistent_dir = "/var/lib/${siteName}";
feed_icons_directory = "${persistent_dir}/feed-icons";
in
rec {
inherit subdomain;
db = customPkgs.mkPostgresDB {
name = "${serviceName}PostgresDB";
database = postgresDatabase;
username = postgresUser;
# TODO: use passwordFile
password = postgresPasswordLocation;
};
config =
let
domain = utils.getDomain distribution "${serviceName}Config";
in
configPkg ({
name = "ttrss";
serviceName = "${serviceName}Config";
inherit subdomain;
inherit documentRoot;
inherit lock_directory cache_directory feed_icons_directory;
inherit (phpfpmService) user group;
inherit domain;
db_host = db: db.target.properties.hostname;
db_port = (utils.getTarget distribution "TtrssPostgresDB").containers.postgresql-database.port;
db_database = postgresDatabase;
db_username = postgresUser;
# TODO: use passwordFile
db_password = postgresPasswordLocation;
enabled_plugins = [ "auth_remote" "note" ];
dependsOn = {
inherit db;
};
}
// optionalAttrs (sso != {}) {
auth_remote_post_logout_url = "https://keycloak.${domain}/realms/${sso.realm}/account";
});
dbupgrade = dbupgradePkg {
name = "${serviceName}DBUpgrade";
inherit user;
binDir = documentRoot;
dependsOn = {
inherit config db;
};
};
service = customPkgs.mkNginxService {
name = "${serviceName}Service";
inherit siteName;
inherit user group;
runtimeDirectory = "/run/nginx";
config = {
port = ingress;
inherit siteName;
siteRoot = documentRoot;
phpFpmSiteSocket = phpfpmService.siteSocket;
};
dependsOn = {
};
};
phpfpmService = customPkgs.mkPHPFPMService {
name = "${serviceName}PHPFPMService";
inherit siteName;
runtimeDirectory = rtdir;
# Must match haproxy for socket
inherit user group;
socketUser = service.user;
socketGroup = service.group;
phpIniConfig = {
prependFile = normalizeHeaderPkg {
debug = true;
};
};
siteConfig = {
siteRoot = documentRoot;
};
};
updateService = updateServicePkg {
name = "${serviceName}UpdateService";
inherit documentRoot;
inherit (phpfpmService) user group;
readOnlyPaths = [];
readWritePaths = [
lock_directory
cache_directory
feed_icons_directory
];
postgresServiceName = (utils.getTarget distribution "TtrssPostgresDB").containers.postgresql-database.service_name;
dependsOn = {
inherit config db dbupgrade;
};
};
haproxy = {
frontend = {
acl = {
acl_ttrss = "hdr_beg(host) ttrss.";
};
use_backend = "if acl_ttrss";
};
backend = {
servers = [
{
name = "ttrss1";
address = service.nginxSocket;
balance = "roundrobin";
check = {
inter = "5s";
downinter = "15s";
fall = "3";
rise = "3";
};
httpcheck = "GET /";
# captureoutput = {
# firstport = "3000";
# secondport = "3001";
# issocket = true;
# outputfile = "/tmp/haproxy/ttrss.stream";
# };
}
];
};
debugHeaders = "acl_ttrss";
};
keycloakCliConfig = {
clients = {
ttrss = {
};
};
};
deployKeys = domain: {};
services = {
${db.name} = db;
${config.name} = config;
${dbupgrade.name} = dbupgrade;
${service.name} = service;
${phpfpmService.name} = phpfpmService;
${updateService.name} = updateService;
};
distribute = on: {
${db.name} = on;
${config.name} = on;
${dbupgrade.name} = on;
${service.name} = on;
${phpfpmService.name} = on;
${updateService.name} = on;
};
directories_modes = {
"${rtdir}" = "0550";
"${lock_directory}" = "0770";
"${cache_directory}" = "0770";
"${cache_directory}/upload" = "0770";
"${cache_directory}/images" = "0770";
"${cache_directory}/export" = "0770";
"${feed_icons_directory}" = "0770";
};
}

View file

@ -1,48 +0,0 @@
{ pkgs
}:
{ debug ? false
}:
pkgs.writeText "normalize-headers.php" (''
<?php
$trustedProxies = array(
'127.0.0.1',
'@'
);
# phpinfo(INFO_VARIABLES);
if (isSet($_SERVER['REMOTE_ADDR'])) {
$remote = $_SERVER['REMOTE_ADDR'];
$allowedHeaders = array(
'HTTP_X_FORWARDED_FOR' => 'REMOTE_ADDR',
'HTTP_X_REAL_IP' => 'REMOTE_HOST',
'HTTP_X_FORWARDED_PORT' => 'REMOTE_PORT',
'HTTP_X_FORWARDED_HTTPS' => 'HTTPS',
'HTTP_X_FORWARDED_SERVER_ADDR' => 'SERVER_ADDR',
'HTTP_X_FORWARDED_SERVER_NAME' => 'SERVER_NAME',
'HTTP_X_FORWARDED_SERVER_PORT' => 'SERVER_PORT',
'HTTP_X_FORWARDED_PREFERRED_USERNAME' => 'REMOTE_USER',
);
if(in_array($remote, $trustedProxies)) {
foreach($allowedHeaders as $header => $serverVar) {
if(isSet($_SERVER[$header])) {
if(isSet($_SERVER[$serverVar])) {
$_SERVER["ORIGINAL_$serverVar"] = $_SERVER[$serverVar];
}
$_SERVER[$serverVar] = explode(',', $_SERVER[$header], 2)[0];
}
}
}
}
'' + (if !debug then "" else ''
trigger_error(print_r($_SERVER, true), E_USER_WARNING);
'')
)

View file

@ -1,74 +0,0 @@
{ stdenv
, pkgs
, lib
, utils
}:
{ name
, user
, group
, documentRoot
, readOnlyPaths ? []
, readWritePaths ? []
, postgresServiceName
, dependsOn ? {}
}:
# Assumptions:
# - Do not run as root.
# - Image cache should be writable.
# - Upload cache should be writable.
# - Data export cache should be writable.
# - ICONS_DIR should be writable.
# - LOCK_DIRECTORY should be writable.
let
fullPath = "${documentRoot}";
roPaths = [fullPath] ++ readOnlyPaths;
in
{
inherit name;
pkg = {...}: utils.systemd.mkService rec {
name = "ttrss-update";
content = ''
[Unit]
Description=${name}
After=network.target ${postgresServiceName}
[Service]
User=${user}
Group=${group}
ExecStart=${pkgs.php}/bin/php ${fullPath}/update_daemon2.php
RuntimeDirectory=${name}
PrivateDevices=true
PrivateTmp=true
ProtectKernelTunables=true
ProtectKernelModules=true
ProtectControlGroups=true
ProtectKernelLogs=true
ProtectHome=true
ProtectHostname=true
ProtectClock=true
RestrictSUIDSGID=true
LockPersonality=true
NoNewPrivileges=true
SystemCallFilter=@basic-io @file-system @process @system-service
ProtectSystem=strict
ReadOnlyPaths=${builtins.concatStringsSep " " roPaths}
ReadWritePaths=${builtins.concatStringsSep " " readWritePaths}
# NoExecPaths=/
# ExecPaths=${pkgs.php}/bin
[Install]
WantedBy=multi-user.target
'';
};
inherit dependsOn;
type = "systemd-unit";
}

View file

@ -1,88 +0,0 @@
{ stdenv
, pkgs
, lib
}:
with lib;
with lib.lists;
with lib.attrsets;
rec {
tmpFilesFromDirectories = user: group: d:
let
wrapTmpfiles = dir: mode: "d '${dir}' ${mode} ${user} ${group} - -";
in
mapAttrsToList wrapTmpfiles d;
systemd = {
mkService = {name, content, timer ? null}: stdenv.mkDerivation {
inherit name;
src = pkgs.writeTextDir "${name}.service" content;
timerSrc = if timer == null then null else pkgs.writeTextDir "${name}.timer" timer;
installPhase = ''
mkdir -p $out/etc/systemd/system
cp $src/*.service $out/etc/systemd/system
'' + (if timer == null then "" else ''
cp $timerSrc/*.timer $out/etc/systemd/system
'');
};
};
mkConfigFile = {dir, name, content}: stdenv.mkDerivation rec {
inherit name;
src = pkgs.writeTextDir name content;
buildCommand = ''
mkdir -p $out
cp ${src}/${name} $out/${name}
echo "${dir}" > $out/.dysnomia-targetdir
cat > $out/.dysnomia-fileset <<FILESET
symlink $out/${name}
target .
FILESET
'';
};
dnsmasqConfig = domain: subdomains:
''
${concatMapStringsSep "\n" (x: "address=/${x}.${domain}/127.0.0.1") subdomains}
domain=${domain}
'';
keyEnvironmentFile = path: "EnvironmentFile=/run/keys/${path}";
keyEnvironmentFiles = names: concatMapStrings (path: "${keyEnvironmentFile path}\n") (attrValues names);
keyServiceDependencies = names: concatMapStringsSep " " (path: "${path}-key.service") (attrValues names);
recursiveMerge = attrList:
let f = attrPath:
zipAttrsWith (n: values:
if all isList values then
concatLists values
else if all isAttrs values then
f (attrPath ++ [n]) values
else
last values
);
in f [] attrList;
getTarget = distribution: name: builtins.elemAt (builtins.getAttr name distribution) 0;
getDomain = distribution: name: (getTarget distribution name).containers.system.domain;
unitDepends = verb: dependsOn:
let
withSystemdUnitFile = filter (hasAttr "systemdUnitFile") (attrValues dependsOn);
systemdUnitFiles = map (x: x.systemdUnitFile) withSystemdUnitFile;
in
if length systemdUnitFiles == 0 then
""
else
"${verb}=${concatStringsSep " " systemdUnitFiles}";
}
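A few worked examples for the helpers above, using values that appear elsewhere in this repository (the ttrss runtime directories and a keycloak key name):
# tmpFilesFromDirectories "ttrss" "ttrss" { "/run/ttrss" = "0550"; "/run/ttrss/cache" = "0770"; }
#   => [ "d '/run/ttrss' 0550 ttrss ttrss - -"
#        "d '/run/ttrss/cache' 0770 ttrss ttrss - -" ]
# keyEnvironmentFiles { dbPassword = "keycloakdbpassword"; }
#   => "EnvironmentFile=/run/keys/keycloakdbpassword\n"
# keyServiceDependencies { dbPassword = "keycloakdbpassword"; }
#   => "keycloakdbpassword-key.service"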

View file

@ -1,267 +0,0 @@
{ customPkgs
, pkgs
, utils
, secret
}:
{ serviceName ? "Vaultwarden"
, subdomain ? "vaultwarden"
, ingress ? 18005
, signupsAllowed ? true # signups allowed since we're behind SSO
, signupsVerify ? false
, user ? "vaultwarden"
, group ? "vaultwarden"
, dataFolder ? "/var/lib/vaultwarden"
, postgresDatabase ? "vaultwarden"
, postgresUser ? "vaultwarden"
, postgresPasswordLocation ? "vaultwarden"
, webvaultEnabled ? true
, webvaultPath ? "/usr/share/webapps/vaultwarden"
, cookieSecretName ? "cookiesecret"
, clientSecretName ? "clientsecret"
, smtp ? {}
, sso ? {}
, distribution ? {}
, KeycloakService ? null
, KeycloakCliService ? null
, HaproxyService ? null
}:
let
mkVaultwardenWeb = pkgs.callPackage ./web.nix {inherit utils;};
ssoIngress = if sso != {} then ingress else null;
serviceIngress = if sso != {} then ingress+1 else ingress;
metricsPort = if sso != {} then ingress+2 else ingress+1;
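# Worked example with the default ingress = 18005 and sso configured:
# oauth2-proxy binds 127.0.0.1:18005, vaultwarden itself binds 18006
# (ROCKET_PORT) and the oauth2-proxy metrics endpoint gets 18007.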
smtpConfig = smtp;
in
rec {
inherit user group;
inherit subdomain;
db = customPkgs.mkPostgresDB {
name = "${serviceName}PostgresDB";
database = postgresDatabase;
username = postgresUser;
# TODO: use passwordFile
password = postgresPasswordLocation;
};
web = mkVaultwardenWeb {
name = "${serviceName}Web";
path = webvaultPath;
};
service = let
name = "${serviceName}Service";
domain = utils.getDomain distribution name;
pkgsVaultwarden-1_27_0 =
let pkg = builtins.fetchurl {
url = "https://raw.githubusercontent.com/NixOS/nixpkgs/988cc958c57ce4350ec248d2d53087777f9e1949/pkgs/tools/security/vaultwarden/default.nix";
sha256 = "0hwjbq5qb8y5frb2ca3m501x84zaibzyn088zzaf7zcwkxvqb0im";
};
in pkgs.callPackage pkg {
inherit (pkgs.darwin.apple_sdk.frameworks) Security CoreServices;
dbBackend = "postgresql";
};
in {
inherit name;
pkg =
{ db
, web
}: let
postgresHost = db.target.properties.hostname;
in utils.systemd.mkService rec {
name = "vaultwarden";
content = ''
[Unit]
Description=Vaultwarden Server
Documentation=https://github.com/dani-garcia/vaultwarden
After=network.target
After=${utils.keyServiceDependencies smtpConfig.keys}
Wants=${utils.keyServiceDependencies smtpConfig.keys}
[Service]
Environment=DATA_FOLDER=${dataFolder}
Environment=DATABASE_URL=postgresql://${postgresUser}:${postgresPasswordLocation}@${postgresHost}/${postgresDatabase}
Environment=IP_HEADER=X-Real-IP
Environment=WEB_VAULT_FOLDER=${web.path}
Environment=WEB_VAULT_ENABLED=${if webvaultEnabled then "true" else "false"}
Environment=SIGNUPS_ALLOWED=${if signupsAllowed then "true" else "false"}
Environment=SIGNUPS_VERIFY=${if signupsVerify then "true" else "false"}
# Disabled because the /admin path is protected by SSO
Environment=DISABLE_ADMIN_TOKEN=true
Environment=INVITATIONS_ALLOWED=true
Environment=DOMAIN=https://${subdomain}.${domain}
# Assumes we're behind a reverse proxy
Environment=ROCKET_ADDRESS=127.0.0.1
Environment=ROCKET_PORT=${builtins.toString serviceIngress}
Environment=USE_SYSLOG=true
Environment=EXTENDED_LOGGING=true
Environment=LOG_FILE=
Environment=LOG_LEVEL=trace
${utils.keyEnvironmentFiles smtpConfig.keys}
Environment=SMTP_FROM=${smtpConfig.from}
Environment=SMTP_FROM_NAME=${smtpConfig.fromName}
Environment=SMTP_PORT=${builtins.toString smtpConfig.port}
Environment=SMTP_AUTH_MECHANISM=${smtpConfig.authMechanism}
ExecStart=${pkgsVaultwarden-1_27_0}/bin/vaultwarden
WorkingDirectory=${dataFolder}
StateDirectory=${name}
User=${user}
Group=${group}
# Allow vaultwarden to bind to privileged ports (0-1024) and restrict it to
# that capability; it is only granted when the ingress port actually requires it
CapabilityBoundingSet=${if serviceIngress <= 1024 then "CAP_NET_BIND_SERVICE" else ""}
AmbientCapabilities=${if serviceIngress <= 1024 then "CAP_NET_BIND_SERVICE" else ""}
PrivateUsers=yes
NoNewPrivileges=yes
LimitNOFILE=1048576
UMask=0077
ProtectSystem=strict
ProtectHome=yes
# ReadWritePaths=${dataFolder}
PrivateTmp=yes
PrivateDevices=yes
ProtectHostname=yes
ProtectClock=yes
ProtectKernelTunables=yes
ProtectKernelModules=yes
ProtectKernelLogs=yes
ProtectControlGroups=yes
RestrictAddressFamilies=AF_UNIX AF_INET AF_INET6
RestrictNamespaces=yes
LockPersonality=yes
MemoryDenyWriteExecute=yes
RestrictRealtime=yes
RestrictSUIDSGID=yes
RemoveIPC=yes
SystemCallFilter=@system-service
SystemCallFilter=~@privileged @resources
SystemCallArchitectures=native
[Install]
WantedBy=multi-user.target
'';
};
dependsOn = {
inherit db;
inherit web;
};
type = "systemd-unit";
};
haproxy = service: {
frontend = {
acl = {
acl_vaultwarden = "hdr_beg(host) vaultwarden.";
};
use_backend = "if acl_vaultwarden";
};
backend = {
# TODO: instead, we should generate a target-specific service, see https://hydra.nixos.org/build/203347995/download/2/manual/#idm140737322273072
servers = map (dist: {
name = "vaultwarden_${dist.properties.hostname}_1";
# TODO: this should use the target's hostname instead of 127.0.0.1
# address = "${dist.properties.hostname}:${builtins.toString ingress}";
address = "127.0.0.1:${builtins.toString ingress}";
resolvers = "default";
}) service;
};
};
oauth2Proxy =
let
name = "${serviceName}Oauth2Proxy";
in customPkgs.mkOauth2Proxy {
inherit name;
serviceName = subdomain;
domain = utils.getDomain distribution name;
keycloakSubdomain = KeycloakService.subdomain;
keycloakDomain = utils.getDomain distribution "KeycloakService";
ingress = "127.0.0.1:${toString ssoIngress}";
egress = [ "http://127.0.0.1:${toString serviceIngress}" ];
realm = sso.realm;
allowed_roles = [ "user" "/admin|admin" ];
skip_auth_routes = [
"^/api"
"^/identity/connect/token"
"^/identity/accounts/prelogin"
];
inherit metricsPort;
keys = {
cookieSecret = "${serviceName}_oauth2proxy_cookiesecret";
clientSecret = "${serviceName}_oauth2proxy_clientsecret";
};
inherit distribution HaproxyService KeycloakService KeycloakCliService;
};
keycloakCliConfig = {
clients = {
vaultwarden = {
resourcesUris = {
adminPath = ["/admin/*"];
userPath = ["/*"];
};
access = {
admin = {
roles = [ "admin" ];
resources = [ "adminPath" ];
};
user = {
roles = [ "user" ];
resources = [ "userPath" ];
};
};
};
};
};
deployKeys = domain: {
"${serviceName}_oauth2proxy_cookiesecret".text = ''
OAUTH2_PROXY_COOKIE_SECRET="${secret "${domain}/${subdomain}/${cookieSecretName}"}"
'';
"${serviceName}_oauth2proxy_clientsecret".text = ''
OAUTH2_PROXY_CLIENT_SECRET="${secret "${domain}/${subdomain}/${clientSecretName}"}"
'';
"${serviceName}_smtp_all".text = ''
SMTP_HOST="${secret "${domain}/mailgun.com/smtp_hostname"}"
SMTP_USERNAME="${secret "${domain}/mailgun.com/smtp_login"}"
SMTP_PASSWORD="${secret "${domain}/mailgun.com/password"}"
'';
};
smtp.keys.setup = "${serviceName}_smtp_all";
services = {
${db.name} = db;
${web.name} = web;
${service.name} = service;
${oauth2Proxy.name} = oauth2Proxy;
};
distribute = on: {
${db.name} = on;
${web.name} = on;
${service.name} = on;
${oauth2Proxy.name} = on;
};
}
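
To make the returned attribute set easier to follow, here is a hedged sketch of the shapes produced by the deployKeys and distribute helpers; the domain example.com, the infrastructure.machine1 target and the <secret ...> placeholder are assumptions for illustration only:

#   (vaultwarden.deployKeys "example.com")."Vaultwarden_oauth2proxy_cookiesecret".text
#     => ''OAUTH2_PROXY_COOKIE_SECRET="<secret example.com/vaultwarden/cookiesecret>"''
#
#   vaultwarden.distribute [ infrastructure.machine1 ]
#     => { VaultwardenPostgresDB  = [ infrastructure.machine1 ];
#          VaultwardenWeb         = [ infrastructure.machine1 ];
#          VaultwardenService     = [ infrastructure.machine1 ];
#          VaultwardenOauth2Proxy = [ infrastructure.machine1 ]; }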

View file

@ -1,35 +0,0 @@
{ stdenv
, pkgs
, utils
}:
{ name
, path
}:
{
inherit name;
inherit path;
pkg = stdenv.mkDerivation rec {
inherit name;
buildCommand =
let
dir = dirOf path;
base = baseNameOf path;
in ''
mkdir -p $out
ln -s ${pkgs.vaultwarden-vault}/share/vaultwarden/vault $out/${base}
echo "${dir}" > $out/.dysnomia-targetdir
cat > $out/.dysnomia-fileset <<FILESET
symlink $out/${base}
target .
FILESET
'';
};
type = "fileset";
}
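
As a rough sketch, this is how the fileset above might be instantiated (it is what default.nix calls mkVaultwardenWeb); the arguments mirror that file's defaults and the derivation placeholder is an assumption:

#   mkVaultwardenWeb {
#     name = "VaultwardenWeb";
#     path = "/usr/share/webapps/vaultwarden";
#   }
#   => { name = "VaultwardenWeb";
#        path = "/usr/share/webapps/vaultwarden";
#        pkg  = <derivation "VaultwardenWeb">;  # symlinks pkgs.vaultwarden-vault's vault/ into $out
#        type = "fileset"; }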