split nixos from darwin

Required because they don't share all attributes.
This commit is contained in:
Noah Masur
2022-12-21 17:07:58 -07:00
parent 7063bd5f7a
commit d021baa1bb
78 changed files with 22 additions and 21 deletions

nixos/services/backups.nix

@ -0,0 +1,67 @@
{ config, pkgs, lib, ... }: {
options = {
    backupS3 = {
      endpoint = lib.mkOption {
        type = lib.types.nullOr lib.types.str;
        description = "S3 endpoint for backups";
        default = null;
      };
      bucket = lib.mkOption {
        type = lib.types.nullOr lib.types.str;
        description = "S3 bucket for backups";
        default = null;
      };
      accessKeyId = lib.mkOption {
        type = lib.types.nullOr lib.types.str;
        description = "S3 access key ID for backups";
        default = null;
      };
};
};
  config = lib.mkIf (config.backupS3.endpoint != null) {
users.groups.backup = { };
secrets.backup = {
source = ../../private/backup.age;
dest = "${config.secretsDirectory}/backup";
group = "backup";
permissions = "0440";
};
users.users.litestream.extraGroups = [ "backup" ];
services.litestream = {
enable = true;
environmentFile = config.secrets.backup.dest;
};
# Wait for secret to exist
systemd.services.litestream = {
after = [ "backup-secret.service" ];
requires = [ "backup-secret.service" ];
environment.AWS_ACCESS_KEY_ID = config.backupS3.accessKeyId;
};
# # Backup library to object storage
# services.restic.backups.calibre = {
# user = "calibre-web";
# repository =
# "s3://${config.backupS3.endpoint}/${config.backupS3.bucket}/calibre";
# paths = [
# "/var/books"
# "/var/lib/calibre-web/app.db"
# "/var/lib/calibre-web/gdrive.db"
# ];
# initialize = true;
# timerConfig = { OnCalendar = "00:05:00"; };
# environmentFile = backupS3File;
# };
};
}

nixos/services/caddy.nix

@ -0,0 +1,37 @@
{ config, pkgs, lib, ... }: {
options = {
caddy.enable = lib.mkEnableOption "Caddy reverse proxy.";
caddy.routes = lib.mkOption {
type = lib.types.listOf lib.types.attrs;
description = "Caddy JSON routes for http servers";
default = [ ];
};
caddy.blocks = lib.mkOption {
type = lib.types.listOf lib.types.attrs;
description = "Caddy JSON error blocks for http servers";
default = [ ];
};
};
config = lib.mkIf (config.caddy.enable && config.caddy.routes != [ ]) {
services.caddy = {
enable = true;
      adapter = "''"; # Pass an empty adapter so Caddy loads the JSON config natively
configFile = pkgs.writeText "Caddyfile" (builtins.toJSON {
apps.http.servers.main = {
listen = [ ":443" ];
routes = config.caddy.routes;
errors.routes = config.caddy.blocks;
};
});
};
networking.firewall.allowedTCPPorts = [ 80 443 ];
networking.firewall.allowedUDPPorts = [ 443 ];
};
}
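
Downstream modules contribute to caddy.routes instead of configuring services.caddy directly; each service file below registers its own route. A minimal sketch of the pattern (hypothetical module; the hostname and port are placeholders):

  { config, lib, ... }: {
    config = {
      caddy.routes = [{
        match = [{ host = [ "example.com" ]; }];
        handle = [{
          handler = "reverse_proxy";
          upstreams = [{ dial = "localhost:8000"; }];
        }];
      }];
    };
  }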

nixos/services/calibre.nix

@ -0,0 +1,73 @@
{ config, pkgs, lib, ... }: {
options = {
bookServer = lib.mkOption {
      type = lib.types.nullOr lib.types.str;
description = "Hostname for Calibre library";
default = null;
};
};
config = lib.mkIf (config.bookServer != null) {
services.calibre-web = {
enable = true;
openFirewall = true;
options = {
reverseProxyAuth.enable = false;
enableBookConversion = true;
enableBookUploading = true;
};
};
# Fix: https://github.com/janeczku/calibre-web/issues/2422
nixpkgs.overlays = [
(final: prev: {
calibre-web = prev.calibre-web.overrideAttrs (old: {
patches = (old.patches or [ ])
++ [ ../../patches/calibre-web-cloudflare.patch ];
});
})
];
caddy.routes = [{
match = [{ host = [ config.bookServer ]; }];
handle = [{
handler = "reverse_proxy";
upstreams = [{ dial = "localhost:8083"; }];
headers.request.add."X-Script-Name" = [ "/calibre-web" ];
}];
}];
# Run a backup on a schedule
systemd.timers.calibre-backup = {
timerConfig = {
OnCalendar = "*-*-* 00:00:00"; # Once per day
Unit = "calibre-backup.service";
};
wantedBy = [ "timers.target" ];
};
# Backup Calibre data to object storage
systemd.services.calibre-backup =
let libraryPath = "/var/lib/calibre-web"; # Default location
in {
description = "Backup Calibre data";
environment.AWS_ACCESS_KEY_ID = config.backupS3.accessKeyId;
serviceConfig = {
Type = "oneshot";
User = "calibre-web";
Group = "backup";
EnvironmentFile = config.secrets.backup.dest;
};
script = ''
${pkgs.awscli2}/bin/aws s3 sync \
${libraryPath}/ \
s3://${config.backupS3.bucket}/calibre/ \
--endpoint-url=https://${config.backupS3.endpoint}
'';
};
};
}

nixos/services/cloudflare.nix

@ -0,0 +1,56 @@
# This module is necessary for hosts that are serving through Cloudflare.
{ config, lib, ... }:
let
cloudflareIpRanges = [
# Cloudflare IPv4: https://www.cloudflare.com/ips-v4
"173.245.48.0/20"
"103.21.244.0/22"
"103.22.200.0/22"
"103.31.4.0/22"
"141.101.64.0/18"
"108.162.192.0/18"
"190.93.240.0/20"
"188.114.96.0/20"
"197.234.240.0/22"
"198.41.128.0/17"
"162.158.0.0/15"
"104.16.0.0/13"
"104.24.0.0/14"
"172.64.0.0/13"
"131.0.72.0/22"
# Cloudflare IPv6: https://www.cloudflare.com/ips-v6
"2400:cb00::/32"
"2606:4700::/32"
"2803:f800::/32"
"2405:b500::/32"
"2405:8100::/32"
"2a06:98c0::/29"
"2c0f:f248::/32"
];
in {
options.cloudflare.enable = lib.mkEnableOption "Use Cloudflare.";
config = lib.mkIf config.cloudflare.enable {
# Forces Caddy to error if coming from a non-Cloudflare IP
caddy.blocks = [{
match = [{ not = [{ remote_ip.ranges = cloudflareIpRanges; }]; }];
handle = [{
handler = "static_response";
abort = true;
}];
}];
# Allows Nextcloud to trust Cloudflare IPs
services.nextcloud.config.trustedProxies = cloudflareIpRanges;
};
}

nixos/services/default.nix

@ -0,0 +1,25 @@
{ ... }: {
imports = [
./backups.nix
./caddy.nix
./calibre.nix
./cloudflare.nix
./gitea.nix
./gnupg.nix
./honeypot.nix
./jellyfin.nix
./keybase.nix
./mullvad.nix
./n8n.nix
./netdata.nix
./nextcloud.nix
./prometheus.nix
./secrets.nix
./sshd.nix
./transmission.nix
./vaultwarden.nix
./wireguard.nix
];
}

nixos/services/gitea.nix

@ -0,0 +1,92 @@
{ config, lib, ... }:
let giteaPath = "/var/lib/gitea"; # Default service directory
in {
options = {
giteaServer = lib.mkOption {
description = "Hostname for Gitea.";
      type = lib.types.nullOr lib.types.str;
default = null;
};
};
config = lib.mkIf (config.giteaServer != null) {
services.gitea = {
enable = true;
httpPort = 3001;
httpAddress = "127.0.0.1";
rootUrl = "https://${config.giteaServer}/";
database.type = "sqlite3";
settings = {
repository = {
DEFAULT_PUSH_CREATE_PRIVATE = true;
DISABLE_HTTP_GIT = false;
ACCESS_CONTROL_ALLOW_ORIGIN = config.giteaServer;
ENABLE_PUSH_CREATE_USER = true;
ENABLE_PUSH_CREATE_ORG = true;
DEFAULT_BRANCH = "main";
};
server = {
SSH_PORT = 22;
START_SSH_SERVER = false; # Use sshd instead
DISABLE_SSH = false;
# SSH_LISTEN_HOST = "0.0.0.0";
# SSH_LISTEN_PORT = 122;
};
service.DISABLE_REGISTRATION = true;
session.COOKIE_SECURE = true;
ui.SHOW_USER_EMAIL = false;
};
extraConfig = null;
};
networking.firewall.allowedTCPPorts = [ 122 ];
caddy.routes = [{
match = [{ host = [ config.giteaServer ]; }];
handle = [{
handler = "reverse_proxy";
upstreams = [{ dial = "localhost:3001"; }];
}];
}];
## Backup config
# Open to groups, allowing for backups
systemd.services.gitea.serviceConfig.StateDirectoryMode =
lib.mkForce "0770";
systemd.tmpfiles.rules = [
"d ${giteaPath}/data 0775 gitea gitea"
"f ${giteaPath}/data/gitea.db 0660 gitea gitea"
];
# Allow litestream and gitea to share a sqlite database
users.users.litestream.extraGroups = [ "gitea" ];
users.users.gitea.extraGroups = [ "litestream" ];
# Backup sqlite database with litestream
services.litestream = {
settings = {
dbs = [{
path = "${giteaPath}/data/gitea.db";
replicas = [{
url =
"s3://${config.backupS3.bucket}.${config.backupS3.endpoint}/gitea";
}];
}];
};
};
# Don't start litestream unless gitea is up
systemd.services.litestream = {
after = [ "gitea.service" ];
requires = [ "gitea.service" ];
};
};
}

nixos/services/gnupg.nix

@ -0,0 +1,18 @@
{ config, pkgs, lib, ... }: {
options.gpg.enable = lib.mkEnableOption "GnuPG encryption.";
config.home-manager.users.${config.user} = lib.mkIf config.gpg.enable {
programs.gpg.enable = true;
services.gpg-agent = {
enable = true;
      defaultCacheTtl = 86400; # 24 hours; timer resets on each use
      defaultCacheTtlSsh = 86400; # 24 hours; timer resets on each use
      maxCacheTtl = 34560000; # 400 days; hard cap, never extended by use
      maxCacheTtlSsh = 34560000; # 400 days; hard cap, never extended by use
pinentryFlavor = "tty";
};
home = lib.mkIf config.gui.enable { packages = with pkgs; [ pinentry ]; };
};
}

nixos/services/honeypot.nix

@ -0,0 +1,77 @@
{ config, lib, pkgs, ... }:
# Currently has some issues that keep it from being viable.
# Taken from:
# https://dataswamp.org/~solene/2022-09-29-iblock-implemented-in-nixos.html
# You will need to flush all rules when removing:
# https://serverfault.com/questions/200635/best-way-to-clear-all-iptables-rules
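# A rough cleanup sketch (an assumption, not verbatim from the link; this
# resets the whole firewall until the next rebuild):
#   iptables -F; iptables -X; ip6tables -F; ip6tables -X
#   ipset destroy blocked; ipset destroy blocked6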
let
portsToBlock = [ 25545 25565 25570 ];
portsString =
builtins.concatStringsSep "," (builtins.map builtins.toString portsToBlock);
# Block IPs for 20 days
expire = 60 * 60 * 24 * 20;
rules = table: [
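    # A single NEW connection to a honeypot port puts the source address on
    # the kernel's "recent" list (rule 1); matching again within 10 seconds
    # adds it to the ipset (rule 2), and rules 3-4 refuse all further
    # TCP and UDP traffic from that set.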
"INPUT -i eth0 -p tcp -m multiport --dports ${portsString} -m state --state NEW -m recent --set"
"INPUT -i eth0 -p tcp -m multiport --dports ${portsString} -m state --state NEW -m recent --update --seconds 10 --hitcount 1 -j SET --add-set ${table} src"
"INPUT -i eth0 -p tcp -m set --match-set ${table} src -j nixos-fw-refuse"
"INPUT -i eth0 -p udp -m set --match-set ${table} src -j nixos-fw-refuse"
];
create-rules = lib.concatStringsSep "\n"
(builtins.map (rule: "iptables -C " + rule + " || iptables -A " + rule)
(rules "blocked") ++ builtins.map
(rule: "ip6tables -C " + rule + " || ip6tables -A " + rule)
(rules "blocked6"));
delete-rules = lib.concatStringsSep "\n"
(builtins.map (rule: "iptables -C " + rule + " && iptables -D " + rule)
(rules "blocked") ++ builtins.map
(rule: "ip6tables -C " + rule + " && ip6tables -D " + rule)
(rules "blocked6"));
in {
options.honeypot.enable = lib.mkEnableOption "Honeypot fail2ban system.";
config.networking.firewall = lib.mkIf config.honeypot.enable {
extraPackages = [ pkgs.ipset ];
# allowedTCPPorts = portsToBlock;
# Restore ban list when starting up
extraCommands = ''
if test -f /var/lib/ipset.conf
then
ipset restore -! < /var/lib/ipset.conf
else
ipset -exist create blocked hash:ip ${
if expire > 0 then "timeout ${toString expire}" else ""
}
ipset -exist create blocked6 hash:ip family inet6 ${
if expire > 0 then "timeout ${toString expire}" else ""
}
fi
${create-rules}
'';
# Save list when shutting down
extraStopCommands = ''
ipset -exist create blocked hash:ip ${
if expire > 0 then "timeout ${toString expire}" else ""
}
ipset -exist create blocked6 hash:ip family inet6 ${
if expire > 0 then "timeout ${toString expire}" else ""
}
ipset save > /var/lib/ipset.conf
${delete-rules}
'';
};
}

nixos/services/jellyfin.nix

@ -0,0 +1,31 @@
{ config, lib, ... }: {
options = {
streamServer = lib.mkOption {
      type = lib.types.nullOr lib.types.str;
description = "Hostname for Jellyfin library";
default = null;
};
};
config = lib.mkIf (config.streamServer != null) {
services.jellyfin.enable = true;
caddy.routes = [{
match = [{ host = [ config.streamServer ]; }];
handle = [{
handler = "reverse_proxy";
upstreams = [{ dial = "localhost:8096"; }];
}];
}];
# Create videos directory, allow anyone in Jellyfin group to manage it
systemd.tmpfiles.rules = [
"d /var/lib/jellyfin 0775 jellyfin jellyfin"
"d /var/lib/jellyfin/library 0775 jellyfin jellyfin"
];
};
}

nixos/services/keybase.nix

@ -0,0 +1,34 @@
{ config, pkgs, lib, ... }: {
options.keybase.enable = lib.mkEnableOption "Keybase.";
config = lib.mkIf config.keybase.enable {
services.keybase.enable = true;
services.kbfs = {
enable = true;
# enableRedirector = true;
mountPoint = "/run/user/1000/keybase/kbfs";
};
security.wrappers.keybase-redirector = {
setuid = true;
owner = "root";
group = "root";
source = "${pkgs.kbfs}/bin/redirector";
};
home-manager.users.${config.user} = {
home.packages = [ (lib.mkIf config.gui.enable pkgs.keybase-gui) ];
home.file = let
ignorePatterns = ''
keybase/
kbfs/'';
in {
".rgignore".text = ignorePatterns;
".fdignore".text = ignorePatterns;
};
};
};
}

nixos/services/mullvad.nix

@ -0,0 +1,12 @@
{ config, pkgs, lib, ... }: {
options.mullvad.enable = lib.mkEnableOption "Mullvad VPN.";
config = lib.mkIf config.mullvad.enable {
services.mullvad-vpn.enable = true;
environment.systemPackages = [ pkgs.mullvad-vpn ];
};
}

nixos/services/n8n.nix

@ -0,0 +1,33 @@
{ config, pkgs, lib, ... }: {
options = {
n8nServer = lib.mkOption {
      type = lib.types.nullOr lib.types.str;
description = "Hostname for n8n automation";
default = null;
};
};
config = lib.mkIf (config.n8nServer != null) {
services.n8n = {
enable = true;
settings = {
n8n = {
listenAddress = "127.0.0.1";
port = 5678;
};
};
};
caddy.routes = [{
match = [{ host = [ config.n8nServer ]; }];
handle = [{
handler = "reverse_proxy";
upstreams = [{ dial = "localhost:5678"; }];
}];
}];
};
}

nixos/services/netdata.nix

@ -0,0 +1,16 @@
{ config, pkgs, lib, ... }: {
options.netdata.enable = lib.mkEnableOption "Netdata metrics.";
config = lib.mkIf config.netdata.enable {
services.netdata = {
enable = true;
# Disable local dashboard (unsecured)
config = { web.mode = "none"; };
};
};
}

nixos/services/nextcloud.nix

@ -0,0 +1,86 @@
{ config, pkgs, lib, ... }: {
options = {
nextcloudServer = lib.mkOption {
      type = lib.types.nullOr lib.types.str;
description = "Hostname for Nextcloud";
default = null;
};
};
config = lib.mkIf (config.nextcloudServer != null) {
services.nextcloud = {
enable = true;
package = pkgs.nextcloud25; # Required to specify
https = true;
hostName = "localhost";
maxUploadSize = "50G";
config = {
adminpassFile = config.secrets.nextcloud.dest;
extraTrustedDomains = [ config.nextcloudServer ];
};
};
# Don't let Nginx use main ports (using Caddy instead)
services.nginx.virtualHosts."localhost".listen = [{
addr = "127.0.0.1";
port = 8080;
}];
# Point Caddy to Nginx
caddy.routes = [{
match = [{ host = [ config.nextcloudServer ]; }];
handle = [{
handler = "reverse_proxy";
upstreams = [{ dial = "localhost:8080"; }];
}];
}];
# Create credentials file for nextcloud
secrets.nextcloud = {
source = ../../private/nextcloud.age;
dest = "${config.secretsDirectory}/nextcloud";
owner = "nextcloud";
group = "nextcloud";
permissions = "0440";
};
systemd.services.nextcloud-secret = {
requiredBy = [ "nextcloud-setup.service" ];
before = [ "nextcloud-setup.service" ];
};
## Backup config
# Open to groups, allowing for backups
systemd.services.phpfpm-nextcloud.serviceConfig.StateDirectoryMode =
lib.mkForce "0770";
# Allow litestream and nextcloud to share a sqlite database
users.users.litestream.extraGroups = [ "nextcloud" ];
users.users.nextcloud.extraGroups = [ "litestream" ];
# Backup sqlite database with litestream
services.litestream = {
settings = {
dbs = [{
path = "${config.services.nextcloud.datadir}/data/nextcloud.db";
replicas = [{
url =
"s3://${config.backupS3.bucket}.${config.backupS3.endpoint}/nextcloud";
}];
}];
};
};
# Don't start litestream unless nextcloud is up
systemd.services.litestream = {
after = [ "phpfpm-nextcloud.service" ];
requires = [ "phpfpm-nextcloud.service" ];
};
};
}

nixos/services/prometheus.nix

@ -0,0 +1,35 @@
{ config, pkgs, lib, ... }: {
options.metricsServer = lib.mkOption {
    type = lib.types.nullOr lib.types.str;
description = "Hostname of the Grafana server.";
default = null;
};
config = lib.mkIf (config.metricsServer != null) {
services.grafana.enable = true;
# Required to fix error in latest nixpkgs
services.grafana.settings = { };
services.prometheus = {
enable = true;
exporters.node.enable = true;
scrapeConfigs = [{
job_name = "local";
static_configs = [{ targets = [ "127.0.0.1:9100" ]; }];
}];
};
caddy.routes = [{
match = [{ host = [ config.metricsServer ]; }];
handle = [{
handler = "reverse_proxy";
upstreams = [{ dial = "localhost:3000"; }];
}];
}];
};
}

nixos/services/secrets.nix

@ -0,0 +1,91 @@
# Secrets management method taken from here:
# https://xeiaso.net/blog/nixos-encrypted-secrets-2021-01-20
# In my case, I pre-encrypt my secrets and commit them to git.
{ config, pkgs, lib, ... }: {
options = {
secretsDirectory = lib.mkOption {
type = lib.types.str;
description = "Default path to place secrets.";
default = "/var/private";
};
secrets = lib.mkOption {
type = lib.types.attrsOf (lib.types.submodule {
options = {
source = lib.mkOption {
type = lib.types.path;
description = "Path to encrypted secret.";
};
dest = lib.mkOption {
type = lib.types.str;
description = "Resulting path for decrypted secret.";
};
owner = lib.mkOption {
default = "root";
type = lib.types.str;
description = "User to own the secret.";
};
group = lib.mkOption {
default = "root";
type = lib.types.str;
description = "Group to own the secret.";
};
permissions = lib.mkOption {
default = "0400";
type = lib.types.str;
description = "Permissions expressed as octal.";
};
};
});
description = "Set of secrets to decrypt to disk.";
default = { };
};
};
config = lib.mkIf (pkgs.stdenv.isLinux && !config.wsl.enable) {
# Create a default directory to place secrets
systemd.tmpfiles.rules = [ "d ${config.secretsDirectory} 0755 root wheel" ];
# Declare oneshot service to decrypt secret using SSH host key
# - Requires that the secret is already encrypted for the host
# - Encrypt secrets: nix run github:nmasur/dotfiles#encrypt-secret
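    # - A roughly equivalent raw command (a sketch; the recipient key path and
    #   file names are assumptions based on the default NixOS host key location):
    #     age --encrypt -R /etc/ssh/ssh_host_ed25519_key.pub \
    #       -o private/backup.age backup.env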
systemd.services = lib.mapAttrs' (name: attrs: {
name = "${name}-secret";
value = {
description = "Decrypt secret for ${name}";
wantedBy = [ "multi-user.target" ];
serviceConfig.Type = "oneshot";
script = ''
${pkgs.age}/bin/age --decrypt \
--identity ${config.identityFile} \
--output ${attrs.dest} \
${attrs.source}
chown '${attrs.owner}':'${attrs.group}' '${attrs.dest}'
chmod '${attrs.permissions}' '${attrs.dest}'
'';
};
}) config.secrets;
# Example declaration
# config.secrets.my-secret = {
# source = ../../private/my-secret.age;
# dest = "/var/lib/private/my-secret";
# owner = "my-app";
# group = "my-app";
# permissions = "0440";
# };
};
}
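
For instance, the secrets.backup declaration in backups.nix above generates a unit roughly like this (a sketch: the age source path is abbreviated and the default secretsDirectory of /var/private is assumed):

  systemd.services.backup-secret = {
    description = "Decrypt secret for backup";
    wantedBy = [ "multi-user.target" ];
    serviceConfig.Type = "oneshot";
    script = ''
      ${pkgs.age}/bin/age --decrypt \
        --identity ${config.identityFile} \
        --output /var/private/backup \
        /nix/store/...-backup.age
      chown 'root':'backup' '/var/private/backup'
      chmod '0440' '/var/private/backup'
    '';
  };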

nixos/services/sshd.nix

@ -0,0 +1,36 @@
{ config, pkgs, lib, ... }: {
options = {
publicKey = lib.mkOption {
type = lib.types.str;
description = "Public SSH key authorized for this system.";
};
permitRootLogin = lib.mkOption {
type = lib.types.str;
description = "Root login settings.";
default = "no";
};
};
config = lib.mkIf (pkgs.stdenv.isLinux && !config.wsl.enable) {
services.openssh = {
enable = true;
ports = [ 22 ];
passwordAuthentication = false;
gatewayPorts = "no";
forwardX11 = false;
allowSFTP = true;
permitRootLogin = config.permitRootLogin;
};
users.users.${config.user}.openssh.authorizedKeys.keys =
[ config.publicKey ];
# Implement a simple fail2ban service for sshd
services.sshguard.enable = true;
# Add terminfo for SSH from popular terminal emulators
environment.enableAllTerminfo = true;
};
}

nixos/services/transmission.nix

@ -0,0 +1,82 @@
{ config, pkgs, lib, ... }: {
options = {
transmissionServer = lib.mkOption {
      type = lib.types.nullOr lib.types.str;
description = "Hostname for Transmission";
default = null;
};
};
config = let
namespace = config.networking.wireguard.interfaces.wg0.interfaceNamespace;
vpnIp = lib.strings.removeSuffix "/32"
(builtins.head config.networking.wireguard.interfaces.wg0.ips);
in lib.mkIf (config.wireguard.enable && config.transmissionServer != null) {
# Setup transmission
services.transmission = {
enable = true;
settings = {
port-forwarding-enabled = false;
rpc-authentication-required = true;
rpc-port = 9091;
rpc-bind-address = "0.0.0.0";
rpc-username = config.user;
rpc-host-whitelist = config.transmissionServer;
rpc-host-whitelist-enabled = true;
rpc-whitelist = "127.0.0.1,${vpnIp}";
rpc-whitelist-enabled = true;
};
credentialsFile = config.secrets.transmission.dest;
};
# Bind transmission to wireguard namespace
systemd.services.transmission = {
bindsTo = [ "netns@${namespace}.service" ];
requires = [ "network-online.target" "transmission-secret.service" ];
after = [ "wireguard-wg0.service" "transmission-secret.service" ];
unitConfig.JoinsNamespaceOf = "netns@${namespace}.service";
serviceConfig.NetworkNamespacePath = "/var/run/netns/${namespace}";
};
# Create reverse proxy for web UI
caddy.routes = [{
match = [{ host = [ config.transmissionServer ]; }];
handle = [{
handler = "reverse_proxy";
upstreams = [{ dial = "localhost:9091"; }];
}];
}];
# Caddy and Transmission both try to set rmem_max for larger UDP packets.
# We will choose Transmission's recommendation (4 MB).
boot.kernel.sysctl."net.core.rmem_max" = 4194304;
# Allow inbound connections to reach namespace
systemd.services.transmission-web-netns = {
description = "Forward to transmission in wireguard namespace";
requires = [ "transmission.service" ];
after = [ "transmission.service" ];
serviceConfig = {
Restart = "on-failure";
TimeoutStopSec = 300;
};
wantedBy = [ "multi-user.target" ];
script = ''
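        # Bring up loopback inside the namespace, then listen on the host's
        # port 9091 and relay each connection to the RPC port inside the
        # namespace via a nested socat.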
${pkgs.iproute2}/bin/ip netns exec ${namespace} ${pkgs.iproute2}/bin/ip link set dev lo up
${pkgs.socat}/bin/socat tcp-listen:9091,fork,reuseaddr exec:'${pkgs.iproute2}/bin/ip netns exec ${namespace} ${pkgs.socat}/bin/socat STDIO "tcp-connect:${vpnIp}:9091"',nofork
'';
};
# Create credentials file for transmission
secrets.transmission = {
source = ../../private/transmission.json.age;
dest = "${config.secretsDirectory}/transmission.json";
owner = "transmission";
group = "transmission";
};
};
}

nixos/services/vaultwarden.nix

@ -0,0 +1,123 @@
{ config, pkgs, lib, ... }:
let vaultwardenPath = "/var/lib/bitwarden_rs"; # Default service directory
in {
options = {
vaultwardenServer = lib.mkOption {
description = "Hostname for Vaultwarden.";
      type = lib.types.nullOr lib.types.str;
default = null;
};
};
config = lib.mkIf (config.vaultwardenServer != null) {
services.vaultwarden = {
enable = true;
config = {
DOMAIN = "https://${config.vaultwardenServer}";
SIGNUPS_ALLOWED = false;
SIGNUPS_VERIFY = true;
INVITATIONS_ALLOWED = true;
WEB_VAULT_ENABLED = true;
ROCKET_ADDRESS = "127.0.0.1";
ROCKET_PORT = 8222;
WEBSOCKET_ENABLED = true;
WEBSOCKET_ADDRESS = "0.0.0.0";
WEBSOCKET_PORT = 3012;
LOGIN_RATELIMIT_SECONDS = 60;
LOGIN_RATELIMIT_MAX_BURST = 10;
ADMIN_RATELIMIT_SECONDS = 300;
ADMIN_RATELIMIT_MAX_BURST = 3;
};
environmentFile = config.secrets.vaultwarden.dest;
dbBackend = "sqlite";
};
secrets.vaultwarden = {
source = ../../private/vaultwarden.age;
dest = "${config.secretsDirectory}/vaultwarden";
owner = "vaultwarden";
group = "vaultwarden";
};
networking.firewall.allowedTCPPorts = [ 3012 ];
caddy.routes = [{
match = [{ host = [ config.vaultwardenServer ]; }];
handle = [{
handler = "reverse_proxy";
upstreams = [{ dial = "localhost:8222"; }];
headers.request.add."X-Real-IP" = [ "{http.request.remote.host}" ];
}];
}];
## Backup config
# Open to groups, allowing for backups
systemd.services.vaultwarden.serviceConfig.StateDirectoryMode =
lib.mkForce "0770";
systemd.tmpfiles.rules = [
"f ${vaultwardenPath}/db.sqlite3 0660 vaultwarden vaultwarden"
"f ${vaultwardenPath}/db.sqlite3-shm 0660 vaultwarden vaultwarden"
"f ${vaultwardenPath}/db.sqlite3-wal 0660 vaultwarden vaultwarden"
];
# Allow litestream and vaultwarden to share a sqlite database
users.users.litestream.extraGroups = [ "vaultwarden" ];
users.users.vaultwarden.extraGroups = [ "litestream" ];
# Backup sqlite database with litestream
services.litestream = {
settings = {
dbs = [{
path = "${vaultwardenPath}/db.sqlite3";
replicas = [{
url =
"s3://${config.backupS3.bucket}.${config.backupS3.endpoint}/vaultwarden";
}];
}];
};
};
# Don't start litestream unless vaultwarden is up
systemd.services.litestream = {
after = [ "vaultwarden.service" ];
requires = [ "vaultwarden.service" ];
};
# Run a separate file backup on a schedule
systemd.timers.vaultwarden-backup = {
timerConfig = {
OnCalendar = "*-*-* 06:00:00"; # Once per day
Unit = "vaultwarden-backup.service";
};
wantedBy = [ "timers.target" ];
};
# Backup other Vaultwarden data to object storage
systemd.services.vaultwarden-backup = {
description = "Backup Vaultwarden files";
environment.AWS_ACCESS_KEY_ID = config.backupS3.accessKeyId;
serviceConfig = {
Type = "oneshot";
User = "vaultwarden";
Group = "backup";
EnvironmentFile = config.secrets.backup.dest;
};
script = ''
${pkgs.awscli2}/bin/aws s3 sync \
${vaultwardenPath}/ \
s3://${config.backupS3.bucket}/vaultwarden/ \
--endpoint-url=https://${config.backupS3.endpoint} \
--exclude "*db.sqlite3*" \
--exclude ".db.sqlite3*"
'';
};
};
}

nixos/services/wireguard.nix

@ -0,0 +1,44 @@
{ config, pkgs, lib, ... }: {
options.wireguard.enable = lib.mkEnableOption "Wireguard VPN setup.";
config = lib.mkIf (pkgs.stdenv.isLinux && config.wireguard.enable) {
networking.wireguard = {
enable = true;
interfaces = {
wg0 = {
# Establishes identity of this machine
generatePrivateKeyFile = false;
privateKeyFile = config.secrets.wireguard.dest;
# Move to network namespace for isolating programs
interfaceNamespace = "wg";
};
};
};
# Create namespace for Wireguard
# This allows us to isolate specific programs to Wireguard
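    # (transmission.nix above joins this namespace through JoinsNamespaceOf
    # and NetworkNamespacePath)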
systemd.services."netns@" = {
description = "%I network namespace";
before = [ "network.target" ];
serviceConfig = {
Type = "oneshot";
RemainAfterExit = true;
ExecStart = "${pkgs.iproute2}/bin/ip netns add %I";
ExecStop = "${pkgs.iproute2}/bin/ip netns del %I";
};
};
# Create private key file for wireguard
secrets.wireguard = {
source = ../../private/wireguard.age;
dest = "${config.secretsDirectory}/wireguard";
};
};
}