initial refactoring

This commit is contained in:
Noah Masur
2025-01-20 22:35:40 -05:00
parent a4b5e05f8f
commit c7933f8502
209 changed files with 5998 additions and 5308 deletions

View File

@ -0,0 +1,33 @@
# Preset: personal CalDAV calendar account synced from the self-hosted
# "content" host (Nextcloud DAV endpoint), browsed locally with GNOME Calendar.
{
  config,
  pkgs,
  lib,
  ...
}:
let
  cfg = config.nmasur.presets.programs.calendar;
in
{
  options.nmasur.presets.programs.calendar.enable = lib.mkEnableOption "Calendar application";
  config = lib.mkIf cfg.enable {
    accounts.calendar.accounts.default = {
      basePath = "other/calendars"; # Where to save calendars in ~ directory
      name = "personal";
      # Calendars are mirrored to plain files on disk
      local.type = "filesystem";
      primary = true;
      remote = {
        # NOTE(review): passwordCommand is an empty string — presumably
        # credentials are provided elsewhere; confirm sync actually authenticates.
        passwordCommand = [ "" ];
        type = "caldav";
        # DAV principal URL for this user on the content host
        url = "https://${config.hostnames.content}/remote.php/dav/principals/users/${config.user}";
        userName = config.user;
      };
    };
    # Local GUI front-end for the synced calendars
    home.packages = [ pkgs.gnome-calendar ];
  };
}

View File

@ -0,0 +1,42 @@
# Preset: replace sudo with doas, a smaller privilege-escalation tool.
{
  config,
  lib,
  ...
}:
let
  cfg = config.nmasur.presets.programs.doas;
in
{
  options.nmasur.presets.programs.doas.enable = lib.mkEnableOption "doas sudo alternative";

  config = lib.mkIf cfg.enable {
    # Drop sudo entirely in favor of doas
    security.sudo.enable = false;
    security.doas = {
      enable = true;
      # Trusted (wheel) users may escalate without a password
      wheelNeedsPassword = false;
      # Pass environment variables from user to root; the passwordless
      # behavior must also be repeated in the explicit rule set below
      extraRules = [
        {
          groups = [ "wheel" ];
          noPass = true;
          keepEnv = true;
        }
      ];
    };
  };
}

View File

@ -0,0 +1,43 @@
# Preset: Dwarf Fortress (full package with DFHack) plus desktop launchers
# for the game and Dwarf Therapist.
{
  config,
  pkgs,
  lib,
  ...
}:
let
  cfg = config.nmasur.presets.programs.dwarf-fortress;
  dfFull = pkgs.dwarf-fortress-packages.dwarf-fortress-full;
  # Helper: declare a desktop launcher for one of the bundled binaries
  mkLauncher =
    name: desktopName: binary:
    pkgs.makeDesktopItem {
      inherit name desktopName;
      exec = "${dfFull}/bin/${binary}";
      terminal = false;
    };
in
{
  options.nmasur.presets.programs.dwarf-fortress.enable = lib.mkEnableOption "Dwarf Fortress";

  config = lib.mkIf cfg.enable {
    # Game and theme ship under unfree licenses
    unfreePackages = [
      "dwarf-fortress"
      "phoebus-theme"
    ];
    environment.systemPackages = [
      dfFull
      (mkLauncher "dwarf-fortress" "Dwarf Fortress" "dfhack")
      (mkLauncher "dwarftherapist" "Dwarf Therapist" "dwarftherapist")
    ];
  };
}

View File

@ -0,0 +1,29 @@
# Preset: Nautilus file manager with previews and virtual filesystem support.
{
  config,
  pkgs,
  lib,
  ...
}:
let
  cfg = config.nmasur.presets.programs.nautilus;
in
{
  options.nmasur.presets.programs.nautilus.enable = lib.mkEnableOption "Nautilus file manager";

  config = lib.mkIf cfg.enable {
    environment.systemPackages = [ pkgs.nautilus ];
    services = {
      # Quick preview with spacebar
      gnome.sushi.enable = true;
      # Allow client browsing Samba and virtual filesystem shares
      gvfs.enable = true;
    };
  };
}

View File

@ -0,0 +1,48 @@
# Preset: Steam game client with Proton-GE, Gamescope session, and
# terminal/overlay tooling.
{
  config,
  pkgs,
  lib,
  ...
}:
let
  cfg = config.nmasur.presets.programs.steam;
in
{
  options.nmasur.presets.programs.steam.enable = lib.mkEnableOption "Steam game client";

  config = lib.mkIf cfg.enable {
    # Controller / headset udev rules
    hardware.steam-hardware.enable = true;
    # Steam and its helper packages are unfree
    unfreePackages = [
      "steam"
      "steam-original"
      "steamcmd"
      "steam-run"
      "steam-unwrapped"
    ];
    programs.steam = {
      enable = true;
      remotePlay.openFirewall = true;
      extraCompatPackages = [ pkgs.proton-ge-bin ];
      gamescopeSession.enable = true;
    };
    environment.systemPackages = [
      # Enable terminal interaction
      pkgs.steamcmd
      pkgs.steam-tui
      # Overlay with performance monitoring
      pkgs.mangohud
    ];
    # Seems like NetworkManager can help speed up Steam launch
    # https://www.reddit.com/r/archlinux/comments/qguhco/steam_startup_time_arch_1451_seconds_fedora_34/hi8opet/
    networking.networkmanager.enable = true;
  };
}

View File

@ -0,0 +1,18 @@
{
config,
pkgs,
lib,
...
}:
let
cfg = config.nmasur.presets.programs.;
in
{
options.nmasur.presets.programs..enable = lib.mkEnableOption "";
config = lib.mkIf cfg.enable {
};
}

View File

@ -0,0 +1,27 @@
{
config,
pkgs,
lib,
...
}:
let
cfg = config.nmasur.presets.services.zfs;
in
{
options.nmasur.presets.services.zfs.enable = lib.mkEnableOption "ZFS file system";
config = lib.mkIf cfg.enable {
# Only use compatible Linux kernel, since ZFS can be behind
boot.kernelPackages = pkgs.linuxPackages; # Defaults to latest LTS
boot.kernelParams = [ "nohibernate" ]; # ZFS does not work with hibernation
boot.supportedFilesystems = [ "zfs" ];
services.prometheus.exporters.zfs.enable = config.prometheus.exporters.enable;
prometheus.scrapeTargets = [
"127.0.0.1:${builtins.toString config.services.prometheus.exporters.zfs.port}"
];
};
}

View File

@ -0,0 +1,81 @@
{
config,
lib,
...
}:
let
cfg = config.nmasur.presets.services.actualbudget;
in
{
options.nmasur.presets.services.actualbudget = {
enable = lib.mkEnableOption "ActualBudget budgeting service";
port = lib.mkOption {
type = lib.types.port;
description = "Port to use for the localhost";
default = 5006;
};
};
config = lib.mkIf cfg.enable {
virtualisation.podman.enable = true;
users.users.actualbudget = {
isSystemUser = true;
group = "shared";
uid = 980;
};
# Create budget directory, allowing others to manage it
systemd.tmpfiles.rules = [
"d /var/lib/actualbudget 0770 actualbudget shared"
];
virtualisation.oci-containers.containers.actualbudget = {
workdir = null;
volumes = [ "/var/lib/actualbudget:/data" ];
user = "${toString (builtins.toString config.users.users.actualbudget.uid)}";
pull = "missing";
privileged = false;
ports = [ "127.0.0.1:${builtins.toString config.services.actualbudget.port}:5006" ];
networks = [ ];
log-driver = "journald";
labels = {
app = "actualbudget";
};
image = "ghcr.io/actualbudget/actual-server:25.1.0";
hostname = null;
environmentFiles = [ ];
environment = {
DEBUG = "actual:config"; # Enable debug logging
ACTUAL_TRUSTED_PROXIES = builtins.concatStringsSep "," [ "127.0.0.1" ];
};
dependsOn = [ ];
autoStart = true;
};
# Allow web traffic to Caddy
caddy.routes = [
{
match = [ { host = [ config.hostnames.budget ]; } ];
handle = [
{
handler = "reverse_proxy";
upstreams = [ { dial = "localhost:${builtins.toString config.services.actualbudget.port}"; } ];
}
];
}
];
# Configure Cloudflare DNS to point to this machine
services.cloudflare-dyndns.domains = [ config.hostnames.budget ];
# Backups
services.restic.backups.default.paths = [ "/var/lib/actualbudget" ];
};
}

View File

@ -0,0 +1,288 @@
{
config,
pkgs,
lib,
...
}:
let
cfg = config.nmasur.presets.services.actualbudget;
# This config specifies ports for Prometheus to scrape information
arrConfig = {
radarr = {
exportarrPort = "9707";
url = "localhost:7878";
apiKey = config.secrets.radarrApiKey.dest;
};
readarr = {
exportarrPort = "9711";
url = "localhost:8787";
apiKey = config.secrets.readarrApiKey.dest;
};
sonarr = {
exportarrPort = "9708";
url = "localhost:8989";
apiKey = config.secrets.sonarrApiKey.dest;
};
prowlarr = {
exportarrPort = "9709";
url = "localhost:9696";
apiKey = config.secrets.prowlarrApiKey.dest;
};
sabnzbd = {
exportarrPort = "9710";
url = "localhost:8085";
apiKey = config.secrets.sabnzbdApiKey.dest;
};
};
in
{
options.nmasur.presets.services.arrs.enable = lib.mkEnableOption "Arr services";
config = lib.mkIf cfg.enable {
# Required
config.nmasur.profiles.shared-media.enable = true; # Shared user for multiple services
# # Broken on 2024-12-07
# # https://discourse.nixos.org/t/solved-sonarr-is-broken-in-24-11-unstable-aka-how-the-hell-do-i-use-nixpkgs-config-permittedinsecurepackages/
# insecurePackages = [
# "aspnetcore-runtime-wrapped-6.0.36"
# "aspnetcore-runtime-6.0.36"
# "dotnet-sdk-wrapped-6.0.428"
# "dotnet-sdk-6.0.428"
# ];
services = {
bazarr = {
enable = true;
group = lib.mkIf config.nmasur.profiles.shared-media.enable "shared";
};
jellyseerr.enable = true;
prowlarr.enable = true;
sabnzbd = {
enable = true;
group = lib.mkIf config.nmasur.profiles.shared-media.enable "shared";
# The config file must be editable within the application
# It contains server configs and credentials
configFile = "/data/downloads/sabnzbd/sabnzbd.ini";
};
sonarr = {
enable = true;
group = lib.mkIf config.nmasur.profiles.shared-media.enable "shared";
};
radarr = {
enable = true;
group = lib.mkIf config.nmasur.profiles.shared-media.enable "shared";
};
readarr = {
enable = true;
group = lib.mkIf config.nmasur.profiles.shared-media.enable "shared";
};
};
# Allows shared group to read/write the sabnzbd directory
users.users.sabnzbd.homeMode = "0770";
unfreePackages = [ "unrar" ]; # Required as a dependency for sabnzbd
# Requires updating the base_url config value in each service
# If you try to rewrite the URL, the service won't redirect properly
caddy.routes = [
{
# Group means that routes with the same name are mutually exclusive,
# so they are split between the appropriate services.
group = "download";
match = [
{
host = [ config.hostnames.download ];
path = [ "/sonarr*" ];
}
];
handle = [
{
handler = "reverse_proxy";
# We're able to reference the url and port of the service dynamically
upstreams = [ { dial = arrConfig.sonarr.url; } ];
}
];
}
{
group = "download";
match = [
{
host = [ config.hostnames.download ];
path = [ "/radarr*" ];
}
];
handle = [
{
handler = "reverse_proxy";
upstreams = [ { dial = arrConfig.radarr.url; } ];
}
];
}
{
group = "download";
match = [
{
host = [ config.hostnames.download ];
path = [ "/readarr*" ];
}
];
handle = [
{
handler = "reverse_proxy";
upstreams = [ { dial = arrConfig.readarr.url; } ];
}
];
}
{
group = "download";
match = [
{
host = [ config.hostnames.download ];
path = [ "/prowlarr*" ];
}
];
handle = [
{
handler = "reverse_proxy";
# Prowlarr doesn't offer a dynamic config, so we have to hardcode it
upstreams = [ { dial = "localhost:9696"; } ];
}
];
}
{
group = "download";
match = [
{
host = [ config.hostnames.download ];
path = [ "/bazarr*" ];
}
];
handle = [
{
handler = "reverse_proxy";
upstreams = [
{
# Bazarr only dynamically sets the port, not the host
dial = "localhost:${builtins.toString config.services.bazarr.listenPort}";
}
];
}
];
}
{
group = "download";
match = [
{
host = [ config.hostnames.download ];
path = [ "/sabnzbd*" ];
}
];
handle = [
{
handler = "reverse_proxy";
upstreams = [ { dial = arrConfig.sabnzbd.url; } ];
}
];
}
{
group = "download";
match = [ { host = [ config.hostnames.download ]; } ];
handle = [
{
handler = "reverse_proxy";
upstreams = [ { dial = "localhost:${builtins.toString config.services.jellyseerr.port}"; } ];
}
];
}
];
# Configure Cloudflare DNS to point to this machine
services.cloudflare-dyndns.domains = [ config.hostnames.download ];
# Enable Prometheus exporters
systemd.services = lib.mapAttrs' (name: attrs: {
name = "prometheus-${name}-exporter";
value = {
description = "Export Prometheus metrics for ${name}";
after = [ "network.target" ];
wantedBy = [ "${name}.service" ];
serviceConfig = {
Type = "simple";
DynamicUser = true;
ExecStart =
let
# Sabnzbd doesn't accept the URI path, unlike the others
url = if name != "sabnzbd" then "http://${attrs.url}/${name}" else "http://${attrs.url}";
in
# Exportarr is trained to pull from the arr services
''
${pkgs.exportarr}/bin/exportarr ${name} \
--url ${url} \
--port ${attrs.exportarrPort}'';
EnvironmentFile = lib.mkIf (builtins.hasAttr "apiKey" attrs) attrs.apiKey;
Restart = "on-failure";
ProtectHome = true;
ProtectSystem = "strict";
PrivateTmp = true;
PrivateDevices = true;
ProtectHostname = true;
ProtectClock = true;
ProtectKernelTunables = true;
ProtectKernelModules = true;
ProtectKernelLogs = true;
ProtectControlGroups = true;
NoNewPrivileges = true;
RestrictRealtime = true;
RestrictSUIDSGID = true;
RemoveIPC = true;
PrivateMounts = true;
};
};
}) arrConfig;
# Secrets for Prometheus exporters
secrets.radarrApiKey = {
source = ../../../private/radarr-api-key.age;
dest = "/var/private/radarr-api";
prefix = "API_KEY=";
};
secrets.readarrApiKey = {
source = ../../../private/radarr-api-key.age;
dest = "/var/private/readarr-api";
prefix = "API_KEY=";
};
secrets.sonarrApiKey = {
source = ../../../private/sonarr-api-key.age;
dest = "/var/private/sonarr-api";
prefix = "API_KEY=";
};
secrets.prowlarrApiKey = {
source = ../../../private/prowlarr-api-key.age;
dest = "/var/private/prowlarr-api";
prefix = "API_KEY=";
};
secrets.sabnzbdApiKey = {
source = ../../../private/sabnzbd-api-key.age;
dest = "/var/private/sabnzbd-api";
prefix = "API_KEY=";
};
# Prometheus scrape targets (expose Exportarr to Prometheus)
prometheus.scrapeTargets = map (
key:
"127.0.0.1:${
lib.attrsets.getAttrFromPath [
key
"exportarrPort"
] arrConfig
}"
) (builtins.attrNames arrConfig);
};
}

View File

@ -0,0 +1,48 @@
{
config,
lib,
globals,
...
}:
let
cfg = config.nmasur.presets.services.audiobookshelf;
in
{
options.nmasur.presets.services.audiobookshelf.enable =
lib.mkEnableOption "Audiobookshelf e-book and audiobook manager";
config = lib.mkIf cfg.enable {
services.audiobookshelf = {
enable = true;
# Setting a generic group to make it easier for the different programs
# that make use of the same files
group = lib.mkIf config.nmasur.profiles.shared-media.enable "shared";
# This is the default /var/lib/audiobookshelf
dataDir = "audiobookshelf";
};
# Allow web traffic to Caddy
caddy.routes = [
{
match = [ { host = [ globals.hostnames.audiobooks ]; } ];
handle = [
{
handler = "reverse_proxy";
upstreams = [ { dial = "localhost:${builtins.toString config.services.audiobookshelf.port}"; } ];
}
];
}
];
# Configure Cloudflare DNS to point to this machine
services.cloudflare-dyndns.domains = [ globals.hostnames.audiobooks ];
};
}

View File

@ -0,0 +1,33 @@
{
config,
lib,
...
}:
let
cfg = config.nmasur.presets.services.avahi;
in
{
options.nmasur.presets.services.avahi.enable = lib.mkEnableOption "Avahi DNS service discovery";
config = lib.mkIf cfg.enable {
# DNS service discovery
services.avahi = {
enable = true;
domainName = "local";
ipv6 = false; # Should work either way
# Resolve local hostnames using Avahi DNS
nssmdns4 = true;
publish = {
enable = true;
addresses = true;
domain = true;
workstation = true;
};
};
};
}

View File

@ -0,0 +1,93 @@
# Bind is a DNS service. This allows me to resolve public domains locally so
# when I'm at home, I don't have to travel over the Internet to reach my
# server.
# To set this on all home machines, I point my router's DNS resolver to the
# local IP address of the machine running this service (swan).
{
config,
pkgs,
lib,
...
}:
let
cfg = config.nmasur.presets.services.bind;
localIp = "192.168.1.218";
localServices = [
config.hostnames.stream
config.hostnames.content
config.hostnames.books
config.hostnames.download
config.hostnames.photos
];
mkRecord = service: "${service} A ${localIp}";
localRecords = lib.concatLines (map mkRecord localServices);
in
{
options.nmasur.presets.services.bind.enable = lib.mkEnableOption "Bind DNS server";
config = lib.mkIf cfg.enable {
# Normally I block all requests not coming from Cloudflare, so I have to also
# allow my local network.
caddy.cidrAllowlist = [ "192.168.0.0/16" ];
services.bind = {
enable = true;
# Allow requests coming from these IPs. This way I don't somehow get
# spammed with DNS requests coming from the Internet.
cacheNetworks = [
"127.0.0.0/24"
"192.168.0.0/16"
"::1/128" # Required because IPv6 loopback now added to resolv.conf
# (see: https://github.com/NixOS/nixpkgs/pull/302228)
];
# When making normal DNS requests, forward them to Cloudflare to resolve.
forwarders = [
"1.1.1.1"
"1.0.0.1"
];
ipv4Only = false;
# Use rpz zone as an override
extraOptions = ''response-policy { zone "rpz"; };'';
zones = {
rpz = {
master = true;
file = pkgs.writeText "db.rpz" ''
$TTL 60 ; 1 minute
@ IN SOA localhost. root.localhost. (
2023071800 ; serial
1h ; refresh
30m ; retry
1w ; expire
30m ; minimum ttl
)
IN NS localhost.
localhost A 127.0.0.1
${localRecords}
'';
};
};
};
# We must allow DNS traffic to hit our machine as well
networking.firewall.allowedTCPPorts = [ 53 ];
networking.firewall.allowedUDPPorts = [ 53 ];
# Set our own nameservers to ourselves
networking.nameservers = [
"127.0.0.1"
"::1"
];
};
}

View File

@ -0,0 +1,228 @@
# Caddy is a reverse proxy, like Nginx or Traefik. This creates an ingress
# point from my local network or the public (via Cloudflare). Instead of a
# Caddyfile, I'm using the more expressive JSON config file format. This means
# I can source routes from other areas in my config and build the JSON file
# using the result of the expression.
# Caddy helpfully provides automatic ACME cert generation and management, but
# it requires a form of validation. We are using a custom build of Caddy
# (compiled with an overlay) to insert a plugin for managing DNS validation
# with Cloudflare's DNS API.
{
config,
pkgs,
lib,
...
}:
let
cfg = config.nmasur.presets.services.caddy;
in
{
options.nmasur.presets.services.caddy = {
enable = lib.mkEnableOption "Caddy reverse-proxy";
tlsPolicies = lib.mkOption {
type = lib.types.listOf lib.types.attrs;
description = "Caddy JSON TLS issuer policies";
default = [ ];
};
routes = lib.mkOption {
type = lib.types.listOf lib.types.attrs;
description = "Caddy JSON routes for http servers";
default = [ ];
};
blocks = lib.mkOption {
type = lib.types.listOf lib.types.attrs;
description = "Caddy JSON error blocks for http servers";
default = [ ];
};
cidrAllowlist = lib.mkOption {
type = lib.types.listOf lib.types.str;
description = "CIDR blocks to allow for requests";
default = [ ];
merge = lib.mkMerge; # Ensure that values are merged from default
};
};
config = lib.mkIf cfg.enable {
# Force Caddy to 403 if not coming from allowlisted source
caddy.cidrAllowlist = lib.mkDefault [ "127.0.0.1/32" ];
caddy.routes = lib.mkBefore [
{
match = [ { not = [ { remote_ip.ranges = config.caddy.cidrAllowlist; } ]; } ];
handle = [
{
handler = "static_response";
status_code = "403";
}
];
}
];
services.caddy =
let
default_logger_name = "other";
roll_size_mb = 25;
# Extract list of hostnames (fqdns) from current caddy routes
getHostnameFromMatch = match: if (lib.hasAttr "host" match) then match.host else [ ];
getHostnameFromRoute =
route:
if (lib.hasAttr "match" route) then (lib.concatMap getHostnameFromMatch route.match) else [ ];
hostnames_non_unique = lib.concatMap getHostnameFromRoute config.caddy.routes;
hostnames = lib.unique hostnames_non_unique;
# Create attrset of subdomains to their fqdns
hostname_map = builtins.listToAttrs (
map (hostname: {
name = builtins.head (lib.splitString "." hostname);
value = hostname;
}) hostnames
);
in
{
adapter = "''"; # Required to enable JSON
configFile = pkgs.writeText "Caddyfile" (
builtins.toJSON {
apps.http.servers.main = {
listen = [ ":443" ];
# These routes are pulled from the rest of this repo
routes = config.caddy.routes;
errors.routes = config.caddy.blocks;
# Uncommenting collects access logs
logs = {
inherit default_logger_name;
# Invert hostnames keys and values
logger_names = lib.mapAttrs' (name: value: {
name = value;
value = name;
}) hostname_map;
};
};
apps.http.servers.metrics = { }; # Enables Prometheus metrics
apps.tls.automation.policies = config.caddy.tlsPolicies;
# Setup logging to journal and files
logging.logs =
{
# System logs and catch-all
# Must be called `default` to override Caddy's built-in default logger
default = {
level = "INFO";
encoder.format = "console";
writer = {
output = "stderr";
};
exclude = (map (hostname: "http.log.access.${hostname}") (builtins.attrNames hostname_map)) ++ [
"http.log.access.${default_logger_name}"
];
};
# This is for the default access logs (anything not captured by hostname)
other = {
level = "INFO";
encoder.format = "json";
writer = {
output = "file";
filename = "${config.services.caddy.logDir}/other.log";
roll = true;
inherit roll_size_mb;
};
include = [ "http.log.access.${default_logger_name}" ];
};
# This is for using the Caddy API, which will probably never happen
admin = {
level = "INFO";
encoder.format = "json";
writer = {
output = "file";
filename = "${config.services.caddy.logDir}/admin.log";
roll = true;
inherit roll_size_mb;
};
include = [ "admin" ];
};
# This is for TLS cert management tracking
tls = {
level = "INFO";
encoder.format = "json";
writer = {
output = "file";
filename = "${config.services.caddy.logDir}/tls.log";
roll = true;
inherit roll_size_mb;
};
include = [ "tls" ];
};
# This is for debugging
debug = {
level = "DEBUG";
encoder.format = "json";
writer = {
output = "file";
filename = "${config.services.caddy.logDir}/debug.log";
roll = true;
roll_keep = 1;
inherit roll_size_mb;
};
};
}
# These are the access logs for individual hostnames
// (lib.mapAttrs (name: value: {
level = "INFO";
encoder.format = "json";
writer = {
output = "file";
filename = "${config.services.caddy.logDir}/${name}-access.log";
roll = true;
inherit roll_size_mb;
};
include = [ "http.log.access.${name}" ];
}) hostname_map)
# We also capture just the errors separately for easy debugging
// (lib.mapAttrs' (name: value: {
name = "${name}-error";
value = {
level = "ERROR";
encoder.format = "json";
writer = {
output = "file";
filename = "${config.services.caddy.logDir}/${name}-error.log";
roll = true;
inherit roll_size_mb;
};
include = [ "http.log.access.${name}" ];
};
}) hostname_map);
}
);
};
systemd.services.caddy.serviceConfig = {
# Allows Caddy to serve lower ports (443, 80)
AmbientCapabilities = "CAP_NET_BIND_SERVICE";
# Prevent flooding of logs by rate-limiting
LogRateLimitIntervalSec = "5s"; # Limit period
LogRateLimitBurst = 100; # Limit threshold
};
# Required for web traffic to reach this machine
networking.firewall.allowedTCPPorts = [
80
443
];
# HTTP/3 QUIC uses UDP (not sure if being used)
networking.firewall.allowedUDPPorts = [ 443 ];
# Caddy exposes Prometheus metrics with the admin API
# https://caddyserver.com/docs/api
prometheus.scrapeTargets = [ "127.0.0.1:2019" ];
};
}

View File

@ -0,0 +1,89 @@
# Calibre-web is an E-Book library and management tool.
# - Exposed to the public via Caddy.
# - Hostname defined with config.hostnames.books
# - File directory backed up to S3 on a cron schedule.
{
config,
pkgs,
lib,
...
}:
let
cfg = config.nmasur.presets.services.calibre-web;
libraryPath = "/data/books";
in
{
options.nmasur.presets.services.calibre-web = {
enable = lib.mkEnableOption "Calibre-Web e-book manager";
};
config = lib.mkIf cfg.enable {
services.calibre-web = {
group = lib.mkIf config.nmasur.profiles.shared-media.enable "shared";
openFirewall = true;
options = {
reverseProxyAuth.enable = false;
enableBookConversion = true;
enableBookUploading = true;
calibreLibrary = libraryPath;
};
};
# Allow web traffic to Caddy
caddy.routes = [
{
match = [ { host = [ config.hostnames.books ]; } ];
handle = [
{
handler = "reverse_proxy";
upstreams = [
{ dial = "localhost:${builtins.toString config.services.calibre-web.listen.port}"; }
];
# This is required when calibre-web is behind a reverse proxy
# https://github.com/janeczku/calibre-web/issues/19
headers.request.add."X-Script-Name" = [ "/calibre-web" ];
}
];
}
];
# Configure Cloudflare DNS to point to this machine
services.cloudflare-dyndns.domains = [ config.hostnames.books ];
# Grant user access to Calibre directories
users.users.${config.user}.extraGroups = [ "calibre-web" ];
# Run a backup on a schedule
systemd.timers.calibre-backup = lib.mkIf config.backups.calibre {
timerConfig = {
OnCalendar = "*-*-* 00:00:00"; # Once per day
Unit = "calibre-backup.service";
};
wantedBy = [ "timers.target" ];
};
# Backup Calibre data to object storage
systemd.services.calibre-backup = {
description = "Backup Calibre data";
environment.AWS_ACCESS_KEY_ID = config.backup.s3.accessKeyId;
serviceConfig = {
Type = "oneshot";
User = "calibre-web";
Group = "backup";
EnvironmentFile = config.secrets.backup.dest;
};
script = ''
${pkgs.awscli2}/bin/aws s3 sync \
${libraryPath}/ \
s3://${config.backup.s3.bucket}/calibre/ \
--endpoint-url=https://${config.backup.s3.endpoint}
'';
};
};
}

View File

@ -0,0 +1,105 @@
# Cloudflare Tunnel is a service for accessing the network even behind a
# firewall, through outbound-only requests. It works by installing an agent on
# our machines that exposes services through Cloudflare Access (Zero Trust),
# similar to something like Tailscale.
# In this case, we're using Cloudflare Tunnel to enable SSH access over a web
# browser even when outside of my network. This is probably not the safest
# choice but I feel comfortable enough with it anyway.
{ config, lib, ... }:
# First time setup:
# nix-shell -p cloudflared
# cloudflared tunnel login
# cloudflared tunnel create <host>
# nix run github:nmasur/dotfiles#encrypt-secret > private/cloudflared-<host>.age
# Paste ~/.cloudflared/<id>.json
# Set tunnel.id = "<id>"
# Remove ~/.cloudflared/
# For SSH access:
# Cloudflare Zero Trust -> Access -> Applications -> Create Application
# Service Auth -> SSH -> Select Application -> Generate Certificate
# Set ca = "<public key>"
let
cfg = config.nmasur.presets.services.cloudflared;
in
{
options.nmasur.presets.services.cloudflared = {
enable = lib.mkEnableOption "Cloudflare tunnel";
tunnel = {
id = lib.mkOption {
type = lib.types.str;
description = "Cloudflare tunnel ID";
};
credentialsFile = lib.mkOption {
type = lib.types.path;
description = "Cloudflare tunnel credentials file (age-encrypted)";
};
ca = lib.mkOption {
type = lib.types.str;
description = "Cloudflare tunnel CA public key";
};
};
};
config = lib.mkIf cfg.enable {
services.cloudflared = {
enable = true;
tunnels = {
"${cfg.tunnel.id}" = {
credentialsFile = config.secrets.cloudflared.dest;
# Catch-all if no match (should never happen anyway)
default = "http_status:404";
# Match from ingress of any valid server name to SSH access
ingress = {
"*.masu.rs" = "ssh://localhost:22";
};
};
};
};
# Grant Cloudflare access to SSH into this server
environment.etc = {
"ssh/ca.pub".text = ''
${cfg.tunnel.ca}
'';
# Must match the username portion of the email address in Cloudflare
# Access
"ssh/authorized_principals".text = ''
${config.user}
'';
};
# Adjust SSH config to allow access from Cloudflare's certificate
services.openssh.extraConfig = ''
PubkeyAuthentication yes
TrustedUserCAKeys /etc/ssh/ca.pub
Match User '${config.user}'
AuthorizedPrincipalsFile /etc/ssh/authorized_principals
# if there is no existing AuthenticationMethods
AuthenticationMethods publickey
'';
services.openssh.settings.Macs = [ "hmac-sha2-512" ]; # Fix for failure to find matching mac
# Create credentials file for Cloudflare
secrets.cloudflared = {
source = cfg.tunnel.credentialsFile;
dest = "${config.secretsDirectory}/cloudflared";
owner = "cloudflared";
group = "cloudflared";
permissions = "0440";
};
systemd.services.cloudflared-secret = {
requiredBy = [ "cloudflared-tunnel-${cfg.tunnel.id}.service" ];
before = [ "cloudflared-tunnel-${cfg.tunnel.id}.service" ];
};
};
}

View File

@ -0,0 +1,25 @@
{
config,
pkgs,
lib,
...
}:
let
cfg = config.nmasur.presets.services.empty-trash;
in
{
options.nmasur.presets.services.empty-trash.enable = lib.mkEnableOption "automatically empty trash";
config = lib.mkIf cfg.enable {
# Delete Trash files older than 1 week
systemd.user.services.empty-trash = {
description = "Empty Trash on a regular basis";
wantedBy = [ "default.target" ];
script = "${pkgs.trash-cli}/bin/trash-empty 7";
};
};
}

View File

@ -0,0 +1,40 @@
{
config,
lib,
...
}:
let
cfg = config.nmasur.presets.services.filebrowser;
in
{
options.nmasur.presets.services.filebrowser.enable = lib.mkEnableOption "Filebrowser private files";
config = lib.mkIf cfg.enable {
services.filebrowser = {
enable = true;
# Generate password: htpasswd -nBC 10 "" | tr -d ':\n'
password = "$2y$10$ze1cMob0k6pnXRjLowYfZOVZWg4G.dsPtH3TohbUeEbI0sdkG9.za";
};
caddy.routes = [
{
match = [ { host = [ config.hostnames.files ]; } ];
handle = [
{
handler = "reverse_proxy";
upstreams = [
{ dial = "localhost:8020"; }
];
}
];
}
];
# Configure Cloudflare DNS to point to this machine
services.cloudflare-dyndns.domains = [ config.hostnames.files ];
};
}

View File

@ -0,0 +1,70 @@
# Gitea Actions is a CI/CD service for the Gitea source code server, meaning it
# allows us to run code operations (such as testing or deploys) when our git
# repositories are updated. Any machine can act as a Gitea Action Runner, so
# the Runners don't necessarily need to be running Gitea. All we need is an API
# key for Gitea to connect to it and register ourselves as a Runner.
{
config,
pkgs,
lib,
...
}:
let
cfg = config.nmasur.presets.services.gitea-runner-local;
in
{
options.nmasur.presets.services.gitea-runner-local.enable =
lib.mkEnableOption "Gitea Actions runner local to Gitea instance";
config = lib.mkIf cfg.enable {
services.gitea-actions-runner.instances.${config.networking.hostName} = {
enable = true;
labels = [
# Provide a Debian base with NodeJS for actions
# "debian-latest:docker://node:18-bullseye"
# Fake the Ubuntu name, because Node provides no Ubuntu builds
# "ubuntu-latest:docker://node:18-bullseye"
# Provide native execution on the host using below packages
"native:host"
];
hostPackages = with pkgs; [
bash
coreutils
curl
gawk
gitMinimal
gnused
nodejs
wget
];
name = config.networking.hostName;
url = "https://${config.hostnames.git}";
tokenFile = config.secrets.giteaRunnerToken.dest;
};
# Make sure the runner doesn't start until after Gitea
systemd.services."gitea-runner-${config.networking.hostName}".after = [ "gitea.service" ];
# API key needed to connect to Gitea
secrets.giteaRunnerToken = {
source = ../../../private/gitea-runner-token.age; # TOKEN=xyz
dest = "${config.secretsDirectory}/gitea-runner-token";
};
systemd.services.giteaRunnerToken-secret = {
requiredBy = [
"gitea-runner-${
config.services.gitea-actions-runner.instances.${config.networking.hostName}.name
}.service"
];
before = [
"gitea-runner-${
config.services.gitea-actions-runner.instances.${config.networking.hostName}.name
}.service"
];
};
};
}

View File

@ -0,0 +1,156 @@
# Preset: Gitea git server backed by SQLite, proxied by Caddy, with Litestream
# database replication and a daily S3 file backup.
{
  config,
  pkgs,
  lib,
  ...
}:
let
  cfg = config.nmasur.presets.services.gitea;
  giteaPath = "/var/lib/gitea"; # Default service directory
in
{
  options.nmasur.presets.services.gitea.enable = lib.mkEnableOption "Gitea git server";
  config = lib.mkIf cfg.enable {
    services.gitea = {
      enable = true;
      database.type = "sqlite3";
      settings = {
        actions.ENABLED = true;
        metrics.ENABLED = true;
        repository = {
          # Pushing to a repo that doesn't exist automatically creates one as
          # private.
          DEFAULT_PUSH_CREATE_PRIVATE = true;
          # Allow git over HTTP.
          DISABLE_HTTP_GIT = false;
          # Allow requests hitting the specified hostname.
          ACCESS_CONTROL_ALLOW_ORIGIN = config.hostnames.git;
          # Automatically create viable users/orgs on push.
          ENABLE_PUSH_CREATE_USER = true;
          ENABLE_PUSH_CREATE_ORG = true;
          # Default when creating new repos.
          DEFAULT_BRANCH = "main";
        };
        server = {
          # Local listener; only Caddy talks to it directly
          HTTP_PORT = 3001;
          HTTP_ADDRESS = "127.0.0.1";
          ROOT_URL = "https://${config.hostnames.git}/";
          SSH_PORT = 22;
          START_SSH_SERVER = false; # Use sshd instead
          DISABLE_SSH = false;
        };
        # Don't allow public users to register accounts.
        service.DISABLE_REGISTRATION = true;
        # Force using HTTPS for all session access.
        session.COOKIE_SECURE = true;
        # Hide users' emails.
        ui.SHOW_USER_EMAIL = false;
      };
      extraConfig = null;
    };
    # Let the admin user browse repositories on disk
    users.users.${config.user}.extraGroups = [ "gitea" ];
    caddy.routes = [
      # Prevent public access to Prometheus metrics.
      {
        match = [
          {
            host = [ config.hostnames.git ];
            path = [ "/metrics*" ];
          }
        ];
        handle = [
          {
            handler = "static_response";
            status_code = "403";
          }
        ];
      }
      # Allow access to primary server.
      {
        match = [ { host = [ config.hostnames.git ]; } ];
        handle = [
          {
            handler = "reverse_proxy";
            upstreams = [
              { dial = "localhost:${builtins.toString config.services.gitea.settings.server.HTTP_PORT}"; }
            ];
          }
        ];
      }
    ];
    # Configure Cloudflare DNS to point to this machine
    services.cloudflare-dyndns.domains = [ config.hostnames.git ];
    # Scrape the metrics endpoint for Prometheus.
    prometheus.scrapeTargets = [
      "127.0.0.1:${builtins.toString config.services.gitea.settings.server.HTTP_PORT}"
    ];
    ## Backup config
    # Open to groups, allowing for backups
    systemd.services.gitea.serviceConfig.StateDirectoryMode = lib.mkForce "0770";
    systemd.tmpfiles.rules = [ "f ${giteaPath}/data/gitea.db 0660 gitea gitea" ];
    # Allow litestream and gitea to share a sqlite database
    users.users.litestream.extraGroups = [ "gitea" ];
    users.users.gitea.extraGroups = [ "litestream" ];
    # Backup sqlite database with litestream
    services.litestream = {
      settings = {
        dbs = [
          {
            path = "${giteaPath}/data/gitea.db";
            replicas = [ { url = "s3://${config.backup.s3.bucket}.${config.backup.s3.endpoint}/gitea"; } ];
          }
        ];
      };
    };
    # Don't start litestream unless gitea is up
    systemd.services.litestream = {
      after = [ "gitea.service" ];
      requires = [ "gitea.service" ];
    };
    # Run a repository file backup on a schedule
    systemd.timers.gitea-backup = lib.mkIf (config.backup.s3.endpoint != null) {
      timerConfig = {
        OnCalendar = "*-*-* 00:00:00"; # Once per day
        Unit = "gitea-backup.service";
      };
      wantedBy = [ "timers.target" ];
    };
    # Backup Gitea repos to object storage
    # (the sqlite db is excluded; litestream replicates it separately)
    systemd.services.gitea-backup = lib.mkIf (config.backup.s3.endpoint != null) {
      description = "Backup Gitea data";
      environment.AWS_ACCESS_KEY_ID = config.backup.s3.accessKeyId;
      serviceConfig = {
        Type = "oneshot";
        User = "gitea";
        Group = "backup";
        EnvironmentFile = config.secrets.backup.dest;
      };
      script = ''
        ${pkgs.awscli2}/bin/aws s3 sync --exclude */gitea.db* \
            ${giteaPath}/ \
            s3://${config.backup.s3.bucket}/gitea-data/ \
            --endpoint-url=https://${config.backup.s3.endpoint}
      '';
    };
  };
}

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,56 @@
{
  config,
  lib,
  ...
}:
let
  cfg = config.nmasur.presets.services.grub;
in
{
  options.nmasur.presets.services.grub.enable = lib.mkEnableOption "Grub bootloader";

  config = lib.mkIf cfg.enable {
    # GRUB bootloader for UEFI machines
    boot.loader.grub = {
      enable = true;
      # Involves the UEFI/BIOS in some fashion (exact effect unverified)
      efiSupport = true;
      # Scan for other installed operating systems and add menu entries
      useOSProber = true;
      # Attempt to render the GRUB menu at full HD on widescreen monitors
      gfxmodeEfi = "1920x1080";
      # Cap how many generations appear in the menu for rollback
      configurationLimit = 25;
      # "nodev" skips installing GRUB to a disk, as expected for UEFI
      # (disk alternative: device = config.fileSystems."/boot".device;)
      device = "nodev";
      # Hold shift during boot to keep the menu open indefinitely;
      # otherwise it times out after 3 seconds
      extraConfig = ''
        if keystatus --shift ; then
        set timeout=-1
        else
        set timeout=3
        fi
      '';
    };

    # Always display menu indefinitely; default is 5 seconds
    # boot.loader.timeout = null;

    # Allows GRUB to interact with the UEFI/BIOS variables
    boot.loader.efi.canTouchEfiVariables = true;
  };
}

View File

@ -0,0 +1,54 @@
{ config, lib, ... }:
let
  cfg = config.nmasur.presets.services.immich;
  photosHost = config.hostnames.photos;
in
{
  options.nmasur.presets.services.immich.enable = lib.mkEnableOption "Immich photo manager";

  config = lib.mkIf cfg.enable {
    # Self-hosted photo management server
    services.immich = {
      enable = true;
      port = 2283;
      # Join the shared group when media is shared between services
      group = lib.mkIf config.nmasur.profiles.shared-media.enable "shared";
      database.enable = true;
      redis.enable = true;
      machine-learning = {
        enable = true;
        environment = { };
      };
      mediaLocation = "/data/images";
      secretsFile = null;
      settings.server.externalDomain = "https://${photosHost}";
      environment = {
        IMMICH_ENV = "production";
        IMMICH_LOG_LEVEL = "log";
        NO_COLOR = "false";
        IMMICH_TRUSTED_PROXIES = "127.0.0.1";
      };
    };

    # Reverse proxy the web UI through Caddy
    caddy.routes = [
      {
        match = [ { host = [ photosHost ]; } ];
        handle = [
          {
            handler = "reverse_proxy";
            upstreams = [ { dial = "localhost:${builtins.toString config.services.immich.port}"; } ];
          }
        ];
      }
    ];

    # Configure Cloudflare DNS to point to this machine
    services.cloudflare-dyndns.domains = [ photosHost ];

    # Point localhost to the local domain
    networking.hosts."127.0.0.1" = [ photosHost ];

    # Include the photo library in restic backups
    services.restic.backups.default.paths = [ "/data/images" ];
  };
}

View File

@ -0,0 +1,72 @@
# InfluxDB is a timeseries database similar to Prometheus. While
# VictoriaMetrics can also act as an InfluxDB, this version of it allows for
# infinite retention separate from our other metrics, which can be nice for
# recording health information, for example.
{ config, lib, ... }:
let
  cfg = config.nmasur.presets.services.influxdb2;
in
{
  options.nmasur.presets.services.influxdb2.enable =
    lib.mkEnableOption "InfluxDB timeseries database";

  config = lib.mkIf cfg.enable {
    # InfluxDB v2 with a provisioned initial org/bucket/admin account
    services.influxdb2 = {
      enable = true;
      provision = {
        enable = true;
        initialSetup = {
          bucket = "default";
          organization = "main";
          passwordFile = config.secrets.influxdb2Password.dest;
          retention = 0; # Keep data forever
          tokenFile = config.secrets.influxdb2Token.dest;
          username = "admin";
        };
      };
      settings = { };
    };

    # Create credentials file for InfluxDB admin
    # NOTE(review): these sub-configs gate on config.services.influxdb2.enable
    # rather than cfg.enable; equivalent here since this module sets it, but
    # inconsistent with sibling presets — confirm intent.
    secrets.influxdb2Password = lib.mkIf config.services.influxdb2.enable {
      source = ../../../private/influxdb2-password.age;
      dest = "${config.secretsDirectory}/influxdb2-password";
      owner = "influxdb2";
      group = "influxdb2";
      permissions = "0440";
    };
    # Ensure the password secret is decrypted before the service starts
    systemd.services.influxdb2Password-secret = lib.mkIf config.services.influxdb2.enable {
      requiredBy = [ "influxdb2.service" ];
      before = [ "influxdb2.service" ];
    };
    # Admin API token secret, wired the same way as the password
    secrets.influxdb2Token = lib.mkIf config.services.influxdb2.enable {
      source = ../../../private/influxdb2-token.age;
      dest = "${config.secretsDirectory}/influxdb2-token";
      owner = "influxdb2";
      group = "influxdb2";
      permissions = "0440";
    };
    systemd.services.influxdb2Token-secret = lib.mkIf config.services.influxdb2.enable {
      requiredBy = [ "influxdb2.service" ];
      before = [ "influxdb2.service" ];
    };

    # Reverse proxy the InfluxDB API/UI (default port 8086) through Caddy
    caddy.routes = lib.mkIf config.services.influxdb2.enable [
      {
        match = [ { host = [ config.hostnames.influxdb ]; } ];
        handle = [
          {
            handler = "reverse_proxy";
            upstreams = [ { dial = "localhost:8086"; } ];
          }
        ];
      }
    ];

    # Configure Cloudflare DNS to point to this machine
    services.cloudflare-dyndns.domains = [ config.hostnames.influxdb ];
  };
}

View File

@ -0,0 +1,83 @@
# Jellyfin is a self-hosted video streaming service. This means I can play my
# server's videos from a webpage, mobile app, or TV client.
{
  config,
  pkgs,
  lib,
  ...
}:
let
  cfg = config.nmasur.presets.services.jellyfin;
in
{
  # Fix: cfg.enable was referenced below, but the option was never declared,
  # which fails evaluation; declare it like the sibling presets do.
  options.nmasur.presets.services.jellyfin.enable = lib.mkEnableOption "Jellyfin media streaming";

  config = lib.mkIf cfg.enable {
    services.jellyfin = {
      # Fix: the preset configured the service but never enabled it; sibling
      # presets all set enable = true inside their own config.
      enable = true;
      # Join the shared group when media is shared between services
      group = lib.mkIf config.nmasur.profiles.shared-media.enable "shared";
    };
    users.users.jellyfin = {
      isSystemUser = true;
    };
    caddy.routes = [
      # Prevent public access to Prometheus metrics.
      {
        match = [
          {
            host = [ config.hostnames.stream ];
            path = [ "/metrics*" ];
          }
        ];
        handle = [
          {
            handler = "static_response";
            status_code = "403";
          }
        ];
      }
      # Allow access to normal route.
      {
        match = [ { host = [ config.hostnames.stream ]; } ];
        handle = [
          {
            handler = "reverse_proxy";
            upstreams = [ { dial = "localhost:8096"; } ];
          }
        ];
      }
    ];
    # Configure Cloudflare DNS to point to this machine
    services.cloudflare-dyndns.domains = [ config.hostnames.stream ];
    # Create videos directory, allow anyone in Jellyfin group to manage it
    systemd.tmpfiles.rules = [
      "d /var/lib/jellyfin 0775 jellyfin shared"
      "d /var/lib/jellyfin/library 0775 jellyfin shared"
    ];
    # Enable VA-API for hardware transcoding
    hardware.graphics = {
      enable = true;
      extraPackages = [ pkgs.libva ];
    };
    environment.systemPackages = [ pkgs.libva-utils ];
    environment.variables = {
      # VAAPI and VDPAU config for accelerated video.
      # See https://wiki.archlinux.org/index.php/Hardware_video_acceleration
      "VDPAU_DRIVER" = "radeonsi";
      "LIBVA_DRIVER_NAME" = "radeonsi";
    };
    # Access to /dev/dri for hardware acceleration
    users.users.jellyfin.extraGroups = [
      "render"
      "video"
    ];
    # Fix issue where Jellyfin-created directories don't allow access for media group
    systemd.services.jellyfin.serviceConfig.UMask = lib.mkForce "0007";
    # Requires MetricsEnable is true in /var/lib/jellyfin/config/system.xml
    prometheus.scrapeTargets = [ "127.0.0.1:8096" ];
  };
}

View File

@ -0,0 +1,27 @@
{ config, lib, ... }:
let
  cfg = config.nmasur.presets.services.journald;
in
{
  options.nmasur.presets.services.journald.enable = lib.mkEnableOption "journald configuration";

  # Cap journald retention and disk usage so log storage never grows
  # unwieldy on long-lived machines.
  config.services.journald.extraConfig = lib.mkIf cfg.enable ''
    SystemMaxUse=4G
    SystemKeepFree=10G
    SystemMaxFileSize=128M
    SystemMaxFiles=500
    MaxFileSec=1month
    MaxRetentionSec=2month
  '';
}

View File

@ -0,0 +1,35 @@
{
config,
lib,
...
}:
let
cfg = config.nmasur.presets.services.lightdm;
in
{
options.nmasur.presets.services.lightdm.enable = lib.mkEnableOption "Lightdm display manager";
config = lib.mkIf cfg.enable {
services.xserver = {
enable = true;
# Login screen
displayManager = {
lightdm = {
enable = true;
background = config.wallpaper;
# Show default user
# Also make sure /var/lib/AccountsService/users/<user> has SystemAccount=false
extraSeatDefaults = ''
greeter-hide-users = false
'';
};
};
};
};
}

View File

@ -0,0 +1,164 @@
# On-demand Minecraft server: a systemd socket listens on the public port and
# only starts the real server when a player connects; a timer stops it again
# once nobody has been online for a while.
{
  config,
  pkgs,
  lib,
  ...
}:
let
  cfg = config.nmasur.presets.services.minecraft-server;
  # Port the Minecraft server itself listens on (proxied, not public)
  localPort = 25564;
  # Port exposed through the firewall and watched by the socket unit
  publicPort = 49732;
  # RCON admin port/password, used only locally for the player-count check
  rconPort = 25575;
  rconPassword = "thiscanbeanything";
in
{
  options.nmasur.presets.services.minecraft-server.enable = lib.mkEnableOption "Minecraft server";

  config = lib.mkIf cfg.enable {
    unfreePackages = [ "minecraft-server" ];
    services.minecraft-server = {
      eula = true;
      declarative = true;
      whitelist = { };
      # Firewall is opened manually below for the public proxy port only
      openFirewall = false;
      serverProperties = {
        server-port = localPort;
        difficulty = "normal";
        gamemode = "survival";
        white-list = false;
        enforce-whitelist = false;
        level-name = "world";
        motd = "Welcome!";
        pvp = true;
        player-idle-timeout = 30;
        generate-structures = true;
        max-players = 20;
        snooper-enabled = false;
        spawn-npcs = true;
        spawn-animals = true;
        spawn-monsters = true;
        allow-nether = true;
        allow-flight = false;
        enable-rcon = true;
        "rcon.port" = rconPort;
        "rcon.password" = rconPassword;
      };
    };
    networking.firewall.allowedTCPPorts = [ publicPort ];
    cloudflare.noProxyDomains = [ config.hostnames.minecraft ];

    ## Automatically start and stop Minecraft server based on player connections
    # Adapted shamelessly from:
    # https://dataswamp.org/~solene/2022-08-20-on-demand-minecraft-with-systemd.html

    # Prevent Minecraft from starting by default
    systemd.services.minecraft-server = {
      wantedBy = pkgs.lib.mkForce [ ];
    };

    # Listen for connections on the public port, to trigger the actual
    # listen-minecraft service.
    systemd.sockets.listen-minecraft = {
      wantedBy = [ "sockets.target" ];
      requires = [ "network.target" ];
      listenStreams = [ "${toString publicPort}" ];
    };

    # Proxy traffic to local port, and trigger hook-minecraft
    systemd.services.listen-minecraft = {
      path = [ pkgs.systemd ];
      requires = [
        "hook-minecraft.service"
        "listen-minecraft.socket"
      ];
      after = [
        "hook-minecraft.service"
        "listen-minecraft.socket"
      ];
      serviceConfig.ExecStart = "${pkgs.systemd.out}/lib/systemd/systemd-socket-proxyd 127.0.0.1:${toString localPort}";
    };

    # Start Minecraft if required and wait for it to be available
    # Then unlock the listen-minecraft.service
    systemd.services.hook-minecraft = {
      path = with pkgs; [
        systemd
        libressl
        busybox
      ];
      # Start Minecraft and the auto-shutdown timer
      script = ''
        systemctl start minecraft-server.service
        systemctl start stop-minecraft.timer
      '';
      # Poll (up to 60 seconds) until the server accepts TCP connections
      postStart = ''
        for i in $(seq 60); do
          if ${pkgs.libressl.nc}/bin/nc -z 127.0.0.1 ${toString localPort} > /dev/null ; then
            exit 0
          fi
          ${pkgs.busybox.out}/bin/sleep 1
        done
        exit 1
      '';
    };

    # Run a player check on a schedule for auto-shutdown
    systemd.timers.stop-minecraft = {
      timerConfig = {
        OnCalendar = "*-*-* *:*:0/20"; # Every 20 seconds
        Unit = "stop-minecraft.service";
      };
    };

    # If no players are connected, then stop services and prepare to resume again
    systemd.services.stop-minecraft = {
      serviceConfig.Type = "oneshot";
      script = ''
        # Check when service was launched
        servicestartsec=$(
          date -d \
            "$(systemctl show \
              --property=ActiveEnterTimestamp \
              minecraft-server.service \
              | cut -d= -f2)" \
            +%s)
        # Calculate elapsed time
        serviceelapsedsec=$(( $(date +%s) - servicestartsec))
        # Ignore if service just started
        if [ $serviceelapsedsec -lt 180 ]
        then
          echo "Server was just started"
          exit 0
        fi
        PLAYERS=$(
          printf "list\n" \
            | ${pkgs.rcon.out}/bin/rcon -m \
              -H 127.0.0.1 -p ${builtins.toString rconPort} -P ${rconPassword} \
        )
        if echo "$PLAYERS" | grep "are 0 of a"
        then
          echo "Stopping server"
          systemctl stop minecraft-server.service
          systemctl stop hook-minecraft.service
          systemctl stop stop-minecraft.timer
        fi
      '';
    };
  };
}

View File

@ -0,0 +1,235 @@
# Nextcloud private data hub: files, calendar, contacts, news, mail client.
# Served by PHP-FPM with Caddy in front (no Nginx), backed by PostgreSQL and
# Redis, with the admin password delivered via the secrets mechanism.
{
  config,
  pkgs,
  lib,
  ...
}:
let
  cfg = config.nmasur.presets.services.nextcloud;
in
{
  options.nmasur.presets.services.nextcloud.enable = lib.mkEnableOption "Nextcloud private data hub";

  config = lib.mkIf cfg.enable {
    services.nextcloud = {
      enable = true;
      package = pkgs.nextcloud30; # Required to specify
      configureRedis = true;
      datadir = "/data/nextcloud";
      database.createLocally = true;
      https = true;
      # Serve on localhost only; Caddy fronts the real hostname
      hostName = "localhost";
      maxUploadSize = "50G";
      config = {
        adminpassFile = config.secrets.nextcloud.dest;
        dbtype = "pgsql";
      };
      settings = {
        default_phone_region = "US";
        # Allow access when hitting either of these hosts or IPs
        trusted_domains = [ config.hostnames.content ];
        trusted_proxies = [ "127.0.0.1" ];
        maintenance_window_start = 4; # Run jobs at 4am UTC
        log_type = "file";
        loglevel = 1; # Include all actions in the log
      };
      extraAppsEnable = true;
      extraApps = {
        calendar = config.services.nextcloud.package.packages.apps.calendar;
        contacts = config.services.nextcloud.package.packages.apps.contacts;
        # These apps are defined and pinned by overlay in flake.
        news = pkgs.nextcloudApps.news;
        external = pkgs.nextcloudApps.external;
        cookbook = pkgs.nextcloudApps.cookbook;
        snappymail = pkgs.nextcloudApps.snappymail;
      };
      phpOptions = {
        "opcache.interned_strings_buffer" = "16";
        "output_buffering" = "0";
      };
    };

    # Don't let Nginx use main ports (using Caddy instead)
    services.nginx.enable = false;

    # Let Caddy's user own the PHP-FPM socket so it can proxy to it
    services.phpfpm.pools.nextcloud.settings = {
      "listen.owner" = config.services.caddy.user;
      "listen.group" = config.services.caddy.group;
    };
    users.users.caddy.extraGroups = [ "nextcloud" ];

    # Point Caddy to Nginx
    # This subroute re-implements the usual Nextcloud Nginx rules in Caddy:
    # vars/headers, DAV redirects, sensitive-file blocking, index rewrites,
    # PHP via FastCGI, and a plain file server for everything else.
    caddy.routes = [
      {
        match = [ { host = [ config.hostnames.content ]; } ];
        handle = [
          {
            handler = "subroute";
            routes = [
              # Sets variables and headers
              {
                handle = [
                  {
                    handler = "vars";
                    # Grab the webroot out of the written config
                    # The webroot is a symlinked combined Nextcloud directory
                    root = config.services.nginx.virtualHosts.${config.services.nextcloud.hostName}.root;
                  }
                  {
                    handler = "headers";
                    response.set.Strict-Transport-Security = [ "max-age=31536000;" ];
                  }
                ];
              }
              # Reroute carddav and caldav traffic
              {
                match = [
                  {
                    path = [
                      "/.well-known/carddav"
                      "/.well-known/caldav"
                    ];
                  }
                ];
                handle = [
                  {
                    handler = "static_response";
                    headers = {
                      Location = [ "/remote.php/dav" ];
                    };
                    status_code = 301;
                  }
                ];
              }
              # Block traffic to sensitive files
              {
                match = [
                  {
                    path = [
                      "/.htaccess"
                      "/data/*"
                      "/config/*"
                      "/db_structure"
                      "/.xml"
                      "/README"
                      "/3rdparty/*"
                      "/lib/*"
                      "/templates/*"
                      "/occ"
                      "/console.php"
                    ];
                  }
                ];
                handle = [
                  {
                    handler = "static_response";
                    status_code = 404;
                  }
                ];
              }
              # Redirect index.php to the homepage
              {
                match = [
                  {
                    file = {
                      try_files = [ "{http.request.uri.path}/index.php" ];
                    };
                    not = [ { path = [ "*/" ]; } ];
                  }
                ];
                handle = [
                  {
                    handler = "static_response";
                    headers = {
                      Location = [ "{http.request.orig_uri.path}/" ];
                    };
                    status_code = 308;
                  }
                ];
              }
              # Rewrite paths to be relative
              {
                match = [
                  {
                    file = {
                      split_path = [ ".php" ];
                      try_files = [
                        "{http.request.uri.path}"
                        "{http.request.uri.path}/index.php"
                        "index.php"
                      ];
                    };
                  }
                ];
                handle = [
                  {
                    handler = "rewrite";
                    uri = "{http.matchers.file.relative}";
                  }
                ];
              }
              # Send all PHP traffic to Nextcloud PHP service
              {
                match = [ { path = [ "*.php" ]; } ];
                handle = [
                  {
                    handler = "reverse_proxy";
                    transport = {
                      protocol = "fastcgi";
                      split_path = [ ".php" ];
                    };
                    upstreams = [ { dial = "unix//run/phpfpm/nextcloud.sock"; } ];
                  }
                ];
              }
              # Finally, send the rest to the file server
              { handle = [ { handler = "file_server"; } ]; }
            ];
          }
        ];
        # Stop matching further Caddy routes once this host matches
        terminal = true;
      }
    ];

    # Configure Cloudflare DNS to point to this machine
    services.cloudflare-dyndns.domains = [ config.hostnames.content ];

    # Create credentials file for nextcloud
    secrets.nextcloud = {
      source = ../../../private/nextcloud.age;
      dest = "${config.secretsDirectory}/nextcloud";
      owner = "nextcloud";
      group = "nextcloud";
      permissions = "0440";
    };
    # Decrypt the admin password before first-time setup runs
    systemd.services.nextcloud-secret = {
      requiredBy = [ "nextcloud-setup.service" ];
      before = [ "nextcloud-setup.service" ];
    };

    # Grant user access to Nextcloud directories
    users.users.${config.user}.extraGroups = [ "nextcloud" ];

    # Open to groups, allowing for backups
    systemd.services.phpfpm-nextcloud.serviceConfig.StateDirectoryMode = lib.mkForce "0770";

    # Log metrics to prometheus
    # The exporter scrapes via the public hostname, so resolve it locally
    networking.hosts."127.0.0.1" = [ config.hostnames.content ];
    services.prometheus.exporters.nextcloud = {
      enable = config.prometheus.exporters.enable;
      username = config.services.nextcloud.config.adminuser;
      url = "https://${config.hostnames.content}";
      passwordFile = config.services.nextcloud.config.adminpassFile;
    };
    prometheus.scrapeTargets = [
      "127.0.0.1:${builtins.toString config.services.prometheus.exporters.nextcloud.port}"
    ];
    # Allows nextcloud-exporter to read passwordFile
    users.users.nextcloud-exporter.extraGroups =
      lib.mkIf config.services.prometheus.exporters.nextcloud.enable
        [ "nextcloud" ];
  };
}

View File

@ -0,0 +1,61 @@
# Automatic daily NixOS upgrades pulled from the flake repository, with an
# email notification when the upgrade job fails.
{
  config,
  pkgs,
  lib,
  ...
}:
let
  cfg = config.nmasur.presets.services.nix-autoupgrade;
in
{
  options.nmasur.presets.services.nix-autoupgrade.enable = lib.mkEnableOption "Nix auto upgrade";

  config = lib.mkIf cfg.enable {
    # Update the system daily by pointing it at the flake repository
    system.autoUpgrade = {
      enable = true;
      dates = "09:33";
      flake = "git+${config.dotfilesRepo}";
      randomizedDelaySec = "25min";
      operation = "switch";
      allowReboot = true;
      rebootWindow = {
        lower = "09:01";
        upper = "11:00";
      };
    };

    # Create an email notification service for failed jobs.
    # This is a template unit; the instance name (%i) carries the name of the
    # failed unit, which the script reports via $SERVICE_ID.
    systemd.services."notify-email@" =
      let
        address = "system@${config.mail.server}";
      in
      {
        enable = config.mail.enable;
        environment.SERVICE_ID = "%i";
        script = ''
          TEMPFILE=$(mktemp)
          echo "From: ${address}" > $TEMPFILE
          echo "To: ${address}" >> $TEMPFILE
          echo "Subject: Failure in $SERVICE_ID" >> $TEMPFILE
          echo -e "\nGot an error with $SERVICE_ID\n\n" >> $TEMPFILE
          set +e
          systemctl status $SERVICE_ID >> $TEMPFILE
          set -e
          ${pkgs.msmtp}/bin/msmtp \
            --file=${config.homePath}/.config/msmtp/config \
            --account=system \
            ${address} < $TEMPFILE
        '';
      };

    # Send an email whenever auto upgrade fails.
    # Fix: use %n (the full unit name, "nixos-upgrade.service") instead of %i.
    # nixos-upgrade is not a template unit, so %i expands to an empty string
    # there, producing "notify-email@.service", which can never be activated.
    systemd.services.nixos-upgrade.onFailure = lib.mkIf config.systemd.services."notify-email@".enable [
      "notify-email@%n.service"
    ];
  };
}

View File

@ -0,0 +1,20 @@
{ config, lib, ... }:
let
  cfg = config.nmasur.presets.services.nix-daemon;
in
{
  options.nmasur.presets.services.nix-daemon.enable = lib.mkEnableOption "Nix daemon";

  # Garbage-collect every morning, shortly before the daily auto-upgrade
  config.nix.gc.dates = lib.mkIf cfg.enable "09:03";
}

View File

@ -0,0 +1,39 @@
{ config, lib, ... }:
let
  cfg = config.nmasur.presets.services.ntfy-sh;
  notifyHost = config.hostnames.notifications;
in
{
  options.nmasur.presets.services.ntfy-sh.enable = lib.mkEnableOption "ntfy notification service";

  config = lib.mkIf cfg.enable {
    # Self-hosted push notification server
    services.ntfy-sh = {
      enable = true;
      settings = {
        base-url = "https://${notifyHost}";
        upstream-base-url = "https://ntfy.sh";
        listen-http = ":8333";
        behind-proxy = true;
        # Deny unauthenticated access to every topic by default
        auth-default-access = "deny-all";
        auth-file = "/var/lib/ntfy-sh/user.db";
      };
    };

    # Reverse proxy the web/API endpoint through Caddy
    caddy.routes = [
      {
        match = [ { host = [ notifyHost ]; } ];
        handle = [
          {
            handler = "reverse_proxy";
            upstreams = [ { dial = "localhost${config.services.ntfy-sh.settings.listen-http}"; } ];
          }
        ];
      }
    ];

    # Configure Cloudflare DNS to point to this machine
    services.cloudflare-dyndns.domains = [ notifyHost ];
  };
}

View File

@ -0,0 +1,48 @@
# SSHD service for allowing SSH access to my machines.
{ config, lib, ... }:
let
  cfg = config.nmasur.presets.services.openssh;
in
{
  options.nmasur.presets.services.openssh = {
    enable = lib.mkEnableOption "OpenSSH remote access service";
    publicKeys = lib.mkOption {
      type = lib.types.nullOr (lib.types.listOf lib.types.str);
      description = "Public SSH keys authorized for this system.";
      default = null;
    };
    # permitRootLogin = lib.mkOption {
    #   type = lib.types.str;
    #   description = "Root login settings.";
    #   default = "no";
    # };
  };

  config = lib.mkIf cfg.enable {
    # sshd locked down to key-based auth: no passwords, no root, no X11
    services.openssh = {
      enable = true;
      ports = [ 22 ];
      allowSFTP = true;
      settings = {
        GatewayPorts = "no";
        X11Forwarding = false;
        PasswordAuthentication = false;
        PermitRootLogin = "no";
      };
    };

    # Authorize the configured public keys for the primary user, when given
    users.users.${config.user}.openssh.authorizedKeys.keys =
      lib.mkIf (cfg.publicKeys != null) cfg.publicKeys;

    # Add terminfo for SSH from popular terminal emulators
    environment.enableAllTerminfo = true;
  };
}

View File

@ -0,0 +1,73 @@
# Paperless-ngx is a document scanning and management solution.
{ config, lib, ... }:
let
  cfg = config.nmasur.presets.services.paperless;
in
{
  options.nmasur.presets.services.paperless.enable =
    lib.mkEnableOption "Paperless-ngx document manager";

  config = lib.mkIf cfg.enable {
    services.paperless = {
      enable = true;
      mediaDir = "/data/generic/paperless";
      passwordFile = config.secrets.paperless.dest;
      settings = {
        # Skip invalidating OCR on digitally signed documents
        PAPERLESS_OCR_USER_ARGS = builtins.toJSON { invalidate_digital_signatures = true; };
        # Enable if changing the path name in Caddy
        # PAPERLESS_FORCE_SCRIPT_NAME = "/paperless";
        # PAPERLESS_STATIC_URL = "/paperless/static/";
      };
    };
    # Allow Nextcloud and user to see files
    users.users.nextcloud.extraGroups = lib.mkIf config.services.nextcloud.enable [ "paperless" ];
    users.users.${config.user}.extraGroups = [ "paperless" ];
    # Reverse proxy the web UI through Caddy
    caddy.routes = [
      {
        match = [
          {
            host = [ config.hostnames.paperless ];
            # path = [ "/paperless*" ]; # Change path name in Caddy
          }
        ];
        handle = [
          {
            handler = "reverse_proxy";
            upstreams = [ { dial = "localhost:${builtins.toString config.services.paperless.port}"; } ];
          }
        ];
      }
    ];
    # Configure Cloudflare DNS to point to this machine
    services.cloudflare-dyndns.domains = [ config.hostnames.paperless ];
    # Admin password secret for Paperless
    # NOTE(review): source points at prometheus.age rather than a
    # paperless-specific secret file — looks like a copy-paste slip from the
    # prometheus preset; confirm the intended .age file.
    secrets.paperless = {
      source = ../../../private/prometheus.age;
      dest = "${config.secretsDirectory}/paperless";
      owner = "paperless";
      group = "paperless";
      permissions = "0440";
    };
    # Decrypt the secret before the service starts
    systemd.services.paperless-secret = {
      requiredBy = [ "paperless.service" ];
      before = [ "paperless.service" ];
    };
    # Fix paperless shared permissions
    systemd.services.paperless-web.serviceConfig.UMask = lib.mkForce "0026";
    systemd.services.paperless-scheduler.serviceConfig.UMask = lib.mkForce "0026";
    systemd.services.paperless-task-queue.serviceConfig.UMask = lib.mkForce "0026";
    # Backups
    services.restic.backups.default.paths = [ "/data/generic/paperless/documents" ];
  };
}

View File

@ -0,0 +1,33 @@
{
  config,
  pkgs,
  lib,
  ...
}:
let
  cfg = config.nmasur.presets.services.pipewire;
in
{
  options.nmasur.presets.services.pipewire.enable = lib.mkEnableOption "Pipewire audio system";

  config = lib.mkIf cfg.enable {
    # PipeWire with its PulseAudio compatibility layer
    services.pipewire = {
      enable = true;
      pulse.enable = true;
    };

    # Provides audio source with background noise filtered
    programs.noisetorch.enable = true;

    # Helpful user-facing tools; not strictly required
    environment.systemPackages = [
      pkgs.pamixer # Audio control
      pkgs.volnoti # Volume notifications
    ];
  };
}

View File

@ -0,0 +1,40 @@
# PostgreSQL server with peer authentication mapped for root and an admin
# role tied to the primary user.
{
  config,
  pkgs,
  lib,
  ...
}:
let
  cfg = config.nmasur.presets.services.postgresql;
in
{
  options.nmasur.presets.services.postgresql.enable = lib.mkEnableOption "Postgresql database";

  # Fix: the module declared `options` but set services.postgresql at the
  # module's top level; the NixOS module system rejects mixing `options`
  # with top-level config attributes, so this must live under `config`.
  config = lib.mkIf cfg.enable {
    services.postgresql = {
      enable = true;
      package = pkgs.postgresql_15;
      settings = { };
      # Peer auth for local sockets, with ident maps defined below
      authentication = ''
        local all postgres peer map=root
        local all admin peer map=admin
      '';
      # Map OS users onto database roles: root -> postgres, user -> admin
      identMap = ''
        root postgres postgres
        root root postgres
        admin ${config.user} admin
      '';
      ensureUsers = [
        {
          name = "admin";
          ensureClauses = {
            createdb = true;
            createrole = true;
            login = true;
          };
        }
      ];
    };
  };
}

View File

@ -0,0 +1,58 @@
# Prometheus is a timeseries database that exposes system and service metrics
# for use in visualizing, monitoring, and alerting (with Grafana).
# Instead of running traditional Prometheus, I generally run VictoriaMetrics as
# a more efficient drop-in replacement.
{
  config,
  lib,
  ...
}:
let
  cfg = config.nmasur.presets.services.prometheus-exporters;
in
{
  options.nmasur.presets.services.prometheus-exporters = {
    enable = lib.mkEnableOption "Prometheus exporters";
    scrapeTargets = lib.mkOption {
      type = lib.types.listOf lib.types.str;
      description = "Prometheus scrape targets";
      default = [ ];
    };
  };

  config = lib.mkIf cfg.enable {
    # Default scrape the basic host information.
    # Fix: this must define the declared option by its full path. The
    # original wrote `cfg.scrapeTargets = [...]`, but `cfg` is only a
    # let-bound value — inside `config` that defines an undeclared
    # top-level `cfg` option and fails evaluation.
    nmasur.presets.services.prometheus-exporters.scrapeTargets = [
      "127.0.0.1:${builtins.toString config.services.prometheus.exporters.node.port}"
      "127.0.0.1:${builtins.toString config.services.prometheus.exporters.systemd.port}"
      "127.0.0.1:${builtins.toString config.services.prometheus.exporters.process.port}"
    ];

    services.prometheus = {
      # NOTE(review): these still read the legacy `prometheus.exporters.enable`
      # option; confirm it survives this refactor or switch them to `true`.
      exporters.node.enable = config.prometheus.exporters.enable;
      exporters.node.enabledCollectors = [ ];
      exporters.node.disabledCollectors = [ "cpufreq" ];
      exporters.systemd.enable = config.prometheus.exporters.enable;
      exporters.process.enable = config.prometheus.exporters.enable;
      exporters.process.settings.process_names = [
        # Remove nix store path from process name
        {
          name = "{{.Matches.Wrapped}} {{ .Matches.Args }}";
          cmdline = [ "^/nix/store[^ ]*/(?P<Wrapped>[^ /]*) (?P<Args>.*)" ];
        }
      ];
      # Scrape every registered local target under this host's job name
      scrapeConfigs = [
        {
          job_name = config.networking.hostName;
          static_configs = [ { targets = cfg.scrapeTargets; } ];
        }
      ];
    };
  };
}

View File

@ -0,0 +1,51 @@
# Prometheus is a timeseries database that exposes system and service metrics
# for use in visualizing, monitoring, and alerting (with Grafana).
# Instead of running traditional Prometheus, I generally run VictoriaMetrics as
# a more efficient drop-in replacement.
{
  config,
  lib,
  ...
}:
let
  cfg = config.nmasur.presets.services.prometheus-remote-write;
in
{
  options.nmasur.presets.services.prometheus-remote-write = {
    enable = lib.mkEnableOption "Prometheus remote write for agent machines";
  };

  config = lib.mkIf cfg.enable {
    # Push this machine's metrics to the central Prometheus-compatible server
    services.prometheus = {
      remoteWrite = [
        {
          name = config.networking.hostName;
          url = "https://${config.hostnames.prometheus}/api/v1/write";
          basic_auth = {
            # Plaintext counterpart of the bcrypt hash configured on the
            # receiving server
            username = "prometheus";
            password_file = config.secrets.prometheus.dest;
          };
        }
      ];
    };

    # Create credentials file for remote Prometheus push
    secrets.prometheus = {
      source = ../../../private/prometheus.age;
      dest = "${config.secretsDirectory}/prometheus";
      owner = "prometheus";
      group = "prometheus";
      permissions = "0440";
    };
    # Decrypt the credentials before Prometheus starts
    systemd.services.prometheus-secret = {
      requiredBy = [ "prometheus.service" ];
      before = [ "prometheus.service" ];
    };
  };
}

View File

@ -0,0 +1,35 @@
{ config, lib, ... }:
let
  cfg = config.nmasur.presets.services.redshift;
in
{
  options.nmasur.presets.services.redshift.enable = lib.mkEnableOption "Redshift light adjuster";

  config = lib.mkIf cfg.enable {
    # Warm the screen color at night without changing brightness
    services.redshift = {
      enable = true;
      brightness.day = "1.0";
      brightness.night = "1.0";
    };

    # Redshift's location provider relies on geoclue for positioning
    services.geoclue2 = {
      enable = true;
      enableWifi = false; # Breaks when it can't connect
    };
    location.provider = "geoclue2";
  };
}

View File

@ -0,0 +1,31 @@
# Samba is a Windows-compatible file-sharing service.
{ config, lib, ... }:
let
  cfg = config.nmasur.presets.services.samba;
in
{
  options.nmasur.presets.services.samba.enable = lib.mkEnableOption "Samba network filesharing";

  # Fix: the WS-Discovery service and its firewall ports were applied
  # unconditionally (outside the mkIf), so importing this module opened
  # ports even with the preset disabled; gate the whole config instead.
  config = lib.mkIf cfg.enable {
    services.samba = {
      enable = true;
      openFirewall = true;
      # Share /data as "NAS" for authenticated (non-guest) users
      settings.data = {
        path = "/data";
        browseable = "yes";
        "read only" = "no";
        "guest ok" = "no";
        comment = "NAS";
      };
    };
    # Allows Windows clients to discover server
    services.samba-wsdd.enable = true;
    networking.firewall.allowedTCPPorts = [ 5357 ]; # WS-Discovery over TCP
    networking.firewall.allowedUDPPorts = [ 3702 ]; # WS-Discovery over UDP
  };
}

View File

@ -0,0 +1,18 @@
# Template for a new service preset: replace every `CHANGEME` with the
# actual service name before use.
{
  config,
  pkgs,
  lib,
  ...
}:
let
  cfg = config.nmasur.presets.services.CHANGEME;
in
{
  # Fix: the original left the attribute name empty
  # (`config.nmasur.presets.services.;` / `services..enable`), which is a
  # Nix parse error and breaks evaluation of anything importing this file;
  # use a valid placeholder identifier instead.
  options.nmasur.presets.services.CHANGEME.enable = lib.mkEnableOption "CHANGEME service";

  config = lib.mkIf cfg.enable {
  };
}

View File

@ -0,0 +1,38 @@
# The Lounge is a self-hosted web client for IRC.
{ config, lib, ... }:
let
  cfg = config.nmasur.presets.services.thelounge;
in
{
  # Fix: cfg.enable was referenced without ever declaring the option,
  # which fails evaluation; declare it like the sibling presets.
  options.nmasur.presets.services.thelounge.enable = lib.mkEnableOption "The Lounge IRC web client";

  config = lib.mkIf cfg.enable {
    services.thelounge = {
      # Fix: the preset configured the service but never enabled it; sibling
      # presets all set enable = true inside their own config.
      enable = true;
      public = false;
      port = 9000;
      extraConfig = {
        reverseProxy = true;
        maxHistory = 10000;
      };
    };
    # Adding new users:
    # nix shell nixpkgs#thelounge
    # sudo su - thelounge -s /bin/sh -c "thelounge add myuser"

    # Allow web traffic to Caddy
    caddy.routes = [
      {
        match = [ { host = [ config.hostnames.irc ]; } ];
        handle = [
          {
            handler = "reverse_proxy";
            upstreams = [ { dial = "localhost:${builtins.toString config.services.thelounge.port}"; } ];
          }
        ];
      }
    ];
    # Configure Cloudflare DNS to point to this machine
    services.cloudflare-dyndns.domains = [ config.hostnames.irc ];
  };
}

View File

@ -0,0 +1,111 @@
# Transmission is a bittorrent client, which can run in the background for
# automated downloads with a web GUI.
{
  config,
  pkgs,
  lib,
  ...
}:
let
  cfg = config.nmasur.presets.services.transmission;
in
{
  options.nmasur.presets.services.transmission.enable =
    lib.mkEnableOption "Transmission BitTorrent service";

  config =
    let
      # Network namespace used to force torrent traffic through WireGuard
      namespace = config.networking.wireguard.interfaces.wg0.interfaceNamespace;
      # First WireGuard address with the /32 suffix stripped, used as the
      # in-namespace RPC target
      vpnIp = lib.strings.removeSuffix "/32" (
        builtins.head config.networking.wireguard.interfaces.wg0.ips
      );
    in
    lib.mkIf cfg.enable {
      # Setup transmission
      services.transmission = {
        enable = true;
        settings = {
          port-forwarding-enabled = false;
          rpc-authentication-required = true;
          rpc-port = 9091;
          rpc-bind-address = "0.0.0.0";
          rpc-username = config.user;
          # This is a salted hash of the real password
          # https://github.com/tomwijnroks/transmission-pwgen
          rpc-password = "{c4c5145f6e18bcd3c7429214a832440a45285ce26jDOBGVW";
          rpc-host-whitelist = config.hostnames.transmission;
          rpc-host-whitelist-enabled = true;
          rpc-whitelist = lib.mkDefault "127.0.0.1"; # Overwritten by Cloudflare
          rpc-whitelist-enabled = true;
        };
      };

      # Configure Cloudflare DNS to point to this machine
      services.cloudflare-dyndns.domains = [ config.hostnames.transmission ];

      # Bind transmission to wireguard namespace so all peer traffic goes
      # through the VPN; the unit dies with the namespace service
      systemd.services.transmission = lib.mkIf config.wireguard.enable {
        bindsTo = [ "netns@${namespace}.service" ];
        requires = [
          "network-online.target"
          "transmission-secret.service"
        ];
        after = [
          "wireguard-wg0.service"
          "transmission-secret.service"
        ];
        unitConfig.JoinsNamespaceOf = "netns@${namespace}.service";
        serviceConfig.NetworkNamespacePath = "/var/run/netns/${namespace}";
      };

      # Create reverse proxy for web UI
      caddy.routes =
        let
          # Set if the download domain is the same as the Transmission domain
          useDownloadDomain = config.hostnames.download == config.hostnames.transmission;
        in
        lib.mkAfter [
          {
            group = if useDownloadDomain then "download" else "transmission";
            match = [
              {
                host = [ config.hostnames.transmission ];
                path = if useDownloadDomain then [ "/transmission*" ] else null;
              }
            ];
            handle = [
              {
                handler = "reverse_proxy";
                upstreams = [
                  { dial = "localhost:${builtins.toString config.services.transmission.settings.rpc-port}"; }
                ];
              }
            ];
          }
        ];

      # Caddy and Transmission both try to set rmem_max for larger UDP packets.
      # We will choose Transmission's recommendation (4 MB).
      boot.kernel.sysctl."net.core.rmem_max" = 4194304;

      # Allow inbound connections to reach namespace: socat on the host
      # forwards port 9091 into the namespace's RPC endpoint
      systemd.services.transmission-web-netns = lib.mkIf config.wireguard.enable {
        description = "Forward to transmission in wireguard namespace";
        requires = [ "transmission.service" ];
        after = [ "transmission.service" ];
        serviceConfig = {
          Restart = "on-failure";
          TimeoutStopSec = 300;
        };
        wantedBy = [ "multi-user.target" ];
        script = ''
          ${pkgs.iproute2}/bin/ip netns exec ${namespace} ${pkgs.iproute2}/bin/ip link set dev lo up
          ${pkgs.socat}/bin/socat tcp-listen:9091,fork,reuseaddr exec:'${pkgs.iproute2}/bin/ip netns exec ${namespace} ${pkgs.socat}/bin/socat STDIO "tcp-connect:${vpnIp}:9091"',nofork
        '';
      };
    };
}

View File

@ -0,0 +1,40 @@
{ config, lib, ... }:
let
  cfg = config.nmasur.presets.services.uptime-kuma;
  kumaPort = config.services.uptime-kuma.settings.PORT;
in
{
  options.nmasur.presets.services.uptime-kuma.enable = lib.mkEnableOption "Uptime-kuma ping monitor";

  config = lib.mkIf cfg.enable {
    # Self-hosted uptime/status monitor
    services.uptime-kuma = {
      enable = true;
      settings.PORT = "3033";
    };

    # Allow web traffic to Caddy
    caddy.routes = [
      {
        match = [ { host = [ config.hostnames.status ]; } ];
        handle = [
          {
            handler = "reverse_proxy";
            upstreams = [ { dial = "localhost:${kumaPort}"; } ];
          }
        ];
      }
    ];

    # Configure Cloudflare DNS to point to this machine
    services.cloudflare-dyndns.domains = [ config.hostnames.status ];
  };
}

View File

@ -0,0 +1,134 @@
# Vaultwarden is an implementation of the Bitwarden password manager backend
# service, which allows for self-hosting the synchronization of a Bitwarden
# password manager client.
{
  config,
  pkgs,
  lib,
  ...
}:
let
  cfg = config.nmasur.presets.services.vaultwarden;
  vaultwardenPath = "/var/lib/bitwarden_rs"; # Default service directory
in
{
  options.nmasur.presets.services.vaultwarden.enable =
    lib.mkEnableOption "Vaultwarden password server for Bitwarden";
  config = lib.mkIf cfg.enable {
    services.vaultwarden = {
      enable = true;
      config = {
        # Public URL of this instance (served by Caddy below)
        DOMAIN = "https://${config.hostnames.secrets}";
        # New accounts only by invitation, verified by email
        SIGNUPS_ALLOWED = false;
        SIGNUPS_VERIFY = true;
        INVITATIONS_ALLOWED = true;
        WEB_VAULT_ENABLED = true;
        # Keep the HTTP API on localhost; Caddy reverse-proxies to this port
        ROCKET_ADDRESS = "127.0.0.1";
        ROCKET_PORT = 8222;
        # Websocket notifications bind on all interfaces (firewall port below)
        WEBSOCKET_ENABLED = true;
        WEBSOCKET_ADDRESS = "0.0.0.0";
        WEBSOCKET_PORT = 3012;
        # Throttle login and admin endpoints
        LOGIN_RATELIMIT_SECONDS = 60;
        LOGIN_RATELIMIT_MAX_BURST = 10;
        ADMIN_RATELIMIT_SECONDS = 300;
        ADMIN_RATELIMIT_MAX_BURST = 3;
      };
      # Decrypted environment file for sensitive settings
      environmentFile = config.secrets.vaultwarden.dest;
      dbBackend = "sqlite";
    };
    # Decrypt the vaultwarden secret, owned by the service user
    secrets.vaultwarden = {
      source = ../../../private/vaultwarden.age;
      dest = "${config.secretsDirectory}/vaultwarden";
      owner = "vaultwarden";
      group = "vaultwarden";
    };
    # Expose the websocket port declared above
    networking.firewall.allowedTCPPorts = [ 3012 ];
    # Serve the web vault and API through Caddy
    caddy.routes = [
      {
        match = [ { host = [ config.hostnames.secrets ]; } ];
        handle = [
          {
            handler = "reverse_proxy";
            upstreams = [
              { dial = "localhost:${builtins.toString config.services.vaultwarden.config.ROCKET_PORT}"; }
            ];
            # Preserve the client IP through the proxy
            headers.request.add."X-Real-IP" = [ "{http.request.remote.host}" ];
          }
        ];
      }
    ];
    # Configure Cloudflare DNS to point to this machine
    services.cloudflare-dyndns.domains = [ config.hostnames.secrets ];
    ## Backup config
    # Open to groups, allowing for backups
    systemd.services.vaultwarden.serviceConfig.StateDirectoryMode = lib.mkForce "0770";
    # Pre-create the sqlite files group-readable/writable (0660) so that the
    # litestream group member can access them
    systemd.tmpfiles.rules = [
      "f ${vaultwardenPath}/db.sqlite3 0660 vaultwarden vaultwarden"
      "f ${vaultwardenPath}/db.sqlite3-shm 0660 vaultwarden vaultwarden"
      "f ${vaultwardenPath}/db.sqlite3-wal 0660 vaultwarden vaultwarden"
    ];
    # Allow litestream and vaultwarden to share a sqlite database
    users.users.litestream.extraGroups = [ "vaultwarden" ];
    users.users.vaultwarden.extraGroups = [ "litestream" ];
    # Backup sqlite database with litestream
    services.litestream = {
      settings = {
        dbs = [
          {
            path = "${vaultwardenPath}/db.sqlite3";
            replicas = [
              { url = "s3://${config.backup.s3.bucket}.${config.backup.s3.endpoint}/vaultwarden"; }
            ];
          }
        ];
      };
    };
    # Don't start litestream unless vaultwarden is up
    systemd.services.litestream = {
      after = [ "vaultwarden.service" ];
      requires = [ "vaultwarden.service" ];
    };
    # Run a separate file backup on a schedule
    systemd.timers.vaultwarden-backup = {
      timerConfig = {
        OnCalendar = "*-*-* 06:00:00"; # Once per day
        Unit = "vaultwarden-backup.service";
      };
      wantedBy = [ "timers.target" ];
    };
    # Backup other Vaultwarden data to object storage
    systemd.services.vaultwarden-backup = {
      description = "Backup Vaultwarden files";
      environment.AWS_ACCESS_KEY_ID = config.backup.s3.accessKeyId;
      serviceConfig = {
        Type = "oneshot";
        User = "vaultwarden";
        Group = "backup";
        # Environment file presumably supplies the AWS secret key — confirm
        EnvironmentFile = config.secrets.backup.dest;
      };
      # Sync everything except the sqlite files, which litestream replicates
      script = ''
        ${pkgs.awscli2}/bin/aws s3 sync \
          ${vaultwardenPath}/ \
          s3://${config.backup.s3.bucket}/vaultwarden/ \
          --endpoint-url=https://${config.backup.s3.endpoint} \
          --exclude "*db.sqlite3*" \
          --exclude ".db.sqlite3*"
      '';
    };
  };
}

View File

@ -0,0 +1,96 @@
# VictoriaMetrics is a more efficient drop-in replacement for Prometheus and
# InfluxDB (timeseries databases built for monitoring system metrics).
{
  config,
  pkgs,
  lib,
  ...
}:
let
  cfg = config.nmasur.presets.services.victoriametrics;
  # Basic-auth username for the vmauth proxy defined below
  username = "prometheus";
  # Scrape config for the exporters registered on this host
  prometheusConfig = {
    scrape_configs = [
      {
        job_name = config.networking.hostName;
        stream_parse = true;
        static_configs = [ { targets = config.prometheus.scrapeTargets; } ];
      }
    ];
  };
  # vmauth config; "%{PASSWORD}" is substituted by vmauth from the
  # EnvironmentFile loaded in the vmauth service below
  authConfig = (pkgs.formats.yaml { }).generate "auth.yml" {
    users = [
      {
        username = username;
        password = "%{PASSWORD}";
        # NOTE(review): assumes listenAddress has the form ":port" so that
        # "localhost" + listenAddress forms a valid URL — confirm
        url_prefix = "http://localhost${config.services.victoriametrics.listenAddress}";
      }
    ];
  };
  # Port the vmauth proxy listens on; Caddy proxies to this, not to the
  # unauthenticated VictoriaMetrics listener
  authPort = "8427";
in
{
  options.nmasur.presets.services.victoriametrics.enable =
    lib.mkEnableOption "VictoriaMetrics timeseries database";
  config = lib.mkIf cfg.enable {
    services.victoriametrics = {
      enable = true;
      # Let the database itself scrape the local targets
      extraOptions = [
        "-promscrape.config=${(pkgs.formats.yaml { }).generate "scrape.yml" prometheusConfig}"
      ];
    };
    # Basic-auth reverse proxy in front of VictoriaMetrics
    systemd.services.vmauth = lib.mkIf config.services.victoriametrics.enable {
      description = "VictoriaMetrics basic auth proxy";
      after = [ "network.target" ];
      startLimitBurst = 5;
      serviceConfig = {
        Restart = "on-failure";
        RestartSec = 1;
        DynamicUser = true;
        # Provides PASSWORD for the %{PASSWORD} placeholder in authConfig
        EnvironmentFile = config.secrets.vmauth.dest;
        ExecStart = ''
          ${pkgs.victoriametrics}/bin/vmauth \
          -auth.config=${authConfig} \
          -httpListenAddr=:${authPort}'';
      };
      wantedBy = [ "multi-user.target" ];
    };
    # Decrypt the password secret, prefixed so the file reads PASSWORD=...
    secrets.vmauth = lib.mkIf config.services.victoriametrics.enable {
      source = ../../../private/prometheus.age;
      dest = "${config.secretsDirectory}/vmauth";
      prefix = "PASSWORD=";
    };
    # Ensure the secret is decrypted before vmauth starts
    systemd.services.vmauth-secret = lib.mkIf config.services.victoriametrics.enable {
      requiredBy = [ "vmauth.service" ];
      before = [ "vmauth.service" ];
    };
    # Route external traffic through Caddy to the vmauth proxy
    caddy.routes = [
      {
        match = [ { host = [ config.hostnames.prometheus ]; } ];
        handle = [
          {
            handler = "reverse_proxy";
            upstreams = [ { dial = "localhost:${authPort}"; } ];
          }
        ];
      }
    ];
    # Configure Cloudflare DNS to point to this machine
    services.cloudflare-dyndns.domains = [ config.hostnames.prometheus ];
  };
}

View File

@ -0,0 +1,55 @@
# VictoriaMetrics is a more efficient drop-in replacement for Prometheus and
# InfluxDB (timeseries databases built for monitoring system metrics).
{
  config,
  lib,
  pkgs-stable,
  ...
}:
let
  cfg = config.nmasur.presets.services.vm-agent;

  # Scrape the exporter targets registered by other modules on this host
  scrapeConfig = {
    scrape_configs = [
      {
        job_name = config.networking.hostName;
        stream_parse = true;
        static_configs = [ { targets = config.prometheus.scrapeTargets; } ];
      }
    ];
  };
in
{
  options.nmasur.presets.services.vm-agent.enable =
    lib.mkEnableOption "vm-agent VictoriaMetrics collector";

  config = lib.mkIf cfg.enable {

    # Collect local metrics and push them to the central VictoriaMetrics host
    services.vmagent = {
      enable = true;
      package = pkgs-stable.vmagent;
      prometheusConfig = scrapeConfig;
      remoteWrite = {
        url = "https://${config.hostnames.prometheus}/api/v1/write";
        basicAuthUsername = "prometheus";
        basicAuthPasswordFile = config.secrets.vmagent.dest;
      };
    };

    # Decrypt the basic-auth password, and do so before the agent starts
    secrets.vmagent = {
      source = ../../../private/prometheus.age;
      dest = "${config.secretsDirectory}/vmagent";
    };
    systemd.services.vmagent-secret = lib.mkIf config.services.vmagent.enable {
      requiredBy = [ "vmagent.service" ];
      before = [ "vmagent.service" ];
    };
  };
}

View File

@ -0,0 +1,59 @@
# Wireguard is a VPN protocol that can be setup to create a mesh network
# between machines on different LANs. This is currently not in use in my setup.
{
  config,
  pkgs,
  lib,
  ...
}:
let
  cfg = config.nmasur.presets.services.wireguard;
in
{
  options.nmasur.presets.services.wireguard.enable = lib.mkEnableOption "Wireguard VPN setup";

  config = lib.mkIf cfg.enable {

    networking.wireguard = {
      enable = true;
      interfaces = {
        wg0 = {
          # Something to use as a default value
          ips = lib.mkDefault [ "127.0.0.1/32" ];
          # Establishes identity of this machine
          generatePrivateKeyFile = false;
          privateKeyFile = config.secrets.wireguard.dest;
          # Move to network namespace for isolating programs
          interfaceNamespace = "wg";
        };
      };
    };

    # Create namespace for Wireguard
    # This allows us to isolate specific programs to Wireguard
    systemd.services."netns@" = {
      # Fix: previously gated on the pre-refactor `config.wireguard.enable`
      # option path; gate on this preset's own enable flag instead.
      enable = cfg.enable;
      description = "%I network namespace";
      before = [ "network.target" ];
      serviceConfig = {
        Type = "oneshot";
        RemainAfterExit = true;
        # Template unit: instantiating netns@wg.service creates namespace "wg"
        ExecStart = "${pkgs.iproute2}/bin/ip netns add %I";
        ExecStop = "${pkgs.iproute2}/bin/ip netns del %I";
      };
    };

    # Create private key file for wireguard
    secrets.wireguard = {
      source = ../../../private/wireguard.age;
      dest = "${config.secretsDirectory}/wireguard";
    };
  };
}

View File

@ -0,0 +1,20 @@
{ config, lib, ... }:
let
  cfg = config.nmasur.profiles.aws;
in
{
  # Fix: `lib` was missing from the module arguments (undefined-variable eval
  # error), and the option was declared at nmasur.profiles.nmasur.aws.enable,
  # which did not match the nmasur.profiles.aws path read into `cfg` above.
  options.nmasur.profiles.aws.enable = lib.mkEnableOption "AWS EC2";

  config = lib.mkIf cfg.enable {
    # AWS settings require this
    # NOTE(review): assumes `permitRootLogin` is an alias option declared
    # elsewhere in this repo — confirm
    permitRootLogin = "prohibit-password";

    # Make sure disk size is large enough
    # https://github.com/nix-community/nixos-generators/issues/150
    amazonImage.sizeMB = 16 * 1024;
  };
}

View File

@ -0,0 +1,35 @@
{
  config,
  lib,
  ...
}:
let
  cfg = config.nmasur.profiles.base;
in
{
  options.nmasur.profiles.base.enable = lib.mkEnableOption "base Linux config";
  config = lib.mkIf cfg.enable {
    # Allows us to declaratively set the password
    users.mutableUsers = false;
    # Define the primary human user account
    users.users.${config.user} = {
      # Create a home directory for human user
      isNormalUser = true;
      # Password comes from the declarative hash; since mutableUsers is false
      # above, changes made with `passwd` will not persist across rebuilds
      hashedPassword = config.passwordHash;
      extraGroups = [
        "wheel" # Sudo privileges
      ];
    };
  };
}

View File

@ -0,0 +1,29 @@
{
  config,
  pkgs,
  lib,
  ...
}:
let
  cfg = config.nmasur.profiles.gaming;
in
{
  options.nmasur.profiles.gaming.enable = lib.mkEnableOption "gaming options";

  config = lib.mkIf cfg.enable {

    # Enable graphics acceleration, including 32-bit support for older games
    hardware.graphics = {
      enable = true;
      enable32Bit = true;
    };

    # Enable gamemode which can be executed on a per-game basis
    programs.gamemode.enable = true;

    # Moonlight game streaming client
    environment.systemPackages = [ pkgs.moonlight-qt ];
  };
}

View File

@ -0,0 +1,96 @@
{
  config,
  pkgs,
  lib,
  ...
}:
let
  cfg = config.nmasur.profiles.gui;
in
{
  options.nmasur.profiles.gui.enable = lib.mkEnableOption "GUI machine";
  config = lib.mkIf cfg.enable {
    # Mouse customization
    services.ratbagd.enable = lib.mkDefault true;
    # NOTE(review): mkDefault on a list-typed merged option means this whole
    # list is discarded if any other module defines systemPackages at normal
    # priority — confirm overriding (rather than merging) is intended
    environment.systemPackages = lib.mkDefault [
      pkgs.libratbag # Mouse adjustments
      pkgs.piper # Mouse adjustments GUI
      pkgs.ddcutil # Monitor brightness control
    ];
    services.libinput.mouse = {
      # Disable mouse acceleration
      accelProfile = lib.mkDefault "flat";
      accelSpeed = lib.mkDefault "1.15";
    };
    # Enable touchpad support
    services.libinput.enable = true;
    services.xserver = {
      xkb.layout = lib.mkDefault "us";
      # Keyboard responsiveness
      autoRepeatDelay = lib.mkDefault 250;
      autoRepeatInterval = lib.mkDefault 40;
      # i3 as the default window manager (hosts may override)
      windowManager = {
        i3 = {
          enable = lib.mkDefault true;
        };
      };
    };
    # Detect monitors (brightness) for ddcutil
    hardware.i2c.enable = lib.mkDefault true;
    # Grant main user access to external monitors
    # NOTE(review): same mkDefault list-priority caveat as systemPackages
    # above — other extraGroups definitions would override rather than merge
    users.users.${config.user}.extraGroups = lib.mkDefault [ "i2c" ];
    services.xserver.displayManager = {
      # Put the login screen on the left monitor
      lightdm.greeters.gtk.extraConfig = lib.mkDefault ''
        active-monitor=0
      '';
      # Set up screen position and rotation
      setupCommands = lib.mkDefault ''
        ${pkgs.xorg.xrandr}/bin/xrandr --output DisplayPort-1 \
        --primary \
        --rotate normal \
        --mode 2560x1440 \
        --rate 165 \
        --output DisplayPort-2 \
        --right-of DisplayPort-1 \
        --rotate left \
        --output DVI-0 --off \
        --output DVI-1 --off \
        || echo "xrandr failed"
      '';
    };
    # Required for setting GTK theme (for preferred-color-scheme in browser)
    services.dbus.packages = [ pkgs.dconf ];
    programs.dconf.enable = true;
    # Make the login screen dark
    services.xserver.displayManager.lightdm.greeters.gtk.theme = {
      name = config.gtk.theme.name;
      package = config.gtk.theme.package;
    };
    # Expose the chosen GTK theme to all sessions via environment variable
    environment.sessionVariables = {
      GTK_THEME = config.gtk.theme.name;
    };
  };
}

View File

@ -0,0 +1,22 @@
{
  config,
  pkgs,
  lib,
  ...
}:
{
  options.nmasur.profiles.latest.enable = lib.mkEnableOption "keeping machine up-to-date";

  config = lib.mkIf config.nmasur.profiles.latest.enable {
    # Use latest released Linux kernel by default; individual hosts can still
    # pin a different kernel because of mkDefault
    boot.kernelPackages = lib.mkDefault pkgs.linuxPackages_latest;
  };
}

View File

@ -0,0 +1,20 @@
{ config, lib, ... }:
let
  cfg = config.nmasur.profiles.no-mitigations;
in
{
  options.nmasur.profiles.no-mitigations.enable = lib.mkEnableOption "remove Kernel CPU mitigations";

  config = lib.mkIf cfg.enable {
    # WARNING: This is not secure
    # Trades CPU vulnerability mitigations for performance
    boot.kernelParams = [ "mitigations=off" ];
  };
}

View File

@ -0,0 +1,44 @@
{
  config,
  pkgs,
  lib,
  ...
}:
let
  cfg = config.nmasur.profiles.on-premises;

  # Broadcast a wake-on-LAN magic packet to the tempest machine
  wakeTempest = pkgs.writeShellScriptBin "wake-tempest" "${pkgs.wakeonlan}/bin/wakeonlan --ip=192.168.1.255 74:56:3C:40:37:5D";
in
{
  options.nmasur.profiles.on-premises.enable = lib.mkEnableOption "on-premises machine settings";

  config = lib.mkIf cfg.enable {

    # Enable automatic timezone updates based on location
    services.tzupdate.enable = true;

    # Allow reading from Windows drives
    boot.supportedFilesystems = [ "ntfs" ];

    # Enable fstrim, which tracks free space on SSDs for garbage collection
    # More info: https://www.reddit.com/r/NixOS/comments/rbzhb1/if_you_have_a_ssd_dont_forget_to_enable_fstrim/
    services.fstrim.enable = true;

    # Fall back to DHCP and user-controlled wpa_supplicant only when
    # NetworkManager is not managing the network
    networking.useDHCP = !config.networking.networkmanager.enable;
    networking.wireless = {
      enable = !config.networking.networkmanager.enable;
      userControlled.enable = true;
    };

    # Wake up tempest with a command
    environment.systemPackages = [ wakeTempest ];
  };
}

View File

@ -0,0 +1,52 @@
{
  config,
  pkgs,
  lib,
  ...
}:
let
  cfg = config.nmasur.profiles.server;
in
{
  options.nmasur.profiles.server.enable = lib.mkEnableOption "server configuration";

  config = lib.mkIf cfg.enable {

    networking.firewall.allowPing = true;

    # Implement a simple fail2ban service for sshd
    services.sshguard.enable = true;

    # Servers need a bootloader or they won't start
    boot.loader = {
      systemd-boot.enable = true;
      efi.canTouchEfiVariables = true;
    };

    # Use power button to sleep instead of poweroff
    services.logind = {
      powerKey = "suspend";
      powerKeyLongPress = "poweroff";
    };

    # Prevent wake from keyboard
    powerManagement.powerDownCommands = ''
      set +e
      # Fix for Gigabyte motherboard
      # /r/archlinux/comments/y7b97e/my_computer_wakes_up_immediately_after_i_suspend/isu99sr/
      # Disable if enabled
      if (grep "GPP0.*enabled" /proc/acpi/wakeup >/dev/null); then
        echo GPP0 | ${pkgs.doas}/bin/doas tee /proc/acpi/wakeup
      fi
      sleep 2
      set -e
    '';
    services.udev.extraRules = ''
      ACTION=="add", SUBSYSTEM=="usb", DRIVER=="usb", ATTR{power/wakeup}="disabled"
      ACTION=="add", SUBSYSTEM=="i2c", ATTR{power/wakeup}="disabled"
    '';
  };
}

View File

@ -0,0 +1,25 @@
{
  config,
  pkgs,
  lib,
  ...
}:
let
  cfg = config.nmasur.profiles.shared-media;
in
{
  options.nmasur.profiles.shared-media.enable = lib.mkEnableOption "shared media groups";

  config = lib.mkIf cfg.enable {
    # Create a shared group for many services
    users.groups.shared = { };

    # Give the human user access to the shared group
    users.users.${config.user}.extraGroups = [ config.users.groups.shared.name ];
  };
}

View File

@ -0,0 +1,21 @@
{ config, lib, ... }:
{
  options.nmasur.profiles.wsl.enable = lib.mkEnableOption "WSL settings";

  config = lib.mkIf config.nmasur.profiles.wsl.enable {
    # WSL sources /etc/nixos on every launch, so replace that directory with a
    # symlink to this repo instead of keeping a separate copy
    system.activationScripts.configDir.text = ''
      rm -rf /etc/nixos
      ln --symbolic --no-dereference --force ${config.dotfilesPath} /etc/nixos
    '';
  };
}

View File

@ -0,0 +1,41 @@
{
  config,
  pkgs,
  lib,
  ...
}:
let
  cfg = config.services.betterlockscreen;

  # Lock all displays with a blurred background
  lockCmd = "${pkgs.betterlockscreen}/bin/betterlockscreen --lock --display 1 --blur 0.5 --span";

  # Run the locker around both the sleep and suspend targets
  sleepTargets = [
    "sleep.target"
    "suspend.target"
  ];
in
{
  options.services.betterlockscreen.enable = lib.mkEnableOption "Betterlockscreen X server display lock";

  config = lib.mkIf cfg.enable {
    # Ref: https://github.com/betterlockscreen/betterlockscreen/blob/next/system/betterlockscreen%40.service
    systemd.services.lock = {
      enable = true;
      description = "Lock the screen on resume from suspend";
      before = sleepTargets;
      wantedBy = sleepTargets;
      serviceConfig = {
        User = config.user;
        Type = "simple";
        Environment = "DISPLAY=:0";
        TimeoutSec = "infinity";
        ExecStart = lockCmd;
        # Give the locker a moment to take over the display before suspending
        ExecStartPost = "${pkgs.coreutils-full}/bin/sleep 1";
      };
    };
  };
}

View File

@ -0,0 +1,53 @@
{
  config,
  pkgs,
  lib,
  ...
}:
let
  cfg = config.services.cloudflare-dyndns-no-proxy;
in
{
  options.services.cloudflare-dyndns-no-proxy.enable = lib.mkEnableOption "Cloudflare dyndns client without proxying";
  config = lib.mkIf cfg.enable {
    # Run a second copy of dyn-dns for non-proxied domains
    # Adapted from: https://github.com/NixOS/nixpkgs/blob/nixos-unstable/nixos/modules/services/networking/cloudflare-dyndns.nix
    # Only instantiated when at least one no-proxy domain is registered
    systemd.services.cloudflare-dyndns-noproxy =
      lib.mkIf ((builtins.length config.cloudflare.noProxyDomains) > 0)
        {
          description = "CloudFlare Dynamic DNS Client (no proxy)";
          # The Cloudflare API token secret must be decrypted first
          after = [
            "network.target"
            "cloudflare-api-secret.service"
          ];
          requires = [ "cloudflare-api-secret.service" ];
          wantedBy = [ "multi-user.target" ];
          # Refresh the DNS records every five minutes
          startAt = "*:0/5";
          environment = {
            CLOUDFLARE_DOMAINS = toString config.cloudflare.noProxyDomains;
          };
          serviceConfig = {
            Type = "simple";
            DynamicUser = true;
            # Persist the IP cache across runs under /var/lib
            StateDirectory = "cloudflare-dyndns-noproxy";
            # Reuse the API token file from the main cloudflare-dyndns service
            EnvironmentFile = config.services.cloudflare-dyndns.apiTokenFile;
            ExecStart =
              let
                # Flags mirror the upstream nixpkgs module; assumes the CLI
                # accepts -4/-no-4 and -6/-no-6 toggles — confirm upstream
                args =
                  [ "--cache-file /var/lib/cloudflare-dyndns-noproxy/ip.cache" ]
                  ++ (if config.services.cloudflare-dyndns.ipv4 then [ "-4" ] else [ "-no-4" ])
                  ++ (if config.services.cloudflare-dyndns.ipv6 then [ "-6" ] else [ "-no-6" ])
                  ++ lib.optional config.services.cloudflare-dyndns.deleteMissing "--delete-missing";
              in
              "${pkgs.cloudflare-dyndns}/bin/cloudflare-dyndns ${toString args}";
          };
        };
  };
}

View File

@ -0,0 +1,64 @@
{
  config,
  pkgs,
  lib,
  ...
}:
let
  cfg = config.services.filebrowser;

  # State (database) directory, created by systemd via StateDirectory below
  dataDir = "/var/lib/filebrowser";

  # Rendered to /etc/filebrowser/.filebrowser.json for the daemon to read
  settings = {
    port = 8020;
    baseURL = "";
    address = "";
    log = "stdout";
    database = "${dataDir}/filebrowser.db";
    root = "";
    "auth.method" = "json";
    username = config.user;
    # Generate password: htpasswd -nBC 10 "" | tr -d ':\n'
    password = cfg.passwordHash;
  };
in
{
  options.services.filebrowser = {
    enable = lib.mkEnableOption "Filebrowser private files";
    passwordHash = lib.mkOption {
      type = lib.types.str;
      description = ''Hashed password created from htpasswd -nBC 10 "" | tr -d ':\n' '';
      # Fix: this option was previously declared but ignored (the hash was
      # hard-coded in settings); it is now honored, with the old hard-coded
      # hash kept as the default for backward compatibility.
      default = "$2y$10$ze1cMob0k6pnXRjLowYfZOVZWg4G.dsPtH3TohbUeEbI0sdkG9.za";
    };
  };

  config = lib.mkIf cfg.enable {

    environment.etc."filebrowser/.filebrowser.json".text = builtins.toJSON settings;

    # Fix: was gated on `config.filebrowser.enable`, an option path that does
    # not exist (the option lives at services.filebrowser.enable); the whole
    # config block is already gated on cfg.enable above, so no inner mkIf.
    systemd.services.filebrowser = {
      description = "Filebrowser cloud file services";
      after = [ "network.target" ];
      wantedBy = [ "multi-user.target" ];
      startLimitIntervalSec = 14400;
      startLimitBurst = 10;
      serviceConfig = {
        ExecStart = "${pkgs.filebrowser}/bin/filebrowser";
        DynamicUser = true;
        Group = "shared";
        ReadWritePaths = [ dataDir ];
        StateDirectory = [ "filebrowser" ];
        Restart = "on-failure";
        RestartPreventExitStatus = 1;
        RestartSec = "5s";
      };
      path = [ pkgs.getent ]; # Fix: getent not found in $PATH
    };

    # Configure Cloudflare DNS to point to this machine
    services.cloudflare-dyndns.domains = [ config.hostnames.files ];
  };
}

View File

@ -0,0 +1,37 @@
{ config, lib, ... }:
let
  cfg = config.services.wait-for-identity;
in
{
  options.services.wait-for-identity = {
    enable = lib.mkEnableOption "Wait for identity file oneshot";
    identityFile = lib.mkOption {
      type = lib.types.path;
      description = "Identity (decryption key) file to wait for on disk.";
    };
  };

  config = lib.mkIf cfg.enable {
    # Wait for secret to be placed on the machine
    systemd.services.wait-for-identity = {
      description = "Wait until identity file exists on the machine";
      wantedBy = [ "multi-user.target" ];
      serviceConfig = {
        Type = "oneshot";
        RemainAfterExit = true;
      };
      # Poll every 6 seconds for up to a minute.
      # Fix: previously the script fell through after the loop and exited 0,
      # so units ordered after / bound to this one would proceed even when
      # the identity file never appeared; fail explicitly instead.
      script = ''
        for i in $(seq 1 10); do
          if [ -f ${builtins.toString cfg.identityFile} ]; then
            echo "Identity file found."
            exit 0
          fi
          sleep 6
        done
        echo "Identity file was not found after 60 seconds." >&2
        exit 1
      '';
    };
  };
}

View File

@ -0,0 +1,40 @@
# n8n is an automation integration tool for connecting data from services
# together with triggers.
{ config, lib, ... }:
let
  n8nCfg = config.services.n8n;
in
{
  config = lib.mkIf n8nCfg.enable {

    # Mark n8n as an allowed unfree package
    unfreePackages = [ "n8n" ];

    # Serve on localhost; Caddy handles external traffic
    services.n8n = {
      webhookUrl = "https://${config.hostnames.n8n}";
      settings = {
        listen_address = "127.0.0.1";
        port = 5678;
      };
    };

    # Point the editor UI at the public URL
    systemd.services.n8n.environment.N8N_EDITOR_BASE_URL = n8nCfg.webhookUrl;

    # Allow web traffic to Caddy
    caddy.routes = [
      {
        match = [ { host = [ config.hostnames.n8n ]; } ];
        handle = [
          {
            handler = "reverse_proxy";
            upstreams = [ { dial = "localhost:${builtins.toString n8nCfg.settings.port}"; } ];
          }
        ];
      }
    ];

    # Configure Cloudflare DNS to point to this machine
    services.cloudflare-dyndns.domains = [ config.hostnames.n8n ];
  };
}

View File

@ -0,0 +1,112 @@
# Secrets management method taken from here:
# https://xeiaso.net/blog/nixos-encrypted-secrets-2021-01-20
# In my case, I pre-encrypt my secrets and commit them to git.
{
  config,
  pkgs,
  lib,
  ...
}:
let
  # Attrset of declared secrets (see the `secrets` option below)
  cfg = config.secrets;
in
{
  options = {
    secretsDirectory = lib.mkOption {
      type = lib.types.path;
      description = "Default path to place secrets.";
      default = "/var/private";
    };
    secretsIdentityFile = lib.mkOption {
      type = lib.types.path;
      description = "Path containing decryption identity.";
    };
    secrets = lib.mkOption {
      type = lib.types.attrsOf (
        lib.types.submodule {
          options = {
            source = lib.mkOption {
              type = lib.types.path;
              description = "Path to encrypted secret.";
            };
            dest = lib.mkOption {
              type = lib.types.str;
              description = "Resulting path for decrypted secret.";
            };
            owner = lib.mkOption {
              default = "root";
              type = lib.types.str;
              description = "User to own the secret.";
            };
            group = lib.mkOption {
              default = "root";
              type = lib.types.str;
              description = "Group to own the secret.";
            };
            permissions = lib.mkOption {
              default = "0400";
              type = lib.types.str;
              description = "Permissions expressed as octal.";
            };
            prefix = lib.mkOption {
              default = "";
              type = lib.types.str;
              description = "Prefix for secret value (for environment files).";
            };
          };
        }
      );
      description = "Set of secrets to decrypt to disk.";
      default = { };
    };
  };

  # Fix: `cfg` is the attrset of secrets itself, so the previous condition
  # `builtins.length cfg.secrets > 0` both looked up a nonexistent attribute
  # and passed an attrset to builtins.length (which only accepts lists);
  # compare against the empty set instead.
  config = lib.mkIf (cfg != { }) {
    # Create a default directory to place secrets
    systemd.tmpfiles.rules = [ "d ${config.secretsDirectory} 0755 root wheel" ];
    # Declare oneshot service to decrypt secret using SSH host key
    # - Requires that the secret is already encrypted for the host
    # - Encrypt secrets: nix run github:nmasur/dotfiles#encrypt-secret
    systemd.services = lib.mapAttrs' (name: attrs: {
      name = "${name}-secret";
      value = {
        description = "Decrypt secret for ${name}";
        wantedBy = [ "multi-user.target" ];
        # If the wait-for-identity gate is enabled, don't decrypt until the
        # identity (decryption key) has appeared on the machine
        bindsTo = lib.mkIf config.services.wait-for-identity.enable [ "wait-for-identity.service" ];
        after = lib.mkIf config.services.wait-for-identity.enable [ "wait-for-identity.service" ];
        serviceConfig.Type = "oneshot";
        # Decrypt with age, then apply ownership and permissions
        script = ''
          echo "${attrs.prefix}$(
            ${pkgs.age}/bin/age --decrypt \
              --identity ${config.secretsIdentityFile} ${attrs.source}
          )" > ${attrs.dest}
          chown '${attrs.owner}':'${attrs.group}' '${attrs.dest}'
          chmod '${attrs.permissions}' '${attrs.dest}'
        '';
      };
    }) cfg;
    # Example declaration
    # config.secrets.my-secret = {
    #   source = ../../private/my-secret.age;
    #   dest = "/var/lib/private/my-secret";
    #   owner = "my-app";
    #   group = "my-app";
    #   permissions = "0440";
    # };
  };
}