initial refactoring
@ -1,74 +0,0 @@
{ config, lib, ... }:

{

  options = {
    services.actualbudget = {
      enable = lib.mkEnableOption "ActualBudget budgeting service";
      port = lib.mkOption {
        type = lib.types.port;
        description = "Port for ActualBudget to listen on localhost";
        default = 5006;
      };
    };
  };
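
  # A minimal usage sketch from a host configuration (hypothetical; assumes
  # config.hostnames.budget is set elsewhere in this repo):
  #
  #   services.actualbudget.enable = true;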

  config = lib.mkIf config.services.actualbudget.enable {

    virtualisation.podman.enable = lib.mkDefault true;

    users.users.actualbudget = {
      isSystemUser = true;
      group = "shared";
      uid = 980;
    };

    # Create budget directory, allowing others to manage it
    systemd.tmpfiles.rules = [
      "d /var/lib/actualbudget 0770 actualbudget shared"
    ];

    virtualisation.oci-containers.containers.actualbudget = {
      workdir = null;
      volumes = [ "/var/lib/actualbudget:/data" ];
      user = builtins.toString config.users.users.actualbudget.uid;
      pull = "missing";
      privileged = false;
      ports = [ "127.0.0.1:${builtins.toString config.services.actualbudget.port}:5006" ];
      networks = [ ];
      log-driver = "journald";
      labels = {
        app = "actualbudget";
      };
      image = "ghcr.io/actualbudget/actual-server:25.1.0";
      hostname = null;
      environmentFiles = [ ];
      environment = {
        DEBUG = "actual:config"; # Enable debug logging
        ACTUAL_TRUSTED_PROXIES = builtins.concatStringsSep "," [ "127.0.0.1" ];
      };
      dependsOn = [ ];
      autoStart = true;
    };

    # Allow web traffic to Caddy
    caddy.routes = [
      {
        match = [ { host = [ config.hostnames.budget ]; } ];
        handle = [
          {
            handler = "reverse_proxy";
            upstreams = [ { dial = "localhost:${builtins.toString config.services.actualbudget.port}"; } ];
          }
        ];
      }
    ];

    # Configure Cloudflare DNS to point to this machine
    services.cloudflare-dyndns.domains = [ config.hostnames.budget ];

    # Backups
    services.restic.backups.default.paths = [ "/var/lib/actualbudget" ];

  };

}
@ -1,285 +0,0 @@
{
  config,
  pkgs,
  lib,
  ...
}:

let

  # This config specifies ports for Prometheus to scrape information
  arrConfig = {
    radarr = {
      exportarrPort = "9707";
      url = "localhost:7878";
      apiKey = config.secrets.radarrApiKey.dest;
    };
    readarr = {
      exportarrPort = "9711";
      url = "localhost:8787";
      apiKey = config.secrets.readarrApiKey.dest;
    };
    sonarr = {
      exportarrPort = "9708";
      url = "localhost:8989";
      apiKey = config.secrets.sonarrApiKey.dest;
    };
    prowlarr = {
      exportarrPort = "9709";
      url = "localhost:9696";
      apiKey = config.secrets.prowlarrApiKey.dest;
    };
    sabnzbd = {
      exportarrPort = "9710";
      url = "localhost:8085";
      apiKey = config.secrets.sabnzbdApiKey.dest;
    };
  };
in
{

  options = {
    arrs.enable = lib.mkEnableOption "Arr services";
  };

  config = lib.mkIf config.arrs.enable {

    # Broken on 2024-12-07
    # https://discourse.nixos.org/t/solved-sonarr-is-broken-in-24-11-unstable-aka-how-the-hell-do-i-use-nixpkgs-config-permittedinsecurepackages/
    insecurePackages = [
      "aspnetcore-runtime-wrapped-6.0.36"
      "aspnetcore-runtime-6.0.36"
      "dotnet-sdk-wrapped-6.0.428"
      "dotnet-sdk-6.0.428"
    ];

    services = {
      bazarr = {
        enable = true;
        group = "shared";
      };
      jellyseerr.enable = true;
      prowlarr.enable = true;
      sabnzbd = {
        enable = true;
        group = "shared";
        # The config file must be editable within the application
        # It contains server configs and credentials
        configFile = "/data/downloads/sabnzbd/sabnzbd.ini";
      };
      sonarr = {
        enable = true;
        group = "shared";
      };
      radarr = {
        enable = true;
        group = "shared";
      };
      readarr = {
        enable = true;
        group = "shared";
      };
    };

    # Allows shared group to read/write the sabnzbd directory
    users.users.sabnzbd.homeMode = "0770";

    unfreePackages = [ "unrar" ]; # Required as a dependency for sabnzbd

    # Requires updating the base_url config value in each service
    # If you try to rewrite the URL, the service won't redirect properly
    caddy.routes = [
      {
        # Group means that routes with the same name are mutually exclusive,
        # so they are split between the appropriate services.
        group = "download";
        match = [
          {
            host = [ config.hostnames.download ];
            path = [ "/sonarr*" ];
          }
        ];
        handle = [
          {
            handler = "reverse_proxy";
            # We're able to reference the url and port of the service dynamically
            upstreams = [ { dial = arrConfig.sonarr.url; } ];
          }
        ];
      }
      {
        group = "download";
        match = [
          {
            host = [ config.hostnames.download ];
            path = [ "/radarr*" ];
          }
        ];
        handle = [
          {
            handler = "reverse_proxy";
            upstreams = [ { dial = arrConfig.radarr.url; } ];
          }
        ];
      }
      {
        group = "download";
        match = [
          {
            host = [ config.hostnames.download ];
            path = [ "/readarr*" ];
          }
        ];
        handle = [
          {
            handler = "reverse_proxy";
            upstreams = [ { dial = arrConfig.readarr.url; } ];
          }
        ];
      }
      {
        group = "download";
        match = [
          {
            host = [ config.hostnames.download ];
            path = [ "/prowlarr*" ];
          }
        ];
        handle = [
          {
            handler = "reverse_proxy";
            # Prowlarr doesn't offer a dynamic config, so we have to hardcode it
            upstreams = [ { dial = "localhost:9696"; } ];
          }
        ];
      }
      {
        group = "download";
        match = [
          {
            host = [ config.hostnames.download ];
            path = [ "/bazarr*" ];
          }
        ];
        handle = [
          {
            handler = "reverse_proxy";
            upstreams = [
              {
                # Bazarr only dynamically sets the port, not the host
                dial = "localhost:${builtins.toString config.services.bazarr.listenPort}";
              }
            ];
          }
        ];
      }
      {
        group = "download";
        match = [
          {
            host = [ config.hostnames.download ];
            path = [ "/sabnzbd*" ];
          }
        ];
        handle = [
          {
            handler = "reverse_proxy";
            upstreams = [ { dial = arrConfig.sabnzbd.url; } ];
          }
        ];
      }
      {
        group = "download";
        match = [ { host = [ config.hostnames.download ]; } ];
        handle = [
          {
            handler = "reverse_proxy";
            upstreams = [ { dial = "localhost:${builtins.toString config.services.jellyseerr.port}"; } ];
          }
        ];
      }
    ];

    # Configure Cloudflare DNS to point to this machine
    services.cloudflare-dyndns.domains = [ config.hostnames.download ];

    # Enable Prometheus exporters
    systemd.services = lib.mapAttrs' (name: attrs: {
      name = "prometheus-${name}-exporter";
      value = {
        description = "Export Prometheus metrics for ${name}";
        after = [ "network.target" ];
        wantedBy = [ "${name}.service" ];
        serviceConfig = {
          Type = "simple";
          DynamicUser = true;
          ExecStart =
            let
              # Sabnzbd doesn't accept the URI path, unlike the others
              url = if name != "sabnzbd" then "http://${attrs.url}/${name}" else "http://${attrs.url}";
            in
            # Exportarr is trained to pull from the arr services
            ''
              ${pkgs.exportarr}/bin/exportarr ${name} \
                  --url ${url} \
                  --port ${attrs.exportarrPort}'';
          EnvironmentFile = lib.mkIf (builtins.hasAttr "apiKey" attrs) attrs.apiKey;
          Restart = "on-failure";
          ProtectHome = true;
          ProtectSystem = "strict";
          PrivateTmp = true;
          PrivateDevices = true;
          ProtectHostname = true;
          ProtectClock = true;
          ProtectKernelTunables = true;
          ProtectKernelModules = true;
          ProtectKernelLogs = true;
          ProtectControlGroups = true;
          NoNewPrivileges = true;
          RestrictRealtime = true;
          RestrictSUIDSGID = true;
          RemoveIPC = true;
          PrivateMounts = true;
        };
      };
    }) arrConfig;
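
    # As a sketch of what this generates: for radarr, the unit is named
    # "prometheus-radarr-exporter" and its ExecStart resolves to roughly:
    #
    #   exportarr radarr --url http://localhost:7878/radarr --port 9707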

    # Secrets for Prometheus exporters
    secrets.radarrApiKey = {
      source = ../../../private/radarr-api-key.age;
      dest = "/var/private/radarr-api";
      prefix = "API_KEY=";
    };
    secrets.readarrApiKey = {
      source = ../../../private/readarr-api-key.age;
      dest = "/var/private/readarr-api";
      prefix = "API_KEY=";
    };
    secrets.sonarrApiKey = {
      source = ../../../private/sonarr-api-key.age;
      dest = "/var/private/sonarr-api";
      prefix = "API_KEY=";
    };
    secrets.prowlarrApiKey = {
      source = ../../../private/prowlarr-api-key.age;
      dest = "/var/private/prowlarr-api";
      prefix = "API_KEY=";
    };
    secrets.sabnzbdApiKey = {
      source = ../../../private/sabnzbd-api-key.age;
      dest = "/var/private/sabnzbd-api";
      prefix = "API_KEY=";
    };

    # Prometheus scrape targets (expose Exportarr to Prometheus)
    prometheus.scrapeTargets = map (
      key: "127.0.0.1:${arrConfig.${key}.exportarrPort}"
    ) (builtins.attrNames arrConfig);
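
    # Given arrConfig above, this list evaluates to (attrNames sorts keys):
    #   [ "127.0.0.1:9709" "127.0.0.1:9707" "127.0.0.1:9711" "127.0.0.1:9710" "127.0.0.1:9708" ]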
  };
}
@ -1,29 +0,0 @@
{ config, lib, ... }:
{

  config = lib.mkIf config.services.audiobookshelf.enable {

    services.audiobookshelf = {
      group = "shared";
      dataDir = "audiobookshelf";
    };

    # Allow web traffic to Caddy
    caddy.routes = [
      {
        match = [ { host = [ config.hostnames.audiobooks ]; } ];
        handle = [
          {
            handler = "reverse_proxy";
            upstreams = [ { dial = "localhost:${builtins.toString config.services.audiobookshelf.port}"; } ];
          }
        ];
      }
    ];

    # Configure Cloudflare DNS to point to this machine
    services.cloudflare-dyndns.domains = [ config.hostnames.audiobooks ];

  };

}
@ -1,88 +0,0 @@
# Bind is a DNS service. This allows me to resolve public domains locally so
# when I'm at home, I don't have to travel over the Internet to reach my
# server.

# To set this on all home machines, I point my router's DNS resolver to the
# local IP address of the machine running this service (swan).

{
  config,
  pkgs,
  lib,
  ...
}:

let

  localIp = "192.168.1.218";
  localServices = [
    config.hostnames.stream
    config.hostnames.content
    config.hostnames.books
    config.hostnames.download
    config.hostnames.photos
  ];
  mkRecord = service: "${service} A ${localIp}";
  localRecords = lib.concatLines (map mkRecord localServices);
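
  # For example, with a hypothetical fqdn this renders a record like:
  #   mkRecord "stream.example.com" => "stream.example.com A 192.168.1.218"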
in
{

  config = lib.mkIf config.services.bind.enable {

    # Normally I block all requests not coming from Cloudflare, so I have to also
    # allow my local network.
    caddy.cidrAllowlist = [ "192.168.0.0/16" ];

    services.bind = {

      # Allow requests coming from these IPs. This way I don't somehow get
      # spammed with DNS requests coming from the Internet.
      cacheNetworks = [
        "127.0.0.0/24"
        "192.168.0.0/16"
        "::1/128" # Required because IPv6 loopback now added to resolv.conf
        # (see: https://github.com/NixOS/nixpkgs/pull/302228)
      ];

      # When making normal DNS requests, forward them to Cloudflare to resolve.
      forwarders = [
        "1.1.1.1"
        "1.0.0.1"
      ];

      ipv4Only = false;

      # Use rpz zone as an override
      extraOptions = ''response-policy { zone "rpz"; };'';

      zones = {
        rpz = {
          master = true;
          file = pkgs.writeText "db.rpz" ''
            $TTL 60 ; 1 minute
            @         IN SOA localhost. root.localhost. (
                          2023071800 ; serial
                          1h         ; refresh
                          30m        ; retry
                          1w         ; expire
                          30m        ; minimum ttl
                          )
                      IN NS localhost.
            localhost A 127.0.0.1
            ${localRecords}
          '';
        };
      };
    };

    # We must allow DNS traffic to hit our machine as well
    networking.firewall.allowedTCPPorts = [ 53 ];
    networking.firewall.allowedUDPPorts = [ 53 ];

    # Set our own nameservers to ourselves
    networking.nameservers = [
      "127.0.0.1"
      "::1"
    ];
  };
}
@ -1,223 +0,0 @@
# Caddy is a reverse proxy, like Nginx or Traefik. This creates an ingress
# point from my local network or the public (via Cloudflare). Instead of a
# Caddyfile, I'm using the more expressive JSON config file format. This means
# I can source routes from other areas in my config and build the JSON file
# using the result of the expression.

# Caddy helpfully provides automatic ACME cert generation and management, but
# it requires a form of validation. We are using a custom build of Caddy
# (compiled with an overlay) to insert a plugin for managing DNS validation
# with Cloudflare's DNS API.
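
# For reference, the expression below serializes to JSON shaped roughly like
# this (hypothetical hostname, heavily abbreviated):
#
#   { "apps": { "http": { "servers": { "main": {
#       "listen": [ ":443" ],
#       "routes": [ { "match": [ { "host": [ "files.example.com" ] } ],
#                     "handle": [ { "handler": "reverse_proxy", ... } ] } ] } } } } }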

{
  config,
  pkgs,
  lib,
  ...
}:
{

  options = {
    caddy = {
      tlsPolicies = lib.mkOption {
        type = lib.types.listOf lib.types.attrs;
        description = "Caddy JSON TLS policies";
        default = [ ];
      };
      routes = lib.mkOption {
        type = lib.types.listOf lib.types.attrs;
        description = "Caddy JSON routes for http servers";
        default = [ ];
      };
      blocks = lib.mkOption {
        type = lib.types.listOf lib.types.attrs;
        description = "Caddy JSON error blocks for http servers";
        default = [ ];
      };
      cidrAllowlist = lib.mkOption {
        type = lib.types.listOf lib.types.str;
        description = "CIDR blocks to allow for requests";
        default = [ ];
      };
    };
  };

  config = lib.mkIf config.services.caddy.enable {

    # Force Caddy to 403 if not coming from allowlisted source
    caddy.cidrAllowlist = [ "127.0.0.1/32" ];
    caddy.routes = lib.mkBefore [
      {
        match = [ { not = [ { remote_ip.ranges = config.caddy.cidrAllowlist; } ]; } ];
        handle = [
          {
            handler = "static_response";
            status_code = "403";
          }
        ];
      }
    ];

    services.caddy =
      let
        default_logger_name = "other";
        roll_size_mb = 25;
        # Extract list of hostnames (fqdns) from current caddy routes
        getHostnameFromMatch = match: if (lib.hasAttr "host" match) then match.host else [ ];
        getHostnameFromRoute =
          route:
          if (lib.hasAttr "match" route) then (lib.concatMap getHostnameFromMatch route.match) else [ ];
        hostnames_non_unique = lib.concatMap getHostnameFromRoute config.caddy.routes;
        hostnames = lib.unique hostnames_non_unique;
        # Create attrset of subdomains to their fqdns
        hostname_map = builtins.listToAttrs (
          map (hostname: {
            name = builtins.head (lib.splitString "." hostname);
            value = hostname;
          }) hostnames
        );
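        # As a sketch, with hypothetical fqdns this produces something like:
        #   { files = "files.example.com"; git = "git.example.com"; }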
      in
      {
        adapter = "''"; # Required to enable JSON
        configFile = pkgs.writeText "Caddyfile" (
          builtins.toJSON {
            apps.http.servers.main = {
              listen = [ ":443" ];

              # These routes are pulled from the rest of this repo
              routes = config.caddy.routes;
              errors.routes = config.caddy.blocks;

              # Collect access logs per hostname
              logs = {
                inherit default_logger_name;
                # Invert hostnames keys and values
                logger_names = lib.mapAttrs' (name: value: {
                  name = value;
                  value = name;
                }) hostname_map;
              };
            };
            apps.http.servers.metrics = { }; # Enables Prometheus metrics
            apps.tls.automation.policies = config.caddy.tlsPolicies;

            # Setup logging to journal and files
            logging.logs =
              {
                # System logs and catch-all
                # Must be called `default` to override Caddy's built-in default logger
                default = {
                  level = "INFO";
                  encoder.format = "console";
                  writer = {
                    output = "stderr";
                  };
                  exclude = (map (hostname: "http.log.access.${hostname}") (builtins.attrNames hostname_map)) ++ [
                    "http.log.access.${default_logger_name}"
                  ];
                };
                # This is for the default access logs (anything not captured by hostname)
                other = {
                  level = "INFO";
                  encoder.format = "json";
                  writer = {
                    output = "file";
                    filename = "${config.services.caddy.logDir}/other.log";
                    roll = true;
                    inherit roll_size_mb;
                  };
                  include = [ "http.log.access.${default_logger_name}" ];
                };
                # This is for using the Caddy API, which will probably never happen
                admin = {
                  level = "INFO";
                  encoder.format = "json";
                  writer = {
                    output = "file";
                    filename = "${config.services.caddy.logDir}/admin.log";
                    roll = true;
                    inherit roll_size_mb;
                  };
                  include = [ "admin" ];
                };
                # This is for TLS cert management tracking
                tls = {
                  level = "INFO";
                  encoder.format = "json";
                  writer = {
                    output = "file";
                    filename = "${config.services.caddy.logDir}/tls.log";
                    roll = true;
                    inherit roll_size_mb;
                  };
                  include = [ "tls" ];
                };
                # This is for debugging
                debug = {
                  level = "DEBUG";
                  encoder.format = "json";
                  writer = {
                    output = "file";
                    filename = "${config.services.caddy.logDir}/debug.log";
                    roll = true;
                    roll_keep = 1;
                    inherit roll_size_mb;
                  };
                };
              }
              # These are the access logs for individual hostnames
              // (lib.mapAttrs (name: value: {
                level = "INFO";
                encoder.format = "json";
                writer = {
                  output = "file";
                  filename = "${config.services.caddy.logDir}/${name}-access.log";
                  roll = true;
                  inherit roll_size_mb;
                };
                include = [ "http.log.access.${name}" ];
              }) hostname_map)
              # We also capture just the errors separately for easy debugging
              // (lib.mapAttrs' (name: value: {
                name = "${name}-error";
                value = {
                  level = "ERROR";
                  encoder.format = "json";
                  writer = {
                    output = "file";
                    filename = "${config.services.caddy.logDir}/${name}-error.log";
                    roll = true;
                    inherit roll_size_mb;
                  };
                  include = [ "http.log.access.${name}" ];
                };
              }) hostname_map);
          }
        );
      };

    systemd.services.caddy.serviceConfig = {

      # Allows Caddy to serve lower ports (443, 80)
      AmbientCapabilities = "CAP_NET_BIND_SERVICE";

      # Prevent flooding of logs by rate-limiting
      LogRateLimitIntervalSec = "5s"; # Limit period
      LogRateLimitBurst = 100; # Limit threshold

    };

    # Required for web traffic to reach this machine
    networking.firewall.allowedTCPPorts = [
      80
      443
    ];

    # HTTP/3 QUIC uses UDP (not sure if being used)
    networking.firewall.allowedUDPPorts = [ 443 ];

    # Caddy exposes Prometheus metrics with the admin API
    # https://caddyserver.com/docs/api
    prometheus.scrapeTargets = [ "127.0.0.1:2019" ];
  };
}
@ -1,92 +0,0 @@
# Calibre-web is an E-Book library and management tool.

# - Exposed to the public via Caddy.
# - Hostname defined with config.hostnames.books
# - File directory backed up to S3 on a cron schedule.

{
  config,
  pkgs,
  lib,
  ...
}:

let

  libraryPath = "/data/books";
in
{

  options = {
    backups.calibre = lib.mkOption {
      type = lib.types.bool;
      description = "Whether to back up the Calibre library";
      default = true;
    };
  };

  config = lib.mkIf config.services.calibre-web.enable {

    services.calibre-web = {
      group = "shared";
      openFirewall = true;
      options = {
        reverseProxyAuth.enable = false;
        enableBookConversion = true;
        enableBookUploading = true;
        calibreLibrary = libraryPath;
      };
    };

    # Allow web traffic to Caddy
    caddy.routes = [
      {
        match = [ { host = [ config.hostnames.books ]; } ];
        handle = [
          {
            handler = "reverse_proxy";
            upstreams = [
              { dial = "localhost:${builtins.toString config.services.calibre-web.listen.port}"; }
            ];
            # This is required when calibre-web is behind a reverse proxy
            # https://github.com/janeczku/calibre-web/issues/19
            headers.request.add."X-Script-Name" = [ "/calibre-web" ];
          }
        ];
      }
    ];

    # Configure Cloudflare DNS to point to this machine
    services.cloudflare-dyndns.domains = [ config.hostnames.books ];

    # Grant user access to Calibre directories
    users.users.${config.user}.extraGroups = [ "calibre-web" ];

    # Run a backup on a schedule
    systemd.timers.calibre-backup = lib.mkIf config.backups.calibre {
      timerConfig = {
        OnCalendar = "*-*-* 00:00:00"; # Once per day
        Unit = "calibre-backup.service";
      };
      wantedBy = [ "timers.target" ];
    };

    # Backup Calibre data to object storage
    systemd.services.calibre-backup = lib.mkIf config.backups.calibre {
      description = "Backup Calibre data";
      environment.AWS_ACCESS_KEY_ID = config.backup.s3.accessKeyId;
      serviceConfig = {
        Type = "oneshot";
        User = "calibre-web";
        Group = "backup";
        EnvironmentFile = config.secrets.backup.dest;
      };
      script = ''
        ${pkgs.awscli2}/bin/aws s3 sync \
          ${libraryPath}/ \
          s3://${config.backup.s3.bucket}/calibre/ \
          --endpoint-url=https://${config.backup.s3.endpoint}
      '';
    };
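
    # To run an out-of-schedule backup manually (using the unit defined above):
    #   sudo systemctl start calibre-backup.service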
  };
}
@ -1,99 +0,0 @@
# Cloudflare Tunnel is a service for accessing the network even behind a
# firewall, through outbound-only requests. It works by installing an agent on
# our machines that exposes services through Cloudflare Access (Zero Trust),
# similar to something like Tailscale.

# In this case, we're using Cloudflare Tunnel to enable SSH access over a web
# browser even when outside of my network. This is probably not the safest
# choice but I feel comfortable enough with it anyway.

{ config, lib, ... }:

# First time setup:

# nix-shell -p cloudflared
# cloudflared tunnel login
# cloudflared tunnel create <host>
# nix run github:nmasur/dotfiles#encrypt-secret > private/cloudflared-<host>.age
# Paste ~/.cloudflared/<id>.json
# Set tunnel.id = "<id>"
# Remove ~/.cloudflared/

# For SSH access:
# Cloudflare Zero Trust -> Access -> Applications -> Create Application
# Service Auth -> SSH -> Select Application -> Generate Certificate
# Set ca = "<public key>"

{

  options.cloudflareTunnel = {
    enable = lib.mkEnableOption "Cloudflare Tunnel";
    id = lib.mkOption {
      type = lib.types.str;
      description = "Cloudflare tunnel ID";
    };
    credentialsFile = lib.mkOption {
      type = lib.types.path;
      description = "Cloudflare tunnel credentials file (age-encrypted)";
    };
    ca = lib.mkOption {
      type = lib.types.str;
      description = "Cloudflare tunnel CA public key";
    };
  };

  config = lib.mkIf config.cloudflareTunnel.enable {

    services.cloudflared = {
      enable = true;
      tunnels = {
        "${config.cloudflareTunnel.id}" = {
          credentialsFile = config.secrets.cloudflared.dest;
          # Catch-all if no match (should never happen anyway)
          default = "http_status:404";
          # Match from ingress of any valid server name to SSH access
          ingress = {
            "*.masu.rs" = "ssh://localhost:22";
          };
        };
      };
    };

    # Grant Cloudflare access to SSH into this server
    environment.etc = {
      "ssh/ca.pub".text = ''
        ${config.cloudflareTunnel.ca}
      '';

      # Must match the username portion of the email address in Cloudflare
      # Access
      "ssh/authorized_principals".text = ''
        ${config.user}
      '';
    };

    # Adjust SSH config to allow access from Cloudflare's certificate
    services.openssh.extraConfig = ''
      PubkeyAuthentication yes
      TrustedUserCAKeys /etc/ssh/ca.pub
      Match User '${config.user}'
        AuthorizedPrincipalsFile /etc/ssh/authorized_principals
        # if there is no existing AuthenticationMethods
        AuthenticationMethods publickey
    '';
    services.openssh.settings.Macs = [ "hmac-sha2-512" ]; # Fix for failure to find matching mac
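
    # From a client machine, a connection sketch (assumes cloudflared is
    # installed locally and the Access application above is configured):
    #   ssh -o ProxyCommand="cloudflared access ssh --hostname %h" <host>.masu.rs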

    # Create credentials file for Cloudflare
    secrets.cloudflared = {
      source = config.cloudflareTunnel.credentialsFile;
      dest = "${config.secretsDirectory}/cloudflared";
      owner = "cloudflared";
      group = "cloudflared";
      permissions = "0440";
    };
    systemd.services.cloudflared-secret = {
      requiredBy = [ "cloudflared-tunnel-${config.cloudflareTunnel.id}.service" ];
      before = [ "cloudflared-tunnel-${config.cloudflareTunnel.id}.service" ];
    };
  };
}
@ -153,39 +153,5 @@ in
      requires = [ "cloudflare-api-secret.service" ];
    };

    # Run a second copy of dyn-dns for non-proxied domains
    # Adapted from: https://github.com/NixOS/nixpkgs/blob/nixos-unstable/nixos/modules/services/networking/cloudflare-dyndns.nix
    systemd.services.cloudflare-dyndns-noproxy =
      lib.mkIf ((builtins.length config.cloudflare.noProxyDomains) > 0)
        {
          description = "CloudFlare Dynamic DNS Client (no proxy)";
          after = [
            "network.target"
            "cloudflare-api-secret.service"
          ];
          requires = [ "cloudflare-api-secret.service" ];
          wantedBy = [ "multi-user.target" ];
          startAt = "*:0/5";

          environment = {
            CLOUDFLARE_DOMAINS = toString config.cloudflare.noProxyDomains;
          };

          serviceConfig = {
            Type = "simple";
            DynamicUser = true;
            StateDirectory = "cloudflare-dyndns-noproxy";
            EnvironmentFile = config.services.cloudflare-dyndns.apiTokenFile;
            ExecStart =
              let
                args =
                  [ "--cache-file /var/lib/cloudflare-dyndns-noproxy/ip.cache" ]
                  ++ (if config.services.cloudflare-dyndns.ipv4 then [ "-4" ] else [ "-no-4" ])
                  ++ (if config.services.cloudflare-dyndns.ipv6 then [ "-6" ] else [ "-no-6" ])
                  ++ lib.optional config.services.cloudflare-dyndns.deleteMissing "--delete-missing";
              in
              "${pkgs.cloudflare-dyndns}/bin/cloudflare-dyndns ${toString args}";
          };
        };
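
    # The resulting ExecStart looks roughly like this (exact flags depend on
    # the ipv4/ipv6/deleteMissing options):
    #   cloudflare-dyndns --cache-file /var/lib/cloudflare-dyndns-noproxy/ip.cache -4 -no-6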
  };
}
@ -1,68 +0,0 @@
{
  config,
  pkgs,
  lib,
  ...
}:
let
  dataDir = "/var/lib/filebrowser";

  settings = {
    port = 8020;
    baseURL = "";
    address = "";
    log = "stdout";
    database = "${dataDir}/filebrowser.db";
    root = "";
    "auth.method" = "json";
    username = config.user;
    # Generate password: htpasswd -nBC 10 "" | tr -d ':\n'
    password = "$2y$10$ze1cMob0k6pnXRjLowYfZOVZWg4G.dsPtH3TohbUeEbI0sdkG9.za";
  };

in
{

  options.filebrowser.enable = lib.mkEnableOption "Filebrowser";

  config = lib.mkIf config.filebrowser.enable {

    environment.etc."filebrowser/.filebrowser.json".text = builtins.toJSON settings;
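
    # The rendered /etc/filebrowser/.filebrowser.json then looks roughly like
    # this (builtins.toJSON emits keys in sorted order):
    #   {"address":"","auth.method":"json","baseURL":"","database":"/var/lib/filebrowser/filebrowser.db","log":"stdout", ...}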

    systemd.services.filebrowser = lib.mkIf config.filebrowser.enable {
      description = "Filebrowser cloud file services";
      after = [ "network.target" ];
      wantedBy = [ "multi-user.target" ];
      startLimitIntervalSec = 14400;
      startLimitBurst = 10;
      serviceConfig = {
        ExecStart = "${pkgs.filebrowser}/bin/filebrowser";
        DynamicUser = true;
        Group = "shared";
        ReadWritePaths = [ dataDir ];
        StateDirectory = [ "filebrowser" ];
        Restart = "on-failure";
        RestartPreventExitStatus = 1;
        RestartSec = "5s";
      };
      path = [ pkgs.getent ]; # Fix: getent not found in $PATH
    };

    caddy.routes = [
      {
        match = [ { host = [ config.hostnames.files ]; } ];
        handle = [
          {
            handler = "reverse_proxy";
            upstreams = [ { dial = "localhost:${builtins.toString settings.port}"; } ];
          }
        ];
      }
    ];

    # Configure Cloudflare DNS to point to this machine
    services.cloudflare-dyndns.domains = [ config.hostnames.files ];

  };

}
@ -1,65 +0,0 @@
# Gitea Actions is a CI/CD service for the Gitea source code server, meaning it
# allows us to run code operations (such as testing or deploys) when our git
# repositories are updated. Any machine can act as a Gitea Action Runner, so
# the Runners don't necessarily need to be running Gitea. All we need is an API
# key for Gitea to connect to it and register ourselves as a Runner.

{
  config,
  pkgs,
  lib,
  ...
}:

{
  options.giteaRunner.enable = lib.mkEnableOption "Gitea Actions runner";

  config = lib.mkIf config.giteaRunner.enable {

    services.gitea-actions-runner.instances.${config.networking.hostName} = {
      enable = true;
      labels = [
        # Provide a Debian base with NodeJS for actions
        # "debian-latest:docker://node:18-bullseye"
        # Fake the Ubuntu name, because Node provides no Ubuntu builds
        # "ubuntu-latest:docker://node:18-bullseye"
        # Provide native execution on the host using below packages
        "native:host"
      ];
      hostPackages = with pkgs; [
        bash
        coreutils
        curl
        gawk
        gitMinimal
        gnused
        nodejs
        wget
      ];
      name = config.networking.hostName;
      url = "https://${config.hostnames.git}";
      tokenFile = config.secrets.giteaRunnerToken.dest;
    };

    # Make sure the runner doesn't start until after Gitea
    systemd.services."gitea-runner-${config.networking.hostName}".after = [ "gitea.service" ];

    # API key needed to connect to Gitea
    secrets.giteaRunnerToken = {
      source = ../../../private/gitea-runner-token.age; # TOKEN=xyz
      dest = "${config.secretsDirectory}/gitea-runner-token";
    };
    systemd.services.giteaRunnerToken-secret = {
      requiredBy = [
        "gitea-runner-${
          config.services.gitea-actions-runner.instances.${config.networking.hostName}.name
        }.service"
      ];
      before = [
        "gitea-runner-${
          config.services.gitea-actions-runner.instances.${config.networking.hostName}.name
        }.service"
      ];
    };
  };
}
@ -1,153 +0,0 @@
{
  config,
  pkgs,
  lib,
  ...
}:

let
  giteaPath = "/var/lib/gitea"; # Default service directory
in
{

  config = lib.mkIf config.services.gitea.enable {
    services.gitea = {
      database.type = "sqlite3";
      settings = {
        actions.ENABLED = true;
        metrics.ENABLED = true;
        repository = {
          # Pushing to a repo that doesn't exist automatically creates one as
          # private.
          DEFAULT_PUSH_CREATE_PRIVATE = true;

          # Allow git over HTTP.
          DISABLE_HTTP_GIT = false;

          # Allow requests hitting the specified hostname.
          ACCESS_CONTROL_ALLOW_ORIGIN = config.hostnames.git;

          # Automatically create viable users/orgs on push.
          ENABLE_PUSH_CREATE_USER = true;
          ENABLE_PUSH_CREATE_ORG = true;

          # Default when creating new repos.
          DEFAULT_BRANCH = "main";
        };
        server = {
          HTTP_PORT = 3001;
          HTTP_ADDRESS = "127.0.0.1";
          ROOT_URL = "https://${config.hostnames.git}/";
          SSH_PORT = 22;
          START_SSH_SERVER = false; # Use sshd instead
          DISABLE_SSH = false;
        };

        # Don't allow public users to register accounts.
        service.DISABLE_REGISTRATION = true;

        # Force using HTTPS for all session access.
        session.COOKIE_SECURE = true;

        # Hide users' emails.
        ui.SHOW_USER_EMAIL = false;
      };
      extraConfig = null;
    };

    users.users.${config.user}.extraGroups = [ "gitea" ];

    caddy.routes = [
      # Prevent public access to Prometheus metrics.
      {
        match = [
          {
            host = [ config.hostnames.git ];
            path = [ "/metrics*" ];
          }
        ];
        handle = [
          {
            handler = "static_response";
            status_code = "403";
          }
        ];
      }
      # Allow access to primary server.
      {
        match = [ { host = [ config.hostnames.git ]; } ];
        handle = [
          {
            handler = "reverse_proxy";
            upstreams = [
              { dial = "localhost:${builtins.toString config.services.gitea.settings.server.HTTP_PORT}"; }
            ];
          }
        ];
      }
    ];

    # Configure Cloudflare DNS to point to this machine
    services.cloudflare-dyndns.domains = [ config.hostnames.git ];

    # Scrape the metrics endpoint for Prometheus.
    prometheus.scrapeTargets = [
      "127.0.0.1:${builtins.toString config.services.gitea.settings.server.HTTP_PORT}"
    ];

    ## Backup config

    # Open to groups, allowing for backups
    systemd.services.gitea.serviceConfig.StateDirectoryMode = lib.mkForce "0770";
    systemd.tmpfiles.rules = [ "f ${giteaPath}/data/gitea.db 0660 gitea gitea" ];

    # Allow litestream and gitea to share a sqlite database
    users.users.litestream.extraGroups = [ "gitea" ];
    users.users.gitea.extraGroups = [ "litestream" ];

    # Backup sqlite database with litestream
    services.litestream = {
      settings = {
        dbs = [
          {
            path = "${giteaPath}/data/gitea.db";
            replicas = [ { url = "s3://${config.backup.s3.bucket}.${config.backup.s3.endpoint}/gitea"; } ];
          }
        ];
      };
    };
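
    # Restoring the database from the replica would look roughly like this
    # (litestream CLI, with gitea stopped):
    #   litestream restore -o /var/lib/gitea/data/gitea.db "s3://<bucket>.<endpoint>/gitea"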

    # Don't start litestream unless gitea is up
    systemd.services.litestream = {
      after = [ "gitea.service" ];
      requires = [ "gitea.service" ];
    };

    # Run a repository file backup on a schedule
    systemd.timers.gitea-backup = lib.mkIf (config.backup.s3.endpoint != null) {
      timerConfig = {
        OnCalendar = "*-*-* 00:00:00"; # Once per day
        Unit = "gitea-backup.service";
      };
      wantedBy = [ "timers.target" ];
    };

    # Backup Gitea repos to object storage
    systemd.services.gitea-backup = lib.mkIf (config.backup.s3.endpoint != null) {
      description = "Backup Gitea data";
      environment.AWS_ACCESS_KEY_ID = config.backup.s3.accessKeyId;
      serviceConfig = {
        Type = "oneshot";
        User = "gitea";
        Group = "backup";
        EnvironmentFile = config.secrets.backup.dest;
      };
      script = ''
        ${pkgs.awscli2}/bin/aws s3 sync --exclude "*/gitea.db*" \
          ${giteaPath}/ \
          s3://${config.backup.s3.bucket}/gitea-data/ \
          --endpoint-url=https://${config.backup.s3.endpoint}
      '';
    };
  };
}
@ -1,25 +0,0 @@
# GPG is an encryption tool. This isn't really in use for me at the moment.

{
  config,
  pkgs,
  lib,
  ...
}:
{

  options.gpg.enable = lib.mkEnableOption "GnuPG encryption";

  config.home-manager.users.${config.user} = lib.mkIf config.gpg.enable {
    programs.gpg.enable = true;
    services.gpg-agent = {
      enable = true;
      defaultCacheTtl = 86400; # Resets when used
      defaultCacheTtlSsh = 86400; # Resets when used
      maxCacheTtl = 34560000; # Hard upper limit, regardless of use
      maxCacheTtlSsh = 34560000; # Hard upper limit, regardless of use
      pinentryFlavor = "tty";
    };
    home = lib.mkIf config.gui.enable { packages = with pkgs; [ pinentry ]; };
  };
}
File diff suppressed because it is too large
@ -1,22 +0,0 @@
{ config, ... }:
{

  # Wait for secret to be placed on the machine
  systemd.services.wait-for-identity = {
    description = "Wait until identity file exists on the machine";
    wantedBy = [ "multi-user.target" ];
    serviceConfig = {
      Type = "oneshot";
      RemainAfterExit = true;
    };
    script = ''
      for i in $(seq 1 10); do
        if [ -f ${config.identityFile} ]; then
          echo "Identity file found."
          exit 0
        fi
        sleep 6
      done
      # Fail if the file never appeared, so dependent units don't start
      echo "Identity file not found." >&2
      exit 1
    '';
  };
}
@ -1,48 +0,0 @@
{ config, lib, ... }:

{

  config = lib.mkIf config.services.immich.enable {

    services.immich = {
      port = 2283;
      group = "shared";
      database.enable = true;
      redis.enable = true;
      machine-learning.enable = true;
      machine-learning.environment = { };
      mediaLocation = "/data/images";
      secretsFile = null;
      settings.server.externalDomain = "https://${config.hostnames.photos}";
      environment = {
        IMMICH_ENV = "production";
        IMMICH_LOG_LEVEL = "log";
        NO_COLOR = "false";
        IMMICH_TRUSTED_PROXIES = "127.0.0.1";
      };
    };

    caddy.routes = [
      {
        match = [ { host = [ config.hostnames.photos ]; } ];
        handle = [
          {
            handler = "reverse_proxy";
            upstreams = [ { dial = "localhost:${builtins.toString config.services.immich.port}"; } ];
          }
        ];
      }
    ];

    # Configure Cloudflare DNS to point to this machine
    services.cloudflare-dyndns.domains = [ config.hostnames.photos ];

    # Point localhost to the local domain
    networking.hosts."127.0.0.1" = [ config.hostnames.photos ];

    # Backups
    services.restic.backups.default.paths = [ "/data/images" ];

  };

}
@ -1,65 +0,0 @@
# InfluxDB is a timeseries database similar to Prometheus. While
# VictoriaMetrics can also act as an InfluxDB, this version of it allows for
# infinite retention separate from our other metrics, which can be nice for
# recording health information, for example.

{ config, lib, ... }:
{

  config = lib.mkIf config.services.influxdb2.enable {

    services.influxdb2 = {
      provision = {
        enable = true;
        initialSetup = {
          bucket = "default";
          organization = "main";
          passwordFile = config.secrets.influxdb2Password.dest;
          retention = 0; # Keep data forever
          tokenFile = config.secrets.influxdb2Token.dest;
          username = "admin";
        };
      };
      settings = { };
    };

    # Create credentials file for InfluxDB admin
    secrets.influxdb2Password = lib.mkIf config.services.influxdb2.enable {
      source = ../../../private/influxdb2-password.age;
      dest = "${config.secretsDirectory}/influxdb2-password";
      owner = "influxdb2";
      group = "influxdb2";
      permissions = "0440";
    };
    systemd.services.influxdb2Password-secret = lib.mkIf config.services.influxdb2.enable {
      requiredBy = [ "influxdb2.service" ];
      before = [ "influxdb2.service" ];
    };
    secrets.influxdb2Token = lib.mkIf config.services.influxdb2.enable {
      source = ../../../private/influxdb2-token.age;
      dest = "${config.secretsDirectory}/influxdb2-token";
      owner = "influxdb2";
      group = "influxdb2";
      permissions = "0440";
    };
    systemd.services.influxdb2Token-secret = lib.mkIf config.services.influxdb2.enable {
      requiredBy = [ "influxdb2.service" ];
      before = [ "influxdb2.service" ];
    };

    caddy.routes = lib.mkIf config.services.influxdb2.enable [
      {
        match = [ { host = [ config.hostnames.influxdb ]; } ];
        handle = [
          {
            handler = "reverse_proxy";
            upstreams = [ { dial = "localhost:8086"; } ];
          }
        ];
      }
    ];

    # Configure Cloudflare DNS to point to this machine
    services.cloudflare-dyndns.domains = [ config.hostnames.influxdb ];
  };
}
@ -1,35 +0,0 @@
{ config, lib, ... }:
{

  config = lib.mkIf config.services.thelounge.enable {

    services.thelounge = {
      public = false;
      port = 9000;
      extraConfig = {
        reverseProxy = true;
        maxHistory = 10000;
      };
    };

    # Adding new users:
    # nix shell nixpkgs#thelounge
    # sudo su - thelounge -s /bin/sh -c "thelounge add myuser"

    # Allow web traffic to Caddy
    caddy.routes = [
      {
        match = [ { host = [ config.hostnames.irc ]; } ];
        handle = [
          {
            handler = "reverse_proxy";
            upstreams = [ { dial = "localhost:${builtins.toString config.services.thelounge.port}"; } ];
          }
        ];
      }
    ];

    # Configure Cloudflare DNS to point to this machine
    services.cloudflare-dyndns.domains = [ config.hostnames.irc ];
  };
}
@ -1,79 +0,0 @@
# Jellyfin is a self-hosted video streaming service. This means I can play my
# server's videos from a webpage, mobile app, or TV client.

{
  config,
  pkgs,
  lib,
  ...
}:
{

  config = lib.mkIf config.services.jellyfin.enable {

    services.jellyfin.group = "shared";
    users.users.jellyfin = {
      isSystemUser = true;
    };

    caddy.routes = [
      # Prevent public access to Prometheus metrics.
      {
        match = [
          {
            host = [ config.hostnames.stream ];
            path = [ "/metrics*" ];
          }
        ];
        handle = [
          {
            handler = "static_response";
            status_code = "403";
          }
        ];
      }
      # Allow access to normal route.
      {
        match = [ { host = [ config.hostnames.stream ]; } ];
        handle = [
          {
            handler = "reverse_proxy";
            upstreams = [ { dial = "localhost:8096"; } ];
          }
        ];
      }
    ];

    # Configure Cloudflare DNS to point to this machine
    services.cloudflare-dyndns.domains = [ config.hostnames.stream ];

    # Create videos directory, allow anyone in Jellyfin group to manage it
    systemd.tmpfiles.rules = [
      "d /var/lib/jellyfin 0775 jellyfin shared"
      "d /var/lib/jellyfin/library 0775 jellyfin shared"
    ];

    # Enable VA-API for hardware transcoding
    hardware.graphics = {
      enable = true;
      extraPackages = [ pkgs.libva ];
    };
    environment.systemPackages = [ pkgs.libva-utils ];
    environment.variables = {
      # VAAPI and VDPAU config for accelerated video.
      # See https://wiki.archlinux.org/index.php/Hardware_video_acceleration
      "VDPAU_DRIVER" = "radeonsi";
      "LIBVA_DRIVER_NAME" = "radeonsi";
    };
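
    # To confirm VA-API acceleration works on the host (vainfo comes from
    # libva-utils above):
    #   vainfo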
    users.users.jellyfin.extraGroups = [
      "render"
      "video"
    ]; # Access to /dev/dri

    # Fix issue where Jellyfin-created directories don't allow access for media group
    systemd.services.jellyfin.serviceConfig.UMask = lib.mkForce "0007";

    # Requires MetricsEnable to be true in /var/lib/jellyfin/config/system.xml
    prometheus.scrapeTargets = [ "127.0.0.1:8096" ];
  };
}
@ -1,40 +0,0 @@
# Keybase is an encrypted communications tool with a synchronized encrypted
# filestore that can be mounted onto a machine's filesystem.

{
  config,
  pkgs,
  lib,
  ...
}:
{

  options.keybase.enable = lib.mkEnableOption "Keybase";

  config = lib.mkIf config.keybase.enable {

    home-manager.users.${config.user} = lib.mkIf config.keybase.enable {

      services.keybase.enable = true;
      services.kbfs = {
        enable = true;
        mountPoint = "keybase";
      };

      # https://github.com/nix-community/home-manager/issues/4722
      systemd.user.services.kbfs.Service.PrivateTmp = lib.mkForce false;

      home.packages = [ (lib.mkIf config.gui.enable pkgs.keybase-gui) ];
      home.file =
        let
          ignorePatterns = ''
            keybase/
            kbfs/'';
        in
        {
          ".rgignore".text = ignorePatterns;
          ".fdignore".text = ignorePatterns;
        };
    };
  };
}
@ -1,18 +0,0 @@
# Mullvad is a VPN service. This isn't in use for me at the moment.

{
  config,
  pkgs,
  lib,
  ...
}:
{

  options.mullvad.enable = lib.mkEnableOption "Mullvad VPN";

  config = lib.mkIf config.mullvad.enable {

    services.mullvad-vpn.enable = true;
    environment.systemPackages = [ pkgs.mullvad-vpn ];
  };
}
@ -1,40 +0,0 @@
# n8n is an automation integration tool for connecting data from services
# together with triggers.

{ config, lib, ... }:
{

  config = lib.mkIf config.services.n8n.enable {

    unfreePackages = [ "n8n" ];

    services.n8n = {
      webhookUrl = "https://${config.hostnames.n8n}";
      settings = {
        listen_address = "127.0.0.1";
        port = 5678;
      };
    };

    systemd.services.n8n.environment = {
      N8N_EDITOR_BASE_URL = config.services.n8n.webhookUrl;
    };

    # Configure Cloudflare DNS to point to this machine
    services.cloudflare-dyndns.domains = [ config.hostnames.n8n ];

    # Allow web traffic to Caddy
    caddy.routes = [
      {
        match = [ { host = [ config.hostnames.n8n ]; } ];
        handle = [
          {
            handler = "reverse_proxy";
            upstreams = [ { dial = "localhost:${builtins.toString config.services.n8n.settings.port}"; } ];
          }
        ];
      }
    ];
  };
}
@ -1,20 +0,0 @@
# Netdata is an out-of-the-box monitoring tool that exposes many different
# metrics. Not currently in use, in favor of VictoriaMetrics and Grafana.

{ config, lib, ... }:
{

  options.netdata.enable = lib.mkEnableOption "Netdata metrics";

  config = lib.mkIf config.netdata.enable {

    services.netdata = {
      enable = true;

      # Disable local dashboard (unsecured)
      config = {
        web.mode = "none";
      };
    };
  };
}
@ -1,228 +0,0 @@
{
  config,
  pkgs,
  lib,
  ...
}:
{

  config = lib.mkIf config.services.nextcloud.enable {

    services.nextcloud = {
      package = pkgs.nextcloud30; # Required to specify
      configureRedis = true;
      datadir = "/data/nextcloud";
      database.createLocally = true;
      https = true;
      hostName = "localhost";
      maxUploadSize = "50G";
      config = {
        adminpassFile = config.secrets.nextcloud.dest;
        dbtype = "pgsql";
      };
      settings = {
        default_phone_region = "US";
        # Allow access when hitting either of these hosts or IPs
        trusted_domains = [ config.hostnames.content ];
        trusted_proxies = [ "127.0.0.1" ];
        maintenance_window_start = 4; # Run jobs at 4am UTC
        log_type = "file";
        loglevel = 1; # Include all actions in the log
      };
      extraAppsEnable = true;
      extraApps = {
        calendar = config.services.nextcloud.package.packages.apps.calendar;
        contacts = config.services.nextcloud.package.packages.apps.contacts;
        # These apps are defined and pinned by overlay in flake.
        news = pkgs.nextcloudApps.news;
        external = pkgs.nextcloudApps.external;
        cookbook = pkgs.nextcloudApps.cookbook;
        snappymail = pkgs.nextcloudApps.snappymail;
      };
      phpOptions = {
        "opcache.interned_strings_buffer" = "16";
        "output_buffering" = "0";
      };
    };

    # Don't let Nginx use main ports (using Caddy instead)
    services.nginx.enable = false;

    services.phpfpm.pools.nextcloud.settings = {
      "listen.owner" = config.services.caddy.user;
      "listen.group" = config.services.caddy.group;
    };
    users.users.caddy.extraGroups = [ "nextcloud" ];

    # Serve Nextcloud through Caddy in place of Nginx
    caddy.routes = [
      {
        match = [ { host = [ config.hostnames.content ]; } ];
        handle = [
          {
            handler = "subroute";
            routes = [
              # Sets variables and headers
              {
                handle = [
                  {
                    handler = "vars";
                    # Grab the webroot out of the written config
                    # The webroot is a symlinked combined Nextcloud directory
                    root = config.services.nginx.virtualHosts.${config.services.nextcloud.hostName}.root;
                  }
                  {
                    handler = "headers";
                    response.set.Strict-Transport-Security = [ "max-age=31536000;" ];
                  }
                ];
              }
              # Reroute carddav and caldav traffic
              {
                match = [
                  {
                    path = [
                      "/.well-known/carddav"
                      "/.well-known/caldav"
                    ];
                  }
                ];
                handle = [
                  {
                    handler = "static_response";
                    headers = {
                      Location = [ "/remote.php/dav" ];
                    };
                    status_code = 301;
                  }
                ];
              }
              # Block traffic to sensitive files
              {
                match = [
                  {
                    path = [
                      "/.htaccess"
                      "/data/*"
                      "/config/*"
                      "/db_structure"
                      "/.xml"
                      "/README"
                      "/3rdparty/*"
                      "/lib/*"
                      "/templates/*"
                      "/occ"
                      "/console.php"
                    ];
                  }
                ];
                handle = [
                  {
                    handler = "static_response";
                    status_code = 404;
                  }
                ];
              }
              # Redirect index.php to the homepage
              {
                match = [
                  {
                    file = {
                      try_files = [ "{http.request.uri.path}/index.php" ];
                    };
                    not = [ { path = [ "*/" ]; } ];
                  }
                ];
                handle = [
                  {
                    handler = "static_response";
                    headers = {
                      Location = [ "{http.request.orig_uri.path}/" ];
                    };
                    status_code = 308;
                  }
                ];
              }
              # Rewrite paths to be relative
              {
                match = [
                  {
                    file = {
                      split_path = [ ".php" ];
                      try_files = [
                        "{http.request.uri.path}"
                        "{http.request.uri.path}/index.php"
                        "index.php"
                      ];
                    };
                  }
                ];
                handle = [
                  {
                    handler = "rewrite";
                    uri = "{http.matchers.file.relative}";
                  }
                ];
              }
              # Send all PHP traffic to Nextcloud PHP service
              {
                match = [ { path = [ "*.php" ]; } ];
                handle = [
                  {
                    handler = "reverse_proxy";
                    transport = {
                      protocol = "fastcgi";
                      split_path = [ ".php" ];
                    };
                    upstreams = [ { dial = "unix//run/phpfpm/nextcloud.sock"; } ];
                  }
                ];
              }
              # Finally, send the rest to the file server
              { handle = [ { handler = "file_server"; } ]; }
            ];
          }
        ];
        terminal = true;
      }
    ];
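
    # A quick sanity check of the dav redirect above (hypothetical hostname):
    #   curl -sI https://content.example.com/.well-known/carddav
    #   # expect a 301 with "location: /remote.php/dav"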

    # Configure Cloudflare DNS to point to this machine
    services.cloudflare-dyndns.domains = [ config.hostnames.content ];

    # Create credentials file for nextcloud
    secrets.nextcloud = {
      source = ../../../private/nextcloud.age;
      dest = "${config.secretsDirectory}/nextcloud";
      owner = "nextcloud";
      group = "nextcloud";
      permissions = "0440";
    };
    systemd.services.nextcloud-secret = {
      requiredBy = [ "nextcloud-setup.service" ];
      before = [ "nextcloud-setup.service" ];
    };

    # Grant user access to Nextcloud directories
    users.users.${config.user}.extraGroups = [ "nextcloud" ];

    # Open to groups, allowing for backups
    systemd.services.phpfpm-nextcloud.serviceConfig.StateDirectoryMode = lib.mkForce "0770";

    # Log metrics to prometheus
    networking.hosts."127.0.0.1" = [ config.hostnames.content ];
    services.prometheus.exporters.nextcloud = {
      enable = config.prometheus.exporters.enable;
      username = config.services.nextcloud.config.adminuser;
      url = "https://${config.hostnames.content}";
      passwordFile = config.services.nextcloud.config.adminpassFile;
    };
    prometheus.scrapeTargets = [
      "127.0.0.1:${builtins.toString config.services.prometheus.exporters.nextcloud.port}"
    ];
    # Allows nextcloud-exporter to read passwordFile
    users.users.nextcloud-exporter.extraGroups =
      lib.mkIf config.services.prometheus.exporters.nextcloud.enable
        [ "nextcloud" ];
  };
}
@ -1,33 +0,0 @@
{ config, lib, ... }:

{

  config = lib.mkIf config.services.ntfy-sh.enable {
    services.ntfy-sh = {
      settings = {
        base-url = "https://${config.hostnames.notifications}";
        upstream-base-url = "https://ntfy.sh";
        listen-http = ":8333";
        behind-proxy = true;
        auth-default-access = "deny-all";
        auth-file = "/var/lib/ntfy-sh/user.db";
      };
    };
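
    # With auth-default-access = "deny-all", users must be created manually,
    # e.g. with the ntfy CLI on the server (pointing it at the auth-file above):
    #   sudo ntfy user add --role=admin myuser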

    caddy.routes = [
      {
        match = [ { host = [ config.hostnames.notifications ]; } ];
        handle = [
          {
            handler = "reverse_proxy";
            upstreams = [ { dial = "localhost${config.services.ntfy-sh.settings.listen-http}"; } ];
          }
        ];
      }
    ];

    # Configure Cloudflare DNS to point to this machine
    services.cloudflare-dyndns.domains = [ config.hostnames.notifications ];

  };
}
@ -1,65 +0,0 @@
# Paperless-ngx is a document scanning and management solution.

{ config, lib, ... }:
{

  config = lib.mkIf config.services.paperless.enable {

    services.paperless = {
      mediaDir = "/data/generic/paperless";
      passwordFile = config.secrets.paperless.dest;
      settings = {
        PAPERLESS_OCR_USER_ARGS = builtins.toJSON { invalidate_digital_signatures = true; };

        # Enable if changing the path name in Caddy
        # PAPERLESS_FORCE_SCRIPT_NAME = "/paperless";
        # PAPERLESS_STATIC_URL = "/paperless/static/";
      };
    };

    # Allow Nextcloud and user to see files
    users.users.nextcloud.extraGroups = lib.mkIf config.services.nextcloud.enable [ "paperless" ];
    users.users.${config.user}.extraGroups = [ "paperless" ];

    caddy.routes = [
      {
        match = [
          {
            host = [ config.hostnames.paperless ];
            # path = [ "/paperless*" ]; # Change path name in Caddy
          }
        ];
        handle = [
          {
            handler = "reverse_proxy";
            upstreams = [ { dial = "localhost:${builtins.toString config.services.paperless.port}"; } ];
          }
        ];
      }
    ];

    # Configure Cloudflare DNS to point to this machine
    services.cloudflare-dyndns.domains = [ config.hostnames.paperless ];

    secrets.paperless = {
      source = ../../../private/prometheus.age;
      dest = "${config.secretsDirectory}/paperless";
      owner = "paperless";
      group = "paperless";
      permissions = "0440";
    };
    systemd.services.paperless-secret = {
      requiredBy = [ "paperless.service" ];
      before = [ "paperless.service" ];
    };

    # Fix paperless shared permissions
    systemd.services.paperless-web.serviceConfig.UMask = lib.mkForce "0026";
    systemd.services.paperless-scheduler.serviceConfig.UMask = lib.mkForce "0026";
    systemd.services.paperless-task-queue.serviceConfig.UMask = lib.mkForce "0026";

    # Backups
    services.restic.backups.default.paths = [ "/data/generic/paperless/documents" ];

  };
}
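The commented-out settings and the commented-out path matcher are two halves of the same change; a sketch of the subpath variant, assuming both are uncommented together:

# Sketch: serving Paperless under /paperless instead of its own hostname
services.paperless.settings = {
  PAPERLESS_FORCE_SCRIPT_NAME = "/paperless";
  PAPERLESS_STATIC_URL = "/paperless/static/";
};
# ...with path = [ "/paperless*" ]; enabled in the Caddy route match.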
@ -1,36 +0,0 @@
{
  config,
  pkgs,
  lib,
  ...
}:
{

  services.postgresql = {
    package = pkgs.postgresql_15;
    settings = { };
    authentication = ''
      local all postgres peer map=root
      local all admin peer map=admin
    '';
    identMap = ''
      root postgres postgres
      root root postgres
      admin ${config.user} admin
    '';
    ensureUsers = [
      {
        name = "admin";
        ensureClauses = {
          createdb = true;
          createrole = true;
          login = true;
        };
      }
    ];
  };

  home-manager.users.${config.user}.home.packages = lib.mkIf config.services.postgresql.enable [
    pkgs.pgcli # Postgres client with autocomplete
  ];
}
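Peer authentication plus the ident map above means local Unix users assume database roles without passwords: root connects as the postgres superuser, and the primary user connects as admin (e.g. psql -U admin). A hypothetical companion setting using the same module, with the database name as a placeholder:

# Hypothetical: pre-create a database through the same module
services.postgresql.ensureDatabases = [ "example" ];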
@ -1,119 +0,0 @@
# Prometheus is a timeseries database that exposes system and service metrics
# for use in visualizing, monitoring, and alerting (with Grafana).

# Instead of running traditional Prometheus, I generally run VictoriaMetrics as
# a more efficient drop-in replacement.

{
  config,
  pkgs,
  lib,
  ...
}:
{

  options.prometheus = {
    exporters.enable = lib.mkEnableOption "Prometheus exporters";
    scrapeTargets = lib.mkOption {
      type = lib.types.listOf lib.types.str;
      description = "Prometheus scrape targets";
      default = [ ];
    };
  };

  config =
    let

      # If hosting Grafana, host local Prometheus and listen for inbound jobs. If
      # not hosting Grafana, send remote Prometheus writes to primary host.
      isServer = config.services.grafana.enable;
    in
    {

      # Turn on exporters if any Prometheus scraper is running
      prometheus.exporters.enable = builtins.any (x: x) [
        config.services.prometheus.enable
        config.services.victoriametrics.enable
        config.services.vmagent.enable
      ];

      prometheus.scrapeTargets = [
        "127.0.0.1:${builtins.toString config.services.prometheus.exporters.node.port}"
        "127.0.0.1:${builtins.toString config.services.prometheus.exporters.systemd.port}"
        "127.0.0.1:${builtins.toString config.services.prometheus.exporters.process.port}"
      ];

      services.prometheus = {
        exporters.node.enable = config.prometheus.exporters.enable;
        exporters.node.enabledCollectors = [ ];
        exporters.node.disabledCollectors = [ "cpufreq" ];
        exporters.systemd.enable = config.prometheus.exporters.enable;
        exporters.process.enable = config.prometheus.exporters.enable;
        exporters.process.settings.process_names = [
          # Remove nix store path from process name
          {
            name = "{{.Matches.Wrapped}} {{ .Matches.Args }}";
            cmdline = [ "^/nix/store[^ ]*/(?P<Wrapped>[^ /]*) (?P<Args>.*)" ];
          }
        ];
        extraFlags = lib.mkIf isServer [ "--web.enable-remote-write-receiver" ];
        scrapeConfigs = [
          {
            job_name = config.networking.hostName;
            static_configs = [ { targets = config.prometheus.scrapeTargets; } ];
          }
        ];
        webExternalUrl = lib.mkIf isServer "https://${config.hostnames.prometheus}";
        # Web config file: https://prometheus.io/docs/prometheus/latest/configuration/https/
        webConfigFile = lib.mkIf isServer (
          (pkgs.formats.yaml { }).generate "webconfig.yml" {
            basic_auth_users = {
              # Generate password: htpasswd -nBC 10 "" | tr -d ':\n'
              # Encrypt and place in private/prometheus.age
              "prometheus" = "$2y$10$r7FWHLHTGPAY312PdhkPEuvb05aGn9Nk1IO7qtUUUjmaDl35l6sLa";
            };
          }
        );
        remoteWrite = lib.mkIf (!isServer) [
          {
            name = config.networking.hostName;
            url = "https://${config.hostnames.prometheus}/api/v1/write";
            basic_auth = {
              # Uses password hashed with bcrypt above
              username = "prometheus";
              password_file = config.secrets.prometheus.dest;
            };
          }
        ];
      };

      # Create credentials file for remote Prometheus push
      secrets.prometheus = lib.mkIf (config.services.prometheus.enable && !isServer) {
        source = ../../../private/prometheus.age;
        dest = "${config.secretsDirectory}/prometheus";
        owner = "prometheus";
        group = "prometheus";
        permissions = "0440";
      };
      systemd.services.prometheus-secret = lib.mkIf (config.services.prometheus.enable && !isServer) {
        requiredBy = [ "prometheus.service" ];
        before = [ "prometheus.service" ];
      };

      caddy.routes = lib.mkIf (config.services.prometheus.enable && isServer) [
        {
          match = [ { host = [ config.hostnames.prometheus ]; } ];
          handle = [
            {
              handler = "reverse_proxy";
              upstreams = [ { dial = "localhost:${builtins.toString config.services.prometheus.port}"; } ];
            }
          ];
        }
      ];

      # Configure Cloudflare DNS to point to this machine
      services.cloudflare-dyndns.domains =
        if (config.services.prometheus.enable && isServer) then [ config.hostnames.prometheus ] else [ ];
    };
}
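Other modules in this commit feed the scrapeTargets option declared above rather than touching scrapeConfigs directly; the Nextcloud module's exporter registration is representative:

prometheus.scrapeTargets = [
  "127.0.0.1:${builtins.toString config.services.prometheus.exporters.nextcloud.port}"
];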
@ -1,33 +0,0 @@
# Samba is a Windows-compatible file-sharing service.

{ config, lib, ... }:
{

  config = {

    services.samba = lib.mkIf config.services.samba.enable {
      openFirewall = true;
      settings.data = {
        path = "/data";
        browseable = "yes";
        "read only" = "no";
        "guest ok" = "no";
        comment = "NAS";
      };
    };

    # Allows Windows clients to discover server
    services.samba-wsdd.enable = true;
    networking.firewall.allowedTCPPorts = [ 5357 ];
    networking.firewall.allowedUDPPorts = [ 3702 ];

    # Allow client browsing Samba and virtual filesystem shares
    services.gvfs = lib.mkIf (config.gui.enable && config.nautilus.enable) { enable = true; };

    # # Permissions required to mount Samba with GVFS, if not using desktop environment
    # environment.systemPackages = lib.mkIf (config.gui.enable
    #   && config.nautilus.enable
    #   && config.services.xserver.windowManager.i3.enable)
    #   [ pkgs.lxqt.lxqt-policykit ];
  };
}
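A hypothetical client-side mount of the data share above; the server address, mountpoint, and credentials path are all placeholders:

fileSystems."/mnt/data" = {
  device = "//server.local/data"; # placeholder server
  fsType = "cifs";
  options = [
    "credentials=/etc/nixos/smb-secrets" # placeholder path
    "noauto"
    "x-systemd.automount"
  ];
};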
@ -1,102 +0,0 @@
# Secrets management method taken from here:
# https://xeiaso.net/blog/nixos-encrypted-secrets-2021-01-20

# In my case, I pre-encrypt my secrets and commit them to git.

{
  config,
  pkgs,
  lib,
  ...
}:
{

  options = {

    secretsDirectory = lib.mkOption {
      type = lib.types.str;
      description = "Default path to place secrets.";
      default = "/var/private";
    };

    secrets = lib.mkOption {
      type = lib.types.attrsOf (
        lib.types.submodule {
          options = {
            source = lib.mkOption {
              type = lib.types.path;
              description = "Path to encrypted secret.";
            };
            dest = lib.mkOption {
              type = lib.types.str;
              description = "Resulting path for decrypted secret.";
            };
            owner = lib.mkOption {
              default = "root";
              type = lib.types.str;
              description = "User to own the secret.";
            };
            group = lib.mkOption {
              default = "root";
              type = lib.types.str;
              description = "Group to own the secret.";
            };
            permissions = lib.mkOption {
              default = "0400";
              type = lib.types.str;
              description = "Permissions expressed as octal.";
            };
            prefix = lib.mkOption {
              default = "";
              type = lib.types.str;
              description = "Prefix for secret value (for environment files).";
            };
          };
        }
      );
      description = "Set of secrets to decrypt to disk.";
      default = { };
    };
  };

  config = lib.mkIf pkgs.stdenv.isLinux {

    # Create a default directory to place secrets

    systemd.tmpfiles.rules = [ "d ${config.secretsDirectory} 0755 root wheel" ];

    # Declare oneshot service to decrypt secret using SSH host key
    # - Requires that the secret is already encrypted for the host
    # - Encrypt secrets: nix run github:nmasur/dotfiles#encrypt-secret

    systemd.services = lib.mapAttrs' (name: attrs: {
      name = "${name}-secret";
      value = {

        description = "Decrypt secret for ${name}";
        wantedBy = [ "multi-user.target" ];
        bindsTo = [ "wait-for-identity.service" ];
        after = [ "wait-for-identity.service" ];
        serviceConfig.Type = "oneshot";
        script = ''
          echo "${attrs.prefix}$(
            ${pkgs.age}/bin/age --decrypt \
              --identity ${config.identityFile} ${attrs.source}
          )" > ${attrs.dest}

          chown '${attrs.owner}':'${attrs.group}' '${attrs.dest}'
          chmod '${attrs.permissions}' '${attrs.dest}'
        '';
      };
    }) config.secrets;

    # Example declaration
    # config.secrets.my-secret = {
    #   source = ../../private/my-secret.age;
    #   dest = "/var/lib/private/my-secret";
    #   owner = "my-app";
    #   group = "my-app";
    #   permissions = "0440";
    # };
  };
}
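The prefix option is what turns a raw decrypted value into an environment file. The vmauth secret later in this commit is a working example: it prepends PASSWORD= so the result can be loaded with EnvironmentFile:

secrets.vmauth = {
  source = ../../../private/prometheus.age;
  dest = "${config.secretsDirectory}/vmauth";
  prefix = "PASSWORD=";
};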
@ -1,49 +0,0 @@
# SSHD service for allowing SSH access to my machines.

{
  config,
  pkgs,
  lib,
  ...
}:
{

  options = {
    publicKeys = lib.mkOption {
      type = lib.types.nullOr (lib.types.listOf lib.types.str);
      description = "Public SSH keys authorized for this system.";
      default = null;
    };
    permitRootLogin = lib.mkOption {
      type = lib.types.str;
      description = "Root login settings.";
      default = "no";
    };
  };

  config = lib.mkIf config.services.openssh.enable {
    services.openssh = {
      ports = [ 22 ];
      allowSFTP = true;
      settings = {
        GatewayPorts = "no";
        X11Forwarding = false;
        PasswordAuthentication = false;
        PermitRootLogin = lib.mkDefault config.permitRootLogin;
      };
    };

    users.users.${config.user}.openssh.authorizedKeys.keys = lib.mkIf (
      config.publicKeys != null
    ) config.publicKeys;

    # Implement a simple fail2ban service for sshd
    services.sshguard.enable = true;

    # Add terminfo for SSH from popular terminal emulators
    # Fix: terminfo now installs contour, which is broken on ARM
    # - https://github.com/NixOS/nixpkgs/pull/253334
    # - Will disable until fixed
    environment.enableAllTerminfo = pkgs.stdenv.isLinux && pkgs.stdenv.isx86_64;
  };
}
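Typical usage of the publicKeys option above from a host definition, with a placeholder key:

publicKeys = [
  "ssh-ed25519 AAAAC3Nz... user@laptop" # placeholder key
];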
@ -1,103 +0,0 @@
# Transmission is a bittorrent client, which can run in the background for
# automated downloads with a web GUI.

{
  config,
  pkgs,
  lib,
  ...
}:
{

  config =
    let
      namespace = config.networking.wireguard.interfaces.wg0.interfaceNamespace;
      vpnIp = lib.strings.removeSuffix "/32" (
        builtins.head config.networking.wireguard.interfaces.wg0.ips
      );
    in
    lib.mkIf config.services.transmission.enable {

      # Setup transmission
      services.transmission = {
        settings = {
          port-forwarding-enabled = false;
          rpc-authentication-required = true;
          rpc-port = 9091;
          rpc-bind-address = "0.0.0.0";
          rpc-username = config.user;
          # This is a salted hash of the real password
          # https://github.com/tomwijnroks/transmission-pwgen
          rpc-password = "{c4c5145f6e18bcd3c7429214a832440a45285ce26jDOBGVW";
          rpc-host-whitelist = config.hostnames.transmission;
          rpc-host-whitelist-enabled = true;
          rpc-whitelist = lib.mkDefault "127.0.0.1"; # Overwritten by Cloudflare
          rpc-whitelist-enabled = true;
        };
      };

      # Configure Cloudflare DNS to point to this machine
      services.cloudflare-dyndns.domains = [ config.hostnames.transmission ];

      # Bind transmission to wireguard namespace
      systemd.services.transmission = lib.mkIf config.wireguard.enable {
        bindsTo = [ "netns@${namespace}.service" ];
        requires = [
          "network-online.target"
          "transmission-secret.service"
        ];
        after = [
          "wireguard-wg0.service"
          "transmission-secret.service"
        ];
        unitConfig.JoinsNamespaceOf = "netns@${namespace}.service";
        serviceConfig.NetworkNamespacePath = "/var/run/netns/${namespace}";
      };

      # Create reverse proxy for web UI
      caddy.routes =
        let
          # Set if the download domain is the same as the Transmission domain
          useDownloadDomain = config.hostnames.download == config.hostnames.transmission;
        in
        lib.mkAfter [
          {
            group = if useDownloadDomain then "download" else "transmission";
            match = [
              {
                host = [ config.hostnames.transmission ];
                path = if useDownloadDomain then [ "/transmission*" ] else null;
              }
            ];
            handle = [
              {
                handler = "reverse_proxy";
                upstreams = [
                  { dial = "localhost:${builtins.toString config.services.transmission.settings.rpc-port}"; }
                ];
              }
            ];
          }
        ];

      # Caddy and Transmission both try to set rmem_max for larger UDP packets.
      # We will choose Transmission's recommendation (4 MB).
      boot.kernel.sysctl."net.core.rmem_max" = 4194304;

      # Allow inbound connections to reach namespace
      systemd.services.transmission-web-netns = lib.mkIf config.wireguard.enable {
        description = "Forward to transmission in wireguard namespace";
        requires = [ "transmission.service" ];
        after = [ "transmission.service" ];
        serviceConfig = {
          Restart = "on-failure";
          TimeoutStopSec = 300;
        };
        wantedBy = [ "multi-user.target" ];
        script = ''
          ${pkgs.iproute2}/bin/ip netns exec ${namespace} ${pkgs.iproute2}/bin/ip link set dev lo up
          ${pkgs.socat}/bin/socat tcp-listen:9091,fork,reuseaddr exec:'${pkgs.iproute2}/bin/ip netns exec ${namespace} ${pkgs.socat}/bin/socat STDIO "tcp-connect:${vpnIp}:9091"',nofork
        '';
      };
    };
}
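The socat relay above is the subtle part, so a breakdown in comment form:

# How transmission-web-netns forwards traffic (descriptive only):
#   1. Bring up loopback inside the wireguard namespace.
#   2. The outer socat listens on host port 9091 and forks per connection.
#   3. Each connection execs an inner socat inside the namespace, bridging
#      stdio to ${vpnIp}:9091, where Transmission's RPC socket listens.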
@ -1,32 +0,0 @@
{ config, lib, ... }:
{

  config = lib.mkIf config.services.uptime-kuma.enable {

    services.uptime-kuma = {
      settings = {
        PORT = "3033";
      };
    };

    # Allow web traffic to Caddy
    caddy.routes = [
      {
        match = [ { host = [ config.hostnames.status ]; } ];
        handle = [
          {
            handler = "reverse_proxy";
            upstreams = [
              { dial = "localhost:${config.services.uptime-kuma.settings.PORT}"; }
            ];
          }
        ];
      }
    ];

    # Configure Cloudflare DNS to point to this machine
    services.cloudflare-dyndns.domains = [ config.hostnames.status ];

  };

}
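Note that PORT is declared as a string, so it interpolates straight into the dial address; the integer ports elsewhere in this commit need builtins.toString first. A minimal sketch of switching the module on from a host definition:

services.uptime-kuma.enable = true;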
@ -1,129 +0,0 @@
# Vaultwarden is an implementation of the Bitwarden password manager backend
# service, which allows for self-hosting the synchronization of a Bitwarden
# password manager client.

{
  config,
  pkgs,
  lib,
  ...
}:

let
  vaultwardenPath = "/var/lib/bitwarden_rs"; # Default service directory
in
{

  config = lib.mkIf config.services.vaultwarden.enable {
    services.vaultwarden = {
      config = {
        DOMAIN = "https://${config.hostnames.secrets}";
        SIGNUPS_ALLOWED = false;
        SIGNUPS_VERIFY = true;
        INVITATIONS_ALLOWED = true;
        WEB_VAULT_ENABLED = true;
        ROCKET_ADDRESS = "127.0.0.1";
        ROCKET_PORT = 8222;
        WEBSOCKET_ENABLED = true;
        WEBSOCKET_ADDRESS = "0.0.0.0";
        WEBSOCKET_PORT = 3012;
        LOGIN_RATELIMIT_SECONDS = 60;
        LOGIN_RATELIMIT_MAX_BURST = 10;
        ADMIN_RATELIMIT_SECONDS = 300;
        ADMIN_RATELIMIT_MAX_BURST = 3;
      };
      environmentFile = config.secrets.vaultwarden.dest;
      dbBackend = "sqlite";
    };

    secrets.vaultwarden = {
      source = ../../../private/vaultwarden.age;
      dest = "${config.secretsDirectory}/vaultwarden";
      owner = "vaultwarden";
      group = "vaultwarden";
    };

    networking.firewall.allowedTCPPorts = [ 3012 ];

    caddy.routes = [
      {
        match = [ { host = [ config.hostnames.secrets ]; } ];
        handle = [
          {
            handler = "reverse_proxy";
            upstreams = [
              { dial = "localhost:${builtins.toString config.services.vaultwarden.config.ROCKET_PORT}"; }
            ];
            headers.request.add."X-Real-IP" = [ "{http.request.remote.host}" ];
          }
        ];
      }
    ];

    # Configure Cloudflare DNS to point to this machine
    services.cloudflare-dyndns.domains = [ config.hostnames.secrets ];

    ## Backup config

    # Open to groups, allowing for backups
    systemd.services.vaultwarden.serviceConfig.StateDirectoryMode = lib.mkForce "0770";
    systemd.tmpfiles.rules = [
      "f ${vaultwardenPath}/db.sqlite3 0660 vaultwarden vaultwarden"
      "f ${vaultwardenPath}/db.sqlite3-shm 0660 vaultwarden vaultwarden"
      "f ${vaultwardenPath}/db.sqlite3-wal 0660 vaultwarden vaultwarden"
    ];

    # Allow litestream and vaultwarden to share a sqlite database
    users.users.litestream.extraGroups = [ "vaultwarden" ];
    users.users.vaultwarden.extraGroups = [ "litestream" ];

    # Backup sqlite database with litestream
    services.litestream = {
      settings = {
        dbs = [
          {
            path = "${vaultwardenPath}/db.sqlite3";
            replicas = [
              { url = "s3://${config.backup.s3.bucket}.${config.backup.s3.endpoint}/vaultwarden"; }
            ];
          }
        ];
      };
    };

    # Don't start litestream unless vaultwarden is up
    systemd.services.litestream = {
      after = [ "vaultwarden.service" ];
      requires = [ "vaultwarden.service" ];
    };

    # Run a separate file backup on a schedule
    systemd.timers.vaultwarden-backup = {
      timerConfig = {
        OnCalendar = "*-*-* 06:00:00"; # Once per day
        Unit = "vaultwarden-backup.service";
      };
      wantedBy = [ "timers.target" ];
    };

    # Backup other Vaultwarden data to object storage
    systemd.services.vaultwarden-backup = {
      description = "Backup Vaultwarden files";
      environment.AWS_ACCESS_KEY_ID = config.backup.s3.accessKeyId;
      serviceConfig = {
        Type = "oneshot";
        User = "vaultwarden";
        Group = "backup";
        EnvironmentFile = config.secrets.backup.dest;
      };
      script = ''
        ${pkgs.awscli2}/bin/aws s3 sync \
          ${vaultwardenPath}/ \
          s3://${config.backup.s3.bucket}/vaultwarden/ \
          --endpoint-url=https://${config.backup.s3.endpoint} \
          --exclude "*db.sqlite3*" \
          --exclude ".db.sqlite3*"
      '';
    };
  };
}
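For recovery, the litestream replica above can be pulled back down with the stock CLI; a sketch in comment form, with the bucket and endpoint spelled out by hand:

# Sketch: restore the replicated database (with vaultwarden stopped):
#   litestream restore -o /var/lib/bitwarden_rs/db.sqlite3 \
#     s3://<bucket>.<endpoint>/vaultwarden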
@ -1,110 +0,0 @@
# VictoriaMetrics is a more efficient drop-in replacement for Prometheus and
# InfluxDB (timeseries databases built for monitoring system metrics).

{
  config,
  pkgs,
  lib,
  pkgs-stable,
  ...
}:

let

  username = "prometheus";

  prometheusConfig = {
    scrape_configs = [
      {
        job_name = config.networking.hostName;
        stream_parse = true;
        static_configs = [ { targets = config.prometheus.scrapeTargets; } ];
      }
    ];
  };

  authConfig = (pkgs.formats.yaml { }).generate "auth.yml" {
    users = [
      {
        username = username;
        password = "%{PASSWORD}";
        url_prefix = "http://localhost${config.services.victoriametrics.listenAddress}";
      }
    ];
  };

  authPort = "8427";
in
{

  config = {

    services.victoriametrics.extraOptions = [
      "-promscrape.config=${(pkgs.formats.yaml { }).generate "scrape.yml" prometheusConfig}"
    ];

    systemd.services.vmauth = lib.mkIf config.services.victoriametrics.enable {
      description = "VictoriaMetrics basic auth proxy";
      after = [ "network.target" ];
      startLimitBurst = 5;
      serviceConfig = {
        Restart = "on-failure";
        RestartSec = 1;
        DynamicUser = true;
        EnvironmentFile = config.secrets.vmauth.dest;
        ExecStart = ''
          ${pkgs.victoriametrics}/bin/vmauth \
                      -auth.config=${authConfig} \
                      -httpListenAddr=:${authPort}'';
      };
      wantedBy = [ "multi-user.target" ];
    };

    secrets.vmauth = lib.mkIf config.services.victoriametrics.enable {
      source = ../../../private/prometheus.age;
      dest = "${config.secretsDirectory}/vmauth";
      prefix = "PASSWORD=";
    };
    systemd.services.vmauth-secret = lib.mkIf config.services.victoriametrics.enable {
      requiredBy = [ "vmauth.service" ];
      before = [ "vmauth.service" ];
    };

    caddy.routes = lib.mkIf config.services.victoriametrics.enable [
      {
        match = [ { host = [ config.hostnames.prometheus ]; } ];
        handle = [
          {
            handler = "reverse_proxy";
            upstreams = [ { dial = "localhost:${authPort}"; } ];
          }
        ];
      }
    ];

    # Configure Cloudflare DNS to point to this machine
    services.cloudflare-dyndns.domains =
      if config.services.victoriametrics.enable then [ config.hostnames.prometheus ] else [ ];

    # VMAgent

    services.vmagent = {
      package = pkgs-stable.vmagent;
      prometheusConfig = prometheusConfig;
      remoteWrite = {
        url = "https://${config.hostnames.prometheus}/api/v1/write";
        basicAuthUsername = username;
        basicAuthPasswordFile = config.secrets.vmagent.dest;
      };
    };

    secrets.vmagent = lib.mkIf config.services.vmagent.enable {
      source = ../../../private/prometheus.age;
      dest = "${config.secretsDirectory}/vmagent";
    };
    systemd.services.vmagent-secret = lib.mkIf config.services.vmagent.enable {
      requiredBy = [ "vmagent.service" ];
      before = [ "vmagent.service" ];
    };
  };
}
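Taken together, the write path for a remote agent looks like this; 8428 is the VictoriaMetrics default listen address, and everything else comes from the config above:

# vmagent --- basic auth over TLS --> https://<hostnames.prometheus>/api/v1/write
# Caddy   ------- proxies to -------> localhost:8427 (vmauth)
# vmauth  --- password verified ----> http://localhost:8428 (victoriametrics)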
@ -1,54 +0,0 @@
# Wireguard is a VPN protocol that can be setup to create a mesh network
# between machines on different LANs. This is currently not in use in my setup.

{
  config,
  pkgs,
  lib,
  ...
}:
{

  options.wireguard.enable = lib.mkEnableOption "Wireguard VPN setup.";

  config = lib.mkIf (pkgs.stdenv.isLinux) {

    networking.wireguard = {
      enable = config.wireguard.enable;
      interfaces = {
        wg0 = {

          # Something to use as a default value
          ips = lib.mkDefault [ "127.0.0.1/32" ];

          # Establishes identity of this machine
          generatePrivateKeyFile = false;
          privateKeyFile = config.secrets.wireguard.dest;

          # Move to network namespace for isolating programs
          interfaceNamespace = "wg";
        };
      };
    };

    # Create namespace for Wireguard
    # This allows us to isolate specific programs to Wireguard
    systemd.services."netns@" = {
      enable = config.wireguard.enable;
      description = "%I network namespace";
      before = [ "network.target" ];
      serviceConfig = {
        Type = "oneshot";
        RemainAfterExit = true;
        ExecStart = "${pkgs.iproute2}/bin/ip netns add %I";
        ExecStop = "${pkgs.iproute2}/bin/ip netns del %I";
      };
    };

    # Create private key file for wireguard
    secrets.wireguard = lib.mkIf config.wireguard.enable {
      source = ../../../private/wireguard.age;
      dest = "${config.secretsDirectory}/wireguard";
    };
  };
}
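The interface above carries no peers (presumably supplied per host elsewhere); a hypothetical peer block that would complete it, with every value a placeholder:

networking.wireguard.interfaces.wg0.peers = [
  {
    publicKey = "<peer-public-key>"; # placeholder
    allowedIPs = [ "0.0.0.0/0" ];
    endpoint = "vpn.example.com:51820"; # placeholder
  }
];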