16 Commits

Author SHA1 Message Date
22cba9acac use bind for local dns 2023-07-18 03:52:37 +00:00
9e8bac6834 setup bind 2023-07-17 22:37:26 -04:00
b07a8f5e20 auto empty trash on login 2023-07-17 22:24:41 -04:00
8eb7ef0be7 fix avahi and dhcp, mark swan as physical 2023-07-18 02:00:38 +00:00
22ab2acf66 fix: swan not using avahi because not set as physical 2023-07-17 11:38:08 -04:00
d85e4b1593 fix: caddy denylist and jellyfin prometheus 2023-07-16 21:04:07 +00:00
6ea99eca5d enable caddy prometheus metrics 2023-07-16 20:13:41 +00:00
60e779085e add victoriametrics to tempest 2023-07-16 10:43:55 -04:00
6abcdfa3bd switch flame to victoriametrics 2023-07-16 14:43:14 +00:00
0f0a64b5c4 add victoriametrics 2023-07-16 13:50:58 +00:00
edb4ec77ca set caddy prometheus port dynamically 2023-07-16 03:34:03 +00:00
3cc264a857 fix: register gitea runner 2023-07-16 03:33:35 +00:00
76a7480a1d working prometheus setup with processes 2023-07-16 01:04:52 +00:00
9d4bf082c7 fix: prometheus remote write 2023-07-14 02:52:23 +00:00
e86b2f184f fix: cloudflare tunnel on tempest (requires openssh, but removing public key) 2023-07-12 23:33:35 -04:00
d14054ab17 update to nextcloud 27 2023-07-13 03:22:45 +00:00
27 changed files with 544 additions and 239 deletions

View File

@ -110,20 +110,10 @@
};
outputs = { self, nixpkgs, ... }@inputs:
outputs = { nixpkgs, ... }@inputs:
let
# Common overlays to always use
overlays = [
inputs.nur.overlay
inputs.nix2vim.overlay
(import ./overlays/neovim-plugins.nix inputs)
(import ./overlays/calibre-web.nix)
(import ./overlays/disko.nix inputs)
(import ./overlays/tree-sitter.nix inputs)
];
# Global configuration for my systems
globals = let baseName = "masu.rs";
in rec {
@ -135,7 +125,6 @@
mail.imapHost = "imap.purelymail.com";
mail.smtpHost = "smtp.purelymail.com";
dotfilesRepo = "git@github.com:nmasur/dotfiles";
nixpkgs.overlays = overlays;
hostnames = {
git = "git.${baseName}";
metrics = "metrics.${baseName}";
@ -148,6 +137,16 @@
};
};
# Common overlays to always use
overlays = [
inputs.nur.overlay
inputs.nix2vim.overlay
(import ./overlays/neovim-plugins.nix inputs)
(import ./overlays/calibre-web.nix)
(import ./overlays/disko.nix inputs)
(import ./overlays/tree-sitter.nix inputs)
];
# System types to support.
supportedSystems =
[ "x86_64-linux" "x86_64-darwin" "aarch64-linux" "aarch64-darwin" ];
@ -157,26 +156,20 @@
in rec {
nixosModules = {
globals = { config }: { config = globals; };
common = import ./modules/common;
nixos = import ./modules/nixos;
darwin = import ./modules/darwin;
};
# Contains my full system builds, including home-manager
# nixos-rebuild switch --flake .#tempest
nixosConfigurations = {
tempest = import ./hosts/tempest { inherit self; };
hydra = import ./hosts/hydra { inherit self; };
flame = import ./hosts/flame { inherit self; };
swan = import ./hosts/swan { inherit self; };
tempest = import ./hosts/tempest { inherit inputs globals overlays; };
hydra = import ./hosts/hydra { inherit inputs globals overlays; };
flame = import ./hosts/flame { inherit inputs globals overlays; };
swan = import ./hosts/swan { inherit inputs globals overlays; };
};
# Contains my full Mac system builds, including home-manager
# darwin-rebuild switch --flake .#lookingglass
darwinConfigurations = {
lookingglass = import ./hosts/lookingglass { inherit self; };
lookingglass =
import ./hosts/lookingglass { inherit inputs globals overlays; };
};
# For quickly applying home-manager settings with:
@ -192,8 +185,10 @@
diskoConfigurations = { root = import ./disks/root.nix; };
packages = let
aws = system: import ./hosts/aws { inherit self system; };
staff = system: import ./hosts/staff { inherit self system; };
aws = system:
import ./hosts/aws { inherit inputs globals overlays system; };
staff = system:
import ./hosts/staff { inherit inputs globals overlays system; };
neovim = system:
let pkgs = import nixpkgs { inherit system overlays; };
in import ./modules/common/neovim/package {
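
The flake changes above drop the self-referential imports in favor of passing inputs, globals, and overlays straight into each host file. A condensed sketch of the calling convention the hosts now follow (the "example" host name and path are hypothetical; the real host files appear in the diffs below):

  # hosts/example/default.nix (hypothetical), mirroring the host files below
  { inputs, globals, overlays, ... }:
  inputs.nixpkgs.lib.nixosSystem {
    system = "x86_64-linux";
    modules = [
      globals                                         # shared settings module
      inputs.home-manager.nixosModules.home-manager
      ../../modules/common
      ../../modules/nixos
      { nixpkgs.overlays = overlays; }                # overlays applied per host
    ];
  }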

View File

@ -1,14 +1,17 @@
{ self, system, ... }:
{ inputs, system, globals, overlays, ... }:
self.inputs.nixos-generators.nixosGenerate {
inputs.nixos-generators.nixosGenerate {
inherit system;
format = "amazon";
modules = [
self.inputs.home-manager.nixosModules.home-manager
self.nixosModules.globals
self.nixosModules.common
self.nixosModules.nixos
inputs.home-manager.nixosModules.home-manager
{
nixpkgs.overlays = overlays;
user = globals.user;
fullName = globals.fullName;
dotfilesRepo = globals.dotfilesRepo;
gitName = globals.gitName;
gitEmail = globals.gitEmail;
networking.hostName = "sheep";
gui.enable = false;
theme.colors = (import ../../colorscheme/gruvbox).dark;
@ -18,6 +21,9 @@ self.inputs.nixos-generators.nixosGenerate {
# AWS settings require this
permitRootLogin = "prohibit-password";
}
../../modules/common
../../modules/nixos
../../modules/nixos/services/sshd.nix
] ++ [
# Required to fix diskSize errors during build
({ ... }: { amazonImage.sizeMB = 16 * 1024; })

View File

@ -4,23 +4,24 @@
# How to install:
# https://blog.korfuri.fr/posts/2022/08/nixos-on-an-oracle-free-tier-ampere-machine/
{ self, ... }:
{ inputs, globals, overlays, ... }:
self.inputs.nixpkgs.lib.nixosSystem {
inputs.nixpkgs.lib.nixosSystem {
system = "aarch64-linux";
specialArgs = { };
modules = [
self.inputs.home-manager.nixosModules.home-manager
self.nixosModules.globals
self.nixosModules.common
self.nixosModules.nixos
globals
inputs.home-manager.nixosModules.home-manager
../../modules/common
../../modules/nixos
{
nixpkgs.overlays = overlays;
# Hardware
server = true;
networking.hostName = "flame";
imports =
[ (self.inputs.nixpkgs + "/nixos/modules/profiles/qemu-guest.nix") ];
imports = [ (inputs.nixpkgs + "/nixos/modules/profiles/qemu-guest.nix") ];
boot.initrd.availableKernelModules = [ "xhci_pci" "virtio_pci" "usbhid" ];
fileSystems."/" = {
@ -48,7 +49,8 @@ self.inputs.nixpkgs.lib.nixosSystem {
services.caddy.enable = true;
services.grafana.enable = true;
services.prometheus.enable = true;
services.openssh.enable = true;
services.victoriametrics.enable = true;
services.gitea.enable = true;
services.vaultwarden.enable = true;
services.minecraft-server.enable = true; # Setup Minecraft server
@ -70,6 +72,9 @@ self.inputs.nixpkgs.lib.nixosSystem {
accessKeyId = "0026b0e73b2e2c80000000005";
};
# # Grant access to Jellyfin directories from Nextcloud
# users.users.nextcloud.extraGroups = [ "jellyfin" ];
# # Wireguard config for Transmission
# wireguard.enable = true;
# networking.wireguard.interfaces.wg0 = {

View File

@ -1,20 +1,21 @@
# The Hydra
# System configuration for WSL
{ self, ... }:
{ inputs, globals, overlays, ... }:
self.inputs.nixpkgs.lib.nixosSystem {
inputs.nixpkgs.lib.nixosSystem {
system = "x86_64-linux";
specialArgs = { };
modules = [
self.inputs.wsl.nixosModules.wsl
self.inputs.home-manager.nixosModules.home-manager
self.nixosModules.globals
self.nixosModules.common
self.nixosModules.nixos
self.nixosModules.wsl
../../modules/common
../../modules/nixos
../../modules/wsl
globals
inputs.wsl.nixosModules.wsl
inputs.home-manager.nixosModules.home-manager
{
networking.hostName = "hydra";
nixpkgs.overlays = overlays;
identityFile = "/home/${globals.user}/.ssh/id_ed25519";
gui.enable = false;
theme = {

View File

@ -1,46 +1,46 @@
# The Looking Glass
# System configuration for my work Macbook
{ self, ... }:
{ inputs, globals, overlays, ... }:
self.inputs.darwin.lib.darwinSystem {
inputs.darwin.lib.darwinSystem {
system = "x86_64-darwin";
specialArgs = { };
modules = [
self.inputs.home-manager.darwinModules.home-manager
self.nixosModules.common
self.nixosModules.darwin
({ config, lib, ... }: {
config = rec {
user = lib.mkForce "Noah.Masur";
gitName = lib.mkForce "Noah-Masur_1701";
gitEmail = lib.mkForce "${user}@take2games.com";
nixpkgs.overlays = [ self.inputs.firefox-darwin.overlay ];
networking.hostName = "lookingglass";
identityFile = "/Users/${user}/.ssh/id_ed25519";
gui.enable = true;
theme = {
colors = (import ../../colorscheme/gruvbox-dark).dark;
dark = true;
};
mail.user = globals.user;
charm.enable = true;
neovim.enable = true;
mail.enable = true;
mail.aerc.enable = true;
mail.himalaya.enable = false;
kitty.enable = true;
discord.enable = true;
firefox.enable = true;
dotfiles.enable = true;
nixlang.enable = true;
terraform.enable = true;
python.enable = true;
lua.enable = true;
kubernetes.enable = true;
_1password.enable = true;
slack.enable = true;
};
../../modules/common
../../modules/darwin
(globals // rec {
user = "Noah.Masur";
gitName = "Noah-Masur_1701";
gitEmail = "${user}@take2games.com";
})
inputs.home-manager.darwinModules.home-manager
{
nixpkgs.overlays = [ inputs.firefox-darwin.overlay ] ++ overlays;
networking.hostName = "lookingglass";
identityFile = "/Users/Noah.Masur/.ssh/id_ed25519";
gui.enable = true;
theme = {
colors = (import ../../colorscheme/gruvbox-dark).dark;
dark = true;
};
mail.user = globals.user;
charm.enable = true;
neovim.enable = true;
mail.enable = true;
mail.aerc.enable = true;
mail.himalaya.enable = false;
kitty.enable = true;
discord.enable = true;
firefox.enable = true;
dotfiles.enable = true;
nixlang.enable = true;
terraform.enable = true;
python.enable = true;
lua.enable = true;
kubernetes.enable = true;
_1password.enable = true;
slack.enable = true;
}
];
}

View File

@ -1,32 +1,31 @@
# The Staff
# ISO configuration for my USB drive
{ self, system, ... }:
{ inputs, system, overlays, ... }:
self.inputs.nixos-generators.nixosGenerate {
inputs.nixos-generators.nixosGenerate {
inherit system;
format = "install-iso";
modules = [
self.nixosModules.global
self.nixosModules.common
self.nixosModules.nixos
({ config, pkgs, ... }: {
networking.hostName = "staff";
users.extraUsers.root.openssh.authorizedKeys.keys = [
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIB+AbmjGEwITk5CK9y7+Rg27Fokgj9QEjgc9wST6MA3s"
];
services.openssh = {
enable = true;
ports = [ 22 ];
allowSFTP = true;
settings = {
GatewayPorts = "no";
X11Forwarding = false;
PasswordAuthentication = false;
PermitRootLogin = "yes";
};
modules = [{
nixpkgs.overlays = overlays;
networking.hostName = "staff";
users.extraUsers.root.openssh.authorizedKeys.keys = [
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIB+AbmjGEwITk5CK9y7+Rg27Fokgj9QEjgc9wST6MA3s"
];
services.openssh = {
enable = true;
ports = [ 22 ];
allowSFTP = true;
settings = {
GatewayPorts = "no";
X11Forwarding = false;
PasswordAuthentication = false;
PermitRootLogin = "yes";
};
environment.systemPackages = with pkgs; [
};
environment.systemPackages =
let pkgs = import inputs.nixpkgs { inherit system overlays; };
in with pkgs; [
git
vim
wget
@ -36,10 +35,9 @@ self.inputs.nixos-generators.nixosGenerate {
colors = (import ../../colorscheme/gruvbox).dark;
})
];
nix.extraOptions = ''
experimental-features = nix-command flakes
warn-dirty = false
'';
})
];
nix.extraOptions = ''
experimental-features = nix-command flakes
warn-dirty = false
'';
}];
}

View File

@ -1,20 +1,21 @@
# The Swan
# System configuration for my home NAS server
{ self, ... }:
{ inputs, globals, overlays, ... }:
self.inputs.nixpkgs.lib.nixosSystem {
inputs.nixpkgs.lib.nixosSystem {
system = "x86_64-linux";
specialArgs = { };
modules = [
self.inputs.home-manager.nixosModules.home-manager
self.inputs.disko.nixosModules.disko
self.nixosModules.globals
self.nixosModules.common
self.nixosModules.nixos
globals
inputs.home-manager.nixosModules.home-manager
inputs.disko.nixosModules.disko
../../modules/common
../../modules/nixos
{
# Hardware
server = true;
physical = true;
networking.hostName = "swan";
boot.initrd.availableKernelModules =
@ -46,16 +47,20 @@ self.inputs.nixpkgs.lib.nixosSystem {
gui.enable = false;
theme = { colors = (import ../../colorscheme/gruvbox).dark; };
nixpkgs.overlays = overlays;
neovim.enable = true;
cloudflare.enable = true;
dotfiles.enable = true;
arrs.enable = true;
services.bind.enable = true;
services.caddy.enable = true;
services.jellyfin.enable = true;
services.nextcloud.enable = true;
services.calibre-web.enable = true;
services.prometheus.enable = true;
services.openssh.enable = true;
services.prometheus.enable = false;
services.vmagent.enable = true;
services.samba.enable = true;
cloudflareTunnel = {

View File

@ -1,16 +1,18 @@
# The Tempest
# System configuration for my desktop
{ self, ... }:
{ inputs, globals, overlays, ... }:
self.inputs.nixpkgs.lib.nixosSystem {
inputs.nixpkgs.lib.nixosSystem {
system = "x86_64-linux";
modules = [
self.inputs.home-manager.nixosModules.home-manager
self.nixosModules.globals
self.nixosModules.common
self.nixosModules.nixos
globals
inputs.home-manager.nixosModules.home-manager
../../modules/common
../../modules/nixos
{
nixpkgs.overlays = overlays;
# Hardware
physical = true;
networking.hostName = "tempest";
@ -51,7 +53,7 @@ self.inputs.nixpkgs.lib.nixosSystem {
# Must be prepared ahead
identityFile = "/home/${globals.user}/.ssh/id_ed25519";
passwordHash = self.inputs.nixpkgs.lib.fileContents ../../password.sha512;
passwordHash = inputs.nixpkgs.lib.fileContents ../../password.sha512;
# Theming
gui.enable = true;
@ -59,8 +61,8 @@ self.inputs.nixpkgs.lib.nixosSystem {
colors = (import ../../colorscheme/gruvbox-dark).dark;
dark = true;
};
wallpaper = "${self.inputs.wallpapers}/gruvbox/road.jpg";
gtk.theme.name = self.inputs.nixpkgs.lib.mkDefault "Adwaita-dark";
wallpaper = "${inputs.wallpapers}/gruvbox/road.jpg";
gtk.theme.name = inputs.nixpkgs.lib.mkDefault "Adwaita-dark";
# Programs and services
charm.enable = true;
@ -89,7 +91,9 @@ self.inputs.nixpkgs.lib.nixosSystem {
leagueoflegends.enable = true;
ryujinx.enable = true;
};
services.vmagent.enable = true;
services.openssh.enable = true; # Required for Cloudflare tunnel
cloudflareTunnel = {
enable = true;
id = "ac133a82-31fb-480c-942a-cdbcd4c58173";

View File

@ -40,6 +40,7 @@
defaultApplications."inode/directory" =
lib.mkBefore [ "org.gnome.Nautilus.desktop" ];
};
};
# # Set default for opening directories
@ -50,6 +51,13 @@
# lib.mkForce [ "org.gnome.Nautilus.desktop" ];
# };
# Delete Trash files older than 1 week
systemd.user.services.empty-trash = {
description = "Empty Trash on a regular basis";
wantedBy = [ "default.target" ];
script = "${pkgs.trash-cli}/bin/trash-empty 7";
};
};
}

View File

@ -1,6 +1,6 @@
{ config, pkgs, lib, ... }: {
boot.loader = lib.mkIf config.physical {
boot.loader = lib.mkIf (config.physical && !config.server) {
grub = {
enable = true;

View File

@ -2,12 +2,7 @@
config = lib.mkIf config.physical {
# The global useDHCP flag is deprecated, therefore explicitly set to false here.
# Per-interface useDHCP will be mandatory in the future, so this generated config
# replicates the default behaviour.
networking.useDHCP = false;
networking.interfaces.enp5s0.useDHCP = true;
networking.interfaces.wlp4s0.useDHCP = true;
networking.useDHCP = true;
networking.firewall.allowPing = lib.mkIf config.server true;
@ -15,6 +10,9 @@
services.avahi = {
enable = true;
domainName = "local";
ipv6 = false; # Should work either way
# Resolve local hostnames using Avahi DNS
nssmdns = true;
publish = {
enable = true;
addresses = true;
@ -22,10 +20,6 @@
workstation = true;
};
};
# Resolve local hostnames using Avahi DNS
services.avahi.nssmdns = true;
};
}

View File

@ -1,6 +1,6 @@
{ config, pkgs, lib, ... }: {
{ config, lib, ... }: {
config = lib.mkIf (pkgs.stdenv.isLinux && config.server) {
config = lib.mkIf config.server {
# Servers need a bootloader or they won't start
boot.loader.systemd-boot.enable = true;

View File

@ -1,6 +1,6 @@
{ config, pkgs, lib, ... }: {
config = lib.mkIf config.physical {
config = lib.mkIf (config.physical && !config.server) {
# Prevent wake from keyboard
powerManagement.powerDownCommands = ''

View File

@ -1,16 +1,21 @@
{ config, pkgs, lib, ... }: {
{ config, lib, ... }: {
options = { zfs.enable = lib.mkEnableOption "ZFS file system."; };
config =
lib.mkIf (pkgs.stdenv.isLinux && config.server && config.zfs.enable) {
config = lib.mkIf (config.server && config.zfs.enable) {
# Only use compatible Linux kernel, since ZFS can be behind
boot.kernelPackages =
config.boot.zfs.package.latestCompatibleLinuxPackages;
boot.kernelParams = [ "nohibernate" ];
boot.supportedFilesystems = [ "zfs" ];
# Only use compatible Linux kernel, since ZFS can be behind
boot.kernelPackages = config.boot.zfs.package.latestCompatibleLinuxPackages;
boot.kernelParams = [ "nohibernate" ];
boot.supportedFilesystems = [ "zfs" ];
services.prometheus.exporters.zfs.enable =
config.prometheus.exporters.enable;
prometheus.scrapeTargets = [
"127.0.0.1:${
builtins.toString config.services.prometheus.exporters.zfs.port
}"
];
};
};
}

View File

@ -0,0 +1,55 @@
{ config, pkgs, lib, ... }:
let
localIp = "192.168.1.218";
localServices = [
config.hostnames.stream
config.hostnames.content
config.hostnames.books
config.hostnames.download
];
mkRecord = service: "${service} A ${localIp}";
localRecords = lib.concatLines (map mkRecord localServices);
in {
config = lib.mkIf config.services.bind.enable {
caddy.cidrAllowlist = [ "192.168.0.0/16" ];
services.bind = {
cacheNetworks = [ "127.0.0.0/24" "192.168.0.0/16" ];
forwarders = [ "1.1.1.1" "1.0.0.1" ];
ipv4Only = true;
# Use rpz zone as an override
extraOptions = ''response-policy { zone "rpz"; };'';
zones = {
rpz = {
master = true;
file = pkgs.writeText "db.rpz" ''
$TTL 60 ; 1 minute
@ IN SOA localhost. root.localhost. (
2023071800 ; serial
1h ; refresh
30m ; retry
1w ; expire
30m ; minimum ttl
)
IN NS localhost.
localhost A 127.0.0.1
${localRecords}
'';
};
};
};
networking.firewall.allowedTCPPorts = [ 53 ];
networking.firewall.allowedUDPPorts = [ 53 ];
};
}
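
A minimal sketch of how the record helpers above expand, with hypothetical hostnames standing in for config.hostnames.* (evaluable with, e.g., nix eval --impure --expr):

  let
    lib = (import <nixpkgs> { }).lib;
    localIp = "192.168.1.218";
    mkRecord = service: "${service} A ${localIp}";
  in lib.concatLines (map mkRecord [ "stream.masu.rs" "content.masu.rs" ])
  # => "stream.masu.rs A 192.168.1.218\ncontent.masu.rs A 192.168.1.218\n"

Each local service hostname then resolves to the NAS address inside the rpz override zone, while other queries should fall through to the 1.1.1.1/1.0.0.1 forwarders.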

View File

@ -1,52 +1,70 @@
{ config, pkgs, lib, ... }: {
options = {
caddy.tlsPolicies = lib.mkOption {
type = lib.types.listOf lib.types.attrs;
description = "Caddy JSON TLS policies";
default = [ ];
};
caddy.routes = lib.mkOption {
type = lib.types.listOf lib.types.attrs;
description = "Caddy JSON routes for http servers";
default = [ ];
};
caddy.blocks = lib.mkOption {
type = lib.types.listOf lib.types.attrs;
description = "Caddy JSON error blocks for http servers";
default = [ ];
caddy = {
tlsPolicies = lib.mkOption {
type = lib.types.listOf lib.types.attrs;
description = "Caddy JSON TLS policies";
default = [ ];
};
routes = lib.mkOption {
type = lib.types.listOf lib.types.attrs;
description = "Caddy JSON routes for http servers";
default = [ ];
};
blocks = lib.mkOption {
type = lib.types.listOf lib.types.attrs;
description = "Caddy JSON error blocks for http servers";
default = [ ];
};
cidrAllowlist = lib.mkOption {
type = lib.types.listOf lib.types.str;
description = "CIDR blocks to allow for requests";
default = [ "127.0.0.1/32" ];
};
};
};
config =
lib.mkIf (config.services.caddy.enable && config.caddy.routes != [ ]) {
config = lib.mkIf config.services.caddy.enable {
services.caddy = {
adapter = "''"; # Required to enable JSON
configFile = pkgs.writeText "Caddyfile" (builtins.toJSON {
apps.http.servers.main = {
listen = [ ":443" ];
routes = config.caddy.routes;
errors.routes = config.caddy.blocks;
# logs = { }; # Uncomment to collect access logs
# Force Caddy to 403 if not coming from allowlisted source
caddy.routes = [{
match = [{ not = [{ remote_ip.ranges = config.caddy.cidrAllowlist; }]; }];
handle = [{
handler = "static_response";
status_code = "403";
}];
}];
services.caddy = {
adapter = "''"; # Required to enable JSON
configFile = pkgs.writeText "Caddyfile" (builtins.toJSON {
apps.http.servers.main = {
listen = [ ":443" ];
routes = config.caddy.routes;
errors.routes = config.caddy.blocks;
# logs = { }; # Uncomment to collect access logs
};
apps.http.servers.metrics = { }; # Enables Prometheus metrics
apps.tls.automation.policies = config.caddy.tlsPolicies;
logging.logs.main = {
encoder = { format = "console"; };
writer = {
output = "file";
filename = "${config.services.caddy.logDir}/caddy.log";
roll = true;
};
apps.tls.automation.policies = config.caddy.tlsPolicies;
logging.logs.main = {
encoder = { format = "console"; };
writer = {
output = "file";
filename = "${config.services.caddy.logDir}/caddy.log";
roll = true;
};
level = "INFO";
};
});
};
networking.firewall.allowedTCPPorts = [ 80 443 ];
networking.firewall.allowedUDPPorts = [ 443 ];
level = "INFO";
};
});
};
networking.firewall.allowedTCPPorts = [ 80 443 ];
networking.firewall.allowedUDPPorts = [ 443 ];
prometheus.scrapeTargets = [ "127.0.0.1:2019" ];
};
}
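
The new cidrAllowlist option defaults to loopback only and is extended by other modules: bind.nix above adds the LAN range, and cloudflare.nix below adds Cloudflare's ranges. A minimal hypothetical sketch of contributing another range from any module:

  # hypothetical module granting an extra range access through Caddy
  { config, lib, ... }: {
    config = lib.mkIf config.services.caddy.enable {
      caddy.cidrAllowlist = [ "10.0.0.0/8" ];   # hypothetical VPN/LAN range
    };
  }

Because the option is a listOf, definitions from all modules are concatenated; the 127.0.0.1/32 default only applies when no module defines the option at all.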

View File

@ -41,13 +41,7 @@ in {
config = lib.mkIf config.cloudflare.enable {
# Forces Caddy to error if coming from a non-Cloudflare IP
caddy.blocks = [{
match = [{ not = [{ remote_ip.ranges = cloudflareIpRanges; }]; }];
handle = [{
handler = "static_response";
abort = true;
}];
}];
caddy.cidrAllowlist = cloudflareIpRanges;
# Tell Caddy to use Cloudflare DNS for ACME challenge validation
services.caddy.package = (pkgs.callPackage ../../../overlays/caddy.nix {

View File

@ -3,6 +3,7 @@
imports = [
./arr.nix
./backups.nix
./bind.nix
./caddy.nix
./calibre.nix
./cloudflare-tunnel.nix
@ -24,6 +25,7 @@
./sshd.nix
./transmission.nix
./vaultwarden.nix
./victoriametrics.nix
./wireguard.nix
];

View File

@ -10,9 +10,9 @@
enable = true;
labels = [
# Provide a Debian base with NodeJS for actions
"debian-latest:docker://node:18-bullseye"
# "debian-latest:docker://node:18-bullseye"
# Fake the Ubuntu name, because Node provides no Ubuntu builds
"ubuntu-latest:docker://node:18-bullseye"
# "ubuntu-latest:docker://node:18-bullseye"
# Provide native execution on the host using below packages
"native:host"
];
@ -31,6 +31,23 @@
tokenFile = config.secrets.giteaRunnerToken.dest;
};
secrets.giteaRunnerToken = {
source = ../../../private/gitea-runner-token.age; # TOKEN=xyz
dest = "${config.secretsDirectory}/gitea-runner-token";
};
systemd.services.giteaRunnerToken-secret = {
requiredBy = [
"gitea-runner-${
config.services.gitea-actions-runner.instances.${config.networking.hostName}.name
}.service"
];
before = [
"gitea-runner-${
config.services.gitea-actions-runner.instances.${config.networking.hostName}.name
}.service"
];
};
};
}

View File

@ -13,7 +13,12 @@
match = [{ host = [ config.hostnames.metrics ]; }];
handle = [{
handler = "reverse_proxy";
upstreams = [{ dial = "localhost:3000"; }];
upstreams = [{
dial = "localhost:${
builtins.toString
config.services.grafana.settings.server.http_port
}";
}];
}];
}];

View File

@ -5,13 +5,25 @@
services.jellyfin.group = "media";
users.users.jellyfin = { isSystemUser = true; };
caddy.routes = [{
match = [{ host = [ config.hostnames.stream ]; }];
handle = [{
handler = "reverse_proxy";
upstreams = [{ dial = "localhost:8096"; }];
}];
}];
caddy.routes = [
{
match = [{
host = [ config.hostnames.stream ];
path = [ "/metrics*" ];
}];
handle = [{
handler = "static_response";
status_code = "403";
}];
}
{
match = [{ host = [ config.hostnames.stream ]; }];
handle = [{
handler = "reverse_proxy";
upstreams = [{ dial = "localhost:8096"; }];
}];
}
];
# Create videos directory, allow anyone in Jellyfin group to manage it
systemd.tmpfiles.rules = [
@ -35,6 +47,9 @@
users.users.jellyfin.extraGroups =
[ "render" "video" ]; # Access to /dev/dri
# Requires MetricsEnable is true in /var/lib/jellyfin/config/system.xml
prometheus.scrapeTargets = [ "127.0.0.1:8096" ];
};
}

View File

@ -1,9 +1,15 @@
{ config, pkgs, lib, ... }: {
{ config, pkgs, lib, ... }:
let
port = 8080;
in {
config = lib.mkIf config.services.nextcloud.enable {
services.nextcloud = {
package = pkgs.nextcloud26; # Required to specify
package = pkgs.nextcloud27; # Required to specify
datadir = "/data/nextcloud";
https = true;
hostName = "localhost";
@ -11,13 +17,14 @@
config = {
adminpassFile = config.secrets.nextcloud.dest;
extraTrustedDomains = [ config.hostnames.content ];
trustedProxies = [ "127.0.0.1" ];
};
};
# Don't let Nginx use main ports (using Caddy instead)
services.nginx.virtualHosts."localhost".listen = [{
addr = "127.0.0.1";
port = 8080;
port = port;
}];
# Point Caddy to Nginx
@ -25,7 +32,7 @@
match = [{ host = [ config.hostnames.content ]; }];
handle = [{
handler = "reverse_proxy";
upstreams = [{ dial = "localhost:8080"; }];
upstreams = [{ dial = "localhost:${builtins.toString port}"; }];
}];
}];
@ -74,6 +81,23 @@
requires = [ "phpfpm-nextcloud.service" ];
};
# Log metrics to prometheus
services.prometheus.exporters.nextcloud = {
enable = config.prometheus.exporters.enable;
username = config.services.nextcloud.config.adminuser;
url = "http://localhost:${builtins.toString port}";
passwordFile = config.services.nextcloud.config.adminpassFile;
};
prometheus.scrapeTargets = [
"127.0.0.1:${
builtins.toString config.services.prometheus.exporters.nextcloud.port
}"
];
# Allows nextcloud-exporter to read passwordFile
users.users.nextcloud-exporter.extraGroups =
lib.mkIf config.services.prometheus.exporters.nextcloud.enable
[ "nextcloud" ];
};
}

View File

@ -1,18 +1,58 @@
{ config, pkgs, lib, ... }: {
options.prometheus = {
exporters.enable = lib.mkEnableOption "Enable Prometheus exporters";
scrapeTargets = lib.mkOption {
type = lib.types.listOf lib.types.str;
description = "Prometheus scrape targets";
default = [ ];
};
};
config = let
# If hosting Grafana, host local Prometheus and listen for inbound jobs. If
# not hosting Grafana, send remote Prometheus writes to primary host.
isServer = config.services.grafana.enable;
in lib.mkIf config.services.prometheus.enable {
in {
# Turn on exporters if any Prometheus scraper is running
prometheus.exporters.enable = builtins.any (x: x) [
config.services.prometheus.enable
config.services.victoriametrics.enable
config.services.vmagent.enable
];
prometheus.scrapeTargets = [
"127.0.0.1:${
builtins.toString config.services.prometheus.exporters.node.port
}"
"127.0.0.1:${
builtins.toString config.services.prometheus.exporters.systemd.port
}"
"127.0.0.1:${
builtins.toString config.services.prometheus.exporters.process.port
}"
];
services.prometheus = {
exporters.node.enable = true;
exporters.node.enable = config.prometheus.exporters.enable;
exporters.node.enabledCollectors = [ ];
exporters.node.disabledCollectors = [ "cpufreq" ];
exporters.systemd.enable = config.prometheus.exporters.enable;
exporters.process.enable = config.prometheus.exporters.enable;
exporters.process.settings.process_names = [
# Remove nix store path from process name
{
name = "{{.Matches.Wrapped}} {{ .Matches.Args }}";
cmdline = [ "^/nix/store[^ ]*/(?P<Wrapped>[^ /]*) (?P<Args>.*)" ];
}
];
extraFlags = lib.mkIf isServer [ "--web.enable-remote-write-receiver" ];
scrapeConfigs = [{
job_name = "local";
static_configs = [{ targets = [ "127.0.0.1:9100" ]; }];
job_name = config.networking.hostName;
static_configs = [{ targets = config.scrapeTargets; }];
}];
webExternalUrl =
lib.mkIf isServer "https://${config.hostnames.prometheus}";
@ -28,7 +68,7 @@
});
remoteWrite = lib.mkIf (!isServer) [{
name = config.networking.hostName;
url = "https://${config.hostnames.prometheus}";
url = "https://${config.hostnames.prometheus}/api/v1/write";
basic_auth = {
# Uses password hashed with bcrypt above
username = "prometheus";
@ -38,23 +78,26 @@
};
# Create credentials file for remote Prometheus push
secrets.prometheus = lib.mkIf (!isServer) {
source = ../../../private/prometheus.age;
dest = "${config.secretsDirectory}/prometheus";
owner = "prometheus";
group = "prometheus";
permissions = "0440";
};
systemd.services.prometheus-secret = lib.mkIf (!isServer) {
requiredBy = [ "prometheus.service" ];
before = [ "prometheus.service" ];
};
secrets.prometheus =
lib.mkIf (config.services.prometheus.enable && !isServer) {
source = ../../../private/prometheus.age;
dest = "${config.secretsDirectory}/prometheus";
owner = "prometheus";
group = "prometheus";
permissions = "0440";
};
systemd.services.prometheus-secret =
lib.mkIf (config.services.prometheus.enable && !isServer) {
requiredBy = [ "prometheus.service" ];
before = [ "prometheus.service" ];
};
caddy.routes = lib.mkIf isServer [{
caddy.routes = lib.mkIf (config.services.prometheus.enable && isServer) [{
match = [{ host = [ config.hostnames.prometheus ]; }];
handle = [{
handler = "reverse_proxy";
upstreams = [{ dial = "localhost:9090"; }];
upstreams =
[{ dial = "localhost:${config.services.prometheus.port}"; }];
}];
}];
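
For reference, the process_names entry added above strips the Nix store path from reported process names: a cmdline such as /nix/store/<hash>-caddy-2.7.4/bin/caddy run --config /etc/caddy (hypothetical) would be rendered as

  caddy run --config /etc/caddy

so dashboards group processes by command rather than by store path.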

View File

@ -39,6 +39,11 @@
type = lib.types.str;
description = "Permissions expressed as octal.";
};
prefix = lib.mkOption {
default = "";
type = lib.types.str;
description = "Prefix for secret value (for environment files).";
};
};
});
description = "Set of secrets to decrypt to disk.";
@ -65,10 +70,10 @@
wantedBy = [ "multi-user.target" ];
serviceConfig.Type = "oneshot";
script = ''
${pkgs.age}/bin/age --decrypt \
--identity ${config.identityFile} \
--output ${attrs.dest} \
${attrs.source}
echo "${attrs.prefix}$(
${pkgs.age}/bin/age --decrypt \
--identity ${config.identityFile} ${attrs.source}
)" > ${attrs.dest}
chown '${attrs.owner}':'${attrs.group}' '${attrs.dest}'
chmod '${attrs.permissions}' '${attrs.dest}'
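
The new prefix option lets a decrypted value be written in KEY=value form so the file can be consumed directly as a systemd EnvironmentFile, which is how the vmauth secret below uses it. A hypothetical sketch of that pattern for some other service (names and paths are illustrative only):

  # hypothetical secret consumed as an environment file
  secrets.myServiceToken = {
    source = ../../../private/my-service-token.age;   # hypothetical file
    dest = "${config.secretsDirectory}/my-service-token";
    prefix = "API_TOKEN=";
  };
  systemd.services.my-service.serviceConfig.EnvironmentFile =
    config.secrets.myServiceToken.dest;
  # Decrypt before the consuming unit starts, following the repo's pattern
  systemd.services.myServiceToken-secret = {
    requiredBy = [ "my-service.service" ];
    before = [ "my-service.service" ];
  };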

View File

@ -13,9 +13,8 @@
};
};
config = lib.mkIf (config.publicKey != null) {
config = lib.mkIf config.services.openssh.enable {
services.openssh = {
enable = true;
ports = [ 22 ];
allowSFTP = true;
settings = {
@ -27,7 +26,7 @@
};
users.users.${config.user}.openssh.authorizedKeys.keys =
[ config.publicKey ];
lib.mkIf (config.publicKey != null) [ config.publicKey ];
# Implement a simple fail2ban service for sshd
services.sshguard.enable = true;

View File

@ -0,0 +1,95 @@
{ config, pkgs, lib, ... }:
let
username = "prometheus";
prometheusConfig = (pkgs.formats.yaml { }).generate "prometheus.yml" {
scrape_configs = [{
job_name = config.networking.hostName;
stream_parse = true;
static_configs = [{ targets = config.prometheus.scrapeTargets; }];
}];
};
authConfig = (pkgs.formats.yaml { }).generate "auth.yml" {
users = [{
username = username;
password = "%{PASSWORD}";
url_prefix =
"http://localhost${config.services.victoriametrics.listenAddress}";
}];
};
authPort = "8427";
in {
config = {
services.victoriametrics.extraOptions =
[ "-promscrape.config=${prometheusConfig}" ];
systemd.services.vmauth = lib.mkIf config.services.victoriametrics.enable {
description = "VictoriaMetrics basic auth proxy";
after = [ "network.target" ];
startLimitBurst = 5;
serviceConfig = {
Restart = "on-failure";
RestartSec = 1;
DynamicUser = true;
EnvironmentFile = config.secrets.vmauth.dest;
ExecStart = ''
${pkgs.victoriametrics}/bin/vmauth \
-auth.config=${authConfig} \
-httpListenAddr=:${authPort}'';
};
wantedBy = [ "multi-user.target" ];
};
secrets.vmauth = lib.mkIf config.services.victoriametrics.enable {
source = ../../../private/prometheus.age;
dest = "${config.secretsDirectory}/vmauth";
prefix = "PASSWORD=";
};
systemd.services.vmauth-secret =
lib.mkIf config.services.victoriametrics.enable {
requiredBy = [ "vmauth.service" ];
before = [ "vmauth.service" ];
};
caddy.routes = lib.mkIf config.services.victoriametrics.enable [{
match = [{ host = [ config.hostnames.prometheus ]; }];
handle = [{
handler = "reverse_proxy";
upstreams = [{ dial = "localhost:${authPort}"; }];
}];
}];
# VMAgent
services.vmagent.prometheusConfig = prometheusConfig; # Overwritten below
systemd.services.vmagent.serviceConfig =
lib.mkIf config.services.vmagent.enable {
ExecStart = lib.mkForce ''
${pkgs.victoriametrics}/bin/vmagent \
-promscrape.config=${prometheusConfig} \
-remoteWrite.url="https://${config.hostnames.prometheus}/api/v1/write" \
-remoteWrite.basicAuth.username=${username} \
-remoteWrite.basicAuth.passwordFile=${config.secrets.vmagent.dest}'';
};
secrets.vmagent = lib.mkIf config.services.vmagent.enable {
source = ../../../private/prometheus.age;
dest = "${config.secretsDirectory}/vmagent";
owner = "vmagent";
group = "vmagent";
};
systemd.services.vmagent-secret = lib.mkIf config.services.vmagent.enable {
requiredBy = [ "vmagent.service" ];
before = [ "vmagent.service" ];
};
};
}
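
Taken together with the host diffs above, this module covers both ends of the new metrics pipeline; a condensed sketch of the two roles as used by the hosts in this repo:

  # server role (flame): VictoriaMetrics stores metrics; vmauth fronts it with
  # basic auth on port 8427 and Caddy publishes it at hostnames.prometheus
  services.victoriametrics.enable = true;

  # client role (swan, tempest): vmagent scrapes the local exporters and pushes
  # to https://<hostnames.prometheus>/api/v1/write with the same credentials
  services.vmagent.enable = true;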

View File

@ -0,0 +1,12 @@
-----BEGIN AGE ENCRYPTED FILE-----
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IHNzaC1lZDI1NTE5IE1nSGFPdyBoOVF1
NmZocHpQQnRJcWpWUHh2bU93NkdnZWNzSlFiaHdTd24rcHpsczFRCmJaSzNkNGs1
UDJCN2dYUVE3UTE1OU5RUWljQlN4dmxuUnpOMFYxQTdUaVEKLT4gc3NoLWVkMjU1
MTkgWXlTVU1RIE5HdGd6aTlKM0lFUlYzT1VhS05nZ2ZxTndVZHBNQlJxYlovdXkx
ei96d2cKdzlUYVFFaEIzaS9LZmY3MzM1RmNnR0xjOEpHK1kxM0FMTWRQSlVnczVF
dwotPiBzc2gtZWQyNTUxOSBuanZYNUEgQ1lhMGQvUy9OWkRBR3BZV1pFNmNtb2pq
Y2VEUzhRWGVWUkZJY1l4RGtWdwphdFZtM0ZLZURvYVZQYjV4bWVPdWJxa3RmWmVh
SHl0T0pQWmxnVlFPR2drCi0tLSBnd2lwS3dqUk5Jelg0b3RxbFdEcnJ6ZkkvZTVN
UllBeUUyOXBxVDBKMG5BCkGo9kj9sMVhbnXVM35lGScAb8r5LH9vf5jOdhLC/Wj2
+uA0ONIh7F2GELzf5Cw1KZJ8aHTURM2r41vZvfAQN1RwrmYOiUzlyMrvTDe78cY=
-----END AGE ENCRYPTED FILE-----