1 Commits

Author SHA1 Message Date
b0b9827a2e try to configure using nixosModules
doesn't seem like there's that much benefit?
2023-07-10 17:56:15 -06:00
27 changed files with 239 additions and 544 deletions
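The approach being tried: instead of passing inputs, globals, and overlays into every host file, the flake exports its shared module directories under the nixosModules output and hands each host only self, so a host reaches inputs and modules back through self.inputs and self.nixosModules. A minimal sketch of that shape follows; the module paths, the home-manager input, and the tempest host come from this diff, while the input URLs and everything omitted are assumptions rather than the repository's actual flake.nix.

{
  inputs = {
    nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable"; # assumed URL
    home-manager.url = "github:nix-community/home-manager"; # assumed URL
  };

  outputs = { self, nixpkgs, ... }@inputs: {
    # Shared module sets, reachable from any host as self.nixosModules.<name>
    nixosModules = {
      common = import ./modules/common;
      nixos = import ./modules/nixos;
    };

    # A host build receives only `self` and digs everything else out of it
    nixosConfigurations.tempest = nixpkgs.lib.nixosSystem {
      system = "x86_64-linux";
      modules = [
        self.inputs.home-manager.nixosModules.home-manager
        self.nixosModules.common
        self.nixosModules.nixos
        { networking.hostName = "tempest"; }
      ];
    };
  };
}

The question raised in the commit message is whether this indirection through self is clearer than threading the same values into each host as arguments; the per-host diffs below show what each variant looks like in practice.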

View File

@@ -110,10 +110,20 @@
};
-outputs = { nixpkgs, ... }@inputs:
+outputs = { self, nixpkgs, ... }@inputs:
let
+# Common overlays to always use
+overlays = [
+inputs.nur.overlay
+inputs.nix2vim.overlay
+(import ./overlays/neovim-plugins.nix inputs)
+(import ./overlays/calibre-web.nix)
+(import ./overlays/disko.nix inputs)
+(import ./overlays/tree-sitter.nix inputs)
+];
# Global configuration for my systems
globals = let baseName = "masu.rs";
in rec {
@@ -125,6 +135,7 @@
mail.imapHost = "imap.purelymail.com";
mail.smtpHost = "smtp.purelymail.com";
dotfilesRepo = "git@github.com:nmasur/dotfiles";
+nixpkgs.overlays = overlays;
hostnames = {
git = "git.${baseName}";
metrics = "metrics.${baseName}";
@@ -137,16 +148,6 @@
};
};
-# Common overlays to always use
-overlays = [
-inputs.nur.overlay
-inputs.nix2vim.overlay
-(import ./overlays/neovim-plugins.nix inputs)
-(import ./overlays/calibre-web.nix)
-(import ./overlays/disko.nix inputs)
-(import ./overlays/tree-sitter.nix inputs)
-];
# System types to support.
supportedSystems =
[ "x86_64-linux" "x86_64-darwin" "aarch64-linux" "aarch64-darwin" ];
@@ -156,20 +157,26 @@
in rec {
+nixosModules = {
+globals = { config }: { config = globals; };
+common = import ./modules/common;
+nixos = import ./modules/nixos;
+darwin = import ./modules/darwin;
+};
# Contains my full system builds, including home-manager
# nixos-rebuild switch --flake .#tempest
nixosConfigurations = {
-tempest = import ./hosts/tempest { inherit inputs globals overlays; };
-hydra = import ./hosts/hydra { inherit inputs globals overlays; };
-flame = import ./hosts/flame { inherit inputs globals overlays; };
-swan = import ./hosts/swan { inherit inputs globals overlays; };
+tempest = import ./hosts/tempest { inherit self; };
+hydra = import ./hosts/hydra { inherit self; };
+flame = import ./hosts/flame { inherit self; };
+swan = import ./hosts/swan { inherit self; };
};
# Contains my full Mac system builds, including home-manager
# darwin-rebuild switch --flake .#lookingglass
darwinConfigurations = {
-lookingglass =
-import ./hosts/lookingglass { inherit inputs globals overlays; };
+lookingglass = import ./hosts/lookingglass { inherit self; };
};
# For quickly applying home-manager settings with:
@@ -185,10 +192,8 @@
diskoConfigurations = { root = import ./disks/root.nix; };
packages = let
-aws = system:
-import ./hosts/aws { inherit inputs globals overlays system; };
-staff = system:
-import ./hosts/staff { inherit inputs globals overlays system; };
+aws = system: import ./hosts/aws { inherit self system; };
+staff = system: import ./hosts/staff { inherit self system; };
neovim = system:
let pkgs = import nixpkgs { inherit system overlays; };
in import ./modules/common/neovim/package {

View File

@@ -1,17 +1,14 @@
-{ inputs, system, globals, overlays, ... }:
-inputs.nixos-generators.nixosGenerate {
+{ self, system, ... }:
+self.inputs.nixos-generators.nixosGenerate {
inherit system;
format = "amazon";
modules = [
-inputs.home-manager.nixosModules.home-manager
+self.inputs.home-manager.nixosModules.home-manager
+self.nixosModules.globals
+self.nixosModules.common
+self.nixosModules.nixos
{
-nixpkgs.overlays = overlays;
-user = globals.user;
-fullName = globals.fullName;
-dotfilesRepo = globals.dotfilesRepo;
-gitName = globals.gitName;
-gitEmail = globals.gitEmail;
networking.hostName = "sheep";
gui.enable = false;
theme.colors = (import ../../colorscheme/gruvbox).dark;
@@ -21,9 +18,6 @@ inputs.nixos-generators.nixosGenerate {
# AWS settings require this
permitRootLogin = "prohibit-password";
}
-../../modules/common
-../../modules/nixos
-../../modules/nixos/services/sshd.nix
] ++ [
# Required to fix diskSize errors during build
({ ... }: { amazonImage.sizeMB = 16 * 1024; })

View File

@@ -4,24 +4,23 @@
# How to install:
# https://blog.korfuri.fr/posts/2022/08/nixos-on-an-oracle-free-tier-ampere-machine/
-{ inputs, globals, overlays, ... }:
-inputs.nixpkgs.lib.nixosSystem {
+{ self, ... }:
+self.inputs.nixpkgs.lib.nixosSystem {
system = "aarch64-linux";
specialArgs = { };
modules = [
-globals
-inputs.home-manager.nixosModules.home-manager
-../../modules/common
-../../modules/nixos
+self.inputs.home-manager.nixosModules.home-manager
+self.nixosModules.globals
+self.nixosModules.common
+self.nixosModules.nixos
{
-nixpkgs.overlays = overlays;
# Hardware
server = true;
networking.hostName = "flame";
-imports = [ (inputs.nixpkgs + "/nixos/modules/profiles/qemu-guest.nix") ];
+imports =
+[ (self.inputs.nixpkgs + "/nixos/modules/profiles/qemu-guest.nix") ];
boot.initrd.availableKernelModules = [ "xhci_pci" "virtio_pci" "usbhid" ];
fileSystems."/" = {
@@ -49,8 +48,7 @@ inputs.nixpkgs.lib.nixosSystem {
services.caddy.enable = true;
services.grafana.enable = true;
-services.openssh.enable = true;
-services.victoriametrics.enable = true;
+services.prometheus.enable = true;
services.gitea.enable = true;
services.vaultwarden.enable = true;
services.minecraft-server.enable = true; # Setup Minecraft server
@@ -72,9 +70,6 @@ inputs.nixpkgs.lib.nixosSystem {
accessKeyId = "0026b0e73b2e2c80000000005";
};
-# # Grant access to Jellyfin directories from Nextcloud
-# # users.users.nextcloud.extraGroups = [ "jellyfin" ];
# # Wireguard config for Transmission
# wireguard.enable = true;
# networking.wireguard.interfaces.wg0 = {

View File

@ -1,21 +1,20 @@
# The Hydra # The Hydra
# System configuration for WSL # System configuration for WSL
{ inputs, globals, overlays, ... }: { self, ... }:
inputs.nixpkgs.lib.nixosSystem { self.inputs.nixpkgs.lib.nixosSystem {
system = "x86_64-linux"; system = "x86_64-linux";
specialArgs = { }; specialArgs = { };
modules = [ modules = [
../../modules/common self.inputs.wsl.nixosModules.wsl
../../modules/nixos self.inputs.home-manager.nixosModules.home-manager
../../modules/wsl self.nixosModules.globals
globals self.nixosModules.common
inputs.wsl.nixosModules.wsl self.nixosModules.nixos
inputs.home-manager.nixosModules.home-manager self.nixosModules.wsl
{ {
networking.hostName = "hydra"; networking.hostName = "hydra";
nixpkgs.overlays = overlays;
identityFile = "/home/${globals.user}/.ssh/id_ed25519"; identityFile = "/home/${globals.user}/.ssh/id_ed25519";
gui.enable = false; gui.enable = false;
theme = { theme = {

View File

@@ -1,46 +1,46 @@
# The Looking Glass
# System configuration for my work Macbook
-{ inputs, globals, overlays, ... }:
-inputs.darwin.lib.darwinSystem {
+{ self, ... }:
+self.inputs.darwin.lib.darwinSystem {
system = "x86_64-darwin";
specialArgs = { };
modules = [
-../../modules/common
-../../modules/darwin
-(globals // rec {
-user = "Noah.Masur";
-gitName = "Noah-Masur_1701";
-gitEmail = "${user}@take2games.com";
-})
-inputs.home-manager.darwinModules.home-manager
-{
-nixpkgs.overlays = [ inputs.firefox-darwin.overlay ] ++ overlays;
-networking.hostName = "lookingglass";
-identityFile = "/Users/Noah.Masur/.ssh/id_ed25519";
-gui.enable = true;
-theme = {
-colors = (import ../../colorscheme/gruvbox-dark).dark;
-dark = true;
-};
-mail.user = globals.user;
-charm.enable = true;
-neovim.enable = true;
-mail.enable = true;
-mail.aerc.enable = true;
-mail.himalaya.enable = false;
-kitty.enable = true;
-discord.enable = true;
-firefox.enable = true;
-dotfiles.enable = true;
-nixlang.enable = true;
-terraform.enable = true;
-python.enable = true;
-lua.enable = true;
-kubernetes.enable = true;
-_1password.enable = true;
-slack.enable = true;
-}
+self.inputs.home-manager.darwinModules.home-manager
+self.nixosModules.common
+self.nixosModules.darwin
+({ config, lib, ... }: {
+config = rec {
+user = lib.mkForce "Noah.Masur";
+gitName = lib.mkForce "Noah-Masur_1701";
+gitEmail = lib.mkForce "${user}@take2games.com";
+nixpkgs.overlays = [ self.inputs.firefox-darwin.overlay ];
+networking.hostName = "lookingglass";
+identityFile = "/Users/${user}/.ssh/id_ed25519";
+gui.enable = true;
+theme = {
+colors = (import ../../colorscheme/gruvbox-dark).dark;
+dark = true;
+};
+mail.user = globals.user;
+charm.enable = true;
+neovim.enable = true;
+mail.enable = true;
+mail.aerc.enable = true;
+mail.himalaya.enable = false;
+kitty.enable = true;
+discord.enable = true;
+firefox.enable = true;
+dotfiles.enable = true;
+nixlang.enable = true;
+terraform.enable = true;
+python.enable = true;
+lua.enable = true;
+kubernetes.enable = true;
+_1password.enable = true;
+slack.enable = true;
+};
+})
];
}

View File

@@ -1,31 +1,32 @@
# The Staff
# ISO configuration for my USB drive
-{ inputs, system, overlays, ... }:
-inputs.nixos-generators.nixosGenerate {
+{ self, system, ... }:
+self.inputs.nixos-generators.nixosGenerate {
inherit system;
format = "install-iso";
-modules = [{
-nixpkgs.overlays = overlays;
-networking.hostName = "staff";
-users.extraUsers.root.openssh.authorizedKeys.keys = [
-"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIB+AbmjGEwITk5CK9y7+Rg27Fokgj9QEjgc9wST6MA3s"
-];
-services.openssh = {
-enable = true;
-ports = [ 22 ];
-allowSFTP = true;
-settings = {
-GatewayPorts = "no";
-X11Forwarding = false;
-PasswordAuthentication = false;
-PermitRootLogin = "yes";
-};
-};
-environment.systemPackages =
-let pkgs = import inputs.nixpkgs { inherit system overlays; };
-in with pkgs; [
+modules = [
+self.nixosModules.global
+self.nixosModules.common
+self.nixosModules.nixos
+({ config, pkgs, ... }: {
+networking.hostName = "staff";
+users.extraUsers.root.openssh.authorizedKeys.keys = [
+"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIB+AbmjGEwITk5CK9y7+Rg27Fokgj9QEjgc9wST6MA3s"
+];
+services.openssh = {
+enable = true;
+ports = [ 22 ];
+allowSFTP = true;
+settings = {
+GatewayPorts = "no";
+X11Forwarding = false;
+PasswordAuthentication = false;
+PermitRootLogin = "yes";
+};
+};
+environment.systemPackages = with pkgs; [
git
vim
wget
@@ -35,9 +36,10 @@ inputs.nixos-generators.nixosGenerate {
colors = (import ../../colorscheme/gruvbox).dark;
})
];
nix.extraOptions = ''
experimental-features = nix-command flakes
warn-dirty = false
'';
-}];
+})
+];
}

View File

@ -1,21 +1,20 @@
# The Swan # The Swan
# System configuration for my home NAS server # System configuration for my home NAS server
{ inputs, globals, overlays, ... }: { self, ... }:
inputs.nixpkgs.lib.nixosSystem { self.inputs.nixpkgs.lib.nixosSystem {
system = "x86_64-linux"; system = "x86_64-linux";
specialArgs = { }; specialArgs = { };
modules = [ modules = [
globals self.inputs.home-manager.nixosModules.home-manager
inputs.home-manager.nixosModules.home-manager self.inputs.disko.nixosModules.disko
inputs.disko.nixosModules.disko self.nixosModules.globals
../../modules/common self.nixosModules.common
../../modules/nixos self.nixosModules.nixos
{ {
# Hardware # Hardware
server = true; server = true;
physical = true;
networking.hostName = "swan"; networking.hostName = "swan";
boot.initrd.availableKernelModules = boot.initrd.availableKernelModules =
@ -47,20 +46,16 @@ inputs.nixpkgs.lib.nixosSystem {
gui.enable = false; gui.enable = false;
theme = { colors = (import ../../colorscheme/gruvbox).dark; }; theme = { colors = (import ../../colorscheme/gruvbox).dark; };
nixpkgs.overlays = overlays;
neovim.enable = true; neovim.enable = true;
cloudflare.enable = true; cloudflare.enable = true;
dotfiles.enable = true; dotfiles.enable = true;
arrs.enable = true; arrs.enable = true;
services.bind.enable = true;
services.caddy.enable = true; services.caddy.enable = true;
services.jellyfin.enable = true; services.jellyfin.enable = true;
services.nextcloud.enable = true; services.nextcloud.enable = true;
services.calibre-web.enable = true; services.calibre-web.enable = true;
services.openssh.enable = true; services.prometheus.enable = true;
services.prometheus.enable = false;
services.vmagent.enable = true;
services.samba.enable = true; services.samba.enable = true;
cloudflareTunnel = { cloudflareTunnel = {

View File

@ -1,18 +1,16 @@
# The Tempest # The Tempest
# System configuration for my desktop # System configuration for my desktop
{ inputs, globals, overlays, ... }: { self, ... }:
inputs.nixpkgs.lib.nixosSystem { self.inputs.nixpkgs.lib.nixosSystem {
system = "x86_64-linux"; system = "x86_64-linux";
modules = [ modules = [
globals self.inputs.home-manager.nixosModules.home-manager
inputs.home-manager.nixosModules.home-manager self.nixosModules.globals
../../modules/common self.nixosModules.common
../../modules/nixos self.nixosModules.nixos
{ {
nixpkgs.overlays = overlays;
# Hardware # Hardware
physical = true; physical = true;
networking.hostName = "tempest"; networking.hostName = "tempest";
@ -53,7 +51,7 @@ inputs.nixpkgs.lib.nixosSystem {
# Must be prepared ahead # Must be prepared ahead
identityFile = "/home/${globals.user}/.ssh/id_ed25519"; identityFile = "/home/${globals.user}/.ssh/id_ed25519";
passwordHash = inputs.nixpkgs.lib.fileContents ../../password.sha512; passwordHash = self.inputs.nixpkgs.lib.fileContents ../../password.sha512;
# Theming # Theming
gui.enable = true; gui.enable = true;
@ -61,8 +59,8 @@ inputs.nixpkgs.lib.nixosSystem {
colors = (import ../../colorscheme/gruvbox-dark).dark; colors = (import ../../colorscheme/gruvbox-dark).dark;
dark = true; dark = true;
}; };
wallpaper = "${inputs.wallpapers}/gruvbox/road.jpg"; wallpaper = "${self.inputs.wallpapers}/gruvbox/road.jpg";
gtk.theme.name = inputs.nixpkgs.lib.mkDefault "Adwaita-dark"; gtk.theme.name = self.inputs.nixpkgs.lib.mkDefault "Adwaita-dark";
# Programs and services # Programs and services
charm.enable = true; charm.enable = true;
@ -91,9 +89,7 @@ inputs.nixpkgs.lib.nixosSystem {
leagueoflegends.enable = true; leagueoflegends.enable = true;
ryujinx.enable = true; ryujinx.enable = true;
}; };
services.vmagent.enable = true;
services.openssh.enable = true; # Required for Cloudflare tunnel
cloudflareTunnel = { cloudflareTunnel = {
enable = true; enable = true;
id = "ac133a82-31fb-480c-942a-cdbcd4c58173"; id = "ac133a82-31fb-480c-942a-cdbcd4c58173";

View File

@@ -40,7 +40,6 @@
defaultApplications."inode/directory" =
lib.mkBefore [ "org.gnome.Nautilus.desktop" ];
};
-};
# # Set default for opening directories
@@ -51,13 +50,6 @@
# lib.mkForce [ "org.gnome.Nautilus.desktop" ];
# };
-# Delete Trash files older than 1 week
-systemd.user.services.empty-trash = {
-description = "Empty Trash on a regular basis";
-wantedBy = [ "default.target" ];
-script = "${pkgs.trash-cli}/bin/trash-empty 7";
-};
};
}

View File

@@ -1,6 +1,6 @@
{ config, pkgs, lib, ... }: {
-boot.loader = lib.mkIf (config.physical && !config.server) {
+boot.loader = lib.mkIf config.physical {
grub = {
enable = true;

View File

@@ -2,7 +2,12 @@
config = lib.mkIf config.physical {
-networking.useDHCP = true;
+# The global useDHCP flag is deprecated, therefore explicitly set to false here.
+# Per-interface useDHCP will be mandatory in the future, so this generated config
+# replicates the default behaviour.
+networking.useDHCP = false;
+networking.interfaces.enp5s0.useDHCP = true;
+networking.interfaces.wlp4s0.useDHCP = true;
networking.firewall.allowPing = lib.mkIf config.server true;
@@ -10,9 +15,6 @@
services.avahi = {
enable = true;
domainName = "local";
-ipv6 = false; # Should work either way
-# Resolve local hostnames using Avahi DNS
-nssmdns = true;
publish = {
enable = true;
addresses = true;
@@ -20,6 +22,10 @@
workstation = true;
};
};
+# Resolve local hostnames using Avahi DNS
+services.avahi.nssmdns = true;
};
}

View File

@@ -1,6 +1,6 @@
-{ config, lib, ... }: {
-config = lib.mkIf config.server {
+{ config, pkgs, lib, ... }: {
+config = lib.mkIf (pkgs.stdenv.isLinux && config.server) {
# Servers need a bootloader or they won't start
boot.loader.systemd-boot.enable = true;

View File

@@ -1,6 +1,6 @@
{ config, pkgs, lib, ... }: {
-config = lib.mkIf (config.physical && !config.server) {
+config = lib.mkIf config.physical {
# Prevent wake from keyboard
powerManagement.powerDownCommands = ''

View File

@@ -1,21 +1,16 @@
-{ config, lib, ... }: {
+{ config, pkgs, lib, ... }: {
options = { zfs.enable = lib.mkEnableOption "ZFS file system."; };
-config = lib.mkIf (config.server && config.zfs.enable) {
+config =
+lib.mkIf (pkgs.stdenv.isLinux && config.server && config.zfs.enable) {
# Only use compatible Linux kernel, since ZFS can be behind
-boot.kernelPackages = config.boot.zfs.package.latestCompatibleLinuxPackages;
-boot.kernelParams = [ "nohibernate" ];
-boot.supportedFilesystems = [ "zfs" ];
-services.prometheus.exporters.zfs.enable =
-config.prometheus.exporters.enable;
-prometheus.scrapeTargets = [
-"127.0.0.1:${
-builtins.toString config.services.prometheus.exporters.zfs.port
-}"
-];
+boot.kernelPackages =
+config.boot.zfs.package.latestCompatibleLinuxPackages;
+boot.kernelParams = [ "nohibernate" ];
+boot.supportedFilesystems = [ "zfs" ];
};
}

View File

@@ -1,55 +0,0 @@
{ config, pkgs, lib, ... }:
let
localIp = "192.168.1.218";
localServices = [
config.hostnames.stream
config.hostnames.content
config.hostnames.books
config.hostnames.download
];
mkRecord = service: "${service} A ${localIp}";
localRecords = lib.concatLines (map mkRecord localServices);
in {
config = lib.mkIf config.services.bind.enable {
caddy.cidrAllowlist = [ "192.168.0.0/16" ];
services.bind = {
cacheNetworks = [ "127.0.0.0/24" "192.168.0.0/16" ];
forwarders = [ "1.1.1.1" "1.0.0.1" ];
ipv4Only = true;
# Use rpz zone as an override
extraOptions = ''response-policy { zone "rpz"; };'';
zones = {
rpz = {
master = true;
file = pkgs.writeText "db.rpz" ''
$TTL 60 ; 1 minute
@ IN SOA localhost. root.localhost. (
2023071800 ; serial
1h ; refresh
30m ; retry
1w ; expire
30m ; minimum ttl
)
IN NS localhost.
localhost A 127.0.0.1
${localRecords}
'';
};
};
};
networking.firewall.allowedTCPPorts = [ 53 ];
networking.firewall.allowedUDPPorts = [ 53 ];
};
}

View File

@@ -1,70 +1,52 @@
{ config, pkgs, lib, ... }: {
options = {
-caddy = {
-tlsPolicies = lib.mkOption {
-type = lib.types.listOf lib.types.attrs;
-description = "Caddy JSON TLS policies";
-default = [ ];
-};
-routes = lib.mkOption {
-type = lib.types.listOf lib.types.attrs;
-description = "Caddy JSON routes for http servers";
-default = [ ];
-};
-blocks = lib.mkOption {
-type = lib.types.listOf lib.types.attrs;
-description = "Caddy JSON error blocks for http servers";
-default = [ ];
-};
-cidrAllowlist = lib.mkOption {
-type = lib.types.listOf lib.types.str;
-description = "CIDR blocks to allow for requests";
-default = [ "127.0.0.1/32" ];
-};
-};
+caddy.tlsPolicies = lib.mkOption {
+type = lib.types.listOf lib.types.attrs;
+description = "Caddy JSON TLS policies";
+default = [ ];
+};
+caddy.routes = lib.mkOption {
+type = lib.types.listOf lib.types.attrs;
+description = "Caddy JSON routes for http servers";
+default = [ ];
+};
+caddy.blocks = lib.mkOption {
+type = lib.types.listOf lib.types.attrs;
+description = "Caddy JSON error blocks for http servers";
+default = [ ];
+};
};
-config = lib.mkIf config.services.caddy.enable {
-# Force Caddy to 403 if not coming from allowlisted source
-caddy.routes = [{
-match = [{ not = [{ remote_ip.ranges = config.caddy.cidrAllowlist; }]; }];
-handle = [{
-handler = "static_response";
-status_code = "403";
-}];
-}];
-services.caddy = {
-adapter = "''"; # Required to enable JSON
-configFile = pkgs.writeText "Caddyfile" (builtins.toJSON {
-apps.http.servers.main = {
-listen = [ ":443" ];
-routes = config.caddy.routes;
-errors.routes = config.caddy.blocks;
-# logs = { }; # Uncomment to collect access logs
-};
-apps.http.servers.metrics = { }; # Enables Prometheus metrics
-apps.tls.automation.policies = config.caddy.tlsPolicies;
-logging.logs.main = {
-encoder = { format = "console"; };
-writer = {
-output = "file";
-filename = "${config.services.caddy.logDir}/caddy.log";
-roll = true;
-};
-level = "INFO";
-};
-});
-};
-networking.firewall.allowedTCPPorts = [ 80 443 ];
-networking.firewall.allowedUDPPorts = [ 443 ];
-prometheus.scrapeTargets = [ "127.0.0.1:2019" ];
-};
+config =
+lib.mkIf (config.services.caddy.enable && config.caddy.routes != [ ]) {
+services.caddy = {
+adapter = "''"; # Required to enable JSON
+configFile = pkgs.writeText "Caddyfile" (builtins.toJSON {
+apps.http.servers.main = {
+listen = [ ":443" ];
+routes = config.caddy.routes;
+errors.routes = config.caddy.blocks;
+# logs = { }; # Uncomment to collect access logs
+};
+apps.tls.automation.policies = config.caddy.tlsPolicies;
+logging.logs.main = {
+encoder = { format = "console"; };
+writer = {
+output = "file";
+filename = "${config.services.caddy.logDir}/caddy.log";
+roll = true;
+};
+level = "INFO";
+};
+});
+};
+networking.firewall.allowedTCPPorts = [ 80 443 ];
+networking.firewall.allowedUDPPorts = [ 443 ];
+};
}

View File

@@ -41,7 +41,13 @@ in {
config = lib.mkIf config.cloudflare.enable {
# Forces Caddy to error if coming from a non-Cloudflare IP
-caddy.cidrAllowlist = cloudflareIpRanges;
+caddy.blocks = [{
+match = [{ not = [{ remote_ip.ranges = cloudflareIpRanges; }]; }];
+handle = [{
+handler = "static_response";
+abort = true;
+}];
+}];
# Tell Caddy to use Cloudflare DNS for ACME challenge validation
services.caddy.package = (pkgs.callPackage ../../../overlays/caddy.nix {

View File

@@ -3,7 +3,6 @@
imports = [
./arr.nix
./backups.nix
-./bind.nix
./caddy.nix
./calibre.nix
./cloudflare-tunnel.nix
@@ -25,7 +24,6 @@
./sshd.nix
./transmission.nix
./vaultwarden.nix
-./victoriametrics.nix
./wireguard.nix
];

View File

@@ -10,9 +10,9 @@
enable = true;
labels = [
# Provide a Debian base with NodeJS for actions
-# "debian-latest:docker://node:18-bullseye"
+"debian-latest:docker://node:18-bullseye"
# Fake the Ubuntu name, because Node provides no Ubuntu builds
-# "ubuntu-latest:docker://node:18-bullseye"
+"ubuntu-latest:docker://node:18-bullseye"
# Provide native execution on the host using below packages
"native:host"
];
@@ -31,23 +31,6 @@
tokenFile = config.secrets.giteaRunnerToken.dest;
};
-secrets.giteaRunnerToken = {
-source = ../../../private/gitea-runner-token.age; # TOKEN=xyz
-dest = "${config.secretsDirectory}/gitea-runner-token";
-};
-systemd.services.giteaRunnerToken-secret = {
-requiredBy = [
-"gitea-runner-${
-config.services.gitea-actions-runner.instances.${config.networking.hostName}.name
-}.service"
-];
-before = [
-"gitea-runner-${
-config.services.gitea-actions-runner.instances.${config.networking.hostName}.name
-}.service"
-];
-};
};
}

View File

@@ -13,12 +13,7 @@
match = [{ host = [ config.hostnames.metrics ]; }];
handle = [{
handler = "reverse_proxy";
-upstreams = [{
-dial = "localhost:${
-builtins.toString
-config.services.grafana.settings.server.http_port
-}";
-}];
+upstreams = [{ dial = "localhost:3000"; }];
}];
}];

View File

@@ -5,25 +5,13 @@
services.jellyfin.group = "media";
users.users.jellyfin = { isSystemUser = true; };
-caddy.routes = [
-{
-match = [{
-host = [ config.hostnames.stream ];
-path = [ "/metrics*" ];
-}];
-handle = [{
-handler = "static_response";
-status_code = "403";
-}];
-}
-{
-match = [{ host = [ config.hostnames.stream ]; }];
-handle = [{
-handler = "reverse_proxy";
-upstreams = [{ dial = "localhost:8096"; }];
-}];
-}
-];
+caddy.routes = [{
+match = [{ host = [ config.hostnames.stream ]; }];
+handle = [{
+handler = "reverse_proxy";
+upstreams = [{ dial = "localhost:8096"; }];
+}];
+}];
# Create videos directory, allow anyone in Jellyfin group to manage it
systemd.tmpfiles.rules = [
@@ -47,9 +35,6 @@
users.users.jellyfin.extraGroups =
[ "render" "video" ]; # Access to /dev/dri
-# Requires MetricsEnable is true in /var/lib/jellyfin/config/system.xml
-prometheus.scrapeTargets = [ "127.0.0.1:8096" ];
};
}

View File

@@ -1,15 +1,9 @@
-{ config, pkgs, lib, ... }:
-let
-port = 8080;
-in {
+{ config, pkgs, lib, ... }: {
config = lib.mkIf config.services.nextcloud.enable {
services.nextcloud = {
-package = pkgs.nextcloud27; # Required to specify
+package = pkgs.nextcloud26; # Required to specify
datadir = "/data/nextcloud";
https = true;
hostName = "localhost";
@@ -17,14 +11,13 @@ in {
config = {
adminpassFile = config.secrets.nextcloud.dest;
extraTrustedDomains = [ config.hostnames.content ];
-trustedProxies = [ "127.0.0.1" ];
};
};
# Don't let Nginx use main ports (using Caddy instead)
services.nginx.virtualHosts."localhost".listen = [{
addr = "127.0.0.1";
-port = port;
+port = 8080;
}];
# Point Caddy to Nginx
@@ -32,7 +25,7 @@ in {
match = [{ host = [ config.hostnames.content ]; }];
handle = [{
handler = "reverse_proxy";
-upstreams = [{ dial = "localhost:${builtins.toString port}"; }];
+upstreams = [{ dial = "localhost:8080"; }];
}];
}];
@@ -81,23 +74,6 @@ in {
requires = [ "phpfpm-nextcloud.service" ];
};
-# Log metrics to prometheus
-services.prometheus.exporters.nextcloud = {
-enable = config.prometheus.exporters.enable;
-username = config.services.nextcloud.config.adminuser;
-url = "http://localhost:${builtins.toString port}";
-passwordFile = config.services.nextcloud.config.adminpassFile;
-};
-prometheus.scrapeTargets = [
-"127.0.0.1:${
-builtins.toString config.services.prometheus.exporters.nextcloud.port
-}"
-];
-# Allows nextcloud-exporter to read passwordFile
-users.users.nextcloud-exporter.extraGroups =
-lib.mkIf config.services.prometheus.exporters.nextcloud.enable
-[ "nextcloud" ];
};
}

View File

@@ -1,58 +1,18 @@
{ config, pkgs, lib, ... }: {
-options.prometheus = {
-exporters.enable = lib.mkEnableOption "Enable Prometheus exporters";
-scrapeTargets = lib.mkOption {
-type = lib.types.listOf lib.types.str;
-description = "Prometheus scrape targets";
-default = [ ];
-};
-};
config = let
# If hosting Grafana, host local Prometheus and listen for inbound jobs. If
# not hosting Grafana, send remote Prometheus writes to primary host.
isServer = config.services.grafana.enable;
-in {
-# Turn on exporters if any Prometheus scraper is running
-prometheus.exporters.enable = builtins.any (x: x) [
-config.services.prometheus.enable
-config.services.victoriametrics.enable
-config.services.vmagent.enable
-];
-prometheus.scrapeTargets = [
-"127.0.0.1:${
-builtins.toString config.services.prometheus.exporters.node.port
-}"
-"127.0.0.1:${
-builtins.toString config.services.prometheus.exporters.systemd.port
-}"
-"127.0.0.1:${
-builtins.toString config.services.prometheus.exporters.process.port
-}"
-];
+in lib.mkIf config.services.prometheus.enable {
services.prometheus = {
-exporters.node.enable = config.prometheus.exporters.enable;
-exporters.node.enabledCollectors = [ ];
-exporters.node.disabledCollectors = [ "cpufreq" ];
-exporters.systemd.enable = config.prometheus.exporters.enable;
-exporters.process.enable = config.prometheus.exporters.enable;
-exporters.process.settings.process_names = [
-# Remove nix store path from process name
-{
-name = "{{.Matches.Wrapped}} {{ .Matches.Args }}";
-cmdline = [ "^/nix/store[^ ]*/(?P<Wrapped>[^ /]*) (?P<Args>.*)" ];
-}
-];
-extraFlags = lib.mkIf isServer [ "--web.enable-remote-write-receiver" ];
+exporters.node.enable = true;
scrapeConfigs = [{
-job_name = config.networking.hostName;
-static_configs = [{ targets = config.scrapeTargets; }];
+job_name = "local";
+static_configs = [{ targets = [ "127.0.0.1:9100" ]; }];
}];
webExternalUrl =
lib.mkIf isServer "https://${config.hostnames.prometheus}";
@@ -68,7 +28,7 @@
});
remoteWrite = lib.mkIf (!isServer) [{
name = config.networking.hostName;
-url = "https://${config.hostnames.prometheus}/api/v1/write";
+url = "https://${config.hostnames.prometheus}";
basic_auth = {
# Uses password hashed with bcrypt above
username = "prometheus";
@@ -78,26 +38,23 @@
};
# Create credentials file for remote Prometheus push
-secrets.prometheus =
-lib.mkIf (config.services.prometheus.enable && !isServer) {
-source = ../../../private/prometheus.age;
-dest = "${config.secretsDirectory}/prometheus";
-owner = "prometheus";
-group = "prometheus";
-permissions = "0440";
-};
-systemd.services.prometheus-secret =
-lib.mkIf (config.services.prometheus.enable && !isServer) {
-requiredBy = [ "prometheus.service" ];
-before = [ "prometheus.service" ];
-};
-caddy.routes = lib.mkIf (config.services.prometheus.enable && isServer) [{
+secrets.prometheus = lib.mkIf (!isServer) {
+source = ../../../private/prometheus.age;
+dest = "${config.secretsDirectory}/prometheus";
+owner = "prometheus";
+group = "prometheus";
+permissions = "0440";
+};
+systemd.services.prometheus-secret = lib.mkIf (!isServer) {
+requiredBy = [ "prometheus.service" ];
+before = [ "prometheus.service" ];
+};
+caddy.routes = lib.mkIf isServer [{
match = [{ host = [ config.hostnames.prometheus ]; }];
handle = [{
handler = "reverse_proxy";
-upstreams =
-[{ dial = "localhost:${config.services.prometheus.port}"; }];
+upstreams = [{ dial = "localhost:9090"; }];
}];
}];

View File

@ -39,11 +39,6 @@
type = lib.types.str; type = lib.types.str;
description = "Permissions expressed as octal."; description = "Permissions expressed as octal.";
}; };
prefix = lib.mkOption {
default = "";
type = lib.types.str;
description = "Prefix for secret value (for environment files).";
};
}; };
}); });
description = "Set of secrets to decrypt to disk."; description = "Set of secrets to decrypt to disk.";
@ -70,10 +65,10 @@
wantedBy = [ "multi-user.target" ]; wantedBy = [ "multi-user.target" ];
serviceConfig.Type = "oneshot"; serviceConfig.Type = "oneshot";
script = '' script = ''
echo "${attrs.prefix}$( ${pkgs.age}/bin/age --decrypt \
${pkgs.age}/bin/age --decrypt \ --identity ${config.identityFile} \
--identity ${config.identityFile} ${attrs.source} --output ${attrs.dest} \
)" > ${attrs.dest} ${attrs.source}
chown '${attrs.owner}':'${attrs.group}' '${attrs.dest}' chown '${attrs.owner}':'${attrs.group}' '${attrs.dest}'
chmod '${attrs.permissions}' '${attrs.dest}' chmod '${attrs.permissions}' '${attrs.dest}'

View File

@@ -13,8 +13,9 @@
};
};
-config = lib.mkIf config.services.openssh.enable {
+config = lib.mkIf (config.publicKey != null) {
services.openssh = {
+enable = true;
ports = [ 22 ];
allowSFTP = true;
settings = {
@@ -26,7 +27,7 @@
};
users.users.${config.user}.openssh.authorizedKeys.keys =
-lib.mkIf (config.publicKey != null) [ config.publicKey ];
+[ config.publicKey ];
# Implement a simple fail2ban service for sshd
services.sshguard.enable = true;

View File

@@ -1,95 +0,0 @@
{ config, pkgs, lib, ... }:
let
username = "prometheus";
prometheusConfig = (pkgs.formats.yaml { }).generate "prometheus.yml" {
scrape_configs = [{
job_name = config.networking.hostName;
stream_parse = true;
static_configs = [{ targets = config.prometheus.scrapeTargets; }];
}];
};
authConfig = (pkgs.formats.yaml { }).generate "auth.yml" {
users = [{
username = username;
password = "%{PASSWORD}";
url_prefix =
"http://localhost${config.services.victoriametrics.listenAddress}";
}];
};
authPort = "8427";
in {
config = {
services.victoriametrics.extraOptions =
[ "-promscrape.config=${prometheusConfig}" ];
systemd.services.vmauth = lib.mkIf config.services.victoriametrics.enable {
description = "VictoriaMetrics basic auth proxy";
after = [ "network.target" ];
startLimitBurst = 5;
serviceConfig = {
Restart = "on-failure";
RestartSec = 1;
DynamicUser = true;
EnvironmentFile = config.secrets.vmauth.dest;
ExecStart = ''
${pkgs.victoriametrics}/bin/vmauth \
-auth.config=${authConfig} \
-httpListenAddr=:${authPort}'';
};
wantedBy = [ "multi-user.target" ];
};
secrets.vmauth = lib.mkIf config.services.victoriametrics.enable {
source = ../../../private/prometheus.age;
dest = "${config.secretsDirectory}/vmauth";
prefix = "PASSWORD=";
};
systemd.services.vmauth-secret =
lib.mkIf config.services.victoriametrics.enable {
requiredBy = [ "vmauth.service" ];
before = [ "vmauth.service" ];
};
caddy.routes = lib.mkIf config.services.victoriametrics.enable [{
match = [{ host = [ config.hostnames.prometheus ]; }];
handle = [{
handler = "reverse_proxy";
upstreams = [{ dial = "localhost:${authPort}"; }];
}];
}];
# VMAgent
services.vmagent.prometheusConfig = prometheusConfig; # Overwritten below
systemd.services.vmagent.serviceConfig =
lib.mkIf config.services.vmagent.enable {
ExecStart = lib.mkForce ''
${pkgs.victoriametrics}/bin/vmagent \
-promscrape.config=${prometheusConfig} \
-remoteWrite.url="https://${config.hostnames.prometheus}/api/v1/write" \
-remoteWrite.basicAuth.username=${username} \
-remoteWrite.basicAuth.passwordFile=${config.secrets.vmagent.dest}'';
};
secrets.vmagent = lib.mkIf config.services.vmagent.enable {
source = ../../../private/prometheus.age;
dest = "${config.secretsDirectory}/vmagent";
owner = "vmagent";
group = "vmagent";
};
systemd.services.vmagent-secret = lib.mkIf config.services.vmagent.enable {
requiredBy = [ "vmagent.service" ];
before = [ "vmagent.service" ];
};
};
}

View File

@@ -1,12 +0,0 @@
-----BEGIN AGE ENCRYPTED FILE-----
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IHNzaC1lZDI1NTE5IE1nSGFPdyBoOVF1
NmZocHpQQnRJcWpWUHh2bU93NkdnZWNzSlFiaHdTd24rcHpsczFRCmJaSzNkNGs1
UDJCN2dYUVE3UTE1OU5RUWljQlN4dmxuUnpOMFYxQTdUaVEKLT4gc3NoLWVkMjU1
MTkgWXlTVU1RIE5HdGd6aTlKM0lFUlYzT1VhS05nZ2ZxTndVZHBNQlJxYlovdXkx
ei96d2cKdzlUYVFFaEIzaS9LZmY3MzM1RmNnR0xjOEpHK1kxM0FMTWRQSlVnczVF
dwotPiBzc2gtZWQyNTUxOSBuanZYNUEgQ1lhMGQvUy9OWkRBR3BZV1pFNmNtb2pq
Y2VEUzhRWGVWUkZJY1l4RGtWdwphdFZtM0ZLZURvYVZQYjV4bWVPdWJxa3RmWmVh
SHl0T0pQWmxnVlFPR2drCi0tLSBnd2lwS3dqUk5Jelg0b3RxbFdEcnJ6ZkkvZTVN
UllBeUUyOXBxVDBKMG5BCkGo9kj9sMVhbnXVM35lGScAb8r5LH9vf5jOdhLC/Wj2
+uA0ONIh7F2GELzf5Cw1KZJ8aHTURM2r41vZvfAQN1RwrmYOiUzlyMrvTDe78cY=
-----END AGE ENCRYPTED FILE-----