21 Commits

Author SHA1 Message Date
2191a11dbf router: add glance (very pretty) 2025-05-15 01:31:41 -07:00
37d79d877e WIP: router: fix caddy handler order 2025-05-13 02:37:40 -07:00
96317180e7 WIP: router: add glance (very pretty) 2025-05-13 02:21:31 -07:00
9cee4d75c4 router: dns: remove default adguard rate limit to fix intermittent slow queries 2025-05-13 02:10:38 -07:00
4ffdb4da4f router: caddy http3 and compression 2025-05-12 00:11:03 -07:00
4fce23e446 renovate: add nix lock file to config 2025-05-11 21:41:34 -07:00
49c781c1a8 router: option to disable desktop to save space
# Conflicts:
#	hosts/router/default.nix
2025-05-11 21:36:28 -07:00
1fbba65785 router: add secrix for secrets; add cloudflare api key 2025-05-11 21:35:03 -07:00
bb633e5bce router: services: caddy acme dns provider cloudflare 2025-05-11 20:29:16 -07:00
2aa3d87184 router: services: caddy subpath proxies for grafana and adguardhome 2025-05-11 18:41:59 -07:00
05d558e836 router: refactor firewall nftables config 2025-05-11 17:56:17 -07:00
8f7e00f27a router: add vnStat service 2025-05-11 15:58:51 -07:00
renovate[bot]
5e023e2982 Add renovate.json 2025-05-06 00:27:13 -07:00
0674c870c7 updates: nixpkgs, home-manager; add texlive 2025-04-30 16:58:15 -07:00
e484d6baa3 updates: nixpkgs, home-manager 2025-04-18 14:01:48 -07:00
9487d5bdea router: add static routes to opnsense to fix vpn issues 2025-04-15 10:35:18 -07:00
9bbd0cfbdd updates: linux 6.13, nixpkgs, home-manager 2025-04-09 00:27:22 -07:00
49278204a4 router: ifconfig: disable linux arp proxy behavior by default
By default, Linux will respond to ARP requests that belong to other interfaces. Normally this isn't a problem, but it causes issues since my WAN and LAN20 are technically bridged.
2025-03-29 23:01:40 -07:00
02bab65de8 router: firewall: proper filtering for hosts proxied by cloudflare 2025-03-26 15:20:15 -07:00
ac1f427677 router: dns: add more upstream providers; add sysdomain hosts for truenas, debbi, etappi 2025-03-26 00:21:19 -07:00
c353ec4020 router: refactor config into separate files, add workaround for networkd issues 2025-03-26 00:18:45 -07:00
14 changed files with 1163 additions and 711 deletions

35
flake.lock generated
View File

@@ -7,11 +7,11 @@
] ]
}, },
"locked": { "locked": {
"lastModified": 1742957044, "lastModified": 1747009742,
"narHash": "sha256-gwW0tBIA77g6qq45y220drTy0DmThF3fJMwVFUtYV9c=", "narHash": "sha256-TNhbM7R45fpq2cdWzvFj+H5ZTcE//I5XSe78GFh0cDY=",
"owner": "nix-community", "owner": "nix-community",
"repo": "home-manager", "repo": "home-manager",
"rev": "ce287a5cd3ef78203bc78021447f937a988d9f6f", "rev": "c74665abd6e4e37d3140e68885bc49a994ffa53c",
"type": "github" "type": "github"
}, },
"original": { "original": {
@@ -58,11 +58,11 @@
}, },
"nixpkgs": { "nixpkgs": {
"locked": { "locked": {
"lastModified": 1742669843, "lastModified": 1746904237,
"narHash": "sha256-G5n+FOXLXcRx+3hCJ6Rt6ZQyF1zqQ0DL0sWAMn2Nk0w=", "narHash": "sha256-3e+AVBczosP5dCLQmMoMEogM57gmZ2qrVSrmq9aResQ=",
"owner": "NixOS", "owner": "NixOS",
"repo": "nixpkgs", "repo": "nixpkgs",
"rev": "1e5b653dff12029333a6546c11e108ede13052eb", "rev": "d89fc19e405cb2d55ce7cc114356846a0ee5e956",
"type": "github" "type": "github"
}, },
"original": { "original": {
@@ -100,7 +100,28 @@
"home-manager": "home-manager", "home-manager": "home-manager",
"nixos-generators": "nixos-generators", "nixos-generators": "nixos-generators",
"nixpkgs": "nixpkgs", "nixpkgs": "nixpkgs",
"plasma-manager": "plasma-manager" "plasma-manager": "plasma-manager",
"secrix": "secrix"
}
},
"secrix": {
"inputs": {
"nixpkgs": [
"nixpkgs"
]
},
"locked": {
"lastModified": 1746643487,
"narHash": "sha256-dcB/DArJObCvqE/ZEdQSDW2BZMeDyF83Se5KPfJvz60=",
"owner": "Platonic-Systems",
"repo": "secrix",
"rev": "4c64203fa5b377953b1fb6d5388187df8b60c6d5",
"type": "github"
},
"original": {
"owner": "Platonic-Systems",
"repo": "secrix",
"type": "github"
} }
} }
}, },

View File

@@ -18,9 +18,15 @@
url = "github:nix-community/nixos-generators"; url = "github:nix-community/nixos-generators";
inputs.nixpkgs.follows = "nixpkgs"; inputs.nixpkgs.follows = "nixpkgs";
}; };
secrix = {
url = "github:Platonic-Systems/secrix";
inputs.nixpkgs.follows = "nixpkgs";
};
}; };
outputs = { self, nixpkgs, home-manager, plasma-manager, nixos-generators }: { outputs = { self, nixpkgs, home-manager, plasma-manager, nixos-generators, secrix }: {
apps.x86_64-linux.secrix = secrix.secrix self;
nixosConfigurations = { nixosConfigurations = {
Yura-PC = nixpkgs.lib.nixosSystem { Yura-PC = nixpkgs.lib.nixosSystem {
system = "x86_64-linux"; system = "x86_64-linux";
@@ -52,6 +58,7 @@
router = nixpkgs.lib.nixosSystem { router = nixpkgs.lib.nixosSystem {
system = "x86_64-linux"; system = "x86_64-linux";
modules = [ modules = [
secrix.nixosModules.default
./modules ./modules
./hosts/common.nix ./hosts/common.nix
./hosts/router ./hosts/router

View File

@@ -19,6 +19,7 @@ in
SHELL = "fish"; SHELL = "fish";
}; };
# TODO: remove (replace by bitwarden-desktop)
services.gnome-keyring = { services.gnome-keyring = {
enable = true; enable = true;
components = [ "pkcs11" "ssh" ]; components = [ "pkcs11" "ssh" ];
@@ -48,7 +49,7 @@ in
ll = "exa -l --color=always --group-directories-first --icons"; # long format ll = "exa -l --color=always --group-directories-first --icons"; # long format
lt = "exa -aT --color=always --group-directories-first --icons"; # tree listing lt = "exa -aT --color=always --group-directories-first --icons"; # tree listing
"l." = "exa -a | rg '^\.'"; # show only dotfiles "l." = "exa -a | rg '^\.'"; # show only dotfiles
# Replace cat with bat # Replace cat with bat
cat = "bat"; cat = "bat";
}; };
@@ -161,6 +162,7 @@ in
shellExpand = true; shellExpand = true;
}; };
dolphinrc.General.ShowFullPath = true; dolphinrc.General.ShowFullPath = true;
dolphinrc.DetailsMode.PreviewSize.persistent = true;
kactivitymanagerdrc = { kactivitymanagerdrc = {
activities."809dc779-bf5b-49e6-8e3f-cbe283cb05b6" = "Default"; activities."809dc779-bf5b-49e6-8e3f-cbe283cb05b6" = "Default";
activities."b34a506d-ac4f-4797-8c08-6ef45bc49341" = "Fun"; activities."b34a506d-ac4f-4797-8c08-6ef45bc49341" = "Fun";

View File

@@ -32,7 +32,7 @@
boot.loader.timeout = 3; boot.loader.timeout = 3;
boot.loader.systemd-boot.configurationLimit = 5; boot.loader.systemd-boot.configurationLimit = 5;
boot.kernelPackages = pkgs.linuxKernel.packages.linux_6_12; boot.kernelPackages = pkgs.linuxKernel.packages.linux_6_13;
# https://nixos.wiki/wiki/Accelerated_Video_Playback # https://nixos.wiki/wiki/Accelerated_Video_Playback
hardware.graphics = { hardware.graphics = {
@@ -125,6 +125,7 @@
# Nix # Nix
nixd nixd
nil
# Gleam # Gleam
gleam gleam
@@ -159,7 +160,7 @@
# https://discourse.nixos.org/t/firefox-does-not-use-kde-window-decorations-and-cursor/32132/3 # https://discourse.nixos.org/t/firefox-does-not-use-kde-window-decorations-and-cursor/32132/3
# programs.dconf.enable = true; # programs.dconf.enable = true;
# programs.firefox = { # programs.firefox = {
# enable = true; # enable = true;
# preferences = { # preferences = {
# "widget.use-xdg-desktop-portal.file-picker" = 1; # "widget.use-xdg-desktop-portal.file-picker" = 1;
@@ -178,9 +179,9 @@
programs.nix-ld.enable = true; programs.nix-ld.enable = true;
programs.nix-ld.libraries = with pkgs; [ programs.nix-ld.libraries = with pkgs; [
# Add any missing dynamic libraries for unpackaged # Add any missing dynamic libraries for unpackaged
# programs here, NOT in environment.systemPackages # programs here, NOT in environment.systemPackages
# For JetBrains stuff # For JetBrains stuff
# https://github.com/NixOS/nixpkgs/issues/240444 # https://github.com/NixOS/nixpkgs/issues/240444
]; ];
@@ -207,7 +208,7 @@
]; ];
# fonts.fontDir.enable = true; # fonts.fontDir.enable = true;
# fonts.fontconfig.allowBitmaps = false; # fonts.fontconfig.allowBitmaps = false;
environment.systemPackages = with pkgs; [ environment.systemPackages = with pkgs; [
dust dust
eza eza
@@ -237,12 +238,14 @@
whois whois
yt-dlp yt-dlp
] ++ [ ] ++ [
bitwarden-desktop
darkman darkman
host-spawn # for flatpaks host-spawn # for flatpaks
kdePackages.filelight kdePackages.filelight
kdePackages.flatpak-kcm kdePackages.flatpak-kcm
kdePackages.kate kdePackages.kate
kdePackages.yakuake kdePackages.yakuake
# TODO: remove (replace by bitwarden-desktop)
gcr gcr
gnome-keyring # config for this and some others gnome-keyring # config for this and some others
mpv mpv
@@ -261,6 +264,7 @@
jetbrains.webstorm jetbrains.webstorm
android-studio android-studio
rustup rustup
zed-editor
]; ];
# Some programs need SUID wrappers, can be configured further or are # Some programs need SUID wrappers, can be configured further or are

View File

@@ -1,280 +1,21 @@
{ config, lib, pkgs, ... }: { config, lib, pkgs, ... }:
let let
domain = "cazzzer.com"; vars = import ./vars.nix;
ldomain = "l.${domain}"; enableDesktop = false;
sysdomain = "sys.${domain}";
lanLL = "fe80::be24:11ff:fe83:d8de";
mkIfConfig = {
name_,
domain_,
p4_, # /24
p6_, # /64
ulaPrefix_, # /64
token? 1,
}: rec {
name = name_;
domain = domain_;
p4 = p4_;
p4Size = 24;
net4 = "${p4}.0/${toString p4Size}";
addr4 = "${p4}.${toString token}";
addr4Sized = "${addr4}/${toString p4Size}";
p6 = p6_;
p6Size = 64;
net6 = "${p6}::/${toString p6Size}";
ip6Token = "::${toString token}";
addr6 = "${p6}${ip6Token}";
addr6Sized = "${addr6}/${toString p6Size}";
ulaPrefix = ulaPrefix_;
ulaSize = 64;
ulaNet = "${ulaPrefix}::/${toString ulaSize}";
ulaAddr = "${ulaPrefix}${ip6Token}";
ulaAddrSized = "${ulaAddr}/${toString ulaSize}";
};
p4 = "10.17"; # .0.0/16
pdFromWan = ""; # ::/60
ulaPrefix = "fdab:07d3:581d"; # ::/48
ifs = rec {
wan = rec {
name = "wan";
addr4 = "192.168.1.61";
addr4Sized = "${addr4}/24";
gw4 = "192.168.1.254";
};
lan = mkIfConfig {
name_ = "lan";
domain_ = "lan.${ldomain}";
p4_ = "${p4}.1"; # .0/24
p6_ = "${pdFromWan}f"; # ::/64
ulaPrefix_ = "${ulaPrefix}:0001"; # ::/64
};
lan10 = mkIfConfig {
name_ = "${lan.name}.10";
domain_ = "lab.${ldomain}";
p4_ = "${p4}.10"; # .0/24
p6_ = "${pdFromWan}e"; # ::/64
ulaPrefix_ = "${ulaPrefix}:0010"; # ::/64
};
lan20 = mkIfConfig {
name_ = "${lan.name}.20";
domain_ = "life.${ldomain}";
p4_ = "${p4}.20"; # .0/24
p6_ = ""; # p6 not defined for lan20, managed by Att box
ulaPrefix_ = "${ulaPrefix}:0020"; # ::/64
};
lan30 = mkIfConfig {
name_ = "${lan.name}.30";
domain_ = "iot.${ldomain}";
p4_ = "${p4}.30"; # .0/24
p6_ = "${pdFromWan}c"; # ::/64
ulaPrefix_ = "${ulaPrefix}:0030"; # ::/64
};
lan40 = mkIfConfig {
name_ = "${lan.name}.40";
domain_ = "kube.${ldomain}";
p4_ = "${p4}.40"; # .0/24
p6_ = "${pdFromWan}b"; # ::/64
ulaPrefix_ = "${ulaPrefix}:0040"; # ::/64
};
lan50 = mkIfConfig {
name_ = "${lan.name}.50";
domain_ = "prox.${ldomain}";
p4_ = "${p4}.50"; # .0/24
p6_ = "${pdFromWan}a"; # ::/64
ulaPrefix_ = "${ulaPrefix}:0050"; # ::/64
};
};
# Reservations added to Kea
reservations.lan.v4.reservations = [
{
hw-address = "64:66:b3:78:9c:09";
hostname = "openwrt";
ip-address = "${ifs.lan.p4}.2";
}
{
hw-address = "40:86:cb:19:9d:70";
hostname = "dlink-switchy";
ip-address = "${ifs.lan.p4}.3";
}
{
hw-address = "6c:cd:d6:af:4f:6f";
hostname = "netgear-switchy";
ip-address = "${ifs.lan.p4}.4";
}
{
hw-address = "74:d4:35:1d:0e:80";
hostname = "pve-1";
ip-address = "${ifs.lan.p4}.5";
}
{
hw-address = "00:25:90:f3:d0:e0";
hostname = "pve-2";
ip-address = "${ifs.lan.p4}.6";
}
{
hw-address = "a8:a1:59:d0:57:87";
hostname = "pve-3";
ip-address = "${ifs.lan.p4}.7";
}
{
hw-address = "22:d0:43:c6:31:92";
hostname = "truenas";
ip-address = "${ifs.lan.p4}.10";
}
{
hw-address = "1e:d5:56:ec:c7:4a";
hostname = "debbi";
ip-address = "${ifs.lan.p4}.11";
}
{
hw-address = "ee:42:75:2e:f1:a6";
hostname = "etappi";
ip-address = "${ifs.lan.p4}.12";
}
];
reservations.lan.v6.reservations = [
{
duid = "00:03:00:01:64:66:b3:78:9c:09";
hostname = "openwrt";
ip-addresses = [ "${ifs.lan.p6}::1:2" ];
}
{
duid = "00:01:00:01:2e:c0:63:23:22:d0:43:c6:31:92";
hostname = "truenas";
ip-addresses = [ "${ifs.lan.p6}::10:1" ];
}
{
duid = "00:02:00:00:ab:11:09:41:25:21:32:71:e3:77";
hostname = "debbi";
ip-addresses = [ "${ifs.lan.p6}::11:1" ];
}
{
duid = "00:02:00:00:ab:11:6b:56:93:72:0b:3c:84:11";
hostname = "etappi";
ip-addresses = [ "${ifs.lan.p6}::12:1" ];
}
];
reservations.lan20.v4.reservations = [
{
# Router
hw-address = "1c:3b:f3:da:5f:cc";
hostname = "archer-ax3000";
ip-address = "${ifs.lan20.p4}.2";
}
{
# Printer
hw-address = "30:cd:a7:c5:40:71";
hostname = "SEC30CDA7C54071";
ip-address = "${ifs.lan20.p4}.9";
}
{
# 3D Printer
hw-address = "20:f8:5e:ff:ae:5f";
hostname = "GS_ffae5f";
ip-address = "${ifs.lan20.p4}.11";
}
{
hw-address = "70:85:c2:d8:87:3f";
hostname = "Yura-PC";
ip-address = "${ifs.lan20.p4}.40";
}
];
alpinaDomains = [
"|"
"|nc."
"|sonarr."
"|radarr."
"|prowlarr."
"|qbit."
"|gitea."
"|traefik."
"|auth."
"||s3."
"|minio."
"|jellyfin."
"|whoami."
"|grafana."
"|influxdb."
"|uptime."
"|opnsense."
"|vpgen."
"|woodpecker."
"||pgrok."
"|sync."
];
mkVlanDev = { id, name }: {
netdevConfig = {
Kind = "vlan";
Name = name;
};
vlanConfig.Id = id;
};
mkLanConfig = ifObj: {
matchConfig.Name = ifObj.name;
networkConfig = {
IPv4Forwarding = true;
IPv6SendRA = true;
Address = [ ifObj.addr4Sized ];
};
ipv6Prefixes = lib.optionals (ifObj.p6 != "") [ {
Prefix = ifObj.net6;
Assign = true;
# Token = [ "static::1" "eui64" ];
Token = [ "static:${ifObj.ip6Token}" ];
} ]
++
lib.optionals (ifObj.ulaPrefix != "") [ {
Prefix = ifObj.ulaNet;
Assign = true;
Token = [ "static:${ifObj.ip6Token}" ];
} ];
ipv6RoutePrefixes = [ { Route = "${ulaPrefix}::/48"; } ];
ipv6SendRAConfig = {
Managed = (ifObj.p6 != "");
OtherInformation = (ifObj.p6 != "");
EmitDNS = true;
DNS = [ ifObj.ulaAddr ];
};
};
mkDhcp4Subnet = id: ifObj: {
id = id;
subnet = ifObj.net4;
pools = [ { pool = "${ifObj.p4}.100 - ${ifObj.p4}.199"; } ];
ddns-qualifying-suffix = "4.${ifObj.domain}";
option-data = [
{ name = "routers"; data = ifObj.addr4; }
{ name = "domain-name-servers"; data = ifObj.addr4; }
{ name = "domain-name"; data = "4.${ifObj.domain}"; }
];
};
mkDhcp6Subnet = id: ifObj: {
id = id;
interface = ifObj.name;
subnet = ifObj.net6;
rapid-commit = true;
pools = [ { pool = "${ifObj.p6}::1:1000/116"; } ];
ddns-qualifying-suffix = "6.${ifObj.domain}";
option-data = [
{ name = "domain-search"; data = "6.${ifObj.domain}"; }
];
};
in in
{ {
imports = imports =
[ # Include the results of the hardware scan. [ # Include the results of the hardware scan.
./hardware-configuration.nix ./hardware-configuration.nix
./ifconfig.nix
./firewall.nix
./dns.nix
./kea.nix
./glance.nix
./services.nix
]; ];
# Secrix for secrets management
secrix.hostPubKey = vars.pubkey;
# Bootloader. # Bootloader.
boot.loader.systemd-boot.enable = true; boot.loader.systemd-boot.enable = true;
@@ -287,441 +28,19 @@ in
boot.loader.systemd-boot.configurationLimit = 5; boot.loader.systemd-boot.configurationLimit = 5;
boot.kernelPackages = pkgs.linuxKernel.packages.linux_6_12; boot.kernelPackages = pkgs.linuxKernel.packages.linux_6_12;
boot.growPartition = true; boot.growPartition = true;
# https://github.com/quic-go/quic-go/wiki/UDP-Buffer-Sizes
# For upstream quic dns
boot.kernel.sysctl."net.core.wmem_max" = 7500000;
boot.kernel.sysctl."net.core.rmem_max" = 7500000;
networking.hostName = "grouter"; networking.hostName = "grouter";
# It is impossible to do multiple prefix requests with networkd,
# so I use dhcpcd for this
# https://github.com/systemd/systemd/issues/22571
networking.dhcpcd.enable = true;
# https://github.com/systemd/systemd/issues/22571#issuecomment-2094905496
# https://gist.github.com/csamsel/0f8cca3b2e64d7e4cc47819ec5ba9396
networking.dhcpcd.extraConfig = ''
duid
ipv6only
nodhcp6
noipv6rs
nohook resolv.conf, yp, hostname, ntp
option rapid_commit
interface ${ifs.wan.name}
ipv6rs
dhcp6
# this doesn't play well with networkd
# ia_na
# ia_pd 1 ${ifs.lan.name}/0
# ia_pd 2 ${ifs.lan10.name}/0
# ia_pd 3 ${ifs.lan20.name}/0
# request the leases just for routing (so that the att box knows we're here)
# actual ip assignments are static, based on $pdFromWan
ia_pd 1 -
ia_pd 2 -
ia_pd 3 -
ia_pd 4 -
ia_pd 5 -
# ia_pd 6 -
# ia_pd 7 -
# ia_pd 8 -
'';
networking.useNetworkd = true;
systemd.network.enable = true;
systemd.network = {
# Global options
config.networkConfig = {
IPv4Forwarding = true;
IPv6Forwarding = true;
};
# This is applied by udev, not networkd
# https://nixos.wiki/wiki/Systemd-networkd
# https://nixos.org/manual/nixos/stable/#sec-rename-ifs
links = {
"10-wan" = {
matchConfig.PermanentMACAddress = "bc:24:11:4f:c9:c4";
linkConfig.Name = ifs.wan.name;
};
"10-lan" = {
matchConfig.PermanentMACAddress = "bc:24:11:83:d8:de";
linkConfig.Name = ifs.lan.name;
};
};
netdevs = {
"10-vlan10" = mkVlanDev { id = 10; name = ifs.lan10.name; };
"10-vlan20" = mkVlanDev { id = 20; name = ifs.lan20.name; };
"10-vlan30" = mkVlanDev { id = 30; name = ifs.lan30.name; };
"10-vlan40" = mkVlanDev { id = 40; name = ifs.lan40.name; };
"10-vlan50" = mkVlanDev { id = 50; name = ifs.lan50.name; };
};
networks = {
"10-wan" = {
matchConfig.Name = ifs.wan.name;
networkConfig = {
# start a DHCP Client for IPv4 Addressing/Routing
# DHCP = "ipv4";
# accept Router Advertisements for Stateless IPv6 Autoconfiguraton (SLAAC)
# let dhcpcd handle this
Address = [ ifs.wan.addr4Sized ];
IPv6AcceptRA = false;
};
routes = [ { Gateway = ifs.wan.gw4; } ];
# make routing on this interface a dependency for network-online.target
linkConfig.RequiredForOnline = "routable";
};
"20-lan" = (mkLanConfig ifs.lan) // {
vlan = [
ifs.lan10.name
ifs.lan20.name
ifs.lan30.name
ifs.lan40.name
ifs.lan50.name
];
};
"30-vlan10" = mkLanConfig ifs.lan10;
"30-vlan20" = mkLanConfig ifs.lan20;
"30-vlan30" = mkLanConfig ifs.lan30;
"30-vlan40" = mkLanConfig ifs.lan40;
"30-vlan50" = mkLanConfig ifs.lan50;
};
};
networking.firewall.enable = false;
networking.nftables.enable = true;
networking.nftables.tables.firewall = {
family = "inet";
content = ''
define ZONE_WAN_IFS = { ${ifs.wan.name} }
define ZONE_LAN_IFS = {
${ifs.lan.name},
${ifs.lan10.name},
${ifs.lan20.name},
${ifs.lan30.name},
${ifs.lan40.name},
${ifs.lan50.name},
}
define RFC1918 = { 10.0.0.0/8, 172.16.0.0/12, 192.168.0.0/16 }
define ALLOWED_TCP_PORTS = { ssh, https }
define ALLOWED_UDP_PORTS = { domain }
map port_forward_v4 {
type inet_proto . inet_service : ipv4_addr . inet_service
elements = {
tcp . 8006 : ${ifs.lan50.p4}.10 . 8006
}
}
set port_forward_v6 {
type inet_proto . ipv6_addr . inet_service
elements = {
tcp . ${ifs.lan.p6}::1 . https
}
}
chain input {
type filter hook input priority filter; policy drop;
# Drop router adverts from self
# peculiarity due to wan and lan20 being bridged
# TODO: figure out a less jank way to do this
ip6 saddr ${lanLL} icmpv6 type nd-router-advert drop
# Allow established and related connections
# All icmp stuff should (theoretically) be handled by ct related
# https://serverfault.com/a/632363
ct state established,related accept
# However, that doesn't happen for router advertisements from what I can tell
# TODO: more testing
# Allow ICMPv6 on link local addrs
ip6 nexthdr icmpv6 ip6 saddr fe80::/10 accept
ip6 nexthdr icmpv6 ip6 daddr fe80::/10 accept # TODO: not sure if necessary
# Allow all traffic from loopback interface
iif lo accept
# Allow DHCPv6 client traffic
ip6 daddr { fe80::/10, ff02::/16 } udp dport dhcpv6-server accept
# WAN zone input rules
iifname $ZONE_WAN_IFS jump zone_wan_input
# LAN zone input rules
iifname $ZONE_LAN_IFS jump zone_lan_input
}
chain forward {
type filter hook forward priority filter; policy drop;
# Allow established and related connections
ct state established,related accept
# WAN zone forward rules
iifname $ZONE_WAN_IFS jump zone_wan_forward
# LAN zone forward rules
iifname $ZONE_LAN_IFS jump zone_lan_forward
}
chain zone_wan_input {
# Allow SSH from WAN (if needed)
tcp dport ssh accept
}
chain zone_wan_forward {
# Port forwarding
ct status dnat accept
# Allowed IPv6 ports
meta l4proto . ip6 daddr . th dport @port_forward_v6 accept
}
chain zone_lan_input {
# Allow all ICMPv6 from LAN
ip6 nexthdr icmpv6 accept
# Allow all ICMP from LAN
ip protocol icmp accept
# Allow specific services from LAN
tcp dport $ALLOWED_TCP_PORTS accept
udp dport $ALLOWED_UDP_PORTS accept
}
chain zone_lan_forward {
# Allow port forwarded targets
# ct status dnat accept
# Allow all traffic from LAN to WAN, except ULAs
oifname $ZONE_WAN_IFS ip6 saddr fd00::/8 drop # Not sure if needed
oifname $ZONE_WAN_IFS accept;
# Allow traffic between LANs
oifname $ZONE_LAN_IFS accept
}
chain output {
# Accept anything out of self by default
type filter hook output priority filter; policy accept;
# NAT reflection
# oif lo ip daddr != 127.0.0.0/8 dnat ip to meta l4proto . th dport map @port_forward_v4
}
chain prerouting {
# Initial step, accept by default
type nat hook prerouting priority dstnat; policy accept;
# Port forwarding
fib daddr type local dnat ip to meta l4proto . th dport map @port_forward_v4
}
chain postrouting {
# Last step, accept by default
type nat hook postrouting priority srcnat; policy accept;
# Masquerade LAN addrs
oifname $ZONE_WAN_IFS ip saddr $RFC1918 masquerade
# Optional IPv6 masquerading (big L if enabled, don't forget to allow forwarding)
# oifname $ZONE_WAN_IFS ip6 saddr fd00::/8 masquerade
}
'';
};
services.kea.dhcp4.enable = true;
services.kea.dhcp4.settings = {
interfaces-config.interfaces = [
ifs.lan.name
ifs.lan10.name
ifs.lan20.name
ifs.lan30.name
ifs.lan40.name
ifs.lan50.name
];
dhcp-ddns.enable-updates = true;
ddns-qualifying-suffix = "4.default.${ldomain}";
subnet4 = [
((mkDhcp4Subnet 1 ifs.lan) // reservations.lan.v4)
(mkDhcp4Subnet 10 ifs.lan10)
((mkDhcp4Subnet 20 ifs.lan20) // reservations.lan20.v4)
(mkDhcp4Subnet 30 ifs.lan30)
(mkDhcp4Subnet 40 ifs.lan40)
(mkDhcp4Subnet 50 ifs.lan50)
];
};
services.kea.dhcp6.enable = true;
services.kea.dhcp6.settings = {
interfaces-config.interfaces = [
ifs.lan.name
ifs.lan10.name
# ifs.lan20.name # Managed by Att box
ifs.lan30.name
ifs.lan40.name
ifs.lan50.name
];
# TODO: https://kea.readthedocs.io/en/latest/arm/ddns.html#dual-stack-environments
dhcp-ddns.enable-updates = true;
ddns-qualifying-suffix = "6.default.${ldomain}";
subnet6 = [
((mkDhcp6Subnet 1 ifs.lan) // reservations.lan.v6)
(mkDhcp6Subnet 10 ifs.lan10)
(mkDhcp6Subnet 30 ifs.lan30)
(mkDhcp6Subnet 40 ifs.lan40)
(mkDhcp6Subnet 50 ifs.lan50)
];
};
services.kea.dhcp-ddns.enable = true;
services.kea.dhcp-ddns.settings = {
forward-ddns.ddns-domains = [
{
name = "${ldomain}.";
dns-servers = [ { ip-address = "::1"; port = 1053; } ];
}
];
};
services.resolved.enable = false;
networking.resolvconf.enable = true;
networking.resolvconf.useLocalResolver = true;
services.adguardhome.enable = true;
services.adguardhome.mutableSettings = false;
services.adguardhome.settings = {
dns = {
bootstrap_dns = [ "1.1.1.1" "9.9.9.9" ];
upstream_dns = [
"quic://p0.freedns.controld.com" # Default upstream
"[/${ldomain}/][::1]:1053" # Local domains to Knot (ddns)
"[/home/][${ifs.lan.ulaPrefix}::250]" # .home domains to opnsense (temporary)
];
};
# https://adguard-dns.io/kb/general/dns-filtering-syntax/
user_rules = [
# DNS rewrites
"|grouter.${domain}^$dnsrewrite=${ifs.lan.ulaAddr}"
"|pve-1.${sysdomain}^$dnsrewrite=${ifs.lan.p4}.5"
"|pve-3.${sysdomain}^$dnsrewrite=${ifs.lan.p4}.7"
"|pve-1.${sysdomain}^$dnsrewrite=${ifs.lan.ulaPrefix}::5:1"
"|pve-3.${sysdomain}^$dnsrewrite=${ifs.lan.ulaPrefix}::7:1"
"||lab.${domain}^$dnsrewrite=${ifs.lan.p6}::12:1"
"||lab.${domain}^$dnsrewrite=${ifs.lan.p4}.12"
# Allowed exceptions
"@@||googleads.g.doubleclick.net"
]
# Alpina DNS rewrites
++ map (host: "${host}${domain}^$dnsrewrite=${ifs.lan.p6}::11:1") alpinaDomains
++ map (host: "${host}${domain}^$dnsrewrite=${ifs.lan.p4}.11") alpinaDomains;
};
services.knot.enable = true;
services.knot.settings = {
# server.listen = "0.0.0.0@1053";
server.listen = "::1@1053";
# TODO: templates
zone = [
{
domain = ldomain;
storage = "/var/lib/knot/zones";
file = "${ldomain}.zone";
acl = [ "allow_localhost_update" ];
}
];
acl = [
{
id = "allow_localhost_update";
address = [ "::1" "127.0.0.1" ];
action = [ "update" ];
}
];
};
# Ensure the zone file exists
system.activationScripts.knotZoneFile = ''
ZONE_DIR="/var/lib/knot/zones"
ZONE_FILE="$ZONE_DIR/${ldomain}.zone"
# Create the directory if it doesn't exist
mkdir -p "$ZONE_DIR"
# Check if the zone file exists
if [ ! -f "$ZONE_FILE" ]; then
# Create the zone file with a basic SOA record
# Serial; Refresh; Retry; Expire; Negative Cache TTL;
echo "${ldomain}. 3600 SOA ns.${ldomain}. admin.${ldomain}. 1 86400 900 691200 3600" > "$ZONE_FILE"
echo "Created new zone file: $ZONE_FILE"
else
echo "Zone file already exists: $ZONE_FILE"
fi
# Ensure proper ownership and permissions
chown -R knot:knot "/var/lib/knot"
chmod 644 "$ZONE_FILE"
'';
# https://wiki.nixos.org/wiki/Prometheus
services.prometheus = {
enable = true;
exporters = {
# TODO: DNS, Kea, Knot, other exporters
node = {
enable = true;
enabledCollectors = [ "systemd" ];
};
};
scrapeConfigs = [
{
job_name = "node";
static_configs = [{
targets = [ "localhost:${toString config.services.prometheus.exporters.node.port}" ];
}];
}
];
};
# https://wiki.nixos.org/wiki/Grafana#Declarative_configuration
services.grafana = {
enable = true;
settings.server.http_port = 3001;
provision = {
enable = true;
datasources.settings.datasources = [
{
name = "Prometheus";
type = "prometheus";
url = "http://localhost:${toString config.services.prometheus.port}";
}
];
};
};
services.caddy = {
enable = true;
virtualHosts."grouter.${domain}".extraConfig = ''
reverse_proxy localhost:${toString config.services.grafana.settings.server.http_port}
tls internal
'';
};
# services.netdata.enable = true;
# Enable the X11 windowing system. # Enable the X11 windowing system.
# You can disable this if you're only using the Wayland session. # You can disable this if you're only using the Wayland session.
services.xserver.enable = false; services.xserver.enable = false;
# Enable the KDE Plasma Desktop Environment. # Enable the KDE Plasma Desktop Environment.
# Useful for debugging with wireshark. # Useful for debugging with wireshark.
# services.displayManager.sddm.enable = true;
hardware.graphics.enable = true; hardware.graphics.enable = true;
services.displayManager.sddm.wayland.enable = true; services.displayManager.sddm.enable = enableDesktop;
services.desktopManager.plasma6.enable = true; services.displayManager.sddm.wayland.enable = enableDesktop;
services.desktopManager.plasma6.enable = enableDesktop;
# No need for audio in VM # No need for audio in VM
services.pipewire.enable = false; services.pipewire.enable = false;
@@ -749,7 +68,7 @@ in
description = "Yura"; description = "Yura";
uid = 1000; uid = 1000;
group = "cazzzer"; group = "cazzzer";
extraGroups = [ "wheel" "docker" "wireshark" ]; extraGroups = [ "wheel" "wireshark" ];
}; };
programs.firefox.enable = true; programs.firefox.enable = true;
@@ -766,9 +85,7 @@ in
eza eza
fastfetch fastfetch
fd fd
kdePackages.filelight
kdePackages.kate kdePackages.kate
kdePackages.yakuake
ldns ldns
lsof lsof
micro micro

133
hosts/router/dns.nix Normal file
View File

@@ -0,0 +1,133 @@
{ config, lib, pkgs, ... }:
let
vars = import ./vars.nix;
domain = vars.domain;
ldomain = vars.ldomain;
sysdomain = vars.sysdomain;
ifs = vars.ifs;
alpinaDomains = [
"|"
"|nc."
"|sonarr."
"|radarr."
"|prowlarr."
"|qbit."
"|gitea."
"|traefik."
"|auth."
"||s3."
"|minio."
"|jellyfin."
"|whoami."
"|grafana."
"|influxdb."
"|uptime."
"|opnsense."
"|vpgen."
"|woodpecker."
"||pgrok."
"|sync."
];
in
{
# https://github.com/quic-go/quic-go/wiki/UDP-Buffer-Sizes
# For upstream quic dns
boot.kernel.sysctl."net.core.wmem_max" = 7500000;
boot.kernel.sysctl."net.core.rmem_max" = 7500000;
services.resolved.enable = false;
networking.resolvconf.enable = true;
networking.resolvconf.useLocalResolver = true;
services.adguardhome.enable = true;
services.adguardhome.mutableSettings = false;
# https://github.com/AdguardTeam/Adguardhome/wiki/Configuration
services.adguardhome.settings = {
dns = {
# Disable rate limit, default of 20 is too low
# https://github.com/AdguardTeam/AdGuardHome/issues/6726
ratelimit = 0;
bootstrap_dns = [ "1.1.1.1" "9.9.9.9" ];
upstream_dns = [
# Default upstreams
"quic://p0.freedns.controld.com"
"tls://one.one.one.one"
"tls://dns.quad9.net"
# Adguard uses upstream and not rewrite rules to resolve cname rewrites,
# and obviously my sysdomain entries don't exist in cloudflare.
"[/${sysdomain}/][::1]" # Sys domains to self (for cname rewrites)
"[/${ldomain}/][::1]:1053" # Local domains to Knot (ddns)
"[/home/][${ifs.lan.ulaPrefix}::250]" # .home domains to opnsense (temporary)
];
};
# https://adguard-dns.io/kb/general/dns-filtering-syntax/
user_rules = [
# DNS rewrites
"|grouter.${domain}^$dnsrewrite=${ifs.lan.ulaAddr}"
"|pve-1.${sysdomain}^$dnsrewrite=${ifs.lan.p4}.5"
"|pve-1.${sysdomain}^$dnsrewrite=${ifs.lan.ulaPrefix}::5:1"
"|pve-3.${sysdomain}^$dnsrewrite=${ifs.lan.p4}.7"
"|pve-3.${sysdomain}^$dnsrewrite=${ifs.lan.ulaPrefix}::7:1"
"|truenas.${sysdomain}^$dnsrewrite=${ifs.lan.p4}.10"
"|truenas.${sysdomain}^$dnsrewrite=${ifs.lan.ulaPrefix}::20d0:43ff:fec6:3192"
"|debbi.${sysdomain}^$dnsrewrite=${ifs.lan.p4}.11"
"|debbi.${sysdomain}^$dnsrewrite=${ifs.lan.ulaPrefix}::11:1"
"|etappi.${sysdomain}^$dnsrewrite=${ifs.lan.p4}.12"
"|etappi.${sysdomain}^$dnsrewrite=${ifs.lan.ulaPrefix}::12:1"
# Lab DNS rewrites
"||lab.${domain}^$dnsrewrite=etappi.${sysdomain}"
# Allowed exceptions
"@@||googleads.g.doubleclick.net"
]
# Alpina DNS rewrites
++ map (host: "${host}${domain}^$dnsrewrite=debbi.${sysdomain}") alpinaDomains;
};
services.knot.enable = true;
services.knot.settings = {
# server.listen = "0.0.0.0@1053";
server.listen = "::1@1053";
zone = [
{
domain = ldomain;
storage = "/var/lib/knot/zones";
file = "${ldomain}.zone";
acl = [ "allow_localhost_update" ];
}
];
acl = [
{
id = "allow_localhost_update";
address = [ "::1" "127.0.0.1" ];
action = [ "update" ];
}
];
};
# Ensure the zone file exists
system.activationScripts.knotZoneFile = ''
ZONE_DIR="/var/lib/knot/zones"
ZONE_FILE="$ZONE_DIR/${ldomain}.zone"
# Create the directory if it doesn't exist
mkdir -p "$ZONE_DIR"
# Check if the zone file exists
if [ ! -f "$ZONE_FILE" ]; then
# Create the zone file with a basic SOA record
# Serial; Refresh; Retry; Expire; Negative Cache TTL;
echo "${ldomain}. 3600 SOA ns.${ldomain}. admin.${ldomain}. 1 86400 900 691200 3600" > "$ZONE_FILE"
echo "Created new zone file: $ZONE_FILE"
else
echo "Zone file already exists: $ZONE_FILE"
fi
# Ensure proper ownership and permissions
chown -R knot:knot "/var/lib/knot"
chmod 644 "$ZONE_FILE"
'';
}

207
hosts/router/firewall.nix Normal file
View File

@@ -0,0 +1,207 @@
{ config, lib, pkgs, ... }:
let
vars = import ./vars.nix;
links = vars.links;
ifs = vars.ifs;
pdFromWan = vars.pdFromWan;
nftIdentifiers = ''
define ZONE_WAN_IFS = { ${ifs.wan.name} }
define ZONE_LAN_IFS = {
${ifs.lan.name},
${ifs.lan10.name},
${ifs.lan20.name},
${ifs.lan30.name},
${ifs.lan40.name},
${ifs.lan50.name},
}
define OPNSENSE_NET6 = ${vars.extra.opnsense.net6}
define ZONE_LAN_EXTRA_NET6 = {
# TODO: reevaluate this statement
${ifs.lan20.net6}, # needed since packets can come in from wan on these addrs
$OPNSENSE_NET6,
}
define RFC1918 = { 10.0.0.0/8, 172.16.0.0/12, 192.168.0.0/16 }
define CLOUDFLARE_NET6 = {
# https://www.cloudflare.com/ips-v6
# TODO: figure out a better way to get addrs dynamically from url
# perhaps building a nixos module/package that fetches the ips?
2400:cb00::/32,
2606:4700::/32,
2803:f800::/32,
2405:b500::/32,
2405:8100::/32,
2a06:98c0::/29,
2c0f:f248::/32,
}
'';
in
{
networking.firewall.enable = false;
networking.nftables.enable = true;
# networking.nftables.ruleset = nftIdentifiers; #doesn't work because it's appended to the end
networking.nftables.tables.nat4 = {
family = "ip";
content = ''
${nftIdentifiers}
map port_forward {
type inet_proto . inet_service : ipv4_addr . inet_service
elements = {
tcp . 8006 : ${ifs.lan50.p4}.10 . 8006
}
}
chain prerouting {
# Initial step, accept by default
type nat hook prerouting priority dstnat; policy accept;
# Port forwarding
fib daddr type local dnat ip to meta l4proto . th dport map @port_forward
}
chain postrouting {
# Last step, accept by default
type nat hook postrouting priority srcnat; policy accept;
# Masquerade LAN addrs
oifname $ZONE_WAN_IFS ip saddr $RFC1918 masquerade
}
'';
};
# Optional IPv6 masquerading (big L if enabled, don't forget to allow forwarding)
networking.nftables.tables.nat6 = {
family = "ip6";
enable = false;
content = ''
${nftIdentifiers}
chain postrouting {
type nat hook postrouting priority srcnat; policy accept;
oifname $ZONE_WAN_IFS ip6 saddr fd00::/8 masquerade
}
'';
};
networking.nftables.tables.firewall = {
family = "inet";
content = ''
${nftIdentifiers}
define ALLOWED_TCP_PORTS = { ssh, https }
define ALLOWED_UDP_PORTS = { bootps, dhcpv6-server, domain, https }
set port_forward_v6 {
type inet_proto . ipv6_addr . inet_service
elements = {
# syncthing on alpina
tcp . ${ifs.lan.p6}::11:1 . 22000 ,
udp . ${ifs.lan.p6}::11:1 . 22000 ,
}
}
set cloudflare_forward_v6 {
type ipv6_addr
elements = {
${ifs.lan.p6}::11:1,
}
}
chain input {
type filter hook input priority filter; policy drop;
# Drop router adverts from self
# peculiarity due to wan and lan20 being bridged
# TODO: figure out a less jank way to do this
iifname $ZONE_WAN_IFS ip6 saddr ${links.lanLL} icmpv6 type nd-router-advert log prefix "self radvt: " drop
# iifname $ZONE_WAN_IFS ip6 saddr ${links.lanLL} ip6 nexthdr icmpv6 log prefix "self icmpv6: " drop
# iifname $ZONE_WAN_IFS ip6 saddr ${links.lanLL} log prefix "self llv6: " drop
# iifname $ZONE_WAN_IFS ip6 saddr ${links.lanLL} log drop
# iifname $ZONE_LAN_IFS ip6 saddr ${links.wanLL} log drop
# Allow established and related connections
# All icmp stuff should (theoretically) be handled by ct related
# https://serverfault.com/a/632363
ct state established,related accept
# However, that doesn't happen for router advertisements from what I can tell
# TODO: more testing
# Allow ICMPv6 on local addrs
ip6 nexthdr icmpv6 ip6 saddr { fe80::/10, ${pdFromWan}0::/60 } accept
ip6 nexthdr icmpv6 ip6 daddr fe80::/10 accept # TODO: not sure if necessary
# Allow all traffic from loopback interface
iif lo accept
# Allow DHCPv6 traffic
# I thought dhcpv6-client traffic would be accepted by established/related,
# but apparently not.
ip6 daddr { fe80::/10, ff02::/16 } th dport { dhcpv6-client, dhcpv6-server } accept
# WAN zone input rules
iifname $ZONE_WAN_IFS jump zone_wan_input
# LAN zone input rules
# iifname $ZONE_LAN_IFS accept
iifname $ZONE_LAN_IFS jump zone_lan_input
ip6 saddr $ZONE_LAN_EXTRA_NET6 jump zone_lan_input
# log
}
chain forward {
type filter hook forward priority filter; policy drop;
# Allow established and related connections
ct state established,related accept
# WAN zone forward rules
iifname $ZONE_WAN_IFS jump zone_wan_forward
# LAN zone forward rules
iifname $ZONE_LAN_IFS jump zone_lan_forward
ip6 saddr $ZONE_LAN_EXTRA_NET6 jump zone_lan_forward
}
chain zone_wan_input {
# Allow SSH from WAN (if needed)
tcp dport ssh accept
}
chain zone_wan_forward {
# Port forwarding
ct status dnat accept
# Allowed IPv6 ports
meta l4proto . ip6 daddr . th dport @port_forward_v6 accept
# Allowed IPv6 from cloudflare
ip6 saddr $CLOUDFLARE_NET6 ip6 daddr @cloudflare_forward_v6 th dport https accept
}
chain zone_lan_input {
# Allow all ICMPv6 from LAN
ip6 nexthdr icmpv6 accept
# Allow all ICMP from LAN
ip protocol icmp accept
# Allow specific services from LAN
tcp dport $ALLOWED_TCP_PORTS accept
udp dport $ALLOWED_UDP_PORTS accept
}
chain zone_lan_forward {
# Allow port forwarded targets
# ct status dnat accept
# Allow all traffic from LAN to WAN, except ULAs
oifname $ZONE_WAN_IFS ip6 saddr fd00::/8 drop # Not sure if needed
oifname $ZONE_WAN_IFS accept;
# Allow traffic between LANs
oifname $ZONE_LAN_IFS accept
}
chain output {
# Accept anything out of self by default
type filter hook output priority filter; policy accept;
# NAT reflection
# oif lo ip daddr != 127.0.0.0/8 dnat ip to meta l4proto . th dport map @port_forward_v4
}
'';
};
}

166
hosts/router/glance.nix Normal file
View File

@@ -0,0 +1,166 @@
{ config, lib, pkgs, ... }:
let
  vars = import ./vars.nix;
  domain = vars.domain;
in
{
  # Glance dashboard
  # Two pages: "Home" (feeds/markets/weather) and "Infrastructure"
  # (local server stats, an embedded Grafana panel, AdGuard DNS stats).
  # Served behind Caddy at the vhost root (see services.nix).
  services.glance.enable = true;
  services.glance.settings.pages = [
    {
      name = "Home";
      # hideDesktopNavigation = true; # Uncomment if needed
      columns = [
        {
          size = "small";
          widgets = [
            {
              type = "calendar";
              firstDayOfWeek = "monday";
            }
            {
              # Blog feeds; cached for 12h to keep upstream polling polite.
              type = "rss";
              limit = 10;
              collapseAfter = 3;
              cache = "12h";
              feeds = [
                { url = "https://rtk0c.pages.dev/index.xml"; }
                { url = "https://www.yegor256.com/rss.xml"; }
                { url = "https://selfh.st/rss/"; title = "selfh.st"; }
                { url = "https://ciechanow.ski/atom.xml"; }
                { url = "https://www.joshwcomeau.com/rss.xml"; title = "Josh Comeau"; }
                { url = "https://samwho.dev/rss.xml"; }
                { url = "https://ishadeed.com/feed.xml"; title = "Ahmad Shadeed"; }
              ];
            }
            {
              type = "twitch-channels";
              channels = [
                "theprimeagen"
                "j_blow"
                "piratesoftware"
                "cohhcarnage"
                "christitustech"
                "EJ_SA"
              ];
            }
          ];
        }
        {
          size = "full";
          widgets = [
            {
              type = "group";
              widgets = [
                { type = "hacker-news"; }
                { type = "lobsters"; }
              ];
            }
            {
              # YouTube channels, identified by channel ID.
              type = "videos";
              channels = [
                "UCXuqSBlHAE6Xw-yeJA0Tunw" # Linus Tech Tips
                "UCR-DXc1voovS8nhAvccRZhg" # Jeff Geerling
                "UCsBjURrPoezykLs9EqgamOA" # Fireship
                "UCBJycsmduvYEL83R_U4JriQ" # Marques Brownlee
                "UCHnyfMqiRRG1u-2MsSQLbXA" # Veritasium
              ];
            }
            {
              type = "group";
              widgets = [
                {
                  type = "reddit";
                  subreddit = "technology";
                  showThumbnails = true;
                }
                {
                  type = "reddit";
                  subreddit = "selfhosted";
                  showThumbnails = true;
                }
              ];
            }
          ];
        }
        {
          size = "small";
          widgets = [
            {
              type = "weather";
              location = "San Jose, California, United States";
              units = "metric";
              hourFormat = "12h";
              # hideLocation = true; # Uncomment if needed
            }
            {
              type = "markets";
              markets = [
                { symbol = "SPY"; name = "S&P 500"; }
                { symbol = "BTC-USD"; name = "Bitcoin"; }
                { symbol = "NVDA"; name = "NVIDIA"; }
                { symbol = "AAPL"; name = "Apple"; }
                { symbol = "MSFT"; name = "Microsoft"; }
              ];
            }
            {
              # GitHub release tracker; unauthenticated, so heavy caching (1d).
              type = "releases";
              cache = "1d";
              # token = "..."; # Uncomment and set if needed
              repositories = [
                "glanceapp/glance"
                "go-gitea/gitea"
                "immich-app/immich"
                "syncthing/syncthing"
              ];
            }
          ];
        }
      ];
    }
    {
      name = "Infrastructure";
      columns = [
        {
          size = "small";
          widgets = [
            {
              type = "server-stats";
              servers = [
                {
                  type = "local";
                  name = "Router";
                  # /nix/store is on the root fs; hiding it avoids a duplicate gauge.
                  mountpoints."/nix/store".hide = true;
                }
              ];
            }
          ];
        }
        {
          size = "full";
          widgets = [
            {
              # Single Grafana panel (node-exporter dashboard, panelId=74) embedded
              # via the /grafana/ subpath proxied by Caddy; relies on Grafana's
              # allow_embedding setting (see services.nix).
              type = "iframe";
              title = "Grafana";
              title-url = "/grafana/";
              source = "/grafana/d-solo/rYdddlPWk/node-exporter-full?orgId=1&from=1747211119196&to=1747297519196&timezone=browser&var-datasource=PBFA97CFB590B2093&var-job=node&var-node=localhost:9100&var-diskdevices=%5Ba-z%5D%2B%7Cnvme%5B0-9%5D%2Bn%5B0-9%5D%2B%7Cmmcblk%5B0-9%5D%2B&refresh=1m&panelId=74&__feature.dashboardSceneSolo";
              height = 400;
            }
          ];
        }
        {
          size = "small";
          widgets = [
            {
              type = "dns-stats";
              service = "adguard";
              url = "http://localhost:${toString config.services.adguardhome.port}";
              # NOTE(review): credentials left blank — presumably the local
              # AdGuard instance allows unauthenticated stats; confirm.
              username = "";
              password = "";
            }
          ];
        }
      ];
    }
  ];
}

187
hosts/router/ifconfig.nix Normal file
View File

@@ -0,0 +1,187 @@
{ config, lib, pkgs, ... }:
let
  vars = import ./vars.nix;
  links = vars.links;
  ifs = vars.ifs;
  pdFromWan = vars.pdFromWan;
  ulaPrefix = vars.ulaPrefix;
  # Build a systemd-networkd netdev for a tagged VLAN riding on the LAN trunk.
  mkVlanDev = { id, name }: {
    netdevConfig = {
      Kind = "vlan";
      Name = name;
    };
    vlanConfig.Id = id;
  };
  # Shared network config for every LAN-side interface: static v4/GUA/ULA
  # addresses plus router advertisements announcing both IPv6 prefixes.
  mkLanConfig = ifObj: {
    matchConfig.Name = ifObj.name;
    networkConfig = {
      IPv4Forwarding = true;
      IPv6SendRA = true;
      Address = [ ifObj.addr4Sized ifObj.addr6Sized ifObj.ulaAddrSized ];
    };
    ipv6Prefixes = [
      {
        Prefix = ifObj.net6;
        Assign = true;
        # Token = [ "static::1" "eui64" ];
        Token = [ "static:${ifObj.ip6Token}" ];
      }
      {
        Prefix = ifObj.ulaNet;
        Assign = true;
        Token = [ "static:${ifObj.ulaToken}" ];
      }
    ];
    # Advertise a route for the whole ULA /48 so inter-VLAN ULA traffic
    # goes through this router.
    ipv6RoutePrefixes = [ { Route = "${ulaPrefix}::/48"; } ];
    ipv6SendRAConfig = {
      # don't manage the att box subnet
      # should work fine either way though
      Managed = (ifObj.p6 != "${pdFromWan}0");
      OtherInformation = (ifObj.p6 != "${pdFromWan}0");
      EmitDNS = true;
      # Advertise this router's ULA address as the DNS server.
      DNS = [ ifObj.ulaAddr ];
    };
  };
in
{
  # By default, Linux will respond to ARP requests that belong to other interfaces.
  # Normally this isn't a problem, but it causes issues
  # since my WAN and LAN20 are technically bridged.
  # https://networkengineering.stackexchange.com/questions/83071/why-linux-answers-arp-requests-for-ips-that-belong-to-different-network-interfac
  boot.kernel.sysctl."net.ipv4.conf.default.arp_filter" = 1;
  # It is impossible to do multiple prefix requests with networkd,
  # so I use dhcpcd for this
  # https://github.com/systemd/systemd/issues/22571
  # https://github.com/systemd/systemd/issues/22571#issuecomment-2094905496
  # https://gist.github.com/csamsel/0f8cca3b2e64d7e4cc47819ec5ba9396
  networking.dhcpcd.enable = true;
  networking.dhcpcd.allowInterfaces = [ ifs.wan.name ];
  networking.dhcpcd.extraConfig = ''
    debug
    nohook resolv.conf, yp, hostname, ntp
    interface ${ifs.wan.name}
    ipv6only
    duid
    ipv6rs
    dhcp6
    # option rapid_commit
    # DHCPv6 addr
    ia_na
    # DHCPv6 Prefix Delegation
    # request the leases just for routing (so that the att box knows we're here)
    # actual ip assignments are static, based on $pdFromWan
    ia_pd 1/${ifs.lan.net6} -
    ia_pd 10/${ifs.lan10.net6} -
    # ia_pd 20/${pdFromWan}d::/64 - # for opnsense (legacy services)
    ia_pd 30/${ifs.lan30.net6} -
    ia_pd 40/${ifs.lan40.net6} -
    ia_pd 50/${ifs.lan50.net6} -
    # ia_pd 7 -
    # ia_pd 8 -
    # the leases can be assigned to the interfaces,
    # but this doesn't play well with networkd
    # ia_pd 1 ${ifs.lan.name}/0
    # ia_pd 2 ${ifs.lan10.name}/0
    # ia_pd 3 ${ifs.lan20.name}/0
  '';
  networking.useNetworkd = true;
  systemd.network.enable = true;
  systemd.network = {
    # Global options
    config.networkConfig = {
      IPv4Forwarding = true;
      IPv6Forwarding = true;
    };
    # This is applied by udev, not networkd
    # https://nixos.wiki/wiki/Systemd-networkd
    # https://nixos.org/manual/nixos/stable/#sec-rename-ifs
    # Pin stable interface names to the NICs' permanent MAC addresses.
    links = {
      "10-wan" = {
        matchConfig.PermanentMACAddress = links.wanMAC;
        linkConfig.Name = ifs.wan.name;
      };
      "10-lan" = {
        matchConfig.PermanentMACAddress = links.lanMAC;
        linkConfig.Name = ifs.lan.name;
      };
    };
    # One VLAN netdev per LAN segment, all tagged on the physical LAN port.
    netdevs = {
      "10-vlan10" = mkVlanDev { id = 10; name = ifs.lan10.name; };
      "10-vlan20" = mkVlanDev { id = 20; name = ifs.lan20.name; };
      "10-vlan30" = mkVlanDev { id = 30; name = ifs.lan30.name; };
      "10-vlan40" = mkVlanDev { id = 40; name = ifs.lan40.name; };
      "10-vlan50" = mkVlanDev { id = 50; name = ifs.lan50.name; };
    };
    networks = {
      "10-wan" = {
        matchConfig.Name = ifs.wan.name;
        # make routing on this interface a dependency for network-online.target
        linkConfig.RequiredForOnline = "routable";
        networkConfig = {
          # start a DHCP Client for IPv4 Addressing/Routing
          # DHCP = "ipv4";
          # accept Router Advertisements for Stateless IPv6 Autoconfiguraton (SLAAC)
          # let dhcpcd handle this
          Address = [ ifs.wan.addr4Sized ];
          IPv6AcceptRA = false;
          KeepConfiguration = true;
        };
        routes = [
          { Gateway = ifs.wan.gw4; }
        ];
      };
      # The untagged LAN carries the VLANs and the static routes to opnsense.
      "20-lan" = (mkLanConfig ifs.lan) // {
        vlan = [
          ifs.lan10.name
          ifs.lan20.name
          ifs.lan30.name
          ifs.lan40.name
          ifs.lan50.name
        ];
        routes = vars.extra.opnsense.routes;
      };
      "30-vlan10" = mkLanConfig ifs.lan10;
      "30-vlan20" = mkLanConfig ifs.lan20;
      "30-vlan30" = mkLanConfig ifs.lan30;
      "30-vlan40" = mkLanConfig ifs.lan40;
      "30-vlan50" = mkLanConfig ifs.lan50;
    };
  };
  # For some reason, the interfaces stop receiving route solicitations after a while.
  # Regular router adverts still get sent out at intervals, but this breaks dhcp6 clients.
  # Restarting networkd makes it work again, I have no clue why.
  # This is jank af, but I've tried a bunch of other stuff with no success
  # and I'm giving up (for now).
  systemd.timers."restart-networkd" = {
    wantedBy = [ "timers.target" ];
    timerConfig = {
      OnBootSec = "1m";
      OnUnitActiveSec = "1m";
      Unit = "restart-networkd.service";
    };
  };
  systemd.services."restart-networkd" = {
    script = ''
      set -eu
      ${pkgs.systemd}/bin/systemctl restart systemd-networkd
    '';
    serviceConfig = {
      Type = "oneshot";
      User = "root";
    };
  };
}

183
hosts/router/kea.nix Normal file
View File

@@ -0,0 +1,183 @@
{ config, lib, pkgs, ... }:
let
  vars = import ./vars.nix;
  ldomain = vars.ldomain;
  ifs = vars.ifs;
  # Kea DHCPv4 subnet for one LAN interface: dynamic pool .100-.199,
  # router/DNS pointing at this box, DDNS names under "4.<interface domain>".
  mkDhcp4Subnet = id: ifObj: {
    id = id;
    subnet = ifObj.net4;
    pools = [ { pool = "${ifObj.p4}.100 - ${ifObj.p4}.199"; } ];
    ddns-qualifying-suffix = "4.${ifObj.domain}";
    option-data = [
      { name = "routers"; data = ifObj.addr4; }
      { name = "domain-name-servers"; data = ifObj.addr4; }
      { name = "domain-name"; data = "4.${ifObj.domain}"; }
    ];
  };
  # Kea DHCPv6 subnet for one LAN interface: leases from <prefix>::1:1000/116,
  # DDNS names under "6.<interface domain>".
  mkDhcp6Subnet = id: ifObj: {
    id = id;
    interface = ifObj.name;
    subnet = ifObj.net6;
    rapid-commit = true;
    pools = [ { pool = "${ifObj.p6}::1:1000/116"; } ];
    ddns-qualifying-suffix = "6.${ifObj.domain}";
    option-data = [
      { name = "domain-search"; data = "6.${ifObj.domain}"; }
    ];
  };
  # Reservations added to Kea
  # Static leases: v4 keyed by MAC address, v6 keyed by DUID.
  # Merged into the matching subnet definitions in the config below.
  reservations.lan.v4.reservations = [
    {
      hw-address = "64:66:b3:78:9c:09";
      hostname = "openwrt";
      ip-address = "${ifs.lan.p4}.2";
    }
    {
      hw-address = "40:86:cb:19:9d:70";
      hostname = "dlink-switchy";
      ip-address = "${ifs.lan.p4}.3";
    }
    {
      hw-address = "6c:cd:d6:af:4f:6f";
      hostname = "netgear-switchy";
      ip-address = "${ifs.lan.p4}.4";
    }
    {
      hw-address = "74:d4:35:1d:0e:80";
      hostname = "pve-1";
      ip-address = "${ifs.lan.p4}.5";
    }
    {
      hw-address = "00:25:90:f3:d0:e0";
      hostname = "pve-2";
      ip-address = "${ifs.lan.p4}.6";
    }
    {
      hw-address = "a8:a1:59:d0:57:87";
      hostname = "pve-3";
      ip-address = "${ifs.lan.p4}.7";
    }
    {
      hw-address = "22:d0:43:c6:31:92";
      hostname = "truenas";
      ip-address = "${ifs.lan.p4}.10";
    }
    {
      hw-address = "1e:d5:56:ec:c7:4a";
      hostname = "debbi";
      ip-address = "${ifs.lan.p4}.11";
    }
    {
      hw-address = "ee:42:75:2e:f1:a6";
      hostname = "etappi";
      ip-address = "${ifs.lan.p4}.12";
    }
  ];
  reservations.lan.v6.reservations = [
    {
      duid = "00:03:00:01:64:66:b3:78:9c:09";
      hostname = "openwrt";
      ip-addresses = [ "${ifs.lan.p6}::1:2" ];
    }
    {
      duid = "00:01:00:01:2e:c0:63:23:22:d0:43:c6:31:92";
      hostname = "truenas";
      ip-addresses = [ "${ifs.lan.p6}::10:1" ];
    }
    {
      duid = "00:02:00:00:ab:11:09:41:25:21:32:71:e3:77";
      hostname = "debbi";
      ip-addresses = [ "${ifs.lan.p6}::11:1" ];
    }
    {
      duid = "00:02:00:00:ab:11:6b:56:93:72:0b:3c:84:11";
      hostname = "etappi";
      ip-addresses = [ "${ifs.lan.p6}::12:1" ];
    }
  ];
  reservations.lan20.v4.reservations = [
    {
      # Router
      hw-address = "1c:3b:f3:da:5f:cc";
      hostname = "archer-ax3000";
      ip-address = "${ifs.lan20.p4}.2";
    }
    {
      # Printer
      hw-address = "30:cd:a7:c5:40:71";
      hostname = "SEC30CDA7C54071";
      ip-address = "${ifs.lan20.p4}.9";
    }
    {
      # 3D Printer
      hw-address = "20:f8:5e:ff:ae:5f";
      hostname = "GS_ffae5f";
      ip-address = "${ifs.lan20.p4}.11";
    }
    {
      hw-address = "70:85:c2:d8:87:3f";
      hostname = "Yura-PC";
      ip-address = "${ifs.lan20.p4}.40";
    }
  ];
in
{
  services.kea.dhcp4.enable = true;
  services.kea.dhcp4.settings = {
    interfaces-config.interfaces = [
      ifs.lan.name
      ifs.lan10.name
      ifs.lan20.name
      ifs.lan30.name
      ifs.lan40.name
      ifs.lan50.name
    ];
    dhcp-ddns.enable-updates = true;
    ddns-qualifying-suffix = "4.default.${ldomain}";
    subnet4 = [
      ((mkDhcp4Subnet 1 ifs.lan) // reservations.lan.v4)
      (mkDhcp4Subnet 10 ifs.lan10)
      ((mkDhcp4Subnet 20 ifs.lan20) // reservations.lan20.v4)
      (mkDhcp4Subnet 30 ifs.lan30)
      (mkDhcp4Subnet 40 ifs.lan40)
      (mkDhcp4Subnet 50 ifs.lan50)
    ];
  };
  services.kea.dhcp6.enable = true;
  services.kea.dhcp6.settings = {
    interfaces-config.interfaces = [
      ifs.lan.name
      ifs.lan10.name
      # ifs.lan20.name # Managed by Att box
      ifs.lan30.name
      ifs.lan40.name
      ifs.lan50.name
    ];
    # TODO: https://kea.readthedocs.io/en/latest/arm/ddns.html#dual-stack-environments
    dhcp-ddns.enable-updates = true;
    ddns-qualifying-suffix = "6.default.${ldomain}";
    subnet6 = [
      ((mkDhcp6Subnet 1 ifs.lan) // reservations.lan.v6)
      (mkDhcp6Subnet 10 ifs.lan10)
      (mkDhcp6Subnet 30 ifs.lan30)
      (mkDhcp6Subnet 40 ifs.lan40)
      (mkDhcp6Subnet 50 ifs.lan50)
    ];
  };
  # Forward DDNS updates from both DHCP servers to the local Knot
  # instance listening on [::1]:1053.
  services.kea.dhcp-ddns.enable = true;
  services.kea.dhcp-ddns.settings = {
    forward-ddns.ddns-domains = [
      {
        name = "${ldomain}.";
        dns-servers = [ { ip-address = "::1"; port = 1053; } ];
      }
    ];
  };
}

85
hosts/router/services.nix Normal file
View File

@@ -0,0 +1,85 @@
{ config, lib, pkgs, ... }:
let
  inherit (import ./vars.nix) domain;
  # Local service ports, resolved from their NixOS options so the proxy
  # targets below can never drift out of sync with the services themselves.
  nodeExporterPort = config.services.prometheus.exporters.node.port;
  prometheusPort = config.services.prometheus.port;
  grafanaPort = config.services.grafana.settings.server.http_port;
  adguardPort = config.services.adguardhome.port;
  glancePort = config.services.glance.settings.server.port;
in
{
  # vnStat for tracking network interface stats
  services.vnstat.enable = true;

  # Metrics: node exporter scraped by a local Prometheus instance.
  # https://wiki.nixos.org/wiki/Prometheus
  # TODO: DNS, Kea, Knot, other exporters
  services.prometheus.enable = true;
  services.prometheus.exporters.node = {
    enable = true;
    enabledCollectors = [ "systemd" ];
  };
  services.prometheus.scrapeConfigs = [
    {
      job_name = "node";
      static_configs = [
        { targets = [ "localhost:${toString nodeExporterPort}" ]; }
      ];
    }
  ];

  # Grafana, served under the /grafana/ subpath behind Caddy.
  # https://wiki.nixos.org/wiki/Grafana#Declarative_configuration
  services.grafana = {
    enable = true;
    settings = {
      # Required for the glance dashboard's embedded iframe panels.
      security.allow_embedding = true;
      server = {
        http_port = 3001;
        domain = "grouter.${domain}";
        root_url = "https://%(domain)s/grafana/";
        serve_from_sub_path = true;
      };
    };
    provision = {
      enable = true;
      datasources.settings.datasources = [
        {
          name = "Prometheus";
          type = "prometheus";
          url = "http://localhost:${toString prometheusPort}";
        }
      ];
    };
  };

  # Cloudflare API token (for ACME DNS-01), decrypted by secrix and handed
  # to Caddy via its systemd EnvironmentFile.
  secrix.system.secrets.cf-api-key.encrypted.file = ../../secrets/cf_api_key.age;
  systemd.services.caddy.serviceConfig.EnvironmentFile =
    config.secrix.system.secrets.cf-api-key.decrypted.path;

  # Caddy fronts everything: /grafana -> Grafana, /adghome -> AdGuard Home
  # (behind basic auth), everything else -> glance.
  services.caddy = {
    enable = true;
    # Caddy built with the Cloudflare DNS plugin for the DNS-01 challenge.
    package = pkgs.caddy.withPlugins {
      plugins = [ "github.com/caddy-dns/cloudflare@v0.2.1" ];
      hash = "sha256-saKJatiBZ4775IV2C5JLOmZ4BwHKFtRZan94aS5pO90=";
    };
    virtualHosts."grouter.${domain}".extraConfig = ''
      encode
      tls {
        dns cloudflare {env.CF_API_KEY}
        resolvers 1.1.1.1
      }
      @grafana path /grafana /grafana/*
      handle @grafana {
        reverse_proxy localhost:${toString grafanaPort}
      }
      redir /adghome /adghome/
      handle_path /adghome/* {
        reverse_proxy localhost:${toString adguardPort}
        basic_auth {
          Bob $2a$14$HsWmmzQTN68K3vwiRAfiUuqIjKoXEXaj9TOLUtG2mO1vFpdovmyBy
        }
      }
      handle /* {
        reverse_proxy localhost:${toString glancePort}
      }
    '';
  };
}

123
hosts/router/vars.nix Normal file
View File

@@ -0,0 +1,123 @@
let
  # Build the full addressing record for one LAN interface from its short
  # prefixes. `token` is the host part used for this router's own address
  # on that interface (e.g. token 1 -> 10.17.1.1 / <p6>::1 / <ula>::1).
  mkIfConfig = {
    name_,
    domain_,
    p4_, # /24
    p6_, # /64
    ulaPrefix_, # /64
    token? 1,
    ip6Token_? "::${toString token}",
    ulaToken_? "::${toString token}",
  }: rec {
    name = name_;
    domain = domain_;
    # IPv4: prefix, CIDR network, and this router's address on it.
    p4 = p4_;
    p4Size = 24;
    net4 = "${p4}.0/${toString p4Size}";
    addr4 = "${p4}.${toString token}";
    addr4Sized = "${addr4}/${toString p4Size}";
    # Global IPv6: delegated prefix, network, and router address.
    p6 = p6_;
    p6Size = 64;
    net6 = "${p6}::/${toString p6Size}";
    ip6Token = ip6Token_;
    addr6 = "${p6}${ip6Token}";
    addr6Sized = "${addr6}/${toString p6Size}";
    # ULA IPv6: stable internal prefix, network, and router address.
    ulaPrefix = ulaPrefix_;
    ulaSize = 64;
    ulaNet = "${ulaPrefix}::/${toString ulaSize}";
    ulaToken = ulaToken_;
    ulaAddr = "${ulaPrefix}${ulaToken}";
    ulaAddrSized = "${ulaAddr}/${toString ulaSize}";
  };
in
rec {
  pubkey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIFobB87yYVwhuYrA+tfztLuks3s9jZOqEFktwGw1mo83 root@grouter";
  domain = "cazzzer.com";
  # Local (internal) and system subdomains.
  ldomain = "l.${domain}";
  sysdomain = "sys.${domain}";
  # Physical NIC identities: permanent MACs and their link-local addresses.
  links = {
    wanMAC = "bc:24:11:4f:c9:c4";
    lanMAC = "bc:24:11:83:d8:de";
    wanLL = "fe80::be24:11ff:fe4f:c9c4";
    lanLL = "fe80::be24:11ff:fe83:d8de";
  };
  p4 = "10.17"; # .0.0/16
  # NOTE(review): pdFromWan appears empty/redacted here, which would make the
  # derived prefixes below start with bare hex digits (e.g. "f::/64") —
  # confirm the real /60 delegated-from-WAN prefix is set on deployment.
  pdFromWan = ""; # ::/60
  ulaPrefix = "fdab:07d3:581d"; # ::/48
  ifs = rec {
    # WAN is statically addressed inside the AT&T box's 192.168.1.0/24.
    wan = rec {
      name = "wan";
      addr4 = "192.168.1.61";
      addr4Sized = "${addr4}/24";
      gw4 = "192.168.1.254";
    };
    lan = mkIfConfig {
      name_ = "lan";
      domain_ = "lan.${ldomain}";
      p4_ = "${p4}.1"; # .0/24
      p6_ = "${pdFromWan}f"; # ::/64
      ulaPrefix_ = "${ulaPrefix}:0001"; # ::/64
    };
    lan10 = mkIfConfig {
      name_ = "${lan.name}.10";
      domain_ = "lab.${ldomain}";
      p4_ = "${p4}.10"; # .0/24
      p6_ = "${pdFromWan}e"; # ::/64
      ulaPrefix_ = "${ulaPrefix}:0010"; # ::/64
    };
    lan20 = mkIfConfig {
      name_ = "${lan.name}.20";
      domain_ = "life.${ldomain}";
      p4_ = "${p4}.20"; # .0/24
      p6_ = "${pdFromWan}0"; # ::/64 managed by Att box
      ulaPrefix_ = "${ulaPrefix}:0020"; # ::/64
      ip6Token_ = "::1:1"; # override ipv6 for lan20, since the Att box uses ::1 here
    };
    lan30 = mkIfConfig {
      name_ = "${lan.name}.30";
      domain_ = "iot.${ldomain}";
      p4_ = "${p4}.30"; # .0/24
      p6_ = "${pdFromWan}c"; # ::/64
      ulaPrefix_ = "${ulaPrefix}:0030"; # ::/64
    };
    lan40 = mkIfConfig {
      name_ = "${lan.name}.40";
      domain_ = "kube.${ldomain}";
      p4_ = "${p4}.40"; # .0/24
      p6_ = "${pdFromWan}b"; # ::/64
      ulaPrefix_ = "${ulaPrefix}:0040"; # ::/64
    };
    lan50 = mkIfConfig {
      name_ = "${lan.name}.50";
      domain_ = "prox.${ldomain}";
      p4_ = "${p4}.50"; # .0/24
      p6_ = "${pdFromWan}a"; # ::/64
      ulaPrefix_ = "${ulaPrefix}:0050"; # ::/64
    };
  };
  extra = {
    # The legacy OPNsense VM: lives on the untagged LAN and still owns
    # some VPN subnets, which are statically routed to it (see ifconfig.nix).
    opnsense = rec {
      addr4 = "${ifs.lan.p4}.250";
      ulaAddr = "${ifs.lan.ulaPrefix}::250";
      p6 = "${pdFromWan}d";
      net6 = "${p6}::/64";
      # VPN routes on opnsense
      routes = [
        {
          Destination = "10.6.0.0/24";
          Gateway = addr4;
        }
        {
          Destination = "10.18.0.0/20";
          Gateway = addr4;
        }
        {
          Destination = net6;
          Gateway = ulaAddr;
        }
      ];
    };
  };
}

12
renovate.json Normal file
View File

@@ -0,0 +1,12 @@
{
"$schema": "https://docs.renovatebot.com/renovate-schema.json",
"extends": [
"config:recommended"
],
"lockFileMaintenance": {
"enabled": true
},
"nix": {
"enabled": true
}
}

5
secrets/cf_api_key.age Normal file
View File

@@ -0,0 +1,5 @@
age-encryption.org/v1
-> ssh-ed25519 D2MY/A Kj69kavxx+ATNHP5pX0JtGggU76f9uRwkZp2HbjwiWc
SbU3jIcQzUzaQjRHzVSoW1WKiUj+1ijbkUKqVb406fY
--- vMV0TcchFvxw1xetQQZ0xVi2KwjLFRfZBM1gl7BGbGI
<EFBFBD><EFBFBD>1<10><><EFBFBD><EFBFBD>K<EFBFBD><<3C>