router: move most configuration into modules with options

2025-06-04 21:09:31 -07:00
parent a3f351cf38
commit 47c9cff8f5
16 changed files with 197 additions and 108 deletions

View File

@@ -1,91 +1,19 @@
 { config, lib, pkgs, ... }:
-let
-  vars = import ./vars.nix;
-  enableDesktop = false;
-in
 {
-  imports =
-    [ # Include the results of the hardware scan.
-      ./hardware-configuration.nix
-      ./ifconfig.nix
-      ./wireguard.nix
-      ./firewall.nix
-      ./dns.nix
-      ./kea.nix
-      ./glance.nix
-      ./services.nix
-    ];
-  # Secrix for secrets management
-  secrix.hostPubKey = vars.pubkey;
-  # Bootloader.
-  boot.loader.systemd-boot.enable = true;
-  boot.loader.efi.canTouchEfiVariables = false;
-  boot.kernelParams = [
-    "sysrq_always_enabled=1"
+  imports = [
+    ../../modules/router
+    ./hardware-configuration.nix
+    ./private.nix
   ];
-  boot.loader.timeout = 2;
-  boot.loader.systemd-boot.configurationLimit = 5;
-  boot.kernelPackages = pkgs.linuxKernel.packages.linux_6_12;
-  boot.growPartition = true;
+  router = {
+    enableDesktop = false;
+    enableDhcpClient = true;
+    wanMAC = "bc:24:11:4f:c9:c4";
+    lanMAC = "bc:24:11:83:d8:de";
+    wanLL = "fe80::be24:11ff:fe4f:c9c4";
+    lanLL = "fe80::be24:11ff:fe83:d8de";
+    defaultToken = 1;
+  };
   networking.hostName = "grouter";
-  # Enable the X11 windowing system.
-  # You can disable this if you're only using the Wayland session.
-  services.xserver.enable = false;
-  # Enable the KDE Plasma Desktop Environment.
-  # Useful for debugging with wireshark.
-  hardware.graphics.enable = true;
-  services.displayManager.sddm.enable = enableDesktop;
-  services.displayManager.sddm.wayland.enable = enableDesktop;
-  services.desktopManager.plasma6.enable = enableDesktop;
-  # No need for audio in VM
-  services.pipewire.enable = false;
-  # VM services
-  services.qemuGuest.enable = true;
-  services.spice-vdagentd.enable = true;
-  security.sudo.wheelNeedsPassword = false;
-  programs.firefox.enable = true;
-  programs.fish.enable = true;
-  programs.git.enable = true;
-  programs.neovim.enable = true;
-  programs.bat.enable = true;
-  programs.htop.enable = true;
-  programs.wireshark.enable = true;
-  programs.wireshark.package = pkgs.wireshark; # wireshark-cli by default
-  environment.systemPackages = with pkgs; [
-    dust
-    eza
-    fastfetch
-    fd
-    kdePackages.kate
-    ldns
-    lsof
-    micro
-    mpv
-    openssl
-    ripgrep
-    rustscan
-    starship
-    tealdeer
-    transcrypt
-    waypipe
-    whois
-    wireguard-tools
-  ];
-  # This value determines the NixOS release from which the default
-  # settings for stateful data, like file locations and database versions
-  # on your system were taken. It's perfectly fine and recommended to leave
-  # this value at the release version of the first install of this system.
-  # Before changing this value read the documentation for this option
-  # (e.g. man configuration.nix or on https://nixos.org/nixos/options.html).
   system.stateVersion = "24.11"; # Did you read the comment?
 }
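
The ../../modules/router module itself is not part of this diff. A minimal sketch of the option interface it would have to declare to satisfy the block above (option names come from the consumer side; the types are assumptions):

{ lib, ... }:
{
  options.router = {
    enableDesktop = lib.mkEnableOption "desktop environment for on-router debugging";
    enableDhcpClient = lib.mkEnableOption "dhcpcd prefix delegation client on the WAN link";
    wanMAC = lib.mkOption { type = lib.types.str; };
    lanMAC = lib.mkOption { type = lib.types.str; };
    wanLL = lib.mkOption { type = lib.types.str; };
    lanLL = lib.mkOption { type = lib.types.str; };
    defaultToken = lib.mkOption { type = lib.types.int; default = 1; };
  };
}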

View File

@@ -1,135 +0,0 @@
{ config, lib, pkgs, ... }:
let
vars = import ./vars.nix;
domain = vars.domain;
ldomain = vars.ldomain;
sysdomain = vars.sysdomain;
ifs = vars.ifs;
alpinaDomains = [
"|"
"|nc."
"|sonarr."
"|radarr."
"|prowlarr."
"|qbit."
"|gitea."
"|traefik."
"|auth."
"||s3."
"|minio."
"|jellyfin."
"|whoami."
"|grafana."
"|influxdb."
"|uptime."
"|opnsense."
"|vpgen."
"|woodpecker."
"||pgrok."
"|sync."
];
in
{
# https://github.com/quic-go/quic-go/wiki/UDP-Buffer-Sizes
# For upstream quic dns
boot.kernel.sysctl."net.core.wmem_max" = 7500000;
boot.kernel.sysctl."net.core.rmem_max" = 7500000;
services.resolved.enable = false;
networking.resolvconf.enable = true;
networking.resolvconf.useLocalResolver = true;
services.adguardhome.enable = true;
services.adguardhome.mutableSettings = false;
# https://github.com/AdguardTeam/Adguardhome/wiki/Configuration
services.adguardhome.settings = {
querylog.interval = "168h"; # 7 days
dns = {
# Disable rate limit, default of 20 is too low
# https://github.com/AdguardTeam/AdGuardHome/issues/6726
ratelimit = 0;
enable_dnssec = true;
bootstrap_dns = [ "1.1.1.1" "9.9.9.9" ];
upstream_dns = [
# Default upstreams
"quic://p0.freedns.controld.com"
"tls://one.one.one.one"
"tls://dns.quad9.net"
# AdGuard resolves the targets of CNAME rewrites through its upstreams, not its rewrite rules,
# and obviously my sysdomain entries don't exist in cloudflare.
"[/${sysdomain}/][::1]" # Sys domains to self (for cname rewrites)
"[/${ldomain}/][::1]:1053" # Local domains to Knot (ddns)
"[/home/][${ifs.lan.ulaPrefix}::250]" # .home domains to opnsense (temporary)
];
};
# https://adguard-dns.io/kb/general/dns-filtering-syntax/
user_rules = [
# DNS rewrites
"|grouter.${domain}^$dnsrewrite=${ifs.lan.ulaAddr}"
"|pve-1.${sysdomain}^$dnsrewrite=${ifs.lan.p4}.5"
"|pve-1.${sysdomain}^$dnsrewrite=${ifs.lan.ulaPrefix}::5:1"
"|pve-3.${sysdomain}^$dnsrewrite=${ifs.lan.p4}.7"
"|pve-3.${sysdomain}^$dnsrewrite=${ifs.lan.ulaPrefix}::7:1"
"|truenas.${sysdomain}^$dnsrewrite=${ifs.lan.p4}.10"
"|truenas.${sysdomain}^$dnsrewrite=${ifs.lan.ulaPrefix}::20d0:43ff:fec6:3192"
"|debbi.${sysdomain}^$dnsrewrite=${ifs.lan.p4}.11"
"|debbi.${sysdomain}^$dnsrewrite=${ifs.lan.ulaPrefix}::11:1"
"|etappi.${sysdomain}^$dnsrewrite=${ifs.lan.p4}.12"
"|etappi.${sysdomain}^$dnsrewrite=${ifs.lan.ulaPrefix}::12:1"
# Lab DNS rewrites
"||lab.${domain}^$dnsrewrite=etappi.${sysdomain}"
# Allowed exceptions
"@@||googleads.g.doubleclick.net"
]
# Alpina DNS rewrites
++ map (host: "${host}${domain}^$dnsrewrite=debbi.${sysdomain}") alpinaDomains;
};
services.knot.enable = true;
services.knot.settings = {
# server.listen = "0.0.0.0@1053";
server.listen = "::1@1053";
zone = [
{
domain = ldomain;
storage = "/var/lib/knot/zones";
file = "${ldomain}.zone";
acl = [ "allow_localhost_update" ];
}
];
acl = [
{
id = "allow_localhost_update";
address = [ "::1" "127.0.0.1" ];
action = [ "update" ];
}
];
};
# Ensure the zone file exists
system.activationScripts.knotZoneFile = ''
ZONE_DIR="/var/lib/knot/zones"
ZONE_FILE="$ZONE_DIR/${ldomain}.zone"
# Create the directory if it doesn't exist
mkdir -p "$ZONE_DIR"
# Check if the zone file exists
if [ ! -f "$ZONE_FILE" ]; then
# Create the zone file with a basic SOA record
# Serial; Refresh; Retry; Expire; Negative Cache TTL;
echo "${ldomain}. 3600 SOA ns.${ldomain}. admin.${ldomain}. 1 86400 900 691200 3600" > "$ZONE_FILE"
echo "Created new zone file: $ZONE_FILE"
else
echo "Zone file already exists: $ZONE_FILE"
fi
# Ensure proper ownership and permissions
chown -R knot:knot "/var/lib/knot"
chmod 644 "$ZONE_FILE"
'';
}
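
The activation script above seeds the zone file imperatively. A roughly equivalent declarative sketch (an alternative, not what this commit does) would use systemd-tmpfiles, whose "C" rule copies only when the destination is missing, so Knot's dynamic updates to the live zone still survive rebuilds:

{ pkgs, ... }:
let
  ldomain = "l.example.com"; # stand-in; the real value comes from vars.nix
  zoneTemplate = pkgs.writeText "${ldomain}.zone" ''
    ${ldomain}. 3600 SOA ns.${ldomain}. admin.${ldomain}. 1 86400 900 691200 3600
  '';
in {
  systemd.tmpfiles.rules = [
    "d /var/lib/knot/zones 0755 knot knot -"
    "C /var/lib/knot/zones/${ldomain}.zone 0644 knot knot - ${zoneTemplate}"
  ];
}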

View File

@@ -1,221 +0,0 @@
{ config, lib, pkgs, ... }:
let
vars = import ./vars.nix;
links = vars.links;
ifs = vars.ifs;
pdFromWan = vars.pdFromWan;
nftIdentifiers = ''
define ZONE_WAN_IFS = { ${ifs.wan.name} }
define ZONE_LAN_IFS = {
${ifs.lan.name},
${ifs.lan10.name},
${ifs.lan20.name},
${ifs.lan30.name},
${ifs.lan40.name},
${ifs.lan50.name},
${ifs.wg0.name},
}
define OPNSENSE_NET6 = ${vars.extra.opnsense.net6}
define ZONE_LAN_EXTRA_NET6 = {
# TODO: reevaluate this statement
${ifs.lan20.net6}, # needed since packets can come in from wan on these addrs
$OPNSENSE_NET6,
}
define RFC1918 = { 10.0.0.0/8, 172.16.0.0/12, 192.168.0.0/16 }
define CLOUDFLARE_NET6 = {
# https://www.cloudflare.com/ips-v6
# TODO: figure out a better way to get addrs dynamically from url
# perhaps building a nixos module/package that fetches the ips?
2400:cb00::/32,
2606:4700::/32,
2803:f800::/32,
2405:b500::/32,
2405:8100::/32,
2a06:98c0::/29,
2c0f:f248::/32,
}
'';
in
{
networking.firewall.enable = false;
networking.nftables.enable = true;
# networking.nftables.ruleset = nftIdentifiers; # doesn't work because it's appended to the end
networking.nftables.tables.nat4 = {
family = "ip";
content = ''
${nftIdentifiers}
map port_forward {
type inet_proto . inet_service : ipv4_addr . inet_service
elements = {
tcp . 8006 : ${ifs.lan50.p4}.10 . 8006,
# opnsense vpn endpoints
# the plan is to maybe eventually move these to nixos
udp . 18596 : ${vars.extra.opnsense.addr4} . 18596,
udp . 48512 : ${vars.extra.opnsense.addr4} . 48512,
udp . 40993 : ${vars.extra.opnsense.addr4} . 40993,
udp . 45608 : ${vars.extra.opnsense.addr4} . 45608,
udp . 35848 : ${vars.extra.opnsense.addr4} . 35848,
udp . 48425 : ${vars.extra.opnsense.addr4} . 48425,
}
}
chain prerouting {
# Initial step, accept by default
type nat hook prerouting priority dstnat; policy accept;
# Port forwarding
fib daddr type local dnat ip to meta l4proto . th dport map @port_forward
}
chain postrouting {
# Last step, accept by default
type nat hook postrouting priority srcnat; policy accept;
# Masquerade LAN addrs
oifname $ZONE_WAN_IFS ip saddr $RFC1918 masquerade
}
'';
};
# Optional IPv6 masquerading (big L if enabled, don't forget to allow forwarding)
networking.nftables.tables.nat6 = {
family = "ip6";
enable = false;
content = ''
${nftIdentifiers}
chain postrouting {
type nat hook postrouting priority srcnat; policy accept;
oifname $ZONE_WAN_IFS ip6 saddr fd00::/8 masquerade
}
'';
};
networking.nftables.tables.firewall = {
family = "inet";
content = ''
${nftIdentifiers}
define ALLOWED_TCP_PORTS = { ssh }
define ALLOWED_UDP_PORTS = { ${toString vars.ifs.wg0.listenPort} }
define ALLOWED_TCP_LAN_PORTS = { ssh, https }
define ALLOWED_UDP_LAN_PORTS = { bootps, dhcpv6-server, domain, https }
set port_forward_v6 {
type inet_proto . ipv6_addr . inet_service
elements = {
# syncthing on alpina
tcp . ${ifs.lan.p6}::11:1 . 22000 ,
udp . ${ifs.lan.p6}::11:1 . 22000 ,
}
}
set cloudflare_forward_v6 {
type ipv6_addr
elements = {
${ifs.lan.p6}::11:1,
}
}
chain input {
type filter hook input priority filter; policy drop;
# Drop router adverts from self
# peculiarity due to wan and lan20 being bridged
# TODO: figure out a less jank way to do this
iifname $ZONE_WAN_IFS ip6 saddr ${links.lanLL} icmpv6 type nd-router-advert log prefix "self radvt: " drop
# iifname $ZONE_WAN_IFS ip6 saddr ${links.lanLL} ip6 nexthdr icmpv6 log prefix "self icmpv6: " drop
# iifname $ZONE_WAN_IFS ip6 saddr ${links.lanLL} log prefix "self llv6: " drop
# iifname $ZONE_WAN_IFS ip6 saddr ${links.lanLL} log drop
# iifname $ZONE_LAN_IFS ip6 saddr ${links.wanLL} log drop
# Allow established and related connections
# All icmp stuff should (theoretically) be handled by ct related
# https://serverfault.com/a/632363
ct state established,related accept
# However, that doesn't happen for router advertisements from what I can tell
# TODO: more testing
# Allow ICMPv6 on local addrs
ip6 nexthdr icmpv6 ip6 saddr { fe80::/10, ${pdFromWan}0::/60 } accept
ip6 nexthdr icmpv6 ip6 daddr fe80::/10 accept # TODO: not sure if necessary
# Allow all traffic from loopback interface
iif lo accept
# Allow DHCPv6 traffic
# I thought dhcpv6-client traffic would be accepted by established/related,
# but apparently not.
ip6 daddr { fe80::/10, ff02::/16 } th dport { dhcpv6-client, dhcpv6-server } accept
# Global input rules
tcp dport $ALLOWED_TCP_PORTS accept
udp dport $ALLOWED_UDP_PORTS accept
# WAN zone input rules
iifname $ZONE_WAN_IFS jump zone_wan_input
# LAN zone input rules
# iifname $ZONE_LAN_IFS accept
iifname $ZONE_LAN_IFS jump zone_lan_input
ip6 saddr $ZONE_LAN_EXTRA_NET6 jump zone_lan_input
# log
}
chain forward {
type filter hook forward priority filter; policy drop;
# Allow established and related connections
ct state established,related accept
# WAN zone forward rules
iifname $ZONE_WAN_IFS jump zone_wan_forward
# LAN zone forward rules
iifname $ZONE_LAN_IFS jump zone_lan_forward
ip6 saddr $ZONE_LAN_EXTRA_NET6 jump zone_lan_forward
}
chain zone_wan_input {
# Allow specific stuff from WAN
}
chain zone_wan_forward {
# Port forwarding
ct status dnat accept
# Allowed IPv6 ports
meta l4proto . ip6 daddr . th dport @port_forward_v6 accept
# Allowed IPv6 from cloudflare
ip6 saddr $CLOUDFLARE_NET6 ip6 daddr @cloudflare_forward_v6 th dport https accept
}
chain zone_lan_input {
# Allow all ICMPv6 from LAN
ip6 nexthdr icmpv6 accept
# Allow all ICMP from LAN
ip protocol icmp accept
# Allow specific services from LAN
tcp dport $ALLOWED_TCP_LAN_PORTS accept
udp dport $ALLOWED_UDP_LAN_PORTS accept
}
chain zone_lan_forward {
# Allow port forwarded targets
# ct status dnat accept
# Allow all traffic from LAN to WAN, except ULAs
oifname $ZONE_WAN_IFS ip6 saddr fd00::/8 drop # Not sure if needed
oifname $ZONE_WAN_IFS accept;
# Allow traffic between LANs
oifname $ZONE_LAN_IFS accept
}
chain output {
# Accept anything out of self by default
type filter hook output priority filter; policy accept;
# NAT reflection
# oif lo ip daddr != 127.0.0.0/8 dnat ip to meta l4proto . th dport map @port_forward_v4
}
'';
};
}
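
The prerouting rule resolves "protocol . destination port" through the port_forward map in a single lookup. Given the commit's move toward modules with options, a map body like this is the kind of thing that could be rendered from an option value; a hypothetical sketch (the portForwards shape is an assumption, the rendered elements match the handwritten ones above):

{ lib, ... }:
let
  portForwards = [
    { proto = "tcp"; port = 8006; dest = "10.17.50.10"; destPort = 8006; }
    { proto = "udp"; port = 18596; dest = "10.17.1.250"; destPort = 18596; }
  ];
  mkElem = f: "${f.proto} . ${toString f.port} : ${f.dest} . ${toString f.destPort}";
in {
  # splice into: map port_forward { ... elements = { <here> } }
  elementsBody = lib.concatStringsSep ",\n" (map mkElem portForwards);
}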

View File

@@ -1,166 +0,0 @@
{ config, lib, pkgs, ... }:
let
vars = import ./vars.nix;
domain = vars.domain;
in
{
# Glance dashboard
services.glance.enable = true;
services.glance.settings.pages = [
{
name = "Home";
# hideDesktopNavigation = true; # Uncomment if needed
columns = [
{
size = "small";
widgets = [
{
type = "calendar";
firstDayOfWeek = "monday";
}
{
type = "rss";
limit = 10;
collapseAfter = 3;
cache = "12h";
feeds = [
{ url = "https://rtk0c.pages.dev/index.xml"; }
{ url = "https://www.yegor256.com/rss.xml"; }
{ url = "https://selfh.st/rss/"; title = "selfh.st"; }
{ url = "https://ciechanow.ski/atom.xml"; }
{ url = "https://www.joshwcomeau.com/rss.xml"; title = "Josh Comeau"; }
{ url = "https://samwho.dev/rss.xml"; }
{ url = "https://ishadeed.com/feed.xml"; title = "Ahmad Shadeed"; }
];
}
{
type = "twitch-channels";
channels = [
"theprimeagen"
"j_blow"
"piratesoftware"
"cohhcarnage"
"christitustech"
"EJ_SA"
];
}
];
}
{
size = "full";
widgets = [
{
type = "group";
widgets = [
{ type = "hacker-news"; }
{ type = "lobsters"; }
];
}
{
type = "videos";
channels = [
"UCXuqSBlHAE6Xw-yeJA0Tunw" # Linus Tech Tips
"UCR-DXc1voovS8nhAvccRZhg" # Jeff Geerling
"UCsBjURrPoezykLs9EqgamOA" # Fireship
"UCBJycsmduvYEL83R_U4JriQ" # Marques Brownlee
"UCHnyfMqiRRG1u-2MsSQLbXA" # Veritasium
];
}
{
type = "group";
widgets = [
{
type = "reddit";
subreddit = "technology";
showThumbnails = true;
}
{
type = "reddit";
subreddit = "selfhosted";
showThumbnails = true;
}
];
}
];
}
{
size = "small";
widgets = [
{
type = "weather";
location = "San Jose, California, United States";
units = "metric";
hourFormat = "12h";
# hideLocation = true; # Uncomment if needed
}
{
type = "markets";
markets = [
{ symbol = "SPY"; name = "S&P 500"; }
{ symbol = "BTC-USD"; name = "Bitcoin"; }
{ symbol = "NVDA"; name = "NVIDIA"; }
{ symbol = "AAPL"; name = "Apple"; }
{ symbol = "MSFT"; name = "Microsoft"; }
];
}
{
type = "releases";
cache = "1d";
# token = "..."; # Uncomment and set if needed
repositories = [
"glanceapp/glance"
"go-gitea/gitea"
"immich-app/immich"
"syncthing/syncthing"
];
}
];
}
];
}
{
name = "Infrastructure";
columns = [
{
size = "small";
widgets = [
{
type = "server-stats";
servers = [
{
type = "local";
name = "Router";
mountpoints."/nix/store".hide = true;
}
];
}
];
}
{
size = "full";
widgets = [
{
type = "iframe";
title = "Grafana";
title-url = "/grafana/";
source = "/grafana/d-solo/rYdddlPWk/node-exporter-full?orgId=1&from=1747211119196&to=1747297519196&timezone=browser&var-datasource=PBFA97CFB590B2093&var-job=node&var-node=localhost:9100&var-diskdevices=%5Ba-z%5D%2B%7Cnvme%5B0-9%5D%2Bn%5B0-9%5D%2B%7Cmmcblk%5B0-9%5D%2B&refresh=1m&panelId=74&__feature.dashboardSceneSolo";
height = 400;
}
];
}
{
size = "small";
widgets = [
{
type = "dns-stats";
service = "adguard";
url = "http://localhost:${toString config.services.adguardhome.port}";
username = "";
password = "";
}
];
}
];
}
];
}

View File

@@ -1,187 +0,0 @@
{ config, lib, pkgs, ... }:
let
vars = import ./vars.nix;
links = vars.links;
ifs = vars.ifs;
pdFromWan = vars.pdFromWan;
ulaPrefix = vars.ulaPrefix;
mkVlanDev = { id, name }: {
netdevConfig = {
Kind = "vlan";
Name = name;
};
vlanConfig.Id = id;
};
mkLanConfig = ifObj: {
matchConfig.Name = ifObj.name;
networkConfig = {
IPv4Forwarding = true;
IPv6SendRA = true;
Address = [ ifObj.addr4Sized ifObj.addr6Sized ifObj.ulaAddrSized ];
};
ipv6Prefixes = [
{
Prefix = ifObj.net6;
Assign = true;
# Token = [ "static::1" "eui64" ];
Token = [ "static:${ifObj.ip6Token}" ];
}
{
Prefix = ifObj.ulaNet;
Assign = true;
Token = [ "static:${ifObj.ulaToken}" ];
}
];
ipv6RoutePrefixes = [ { Route = "${ulaPrefix}::/48"; } ];
ipv6SendRAConfig = {
# don't manage the att box subnet
# should work fine either way though
Managed = (ifObj.p6 != "${pdFromWan}0");
OtherInformation = (ifObj.p6 != "${pdFromWan}0");
EmitDNS = true;
DNS = [ ifObj.ulaAddr ];
};
};
in
{
# By default, Linux will respond to ARP requests that belong to other interfaces.
# Normally this isn't a problem, but it causes issues
# since my WAN and LAN20 are technically bridged.
# https://networkengineering.stackexchange.com/questions/83071/why-linux-answers-arp-requests-for-ips-that-belong-to-different-network-interfac
boot.kernel.sysctl."net.ipv4.conf.default.arp_filter" = 1;
# It is impossible to do multiple prefix requests with networkd,
# so I use dhcpcd for this
# https://github.com/systemd/systemd/issues/22571
# https://github.com/systemd/systemd/issues/22571#issuecomment-2094905496
# https://gist.github.com/csamsel/0f8cca3b2e64d7e4cc47819ec5ba9396
networking.dhcpcd.enable = true;
networking.dhcpcd.allowInterfaces = [ ifs.wan.name ];
networking.dhcpcd.extraConfig = ''
debug
nohook resolv.conf, yp, hostname, ntp
interface ${ifs.wan.name}
ipv6only
duid
ipv6rs
dhcp6
# option rapid_commit
# DHCPv6 addr
ia_na
# DHCPv6 Prefix Delegation
# request the leases just for routing (so that the att box knows we're here)
# actual ip assignments are static, based on $pdFromWan
ia_pd 1/${ifs.lan.net6} -
ia_pd 10/${ifs.lan10.net6} -
# ia_pd 20/${pdFromWan}d::/64 - # for opnsense (legacy services)
ia_pd 30/${ifs.lan30.net6} -
ia_pd 40/${ifs.lan40.net6} -
ia_pd 50/${ifs.lan50.net6} -
ia_pd 100/${pdFromWan}9::/64 - # for vpn stuff
# ia_pd 8 -
# the leases can be assigned to the interfaces,
# but this doesn't play well with networkd
# ia_pd 1 ${ifs.lan.name}/0
# ia_pd 2 ${ifs.lan10.name}/0
# ia_pd 3 ${ifs.lan20.name}/0
'';
networking.useNetworkd = true;
systemd.network.enable = true;
systemd.network = {
# Global options
config.networkConfig = {
IPv4Forwarding = true;
IPv6Forwarding = true;
};
# This is applied by udev, not networkd
# https://nixos.wiki/wiki/Systemd-networkd
# https://nixos.org/manual/nixos/stable/#sec-rename-ifs
links = {
"10-wan" = {
matchConfig.PermanentMACAddress = links.wanMAC;
linkConfig.Name = ifs.wan.name;
};
"10-lan" = {
matchConfig.PermanentMACAddress = links.lanMAC;
linkConfig.Name = ifs.lan.name;
};
};
netdevs = {
"10-vlan10" = mkVlanDev { id = 10; name = ifs.lan10.name; };
"10-vlan20" = mkVlanDev { id = 20; name = ifs.lan20.name; };
"10-vlan30" = mkVlanDev { id = 30; name = ifs.lan30.name; };
"10-vlan40" = mkVlanDev { id = 40; name = ifs.lan40.name; };
"10-vlan50" = mkVlanDev { id = 50; name = ifs.lan50.name; };
};
networks = {
"10-wan" = {
matchConfig.Name = ifs.wan.name;
# make routing on this interface a dependency for network-online.target
linkConfig.RequiredForOnline = "routable";
networkConfig = {
# start a DHCP Client for IPv4 Addressing/Routing
# DHCP = "ipv4";
# accept Router Advertisements for Stateless IPv6 Autoconfiguraton (SLAAC)
# let dhcpcd handle this
Address = [ ifs.wan.addr4Sized ];
IPv6AcceptRA = false;
KeepConfiguration = true;
};
routes = [
{ Gateway = ifs.wan.gw4; }
];
};
"20-lan" = (mkLanConfig ifs.lan) // {
vlan = [
ifs.lan10.name
ifs.lan20.name
ifs.lan30.name
ifs.lan40.name
ifs.lan50.name
];
routes = vars.extra.opnsense.routes;
};
"30-vlan10" = mkLanConfig ifs.lan10;
"30-vlan20" = mkLanConfig ifs.lan20;
"30-vlan30" = mkLanConfig ifs.lan30;
"30-vlan40" = mkLanConfig ifs.lan40;
"30-vlan50" = mkLanConfig ifs.lan50;
};
};
# For some reason, the interfaces stop receiving route solicitations after a while.
# Regular router adverts still get sent out at intervals, but this breaks dhcp6 clients.
# Restarting networkd makes it work again, I have no clue why.
# This is jank af, but I've tried a bunch of other stuff with no success
# and I'm giving up (for now).
systemd.timers."restart-networkd" = {
wantedBy = [ "timers.target" ];
timerConfig = {
OnBootSec = "1m";
OnUnitActiveSec = "1m";
Unit = "restart-networkd.service";
};
};
systemd.services."restart-networkd" = {
script = ''
set -eu
${pkgs.systemd}/bin/systemctl restart systemd-networkd
'';
serviceConfig = {
Type = "oneshot";
User = "root";
};
};
}
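
For reference, the two helpers are plain functions over the vars.nix interface objects. mkVlanDev { id = 10; name = "lan.10"; } evaluates to:

{
  netdevConfig = {
    Kind = "vlan";
    Name = "lan.10";
  };
  vlanConfig.Id = 10;
}

which systemd-networkd materializes as a lan.10 VLAN netdev; mkLanConfig then attaches addresses, RA prefixes, and DNS to the matching network the same way for every LAN.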

View File

@@ -1,183 +0,0 @@
{ config, lib, pkgs, ... }:
let
vars = import ./vars.nix;
ldomain = vars.ldomain;
ifs = vars.ifs;
mkDhcp4Subnet = id: ifObj: {
id = id;
subnet = ifObj.net4;
pools = [ { pool = "${ifObj.p4}.100 - ${ifObj.p4}.199"; } ];
ddns-qualifying-suffix = "4.${ifObj.domain}";
option-data = [
{ name = "routers"; data = ifObj.addr4; }
{ name = "domain-name-servers"; data = ifObj.addr4; }
{ name = "domain-name"; data = "4.${ifObj.domain}"; }
];
};
mkDhcp6Subnet = id: ifObj: {
id = id;
interface = ifObj.name;
subnet = ifObj.net6;
rapid-commit = true;
pools = [ { pool = "${ifObj.p6}::1:1000/116"; } ];
ddns-qualifying-suffix = "6.${ifObj.domain}";
option-data = [
{ name = "domain-search"; data = "6.${ifObj.domain}"; }
];
};
# Reservations added to Kea
reservations.lan.v4.reservations = [
{
hw-address = "64:66:b3:78:9c:09";
hostname = "openwrt";
ip-address = "${ifs.lan.p4}.2";
}
{
hw-address = "40:86:cb:19:9d:70";
hostname = "dlink-switchy";
ip-address = "${ifs.lan.p4}.3";
}
{
hw-address = "6c:cd:d6:af:4f:6f";
hostname = "netgear-switchy";
ip-address = "${ifs.lan.p4}.4";
}
{
hw-address = "74:d4:35:1d:0e:80";
hostname = "pve-1";
ip-address = "${ifs.lan.p4}.5";
}
{
hw-address = "00:25:90:f3:d0:e0";
hostname = "pve-2";
ip-address = "${ifs.lan.p4}.6";
}
{
hw-address = "a8:a1:59:d0:57:87";
hostname = "pve-3";
ip-address = "${ifs.lan.p4}.7";
}
{
hw-address = "22:d0:43:c6:31:92";
hostname = "truenas";
ip-address = "${ifs.lan.p4}.10";
}
{
hw-address = "1e:d5:56:ec:c7:4a";
hostname = "debbi";
ip-address = "${ifs.lan.p4}.11";
}
{
hw-address = "ee:42:75:2e:f1:a6";
hostname = "etappi";
ip-address = "${ifs.lan.p4}.12";
}
];
reservations.lan.v6.reservations = [
{
duid = "00:03:00:01:64:66:b3:78:9c:09";
hostname = "openwrt";
ip-addresses = [ "${ifs.lan.p6}::1:2" ];
}
{
duid = "00:01:00:01:2e:c0:63:23:22:d0:43:c6:31:92";
hostname = "truenas";
ip-addresses = [ "${ifs.lan.p6}::10:1" ];
}
{
duid = "00:02:00:00:ab:11:09:41:25:21:32:71:e3:77";
hostname = "debbi";
ip-addresses = [ "${ifs.lan.p6}::11:1" ];
}
{
duid = "00:02:00:00:ab:11:6b:56:93:72:0b:3c:84:11";
hostname = "etappi";
ip-addresses = [ "${ifs.lan.p6}::12:1" ];
}
];
reservations.lan20.v4.reservations = [
{
# Router
hw-address = "1c:3b:f3:da:5f:cc";
hostname = "archer-ax3000";
ip-address = "${ifs.lan20.p4}.2";
}
{
# Printer
hw-address = "30:cd:a7:c5:40:71";
hostname = "SEC30CDA7C54071";
ip-address = "${ifs.lan20.p4}.9";
}
{
# 3D Printer
hw-address = "20:f8:5e:ff:ae:5f";
hostname = "GS_ffae5f";
ip-address = "${ifs.lan20.p4}.11";
}
{
hw-address = "70:85:c2:d8:87:3f";
hostname = "Yura-PC";
ip-address = "${ifs.lan20.p4}.40";
}
];
in
{
services.kea.dhcp4.enable = true;
services.kea.dhcp4.settings = {
interfaces-config.interfaces = [
ifs.lan.name
ifs.lan10.name
ifs.lan20.name
ifs.lan30.name
ifs.lan40.name
ifs.lan50.name
];
dhcp-ddns.enable-updates = true;
ddns-qualifying-suffix = "4.default.${ldomain}";
subnet4 = [
((mkDhcp4Subnet 1 ifs.lan) // reservations.lan.v4)
(mkDhcp4Subnet 10 ifs.lan10)
((mkDhcp4Subnet 20 ifs.lan20) // reservations.lan20.v4)
(mkDhcp4Subnet 30 ifs.lan30)
(mkDhcp4Subnet 40 ifs.lan40)
(mkDhcp4Subnet 50 ifs.lan50)
];
};
services.kea.dhcp6.enable = true;
services.kea.dhcp6.settings = {
interfaces-config.interfaces = [
ifs.lan.name
ifs.lan10.name
# ifs.lan20.name # Managed by Att box
ifs.lan30.name
ifs.lan40.name
ifs.lan50.name
];
# TODO: https://kea.readthedocs.io/en/latest/arm/ddns.html#dual-stack-environments
dhcp-ddns.enable-updates = true;
ddns-qualifying-suffix = "6.default.${ldomain}";
subnet6 = [
((mkDhcp6Subnet 1 ifs.lan) // reservations.lan.v6)
(mkDhcp6Subnet 10 ifs.lan10)
(mkDhcp6Subnet 30 ifs.lan30)
(mkDhcp6Subnet 40 ifs.lan40)
(mkDhcp6Subnet 50 ifs.lan50)
];
};
services.kea.dhcp-ddns.enable = true;
services.kea.dhcp-ddns.settings = {
forward-ddns.ddns-domains = [
{
name = "${ldomain}.";
dns-servers = [ { ip-address = "::1"; port = 1053; } ];
}
];
};
}
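
The ((mkDhcp4Subnet 1 ifs.lan) // reservations.lan.v4) pattern leans on Nix's right-biased shallow attrset merge to bolt the reservations key onto the generated subnet. With the lan values from vars.nix it comes out roughly as:

{
  id = 1;
  subnet = "10.17.1.0/24";
  pools = [ { pool = "10.17.1.100 - 10.17.1.199"; } ];
  ddns-qualifying-suffix = "4.lan.l.cazzzer.com";
  option-data = [
    { name = "routers"; data = "10.17.1.1"; }
    { name = "domain-name-servers"; data = "10.17.1.1"; }
    { name = "domain-name"; data = "4.lan.l.cazzzer.com"; }
  ];
  reservations = [ /* the nine lan v4 entries above */ ];
}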

View File

@@ -1,3 +1,3 @@
-U2FsdGVkX1/MGwxIMmhbZuPqJUZzex9v5tTuiOrEvDVldI7xY0edUy9Ii5udFLXt
-AbxShsH43FH1ucgygKDlKTB/yXZWDoDVhjdgLFjaBtAt0xyAL9Doet844oDVhbbO
-FqOwXHMd+PE23rgqiCscsA==
+U2FsdGVkX1+eMFkQxarJDGLkX0zXyMRPukeRNvzd/BJ0XDAUZ2EeZvQnZw8U53Xz
+W97X4rvT+K/NQ7FVCYfOg1XpQhzlSiC9z1M4WLmPRutUgsDY5n5RFJu2R8K5DAfi
+sAPcxiQUGVKrmDUva16yNqoR2Dkx2XE9iW1hpkfGFYVUK+QKIBkUEowQQlJJ1Wg1

View File

@@ -1,5 +0,0 @@
age-encryption.org/v1
-> ssh-ed25519 D2MY/A Kj69kavxx+ATNHP5pX0JtGggU76f9uRwkZp2HbjwiWc
SbU3jIcQzUzaQjRHzVSoW1WKiUj+1ijbkUKqVb406fY
--- vMV0TcchFvxw1xetQQZ0xVi2KwjLFRfZBM1gl7BGbGI
[binary payload omitted]

View File

@@ -1,5 +0,0 @@
age-encryption.org/v1
-> ssh-ed25519 D2MY/A cRVo1AetNYKsb28kGpe6mVpoCyfNcRibeBYhJuXbbEY
k8XL4XEv4FM6sfU/TOFTg4vlKm61409No/TpCEjTnSk
--- mT9w1vnx2FrzWw+Zt1wV6UJ+mjHTizrUPVeaTisYQ74
[binary payload omitted]

View File

@@ -1,5 +0,0 @@
age-encryption.org/v1
-> ssh-ed25519 D2MY/A Xg7XTl/qJqVqvXsHNKcoICq74DeOlquN1CEn1PwxlVY
FqmPdDgmuUrwZPLW56RhW8o1VXr5l2Xms6IVebpi7bA
--- nLT/bC55EvoXK6f7DYbMhD3I8Z122bxeGVw1PCds2IM
[binary payload omitted]

View File

@@ -1,85 +0,0 @@
{ config, lib, pkgs, ... }:
let
vars = import ./vars.nix;
domain = vars.domain;
in
{
# vnStat for tracking network interface stats
services.vnstat.enable = true;
# https://wiki.nixos.org/wiki/Prometheus
services.prometheus = {
enable = true;
exporters = {
# TODO: DNS, Kea, Knot, other exporters
node = {
enable = true;
enabledCollectors = [ "systemd" ];
};
};
scrapeConfigs = [
{
job_name = "node";
static_configs = [{
targets = [ "localhost:${toString config.services.prometheus.exporters.node.port}" ];
}];
}
];
};
# https://wiki.nixos.org/wiki/Grafana#Declarative_configuration
services.grafana = {
enable = true;
settings = {
security.allow_embedding = true;
server = {
http_port = 3001;
domain = "grouter.${domain}";
root_url = "https://%(domain)s/grafana/";
serve_from_sub_path = true;
};
};
provision = {
enable = true;
datasources.settings.datasources = [
{
name = "Prometheus";
type = "prometheus";
url = "http://localhost:${toString config.services.prometheus.port}";
}
];
};
};
secrix.system.secrets.cf-api-key.encrypted.file = ./secrets/cf-api-key.age;
systemd.services.caddy.serviceConfig.EnvironmentFile = config.secrix.system.secrets.cf-api-key.decrypted.path;
services.caddy = {
enable = true;
package = pkgs.caddy.withPlugins {
plugins = [ "github.com/caddy-dns/cloudflare@v0.2.1" ];
hash = "sha256-Gsuo+ripJSgKSYOM9/yl6Kt/6BFCA6BuTDvPdteinAI=";
};
virtualHosts."grouter.${domain}".extraConfig = ''
encode
tls {
dns cloudflare {env.CF_API_KEY}
resolvers 1.1.1.1
}
@grafana path /grafana /grafana/*
handle @grafana {
reverse_proxy localhost:${toString config.services.grafana.settings.server.http_port}
}
redir /adghome /adghome/
handle_path /adghome/* {
reverse_proxy localhost:${toString config.services.adguardhome.port}
basic_auth {
Bob $2a$14$HsWmmzQTN68K3vwiRAfiUuqIjKoXEXaj9TOLUtG2mO1vFpdovmyBy
}
}
handle /* {
reverse_proxy localhost:${toString config.services.glance.settings.server.port}
}
'';
};
}

View File

@@ -1,139 +0,0 @@
let
private = import ./private.nix;
mkIfConfig = {
name_,
domain_,
p4_, # /24
p4Size_ ? 24,
p6_, # /64
p6Size_ ? 64,
ulaPrefix_, # /64
ulaSize_ ? 64,
token ? 1,
ip6Token_ ? "::${toString token}",
ulaToken_ ? "::${toString token}",
}: rec {
name = name_;
domain = domain_;
p4 = p4_;
p4Size = p4Size_;
net4 = "${p4}.0/${toString p4Size}";
addr4 = "${p4}.${toString token}";
addr4Sized = "${addr4}/${toString p4Size}";
p6 = p6_;
p6Size = p6Size_;
net6 = "${p6}::/${toString p6Size}";
ip6Token = ip6Token_;
addr6 = "${p6}${ip6Token}";
addr6Sized = "${addr6}/${toString p6Size}";
ulaPrefix = ulaPrefix_;
ulaSize = ulaSize_;
ulaNet = "${ulaPrefix}::/${toString ulaSize}";
ulaToken = ulaToken_;
ulaAddr = "${ulaPrefix}${ulaToken}";
ulaAddrSized = "${ulaAddr}/${toString ulaSize}";
};
in
rec {
pubkey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIFobB87yYVwhuYrA+tfztLuks3s9jZOqEFktwGw1mo83 root@grouter";
domain = "cazzzer.com";
ldomain = "l.${domain}";
sysdomain = "sys.${domain}";
links = {
wanMAC = "bc:24:11:4f:c9:c4";
lanMAC = "bc:24:11:83:d8:de";
wanLL = "fe80::be24:11ff:fe4f:c9c4";
lanLL = "fe80::be24:11ff:fe83:d8de";
};
p4 = "10.17"; # .0.0/16
pdFromWan = private.pdFromWan; # ::/60
ulaPrefix = "fdab:07d3:581d"; # ::/48
ifs = rec {
wan = rec {
name = "wan";
addr4 = private.wanAddr4;
addr4Sized = "${addr4}/23";
gw4 = private.wanGw4;
};
lan = mkIfConfig {
name_ = "lan";
domain_ = "lan.${ldomain}";
p4_ = "${p4}.1"; # .0/24
p6_ = "${pdFromWan}f"; # ::/64
ulaPrefix_ = "${ulaPrefix}:0001"; # ::/64
};
lan10 = mkIfConfig {
name_ = "${lan.name}.10";
domain_ = "lab.${ldomain}";
p4_ = "${p4}.10"; # .0/24
p6_ = "${pdFromWan}e"; # ::/64
ulaPrefix_ = "${ulaPrefix}:0010"; # ::/64
};
lan20 = mkIfConfig {
name_ = "${lan.name}.20";
domain_ = "life.${ldomain}";
p4_ = "${p4}.20"; # .0/24
p6_ = "${pdFromWan}0"; # ::/64 managed by Att box
ulaPrefix_ = "${ulaPrefix}:0020"; # ::/64
ip6Token_ = "::1:1"; # override ipv6 for lan20, since the Att box uses ::1 here
};
lan30 = mkIfConfig {
name_ = "${lan.name}.30";
domain_ = "iot.${ldomain}";
p4_ = "${p4}.30"; # .0/24
p6_ = "${pdFromWan}c"; # ::/64
ulaPrefix_ = "${ulaPrefix}:0030"; # ::/64
};
lan40 = mkIfConfig {
name_ = "${lan.name}.40";
domain_ = "kube.${ldomain}";
p4_ = "${p4}.40"; # .0/24
p6_ = "${pdFromWan}b"; # ::/64
ulaPrefix_ = "${ulaPrefix}:0040"; # ::/64
};
lan50 = mkIfConfig {
name_ = "${lan.name}.50";
domain_ = "prox.${ldomain}";
p4_ = "${p4}.50"; # .0/24
p6_ = "${pdFromWan}a"; # ::/64
ulaPrefix_ = "${ulaPrefix}:0050"; # ::/64
};
wg0 = mkIfConfig {
name_ = "wg0";
domain_ = "wg0.${ldomain}";
p4_ = "10.18.16"; # .0/24
p6_ = "${pdFromWan}9:0:6"; # ::/96
p6Size_ = 96;
ulaPrefix_ = "${ulaPrefix}:0100:0:6"; # ::/96
ulaSize_ = 96;
} // {
listenPort = 51944;
};
};
extra = {
opnsense = rec {
addr4 = "${ifs.lan.p4}.250";
ulaAddr = "${ifs.lan.ulaPrefix}::250";
p6 = "${pdFromWan}d";
net6 = "${p6}::/64";
# VPN routes on opnsense
routes = [
{
Destination = "10.6.0.0/24";
Gateway = addr4;
}
{
Destination = "10.18.0.0/20";
Gateway = addr4;
}
{
Destination = net6;
Gateway = ulaAddr;
}
];
};
};
}
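
As a worked example, with 2001:db8:111:22 standing in for the private pdFromWan prefix, ifs.lan evaluates to (size and token fields elided):

{
  name = "lan";
  domain = "lan.l.cazzzer.com";
  p4 = "10.17.1";
  net4 = "10.17.1.0/24";
  addr4 = "10.17.1.1";
  addr4Sized = "10.17.1.1/24";
  p6 = "2001:db8:111:22f";
  net6 = "2001:db8:111:22f::/64";
  addr6 = "2001:db8:111:22f::1";
  addr6Sized = "2001:db8:111:22f::1/64";
  ulaPrefix = "fdab:07d3:581d:0001";
  ulaNet = "fdab:07d3:581d:0001::/64";
  ulaAddr = "fdab:07d3:581d:0001::1";
  ulaAddrSized = "fdab:07d3:581d:0001::1/64";
}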

View File

@@ -1,72 +0,0 @@
{ config, lib, pkgs, ... }:
let
vars = import ./vars.nix;
wg0 = vars.ifs.wg0;
peerIps = ifObj: token: [
"${ifObj.p4}.${toString token}/32"
"${ifObj.p6}:${toString token}:0/112"
"${ifObj.ulaPrefix}:${toString token}:0/112"
];
mkWg0Peer = token: publicKey: {
allowedIPs = peerIps wg0 token;
inherit publicKey;
pskEnabled = true;
};
wg0Peers = {
"Yura-TPX13" = mkWg0Peer 100 "iFdsPYrpw7vsFYYJB4SOTa+wxxGVcmYp9CPxe0P9ewA=";
"Yura-Pixel7Pro" = mkWg0Peer 101 "GPdXxjvnhsyufd2QX/qsR02dinUtPnnxrE66oGt/KyA=";
};
peerSecretName = name: "wg0-peer-${name}-psk";
secrets = config.secrix.services.systemd-networkd.secrets;
in
{
secrix.services.systemd-networkd.secrets = let
pskPeers = lib.attrsets.filterAttrs (name: peer: peer.pskEnabled) wg0Peers;
mapPeer = name: peer: {
name = peerSecretName name;
value.encrypted.file = ./secrets/wireguard/${peerSecretName name}.age;
};
peerSecrets = lib.attrsets.mapAttrs' mapPeer pskPeers;
allSecrets = {
wg0-private-key.encrypted.file = ./secrets/wireguard/wg0-private-key.age;
} // peerSecrets;
setSecretOwnership = name: value: value // {
decrypted.user = "systemd-network";
decrypted.group = "systemd-network";
};
in lib.attrsets.mapAttrs setSecretOwnership allSecrets;
systemd.network.netdevs = {
"10-wg0" = {
netdevConfig = {
Kind = "wireguard";
Name = wg0.name;
};
wireguardConfig = {
PrivateKeyFile = secrets.wg0-private-key.decrypted.path;
ListenPort = wg0.listenPort;
};
wireguardPeers = map (peer: {
AllowedIPs = lib.strings.concatStringsSep "," peer.value.allowedIPs;
PublicKey = peer.value.publicKey;
PresharedKeyFile = if peer.value.pskEnabled then secrets."${peerSecretName peer.name}".decrypted.path else null;
}) (lib.attrsToList wg0Peers);
};
};
systemd.network.networks = {
"10-wg0" = {
matchConfig.Name = "wg0";
networkConfig = {
IPv4Forwarding = true;
IPv6SendRA = false;
Address = [ wg0.addr4Sized wg0.addr6Sized wg0.ulaAddrSized ];
};
};
};
}
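
Each peer gets a single IPv4 host address but whole /112 ranges on the GUA and ULA sides. With the same stand-in prefix as in the vars.nix example above, peerIps wg0 100 evaluates to:

[
  "10.18.16.100/32"
  "2001:db8:111:229:0:6:100:0/112"
  "fdab:07d3:581d:0100:0:6:100:0/112"
]

(Note that the decimal token becomes a hex group once spliced into the IPv6 strings.)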