diff --git a/hosts/default.nix b/hosts/default.nix
index 93e1f41..fdc4bbc 100644
--- a/hosts/default.nix
+++ b/hosts/default.nix
@@ -70,9 +70,9 @@ let
   #vf = input-views.inputs-edge.nixos-vf2.nixosModules;
   # nspawn = ls [ "${nixos-nspawn}/nspawn-image.nix" { boot.isContainer = true; } ];
   au = ./../profiles/auto-upgrade.nix;
-  ts1 = ./../profiles/tailscale-inner.nix;
-  ts2 = ./../profiles/tailscale-outer.nix;
-  tse = ./../profiles/tailscale-exit-node.nix;
+  ts1 = ./../profiles/tailscale/inner.nix;
+  ts2 = ./../profiles/tailscale/outer.nix;
+  tse = ./../profiles/tailscale/exit-node.nix;
   #rb = ./../profiles/known-hosts; # TODO
   dns64 = { config, ... }: {
     networking.nameservers = [ "2001:700:1:11::2:51" ]; # dns64.uninett.no
diff --git a/hosts/nixos/noximilien/configuration.nix b/hosts/nixos/noximilien/configuration.nix
index b55f84f..98d22de 100644
--- a/hosts/nixos/noximilien/configuration.nix
+++ b/hosts/nixos/noximilien/configuration.nix
@@ -33,6 +33,7 @@
     ../../../profiles/http # enables nginx+acme, defines mkDomain
     ../../../profiles/http/index
+    # ../../../profiles/http/services/cache-proxy
     # ../../../profiles/http/services/attic.nix
     # ../../../profiles/http/services/cinny.nix
     # ../../../profiles/http/services/element.nix
diff --git a/profiles/base/binary-caches.nix b/profiles/base/binary-caches.nix
index 6d091f1..816d2e5 100644
--- a/profiles/base/binary-caches.nix
+++ b/profiles/base/binary-caches.nix
@@ -26,6 +26,9 @@ let
     "https://cache.flox.dev/?priority=45" = [
       "flox-cache-public-1:7F4OyH7ZCnFhcze3fJdfyXYLQw/aV7GEed86nQ7IsOs="
     ];
+
+    # me!
+    # "cache-proxy.pbsds.net" = [ ];
   };
 
 in
diff --git a/profiles/http/default.nix b/profiles/http/default.nix
index b5e1cb3..554e537 100644
--- a/profiles/http/default.nix
+++ b/profiles/http/default.nix
@@ -3,32 +3,46 @@ let
   inherit (config.pbsds.nginx) mkDomain allSubdomains;
 in
 {
-  options.pbsds.nginx.mkDomain = lib.mkOption {
-    visible = false; internal = true; readOnly = true;
-    default = subname: "${subname}.${config.networking.fqdn}";
-  };
-  options.pbsds.nginx.allSubdomains = lib.mkOption {
-    visible = false; internal = true; readOnly = true;
-    default = lib.pipe config.services.nginx.virtualHosts [
-      # #(lib.mapAttrsToList (domain: vhost: [ domain ] ++ vhost.serverAliases))
-      # (lib.mapAttrsToList (domain: vhost: [ domain ]))
-      # lib.flatten
-      lib.attrNames
-      (lib.filter (domain: domain != "" && domain != "_"))
-      (lib.sort (x: y: x < y))
-    ];
-  };
diff --git a/profiles/http/services/cache-proxy/default.nix b/profiles/http/services/cache-proxy/default.nix
new file mode 100644
--- /dev/null
+++ b/profiles/http/services/cache-proxy/default.nix
@@ -0,0 +1,73 @@
+{ config, lib, ... }:
+
+let
+  /*
+    keep only the substituters worth proxying: [str] -> [str]
+  */
+  filter-caches =
+    let
+      blacklist = [
+        "https://cache.nixos.org/"
+        "http://${config.services.ncps.server.addr}"
+        "http://${config.services.ncps.cache.hostName}"
+        "https://${config.services.ncps.cache.hostName}"
+      ];
+    in
+    lib.filter (cacheAddr: !(builtins.elem cacheAddr blacklist));
+
+in
+
+{
+
+  # based on
+  # - https://discourse.nixos.org/t/announcing-ncps-a-nix-cache-proxy-server-for-faster-builds/58166
+  # - https://github.com/kalbasit/ncps
+  # - https://search.nixos.org/options?query=services.ncps
+  # - https://github.com/msfjarvis/dotfiles/blob/2dc0b9abc40b6af757b18f3f687fe205c96ef87c/modules/nixos/ncps/default.nix
+  # - https://github.com/Ramblurr/nixcfg/blob/2901418935895f86ea84a881e5571813c6370f11/hosts/mali/ncps.nix
+  # - https://github.com/numinit/MeshOS/blob/e30e3902a8d6d63d5eda031f58aff8c0b6bc9c94/nixos/modules/caches.nix
+
+  services.domeneshop-updater.targets = lib.mkIf config.services.ncps.enable [
+    config.services.ncps.cache.hostName
+  ];
+
+  services.ncps = {
+    enable = !config.virtualisation.isVmVariant;
+    # logLevel = "info"; # default is "info"
+    server.addr = "127.0.0.1:8876";
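+    # loopback only; the nginx vhost below terminates TLS and proxies here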
"127.0.0.1:8876"; + # prometheus.enable = true; + + cache = { + hostName = "cache-proxy.pbsds.net"; + # hostName = mkDomain "cache-proxy"; + # hostName = config.pbsds.tailscale.fqdn; + + dataPath = "/mnt/meconium/blob/ncps"; # will be automatically chowned (systemd ReadWritePaths) + # tempPath = ""; # defaults to "/tmp" + maxSize = "50G"; + # TODO: + secretKeyPath = TODO; # config.sops.secrets.ncps-private-key.path; + + allowPutVerb = false; + allowDeleteVerb = false; + # lru = { scheduleTimeZone = "Europe/Oslo"; schedule = "00 08 * * *"; }; # 8 AM daily + }; + + upstream.caches = filter-caches config.nix.settings.trusted-substituters; + upstream.publicKeys = config.nix.settings.trusted-public-keys; + }; + + services.nginx.virtualHosts.${config.services.ncps.cache.hostName} = { + # addSSL = true; + forceSSL = true; + addSSL = true; + enableACME = true; # useACMEHost = acmeDomain; + # serverAliases = [ "binarycache" ]; + # serverAliases = [ config.pbsds.tailscale.fqdn ]; + locations."/" = { + inherit (config.pbsds.nginx.allowList) extraConfig; + # proxyPass = "http://localhost:${toString config.services.nix-serve.port}"; + }; + }; + +} diff --git a/profiles/http/services/cache-proxy/nginx-proxy_store-approach.nix b/profiles/http/services/cache-proxy/nginx-proxy_store-approach.nix new file mode 100644 index 0000000..967c93b --- /dev/null +++ b/profiles/http/services/cache-proxy/nginx-proxy_store-approach.nix @@ -0,0 +1,52 @@ +{ + config, + lib, + ... +}: +let + # domain = config.pbsds.nginx.mkDomain "cache-proxy"; + domain = "cache-proxy.pbsds.net"; +in +{ + services.domeneshop-updater.targets = [ domain ]; + + # based on + # - https://wiki.nixos.org/wiki/FAQ/Private_Cache_Proxy + # - https://github.com/a1994sc/nixos-configs/blob/7927960877fc311745e55118823321851a618d80/archive/modules/binary-cache.nix#L42 + # never tested, since this doesn't support multiple upstream caches + + services.nginx.virtualHosts.${domain} = { + forceSSL = true; # addSSL = true; + enableACME = true; # useACMEHost = acmeDomain; + # serverAliases = [ "binarycache" ]; + locations."/" = { + inherit (config.pbsds.nginx.allowList) extraConfig; + # proxyPass = "http://localhost:${toString config.services.nix-serve.port}"; + # extraConfig = '' + # proxy_pass http://localhost:${toString config.services.nix-serve.port}; + # proxy_set_header Host $host; + # proxy_set_header X-Real-IP $remote_addr; + # proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + # ''; + }; + + # based on https://wiki.nixos.org/wiki/FAQ/Private_Cache_Proxy + locations."~ ^/nix-cache-info".extraConfig = '' + proxy_store on; + proxy_store_access user:rw group:rw all:r; + proxy_temp_path /mnt/nginx/nix-cache-info/temp; + root /mnt/nginx/nix-cache-info/store; + proxy_set_header Host "cache.nixos.org"; + proxy_pass https://cache.nixos.org; + ''; + locations."~ ^/nar/.+$".extraConfig = '' + proxy_store on; + proxy_store_access user:rw group:rw all:r; + proxy_temp_path /mnt/nginx/nar/temp; + root /mnt/nginx/nar/store; + + proxy_set_header Host "cache.nixos.org"; + proxy_pass https://cache.nixos.org; + ''; + }; +} diff --git a/profiles/http/services/cache-proxy/secrets.yaml b/profiles/http/services/cache-proxy/secrets.yaml new file mode 100644 index 0000000..e69de29 diff --git a/profiles/tailscale-exit-node.nix b/profiles/tailscale-exit-node.nix deleted file mode 100644 index 0461afe..0000000 --- a/profiles/tailscale-exit-node.nix +++ /dev/null @@ -1,27 +0,0 @@ -{ config, ... 
+    locations."~ ^/nix-cache-info".extraConfig = ''
+      proxy_store on;
+      proxy_store_access user:rw group:rw all:r;
+      proxy_temp_path /mnt/nginx/nix-cache-info/temp;
+      root /mnt/nginx/nix-cache-info/store;
+      proxy_set_header Host "cache.nixos.org";
+      proxy_pass https://cache.nixos.org;
+    '';
+    locations."~ ^/nar/.+$".extraConfig = ''
+      proxy_store on;
+      proxy_store_access user:rw group:rw all:r;
+      proxy_temp_path /mnt/nginx/nar/temp;
+      root /mnt/nginx/nar/store;
+
+      proxy_set_header Host "cache.nixos.org";
+      proxy_pass https://cache.nixos.org;
+    '';
+  };
+}
diff --git a/profiles/http/services/cache-proxy/secrets.yaml b/profiles/http/services/cache-proxy/secrets.yaml
new file mode 100644
index 0000000..e69de29
diff --git a/profiles/tailscale-exit-node.nix b/profiles/tailscale-exit-node.nix
deleted file mode 100644
index 0461afe..0000000
--- a/profiles/tailscale-exit-node.nix
+++ /dev/null
@@ -1,27 +0,0 @@
-{ config, ... }:
-
-let
-  exitNodeFlags = [
-    "--advertise-exit-node"
-    # "--exit-node-allow-lan-access"
-    # "--exit-node-allow-incoming-wan-access"
-  ];
-in
-
-{
-  # exit nodes must be approved in admin interface
-  # https://login.tailscale.com/admin/machines
-  /* imports = [ ./tailscale-inner.nix ]; */
-
-  # if host is _upgraded_ to exit node, reload with
-  # sudo systemctl start tailscaled-autoconnect
-  # or maybe even
-  # sudo systemctl start tailscaled-set
-  services.tailscale.useRoutingFeatures = "both";
-  services.tailscale.extraSetFlags = exitNodeFlags;
-  services.tailscale.extraUpFlags = exitNodeFlags;
-
-  # # Strict reverse path filtering breaks Tailscale exit node use and some subnet routing setups
-  # # https://github.com/tailscale/tailscale/issues/4432#issuecomment-1112819111
-  # networking.firewall.checkReversePath = "loose";
-}
diff --git a/profiles/tailscale-inner.nix b/profiles/tailscale-inner.nix
deleted file mode 100644
index 746c047..0000000
--- a/profiles/tailscale-inner.nix
+++ /dev/null
@@ -1,42 +0,0 @@
-{ config, lib, pkgs, ... }:
-
-let
-  inherit (config.services.tailscale) interfaceName;
-in
-
-lib.mkIf (!config.virtualisation.isVmVariant)
-
-# DERP is a relay system that Tailscale uses when a direct connection cannot be established.
-# https://tailscale.com/blog/how-tailscale-works/#encrypted-tcp-relays-derp
-
-{
-  # https://login.tailscale.com/admin/machines
-
-  services.tailscale.enable = true;
-  # https://tailscale.com/kb/1085/auth-keys
-  services.tailscale.authKeyFile = config.sops.secrets.tailscale-authkey-inner.path; # also enables autoconnect
-  sops.secrets.tailscale-authkey-inner.sopsFile = ../secrets/tailscale-inner.yaml;
-
-  # https://wiki.nixos.org/wiki/Tailscale#DNS
-  services.resolved.enable = lib.mkDefault config.networking.networkmanager.enable;
-
-  # Strict reverse path filtering breaks Tailscale exit node use and some subnet routing setups
-  # https://wiki.nixos.org/wiki/Tailscale#No_internet_when_using_exit_node
-  # https://github.com/tailscale/tailscale/issues/4432#issuecomment-1112819111
-  networking.firewall.checkReversePath = lib.mkDefault "loose";
-
-  # TODO: why do people do this?
-  # networking.firewall.trustedInterfaces = [ interfaceName ];
-
-  # done in profiles/sshd/ts-only.nix:
-  # networking.firewall.interfaces.${interfaceName} = {
-  #   allowedTCPPorts = [ 22 ];
-  # };
-
-  # environment.systemPackages = lib.mkMerge [
-  #   (lib.mkIf config.services.desktopManager.gnome.enable [
-  #     pkgs.ktailctl
-  #   ])
-  # ];
-
-}
diff --git a/profiles/tailscale-outer.nix b/profiles/tailscale-outer.nix
deleted file mode 100644
index 1960bac..0000000
--- a/profiles/tailscale-outer.nix
+++ /dev/null
@@ -1,42 +0,0 @@
-{ config, lib, pkgs, ... }:
-
-let
-  inherit (config.services.tailscale) interfaceName;
-in
-
-lib.mkIf (!config.virtualisation.isVmVariant)
-
-# DERP is a relay system that Tailscale uses when a direct connection cannot be established.
-# https://tailscale.com/blog/how-tailscale-works/#encrypted-tcp-relays-derp
-
-{
-  # https://login.tailscale.com/admin/machines
-
-  services.tailscale.enable = true;
-  # https://tailscale.com/kb/1085/auth-keys
-  services.tailscale.authKeyFile = config.sops.secrets.tailscale-authkey-outer.path; # also enables autoconnect
-  sops.secrets.tailscale-authkey-outer.sopsFile = ../secrets/tailscale-outer.yaml;
-
-  # https://wiki.nixos.org/wiki/Tailscale#DNS
-  services.resolved.enable = lib.mkDefault config.networking.networkmanager.enable;
-
-  # Strict reverse path filtering breaks Tailscale exit node use and some subnet routing setups
-  # https://wiki.nixos.org/wiki/Tailscale#No_internet_when_using_exit_node
-  # https://github.com/tailscale/tailscale/issues/4432#issuecomment-1112819111
-  networking.firewall.checkReversePath = lib.mkDefault "loose";
-
-  # TODO: why do people do this?
-  # networking.firewall.trustedInterfaces = [ interfaceName ];
-
-  # done in profiles/sshd/ts-only.nix:
-  # networking.firewall.interfaces.${interfaceName} = {
-  #   allowedTCPPorts = [ 22 ];
-  # };
-
-  # environment.systemPackages = lib.mkMerge [
-  #   (lib.mkIf config.services.desktopManager.gnome.enable [
-  #     pkgs.ktailctl
-  #   ])
-  # ];
-
-}
diff --git a/profiles/tailscale/exit-node.nix b/profiles/tailscale/exit-node.nix
new file mode 100644
index 0000000..c6a1ffb
--- /dev/null
+++ b/profiles/tailscale/exit-node.nix
@@ -0,0 +1,33 @@
+{ config, lib, ... }:
+
+let
+  exitNodeFlags = [
+    "--advertise-exit-node"
+    # "--exit-node-allow-lan-access"
+    # "--exit-node-allow-incoming-wan-access"
+  ];
+in
+
+{
+  # exit nodes must be approved in admin interface
+  # https://login.tailscale.com/admin/machines
+  /* imports = [ ./inner.nix ]; */
+
+  config = lib.mkIf (!config.virtualisation.isVmVariant) {
+
+    # if host is _upgraded_ to exit node, reload with
+    # sudo systemctl start tailscaled-autoconnect
+    # or maybe even
+    # sudo systemctl start tailscaled-set
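+    # "both" turns on IP forwarding (needed to advertise an exit node) and
+    # loose reverse path filtering (needed to use other exit nodes as a client)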
+    services.tailscale.useRoutingFeatures = "both";
+    services.tailscale.extraSetFlags = exitNodeFlags;
+    services.tailscale.extraUpFlags = exitNodeFlags;
+
+    # # Strict reverse path filtering breaks Tailscale exit node use and some subnet routing setups
+    # # https://github.com/tailscale/tailscale/issues/4432#issuecomment-1112819111
+    # networking.firewall.checkReversePath = "loose";
+
+  };
+}
diff --git a/profiles/tailscale/inner.nix b/profiles/tailscale/inner.nix
new file mode 100644
index 0000000..ae7b3ea
--- /dev/null
+++ b/profiles/tailscale/inner.nix
@@ -0,0 +1,21 @@
+{ config, lib, ... }:
+{
+  # https://login.tailscale.com/admin/machines
+
+  imports = [ ./shared.nix ];
+
+  config = lib.mkIf (!config.virtualisation.isVmVariant) {
+
+    # https://tailscale.com/kb/1085/auth-keys
+    services.tailscale.authKeyFile = config.sops.secrets.tailscale-authkey-inner.path; # also enables autoconnect
+    sops.secrets.tailscale-authkey-inner.sopsFile = ../../secrets/tailscale-inner.yaml;
+
+    # systemd-resolved will by default read /etc/hosts
+    networking.extraHosts = ''
+      # 100.113.27.44 cache-proxy.pbsds.net (noximilien over tailscale)
+    '';
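+    # uncommenting the entry above would pin the cache's hostname to
+    # noximilien's tailnet address, bypassing whatever public DNS serves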
+
+  };
+}
diff --git a/profiles/tailscale/outer.nix b/profiles/tailscale/outer.nix
new file mode 100644
index 0000000..32fc865
--- /dev/null
+++ b/profiles/tailscale/outer.nix
@@ -0,0 +1,14 @@
+{ config, lib, ... }:
+{
+  # https://login.tailscale.com/admin/machines
+
+  imports = [ ./shared.nix ];
+
+  config = lib.mkIf (!config.virtualisation.isVmVariant) {
+
+    # https://tailscale.com/kb/1085/auth-keys
+    services.tailscale.authKeyFile = config.sops.secrets.tailscale-authkey-outer.path; # also enables autoconnect
+    sops.secrets.tailscale-authkey-outer.sopsFile = ../../secrets/tailscale-outer.yaml;
+
+  };
+}
diff --git a/profiles/tailscale/shared.nix b/profiles/tailscale/shared.nix
new file mode 100644
index 0000000..49ccecb
--- /dev/null
+++ b/profiles/tailscale/shared.nix
@@ -0,0 +1,54 @@
+{ config, lib, pkgs, ... }:
+
+let
+  inherit (config.services.tailscale) interfaceName; # "tailscale0"
+in
+
+# DERP is a relay system that Tailscale uses when a direct connection cannot be established.
+# https://tailscale.com/blog/how-tailscale-works/#encrypted-tcp-relays-derp
+
+{
+  # https://login.tailscale.com/admin/machines
+
+  options.pbsds.tailscale.fqdn = lib.mkOption {
+    visible = false; internal = true; readOnly = true;
+    default = "${config.networking.hostName}.tail9aac63.ts.net";
+  };
+
+  config = lib.mkIf (!config.virtualisation.isVmVariant) {
+
+    services.tailscale.enable = true;
+
+    networking.extraHosts = ''
+      # the entire 127.0.0.0/8 is loopback
+      127.0.0.2 ${config.pbsds.tailscale.fqdn}
+    '';
+
+    # # https://tailscale.com/kb/1085/auth-keys
+    # services.tailscale.authKeyFile = config.sops.secrets.tailscale-authkey-inner.path; # also enables autoconnect
+    # sops.secrets.tailscale-authkey-inner.sopsFile = ../secrets/tailscale-inner.yaml;
+
+    # https://wiki.nixos.org/wiki/Tailscale#DNS
+    services.resolved.enable = lib.mkDefault config.networking.networkmanager.enable;
+
+    # Strict reverse path filtering breaks Tailscale exit node use and some subnet routing setups
+    # https://wiki.nixos.org/wiki/Tailscale#No_internet_when_using_exit_node
+    # https://github.com/tailscale/tailscale/issues/4432#issuecomment-1112819111
+    networking.firewall.checkReversePath = lib.mkDefault "loose";
+
+    # TODO: why do people do this?
+    # networking.firewall.trustedInterfaces = [ interfaceName ];
+
+    # done in profiles/sshd/ts-only.nix:
+    # networking.firewall.interfaces.${interfaceName} = {
+    #   allowedTCPPorts = [ 22 ];
+    # };
+
+    # environment.systemPackages = lib.mkMerge [
+    #   (lib.mkIf config.services.desktopManager.gnome.enable [
+    #     pkgs.ktailctl
+    #   ])
+    # ];
+
+  };
+}