profiles: split tailscale into tailscale/{shared,inner,outer,exit-node}.nix; add ncps cache-proxy; fold fail2ban ignoreIP into the nginx allow list

2025-11-07 14:48:26 +01:00
parent 41e1915f15
commit d0eff972df
14 changed files with 291 additions and 139 deletions

View File

@@ -70,9 +70,9 @@ let
#vf = input-views.inputs-edge.nixos-vf2.nixosModules;
# nspawn = ls [ "${nixos-nspawn}/nspawn-image.nix" { boot.isContainer = true; } ];
au = ./../profiles/auto-upgrade.nix;
ts1 = ./../profiles/tailscale-inner.nix;
ts2 = ./../profiles/tailscale-outer.nix;
tse = ./../profiles/tailscale-exit-node.nix;
ts1 = ./../profiles/tailscale/inner.nix;
ts2 = ./../profiles/tailscale/outer.nix;
tse = ./../profiles/tailscale/exit-node.nix;
#rb = ./../profiles/known-hosts; # TODO
dns64 = { config, ... }: {
networking.nameservers = [ "2001:700:1:11::2:51" ]; # dns64.uninett.no

View File

@@ -33,6 +33,7 @@
../../../profiles/http # enables nginx+acme, defines mkDomain
../../../profiles/http/index
# ../../../profiles/http/services/cache-proxy
# ../../../profiles/http/services/attic.nix
# ../../../profiles/http/services/cinny.nix
# ../../../profiles/http/services/element.nix

View File

@@ -26,6 +26,9 @@ let
"https://cache.flox.dev/?priority=45" = [
"flox-cache-public-1:7F4OyH7ZCnFhcze3fJdfyXYLQw/aV7GEed86nQ7IsOs="
];
# me!
# "cache-proxy.pbsds.net" = [ ];
};
in
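
The commented-out cache-proxy entry above is a placeholder for the ncps proxy introduced later in this commit. Once the proxy is live and signing narinfos, the entry would presumably follow the same shape as the existing caches; a minimal sketch, where the priority and the public key are placeholders rather than real values:

  # hypothetical entry for the ncps proxy added in this commit
  "https://cache-proxy.pbsds.net/?priority=30" = [
    "cache-proxy.pbsds.net-1:<ncps public signing key>"
  ];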

View File

@@ -3,32 +3,46 @@ let
inherit (config.pbsds.nginx) mkDomain allSubdomains;
in
{
options.pbsds.nginx.mkDomain = lib.mkOption {
visible = false; internal = true; readOnly = true;
default = subname: "${subname}.${config.networking.fqdn}";
};
options.pbsds.nginx.allSubdomains = lib.mkOption {
visible = false; internal = true; readOnly = true;
default = lib.pipe config.services.nginx.virtualHosts [
# #(lib.mapAttrsToList (domain: vhost: [ domain ] ++ vhost.serverAliases))
# (lib.mapAttrsToList (domain: vhost: [ domain ]))
# lib.flatten
lib.attrNames
(lib.filter (domain: domain != "" && domain != "_"))
(lib.sort (x: y: x<y))
];
};
options.pbsds.nginx.allowList.extraConfig = lib.mkOption {
visible = false; internal = true; readOnly = true;
default = ''
# home https://ipinfo.io/ips/193.71.0.0/16
allow 193.71.0.0/16;
# ntnu https://ipinfo.io/ips/129.241.0.0/16
allow 129.241.0.0/16;
deny all;
'';
};
options.pbsds.nginx = {
mkDomain = lib.mkOption {
visible = false; internal = true; readOnly = true;
default = subname: "${subname}.${config.networking.fqdn}";
};
allSubdomains = lib.mkOption {
visible = false; internal = true; readOnly = true;
default = lib.pipe config.services.nginx.virtualHosts [
# #(lib.mapAttrsToList (domain: vhost: [ domain ] ++ vhost.serverAliases))
# (lib.mapAttrsToList (domain: vhost: [ domain ]))
# lib.flatten
lib.attrNames
(lib.filter (domain: domain != "" && domain != "_"))
(lib.sort (x: y: x<y))
];
};
allowList.extraConfig = lib.mkOption {
visible = false; internal = true; readOnly = true;
default = ''
# home https://ipinfo.io/ips/193.71.0.0/16
allow 193.71.0.0/16;
# ntnu https://ipinfo.io/ips/129.241.0.0/16
allow 129.241.0.0/16;
# local
${
lib.pipe config.services.fail2ban.ignoreIP [
(map (x: "allow ${x};"))
lib.concatLines
]
}
deny all;
'';
};
};
config = {
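
Because allowList.extraConfig now also whitelists everything in services.fail2ban.ignoreIP, a trusted range only has to be declared once to be both exempt from fail2ban and allowed through the nginx allow list. A minimal sketch, using the Tailscale CGNAT range purely as an example (not part of this commit):

  # hypothetical: this range is now both ignored by fail2ban and rendered
  # as an nginx "allow" rule by the pipe above
  services.fail2ban.ignoreIP = [ "100.64.0.0/10" ];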

View File

@@ -0,0 +1,76 @@
{ lib, config, ... }:
let
inherit (config.pbsds.nginx) mkDomain;
/**
filter-caches :: [str] -> [str]
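Removes cache.nixos.org and this host's own ncps listen address and
cache hostname from a substituter list, so the proxy never lists itself
as one of its own upstreams; used below to derive upstream.caches from
nix.settings.trusted-substituters.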
*/
filter-caches =
let
blacklist = [
"https://cache.nixos.org/"
"http://${config.services.ncps.server.addr}"
"http://${config.services.ncps.cache.hostName}"
"https://${config.services.ncps.cache.hostName}"
];
in
lib.filter (cacheAddr: !(builtins.elem cacheAddr blacklist));
in
{
# based on
# - https://discourse.nixos.org/t/announcing-ncps-a-nix-cache-proxy-server-for-faster-builds/58166
# - https://github.com/kalbasit/ncps
# - https://search.nixos.org/options?query=services.ncps
# - https://github.com/msfjarvis/dotfiles/blob/2dc0b9abc40b6af757b18f3f687fe205c96ef87c/modules/nixos/ncps/default.nix
# - https://github.com/Ramblurr/nixcfg/blob/2901418935895f86ea84a881e5571813c6370f11/hosts/mali/ncps.nix
# - https://github.com/numinit/MeshOS/blob/e30e3902a8d6d63d5eda031f58aff8c0b6bc9c94/nixos/modules/caches.nix
services.domeneshop-updater.targets = lib.mkIf config.services.ncps.enable [
config.services.ncps.cache.hostName
];
services.ncps = {
enable = !config.virtualisation.isVmVariant;
# logLevel = "info"; # default is "info"
server.addr = "127.0.0.1:8876";
# prometheus.enable = true;
cache = {
hostName = "cache-proxy.pbsds.net";
# hostName = mkDomain "cache-proxy";
# hostName = config.pbsds.tailscale.fqdn;
dataPath = "/mnt/meconium/blob/ncps"; # will be automatically chowned (systemd ReadWritePaths)
# tempPath = ""; # defaults to "/tmp"
maxSize = "50G";
# TODO: provision this sops secret (see the sketch after this module)
secretKeyPath = config.sops.secrets.ncps-private-key.path;
allowPutVerb = false;
allowDeleteVerb = false;
# lru = { scheduleTimeZone = "Europe/Oslo"; schedule = "00 08 * * *"; }; # 8 AM daily
};
upstream.caches = filter-caches config.nix.settings.trusted-substituters;
upstream.publicKeys = config.nix.settings.trusted-public-keys;
};
services.nginx.virtualHosts.${config.services.ncps.cache.hostName} = {
# addSSL = true; # mutually exclusive with forceSSL
forceSSL = true;
enableACME = true; # useACMEHost = acmeDomain;
# serverAliases = [ "binarycache" ];
# serverAliases = [ config.pbsds.tailscale.fqdn ];
locations."/" = {
inherit (config.pbsds.nginx.allowList) extraConfig;
# proxyPass = "http://localhost:${toString config.services.nix-serve.port}";
};
};
}
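
The secretKeyPath above refers to a sops secret that is not declared anywhere in this commit. A minimal sketch of what that declaration could look like, keeping the secret name from the comment in the service definition; the sops file path and permissions are guesses, not part of the original config:

  # hypothetical sops wiring for the ncps signing key
  sops.secrets.ncps-private-key = {
    sopsFile = ../secrets/ncps.yaml; # placeholder path
    # ownership/permissions omitted; adjust so the ncps service can read it
  };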

View File

@@ -0,0 +1,52 @@
{
config,
lib,
...
}:
let
# domain = config.pbsds.nginx.mkDomain "cache-proxy";
domain = "cache-proxy.pbsds.net";
in
{
services.domeneshop-updater.targets = [ domain ];
# based on
# - https://wiki.nixos.org/wiki/FAQ/Private_Cache_Proxy
# - https://github.com/a1994sc/nixos-configs/blob/7927960877fc311745e55118823321851a618d80/archive/modules/binary-cache.nix#L42
# never tested, since this doesn't support multiple upstream caches
services.nginx.virtualHosts.${domain} = {
forceSSL = true; # addSSL = true;
enableACME = true; # useACMEHost = acmeDomain;
# serverAliases = [ "binarycache" ];
locations."/" = {
inherit (config.pbsds.nginx.allowList) extraConfig;
# proxyPass = "http://localhost:${toString config.services.nix-serve.port}";
# extraConfig = ''
# proxy_pass http://localhost:${toString config.services.nix-serve.port};
# proxy_set_header Host $host;
# proxy_set_header X-Real-IP $remote_addr;
# proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
# '';
};
# based on https://wiki.nixos.org/wiki/FAQ/Private_Cache_Proxy
locations."~ ^/nix-cache-info".extraConfig = ''
proxy_store on;
proxy_store_access user:rw group:rw all:r;
proxy_temp_path /mnt/nginx/nix-cache-info/temp;
root /mnt/nginx/nix-cache-info/store;
proxy_set_header Host "cache.nixos.org";
proxy_pass https://cache.nixos.org;
'';
locations."~ ^/nar/.+$".extraConfig = ''
proxy_store on;
proxy_store_access user:rw group:rw all:r;
proxy_temp_path /mnt/nginx/nar/temp;
root /mnt/nginx/nar/store;
proxy_set_header Host "cache.nixos.org";
proxy_pass https://cache.nixos.org;
'';
};
}
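
proxy_store and proxy_temp_path above write into /mnt/nginx/..., but nothing in this commit creates those directories. A hedged sketch of how they could be provisioned, assuming the stock NixOS nginx user and group (not part of the original config):

  # hypothetical: pre-create the proxy_store / proxy_temp_path directories
  systemd.tmpfiles.rules = [
    "d /mnt/nginx/nix-cache-info/temp  0755 nginx nginx -"
    "d /mnt/nginx/nix-cache-info/store 0755 nginx nginx -"
    "d /mnt/nginx/nar/temp             0755 nginx nginx -"
    "d /mnt/nginx/nar/store            0755 nginx nginx -"
  ];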

View File

@@ -1,27 +0,0 @@
{ config, ... }:
let
exitNodeFlags = [
"--advertise-exit-node"
# "--exit-node-allow-lan-access"
# "--exit-node-allow-incoming-wan-access"
];
in
{
# exit nodes must be approved in admin interface
# https://login.tailscale.com/admin/machines
/* imports = [ ./tailscale-inner.nix ]; */
# if host is _upgraded_ to exit node, reload with
# sudo systemctl start tailscaled-autoconnect
# or maybe even
# sudo systemctl start tailscaled-set
services.tailscale.useRoutingFeatures = "both";
services.tailscale.extraSetFlags = exitNodeFlags;
services.tailscale.extraUpFlags = exitNodeFlags;
# # Strict reverse path filtering breaks Tailscale exit node use and some subnet routing setups
# # https://github.com/tailscale/tailscale/issues/4432#issuecomment-1112819111
# networking.firewall.checkReversePath = "loose";
}

View File

@@ -1,42 +0,0 @@
{ config, lib, pkgs, ... }:
let
inherit (config.services.tailscale) interfaceName;
in
lib.mkIf (!config.virtualisation.isVmVariant)
# DERP is a relay system that Tailscale uses when a direct connection cannot be established.
# https://tailscale.com/blog/how-tailscale-works/#encrypted-tcp-relays-derp
{
# https://login.tailscale.com/admin/machines
services.tailscale.enable = true;
# https://tailscale.com/kb/1085/auth-keys
services.tailscale.authKeyFile = config.sops.secrets.tailscale-authkey-inner.path; # also enables autoconnect
sops.secrets.tailscale-authkey-inner.sopsFile = ../secrets/tailscale-inner.yaml;
# https://wiki.nixos.org/wiki/Tailscale#DNS
services.resolved.enable = lib.mkDefault config.networking.networkmanager.enable;
# Strict reverse path filtering breaks Tailscale exit node use and some subnet routing setups
# https://wiki.nixos.org/wiki/Tailscale#No_internet_when_using_exit_node
# https://github.com/tailscale/tailscale/issues/4432#issuecomment-1112819111
networking.firewall.checkReversePath = lib.mkDefault "loose";
# TODO: why do people do this?
# networking.firewall.trustedInterfaces = [ interfaceName ];
# done in profiles/sshd/ts-only.nix:
# networking.firewall.interfaces.${interfaceName} = {
# allowedTCPPorts = [ 22 ];
# };
# environment.systemPackages = lib.mkMerge [
# (lib.mkIf config.services.desktopManager.gnome.enable [
# pkgs.ktailctl
# ])
# ];
}

View File

@@ -1,42 +0,0 @@
{ config, lib, pkgs, ... }:
let
inherit (config.services.tailscale) interfaceName;
in
lib.mkIf (!config.virtualisation.isVmVariant)
# DERP is a relay system that Tailscale uses when a direct connection cannot be established.
# https://tailscale.com/blog/how-tailscale-works/#encrypted-tcp-relays-derp
{
# https://login.tailscale.com/admin/machines
services.tailscale.enable = true;
# https://tailscale.com/kb/1085/auth-keys
services.tailscale.authKeyFile = config.sops.secrets.tailscale-authkey-outer.path; # also enables autoconnect
sops.secrets.tailscale-authkey-outer.sopsFile = ../secrets/tailscale-outer.yaml;
# https://wiki.nixos.org/wiki/Tailscale#DNS
services.resolved.enable = lib.mkDefault config.networking.networkmanager.enable;
# Strict reverse path filtering breaks Tailscale exit node use and some subnet routing setups
# https://wiki.nixos.org/wiki/Tailscale#No_internet_when_using_exit_node
# https://github.com/tailscale/tailscale/issues/4432#issuecomment-1112819111
networking.firewall.checkReversePath = lib.mkDefault "loose";
# TODO: why do people do this?
# networking.firewall.trustedInterfaces = [ interfaceName ];
# done in profiles/sshd/ts-only.nix:
# networking.firewall.interfaces.${interfaceName} = {
# allowedTCPPorts = [ 22 ];
# };
# environment.systemPackages = lib.mkMerge [
# (lib.mkIf config.services.desktopManager.gnome.enable [
# pkgs.ktailctl
# ])
# ];
}

View File

@@ -0,0 +1,31 @@
{ config, lib, ... }:
let
exitNodeFlags = [
"--advertise-exit-node"
# "--exit-node-allow-lan-access"
# "--exit-node-allow-incoming-wan-access"
];
in
{
# exit nodes must be approved in admin interface
# https://login.tailscale.com/admin/machines
/* imports = [ ./inner.nix ]; */
config = lib.mkIf (!config.virtualisation.isVmVariant) {
# if host is _upgraded_ to exit node, reload with
# sudo systemctl start tailscaled-autoconnect
# or maybe even
# sudo systemctl start tailscaled-set
services.tailscale.useRoutingFeatures = "both";
services.tailscale.extraSetFlags = exitNodeFlags;
services.tailscale.extraUpFlags = exitNodeFlags;
# # Strict reverse path filtering breaks Tailscale exit node use and some subnet routing setups
# # https://github.com/tailscale/tailscale/issues/4432#issuecomment-1112819111
# networking.firewall.checkReversePath = "loose";
};
}

View File

@@ -0,0 +1,19 @@
{ config, lib, ... }:
{
# https://login.tailscale.com/admin/machines
imports = [ ./shared.nix ];
config = lib.mkIf (!config.virtualisation.isVmVariant) {
# https://tailscale.com/kb/1085/auth-keys
services.tailscale.authKeyFile = config.sops.secrets.tailscale-authkey-inner.path; # also enables autoconnect
sops.secrets.tailscale-authkey-inner.sopsFile = ../secrets/tailscale-inner.yaml;
# systemd-resolved will by default read /etc/hosts
networking.extraHosts = ''
  # 100.113.27.44 cache-proxy.pbsds.net # noximilien over tailscale
'';
};
}

View File

@@ -0,0 +1,14 @@
{ config, lib, ... }:
{
# https://login.tailscale.com/admin/machines
imports = [ ./shared.nix ];
config = lib.mkIf (!config.virtualisation.isVmVariant) {
# https://tailscale.com/kb/1085/auth-keys
services.tailscale.authKeyFile = config.sops.secrets.tailscale-authkey-outer.path; # also enables autoconnect
sops.secrets.tailscale-authkey-outer.sopsFile = ../secrets/tailscale-outer.yaml;
};
}

View File

@@ -0,0 +1,53 @@
{ config, lib, pkgs, ... }:
let
inherit (config.services.tailscale) interfaceName; # "tailscale0"
in
# DERP is a relay system that Tailscale uses when a direct connection cannot be established.
# https://tailscale.com/blog/how-tailscale-works/#encrypted-tcp-relays-derp
{
# https://login.tailscale.com/admin/machines
options.pbsds.tailscale.fqdn = lib.mkOption {
visible = false; internal = true; readOnly = true;
default = "${config.networking.hostName}.tail9aac63.ts.net";
};
config = lib.mkIf (!config.virtualisation.isVmVariant) {
services.tailscale.enable = true;
networking.extraHosts = ''
  127.0.0.2 ${config.pbsds.tailscale.fqdn} # the entire 127.0.0.0/8 is loopback
'';
# # https://tailscale.com/kb/1085/auth-keys
# services.tailscale.authKeyFile = config.sops.secrets.tailscale-authkey-inner.path; # also enables autoconnect
# sops.secrets.tailscale-authkey-inner.sopsFile = ../secrets/tailscale-inner.yaml;
# https://wiki.nixos.org/wiki/Tailscale#DNS
services.resolved.enable = lib.mkDefault config.networking.networkmanager.enable;
# Strict reverse path filtering breaks Tailscale exit node use and some subnet routing setups
# https://wiki.nixos.org/wiki/Tailscale#No_internet_when_using_exit_node
# https://github.com/tailscale/tailscale/issues/4432#issuecomment-1112819111
networking.firewall.checkReversePath = lib.mkDefault "loose";
# TODO: why do people do this?
# networking.firewall.trustedInterfaces = [ interfaceName ];
# done in profiles/sshd/ts-only.nix:
# networking.firewall.interfaces.${interfaceName} = {
# allowedTCPPorts = [ 22 ];
# };
# environment.systemPackages = lib.mkMerge [
# (lib.mkIf config.services.desktopManager.gnome.enable [
# pkgs.ktailctl
# ])
# ];
};
}
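
For reference, a host opts in to these profiles the same way the ts1/ts2/tse shortcuts at the top of this commit do; a minimal sketch, where the relative paths are assumptions about where the host config lives:

  # hypothetical host config
  imports = [
    ../profiles/tailscale/inner.nix       # or outer.nix, depending on which auth key the host should use
    # ../profiles/tailscale/exit-node.nix # only for hosts that should advertise as exit nodes
  ];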