Compare commits

1 commit

Author: Oystein Kristoffer Tveit
SHA1: c16238a88f
Message: WIP: init buskerud/salsa
CI status: Eval nix flake / evals (push) failing after 1m49s
Date: 2024-04-13 18:47:03 +02:00
142 changed files with 2263 additions and 10106 deletions


@@ -1,10 +0,0 @@
root = true
[*]
end_of_line = lf
insert_final_newline = true
trim_trailing_whitespace = true
[*.nix]
indent_style = space
indent_size = 2

.envrc

@@ -1 +0,0 @@
use flake


@@ -1 +0,0 @@
e00008da1afe0d760badd34bbeddff36bb08c475

.gitignore vendored

@@ -1,4 +1,2 @@
 result*
 /configuration.nix
-/.direnv/
-*.qcow2


@@ -3,14 +3,10 @@ keys:
 - &user_danio age17tagmpwqjk3mdy45rfesrfey6h863x8wfq38wh33tkrlrywxducs0k6tpq
 - &user_felixalb age1mrnldl334l2nszuta6ywvewng0fswv2dz9l5g4qcwe3nj4yxf92qjskdx6
 - &user_oysteikt F7D37890228A907440E1FD4846B9228E814A2AAC
-- &user_eirikwit age1ju7rd26llahz3g8tz7cy5ld52swj8gsmg0flrmrxngc0nj0avq3ssh0sn5
-- &user_pederbs_sopp age1hmpdk4h69wxpwqk9tkud39f66hprhehxtzhgw97r6dvr7v0mx5jscsuhkn
-- &user_pederbs_nord age1wrssr4z4g6vl3fd3qme5cewchmmhm0j2xe6wf2meu4r6ycn37anse98mfs
-- &user_pederbs_bjarte age1zhxul786an743u0fascv4wtc5xduu7qfy803lfs539yzhgmlq5ds2lznt5
 # Hosts
 - &host_jokum age1gp8ye4g2mmw3may5xg0zsy7mm04glfz3788mmdx9cvcsdxs9hg0s0cc9kt
-- &host_ildkule age1x28hmzvuv6f2n66c0jtqcca3h9rput8d7j5uek6jcpx8n9egd52sqpejq0
+- &host_ildkule age1hn45n46ypyrvypv0mwfnpt9ddrlmw34dwlpf33n8v67jexr3lucq6ahc9x
 - &host_bekkalokk age12nj59tguy9wg882updc2vjdusx5srnxmjyfaqve4zx6jnnsaw3qsyjq6zd
 - &host_bicep age1sl43gc9cw939z5tgha2lpwf0xxxgcnlw7w4xem4sqgmt2pt264vq0dmwx2
@@ -22,10 +18,6 @@ creation_rules:
 - *host_jokum
 - *user_danio
 - *user_felixalb
-- *user_eirikwit
-- *user_pederbs_sopp
-- *user_pederbs_nord
-- *user_pederbs_bjarte
 pgp:
 - *user_oysteikt
@@ -37,9 +29,6 @@ creation_rules:
 - *host_bekkalokk
 - *user_danio
 - *user_felixalb
-- *user_pederbs_sopp
-- *user_pederbs_nord
-- *user_pederbs_bjarte
 pgp:
 - *user_oysteikt
@@ -49,9 +38,6 @@ creation_rules:
 - *host_jokum
 - *user_danio
 - *user_felixalb
-- *user_pederbs_sopp
-- *user_pederbs_nord
-- *user_pederbs_bjarte
 pgp:
 - *user_oysteikt
@@ -61,9 +47,6 @@ creation_rules:
 - *host_ildkule
 - *user_danio
 - *user_felixalb
-- *user_pederbs_sopp
-- *user_pederbs_nord
-- *user_pederbs_bjarte
 pgp:
 - *user_oysteikt
@@ -73,8 +56,5 @@ creation_rules:
 - *host_bicep
 - *user_danio
 - *user_felixalb
-- *user_pederbs_sopp
-- *user_pederbs_nord
-- *user_pederbs_bjarte
 pgp:
 - *user_oysteikt


@@ -26,14 +26,10 @@ It is probably a good idea to create a PR first if you are not yet used to nix.
 Within 24 hours all the systems will pull down the new configuration and deploy it.
 You can force a machine to update itself before then by running:
-`nixos-rebuild switch --update-input nixpkgs --update-input nixpkgs-unstable --no-write-lock-file --refresh --upgrade --flake git+https://git.pvv.ntnu.no/Drift/pvv-nixos-config.git`
+`nixos-rebuild switch --update-input nixpkgs --update-input nixpkgs-unstable --no-write-lock-file --refresh --flake git+https://git.pvv.ntnu.no/Drift/pvv-nixos-config.git --upgrade`
 as root on the machine.
-If you do not want to update all the packages (and possibly have to wait a while!), you can run
-`nixos-rebuild switch --override-input nixpkgs nixpkgs --override-input nixpkgs-unstable nixpkgs-unstable --flake git+https://git.pvv.ntnu.no/Drift/pvv-nixos-config.git`
 ## The secrets section
 So that secrets are not shared with the whole world in git - or are world

base.nix Normal file

@@ -0,0 +1,86 @@
{ config, lib, pkgs, inputs, values, ... }:
{
imports = [
./users
];
networking.domain = "pvv.ntnu.no";
networking.useDHCP = false;
# networking.search = [ "pvv.ntnu.no" "pvv.org" ];
# networking.nameservers = lib.mkDefault [ "129.241.0.200" "129.241.0.201" ];
# networking.tempAddresses = lib.mkDefault "disabled";
# networking.defaultGateway = values.hosts.gateway;
systemd.network.enable = true;
services.resolved = {
enable = lib.mkDefault true;
dnssec = "false"; # Supposedly this keeps breaking and the default is to allow downgrades anyways...
};
time.timeZone = "Europe/Oslo";
i18n.defaultLocale = "en_US.UTF-8";
console = {
font = "Lat2-Terminus16";
keyMap = "no";
};
system.autoUpgrade = {
enable = true;
flake = "git+https://git.pvv.ntnu.no/Drift/pvv-nixos-config.git";
flags = [
"--update-input" "nixpkgs"
"--update-input" "nixpkgs-unstable"
"--no-write-lock-file"
];
};
nix.gc.automatic = true;
nix.gc.options = "--delete-older-than 2d";
nix.settings.experimental-features = [ "nix-command" "flakes" ];
/* This makes commandline tools like
** nix run nixpkgs#hello
** and nix-shell -p hello
** use the same channel the system
** was built with
*/
nix.registry = {
nixpkgs.flake = inputs.nixpkgs;
};
nix.nixPath = [ "nixpkgs=${inputs.nixpkgs}" ];
environment.systemPackages = with pkgs; [
file
git
gnupg
htop
nano
rsync
screen
tmux
vim
wget
kitty.terminfo
];
programs.zsh.enable = true;
users.groups."drift".name = "drift";
# Trusted users on the nix builder machines
users.groups."nix-builder-users".name = "nix-builder-users";
services.openssh = {
enable = true;
extraConfig = ''
PubkeyAcceptedAlgorithms=+ssh-rsa
'';
settings.PermitRootLogin = "yes";
};
}


@@ -1,60 +0,0 @@
{ pkgs, lib, fp, ... }:
{
imports = [
(fp /users)
(fp /modules/snakeoil-certs.nix)
./networking.nix
./nix.nix
./services/acme.nix
./services/auto-upgrade.nix
./services/irqbalance.nix
./services/logrotate.nix
./services/nginx.nix
./services/openssh.nix
./services/postfix.nix
./services/smartd.nix
./services/thermald.nix
];
boot.tmp.cleanOnBoot = lib.mkDefault true;
time.timeZone = "Europe/Oslo";
i18n.defaultLocale = "en_US.UTF-8";
console = {
font = "Lat2-Terminus16";
keyMap = "no";
};
environment.systemPackages = with pkgs; [
file
git
gnupg
htop
nano
ripgrep
rsync
screen
tmux
vim
wget
kitty.terminfo
];
programs.zsh.enable = true;
security.sudo.execWheelOnly = true;
security.sudo.extraConfig = ''
Defaults lecture = never
'';
users.groups."drift".name = "drift";
# Trusted users on the nix builder machines
users.groups."nix-builder-users".name = "nix-builder-users";
}


@@ -1,13 +0,0 @@
{ lib, values, ... }:
{
systemd.network.enable = true;
networking.domain = "pvv.ntnu.no";
networking.useDHCP = false;
# The rest of the networking configuration is usually sourced from /values.nix
services.resolved = {
enable = lib.mkDefault true;
dnssec = "false"; # Supposedly this keeps breaking and the default is to allow downgrades anyways...
};
}


@@ -1,34 +0,0 @@
{ inputs, ... }:
{
nix = {
gc = {
automatic = true;
options = "--delete-older-than 2d";
};
settings = {
allow-dirty = true;
auto-optimise-store = true;
builders-use-substitutes = true;
experimental-features = [ "nix-command" "flakes" ];
log-lines = 50;
use-xdg-base-directories = true;
};
/* This makes commandline tools like
** nix run nixpkgs#hello
** and nix-shell -p hello
** use the same channel the system
** was built with
*/
registry = {
"nixpkgs".flake = inputs.nixpkgs;
"nixpkgs-unstable".flake = inputs.nixpkgs-unstable;
"pvv-nix".flake = inputs.self;
};
nixPath = [
"nixpkgs=${inputs.nixpkgs}"
"unstable=${inputs.nixpkgs-unstable}"
];
};
}


@@ -1,15 +0,0 @@
{ ... }:
{
security.acme = {
acceptTerms = true;
defaults.email = "drift@pvv.ntnu.no";
};
# Let's not spam LetsEncrypt in `nixos-rebuild build-vm` mode:
virtualisation.vmVariant = {
security.acme.defaults.server = "https://127.0.0.1";
security.acme.preliminarySelfsigned = true;
users.users.root.initialPassword = "root";
};
}


@@ -1,26 +0,0 @@
{ inputs, pkgs, lib, ... }:
{
system.autoUpgrade = {
enable = true;
flake = "git+https://git.pvv.ntnu.no/Drift/pvv-nixos-config.git";
flags = [
# --update-input is deprecated since nix 2.22, and removed in lix 2.90
# https://git.lix.systems/lix-project/lix/issues/400
"--refresh"
"--override-input" "nixpkgs" "github:nixos/nixpkgs/nixos-24.05-small"
"--override-input" "nixpkgs-unstable" "github:nixos/nixpkgs/nixos-unstable-small"
"--no-write-lock-file"
];
};
# workaround for https://github.com/NixOS/nix/issues/6895
# via https://git.lix.systems/lix-project/lix/issues/400
environment.etc."current-system-flake-inputs.json".source
= pkgs.writers.writeJSON "flake-inputs.json" (
lib.flip lib.mapAttrs inputs (name: input:
# inputs.*.sourceInfo sans outPath, since writeJSON will otherwise serialize sourceInfo like a derivation
lib.removeAttrs (input.sourceInfo or {}) [ "outPath" ]
// { store-path = input.outPath; } # comment this line if you don't want to retain a store reference to the flake inputs
)
);
}


@@ -1,4 +0,0 @@
{ ... }:
{
services.irqbalance.enable = true;
}


@@ -1,42 +0,0 @@
{ ... }:
{
# source: https://github.com/logrotate/logrotate/blob/main/examples/logrotate.service
systemd.services.logrotate = {
documentation = [ "man:logrotate(8)" "man:logrotate.conf(5)" ];
unitConfig.RequiresMountsFor = "/var/log";
serviceConfig = {
Nice = 19;
IOSchedulingClass = "best-effort";
IOSchedulingPriority = 7;
ReadWritePaths = [ "/var/log" ];
AmbientCapabilities = [ "" ];
CapabilityBoundingSet = [ "" ];
DeviceAllow = [ "" ];
LockPersonality = true;
MemoryDenyWriteExecute = true;
NoNewPrivileges = true; # disable for third party rotate scripts
PrivateDevices = true;
PrivateNetwork = true; # disable for mail delivery
PrivateTmp = true;
ProtectClock = true;
ProtectControlGroups = true;
ProtectHome = true; # disable for userdir logs
ProtectHostname = true;
ProtectKernelLogs = true;
ProtectKernelModules = true;
ProtectKernelTunables = true;
ProtectProc = "invisible";
ProtectSystem = "full";
RestrictNamespaces = true;
RestrictRealtime = true;
RestrictSUIDSGID = true; # disable for creating setgid directories
SocketBindDeny = [ "any" ];
SystemCallArchitectures = "native";
SystemCallFilter = [
"@system-service"
];
};
};
}


@@ -1,44 +0,0 @@
{ config, lib, ... }:
{
# nginx return 444 for all nonexistent virtualhosts
systemd.services.nginx.after = [ "generate-snakeoil-certs.service" ];
environment.snakeoil-certs = lib.mkIf config.services.nginx.enable {
"/etc/certs/nginx" = {
owner = "nginx";
group = "nginx";
};
};
networking.firewall.allowedTCPPorts = lib.mkIf config.services.nginx.enable [ 80 443 ];
services.nginx = {
recommendedTlsSettings = true;
recommendedProxySettings = true;
recommendedOptimisation = true;
recommendedGzipSettings = true;
appendConfig = ''
pcre_jit on;
worker_processes auto;
worker_rlimit_nofile 100000;
'';
eventsConfig = ''
worker_connections 2048;
use epoll;
multi_accept on;
'';
};
systemd.services.nginx.serviceConfig = lib.mkIf config.services.nginx.enable {
LimitNOFILE = 65536;
};
services.nginx.virtualHosts."_" = lib.mkIf config.services.nginx.enable {
sslCertificate = "/etc/certs/nginx.crt";
sslCertificateKey = "/etc/certs/nginx.key";
addSSL = true;
extraConfig = "return 444;";
};
}


@@ -1,21 +0,0 @@
{ ... }:
{
services.openssh = {
enable = true;
startWhenNeeded = true;
extraConfig = ''
PubkeyAcceptedAlgorithms=+ssh-rsa
Match Group wheel
PasswordAuthentication no
Match All
'';
settings.PermitRootLogin = "yes";
};
users.users."root".openssh.authorizedKeys.keys = [
"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCqVt4LCe0YIttr9swFxjkjn37ZDY9JxwVC+2gvfSINDJorOCtqPjDOTD2fTS1Gz08QCwpnLWq2kyvRchu6WgriAbSACpbZZBgxRaF/FVh3oiMVFGnNKGnv6/fdo/vZtu8mUVuqtmTrgLYpZdbR4oD3XiBlDKs7Cv5hPqt95lnP6MNFvE8mICCfd1PwhsABd2IQ5laz3u77/RXhNFJL0Kf2/+6gk9awcLuwHrPdvq7c3BxRHbc9UMRQENyjyQPa7aLe+uJBFLKP51I8VBuDpDacuibQx7nMt6N2UJ2KWI0JxRMHuJNq4S5jidR82aOw9gzGbTv30SKNLMqsZ0xj4LtdqCXDiZF6Lr09PsJYsvnBUFWa14HGcThKDtgwQwBryNViYmfv//0h9+RLZiU0ab+NEwSs7Zh5iAD+vhx64QqNX3tR7Le4SWXh8W0eShU9N78qYdSkiC3Ui7htxeqOocXM/P4AwbnHsLELIvkHdvgchCPvl8ygZa4WJTEWv16+ICskJcAKWGuqjvXAFuwjJJmPp9xLW9O0DFfQhMELiGamQR9wK07yYQVr34iah6qZO7cwhSKyEPFrVPIaNtfDhsjED639F7vmktf26SWNJHWfW0wOHILjI6TgqUvy0JDd8W8w0CHlAfz6Fs2l99NNgNF8dB3vBASbxS0hu/y0PVu/xQ== openstack-sleipner"
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAICCbgJ0Uwh9VSVhfId7l9i5/jk4CvAK5rbkiab8R+moF root@sleipner"
];
}


@@ -1,23 +0,0 @@
{ config, pkgs, lib, ... }:
let
cfg = config.services.postfix;
in
{
services.postfix = {
enable = true;
hostname = "${config.networking.hostName}.pvv.ntnu.no";
domain = "pvv.ntnu.no";
relayHost = "smtp.pvv.ntnu.no";
relayPort = 465;
config = {
smtp_tls_wrappermode = "yes";
smtp_tls_security_level = "encrypt";
};
# Nothing should be delivered to this machine
destination = [ ];
};
}


@@ -1,18 +0,0 @@
{ config, pkgs, lib, ... }:
{
services.smartd = {
enable = lib.mkDefault true;
notifications = {
mail = {
enable = true;
sender = "root@pvv.ntnu.no";
recipient = "root@pvv.ntnu.no";
};
wall.enable = false;
};
};
environment.systemPackages = lib.optionals config.services.smartd.enable (with pkgs; [
smartmontools
]);
}


@@ -1,8 +0,0 @@
{ config, lib, ... }:
{
# Let's not thermal throttle
services.thermald.enable = lib.mkIf (lib.all (x: x) [
(config.nixpkgs.system == "x86_64-linux")
(!config.boot.isContainer or false)
]) true;
}


@@ -7,11 +7,11 @@
] ]
}, },
"locked": { "locked": {
"lastModified": 1731746438, "lastModified": 1709632354,
"narHash": "sha256-f3SSp1axoOk0NAI7oFdRzbxG2XPBSIXC+/DaAXnvS1A=", "narHash": "sha256-jxRHwqrtNze51WKFKvxlQ8Inf62UNRl5cFqEQ2V96vE=",
"owner": "nix-community", "owner": "nix-community",
"repo": "disko", "repo": "disko",
"rev": "cb64993826fa7a477490be6ccb38ba1fa1e18fa8", "rev": "0d11aa8d6431326e10b8656420f91085c3bd0b12",
"type": "github" "type": "github"
}, },
"original": { "original": {
@@ -20,25 +20,24 @@
"type": "github" "type": "github"
} }
}, },
"greg-ng": { "grzegorz": {
"inputs": { "inputs": {
"nixpkgs": [ "nixpkgs": [
"nixpkgs" "nixpkgs-unstable"
], ]
"rust-overlay": "rust-overlay"
}, },
"locked": { "locked": {
"lastModified": 1730249639, "lastModified": 1696346665,
"narHash": "sha256-G3URSlqCcb+GIvGyki+HHrDM5ZanX/dP9BtppD/SdfI=", "narHash": "sha256-J6Tf6a/zhFZ8SereluHLrvgPsIVm2CGHHA8wrbhZB3Y=",
"ref": "refs/heads/main", "owner": "Programvareverkstedet",
"rev": "80e0447bcb79adad4f459ada5610f3eae987b4e3", "repo": "grzegorz",
"revCount": 34, "rev": "9b9c3ac7d408ac7c6d67544b201e6b169afacb03",
"type": "git", "type": "github"
"url": "https://git.pvv.ntnu.no/Projects/greg-ng.git"
}, },
"original": { "original": {
"type": "git", "owner": "Programvareverkstedet",
"url": "https://git.pvv.ntnu.no/Projects/greg-ng.git" "repo": "grzegorz",
"type": "github"
} }
}, },
"grzegorz-clients": { "grzegorz-clients": {
@@ -48,121 +47,96 @@
] ]
}, },
"locked": { "locked": {
"lastModified": 1726861934, "lastModified": 1693864994,
"narHash": "sha256-lOzPDwktd+pwszUTbpUdQg6iCzInS11fHLfkjmnvJrM=", "narHash": "sha256-oLDiWdCKDtEfeGzfAuDTq+n9VWp6JCo67PEESEZ3y8E=",
"ref": "refs/heads/master", "owner": "Programvareverkstedet",
"rev": "546d921ec46735dbf876e36f4af8df1064d09432", "repo": "grzegorz-clients",
"revCount": 78, "rev": "a38a0b0fb31ad0ad78a91458cb2c7f77f686468f",
"type": "git", "type": "github"
"url": "https://git.pvv.ntnu.no/Projects/grzegorz-clients.git"
}, },
"original": { "original": {
"type": "git", "owner": "Programvareverkstedet",
"url": "https://git.pvv.ntnu.no/Projects/grzegorz-clients.git" "repo": "grzegorz-clients",
"type": "github"
} }
}, },
"matrix-next": { "matrix-next": {
"inputs": { "inputs": {
"nixpkgs": [ "nixpkgs-lib": "nixpkgs-lib"
"nixpkgs"
]
}, },
"locked": { "locked": {
"lastModified": 1727410897, "lastModified": 1701507532,
"narHash": "sha256-tWsyxvf421ieWUJYgjV7m1eTdr2ZkO3vId7vmtvfFpQ=", "narHash": "sha256-Zzv8OFB7iilzDGe6z2t/j8qRtR23TN3N8LssGsvRWEA=",
"owner": "dali99", "owner": "dali99",
"repo": "nixos-matrix-modules", "repo": "nixos-matrix-modules",
"rev": "ff787d410cba17882cd7b6e2e22cc88d4064193c", "rev": "046194cdadc50d81255a9c57789381ed1153e2b1",
"type": "github" "type": "github"
}, },
"original": { "original": {
"owner": "dali99", "owner": "dali99",
"ref": "v0.6.1",
"repo": "nixos-matrix-modules", "repo": "nixos-matrix-modules",
"type": "github" "type": "github"
} }
}, },
"minecraft-data": {
"locked": {
"lastModified": 1725277886,
"narHash": "sha256-Fw4VbbE3EfypQWSgPDFfvVH47BHeg3ptsO715NlUM8Q=",
"ref": "refs/heads/master",
"rev": "1b4087bd3322a2e2ba84271c8fcc013e6b641a58",
"revCount": 2,
"type": "git",
"url": "https://git.pvv.ntnu.no/Drift/minecraft-data.git"
},
"original": {
"type": "git",
"url": "https://git.pvv.ntnu.no/Drift/minecraft-data.git"
}
},
"nix-gitea-themes": {
"inputs": {
"nixpkgs": [
"nixpkgs"
]
},
"locked": {
"lastModified": 1714416973,
"narHash": "sha256-aZUcvXjdETUC6wVQpWDVjLUzwpDAEca8yR0ITDeK39o=",
"ref": "refs/heads/main",
"rev": "2b23c0ba8aae68d3cb6789f0f6e4891cef26cc6d",
"revCount": 6,
"type": "git",
"url": "https://git.pvv.ntnu.no/oysteikt/nix-gitea-themes.git"
},
"original": {
"type": "git",
"url": "https://git.pvv.ntnu.no/oysteikt/nix-gitea-themes.git"
}
},
"nixpkgs": { "nixpkgs": {
"locked": { "locked": {
"lastModified": 1731663789, "lastModified": 1709565521,
"narHash": "sha256-x07g4NcqGP6mQn6AISXJaks9sQYDjZmTMBlKIvajvyc=", "narHash": "sha256-YP3H7Lm3IhOKHIcn+qMCLRINJG313Io5CjvNTJyrnhY=",
"owner": "NixOS", "owner": "NixOS",
"repo": "nixpkgs", "repo": "nixpkgs",
"rev": "035d434d48f4375ac5d3a620954cf5fda7dd7c36", "rev": "9a5f1a573376eeb8c525f936eed32fabfb6e81be",
"type": "github" "type": "github"
}, },
"original": { "original": {
"owner": "NixOS", "id": "nixpkgs",
"ref": "nixos-24.05-small", "ref": "nixos-23.11-small",
"repo": "nixpkgs", "type": "indirect"
}
},
"nixpkgs-lib": {
"locked": {
"lastModified": 1673743903,
"narHash": "sha256-sloY6KYyVOozJ1CkbgJPpZ99TKIjIvM+04V48C04sMQ=",
"owner": "nix-community",
"repo": "nixpkgs.lib",
"rev": "7555e2dfcbac1533f047021f1744ac8871150f9f",
"type": "github"
},
"original": {
"owner": "nix-community",
"repo": "nixpkgs.lib",
"type": "github" "type": "github"
} }
}, },
"nixpkgs-stable": { "nixpkgs-stable": {
"locked": { "locked": {
"lastModified": 1730602179, "lastModified": 1709428628,
"narHash": "sha256-efgLzQAWSzJuCLiCaQUCDu4NudNlHdg2NzGLX5GYaEY=", "narHash": "sha256-//ZCCnpVai/ShtO2vPjh3AWgo8riXCaret6V9s7Hew4=",
"owner": "NixOS", "owner": "NixOS",
"repo": "nixpkgs", "repo": "nixpkgs",
"rev": "3c2f1c4ca372622cb2f9de8016c9a0b1cbd0f37c", "rev": "66d65cb00b82ffa04ee03347595aa20e41fe3555",
"type": "github" "type": "github"
}, },
"original": { "original": {
"owner": "NixOS", "owner": "NixOS",
"ref": "release-24.05", "ref": "release-23.11",
"repo": "nixpkgs", "repo": "nixpkgs",
"type": "github" "type": "github"
} }
}, },
"nixpkgs-unstable": { "nixpkgs-unstable": {
"locked": { "locked": {
"lastModified": 1731745710, "lastModified": 1712963716,
"narHash": "sha256-SVeiClbgqL071JpAspOu0gCkPSAL51kSIRwo4C/pghA=", "narHash": "sha256-WKm9CvgCldeIVvRz87iOMi8CFVB1apJlkUT4GGvA0iM=",
"owner": "NixOS", "owner": "NixOS",
"repo": "nixpkgs", "repo": "nixpkgs",
"rev": "dfaa4cb76c2d450d8f396bb6b9f43cede3ade129", "rev": "cfd6b5fc90b15709b780a5a1619695a88505a176",
"type": "github" "type": "github"
}, },
"original": { "original": {
"owner": "NixOS", "id": "nixpkgs",
"ref": "nixos-unstable-small", "ref": "nixos-unstable-small",
"repo": "nixpkgs", "type": "indirect"
"type": "github"
} }
}, },
"pvv-calendar-bot": { "pvv-calendar-bot": {
@@ -172,11 +146,11 @@
] ]
}, },
"locked": { "locked": {
"lastModified": 1723850344, "lastModified": 1693136143,
"narHash": "sha256-aT37O9l9eclWEnqxASVNBL1dKwDHZUOqdbA4VO9DJvw=", "narHash": "sha256-amHprjftc3y/bg8yf4hITCLa+ez5HIi0yGfR7TU6UIc=",
"ref": "refs/heads/main", "ref": "refs/heads/main",
"rev": "38b66677ab8c01aee10cd59e745af9ce3ea88092", "rev": "a32894b305f042d561500f5799226afd1faf5abb",
"revCount": 19, "revCount": 9,
"type": "git", "type": "git",
"url": "https://git.pvv.ntnu.no/Projects/calendar-bot.git" "url": "https://git.pvv.ntnu.no/Projects/calendar-bot.git"
}, },
@@ -185,62 +159,18 @@
"url": "https://git.pvv.ntnu.no/Projects/calendar-bot.git" "url": "https://git.pvv.ntnu.no/Projects/calendar-bot.git"
} }
}, },
"pvv-nettsiden": {
"inputs": {
"nixpkgs": [
"nixpkgs"
]
},
"locked": {
"lastModified": 1725212759,
"narHash": "sha256-yZBsefIarFUEhFRj+rCGMp9Zvag3MCafqV/JfGVRVwc=",
"ref": "refs/heads/master",
"rev": "e7b66b4bc6a89bab74bac45b87e9434f5165355f",
"revCount": 473,
"type": "git",
"url": "https://git.pvv.ntnu.no/Projects/nettsiden.git"
},
"original": {
"type": "git",
"url": "https://git.pvv.ntnu.no/Projects/nettsiden.git"
}
},
"root": { "root": {
"inputs": { "inputs": {
"disko": "disko", "disko": "disko",
"greg-ng": "greg-ng", "grzegorz": "grzegorz",
"grzegorz-clients": "grzegorz-clients", "grzegorz-clients": "grzegorz-clients",
"matrix-next": "matrix-next", "matrix-next": "matrix-next",
"minecraft-data": "minecraft-data",
"nix-gitea-themes": "nix-gitea-themes",
"nixpkgs": "nixpkgs", "nixpkgs": "nixpkgs",
"nixpkgs-unstable": "nixpkgs-unstable", "nixpkgs-unstable": "nixpkgs-unstable",
"pvv-calendar-bot": "pvv-calendar-bot", "pvv-calendar-bot": "pvv-calendar-bot",
"pvv-nettsiden": "pvv-nettsiden",
"sops-nix": "sops-nix" "sops-nix": "sops-nix"
} }
}, },
"rust-overlay": {
"inputs": {
"nixpkgs": [
"greg-ng",
"nixpkgs"
]
},
"locked": {
"lastModified": 1729391507,
"narHash": "sha256-as0I9xieJUHf7kiK2a9znDsVZQTFWhM1pLivII43Gi0=",
"owner": "oxalica",
"repo": "rust-overlay",
"rev": "784981a9feeba406de38c1c9a3decf966d853cca",
"type": "github"
},
"original": {
"owner": "oxalica",
"repo": "rust-overlay",
"type": "github"
}
},
"sops-nix": { "sops-nix": {
"inputs": { "inputs": {
"nixpkgs": [ "nixpkgs": [
@@ -249,11 +179,11 @@
"nixpkgs-stable": "nixpkgs-stable" "nixpkgs-stable": "nixpkgs-stable"
}, },
"locked": { "locked": {
"lastModified": 1731748189, "lastModified": 1709591996,
"narHash": "sha256-Zd/Uukvpcu26M6YGhpbsgqm6LUSLz+Q8mDZ5LOEGdiE=", "narHash": "sha256-0sQcalXSgqlO6mnxBTXkSQChBHy2GQsokB1XY8r+LpQ=",
"owner": "Mic92", "owner": "Mic92",
"repo": "sops-nix", "repo": "sops-nix",
"rev": "d2bd7f433b28db6bc7ae03d5eca43564da0af054", "rev": "291aad29b59ceda517a06e59809f35cb0bb17c6b",
"type": "github" "type": "github"
}, },
"original": { "original": {

flake.nix

@@ -2,8 +2,8 @@
 description = "PVV System flake";
 inputs = {
-nixpkgs.url = "github:NixOS/nixpkgs/nixos-24.05-small"; # remember to also update the url in base/services/auto-upgrade.nix
-nixpkgs-unstable.url = "github:NixOS/nixpkgs/nixos-unstable-small";
+nixpkgs.url = "nixpkgs/nixos-23.11-small";
+nixpkgs-unstable.url = "nixpkgs/nixos-unstable-small";
 sops-nix.url = "github:Mic92/sops-nix";
 sops-nix.inputs.nixpkgs.follows = "nixpkgs";
@@ -11,24 +11,15 @@
 disko.url = "github:nix-community/disko";
 disko.inputs.nixpkgs.follows = "nixpkgs";
-pvv-nettsiden.url = "git+https://git.pvv.ntnu.no/Projects/nettsiden.git";
-pvv-nettsiden.inputs.nixpkgs.follows = "nixpkgs";
 pvv-calendar-bot.url = "git+https://git.pvv.ntnu.no/Projects/calendar-bot.git";
 pvv-calendar-bot.inputs.nixpkgs.follows = "nixpkgs";
-matrix-next.url = "github:dali99/nixos-matrix-modules/v0.6.1";
-matrix-next.inputs.nixpkgs.follows = "nixpkgs";
-nix-gitea-themes.url = "git+https://git.pvv.ntnu.no/oysteikt/nix-gitea-themes.git";
-nix-gitea-themes.inputs.nixpkgs.follows = "nixpkgs";
-greg-ng.url = "git+https://git.pvv.ntnu.no/Projects/greg-ng.git";
-greg-ng.inputs.nixpkgs.follows = "nixpkgs";
-grzegorz-clients.url = "git+https://git.pvv.ntnu.no/Projects/grzegorz-clients.git";
+matrix-next.url = "github:dali99/nixos-matrix-modules";
+grzegorz.url = "github:Programvareverkstedet/grzegorz";
+grzegorz.inputs.nixpkgs.follows = "nixpkgs-unstable";
+grzegorz-clients.url = "github:Programvareverkstedet/grzegorz-clients";
 grzegorz-clients.inputs.nixpkgs.follows = "nixpkgs";
-minecraft-data.url = "git+https://git.pvv.ntnu.no/Drift/minecraft-data.git";
 };
 outputs = { self, nixpkgs, nixpkgs-unstable, sops-nix, disko, ... }@inputs:
@@ -39,8 +30,8 @@
 "aarch64-linux"
 "aarch64-darwin"
 ];
-forAllSystems = f: nixlib.genAttrs systems f;
-allMachines = builtins.attrNames self.nixosConfigurations;
+forAllSystems = f: nixlib.genAttrs systems (system: f system);
+allMachines = nixlib.mapAttrsToList (name: _: name) self.nixosConfigurations;
 importantMachines = [
 "bekkalokk"
 "bicep"
@@ -49,32 +40,28 @@
 "ildkule"
 ];
 in {
-inherit inputs;
 nixosConfigurations = let
-unstablePkgs = nixpkgs-unstable.legacyPackages.x86_64-linux;
 nixosConfig = nixpkgs: name: config: nixpkgs.lib.nixosSystem (nixpkgs.lib.recursiveUpdate
 rec {
 system = "x86_64-linux";
 specialArgs = {
-inherit unstablePkgs inputs;
+inherit nixpkgs-unstable inputs;
 values = import ./values.nix;
-fp = path: ./${path};
 };
 modules = [
 ./hosts/${name}/configuration.nix
 sops-nix.nixosModules.sops
-] ++ config.modules or [];
+];
 pkgs = import nixpkgs {
 inherit system;
 overlays = [
-# Global overlays go here
-] ++ config.overlays or [ ];
+inputs.pvv-calendar-bot.overlays.${system}.default
+];
 };
 }
-(removeAttrs config [ "modules" "overlays" ])
+config
 );
 stableNixosConfig = nixosConfig nixpkgs;
@@ -82,31 +69,19 @@
 in {
 bicep = stableNixosConfig "bicep" {
 modules = [
+./hosts/bicep/configuration.nix
+sops-nix.nixosModules.sops
 inputs.matrix-next.nixosModules.default
 inputs.pvv-calendar-bot.nixosModules.default
 ];
-overlays = [
-inputs.pvv-calendar-bot.overlays.x86_64-linux.default
-];
 };
-bekkalokk = stableNixosConfig "bekkalokk" {
-overlays = [
-(final: prev: {
-heimdal = unstablePkgs.heimdal;
-mediawiki-extensions = final.callPackage ./packages/mediawiki-extensions { };
-simplesamlphp = final.callPackage ./packages/simplesamlphp { };
-bluemap = final.callPackage ./packages/bluemap.nix { };
-})
-inputs.nix-gitea-themes.overlays.default
-inputs.pvv-nettsiden.overlays.default
-];
-modules = [
-inputs.nix-gitea-themes.nixosModules.default
-inputs.pvv-nettsiden.nixosModules.default
-];
-};
+bekkalokk = stableNixosConfig "bekkalokk" { };
 bob = stableNixosConfig "bob" {
 modules = [
+./hosts/bob/configuration.nix
+sops-nix.nixosModules.sops
 disko.nixosModules.disko
 { disko.devices.disk.disk1.device = "/dev/vda"; }
 ];
@@ -117,27 +92,28 @@
 brzeczyszczykiewicz = stableNixosConfig "brzeczyszczykiewicz" {
 modules = [
+./hosts/brzeczyszczykiewicz/configuration.nix
+sops-nix.nixosModules.sops
+inputs.grzegorz.nixosModules.grzegorz-kiosk
 inputs.grzegorz-clients.nixosModules.grzegorz-webui
-inputs.greg-ng.nixosModules.default
-];
-overlays = [
-inputs.greg-ng.overlays.default
 ];
 };
 georg = stableNixosConfig "georg" {
 modules = [
-inputs.grzegorz-clients.nixosModules.grzegorz-webui
-inputs.greg-ng.nixosModules.default
-];
-overlays = [
-inputs.greg-ng.overlays.default
-];
-};
-};
-nixosModules = {
-snakeoil-certs = ./modules/snakeoil-certs.nix;
-snappymail = ./modules/snappymail.nix;
+./hosts/georg/configuration.nix
+sops-nix.nixosModules.sops
+inputs.grzegorz.nixosModules.grzegorz-kiosk
+inputs.grzegorz-clients.nixosModules.grzegorz-webui
+];
+};
+buskerud = stableNixosConfig "buskerud" {
+modules = [
+./hosts/buskerud/configuration.nix
+sops-nix.nixosModules.sops
+];
+};
 };
 devShells = forAllSystems (system: {
@@ -147,22 +123,23 @@
 packages = {
 "x86_64-linux" = let
 pkgs = nixpkgs.legacyPackages."x86_64-linux";
+pkgs-unstable = nixpkgs-unstable.legacyPackages."x86_64-linux";
 in rec {
 default = important-machines;
 important-machines = pkgs.linkFarm "important-machines"
 (nixlib.getAttrs importantMachines self.packages.x86_64-linux);
 all-machines = pkgs.linkFarm "all-machines"
 (nixlib.getAttrs allMachines self.packages.x86_64-linux);
-simplesamlphp = pkgs.callPackage ./packages/simplesamlphp { };
-} //
-(nixlib.pipe null [
-(_: pkgs.callPackage ./packages/mediawiki-extensions { })
-(nixlib.flip builtins.removeAttrs ["override" "overrideDerivation"])
-(nixlib.mapAttrs' (name: nixlib.nameValuePair "mediawiki-${name}"))
-])
-// nixlib.genAttrs allMachines
+heimdal = pkgs.callPackage hosts/buskerud/containers/salsa/services/heimdal/package.nix {
+inherit (pkgs.apple_sdk.frameworks)
+CoreFoundation Security SystemConfiguration;
+};
+heimdal-unstable = pkgs-unstable.callPackage hosts/buskerud/containers/salsa/services/heimdal/package.nix {
+inherit (pkgs.apple_sdk.frameworks)
+CoreFoundation Security SystemConfiguration;
+};
+inherit pkgs pkgs-unstable;
+} // nixlib.genAttrs allMachines
 (machine: self.nixosConfigurations.${machine}.config.system.build.toplevel);
 };
 };


@@ -1,25 +1,22 @@
-{ fp, pkgs, values, ... }:
+{ pkgs, values, ... }:
 {
 imports = [
 ./hardware-configuration.nix
-(fp /base)
-(fp /misc/metrics-exporters.nix)
-./services/bluemap/default.nix
+../../base.nix
+../../misc/metrics-exporters.nix
+#./services/keycloak.nix
+# TODO: set up authentication for the following:
+# ./services/website.nix
+./services/nginx
 ./services/gitea/default.nix
-./services/idp-simplesamlphp
-./services/kerberos
-./services/mediawiki
-./services/nginx.nix
-./services/phpfpm.nix
-./services/vaultwarden.nix
 ./services/webmail
-./services/website
-./services/well-known
+# ./services/mediawiki.nix
 ];
-sops.defaultSopsFile = fp /secrets/bekkalokk/bekkalokk.yaml;
+sops.defaultSopsFile = ../../secrets/bekkalokk/bekkalokk.yaml;
 sops.age.sshKeyPaths = [ "/etc/ssh/ssh_host_ed25519_key" ];
 sops.age.keyFile = "/var/lib/sops-nix/key.txt";
 sops.age.generateKey = true;
@@ -27,6 +24,8 @@
 boot.loader.systemd-boot.enable = true;
 boot.loader.efi.canTouchEfiVariables = true;
+virtualisation.podman.enable = true;
 networking.hostName = "bekkalokk";
 systemd.network.networks."30-enp2s0" = values.defaultNetworkConfig // {
@@ -34,8 +33,6 @@
 address = with values.hosts.bekkalokk; [ (ipv4 + "/25") (ipv6 + "/64") ];
 };
-services.btrfs.autoScrub.enable = true;
 # Do not change, even during upgrades.
 # See https://search.nixos.org/options?show=system.stateVersion
 system.stateVersion = "22.11";


@@ -1,83 +0,0 @@
{ config, lib, pkgs, inputs, ... }:
let
vanillaSurvival = "/var/lib/bluemap/vanilla_survival_world";
in {
imports = [
./module.nix # From danio, pending upstreaming
];
disabledModules = [ "services/web-servers/bluemap.nix" ];
sops.secrets."bluemap/ssh-key" = { };
sops.secrets."bluemap/ssh-known-hosts" = { };
services.bluemap = {
enable = true;
eula = true;
onCalendar = "*-*-* 05:45:00"; # a little over an hour after auto-upgrade
host = "minecraft.pvv.ntnu.no";
maps = {
"verden" = {
settings = {
world = vanillaSurvival;
sorting = 0;
ambient-light = 0.1;
cave-detection-ocean-floor = -5;
marker-sets = inputs.minecraft-data.map-markers.vanillaSurvival.verden;
};
};
"underverden" = {
settings = {
world = "${vanillaSurvival}/DIM-1";
sorting = 100;
sky-color = "#290000";
void-color = "#150000";
ambient-light = 0.6;
world-sky-light = 0;
remove-caves-below-y = -10000;
cave-detection-ocean-floor = -5;
cave-detection-uses-block-light = true;
max-y = 90;
marker-sets = inputs.minecraft-data.map-markers.vanillaSurvival.underverden;
};
};
"enden" = {
settings = {
world = "${vanillaSurvival}/DIM1";
sorting = 200;
sky-color = "#080010";
void-color = "#080010";
ambient-light = 0.6;
world-sky-light = 0;
remove-caves-below-y = -10000;
cave-detection-ocean-floor = -5;
};
};
};
};
services.nginx.virtualHosts."minecraft.pvv.ntnu.no" = {
enableACME = true;
forceSSL = true;
};
# TODO: render somewhere else lmao
systemd.services."render-bluemap-maps" = {
preStart = ''
mkdir -p /var/lib/bluemap/world
${pkgs.rsync}/bin/rsync \
-e "${pkgs.openssh}/bin/ssh -o UserKnownHostsFile=$CREDENTIALS_DIRECTORY/ssh-known-hosts -i $CREDENTIALS_DIRECTORY/sshkey" \
-avz --no-owner --no-group \
root@innovation.pvv.ntnu.no:/ \
${vanillaSurvival}
'';
serviceConfig = {
LoadCredential = [
"sshkey:${config.sops.secrets."bluemap/ssh-key".path}"
"ssh-known-hosts:${config.sops.secrets."bluemap/ssh-known-hosts".path}"
];
};
};
}


@@ -1,343 +0,0 @@
{ config, lib, pkgs, ... }:
let
cfg = config.services.bluemap;
format = pkgs.formats.hocon { };
coreConfig = format.generate "core.conf" cfg.coreSettings;
webappConfig = format.generate "webapp.conf" cfg.webappSettings;
webserverConfig = format.generate "webserver.conf" cfg.webserverSettings;
storageFolder = pkgs.linkFarm "storage"
(lib.attrsets.mapAttrs' (name: value:
lib.nameValuePair "${name}.conf"
(format.generate "${name}.conf" value))
cfg.storage);
mapsFolder = pkgs.linkFarm "maps"
(lib.attrsets.mapAttrs' (name: value:
lib.nameValuePair "${name}.conf"
(format.generate "${name}.conf" value.settings))
cfg.maps);
webappConfigFolder = pkgs.linkFarm "bluemap-config" {
"maps" = mapsFolder;
"storages" = storageFolder;
"core.conf" = coreConfig;
"webapp.conf" = webappConfig;
"webserver.conf" = webserverConfig;
"packs" = cfg.resourcepacks;
"addons" = cfg.resourcepacks; # TODO
};
renderConfigFolder = name: value: pkgs.linkFarm "bluemap-${name}-config" {
"maps" = pkgs.linkFarm "maps" {
"${name}.conf" = (format.generate "${name}.conf" value.settings);
};
"storages" = storageFolder;
"core.conf" = coreConfig;
"webapp.conf" = format.generate "webapp.conf" (cfg.webappSettings // { "update-settings-file" = false; });
"webserver.conf" = webserverConfig;
"packs" = value.resourcepacks;
"addons" = cfg.resourcepacks; # TODO
};
inherit (lib) mkOption;
in {
options.services.bluemap = {
enable = lib.mkEnableOption "bluemap";
eula = mkOption {
type = lib.types.bool;
description = ''
By changing this option to true you confirm that you own a copy of Minecraft Java Edition,
and that you agree to Minecraft's EULA.
'';
default = false;
};
defaultWorld = mkOption {
type = lib.types.path;
description = ''
The world used by the default map ruleset.
If you configure your own maps you do not need to set this.
'';
example = lib.literalExpression "\${config.services.minecraft.dataDir}/world";
};
enableRender = mkOption {
type = lib.types.bool;
description = "Enable rendering";
default = true;
};
webRoot = mkOption {
type = lib.types.path;
default = "/var/lib/bluemap/web";
description = "The directory for saving and serving the webapp and the maps";
};
enableNginx = mkOption {
type = lib.types.bool;
default = true;
description = "Enable configuring a virtualHost for serving the bluemap webapp";
};
host = mkOption {
type = lib.types.str;
default = "bluemap.${config.networking.domain}";
defaultText = lib.literalExpression "bluemap.\${config.networking.domain}";
description = "Domain to configure nginx for";
};
onCalendar = mkOption {
type = lib.types.str;
description = ''
How often to trigger rendering the map,
in the format of a systemd timer onCalendar configuration.
See {manpage}`systemd.timer(5)`.
'';
default = "*-*-* 03:10:00";
};
coreSettings = mkOption {
type = lib.types.submodule {
freeformType = format.type;
options = {
data = mkOption {
type = lib.types.path;
description = "Folder for where bluemap stores its data";
default = "/var/lib/bluemap";
};
metrics = lib.mkEnableOption "Sending usage metrics containing the version of bluemap in use";
};
};
description = "Settings for the core.conf file, [see upstream docs](https://github.com/BlueMap-Minecraft/BlueMap/blob/master/BlueMapCommon/src/main/resources/de/bluecolored/bluemap/config/core.conf).";
};
webappSettings = mkOption {
type = lib.types.submodule {
freeformType = format.type;
};
default = {
enabled = true;
webroot = cfg.webRoot;
};
defaultText = lib.literalExpression ''
{
enabled = true;
webroot = config.services.bluemap.webRoot;
}
'';
description = "Settings for the webapp.conf file, see [upstream docs](https://github.com/BlueMap-Minecraft/BlueMap/blob/master/BlueMapCommon/src/main/resources/de/bluecolored/bluemap/config/webapp.conf).";
};
webserverSettings = mkOption {
type = lib.types.submodule {
freeformType = format.type;
options = {
enabled = mkOption {
type = lib.types.bool;
description = ''
Enable bluemap's built-in webserver.
Disabled by default in nixos for use of nginx directly.
'';
default = false;
};
};
};
default = { };
description = ''
Settings for the webserver.conf file, usually not required.
[See upstream docs](https://github.com/BlueMap-Minecraft/BlueMap/blob/master/BlueMapCommon/src/main/resources/de/bluecolored/bluemap/config/webserver.conf).
'';
};
maps = mkOption {
type = lib.types.attrsOf (lib.types.submodule {
options = {
resourcepacks = mkOption {
type = lib.types.path;
default = cfg.resourcepacks;
defaultText = lib.literalExpression "config.services.bluemap.resourcepacks";
description = "A set of resourcepacks/mods to extract models from loaded in alphabetical order";
};
settings = mkOption {
type = (lib.types.submodule {
freeformType = format.type;
options = {
world = mkOption {
type = lib.types.path;
description = "Path to world folder containing the dimension to render";
};
};
});
description = ''
Settings for files in `maps/`.
See the default for an example with good options for the different world types.
For valid values [consult upstream docs](https://github.com/BlueMap-Minecraft/BlueMap/blob/master/BlueMapCommon/src/main/resources/de/bluecolored/bluemap/config/maps/map.conf).
'';
};
};
});
default = {
"overworld".settings = {
world = "${cfg.defaultWorld}";
ambient-light = 0.1;
cave-detection-ocean-floor = -5;
};
"nether".settings = {
world = "${cfg.defaultWorld}/DIM-1";
sorting = 100;
sky-color = "#290000";
void-color = "#150000";
ambient-light = 0.6;
world-sky-light = 0;
remove-caves-below-y = -10000;
cave-detection-ocean-floor = -5;
cave-detection-uses-block-light = true;
max-y = 90;
};
"end".settings = {
world = "${cfg.defaultWorld}/DIM1";
sorting = 200;
sky-color = "#080010";
void-color = "#080010";
ambient-light = 0.6;
world-sky-light = 0;
remove-caves-below-y = -10000;
cave-detection-ocean-floor = -5;
};
};
defaultText = lib.literalExpression ''
{
"overworld".settings = {
world = "''${cfg.defaultWorld}";
ambient-light = 0.1;
cave-detection-ocean-floor = -5;
};
"nether".settings = {
world = "''${cfg.defaultWorld}/DIM-1";
sorting = 100;
sky-color = "#290000";
void-color = "#150000";
ambient-light = 0.6;
world-sky-light = 0;
remove-caves-below-y = -10000;
cave-detection-ocean-floor = -5;
cave-detection-uses-block-light = true;
max-y = 90;
};
"end".settings = {
world = "''${cfg.defaultWorld}/DIM1";
sorting = 200;
sky-color = "#080010";
void-color = "#080010";
ambient-light = 0.6;
world-sky-light = 0;
remove-caves-below-y = -10000;
cave-detection-ocean-floor = -5;
};
};
'';
description = ''
map-specific configuration.
These correspond to views in the webapp and are usually
different dimension of a world or different render settings of the same dimension.
If you set anything in this option you must configure all dimensions yourself!
'';
};
storage = mkOption {
type = lib.types.attrsOf (lib.types.submodule {
freeformType = format.type;
options = {
storage-type = mkOption {
type = lib.types.enum [ "FILE" "SQL" ];
description = "Type of storage config";
default = "FILE";
};
};
});
description = ''
Where the rendered map will be stored.
Unless you are doing something advanced you should probably leave this alone and configure webRoot instead.
[See upstream docs](https://github.com/BlueMap-Minecraft/BlueMap/tree/master/BlueMapCommon/src/main/resources/de/bluecolored/bluemap/config/storages)
'';
default = {
"file" = {
root = "${cfg.webRoot}/maps";
};
};
defaultText = lib.literalExpression ''
{
"file" = {
root = "''${config.services.bluemap.webRoot}/maps";
};
}
'';
};
resourcepacks = mkOption {
type = lib.types.path;
default = pkgs.linkFarm "resourcepacks" { };
description = ''
A set of resourcepacks/mods to extract models from loaded in alphabetical order.
Can be overridden on a per-map basis with `services.bluemap.maps.<name>.resourcepacks`.
'';
};
};
config = lib.mkIf cfg.enable {
assertions =
[ { assertion = config.services.bluemap.eula;
message = ''
You have enabled bluemap but have not accepted minecraft's EULA.
You can achieve this through setting `services.bluemap.eula = true`
'';
}
];
services.bluemap.coreSettings.accept-download = cfg.eula;
systemd.services."render-bluemap-maps" = lib.mkIf cfg.enableRender {
serviceConfig = {
Type = "oneshot";
Group = "nginx";
UMask = "026";
};
script = lib.strings.concatStringsSep "\n" ((lib.attrsets.mapAttrsToList
(name: value: "${lib.getExe pkgs.bluemap} -c ${renderConfigFolder name value} -r")
cfg.maps) ++ [ "${lib.getExe pkgs.bluemap} -c ${webappConfigFolder} -gs" ]);
};
systemd.timers."render-bluemap-maps" = lib.mkIf cfg.enableRender {
wantedBy = [ "timers.target" ];
timerConfig = {
OnCalendar = cfg.onCalendar;
Persistent = true;
Unit = "render-bluemap-maps.service";
};
};
services.nginx.virtualHosts = lib.mkIf cfg.enableNginx {
"${cfg.host}" = {
root = config.services.bluemap.webRoot;
locations = {
"~* ^/maps/[^/]*/tiles/".extraConfig = ''
error_page 404 = @empty;
'';
"@empty".return = "204";
};
};
};
};
meta = {
maintainers = with lib.maintainers; [ dandellion h7x4 ];
};
}


@@ -27,5 +27,4 @@ lib.mkMerge [
 (mkRunner "alpha")
 (mkRunner "beta")
 (mkRunner "epsilon")
-{ virtualisation.podman.enable = true; }
 ]


@ -1,4 +1,4 @@
{ config, values, fp, pkgs, lib, ... }: { config, values, pkgs, ... }:
let let
cfg = config.services.gitea; cfg = config.services.gitea;
domain = "git.pvv.ntnu.no"; domain = "git.pvv.ntnu.no";
@@ -6,9 +6,6 @@ let
in { in {
imports = [ imports = [
./ci.nix ./ci.nix
./gpg.nix
./import-users
./web-secret-provider
]; ];
sops.secrets = { sops.secrets = {
@@ -16,27 +13,24 @@
owner = "gitea"; owner = "gitea";
group = "gitea"; group = "gitea";
}; };
"gitea/email-password" = { "gitea/passwd-ssh-key" = { };
owner = "gitea"; "gitea/ssh-known-hosts" = { };
group = "gitea"; "gitea/import-user-env" = { };
};
}; };
services.gitea = { services.gitea = {
enable = true; enable = true;
stateDir = "/data/gitea";
appName = "PVV Git"; appName = "PVV Git";
database = { database = {
type = "postgres"; type = "postgres";
host = "postgres.pvv.ntnu.no"; host = "postgres.pvv.ntnu.no";
port = config.services.postgresql.settings.port; port = config.services.postgresql.port;
passwordFile = config.sops.secrets."gitea/database".path; passwordFile = config.sops.secrets."gitea/database".path;
createDatabase = false; createDatabase = false;
}; };
mailerPasswordFile = config.sops.secrets."gitea/email-password".path;
# https://docs.gitea.com/administration/config-cheat-sheet
settings = { settings = {
server = { server = {
DOMAIN = domain; DOMAIN = domain;
@@ -44,91 +38,16 @@
PROTOCOL = "http+unix"; PROTOCOL = "http+unix";
SSH_PORT = sshPort; SSH_PORT = sshPort;
START_SSH_SERVER = true; START_SSH_SERVER = true;
START_LFS_SERVER = true;
LANDING_PAGE = "explore";
};
mailer = {
ENABLED = true;
FROM = "gitea@pvv.ntnu.no";
PROTOCOL = "smtp";
SMTP_ADDR = "smtp.pvv.ntnu.no";
SMTP_PORT = 587;
USER = "gitea@pvv.ntnu.no";
SUBJECT_PREFIX = "[pvv-git]";
};
metrics = {
ENABLED = true;
ENABLED_ISSUE_BY_LABEL = true;
ENABLED_ISSUE_BY_REPOSITORY = true;
}; };
indexer.REPO_INDEXER_ENABLED = true; indexer.REPO_INDEXER_ENABLED = true;
service = { service.DISABLE_REGISTRATION = true;
DISABLE_REGISTRATION = true;
ENABLE_NOTIFY_MAIL = true;
AUTO_WATCH_NEW_REPOS = false;
};
admin.DEFAULT_EMAIL_NOTIFICATIONS = "onmention";
session.COOKIE_SECURE = true; session.COOKIE_SECURE = true;
database.LOG_SQL = false; database.LOG_SQL = false;
repository = {
PREFERRED_LICENSES = lib.concatStringsSep "," [
"AGPL-3.0-only"
"AGPL-3.0-or-later"
"Apache-2.0"
"BSD-3-Clause"
"CC-BY-4.0"
"CC-BY-NC-4.0"
"CC-BY-NC-ND-4.0"
"CC-BY-NC-SA-4.0"
"CC-BY-ND-4.0"
"CC-BY-SA-4.0"
"CC0-1.0"
"GPL-2.0-only"
"GPL-3.0-only"
"GPL-3.0-or-later"
"LGPL-3.0-linking-exception"
"LGPL-3.0-only"
"LGPL-3.0-or-later"
"MIT"
"MPL-2.0"
"Unlicense"
];
DEFAULT_REPO_UNITS = lib.concatStringsSep "," [
"repo.code"
"repo.issues"
"repo.pulls"
"repo.releases"
];
};
picture = { picture = {
DISABLE_GRAVATAR = true; DISABLE_GRAVATAR = true;
ENABLE_FEDERATED_AVATAR = false; ENABLE_FEDERATED_AVATAR = false;
}; };
actions.ENABLED = true; actions.ENABLED = true;
ui = {
REACTIONS = lib.concatStringsSep "," [
"+1"
"-1"
"laugh"
"confused"
"heart"
"hooray"
"rocket"
"eyes"
"100"
"anger"
"astonished"
"no_good"
"ok_hand"
"pensive"
"pizza"
"point_up"
"sob"
"skull"
"upside_down_face"
"shrug"
];
};
"ui.meta".DESCRIPTION = "Bokstavelig talt programvareverkstedet"; "ui.meta".DESCRIPTION = "Bokstavelig talt programvareverkstedet";
}; };
}; };
@@ -138,54 +57,49 @@
services.nginx.virtualHosts."${domain}" = { services.nginx.virtualHosts."${domain}" = {
forceSSL = true; forceSSL = true;
enableACME = true; enableACME = true;
kTLS = true; locations."/" = {
locations = {
"/" = {
proxyPass = "http://unix:${cfg.settings.server.HTTP_ADDR}"; proxyPass = "http://unix:${cfg.settings.server.HTTP_ADDR}";
recommendedProxySettings = true;
extraConfig = '' extraConfig = ''
client_max_body_size 512M; client_max_body_size 512M;
''; '';
}; };
"/metrics" = {
proxyPass = "http://unix:${cfg.settings.server.HTTP_ADDR}";
extraConfig = ''
allow ${values.hosts.ildkule.ipv4}/32;
deny all;
'';
};
};
}; };
networking.firewall.allowedTCPPorts = [ sshPort ]; networking.firewall.allowedTCPPorts = [ sshPort ];
# Extra customization # Automatically import users
systemd.services.gitea-import-users = {
services.gitea-themes.monokai = pkgs.gitea-theme-monokai; enable = true;
preStart=''${pkgs.rsync}/bin/rsync -e "${pkgs.openssh}/bin/ssh -o UserKnownHostsFile=$CREDENTIALS_DIRECTORY/ssh-known-hosts -i $CREDENTIALS_DIRECTORY/sshkey" -a pvv@smtp.pvv.ntnu.no:/etc/passwd /tmp/passwd-import'';
systemd.services.install-gitea-customization = {
description = "Install extra customization in gitea's CUSTOM_DIR";
wantedBy = [ "gitea.service" ];
requiredBy = [ "gitea.service" ];
serviceConfig = { serviceConfig = {
Type = "oneshot"; ExecStart = pkgs.writers.writePython3 "gitea-import-users" { libraries = [ pkgs.python3Packages.requests ]; } (builtins.readFile ./gitea-import-users.py);
User = cfg.user; LoadCredential=[
Group = cfg.group; "sshkey:${config.sops.secrets."gitea/passwd-ssh-key".path}"
"ssh-known-hosts:${config.sops.secrets."gitea/ssh-known-hosts".path}"
];
DynamicUser="yes";
EnvironmentFile=config.sops.secrets."gitea/import-user-env".path;
};
}; };
script = let systemd.timers.gitea-import-users = {
logo-svg = fp /assets/logo_blue_regular.svg; requires = [ "gitea.service" ];
logo-png = fp /assets/logo_blue_regular.png; after = [ "gitea.service" ];
extraLinks = pkgs.writeText "gitea-extra-links.tmpl" '' wantedBy = [ "timers.target" ];
<a class="item" href="https://www.pvv.ntnu.no/">PVV</a> timerConfig = {
<a class="item" href="https://wiki.pvv.ntnu.no/">Wiki</a> OnCalendar = "*-*-* 02:00:00";
<a class="item" href="https://git.pvv.ntnu.no/Drift/-/projects/4">Tokyo Drift Issues</a> Persistent = true;
''; Unit = "gitea-import-users.service";
in ''
install -Dm444 ${logo-svg} ${cfg.customDir}/public/assets/img/logo.svg
install -Dm444 ${logo-png} ${cfg.customDir}/public/assets/img/logo.png
install -Dm444 ${./loading.apng} ${cfg.customDir}/public/assets/img/loading.png
install -Dm444 ${extraLinks} ${cfg.customDir}/templates/custom/extra_links.tmpl
'';
}; };
};
system.activationScripts.linkGiteaLogo.text = let
logo-svg = ../../../../assets/logo_blue_regular.svg;
logo-png = ../../../../assets/logo_blue_regular.png;
in ''
install -Dm444 ${logo-svg} ${cfg.stateDir}/custom/public/img/logo.svg
install -Dm444 ${logo-png} ${cfg.stateDir}/custom/public/img/logo.png
install -Dm444 ${./loading.apng} ${cfg.stateDir}/custom/public/img/loading.png
'';
} }


@@ -0,0 +1,94 @@
import requests
import secrets
import os
EMAIL_DOMAIN = os.getenv('EMAIL_DOMAIN')
if EMAIL_DOMAIN is None:
EMAIL_DOMAIN = 'pvv.ntnu.no'
API_TOKEN = os.getenv('API_TOKEN')
if API_TOKEN is None:
raise Exception('API_TOKEN not set')
GITEA_API_URL = os.getenv('GITEA_API_URL')
if GITEA_API_URL is None:
GITEA_API_URL = 'https://git.pvv.ntnu.no/api/v1'
BANNED_SHELLS = [
"/usr/bin/nologin",
"/usr/sbin/nologin",
"/sbin/nologin",
"/bin/false",
"/bin/msgsh",
]
existing_users = {}
# This function should only ever be called when adding users
# from the passwd file
def add_user(username, name):
user = {
"full_name": name,
"username": username,
"login_name": username,
"visibility": "public",
"source_id": 1, # 1 = SMTP
}
if username not in existing_users:
user["password"] = secrets.token_urlsafe(32)
user["must_change_password"] = False
user["visibility"] = "private"
user["email"] = username + '@' + EMAIL_DOMAIN
r = requests.post(GITEA_API_URL + '/admin/users', json=user,
headers={'Authorization': 'token ' + API_TOKEN})
if r.status_code != 201:
print('ERR: Failed to create user ' + username + ': ' + r.text)
return
print('Created user ' + username)
existing_users[username] = user
else:
r = requests.patch(GITEA_API_URL + f'/admin/users/{username}',
json=user,
headers={'Authorization': 'token ' + API_TOKEN})
if r.status_code != 200:
print('ERR: Failed to update user ' + username + ': ' + r.text)
return
print('Updated user ' + username)
def main():
# Fetch existing users
r = requests.get(GITEA_API_URL + '/admin/users',
headers={'Authorization': 'token ' + API_TOKEN})
if r.status_code != 200:
raise Exception('Failed to get users: ' + r.text)
for user in r.json():
existing_users[user['login']] = user
# Read the file, add each user
with open("/tmp/passwd-import", 'r') as f:
for line in f.readlines():
uid = int(line.split(':')[2])
if uid < 1000:
continue
shell = line.split(':')[-1]
if shell in BANNED_SHELLS:
continue
username = line.split(':')[0]
name = line.split(':')[4].split(',')[0]
add_user(username, name)
if __name__ == '__main__':
main()


@@ -1,38 +0,0 @@
{ config, pkgs, lib, ... }:
let
cfg = config.services.gitea;
GNUPGHOME = "${config.users.users.gitea.home}/gnupg";
in
{
sops.secrets."gitea/gpg-signing-key" = {
owner = cfg.user;
inherit (cfg) group;
};
systemd.services.gitea.environment = { inherit GNUPGHOME; };
systemd.tmpfiles.settings."20-gitea-gnugpg".${GNUPGHOME}.d = {
inherit (cfg) user group;
mode = "700";
};
systemd.services.gitea-ensure-gnupg-homedir = {
description = "Import gpg key for gitea";
environment = { inherit GNUPGHOME; };
serviceConfig = {
Type = "oneshot";
User = cfg.user;
PrivateNetwork = true;
};
script = ''
${lib.getExe pkgs.gnupg} --import ${config.sops.secrets."gitea/gpg-signing-key".path}
'';
};
services.gitea.settings."repository.signing" = {
SIGNING_KEY = "0549C43374D2253C";
SIGNING_NAME = "PVV Git";
SIGNING_EMAIL = "gitea@git.pvv.ntnu.no";
INITIAL_COMMIT = "always";
};
}


@@ -1,41 +0,0 @@
{ config, pkgs, lib, ... }:
let
cfg = config.services.gitea;
in
{
sops.secrets = {
"gitea/passwd-ssh-key" = { };
"gitea/ssh-known-hosts" = { };
"gitea/import-user-env" = { };
};
systemd.services.gitea-import-users = lib.mkIf cfg.enable {
enable = true;
preStart=''${pkgs.rsync}/bin/rsync -e "${pkgs.openssh}/bin/ssh -o UserKnownHostsFile=$CREDENTIALS_DIRECTORY/ssh-known-hosts -i $CREDENTIALS_DIRECTORY/sshkey" -a pvv@smtp.pvv.ntnu.no:/etc/passwd /tmp/passwd-import'';
serviceConfig = {
ExecStart = pkgs.writers.writePython3 "gitea-import-users" {
flakeIgnore = [
"E501" # Line over 80 chars lol
];
libraries = with pkgs.python3Packages; [ requests ];
} (builtins.readFile ./gitea-import-users.py);
LoadCredential=[
"sshkey:${config.sops.secrets."gitea/passwd-ssh-key".path}"
"ssh-known-hosts:${config.sops.secrets."gitea/ssh-known-hosts".path}"
];
DynamicUser="yes";
EnvironmentFile=config.sops.secrets."gitea/import-user-env".path;
};
};
systemd.timers.gitea-import-users = lib.mkIf cfg.enable {
requires = [ "gitea.service" ];
after = [ "gitea.service" ];
wantedBy = [ "timers.target" ];
timerConfig = {
OnCalendar = "*-*-* 02:00:00";
Persistent = true;
Unit = "gitea-import-users.service";
};
};
}


@@ -1,198 +0,0 @@
import requests
import secrets
import os
EMAIL_DOMAIN = os.getenv('EMAIL_DOMAIN')
if EMAIL_DOMAIN is None:
EMAIL_DOMAIN = 'pvv.ntnu.no'
API_TOKEN = os.getenv('API_TOKEN')
if API_TOKEN is None:
raise Exception('API_TOKEN not set')
GITEA_API_URL = os.getenv('GITEA_API_URL')
if GITEA_API_URL is None:
GITEA_API_URL = 'https://git.pvv.ntnu.no/api/v1'
def gitea_list_all_users() -> dict[str, dict[str, any]] | None:
r = requests.get(
GITEA_API_URL + '/admin/users',
headers={'Authorization': 'token ' + API_TOKEN}
)
if r.status_code != 200:
print('Failed to get users:', r.text)
return None
return {user['login']: user for user in r.json()}
def gitea_create_user(username: str, userdata: dict[str, any]) -> bool:
r = requests.post(
GITEA_API_URL + '/admin/users',
json=userdata,
headers={'Authorization': 'token ' + API_TOKEN},
)
if r.status_code != 201:
print(f'ERR: Failed to create user {username}:', r.text)
return False
return True
def gitea_edit_user(username: str, userdata: dict[str, any]) -> bool:
r = requests.patch(
GITEA_API_URL + f'/admin/users/{username}',
json=userdata,
headers={'Authorization': 'token ' + API_TOKEN},
)
if r.status_code != 200:
print(f'ERR: Failed to update user {username}:', r.text)
return False
return True
def gitea_list_teams_for_organization(org: str) -> dict[str, any] | None:
r = requests.get(
GITEA_API_URL + f'/orgs/{org}/teams',
headers={'Authorization': 'token ' + API_TOKEN},
)
if r.status_code != 200:
print(f"ERR: Failed to list teams for {org}:", r.text)
return None
return {team['name']: team for team in r.json()}
def gitea_add_user_to_organization_team(username: str, team_id: int) -> bool:
r = requests.put(
GITEA_API_URL + f'/teams/{team_id}/members/{username}',
headers={'Authorization': 'token ' + API_TOKEN},
)
if r.status_code != 204:
print(f'ERR: Failed to add user {username} to org team {team_id}:', r.text)
return False
return True
# If a passwd user has one of the following shells,
# it is most likely not a PVV user, but rather a system user.
# Users with these shells should thus be ignored.
BANNED_SHELLS = [
"/usr/bin/nologin",
"/usr/sbin/nologin",
"/sbin/nologin",
"/bin/false",
"/bin/msgsh",
]
# Reads out a passwd-file line for line, and filters out
# real PVV users (as opposed to system users meant for daemons and such)
def passwd_file_parser(passwd_path):
with open(passwd_path, 'r') as f:
for line in f.readlines():
uid = int(line.split(':')[2])
if uid < 1000:
continue
            shell = line.split(':')[-1].strip()  # strip the trailing newline so the comparison against BANNED_SHELLS works
if shell in BANNED_SHELLS:
continue
username = line.split(':')[0]
name = line.split(':')[4].split(',')[0]
yield (username, name)
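# For illustration only (hypothetical entry, not taken from a real passwd file):
# a line like "jdoe:x:1234:1234:John Doe,,,:/home/jdoe:/bin/bash" yields ("jdoe", "John Doe"),
# while entries with uid < 1000 or a shell from BANNED_SHELLS are skipped.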
# This function either creates a new user in gitea
# and fills it out with some default information if
# it does not exist, or ensures that the default information
# is correct if the user already exists. All user information
# (including non-default fields) is pulled from gitea and added
# to the `existing_users` dict
def add_or_patch_gitea_user(
username: str,
name: str,
existing_users: dict[str, dict[str, any]],
) -> None:
user = {
"full_name": name,
"username": username,
"login_name": username,
"source_id": 1, # 1 = SMTP
}
if username not in existing_users:
user["password"] = secrets.token_urlsafe(32)
user["must_change_password"] = False
user["visibility"] = "private"
user["email"] = username + '@' + EMAIL_DOMAIN
if not gitea_create_user(username, user):
return
print('Created user', username)
existing_users[username] = user
else:
user["visibility"] = existing_users[username]["visibility"]
if not gitea_edit_user(username, user):
return
print('Updated user', username)
# This function adds a user to a gitea team (part of organization)
# if the user is not already part of said team.
def ensure_gitea_user_is_part_of_team(
username: str,
org: str,
team_name: str,
) -> None:
teams = gitea_list_teams_for_organization(org)
if teams is None:
return
    if team_name not in teams:
        print(f'ERR: could not find team "{team_name}" in organization "{org}"')
        return
    gitea_add_user_to_organization_team(username, teams[team_name]['id'])
print(f'User {username} is now part of {org}/{team_name}')
# List of teams that all users should be part of by default
COMMON_USER_TEAMS = [
("Projects", "Members"),
("Kurs", "Members"),
]
def main():
existing_users = gitea_list_all_users()
if existing_users is None:
exit(1)
for username, name in passwd_file_parser("/tmp/passwd-import"):
print(f"Processing {username}")
add_or_patch_gitea_user(username, name, existing_users)
for org, team_name in COMMON_USER_TEAMS:
ensure_gitea_user_is_part_of_team(username, org, team_name)
print()
if __name__ == '__main__':
main()

View File

@ -1,114 +0,0 @@
{ config, pkgs, lib, ... }:
let
organizations = [
"Drift"
"Projects"
"Kurs"
];
giteaCfg = config.services.gitea;
giteaWebSecretProviderScript = pkgs.writers.writePython3 "gitea-web-secret-provider" {
libraries = with pkgs.python3Packages; [ requests ];
flakeIgnore = [
"E501" # Line over 80 chars lol
"E201" # "whitespace after {"
"E202" # "whitespace after }"
"E251" # unexpected spaces around keyword / parameter equals
"W391" # Newline at end of file
];
makeWrapperArgs = [
"--prefix PATH : ${(lib.makeBinPath [ pkgs.openssh ])}"
];
} (builtins.readFile ./gitea-web-secret-provider.py);
in
{
users.groups."gitea-web" = { };
users.users."gitea-web" = {
group = "gitea-web";
isSystemUser = true;
};
sops.secrets."gitea/web-secret-provider/token" = {
owner = "gitea-web";
group = "gitea-web";
restartUnits = [
"gitea-web-secret-provider@"
] ++ (map (org: "gitea-web-secret-provider@${org}") organizations);
};
systemd.slices.system-giteaweb = {
description = "Gitea web directories";
};
# https://www.freedesktop.org/software/systemd/man/latest/systemd.unit.html#Specifiers
# %i - instance name (after the @)
  # %d - credentials directory ($CREDENTIALS_DIRECTORY, populated by LoadCredential= below)
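  # Illustrative example: the instance "gitea-web-secret-provider@Projects.service" expands
  # %i to "Projects", so the script below receives --org Projects, reads the API token from
  # %d/token, and writes keys under /var/lib/gitea-web/keys/Projects.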
systemd.services."gitea-web-secret-provider@" = {
description = "Ensure all repos in %i has an SSH key to push web content";
requires = [ "gitea.service" "network.target" ];
serviceConfig = {
Slice = "system-giteaweb.slice";
Type = "oneshot";
ExecStart = let
args = lib.cli.toGNUCommandLineShell { } {
org = "%i";
token-path = "%d/token";
api-url = "${giteaCfg.settings.server.ROOT_URL}api/v1";
key-dir = "/var/lib/gitea-web/keys/%i";
authorized-keys-path = "/var/lib/gitea-web/authorized_keys.d/%i";
rrsync-script = pkgs.writeShellScript "rrsync-chown" ''
${lib.getExe pkgs.rrsync} -wo "$1"
${pkgs.coreutils}/bin/chown -R gitea-web:gitea-web "$1"
'';
web-dir = "/var/lib/gitea-web/web";
};
in "${giteaWebSecretProviderScript} ${args}";
User = "gitea-web";
Group = "gitea-web";
StateDirectory = "gitea-web";
StateDirectoryMode = "0750";
LoadCredential = [
"token:${config.sops.secrets."gitea/web-secret-provider/token".path}"
];
NoNewPrivileges = true;
PrivateTmp = true;
PrivateDevices = true;
ProtectSystem = true;
ProtectHome = true;
ProtectControlGroups = true;
ProtectKernelModules = true;
ProtectKernelTunables = true;
RestrictAddressFamilies = [ "AF_INET" "AF_INET6" ];
RestrictRealtime = true;
RestrictSUIDSGID = true;
MemoryDenyWriteExecute = true;
LockPersonality = true;
};
};
systemd.timers."gitea-web-secret-provider@" = {
description = "Ensure all repos in %i has an SSH key to push web content";
timerConfig = {
RandomizedDelaySec = "1h";
Persistent = true;
Unit = "gitea-web-secret-provider@%i.service";
OnCalendar = "daily";
};
};
systemd.targets.timers.wants = map (org: "gitea-web-secret-provider@${org}.timer") organizations;
services.openssh.authorizedKeysFiles = map (org: "/var/lib/gitea-web/authorized_keys.d/${org}") organizations;
users.users.nginx.extraGroups = [ "gitea-web" ];
services.nginx.virtualHosts."pages.pvv.ntnu.no" = {
kTLS = true;
forceSSL = true;
enableACME = true;
root = "/var/lib/gitea-web/web";
};
}

View File

@ -1,112 +0,0 @@
import argparse
import hashlib
import os
import requests
import subprocess
from pathlib import Path
def parse_args():
parser = argparse.ArgumentParser(description="Generate SSH keys for Gitea repositories and add them as secrets")
parser.add_argument("--org", required=True, type=str, help="The organization to generate keys for")
parser.add_argument("--token-path", metavar='PATH', required=True, type=Path, help="Path to a file containing the Gitea API token")
parser.add_argument("--api-url", metavar='URL', type=str, help="The URL of the Gitea API", default="https://git.pvv.ntnu.no/api/v1")
parser.add_argument("--key-dir", metavar='PATH', type=Path, help="The directory to store the generated keys in", default="/run/gitea-web-secret-provider")
parser.add_argument("--authorized-keys-path", metavar='PATH', type=Path, help="The path to the resulting authorized_keys file", default="/etc/ssh/authorized_keys.d/gitea-web-secret-provider")
parser.add_argument("--rrsync-script", metavar='PATH', type=Path, help="The path to a rrsync script, taking the destination path as its single argument")
parser.add_argument("--web-dir", metavar='PATH', type=Path, help="The directory to sync the repositories to", default="/var/www")
parser.add_argument("--force", action="store_true", help="Overwrite existing keys")
return parser.parse_args()
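# Illustrative invocation (mirrors the flags assembled in the systemd unit above; the
# concrete paths are assumptions taken from that unit, not defaults of this script):
#   gitea-web-secret-provider --org Projects \
#     --token-path "$CREDENTIALS_DIRECTORY/token" \
#     --api-url https://git.pvv.ntnu.no/api/v1 \
#     --key-dir /var/lib/gitea-web/keys/Projects \
#     --authorized-keys-path /var/lib/gitea-web/authorized_keys.d/Projects \
#     --rrsync-script /path/to/rrsync-chown \
#     --web-dir /var/lib/gitea-web/web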
def add_secret(args: argparse.Namespace, token: str, repo: str, name: str, secret: str):
result = requests.put(
f"{args.api_url}/repos/{args.org}/{repo}/actions/secrets/{name}",
json = { 'data': secret },
headers = { 'Authorization': 'token ' + token },
)
if result.status_code not in (201, 204):
raise Exception(f"Failed to add secret: {result.json()}")
def get_org_repo_list(args: argparse.Namespace, token: str):
result = requests.get(
f"{args.api_url}/orgs/{args.org}/repos",
headers = { 'Authorization': 'token ' + token },
)
return [repo["name"] for repo in result.json()]
def generate_ssh_key(args: argparse.Namespace, repository: str):
keyname = hashlib.sha256(args.org.encode() + repository.encode()).hexdigest()
key_path = args.key_dir / keyname
if not key_path.is_file() or args.force:
subprocess.run(
[
"ssh-keygen",
*("-t", "ed25519"),
*("-f", key_path),
*("-N", ""),
*("-C", f"{args.org}/{repository}"),
],
check=True,
stdin=subprocess.DEVNULL,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
print(f"Generated SSH key for `{args.org}/{repository}`")
with open(key_path, "r") as f:
private_key = f.read()
pub_key_path = args.key_dir / (keyname + '.pub')
with open(pub_key_path, "r") as f:
public_key = f.read()
return private_key, public_key
SSH_OPTS = ",".join([
"restrict",
"no-agent-forwarding",
"no-port-forwarding",
"no-pty",
"no-X11-forwarding",
])
def generate_authorized_keys(args: argparse.Namespace, repo_public_keys: list[tuple[str, str]]):
lines = []
for repo, public_key in repo_public_keys:
command = f"{args.rrsync_script} {args.web_dir}/{args.org}/{repo}"
lines.append(f'command="{command}",{SSH_OPTS} {public_key}')
    with open(args.authorized_keys_path, "w") as f:
        # writelines() adds no separators, so join the entries with newlines explicitly
        f.write("\n".join(lines) + "\n")
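# Each generated line has roughly this shape (the key material shown is illustrative):
#   command="<rrsync-script> <web-dir>/<org>/<repo>",restrict,no-agent-forwarding,no-port-forwarding,no-pty,no-X11-forwarding ssh-ed25519 AAAA... <org>/<repo>
# which restricts the key to rsync-ing into that repository's web directory.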
def main():
args = parse_args()
with open(args.token_path, "r") as f:
token = f.read().strip()
os.makedirs(args.key_dir, 0o700, exist_ok=True)
os.makedirs(args.authorized_keys_path.parent, 0o700, exist_ok=True)
repos = get_org_repo_list(args, token)
print(f'Found {len(repos)} repositories in `{args.org}`')
repo_public_keys = []
for repo in repos:
print(f"Locating key for `{args.org}/{repo}`")
private_key, public_key = generate_ssh_key(args, repo)
add_secret(args, token, repo, "WEB_SYNC_SSH_KEY", private_key)
repo_public_keys.append((repo, public_key))
generate_authorized_keys(args, repo_public_keys)
print(f"Wrote authorized_keys file to `{args.authorized_keys_path}`")
if __name__ == "__main__":
main()

View File

@ -1,135 +0,0 @@
<?php
/**
* Authenticate using HTTP login.
*
* @author Yorn de Jong
* @author Oystein Kristoffer Tveit
* @package simpleSAMLphp
*/
namespace SimpleSAML\Module\authpwauth\Auth\Source;
class PwAuth extends \SimpleSAML\Module\core\Auth\UserPassBase
{
protected $pwauth_bin_path;
protected $mail_domain;
public function __construct(array $info, array &$config) {
assert('is_array($info)');
assert('is_array($config)');
/* Call the parent constructor first, as required by the interface. */
parent::__construct($info, $config);
$this->pwauth_bin_path = $config['pwauth_bin_path'];
if (array_key_exists('mail_domain', $config)) {
$this->mail_domain = '@' . ltrim($config['mail_domain'], '@');
}
}
public function login(string $username, string $password): array {
$username = strtolower( $username );
if (!file_exists($this->pwauth_bin_path)) {
die("Could not find pwauth binary");
return false;
}
if (!is_executable($this->pwauth_bin_path)) {
die("pwauth binary is not executable");
return false;
}
$handle = popen($this->pwauth_bin_path, 'w');
if ($handle === FALSE) {
die("Error opening pipe to pwauth");
return false;
}
$data = "$username\n$password\n";
if (fwrite($handle, $data) !== strlen($data)) {
die("Error writing to pwauth pipe");
return false;
}
# Is the password valid?
$result = pclose( $handle );
if ($result !== 0) {
if (!in_array($result, [1, 2, 3, 4, 5, 6, 7], true)) {
die("pwauth returned $result for username $username");
}
throw new \SimpleSAML\Error\Error('WRONGUSERPASS');
}
/*
$ldap = ldap_connect('129.241.210.159', 389);
ldap_set_option($ldap, LDAP_OPT_PROTOCOL_VERSION, 3);
ldap_start_tls($ldap);
ldap_bind($ldap, 'passordendrer@pvv.ntnu.no', 'Oi7aekoh');
$search = ldap_search($ldap, 'DC=pvv,DC=ntnu,DC=no', '(sAMAccountName='.ldap_escape($username, '', LDAP_ESCAPE_FILTER).')');
$entry = ldap_first_entry($ldap, $search);
$dn = ldap_get_dn($ldap, $entry);
$newpassword = mb_convert_encoding("\"$password\"", 'UTF-16LE', 'UTF-8');
ldap_modify_batch($ldap, $dn, [
#[
# 'modtype' => LDAP_MODIFY_BATCH_REMOVE,
# 'attrib' => 'unicodePwd',
# 'values' => [$password],
#],
[
#'modtype' => LDAP_MODIFY_BATCH_ADD,
'modtype' => LDAP_MODIFY_BATCH_REPLACE,
'attrib' => 'unicodePwd',
'values' => [$newpassword],
],
]);
*/
#0 - Login OK.
        #1 - Nonexistent login or (for some configurations) incorrect password.
#2 - Incorrect password (for some configurations).
#3 - Uid number is below MIN_UNIX_UID value configured in config.h.
#4 - Login ID has expired.
#5 - Login's password has expired.
#6 - Logins to system have been turned off (usually by /etc/nologin file).
#7 - Limit on number of bad logins exceeded.
#50 - pwauth was not run with real uid SERVER_UID. If you get this
# this error code, you probably have SERVER_UID set incorrectly
# in pwauth's config.h file.
        #51 - pwauth was not given a login & password to check. This means
        # the passing of data from mod_auth_external to pwauth is messed
        # up. Most likely one is trying to pass data via environment
        # variables, while the other is trying to pass data via a pipe.
        #52 - one of several possible internal errors occurred.
$uid = $username;
# TODO: Reinstate this code once passwd is working...
/*
$cn = trim(shell_exec('getent passwd '.escapeshellarg($uid).' | cut -d: -f5 | cut -d, -f1'));
$groups = preg_split('_\\s_', shell_exec('groups '.escapeshellarg($uid)));
array_shift($groups);
array_shift($groups);
array_pop($groups);
$info = posix_getpwnam($uid);
$group = $info['gid'];
if (!in_array($group, $groups)) {
$groups[] = $group;
}
*/
$cn = "Unknown McUnknown";
$groups = array();
$result = array(
'uid' => array($uid),
'cn' => array($cn),
'group' => $groups,
);
if (isset($this->mail_domain)) {
$result['mail'] = array($uid.$this->mail_domain);
}
return $result;
}
}

File diff suppressed because it is too large

View File

@ -1,214 +0,0 @@
{ config, pkgs, lib, ... }:
let
pwAuthScript = pkgs.writeShellApplication {
name = "pwauth";
runtimeInputs = with pkgs; [ coreutils heimdal ];
text = ''
read -r user1
user2="$(echo -n "$user1" | tr -c -d '0123456789abcdefghijklmnopqrstuvwxyz')"
if test "$user1" != "$user2"
then
read -r _
exit 2
fi
kinit --password-file=STDIN "''${user1}@PVV.NTNU.NO" >/dev/null 2>/dev/null
kdestroy >/dev/null 2>/dev/null
'';
};
package = pkgs.simplesamlphp.override {
extra_files = {
# NOTE: Using self signed certificate created 30. march 2024, with command:
# openssl req -newkey rsa:4096 -new -x509 -days 365 -nodes -out idp.crt -keyout idp.pem
"metadata/saml20-idp-hosted.php" = pkgs.writeText "saml20-idp-remote.php" ''
<?php
$metadata['https://idp.pvv.ntnu.no/'] = array(
'host' => '__DEFAULT__',
'privatekey' => '${config.sops.secrets."idp/privatekey".path}',
'certificate' => '${./idp.crt}',
'auth' => 'pwauth',
);
?>
'';
"metadata/saml20-sp-remote.php" = pkgs.writeText "saml20-sp-remote.php" ''
<?php
${ lib.pipe config.services.idp.sp-remote-metadata [
(map (url: ''
$metadata['${url}'] = [
'SingleLogoutService' => [
[
'Binding' => 'urn:oasis:names:tc:SAML:2.0:bindings:HTTP-Redirect',
'Location' => '${url}module.php/saml/sp/saml2-logout.php/default-sp',
],
[
'Binding' => 'urn:oasis:names:tc:SAML:2.0:bindings:SOAP',
'Location' => '${url}module.php/saml/sp/saml2-logout.php/default-sp',
],
],
'AssertionConsumerService' => [
[
'Binding' => 'urn:oasis:names:tc:SAML:2.0:bindings:HTTP-POST',
'Location' => '${url}module.php/saml/sp/saml2-acs.php/default-sp',
'index' => 0,
],
[
'Binding' => 'urn:oasis:names:tc:SAML:2.0:bindings:HTTP-Artifact',
'Location' => '${url}module.php/saml/sp/saml2-acs.php/default-sp',
'index' => 1,
],
],
];
''))
(lib.concatStringsSep "\n")
]}
?>
'';
"config/authsources.php" = pkgs.writeText "idp-authsources.php" ''
<?php
$config = array(
'admin' => array(
'core:AdminPassword'
),
'pwauth' => array(
'authpwauth:PwAuth',
'pwauth_bin_path' => '${lib.getExe pwAuthScript}',
'mail_domain' => '@pvv.ntnu.no',
),
);
?>
'';
"config/config.php" = pkgs.runCommandLocal "simplesamlphp-config.php" { } ''
cp ${./config.php} "$out"
substituteInPlace "$out" \
--replace-warn '$SAML_COOKIE_SECURE' 'true' \
--replace-warn '$SAML_COOKIE_SALT' 'file_get_contents("${config.sops.secrets."idp/cookie_salt".path}")' \
--replace-warn '$SAML_ADMIN_NAME' '"Drift"' \
--replace-warn '$SAML_ADMIN_EMAIL' '"drift@pvv.ntnu.no"' \
--replace-warn '$SAML_ADMIN_PASSWORD' 'file_get_contents("${config.sops.secrets."idp/admin_password".path}")' \
--replace-warn '$SAML_TRUSTED_DOMAINS' 'array( "idp.pvv.ntnu.no" )' \
--replace-warn '$SAML_DATABASE_DSN' '"pgsql:host=postgres.pvv.ntnu.no;port=5432;dbname=idp"' \
--replace-warn '$SAML_DATABASE_USERNAME' '"idp"' \
--replace-warn '$SAML_DATABASE_PASSWORD' 'file_get_contents("${config.sops.secrets."idp/postgres_password".path}")' \
--replace-warn '$CACHE_DIRECTORY' '/var/cache/idp'
'';
"modules/authpwauth/src/Auth/Source/PwAuth.php" = ./authpwauth.php;
};
};
in
{
options.services.idp.sp-remote-metadata = lib.mkOption {
type = with lib.types; listOf str;
default = [ ];
description = ''
      List of URLs pointing to (simplesamlphp) service providers which the IdP should trust.
      :::{.note}
      Make sure the URL ends with a `/`
:::
'';
};
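  # Example usage from a service module (mirrors how the wiki registers itself elsewhere in this config):
  #   services.idp.sp-remote-metadata = [ "https://wiki.pvv.ntnu.no/simplesaml/" ];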
config = {
sops.secrets = {
"idp/privatekey" = {
owner = "idp";
group = "idp";
mode = "0770";
};
"idp/admin_password" = {
owner = "idp";
group = "idp";
};
"idp/postgres_password" = {
owner = "idp";
group = "idp";
};
"idp/cookie_salt" = {
owner = "idp";
group = "idp";
};
};
users.groups."idp" = { };
users.users."idp" = {
description = "PVV Identity Provider Service User";
group = "idp";
createHome = false;
isSystemUser = true;
};
systemd.tmpfiles.settings."10-idp" = {
"/var/cache/idp".d = {
user = "idp";
group = "idp";
mode = "0770";
};
"/var/lib/idp".d = {
user = "idp";
group = "idp";
mode = "0770";
};
};
services.phpfpm.pools.idp = {
user = "idp";
group = "idp";
settings = let
listenUser = config.services.nginx.user;
listenGroup = config.services.nginx.group;
in {
"pm" = "dynamic";
"pm.max_children" = 32;
"pm.max_requests" = 500;
"pm.start_servers" = 2;
"pm.min_spare_servers" = 2;
"pm.max_spare_servers" = 4;
"listen.owner" = listenUser;
"listen.group" = listenGroup;
"catch_workers_output" = true;
"php_admin_flag[log_errors]" = true;
# "php_admin_value[error_log]" = "stderr";
};
};
services.nginx.virtualHosts."idp.pvv.ntnu.no" = {
forceSSL = true;
enableACME = true;
kTLS = true;
root = "${package}/share/php/simplesamlphp/public";
locations = {
# based on https://simplesamlphp.org/docs/stable/simplesamlphp-install.html#configuring-nginx
"/" = {
alias = "${package}/share/php/simplesamlphp/public/";
index = "index.php";
extraConfig = ''
location ~ ^/(?<phpfile>.+?\.php)(?<pathinfo>/.*)?$ {
include ${pkgs.nginx}/conf/fastcgi_params;
fastcgi_pass unix:${config.services.phpfpm.pools.idp.socket};
fastcgi_param SCRIPT_FILENAME ${package}/share/php/simplesamlphp/public/$phpfile;
fastcgi_param SCRIPT_NAME /$phpfile;
fastcgi_param PATH_INFO $pathinfo if_not_empty;
}
'';
};
"^~ /simplesaml/".extraConfig = ''
rewrite ^/simplesaml/(.*)$ /$1 redirect;
return 404;
'';
"/robots.txt" = {
root = pkgs.writeTextDir "robots.txt" ''
User-agent: *
Disallow: /
'';
};
};
};
};
}

View File

@ -1,33 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIFqTCCA5GgAwIBAgIUL2+PMM9rE9wI5W2yNnJ2CmfGxh0wDQYJKoZIhvcNAQEL
BQAwZDELMAkGA1UEBhMCTk8xEzARBgNVBAgMClNvbWUtU3RhdGUxHjAcBgNVBAoM
FVByb2dyYW12YXJldmVya3N0ZWRldDEgMB4GCSqGSIb3DQEJARYRZHJpZnRAcHZ2
Lm50bnUubm8wHhcNMjQwMzMwMDAyNjQ0WhcNMjUwMzMwMDAyNjQ0WjBkMQswCQYD
VQQGEwJOTzETMBEGA1UECAwKU29tZS1TdGF0ZTEeMBwGA1UECgwVUHJvZ3JhbXZh
cmV2ZXJrc3RlZGV0MSAwHgYJKoZIhvcNAQkBFhFkcmlmdEBwdnYubnRudS5ubzCC
AiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAL/0l0jdV+PoVxdd21F+2NLm
JN6sZmSJexOSk/sFjhhF4WMtjOfDAQYjt3hlLPyYl//jCe9WteavvtdCx1tHJitd
xjOUJ/leVjHzBttCVZR+iTlQtpsZ2TbRMJ5Fcfl82njlPecV4umJvnnFXawE4Qee
dE2OM8ODjjrK1cNaHR74tyZCwmdOxNHXZ7RN22p9kZjLD18LQyNr5igaDBeaZkyk
Gxbg4tbP51x9JFRLF7kUlyAc83geFnw6v/wBahr49m/X4y7xE0rdPb2L0moUjmOO
Zyl3hvxMI3+g/0FVMM5eKmfIIP2rIVEAa6MWMx0vPjC6h2fIyxkUqg5C8aFlpqav
+8f2rUc+JfdiFsIZNrylBXsleGzS+/wY1uB/pAy5Vg9WCp+eC75EtWMt0k2f442G
rhKa3lAZ6GIYrtEiQiNGM1aT1Cs1nqTtslfnHiuAKBefLjCXgq9uvL2yRodwe9/m
oZiqYnLHy/v1xfnF5rKTcRmOleU3tc+nlN6tZSGC1nZgMpqpoqdcbJXAkvaJ2Km4
sl0YS28VQnztgzuVPNdnv8lcS6HmkaGaNWbepKgWeaH5oT7O6u99wZIv88m+tf5m
Eu197YVpcclnojQCYKauWcQFsXS20egsVP87Qk0e2SHmGTUQp6YEYX6RLjkg7/vS
BelDBbCldraNVEiC0jmpAgMBAAGjUzBRMB0GA1UdDgQWBBSL0yofG5NEmzFIRuqC
xmyiuZW6DTAfBgNVHSMEGDAWgBSL0yofG5NEmzFIRuqCxmyiuZW6DTAPBgNVHRMB
Af8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4ICAQAZZVs7BLk/NLq3f4Ik8qH3IoDN
2m4XXRZS+xxw5RwctgSnik7AffgAfv8QQm2co8UYkHbB0whaG1PDz+L7wB1hVkWn
DVUaJcKQnn0x+sNU5LoTbjI0PlaST7PO5D0OMFab8FSNxpzzpbUcgZUhelc99Ri/
2Gh8mf4b3Y3Uzq6YKFsuFM65OuJhH8f1w6onai9x28t6tERHUSUfJ2keXzU4ytCV
EitWXwhe759VLqmdP4BATwlCOCuwa5aDeGcWRIqFpYIn0SOAmVV3o4V71JdZc1jE
fuOo/PbiHZ+R9ZGbh98aMidb0moL1ZDhmir9KbedezNyki6JJ72mVclhLqUajFxr
T39FXd5e2+QBMHPPhVFznQoHWnHEbZigTt61b0cg/TsxaxOkF4Ilmr/2DmSWysWK
TF5eq8hp6/53qVbXXSzrCjxd3wzGnRabsEVPX/L2hYDx81hluovJQCtskqTq1joI
W2R7AO5Sdyc6NfOR85kl0HXzHa+0Slsf8ZDs5nCz/mOOPoAGl7IxF7xQ6kPO7V+U
HdGE2tkblM/TrAObJH0HXySeJGI7Vfya+D1Y8IqGtyZtWyx1DmlA/OezGGf5D3rG
88LywHQQ2mQ+8aosBTE4+HQ+apLKZBprqQKuiDjT1RSUbfUHQkYuL+D1oIVmklAc
UxTpf01QJnZkMqf5NQ==
-----END CERTIFICATE-----

View File

@ -1,22 +0,0 @@
''
<?php
$metadata['https://idp.pvv.ntnu.no/'] = [
'metadata-set' => 'saml20-idp-hosted',
'entityid' => 'https://idp.pvv.ntnu.no/',
'SingleSignOnService' => [
[
'Binding' => 'urn:oasis:names:tc:SAML:2.0:bindings:HTTP-Redirect',
'Location' => 'https://idp.pvv.ntnu.no/module.php/saml/idp/singleSignOnService',
],
],
'SingleLogoutService' => [
[
'Binding' => 'urn:oasis:names:tc:SAML:2.0:bindings:HTTP-Redirect',
'Location' => 'https://idp.pvv.ntnu.no/module.php/saml/idp/singleLogout',
],
],
'NameIDFormat' => [ 'urn:oasis:names:tc:SAML:2.0:nameid-format:transient' ],
'certificate' => '${./idp.crt}',
];
?>
''

View File

@ -1,14 +0,0 @@
{ config, pkgs, lib, ... }:
{
security.krb5 = {
enable = true;
settings = {
libdefaults = {
default_realm = "PVV.NTNU.NO";
dns_lookup_realm = "yes";
dns_lookup_kdc = "yes";
};
realms."PVV.NTNU.NO".admin_server = "kdc.pvv.ntnu.no";
};
};
}

File diff suppressed because it is too large

View File

@ -0,0 +1,24 @@
{ pkgs, config, values, ... }:
{
sops.secrets."keys/postgres/keycloak" = {
owner = "keycloak";
group = "keycloak";
restartUnits = [ "keycloak.service" ];
};
services.keycloak = {
enable = true;
settings = {
hostname = "auth.pvv.ntnu.no";
# hostname-strict-backchannel = true;
};
database = {
host = values.hosts.bicep.ipv4;
createLocally = false;
passwordFile = config.sops.secrets."keys/postgres/keycloak".path;
caCert = "${pkgs.cacert}/etc/ssl/certs/ca-bundle.crt";
};
};
}

View File

@ -0,0 +1,175 @@
{ pkgs, lib, config, values, ... }: let
cfg = config.services.mediawiki;
# "mediawiki"
user = config.systemd.services.mediawiki-init.serviceConfig.User;
# "mediawiki"
group = config.users.users.${user}.group;
in {
sops.secrets = {
"mediawiki/password" = {
restartUnits = [ "mediawiki-init.service" "phpfpm-mediawiki.service" ];
owner = user;
group = group;
};
"keys/postgres/mediawiki" = {
restartUnits = [ "mediawiki-init.service" "phpfpm-mediawiki.service" ];
owner = user;
group = group;
};
};
services.mediawiki = {
enable = true;
name = "Programvareverkstedet";
passwordFile = config.sops.secrets."mediawiki/password".path;
passwordSender = "drift@pvv.ntnu.no";
database = {
type = "postgres";
host = "postgres.pvv.ntnu.no";
port = config.services.postgresql.port;
passwordFile = config.sops.secrets."keys/postgres/mediawiki".path;
createLocally = false;
# TODO: create a normal database and copy over old data when the service is production ready
name = "mediawiki_test";
};
# Host through nginx
webserver = "none";
poolConfig = let
listenUser = config.services.nginx.user;
listenGroup = config.services.nginx.group;
in {
inherit user group;
"pm" = "dynamic";
"pm.max_children" = 32;
"pm.max_requests" = 500;
"pm.start_servers" = 2;
"pm.min_spare_servers" = 2;
"pm.max_spare_servers" = 4;
"listen.owner" = listenUser;
"listen.group" = listenGroup;
"php_admin_value[error_log]" = "stderr";
"php_admin_flag[log_errors]" = "on";
"env[PATH]" = lib.makeBinPath [ pkgs.php ];
"catch_workers_output" = true;
      # to accept *.html files
"security.limit_extensions" = "";
};
extensions = {
DeleteBatch = pkgs.fetchzip {
url = "https://extdist.wmflabs.org/dist/extensions/DeleteBatch-REL1_39-995ea6f.tar.gz";
sha256 = "sha256-0F4GLCy2f5WcWIY2YgF1tVxgYbglR0VOsj/pMrW93b8=";
};
UserMerge = pkgs.fetchzip {
url = "https://extdist.wmflabs.org/dist/extensions/UserMerge-REL1_39-b10d50e.tar.gz";
sha256 = "sha256-bXhj1+OlOUJDbvEuc8iwqb1LLEu6cN6+C/7cAvnWPOQ=";
};
PluggableAuth = pkgs.fetchzip {
url = "https://extdist.wmflabs.org/dist/extensions/PluggableAuth-REL1_39-1210fc3.tar.gz";
sha256 = "sha256-F6bTMCzkK3kZwZGIsNE87WlZWqXXmTMhEjApO99YKR0=";
};
SimpleSAMLphp = pkgs.fetchzip {
url = "https://extdist.wmflabs.org/dist/extensions/SimpleSAMLphp-REL1_39-dcf0acb.tar.gz";
sha256 = "sha256-tCvFmb2+q2rxms+lRo5pgoI3h6GjCwXAR8XisPg03TQ=";
};
};
extraConfig = let
SimpleSAMLphpRepo = pkgs.stdenvNoCC.mkDerivation rec {
pname = "configuredSimpleSAML";
version = "2.0.4";
src = pkgs.fetchzip {
url = "https://github.com/simplesamlphp/simplesamlphp/releases/download/v${version}/simplesamlphp-${version}.tar.gz";
sha256 = "sha256-pfMV/VmqqxgtG7Nx4s8MW4tWSaxOkVPtCRJwxV6RDSE=";
};
buildPhase = ''
        # quote the delimiter so the shell does not expand $config inside the heredoc
        cat > config/authsources.php << 'EOF'
<?php
$config = array(
'default-sp' => array(
'saml:SP',
'idp' => 'https://idp.pvv.ntnu.no/',
),
);
EOF
'';
installPhase = ''
cp -r . $out
'';
};
in ''
$wgServer = "https://bekkalokk.pvv.ntnu.no";
$wgLocaltimezone = "Europe/Oslo";
# Only allow login through SSO
$wgEnableEmail = false;
$wgEnableUserEmail = false;
$wgEmailAuthentication = false;
$wgGroupPermissions['*']['createaccount'] = false;
$wgGroupPermissions['*']['autocreateaccount'] = true;
$wgPluggableAuth_EnableAutoLogin = true;
# Disable anonymous editing
$wgGroupPermissions['*']['edit'] = false;
# Styling
$wgLogo = "/PNG/PVV-logo.png";
$wgDefaultSkin = "monobook";
# Misc
$wgEmergencyContact = "${cfg.passwordSender}";
$wgShowIPinHeader = false;
$wgUseTeX = false;
$wgLocalInterwiki = $wgSitename;
# SimpleSAML
$wgSimpleSAMLphp_InstallDir = "${SimpleSAMLphpRepo}";
$wgSimpleSAMLphp_AuthSourceId = "default-sp";
$wgSimpleSAMLphp_RealNameAttribute = "cn";
$wgSimpleSAMLphp_EmailAttribute = "mail";
$wgSimpleSAMLphp_UsernameAttribute = "uid";
# Fix https://github.com/NixOS/nixpkgs/issues/183097
$wgDBserver = "${toString cfg.database.host}";
'';
};
# Override because of https://github.com/NixOS/nixpkgs/issues/183097
systemd.services.mediawiki-init.script = let
# According to module
stateDir = "/var/lib/mediawiki";
pkg = cfg.finalPackage;
mediawikiConfig = config.services.phpfpm.pools.mediawiki.phpEnv.MEDIAWIKI_CONFIG;
inherit (lib) optionalString mkForce;
in mkForce ''
if ! test -e "${stateDir}/secret.key"; then
tr -dc A-Za-z0-9 </dev/urandom 2>/dev/null | head -c 64 > ${stateDir}/secret.key
fi
echo "exit( wfGetDB( DB_MASTER )->tableExists( 'user' ) ? 1 : 0 );" | \
${pkgs.php}/bin/php ${pkg}/share/mediawiki/maintenance/eval.php --conf ${mediawikiConfig} && \
${pkgs.php}/bin/php ${pkg}/share/mediawiki/maintenance/install.php \
--confpath /tmp \
--scriptpath / \
--dbserver "${cfg.database.host}" \
--dbport ${toString cfg.database.port} \
--dbname ${cfg.database.name} \
${optionalString (cfg.database.tablePrefix != null) "--dbprefix ${cfg.database.tablePrefix}"} \
--dbuser ${cfg.database.user} \
${optionalString (cfg.database.passwordFile != null) "--dbpassfile ${cfg.database.passwordFile}"} \
--passfile ${cfg.passwordFile} \
--dbtype ${cfg.database.type} \
${cfg.name} \
admin
${pkgs.php}/bin/php ${pkg}/share/mediawiki/maintenance/update.php --conf ${mediawikiConfig} --quick
'';
}

View File

@ -1,231 +0,0 @@
{ pkgs, lib, fp, config, values, pkgs-unstable, ... }: let
cfg = config.services.mediawiki;
# "mediawiki"
user = config.systemd.services.mediawiki-init.serviceConfig.User;
# "mediawiki"
group = config.users.users.${user}.group;
simplesamlphp = pkgs.simplesamlphp.override {
extra_files = {
"metadata/saml20-idp-remote.php" = pkgs.writeText "mediawiki-saml20-idp-remote.php" (import ../idp-simplesamlphp/metadata.php.nix);
"config/authsources.php" = ./simplesaml-authsources.php;
"config/config.php" = pkgs.runCommandLocal "mediawiki-simplesamlphp-config.php" { } ''
cp ${./simplesaml-config.php} "$out"
substituteInPlace "$out" \
--replace-warn '$SAML_COOKIE_SECURE' 'true' \
--replace-warn '$SAML_COOKIE_SALT' 'file_get_contents("${config.sops.secrets."mediawiki/simplesamlphp/cookie_salt".path}")' \
--replace-warn '$SAML_ADMIN_NAME' '"Drift"' \
--replace-warn '$SAML_ADMIN_EMAIL' '"drift@pvv.ntnu.no"' \
--replace-warn '$SAML_ADMIN_PASSWORD' 'file_get_contents("${config.sops.secrets."mediawiki/simplesamlphp/admin_password".path}")' \
--replace-warn '$SAML_TRUSTED_DOMAINS' 'array( "wiki.pvv.ntnu.no" )' \
--replace-warn '$SAML_DATABASE_DSN' '"pgsql:host=postgres.pvv.ntnu.no;port=5432;dbname=mediawiki_simplesamlphp"' \
--replace-warn '$SAML_DATABASE_USERNAME' '"mediawiki_simplesamlphp"' \
--replace-warn '$SAML_DATABASE_PASSWORD' 'file_get_contents("${config.sops.secrets."mediawiki/simplesamlphp/postgres_password".path}")' \
--replace-warn '$CACHE_DIRECTORY' '/var/cache/mediawiki/idp'
'';
};
};
in {
services.idp.sp-remote-metadata = [ "https://wiki.pvv.ntnu.no/simplesaml/" ];
sops.secrets = lib.pipe [
"mediawiki/password"
"mediawiki/postgres_password"
"mediawiki/simplesamlphp/postgres_password"
"mediawiki/simplesamlphp/cookie_salt"
"mediawiki/simplesamlphp/admin_password"
] [
(map (key: lib.nameValuePair key {
owner = user;
group = group;
restartUnits = [ "phpfpm-mediawiki.service" ];
}))
lib.listToAttrs
];
services.mediawiki = {
enable = true;
name = "Programvareverkstedet";
passwordFile = config.sops.secrets."mediawiki/password".path;
passwordSender = "drift@pvv.ntnu.no";
database = {
type = "mysql";
host = "mysql.pvv.ntnu.no";
port = 3306;
user = "mediawiki";
passwordFile = config.sops.secrets."mediawiki/postgres_password".path;
createLocally = false;
# TODO: create a normal database and copy over old data when the service is production ready
name = "mediawiki";
};
webserver = "nginx";
nginx.hostName = "wiki.pvv.ntnu.no";
poolConfig = {
inherit user group;
"pm" = "dynamic";
"pm.max_children" = 32;
"pm.max_requests" = 500;
"pm.start_servers" = 2;
"pm.min_spare_servers" = 2;
"pm.max_spare_servers" = 4;
"catch_workers_output" = true;
"php_admin_flag[log_errors]" = true;
# "php_admin_value[error_log]" = "stderr";
      # to accept *.html files
"security.limit_extensions" = "";
};
extensions = {
inherit (pkgs.mediawiki-extensions)
CodeEditor
CodeMirror
DeleteBatch
PluggableAuth
Popups
Scribunto
SimpleSAMLphp
TemplateData
TemplateStyles
UserMerge
VisualEditor
WikiEditor
;
};
extraConfig = ''
$wgServer = "https://wiki.pvv.ntnu.no";
$wgLocaltimezone = "Europe/Oslo";
# Only allow login through SSO
$wgEnableEmail = false;
$wgEnableUserEmail = false;
$wgEmailAuthentication = false;
$wgGroupPermissions['*']['createaccount'] = false;
$wgGroupPermissions['*']['autocreateaccount'] = true;
$wgPluggableAuth_EnableAutoLogin = false;
# Misc. permissions
$wgGroupPermissions['*']['edit'] = false;
$wgGroupPermissions['*']['read'] = true;
# Allow subdirectories in article URLs
$wgNamespacesWithSubpages[NS_MAIN] = true;
# Styling
$wgLogos = array(
"2x" => "/PNG/PVV-logo.png",
"icon" => "/PNG/PVV-logo.svg",
);
$wgDefaultSkin = "vector-2022";
# from https://github.com/wikimedia/mediawiki-skins-Vector/blob/master/skin.json
$wgVectorDefaultSidebarVisibleForAnonymousUser = true;
$wgVectorResponsive = true;
# Misc
$wgEmergencyContact = "${cfg.passwordSender}";
$wgUseTeX = false;
$wgLocalInterwiki = $wgSitename;
# Fix https://github.com/NixOS/nixpkgs/issues/183097
$wgDBserver = "${toString cfg.database.host}";
$wgAllowCopyUploads = true;
# Misc program paths
$wgFFmpegLocation = '${pkgs.ffmpeg}/bin/ffmpeg';
$wgExiftool = '${pkgs.exiftool}/bin/exiftool';
$wgExiv2Command = '${pkgs.exiv2}/bin/exiv2';
# See https://gist.github.com/sergejmueller/088dce028b6dd120a16e
$wgJpegTran = '${pkgs.mozjpeg}/bin/jpegtran';
$wgGitBin = '${pkgs.git}/bin/git';
# Debugging
$wgShowExceptionDetails = false;
$wgShowIPinHeader = false;
# EXT:{SimpleSAML,PluggableAuth}
$wgSimpleSAMLphp_InstallDir = "${simplesamlphp}/share/php/simplesamlphp/";
$wgPluggableAuth_Config['Log in using SAML'] = [
'plugin' => 'SimpleSAMLphp',
'data' => [
'authSourceId' => 'default-sp',
'usernameAttribute' => 'uid',
'emailAttribute' => 'mail',
'realNameAttribute' => 'cn',
]
];
# EXT:Scribunto
$wgScribuntoDefaultEngine = 'luastandalone';
$wgScribuntoEngineConf['luastandalone']['luaPath'] = '${pkgs.lua}/bin';
# EXT:WikiEditor
$wgWikiEditorRealtimePreview = true;
'';
};
# Cache directory for simplesamlphp
# systemd.services.phpfpm-mediawiki.serviceConfig.CacheDirectory = "mediawiki/simplesamlphp";
systemd.tmpfiles.settings."10-mediawiki"."/var/cache/mediawiki/simplesamlphp".d = {
user = "mediawiki";
group = "mediawiki";
mode = "0770";
};
users.groups.mediawiki.members = [ "nginx" ];
services.nginx.virtualHosts."wiki.pvv.ntnu.no" = {
kTLS = true;
forceSSL = true;
enableACME = true;
locations = {
"= /wiki/Main_Page" = lib.mkForce {
return = "301 /wiki/Programvareverkstedet";
};
# based on https://simplesamlphp.org/docs/stable/simplesamlphp-install.html#configuring-nginx
"^~ /simplesaml/" = {
alias = "${simplesamlphp}/share/php/simplesamlphp/public/";
index = "index.php";
extraConfig = ''
location ~ ^/simplesaml/(?<phpfile>.+?\.php)(?<pathinfo>/.*)?$ {
include ${pkgs.nginx}/conf/fastcgi_params;
fastcgi_pass unix:${config.services.phpfpm.pools.mediawiki.socket};
fastcgi_param SCRIPT_FILENAME ${simplesamlphp}/share/php/simplesamlphp/public/$phpfile;
# Must be prepended with the baseurlpath
fastcgi_param SCRIPT_NAME /simplesaml/$phpfile;
fastcgi_param PATH_INFO $pathinfo if_not_empty;
}
'';
};
"= /PNG/PVV-logo.svg".alias = fp /assets/logo_blue_regular.svg;
"= /PNG/PVV-logo.png".alias = fp /assets/logo_blue_regular.png;
"= /favicon.ico".alias = pkgs.runCommandLocal "mediawiki-favicon.ico" {
buildInputs = with pkgs; [ imagemagick ];
} ''
convert \
-resize x64 \
-gravity center \
-crop 64x64+0+0 \
${fp /assets/logo_blue_regular.png} \
-flatten \
-colors 256 \
-background transparent \
$out
'';
};
};
}

View File

@ -1,11 +0,0 @@
<?php
$config = array(
'admin' => array(
'core:AdminPassword'
),
'default-sp' => array(
'saml:SP',
'entityID' => 'https://wiki.pvv.ntnu.no/simplesaml/',
'idp' => 'https://idp.pvv.ntnu.no/',
),
);

File diff suppressed because it is too large

View File

@ -1,4 +0,0 @@
{ pkgs, config, ... }:
{
services.nginx.enable = true;
}

View File

@ -0,0 +1,22 @@
{ pkgs, config, ... }:
{
imports = [
./ingress.nix
];
security.acme = {
acceptTerms = true;
defaults.email = "drift@pvv.ntnu.no";
};
services.nginx = {
enable = true;
recommendedTlsSettings = true;
recommendedProxySettings = true;
recommendedOptimisation = true;
recommendedGzipSettings = true;
};
networking.firewall.allowedTCPPorts = [ 80 443 ];
}

View File

@ -0,0 +1,55 @@
{ config, lib, ... }:
{
services.nginx.virtualHosts = {
"www2.pvv.ntnu.no" = {
serverAliases = [ "www2.pvv.org" "pvv.ntnu.no" "pvv.org" ];
addSSL = true;
enableACME = true;
locations = {
# Proxy home directories
"/~" = {
extraConfig = ''
proxy_redirect off;
proxy_pass https://tom.pvv.ntnu.no;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
'';
};
# Redirect old wiki entries
"/disk".return = "301 https://www.pvv.ntnu.no/pvv/Diskkjøp";
"/dok/boker.php".return = "301 https://www.pvv.ntnu.no/pvv/Bokhyllen";
"/styret/lover/".return = "301 https://www.pvv.ntnu.no/pvv/Lover";
"/styret/".return = "301 https://www.pvv.ntnu.no/pvv/Styret";
"/info/".return = "301 https://www.pvv.ntnu.no/pvv/";
"/info/maskinpark/".return = "301 https://www.pvv.ntnu.no/pvv/Maskiner";
"/medlemssider/meldinn.php".return = "301 https://www.pvv.ntnu.no/pvv/Medlemskontingent";
"/diverse/medlems-sider.php".return = "301 https://www.pvv.ntnu.no/pvv/Medlemssider";
"/cert/".return = "301 https://www.pvv.ntnu.no/pvv/CERT";
"/drift".return = "301 https://www.pvv.ntnu.no/pvv/Drift";
"/diverse/abuse.php".return = "301 https://www.pvv.ntnu.no/pvv/CERT/Abuse";
"/nerds/".return = "301 https://www.pvv.ntnu.no/pvv/Nerdepizza";
# TODO: Redirect webmail
"/webmail".return = "301 https://webmail.pvv.ntnu.no/squirrelmail";
# Redirect everything else to the main website
"/".return = "301 https://www.pvv.ntnu.no$request_uri";
# Proxy the matrix well-known files
      # Host has to be set before proxy_pass
# The header must be set so nginx on the other side routes it to the right place
"/.well-known/matrix/" = {
extraConfig = ''
proxy_set_header Host matrix.pvv.ntnu.no;
proxy_pass https://matrix.pvv.ntnu.no/.well-known/matrix/;
'';
};
};
};
};
}

View File

@ -1,51 +0,0 @@
{ lib, ... }:
let
pools = map (pool: "phpfpm-${pool}") [
"idp"
"mediawiki"
"pvv-nettsiden"
"roundcube"
"snappymail"
];
in
{
# Source: https://www.pierreblazquez.com/2023/06/17/how-to-harden-apache-php-fpm-daemons-using-systemd/
systemd.services = lib.genAttrs pools (_: {
serviceConfig = let
caps = [
"CAP_NET_BIND_SERVICE"
"CAP_SETGID"
"CAP_SETUID"
"CAP_CHOWN"
"CAP_KILL"
"CAP_IPC_LOCK"
"CAP_DAC_OVERRIDE"
];
in {
AmbientCapabilities = caps;
CapabilityBoundingSet = caps;
DeviceAllow = [ "" ];
LockPersonality = true;
MemoryDenyWriteExecute = false;
NoNewPrivileges = true;
PrivateMounts = true;
ProtectClock = true;
ProtectControlGroups = true;
ProtectHome = true;
ProtectHostname = true;
ProtectKernelLogs = true;
ProtectKernelModules = true;
ProtectKernelTunables = true;
RemoveIPC = true;
UMask = "0077";
RestrictNamespaces = "~mnt";
RestrictRealtime = true;
RestrictSUIDSGID = true;
SystemCallArchitectures = "native";
KeyringMode = "private";
SystemCallFilter = [
"@system-service"
];
};
});
}

View File

@ -1,104 +0,0 @@
{ config, pkgs, lib, ... }:
let
cfg = config.services.vaultwarden;
domain = "pw.pvv.ntnu.no";
address = "127.0.1.2";
port = 3011;
wsPort = 3012;
in {
sops.secrets."vaultwarden/environ" = {
owner = "vaultwarden";
group = "vaultwarden";
};
services.vaultwarden = {
enable = true;
dbBackend = "postgresql";
environmentFile = config.sops.secrets."vaultwarden/environ".path;
config = {
domain = "https://${domain}";
rocketAddress = address;
rocketPort = port;
websocketEnabled = true;
websocketAddress = address;
websocketPort = wsPort;
signupsAllowed = true;
signupsVerify = true;
signupsDomainsWhitelist = "pvv.ntnu.no";
smtpFrom = "vaultwarden@pvv.ntnu.no";
smtpFromName = "VaultWarden PVV";
smtpHost = "smtp.pvv.ntnu.no";
smtpUsername = "vaultwarden";
smtpSecurity = "force_tls";
smtpAuthMechanism = "Login";
# Configured in environ:
# databaseUrl = "postgresql://vaultwarden@/vaultwarden";
# smtpPassword = hemli
};
};
services.nginx.virtualHosts."${domain}" = {
forceSSL = true;
enableACME = true;
kTLS = true;
extraConfig = ''
client_max_body_size 128M;
'';
locations."/" = {
proxyPass = "http://${address}:${toString port}";
proxyWebsockets = true;
};
locations."/notifications/hub" = {
proxyPass = "http://${address}:${toString wsPort}";
proxyWebsockets = true;
};
locations."/notifications/hub/negotiate" = {
proxyPass = "http://${address}:${toString port}";
proxyWebsockets = true;
};
};
systemd.services.vaultwarden = lib.mkIf cfg.enable {
serviceConfig = {
AmbientCapabilities = [ "" ];
CapabilityBoundingSet = [ "" ];
DeviceAllow = [ "" ];
LockPersonality = true;
NoNewPrivileges = true;
# MemoryDenyWriteExecute = true;
PrivateMounts = true;
PrivateUsers = true;
ProcSubset = "pid";
ProtectClock = true;
ProtectControlGroups = true;
ProtectHostname = true;
ProtectKernelLogs = true;
ProtectKernelModules = true;
ProtectKernelTunables = true;
ProtectProc = "invisible";
RestrictAddressFamilies = [
"AF_INET"
"AF_INET6"
"AF_UNIX"
];
RemoveIPC = true;
RestrictNamespaces = true;
RestrictRealtime = true;
RestrictSUIDSGID = true;
SystemCallArchitectures = "native";
SystemCallFilter = [
"@system-service"
"~@privileged"
];
UMask = "0007";
};
};
}

View File

@ -2,20 +2,14 @@
{ {
imports = [ imports = [
./roundcube.nix ./roundcube.nix
./snappymail.nix
]; ];
services.nginx.virtualHosts."webmail.pvv.ntnu.no" = { services.nginx.virtualHosts."webmail2.pvv.ntnu.no" = {
forceSSL = true; forceSSL = true;
enableACME = true; enableACME = true;
kTLS = true; #locations."/" = lib.mkForce { };
locations = { locations."= /" = {
"= /".return = "302 https://webmail.pvv.ntnu.no/roundcube"; return = "301 https://www.pvv.ntnu.no/mail/";
"/afterlogic_lite".return = "302 https://webmail.pvv.ntnu.no/roundcube";
"/squirrelmail".return = "302 https://webmail.pvv.ntnu.no/roundcube";
"/rainloop".return = "302 https://snappymail.pvv.ntnu.no/";
"/snappymail".return = "302 https://snappymail.pvv.ntnu.no/";
}; };
}; };
} }

View File

@ -3,14 +3,9 @@
with lib; with lib;
let let
cfg = config.services.roundcube; cfg = config.services.roundcube;
domain = "webmail.pvv.ntnu.no"; domain = "webmail2.pvv.ntnu.no";
in in
{ {
sops.secrets."roundcube/postgres_password" = {
owner = "nginx";
group = "nginx";
};
services.roundcube = { services.roundcube = {
enable = true; enable = true;
@ -25,11 +20,6 @@ in
maxAttachmentSize = 20; maxAttachmentSize = 20;
hostName = "roundcubeplaceholder.example.com"; hostName = "roundcubeplaceholder.example.com";
database = {
host = "postgres.pvv.ntnu.no";
passwordFile = config.sops.secrets."roundcube/postgres_password".path;
};
extraConfig = '' extraConfig = ''
$config['enable_installer'] = false; $config['enable_installer'] = false;
$config['default_host'] = "ssl://imap.pvv.ntnu.no"; $config['default_host'] = "ssl://imap.pvv.ntnu.no";
@ -45,7 +35,6 @@ in
services.nginx.virtualHosts."roundcubeplaceholder.example.com" = lib.mkForce { }; services.nginx.virtualHosts."roundcubeplaceholder.example.com" = lib.mkForce { };
services.nginx.virtualHosts.${domain} = { services.nginx.virtualHosts.${domain} = {
kTLS = true;
locations."/roundcube" = { locations."/roundcube" = {
tryFiles = "$uri $uri/ =404"; tryFiles = "$uri $uri/ =404";
index = "index.php"; index = "index.php";

View File

@ -1,18 +0,0 @@
{ config, lib, fp, pkgs, ... }:
let
cfg = config.services.snappymail;
in {
imports = [ (fp /modules/snappymail.nix) ];
services.snappymail = {
enable = true;
hostname = "snappymail.pvv.ntnu.no";
};
services.nginx.virtualHosts.${cfg.hostname} = {
forceSSL = true;
enableACME = true;
kTLS = true;
};
}

View File

@ -0,0 +1,4 @@
{ ... }:
{
}

View File

@ -1,121 +0,0 @@
{ pkgs, lib, config, ... }:
let
format = pkgs.formats.php { };
cfg = config.services.pvv-nettsiden;
in {
imports = [
./fetch-gallery.nix
];
sops.secrets = lib.genAttrs [
"nettsiden/door_secret"
"nettsiden/mysql_password"
"nettsiden/simplesamlphp/admin_password"
"nettsiden/simplesamlphp/cookie_salt"
] (_: {
owner = config.services.phpfpm.pools.pvv-nettsiden.user;
group = config.services.phpfpm.pools.pvv-nettsiden.group;
restartUnits = [ "phpfpm-pvv-nettsiden.service" ];
});
services.idp.sp-remote-metadata = [
"https://www.pvv.ntnu.no/simplesaml/"
"https://pvv.ntnu.no/simplesaml/"
"https://www.pvv.org/simplesaml/"
"https://pvv.org/simplesaml/"
];
services.pvv-nettsiden = {
enable = true;
package = pkgs.pvv-nettsiden.override {
extra_files = {
"${pkgs.pvv-nettsiden.passthru.simplesamlphpPath}/metadata/saml20-idp-remote.php" = pkgs.writeText "pvv-nettsiden-saml20-idp-remote.php" (import ../idp-simplesamlphp/metadata.php.nix);
"${pkgs.pvv-nettsiden.passthru.simplesamlphpPath}/config/authsources.php" = pkgs.writeText "pvv-nettsiden-authsources.php" ''
<?php
$config = array(
'admin' => array(
'core:AdminPassword'
),
'default-sp' => array(
'saml:SP',
'entityID' => 'https://${cfg.domainName}/simplesaml/',
'idp' => 'https://idp.pvv.ntnu.no/',
),
);
'';
};
};
domainName = "www.pvv.ntnu.no";
settings = let
includeFromSops = path: format.lib.mkRaw "file_get_contents('${config.sops.secrets."nettsiden/${path}".path}')";
in {
DOOR_SECRET = includeFromSops "door_secret";
DB = {
DSN = "mysql:dbname=www-data_nettside;host=mysql.pvv.ntnu.no";
USER = "www-data_nettsi";
PASS = includeFromSops "mysql_password";
};
# TODO: set up postgres session for simplesamlphp
SAML = {
COOKIE_SALT = includeFromSops "simplesamlphp/cookie_salt";
COOKIE_SECURE = true;
ADMIN_NAME = "PVV Drift";
ADMIN_EMAIL = "drift@pvv.ntnu.no";
ADMIN_PASSWORD = includeFromSops "simplesamlphp/admin_password";
TRUSTED_DOMAINS = [ cfg.domainName ];
};
};
};
services.phpfpm.pools."pvv-nettsiden".settings = {
# "php_admin_value[error_log]" = "stderr";
"php_admin_flag[log_errors]" = true;
"catch_workers_output" = true;
};
services.nginx.virtualHosts.${cfg.domainName} = {
serverAliases = [
"pvv.ntnu.no"
"www.pvv.org"
"pvv.org"
];
locations = {
# Proxy home directories
"^~ /~" = {
extraConfig = ''
proxy_redirect off;
proxy_pass https://tom.pvv.ntnu.no;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
'';
};
# Redirect the old webmail/wiki paths from spikkjeposche
"^~ /webmail".return = "301 https://webmail.pvv.ntnu.no";
"~ /pvv/([^\\n\\r]*)".return = "301 https://wiki.pvv.ntnu.no/wiki/$1";
"= /pvv".return = "301 https://wiki.pvv.ntnu.no/";
# Redirect old wiki entries
"/disk".return = "301 https://wiki.pvv.ntnu.no/wiki/Diskkjøp";
"/dok/boker.php".return = "301 https://wiki.pvv.ntnu.no/wiki/Bokhyllen";
"/styret/lover/".return = "301 https://wiki.pvv.ntnu.no/wiki/Lover";
"/styret/".return = "301 https://wiki.pvv.ntnu.no/wiki/Styret";
"/info/".return = "301 https://wiki.pvv.ntnu.no/wiki/";
"/info/maskinpark/".return = "301 https://wiki.pvv.ntnu.no/wiki/Maskiner";
"/medlemssider/meldinn.php".return = "301 https://wiki.pvv.ntnu.no/wiki/Medlemskontingent";
"/diverse/medlems-sider.php".return = "301 https://wiki.pvv.ntnu.no/wiki/Medlemssider";
"/cert/".return = "301 https://wiki.pvv.ntnu.no/wiki/CERT";
"/drift".return = "301 https://wiki.pvv.ntnu.no/wiki/Drift";
"/diverse/abuse.php".return = "301 https://wiki.pvv.ntnu.no/wiki/CERT/Abuse";
"/nerds/".return = "301 https://wiki.pvv.ntnu.no/wiki/Nerdepizza";
};
};
}

View File

@ -1,94 +0,0 @@
{ pkgs, lib, config, ... }:
let
galleryDir = config.services.pvv-nettsiden.settings.GALLERY.DIR;
transferDir = "${config.services.pvv-nettsiden.settings.GALLERY.DIR}-transfer";
in {
users.users.${config.services.pvv-nettsiden.user} = {
useDefaultShell = true;
# This is pushed from microbel:/var/www/www-gallery/build-gallery.sh
openssh.authorizedKeys.keys = [
''command="${pkgs.rrsync}/bin/rrsync -wo ${transferDir}",restrict,no-agent-forwarding,no-port-forwarding,no-pty,no-X11-forwarding ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIIjHhC2dikhWs/gG+m7qP1eSohWzTehn4ToNzDSOImyR gallery-publish''
];
};
systemd.paths.pvv-nettsiden-gallery-update = {
wantedBy = [ "multi-user.target" ];
pathConfig = {
PathChanged = "${transferDir}/gallery.tar.gz";
Unit = "pvv-nettsiden-gallery-update.service";
MakeDirectory = true;
};
};
systemd.services.pvv-nettsiden-gallery-update = {
path = with pkgs; [ imagemagick gnutar gzip ];
script = ''
tar ${lib.cli.toGNUCommandLineShell {} {
extract = true;
file = "${transferDir}/gallery.tar.gz";
directory = ".";
}}
      # Delete files and directories that exist in the gallery but not in the tarball
filesToRemove=$(uniq -u <(sort <(find . -not -path "./.thumbnails*") <(tar -tf ${transferDir}/gallery.tar.gz | sed 's|/$||')))
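      # How this works (sketch): both path lists are concatenated and sorted, and `uniq -u`
      # keeps only the lines that occur exactly once, i.e. paths present in the gallery or in
      # the tarball but not in both; the loop below removes those from disk, and `rm -f ||:`
      # keeps tarball-only entries harmless.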
while IFS= read fname; do
rm -f "$fname" ||:
rm -f ".thumbnails/$fname.png" ||:
done <<< "$filesToRemove"
find . -type d -empty -delete
mkdir -p .thumbnails
images=$(find . -type f -not -path "./.thumbnails*")
while IFS= read fname; do
# Skip this file if an up-to-date thumbnail already exists
if [ -f ".thumbnails/$fname.png" ] && \
[ "$(date -R -r "$fname")" == "$(date -R -r ".thumbnails/$fname.png")" ]
then
continue
fi
echo "Creating thumbnail for $fname"
mkdir -p $(dirname ".thumbnails/$fname")
convert -define jpeg:size=200x200 "$fname" -thumbnail 300 -auto-orient ".thumbnails/$fname.png" ||:
touch -m -d "$(date -R -r "$fname")" ".thumbnails/$fname.png"
done <<< "$images"
'';
serviceConfig = {
WorkingDirectory = galleryDir;
User = config.services.pvv-nettsiden.user;
Group = config.services.pvv-nettsiden.group;
AmbientCapabilities = [ "" ];
CapabilityBoundingSet = [ "" ];
DeviceAllow = [ "" ];
LockPersonality = true;
MemoryDenyWriteExecute = true;
NoNewPrivileges = true; # disable for third party rotate scripts
PrivateDevices = true;
PrivateNetwork = true; # disable for mail delivery
PrivateTmp = true;
ProtectClock = true;
ProtectControlGroups = true;
ProtectHome = true; # disable for userdir logs
ProtectHostname = true;
ProtectKernelLogs = true;
ProtectKernelModules = true;
ProtectKernelTunables = true;
ProtectProc = "invisible";
ProtectSystem = "full";
RestrictNamespaces = true;
RestrictRealtime = true;
RestrictSUIDSGID = true; # disable for creating setgid directories
SocketBindDeny = [ "any" ];
SystemCallArchitectures = "native";
SystemCallFilter = [
"@system-service"
];
};
};
}

View File

@ -1,18 +0,0 @@
{ ... }:
{
services.nginx.virtualHosts."www.pvv.ntnu.no".locations = {
"^~ /.well-known/" = {
alias = (toString ./root) + "/";
};
# Proxy the matrix well-known files
    # Host has to be set before proxy_pass
# The header must be set so nginx on the other side routes it to the right place
"^~ /.well-known/matrix/" = {
extraConfig = ''
proxy_set_header Host matrix.pvv.ntnu.no;
proxy_pass https://matrix.pvv.ntnu.no/.well-known/matrix/;
'';
};
};
}

View File

@ -1,31 +0,0 @@
<?xml version="1.0"?>
<clientConfig version="1.1">
<emailProvider id="pvv.ntnu.no">
<domain>pvv.ntnu.no</domain>
<domain>pvv.org</domain>
<displayName>Programvareverkstedet</displayName>
<incomingServer type="imap">
<hostname>imap.pvv.ntnu.no</hostname>
<port>993</port>
<socketType>SSL</socketType>
<username>%EMAILLOCALPART%</username>
<authentication>password-cleartext</authentication>
</incomingServer>
<outgoingServer type="smtp">
<hostname>smtp.pvv.ntnu.no</hostname>
<port>587</port>
<socketType>STARTTLS</socketType>
<username>%EMAILLOCALPART%</username>
<authentication>password-cleartext</authentication>
<useGlobalPreferredServer>true</useGlobalPreferredServer>
</outgoingServer>
<documentation url="https://www.pvv.ntnu.no/pvv/Drift/Mail/IMAP_POP3">
<descr lang="en">Setup programvareverkstedet email user with IMAP or POP3</descr>
<descr lang="nb">Sett opp programvareverkstedet email bruker med IMAP eller POP3</descr>
</documentation>
</emailProvider>
</clientConfig>

View File

@ -1,12 +0,0 @@
Contact: mailto:drift@pvv.ntnu.no
Contact: mailto:cert@pvv.ntnu.no
# drift@pvv.ntnu.no is read by more people and have a quicker reaction time,
# but cert@pvv.ntnu.no can be used for more severe issues.
Preferred-Languages: no, en
Expires: 2032-12-31T23:59:59.000Z
# This file was last updated 2024-09-14.
# You can find a wikipage for our security policies at:
# https://wiki.pvv.ntnu.no/wiki/CERT

24
hosts/bicep/acmeCert.nix Normal file
View File

@ -0,0 +1,24 @@
{ values, ... }:
{
users.groups.acme.members = [ "nginx" ];
security.acme.certs."postgres.pvv.ntnu.no" = {
group = "acme";
extraDomainNames = [
# "postgres.pvv.org"
"bicep.pvv.ntnu.no"
# "bicep.pvv.org"
# values.hosts.bicep.ipv4
# values.hosts.bicep.ipv6
];
};
services.nginx = {
enable = true;
virtualHosts."postgres.pvv.ntnu.no" = {
forceSSL = true;
enableACME = true;
# useACMEHost = "postgres.pvv.ntnu.no";
};
};
}

View File

@ -1,21 +1,24 @@
{ fp, pkgs, values, ... }: { pkgs, values, ... }:
{ {
imports = [ imports = [
./hardware-configuration.nix ./hardware-configuration.nix
(fp /base) ../../base.nix
(fp /misc/metrics-exporters.nix) ../../misc/metrics-exporters.nix
./services/nginx ./services/nginx
./acmeCert.nix
./services/mysql.nix ./services/mysql.nix
./services/postgres.nix ./services/postgres.nix
./services/mysql.nix ./services/mysql.nix
./services/calendar-bot.nix # TODO: fix the calendar bot
# ./services/calendar-bot.nix
./services/matrix ./services/matrix
]; ];
sops.defaultSopsFile = fp /secrets/bicep/bicep.yaml; sops.defaultSopsFile = ../../secrets/bicep/bicep.yaml;
sops.age.sshKeyPaths = [ "/etc/ssh/ssh_host_ed25519_key" ]; sops.age.sshKeyPaths = [ "/etc/ssh/ssh_host_ed25519_key" ];
sops.age.keyFile = "/var/lib/sops-nix/key.txt"; sops.age.keyFile = "/var/lib/sops-nix/key.txt";
sops.age.generateKey = true; sops.age.generateKey = true;
@ -34,9 +37,6 @@
anyInterface = true; anyInterface = true;
}; };
  # There are no SMART devices
services.smartd.enable = false;
# Do not change, even during upgrades. # Do not change, even during upgrades.
# See https://search.nixos.org/options?show=system.stateVersion # See https://search.nixos.org/options?show=system.stateVersion
system.stateVersion = "22.11"; system.stateVersion = "22.11";

View File

@ -1,21 +1,13 @@
{ config, fp, lib, pkgs, ... }: { config, lib, pkgs, ... }:
let let
cfg = config.services.pvv-calendar-bot; cfg = config.services.pvv-calendar-bot;
in { in {
sops.secrets = { sops.secrets."calendar-bot/matrix_token" = {
"calendar-bot/matrix_token" = { sopsFile = ../../../secrets/bicep/bicep.yaml;
sopsFile = fp /secrets/bicep/bicep.yaml;
key = "calendar-bot/matrix_token"; key = "calendar-bot/matrix_token";
owner = cfg.user; owner = cfg.user;
group = cfg.group; group = cfg.group;
}; };
"calendar-bot/mysql_password" = {
sopsFile = fp /secrets/bicep/bicep.yaml;
key = "calendar-bot/mysql_password";
owner = cfg.user;
group = cfg.group;
};
};
services.pvv-calendar-bot = { services.pvv-calendar-bot = {
enable = true; enable = true;
@ -26,11 +18,6 @@ in {
user = "@bot_calendar:pvv.ntnu.no"; user = "@bot_calendar:pvv.ntnu.no";
channel = "!gkNLUIhYVpEyLatcRz:pvv.ntnu.no"; channel = "!gkNLUIhYVpEyLatcRz:pvv.ntnu.no";
}; };
database = {
host = "mysql.pvv.ntnu.no";
user = "calendar-bot";
passwordFile = config.sops.secrets."calendar-bot/mysql_password".path;
};
secretsFile = config.sops.secrets."calendar-bot/matrix_token".path; secretsFile = config.sops.secrets."calendar-bot/matrix_token".path;
onCalendar = "*-*-* 09:00:00"; onCalendar = "*-*-* 09:00:00";
}; };

View File

@ -1,14 +1,14 @@
{ config, lib, fp, pkgs, secrets, values, ... }: { config, lib, pkgs, secrets, ... }:
{ {
sops.secrets."matrix/synapse/turnconfig" = { sops.secrets."matrix/synapse/turnconfig" = {
sopsFile = fp /secrets/bicep/matrix.yaml; sopsFile = ../../../../secrets/bicep/matrix.yaml;
key = "synapse/turnconfig"; key = "synapse/turnconfig";
owner = config.users.users.matrix-synapse.name; owner = config.users.users.matrix-synapse.name;
group = config.users.users.matrix-synapse.group; group = config.users.users.matrix-synapse.group;
}; };
sops.secrets."matrix/coturn/static-auth-secret" = { sops.secrets."matrix/coturn/static-auth-secret" = {
sopsFile = fp /secrets/bicep/matrix.yaml; sopsFile = ../../../../secrets/bicep/matrix.yaml;
key = "coturn/static-auth-secret"; key = "coturn/static-auth-secret";
owner = config.users.users.turnserver.name; owner = config.users.users.turnserver.name;
group = config.users.users.turnserver.group; group = config.users.users.turnserver.group;
@ -60,14 +60,12 @@
pkey = "${config.security.acme.certs.${realm}.directory}/key.pem"; pkey = "${config.security.acme.certs.${realm}.directory}/key.pem";
use-auth-secret = true; use-auth-secret = true;
    # World readable but I don't think it's that bad
static-auth-secret-file = config.sops.secrets."matrix/coturn/static-auth-secret".path; static-auth-secret-file = config.sops.secrets."matrix/coturn/static-auth-secret".path;
secure-stun = true; secure-stun = true;
listening-ips = [ listening-ips = [ "129.241.210.213" "2001:700:300:1900::213" ];
values.services.turn.ipv4
# values.services.turn.ipv6
];
tls-listening-port = 443; tls-listening-port = 443;
alt-tls-listening-port = 5349; alt-tls-listening-port = 5349;

View File

@ -10,7 +10,6 @@
./mjolnir.nix ./mjolnir.nix
./discord.nix ./discord.nix
./hookshot
]; ];

View File

@ -1,4 +1,4 @@
{ config, lib, fp, ... }: { config, lib, ... }:
let let
cfg = config.services.mx-puppet-discord; cfg = config.services.mx-puppet-discord;
@ -6,42 +6,15 @@ in
{ {
users.groups.keys-matrix-registrations = { }; users.groups.keys-matrix-registrations = { };
sops.secrets."matrix/discord/as_token" = { sops.secrets."matrix/registrations/mx-puppet-discord" = {
sopsFile = fp /secrets/bicep/matrix.yaml; sopsFile = ../../../../secrets/bicep/matrix.yaml;
key = "discord/as_token"; key = "registrations/mx-puppet-discord";
};
sops.secrets."matrix/discord/hs_token" = {
sopsFile = fp /secrets/bicep/matrix.yaml;
key = "discord/hs_token";
};
sops.templates."discord-registration.yaml" = {
owner = config.users.users.matrix-synapse.name; owner = config.users.users.matrix-synapse.name;
group = config.users.groups.keys-matrix-registrations.name; group = config.users.groups.keys-matrix-registrations.name;
content = ''
as_token: "${config.sops.placeholder."matrix/discord/as_token"}"
hs_token: "${config.sops.placeholder."matrix/discord/hs_token"}"
id: discord-puppet
namespaces:
users:
- exclusive: true
regex: '@_discordpuppet_.*'
rooms: []
aliases:
- exclusive: true
regex: '#_discordpuppet_.*'
protocols: []
rate_limited: false
sender_localpart: _discordpuppet_bot
url: 'http://localhost:8434'
de.sorunome.msc2409.push_ephemeral: true
'';
}; };
systemd.services.mx-puppet-discord = { systemd.services.mx-puppet-discord = {
serviceConfig.SupplementaryGroups = [ serviceConfig.SupplementaryGroups = [ config.users.groups.keys-matrix-registrations.name ];
config.users.groups.keys-matrix-registrations.name
];
}; };
@ -56,16 +29,11 @@ in
relay.whitelist = [ ".*" ]; relay.whitelist = [ ".*" ];
selfService.whitelist = [ "@danio:pvv\\.ntnu\\.no" "@dandellion:dodsorf\\.as" ]; selfService.whitelist = [ "@danio:pvv\\.ntnu\\.no" "@dandellion:dodsorf\\.as" ];
}; };
services.mx-puppet-discord.serviceDependencies = [ services.mx-puppet-discord.serviceDependencies = [ "matrix-synapse.target" "nginx.service" ];
"matrix-synapse.target"
"nginx.service"
];
services.matrix-synapse-next.settings = { services.matrix-synapse-next.settings = {
app_service_config_files = [ app_service_config_files = [ config.sops.secrets."matrix/registrations/mx-puppet-discord".path ];
config.sops.templates."discord-registration.yaml".path
];
use_appservice_legacy_authorization = true; use_appservice_legacy_authorization = true;
}; };

View File

@ -5,7 +5,6 @@ in {
services.nginx.virtualHosts."chat.pvv.ntnu.no" = { services.nginx.virtualHosts."chat.pvv.ntnu.no" = {
enableACME = true; enableACME = true;
forceSSL = true; forceSSL = true;
kTLS = true;
root = pkgs.element-web.override { root = pkgs.element-web.override {
conf = { conf = {
@ -25,26 +24,21 @@ in {
features = { features = {
feature_latex_maths = true; feature_latex_maths = true;
feature_pinning = true; feature_pinning = true;
feature_render_reaction_images = true;
feature_state_counters = true; feature_state_counters = true;
# element call group calls feature_custom_status = false;
feature_group_calls = true;
}; };
default_theme = "dark"; default_theme = "dark";
# Servers in this list should provide some sort of valuable scoping.
# matrix.org is not useful compared to matrixrooms.info: it has so many
# general members that rooms on every topic live there, which is something
# matrixrooms.info already covers.
room_directory.servers = [ room_directory.servers = [
"pvv.ntnu.no" "pvv.ntnu.no"
"matrixrooms.info" # Searches all public room directories "matrix.omegav.no"
"matrix.omegav.no" # Friends "matrix.org"
"gitter.im" # gitter rooms "libera.chat"
"mozilla.org" # mozilla and friends "gitter.im"
"kde.org" # KDE rooms "mozilla.org"
"fosdem.org" # FOSDEM "kde.org"
"dodsorf.as" # PVV Member "t2bot.io"
"nani.wtf" # PVV Member "fosdem.org"
"dodsorf.as"
]; ];
enable_presence_by_hs_url = { enable_presence_by_hs_url = {
"https://matrix.org" = false; "https://matrix.org" = false;

View File

@ -1,139 +0,0 @@
{ config, lib, fp, unstablePkgs, inputs, ... }:
let
cfg = config.services.matrix-hookshot;
webhookListenAddress = "127.0.0.1";
webhookListenPort = 8435;
in
{
imports = [
./module.nix
];
sops.secrets."matrix/hookshot/as_token" = {
sopsFile = fp /secrets/bicep/matrix.yaml;
key = "hookshot/as_token";
};
sops.secrets."matrix/hookshot/hs_token" = {
sopsFile = fp /secrets/bicep/matrix.yaml;
key = "hookshot/hs_token";
};
sops.templates."hookshot-registration.yaml" = {
owner = config.users.users.matrix-synapse.name;
group = config.users.groups.keys-matrix-registrations.name;
content = ''
id: matrix-hookshot
as_token: "${config.sops.placeholder."matrix/hookshot/as_token"}"
hs_token: "${config.sops.placeholder."matrix/hookshot/hs_token"}"
namespaces:
rooms: []
users:
- regex: "@_webhooks_.*:pvv.ntnu.no"
exclusive: true
- regex: "@bot_feeds:pvv.ntnu.no"
exclusive: true
aliases: []
sender_localpart: hookshot
url: "http://${cfg.settings.bridge.bindAddress}:${toString cfg.settings.bridge.port}"
rate_limited: false
# If enabling encryption
de.sorunome.msc2409.push_ephemeral: true
push_ephemeral: true
org.matrix.msc3202: true
'';
};
systemd.services.matrix-hookshot = {
serviceConfig.SupplementaryGroups = [
config.users.groups.keys-matrix-registrations.name
];
};
services.matrix-hookshot = {
enable = true;
package = unstablePkgs.matrix-hookshot;
registrationFile = config.sops.templates."hookshot-registration.yaml".path;
settings = {
bridge = {
bindAddress = "127.0.0.1";
domain = "pvv.ntnu.no";
url = "https://matrix.pvv.ntnu.no";
mediaUrl = "https://matrix.pvv.ntnu.no";
port = 9993;
};
listeners = [
{
bindAddress = webhookListenAddress;
port = webhookListenPort;
resources = [
"webhooks"
# "metrics"
# "provisioning"
"widgets"
];
}
];
generic = {
enabled = true;
outbound = true;
urlPrefix = "https://hookshot.pvv.ntnu.no/webhook/";
userIdPrefix = "_webhooks_";
allowJsTransformationFunctions = false;
waitForComplete = false;
};
feeds = {
enabled = true;
pollIntervalSeconds = 600;
};
serviceBots = [
{ localpart = "bot_feeds";
displayname = "Aya";
avatar = ./feeds.png;
prefix = "!aya";
service = "feeds";
}
];
permissions = [
# Users of the PVV Server
{ actor = "pvv.ntnu.no";
services = [ { service = "*"; level = "commands"; } ];
}
# Members of Medlem space (for people with their own hs)
{ actor = "!pZOTJQinWyyTWaeOgK:pvv.ntnu.no";
services = [ { service = "*"; level = "commands"; } ];
}
# Members of Drift
{ actor = "!eYgeufLrninXxQpYml:pvv.ntnu.no";
services = [ { service = "*"; level = "admin"; } ];
}
# Dan bootstrap
{ actor = "@dandellion:dodsorf.as";
services = [ { service = "*"; level = "admin"; } ];
}
];
};
};
services.matrix-hookshot.serviceDependencies = [
"matrix-synapse.target"
"nginx.service"
];
services.matrix-synapse-next.settings = {
app_service_config_files = [
config.sops.templates."hookshot-registration.yaml".path
];
};
services.nginx.virtualHosts."hookshot.pvv.ntnu.no" = {
enableACME = true;
locations."/" = {
proxyPass = "http://${webhookListenAddress}:${toString webhookListenPort}";
};
};
}

Binary file not shown (before: 1.1 MiB image).

View File

@ -1,127 +0,0 @@
{
config,
pkgs,
lib,
...
}:
let
cfg = config.services.matrix-hookshot;
settingsFormat = pkgs.formats.yaml { };
configFile = settingsFormat.generate "matrix-hookshot-config.yml" cfg.settings;
in
{
options = {
services.matrix-hookshot = {
enable = lib.mkEnableOption "matrix-hookshot, a bridge between Matrix and project management services";
package = lib.mkPackageOption pkgs "matrix-hookshot" { };
registrationFile = lib.mkOption {
type = lib.types.path;
description = ''
Appservice registration file.
As it contains secret tokens, you may not want to add this to the publicly readable Nix store.
'';
example = lib.literalExpression ''
pkgs.writeText "matrix-hookshot-registration" \'\'
id: matrix-hookshot
as_token: aaaaaaaaaa
hs_token: aaaaaaaaaa
namespaces:
rooms: []
users:
- regex: "@_webhooks_.*:foobar"
exclusive: true
sender_localpart: hookshot
url: "http://localhost:9993"
rate_limited: false
\'\'
'';
};
settings = lib.mkOption {
description = ''
{file}`config.yml` configuration as a Nix attribute set.
For details please see the [documentation](https://matrix-org.github.io/matrix-hookshot/latest/setup/sample-configuration.html).
'';
example = {
bridge = {
domain = "example.com";
url = "http://localhost:8008";
mediaUrl = "https://example.com";
port = 9993;
bindAddress = "127.0.0.1";
};
listeners = [
{
port = 9000;
bindAddress = "0.0.0.0";
resources = [ "webhooks" ];
}
{
port = 9001;
bindAddress = "localhost";
resources = [
"metrics"
"provisioning"
];
}
];
};
default = { };
type = lib.types.submodule {
freeformType = settingsFormat.type;
options = {
passFile = lib.mkOption {
type = lib.types.path;
default = "/var/lib/matrix-hookshot/passkey.pem";
description = ''
A passkey used to encrypt tokens stored inside the bridge.
File will be generated if not found.
'';
};
};
};
};
serviceDependencies = lib.mkOption {
type = with lib.types; listOf str;
default = lib.optional config.services.matrix-synapse.enable config.services.matrix-synapse.serviceUnit;
defaultText = lib.literalExpression ''
lib.optional config.services.matrix-synapse.enable config.services.matrix-synapse.serviceUnit
'';
description = ''
List of Systemd services to require and wait for when starting the application service,
such as the Matrix homeserver if it's running on the same host.
'';
};
};
};
config = lib.mkIf cfg.enable {
systemd.services.matrix-hookshot = {
description = "a bridge between Matrix and multiple project management services";
wantedBy = [ "multi-user.target" ];
wants = [ "network-online.target" ] ++ cfg.serviceDependencies;
after = [ "network-online.target" ] ++ cfg.serviceDependencies;
preStart = ''
if [ ! -f '${cfg.settings.passFile}' ]; then
mkdir -p $(dirname '${cfg.settings.passFile}')
${pkgs.openssl}/bin/openssl genpkey -out '${cfg.settings.passFile}' -outform PEM -algorithm RSA -pkeyopt rsa_keygen_bits:4096
fi
'';
serviceConfig = {
Type = "simple";
Restart = "always";
ExecStart = "${cfg.package}/bin/matrix-hookshot ${configFile} ${cfg.registrationFile}";
};
};
};
meta.maintainers = with lib.maintainers; [ flandweber ];
}

View File

@ -1,8 +1,8 @@
{ config, lib, fp, ... }: { config, lib, ... }:
{ {
sops.secrets."matrix/mjolnir/access_token" = { sops.secrets."matrix/mjolnir/access_token" = {
sopsFile = fp /secrets/bicep/matrix.yaml; sopsFile = ../../../../secrets/bicep/matrix.yaml;
key = "mjolnir/access_token"; key = "mjolnir/access_token";
owner = config.users.users.mjolnir.name; owner = config.users.users.mjolnir.name;
group = config.users.users.mjolnir.group; group = config.users.users.mjolnir.group;
@ -11,7 +11,7 @@
services.mjolnir = { services.mjolnir = {
enable = true; enable = true;
pantalaimon.enable = false; pantalaimon.enable = false;
homeserverUrl = "https://matrix.pvv.ntnu.no"; homeserverUrl = http://127.0.0.1:8008;
accessTokenFile = config.sops.secrets."matrix/mjolnir/access_token".path; accessTokenFile = config.sops.secrets."matrix/mjolnir/access_token".path;
managementRoom = "!gsdeCoWjvYRBrzuiRq:pvv.ntnu.no"; managementRoom = "!gsdeCoWjvYRBrzuiRq:pvv.ntnu.no";
protectedRooms = map (a: "https://matrix.to/#/${a}") [ protectedRooms = map (a: "https://matrix.to/#/${a}") [

View File

@ -7,9 +7,6 @@ from synapse import module_api
import re import re
import logging
logger = logging.getLogger(__name__)
class SMTPAuthProvider: class SMTPAuthProvider:
def __init__(self, config: dict, api: module_api): def __init__(self, config: dict, api: module_api):
self.api = api self.api = api
@ -46,13 +43,8 @@ class SMTPAuthProvider:
if result == True: if result == True:
userid = self.api.get_qualified_user_id(username) userid = self.api.get_qualified_user_id(username)
if not self.api.check_user_exists(userid):
userid = await self.api.check_user_exists(userid) self.api.register_user(username)
if not userid:
logger.info(f"user did not exist, registering {username}")
userid = await self.api.register_user(username)
logger.info(f"registered userid: {userid}")
return (userid, None) return (userid, None)
else: else:
logger.info("returning None")
return None return None

View File

@ -1,4 +1,4 @@
{ config, lib, fp, pkgs, values, inputs, ... }: { config, lib, pkgs, values, inputs, ... }:
let let
cfg = config.services.matrix-synapse-next; cfg = config.services.matrix-synapse-next;
@ -10,18 +10,23 @@ let
in { in {
sops.secrets."matrix/synapse/signing_key" = { sops.secrets."matrix/synapse/signing_key" = {
key = "synapse/signing_key"; key = "synapse/signing_key";
sopsFile = fp /secrets/bicep/matrix.yaml; sopsFile = ../../../../secrets/bicep/matrix.yaml;
owner = config.users.users.matrix-synapse.name; owner = config.users.users.matrix-synapse.name;
group = config.users.users.matrix-synapse.group; group = config.users.users.matrix-synapse.group;
}; };
sops.secrets."matrix/synapse/user_registration" = { sops.secrets."matrix/synapse/user_registration" = {
sopsFile = fp /secrets/bicep/matrix.yaml; sopsFile = ../../../../secrets/bicep/matrix.yaml;
key = "synapse/signing_key"; key = "synapse/signing_key";
owner = config.users.users.matrix-synapse.name; owner = config.users.users.matrix-synapse.name;
group = config.users.users.matrix-synapse.group; group = config.users.users.matrix-synapse.group;
}; };
sops.secrets."matrix/sliding-sync/env" = {
sopsFile = ../../../../secrets/bicep/matrix.yaml;
key = "sliding-sync/env";
};
services.matrix-synapse-next = { services.matrix-synapse-next = {
enable = true; enable = true;
@ -38,6 +43,8 @@ in {
workers.eventPersisters = 2; workers.eventPersisters = 2;
workers.useUserDirectoryWorker = true; workers.useUserDirectoryWorker = true;
enableSlidingSync = true;
enableNginx = true; enableNginx = true;
settings = { settings = {
@ -127,16 +134,90 @@ in {
"129.241.0.0/16" "129.241.0.0/16"
"2001:700:300::/44" "2001:700:300::/44"
]; ];
saml2_config = {
sp_config.metadata.remote = [
{ url = "https://idp.pvv.ntnu.no/simplesaml/saml2/idp/metadata.php"; }
];
description = [ "Matrix Synapse SP" "en" ];
name = [ "Matrix Synapse SP" "en" ];
ui_info = {
display_name = [
{
lang = "en";
text = "PVV Matrix login";
}
];
description = [
{
lang = "en";
text = "Matrix is a modern free and open federated chat protocol";
}
];
#information_url = [
# {
# lang = "en";
# text = "";
# };
#];
#privacy_statement_url = [
# {
# lang = "en";
# text = "";
# };
#];
keywords = [
{
lang = "en";
text = [ "Matrix" "Element" ];
}
];
#logo = [
# {
# lang = "en";
# text = "";
# width = "";
# height = "";
# }
#];
};
organization = {
name = "Programvareverkstedet";
display_name = [ "Programvareverkstedet" "en" ];
url = "https://www.pvv.ntnu.no";
};
contact_person = [
{ given_name = "Drift";
sur_name = "King";
email_adress = [ "drift@pvv.ntnu.no" ];
contact_type = "technical";
}
];
user_mapping_provider = {
config = {
mxid_source_attribute = "uid"; # What is this supposed to be?
mxid_mapping = "hexencode";
}; };
}; };
#attribute_requirements = [
# {attribute = "userGroup"; value = "medlem";} # Do we have this?
#];
};
};
};
services.matrix-synapse.sliding-sync.environmentFile = config.sops.secrets."matrix/sliding-sync/env".path;
services.redis.servers."".enable = true; services.redis.servers."".enable = true;
services.nginx.virtualHosts."matrix.pvv.ntnu.no" = lib.mkMerge [ services.nginx.virtualHosts."matrix.pvv.ntnu.no" = lib.mkMerge [
{ ({
kTLS = true;
}
{
locations."/.well-known/matrix/server" = { locations."/.well-known/matrix/server" = {
return = '' return = ''
200 '{"m.server": "matrix.pvv.ntnu.no:443"}' 200 '{"m.server": "matrix.pvv.ntnu.no:443"}'
@ -146,38 +227,26 @@ in {
add_header Access-Control-Allow-Origin *; add_header Access-Control-Allow-Origin *;
''; '';
}; };
} })
{ ({
locations."/_synapse/admin" = {
proxyPass = "http://$synapse_backend";
extraConfig = ''
allow 127.0.0.1;
allow ::1;
allow ${values.hosts.bicep.ipv4};
allow ${values.hosts.bicep.ipv6};
deny all;
'';
};
}
{
locations = let locations = let
connectionInfo = w: matrix-lib.workerConnectionResource "metrics" w; connectionInfo = w: matrix-lib.workerConnectionResource "metrics" w;
socketAddress = w: let c = connectionInfo w; in "${c.host}:${toString c.port}"; socketAddress = w: let c = connectionInfo w; in "${c.host}:${toString (c.port)}";
metricsPath = w: "/metrics/${w.type}/${toString w.index}"; metricsPath = w: "/metrics/${w.type}/${toString w.index}";
proxyPath = w: "http://${socketAddress w}/_synapse/metrics"; proxyPath = w: "http://${socketAddress w}/_synapse/metrics";
in lib.mapAttrs' (n: v: lib.nameValuePair in lib.mapAttrs' (n: v: lib.nameValuePair
(metricsPath v) { (metricsPath v) ({
proxyPass = proxyPath v; proxyPass = proxyPath v;
extraConfig = '' extraConfig = ''
allow ${values.hosts.ildkule.ipv4}; allow ${values.hosts.ildkule.ipv4};
allow ${values.hosts.ildkule.ipv6}; allow ${values.hosts.ildkule.ipv6};
deny all; deny all;
''; '';
}) }))
cfg.workers.instances; cfg.workers.instances;
} })
{ ({
locations."/metrics/master/1" = { locations."/metrics/master/1" = {
proxyPass = "http://127.0.0.1:9000/_synapse/metrics"; proxyPass = "http://127.0.0.1:9000/_synapse/metrics";
extraConfig = '' extraConfig = ''
@ -200,5 +269,5 @@ in {
labels = { }; labels = { };
}]) + "/"; }]) + "/";
}; };
}]; })];
} }

View File

@ -1,8 +1,15 @@
{ config, values, ... }: { config, values, ... }:
{ {
security.acme = {
acceptTerms = true;
defaults.email = "danio@pvv.ntnu.no";
};
services.nginx = { services.nginx = {
enable = true; enable = true;
enableReload = true; enableReload = true;
defaultListenAddresses = [ defaultListenAddresses = [
values.hosts.bicep.ipv4 values.hosts.bicep.ipv4
"[${values.hosts.bicep.ipv6}]" "[${values.hosts.bicep.ipv6}]"
@ -11,5 +18,28 @@
"127.0.0.2" "127.0.0.2"
"[::1]" "[::1]"
]; ];
appendConfig = ''
pcre_jit on;
worker_processes 8;
worker_rlimit_nofile 8192;
'';
eventsConfig = ''
multi_accept on;
worker_connections 4096;
'';
recommendedProxySettings = true;
recommendedTlsSettings = true;
recommendedGzipSettings = true;
recommendedBrotliSettings = true;
recommendedOptimisation = true;
};
networking.firewall.allowedTCPPorts = [ 80 443 ];
systemd.services.nginx.serviceConfig = {
LimitNOFILE = 65536;
}; };
} }

View File

@ -1,4 +1,7 @@
{ config, pkgs, ... }: { config, pkgs, ... }:
let
sslCert = config.security.acme.certs."postgres.pvv.ntnu.no";
in
{ {
services.postgresql = { services.postgresql = {
enable = true; enable = true;
@ -76,16 +79,12 @@
systemd.services.postgresql.serviceConfig = { systemd.services.postgresql.serviceConfig = {
LoadCredential = [ LoadCredential = [
"cert:/etc/certs/postgres.crt" "cert:${sslCert.directory}/cert.pem"
"key:/etc/certs/postgres.key" "key:${sslCert.directory}/key.pem"
]; ];
}; };
environment.snakeoil-certs."/etc/certs/postgres" = { users.groups.acme.members = [ "postgres" ];
owner = "postgres";
group = "postgres";
subject = "/C=NO/O=Programvareverkstedet/CN=postgres.pvv.ntnu.no/emailAddress=drift@pvv.ntnu.no";
};
networking.firewall.allowedTCPPorts = [ 5432 ]; networking.firewall.allowedTCPPorts = [ 5432 ];
networking.firewall.allowedUDPPorts = [ 5432 ]; networking.firewall.allowedUDPPorts = [ 5432 ];

View File

@ -1,16 +1,16 @@
{ config, fp, pkgs, values, ... }: { config, pkgs, values, ... }:
{ {
imports = [ imports = [
# Include the results of the hardware scan. # Include the results of the hardware scan.
./hardware-configuration.nix ./hardware-configuration.nix
(fp /base) ../../base.nix
(fp /misc/metrics-exporters.nix) ../../misc/metrics-exporters.nix
./disks.nix ./disks.nix
(fp /misc/builder.nix) ../../misc/builder.nix
]; ];
sops.defaultSopsFile = fp /secrets/bob/bob.yaml; sops.defaultSopsFile = ../../secrets/bob/bob.yaml;
sops.age.sshKeyPaths = [ "/etc/ssh/ssh_host_ed25519_key" ]; sops.age.sshKeyPaths = [ "/etc/ssh/ssh_host_ed25519_key" ];
sops.age.keyFile = "/var/lib/sops-nix/key.txt"; sops.age.keyFile = "/var/lib/sops-nix/key.txt";
sops.age.generateKey = true; sops.age.generateKey = true;

View File

@ -1,10 +1,10 @@
{ config, fp, pkgs, values, ... }: { config, pkgs, values, ... }:
{ {
imports = [ imports = [
# Include the results of the hardware scan. # Include the results of the hardware scan.
./hardware-configuration.nix ./hardware-configuration.nix
(fp /base) ../../base.nix
(fp /misc/metrics-exporters.nix) ../../misc/metrics-exporters.nix
./services/grzegorz.nix ./services/grzegorz.nix
]; ];

View File

@ -1,6 +1,6 @@
{ config, fp, ... }: { config, ... }:
{ {
imports = [ (fp /modules/grzegorz.nix) ]; imports = [ ../../../modules/grzegorz.nix ];
services.nginx.virtualHosts."${config.networking.fqdn}" = { services.nginx.virtualHosts."${config.networking.fqdn}" = {
serverAliases = [ serverAliases = [

View File

@ -0,0 +1,38 @@
{ config, pkgs, values, ... }:
{
imports = [
./hardware-configuration.nix
../../base.nix
../../misc/metrics-exporters.nix
./containers/salsa/configuration.nix
];
# buskerud does not support efi?
# boot.loader.systemd-boot.enable = true;
# boot.loader.efi.canTouchEfiVariables = true;
boot.loader.grub.enable = true;
boot.loader.grub.device = "/dev/sdb";
networking.hostName = "buskerud";
networking.search = [ "pvv.ntnu.no" "pvv.org" ];
networking.nameservers = [ "129.241.0.200" "129.241.0.201" ];
networking.tempAddresses = "disabled";
systemd.network.networks."enp3s0f0" = values.defaultNetworkConfig // {
matchConfig.Name = "enp3s0f0";
address = with values.hosts.buskerud; [ (ipv4 + "/25") (ipv6 + "/64") ];
};
# List packages installed in system profile
environment.systemPackages = with pkgs; [
];
# This value determines the NixOS release from which the default
# settings for stateful data, like file locations and database versions
# on your system were taken. It's perfectly fine and recommended to leave
# this value at the release version of the first install of this system.
# Before changing this value read the documentation for this option
# (e.g. man configuration.nix or on https://nixos.org/nixos/options.html).
system.stateVersion = "23.05"; # Did you read the comment?
}

View File

@ -0,0 +1,27 @@
# Misc docs
### Old stuff about storing kerberos state inside LDAP - might not be relevant
- https://wiki.debian.org/LDAP/OpenLDAPSetup#OpenLDAP_as_a_Backend
- https://ubuntu.com/server/docs/service-kerberos-with-openldap-backend
- https://forums.freebsd.org/threads/heimdal-and-openldap-integration-some-questions.58422/
- http://osr507doc.sco.com/cgi-bin/info2html?(heimdal.info.gz)Using%2520LDAP%2520to%2520store%2520the%2520database&lang=en
- https://bbs.archlinux.org/viewtopic.php?id=54236
- https://openldap-software.0penldap.narkive.com/Ml6seAGL/ldap-backend-for-heimdal-kerberos
### Heimdal setup
- http://chschneider.eu/linux/server/heimdal.shtml
- https://github.com/NixOS/nixpkgs/blob/master/nixos/tests/kerberos/heimdal.nix
- https://itk.samfundet.no/dok/Kerberos (possibly a bit dated)
### OpenLDAP setup with new olc stuff
- https://www.openldap.org/doc/admin26/
- https://www.openldap.org/doc/admin26/sasl.html#GSSAPI
- https://www.zytrax.com/books/ldap/
### SASLAUTHD
- https://linux.die.net/man/8/saslauthd
- https://www.cyrusimap.org/sasl/index.html

View File

@ -0,0 +1,51 @@
{ config, pkgs, lib, inputs, values, ... }:
{
containers.salsa = {
autoStart = true;
interfaces = [ "enp6s0f1" ];
bindMounts = {
"/data" = { hostPath = "/data/salsa"; isReadOnly = false; };
};
nixpkgs = inputs.nixpkgs-unstable;
config = { config, pkgs, ... }: let
inherit values inputs;
in {
imports = [
inputs.sops-nix.nixosModules.sops
../../../../base.nix
./services/heimdal
./services/openldap.nix
./services/saslauthd.nix
# https://github.com/NixOS/nixpkgs/pull/287611
./modules/krb5
./modules/kerberos
];
disabledModules = [
"security/krb5"
"services/system/kerberos/default.nix"
];
_module.args = {
inherit values inputs;
};
sops.age.sshKeyPaths = [ "/etc/ssh/ssh_host_ed25519_key" ];
sops.age.keyFile = "/var/lib/sops-nix/key.txt";
sops.age.generateKey = true;
# systemd.network.networks."30-enp6s0f1" = values.defaultNetworkConfig // {
# matchConfig.Name = "enp6s0f1";
# address = with values.hosts.jokum; [ (ipv4 + "/25") (ipv6 + "/64") ]
# ++ (with values.services.turn; [ (ipv4 + "/25") (ipv6 + "/64") ]);
# };
networking.useHostResolvConf = lib.mkForce false;
system.stateVersion = "23.11";
};
};
}

View File

@ -0,0 +1,101 @@
{ config, pkgs, lib, ... }:
let
inherit (lib) mkOption types;
cfg = config.services.kerberos_server;
inherit (config.security.krb5) package;
format = import ../krb5/krb5-conf-format.nix { inherit pkgs lib; } { };
in
{
imports = [
(lib.mkRenamedOptionModule [ "services" "kerberos_server" "realms" ] [ "services" "kerberos_server" "settings" "realms" ])
./mit.nix
./heimdal.nix
];
options = {
services.kerberos_server = {
enable = lib.mkEnableOption (lib.mdDoc "the kerberos authentication server");
settings = let
aclEntry = types.submodule {
options = {
principal = mkOption {
type = types.str;
description = lib.mdDoc "Which principal the rule applies to";
};
access = mkOption {
type = types.either
(types.listOf (types.enum ["add" "cpw" "delete" "get" "list" "modify"]))
(types.enum ["all"]);
default = "all";
description = lib.mdDoc "The changes the principal is allowed to make.";
};
target = mkOption {
type = types.str;
default = "*";
description = lib.mdDoc "The principals that 'access' applies to.";
};
};
};
realm = types.submodule ({ name, ... }: {
freeformType = format.sectionType;
options = {
acl = mkOption {
type = types.listOf aclEntry;
default = [
{ principal = "*/admin"; access = "all"; }
{ principal = "admin"; access = "all"; }
];
description = lib.mdDoc ''
The privileges granted to a user.
'';
};
};
});
in mkOption {
type = types.submodule (format.type.getSubModules ++ [{
options = {
realms = mkOption {
type = types.attrsOf realm;
description = lib.mdDoc ''
The realm(s) to serve keys for.
'';
};
};
}]);
description = ''
Settings for the kerberos server of choice.
See the following documentation:
- Heimdal: {manpage}`kdc.conf(5)`
- MIT Kerberos: <https://web.mit.edu/kerberos/krb5-1.21/doc/admin/conf_files/kdc_conf.html>
'';
default = { };
};
};
};
config = lib.mkIf cfg.enable {
environment.systemPackages = [ package ];
assertions = [
{
assertion = cfg.settings.realms != { };
message = "The server needs at least one realm";
}
{
assertion = lib.length (lib.attrNames cfg.settings.realms) <= 1;
message = "Only one realm per server is currently supported.";
}
];
systemd.slices.system-kerberos-server = { };
systemd.targets.kerberos-server = {
wantedBy = [ "multi-user.target" ];
};
};
}
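
For orientation, a minimal usage sketch of the options declared above; the realm name, database path, and principals are placeholders, not values taken from this repository:

{
  services.kerberos_server = {
    enable = true;
    settings.realms."EXAMPLE.ORG" = {
      # Freeform attributes are passed through to the generated kdc.conf.
      dbname = "/var/lib/heimdal/heimdal";
      # The acl option uses the submodule declared above; access is either
      # "all" or a list of the enumerated operations.
      acl = [
        { principal = "*/admin"; access = "all"; }
        { principal = "someuser"; access = [ "get" "list" ]; target = "*"; }
      ];
    };
  };
}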

View File

@ -0,0 +1,87 @@
{ pkgs, config, lib, ... } :
let
inherit (lib) mapAttrs;
cfg = config.services.kerberos_server;
package = config.security.krb5.package;
aclConfigs = lib.pipe cfg.settings.realms [
(mapAttrs (name: { acl, ... }: lib.concatMapStringsSep "\n" (
{ principal, access, target, ... }:
"${principal}\t${lib.concatStringsSep "," (lib.toList access)}\t${target}"
) acl))
(lib.mapAttrsToList (name: text:
{
dbname = "/var/lib/heimdal/heimdal";
acl_file = pkgs.writeText "${name}.acl" text;
}
))
];
finalConfig = cfg.settings // {
realms = mapAttrs (_: v: removeAttrs v [ "acl" ]) (cfg.settings.realms or { });
kdc = (cfg.settings.kdc or { }) // {
database = aclConfigs;
};
};
format = import ../krb5/krb5-conf-format.nix { inherit pkgs lib; } { };
kdcConfFile = format.generate "kdc.conf" finalConfig;
in
{
config = lib.mkIf (cfg.enable && package.passthru.implementation == "heimdal") {
environment.etc."heimdal-kdc/kdc.conf".source = kdcConfFile;
systemd.tmpfiles.settings."10-heimdal" = let
databases = lib.pipe finalConfig.kdc.database [
(map (dbAttrs: dbAttrs.dbname or null))
(lib.filter (x: x != null))
lib.unique
];
in lib.genAttrs databases (_: {
d = {
user = "root";
group = "root";
mode = "0700";
};
});
systemd.services.kadmind = {
description = "Kerberos Administration Daemon";
partOf = [ "kerberos-server.target" ];
wantedBy = [ "kerberos-server.target" ];
serviceConfig = {
ExecStart = "${package}/libexec/kadmind --config-file=/etc/heimdal-kdc/kdc.conf";
Slice = "system-kerberos-server.slice";
StateDirectory = "heimdal";
};
restartTriggers = [ kdcConfFile ];
};
systemd.services.kdc = {
description = "Key Distribution Center daemon";
partOf = [ "kerberos-server.target" ];
wantedBy = [ "kerberos-server.target" ];
serviceConfig = {
ExecStart = "${package}/libexec/kdc --config-file=/etc/heimdal-kdc/kdc.conf";
Slice = "system-kerberos-server.slice";
StateDirectory = "heimdal";
};
restartTriggers = [ kdcConfFile ];
};
systemd.services.kpasswdd = {
description = "Kerberos Password Changing daemon";
partOf = [ "kerberos-server.target" ];
wantedBy = [ "kerberos-server.target" ];
serviceConfig = {
ExecStart = "${package}/libexec/kpasswdd";
Slice = "system-kerberos-server.slice";
StateDirectory = "heimdal";
};
restartTriggers = [ kdcConfFile ];
};
};
}

View File

@ -0,0 +1,77 @@
{ pkgs, config, lib, ... } :
let
inherit (lib) mapAttrs;
cfg = config.services.kerberos_server;
package = config.security.krb5.package;
PIDFile = "/run/kdc.pid";
format = import ../krb5/krb5-conf-format.nix { inherit pkgs lib; } { };
aclMap = {
add = "a"; cpw = "c"; delete = "d"; get = "i"; list = "l"; modify = "m";
all = "*";
};
aclConfigs = lib.pipe cfg.settings.realms [
(mapAttrs (name: { acl, ... }: lib.concatMapStringsSep "\n" (
{ principal, access, target, ... }: let
access_code = map (a: aclMap.${a}) (lib.toList access);
in "${principal} ${lib.concatStrings access_code} ${target}"
) acl))
(lib.concatMapAttrs (name: text: {
${name} = {
acl_file = pkgs.writeText "${name}.acl" text;
};
}))
];
finalConfig = cfg.settings // {
realms = mapAttrs (n: v: (removeAttrs v [ "acl" ]) // aclConfigs.${n}) (cfg.settings.realms or { });
};
kdcConfFile = format.generate "kdc.conf" finalConfig;
env = {
# What Debian uses, could possibly link directly to Nix store?
KRB5_KDC_PROFILE = "/etc/krb5kdc/kdc.conf";
};
in
{
config = lib.mkIf (cfg.enable && package.passthru.implementation == "krb5") {
environment = {
etc."krb5kdc/kdc.conf".source = kdcConfFile;
variables = env;
};
systemd.services.kadmind = {
description = "Kerberos Administration Daemon";
partOf = [ "kerberos-server.target" ];
wantedBy = [ "kerberos-server.target" ];
serviceConfig = {
ExecStart = "${package}/bin/kadmind -nofork";
Slice = "system-kerberos-server.slice";
StateDirectory = "krb5kdc";
};
restartTriggers = [ kdcConfFile ];
environment = env;
};
systemd.services.kdc = {
description = "Key Distribution Center daemon";
partOf = [ "kerberos-server.target" ];
wantedBy = [ "kerberos-server.target" ];
serviceConfig = {
Type = "forking";
PIDFile = PIDFile;
ExecStart = "${package}/bin/krb5kdc -P ${PIDFile}";
Slice = "system-kerberos-server.slice";
StateDirectory = "krb5kdc";
};
restartTriggers = [ kdcConfFile ];
environment = env;
};
};
}
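
To make the ACL translation concrete, here is a rough sketch (the principals are placeholders) of how two acl entries would be rendered into the generated MIT acl_file, using the letter codes from aclMap above:

  acl = [
    { principal = "*/admin"; access = "all"; }
    { principal = "admin"; access = [ "add" "get" "list" ]; }
  ];

would produce, roughly:

  */admin * *
  admin ail *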

View File

@ -77,8 +77,22 @@ in {
}; };
}; };
config = mkIf cfg.enable { config = {
environment = { assertions = mkIf (cfg.enable || config.services.kerberos_server.enable) [(let
implementation = cfg.package.passthru.implementation or "<NOT SET>";
in {
assertion = lib.elem implementation [ "krb5" "heimdal" ];
message = ''
`security.krb5.package` must be one of:
- krb5
- heimdal
Currently chosen implementation: ${implementation}
'';
})];
environment = mkIf cfg.enable {
systemPackages = [ cfg.package ]; systemPackages = [ cfg.package ];
etc."krb5.conf".source = format.generate "krb5.conf" cfg.settings; etc."krb5.conf".source = format.generate "krb5.conf" cfg.settings;
}; };

View File

@ -10,14 +10,19 @@ let
inherit (lib.types) attrsOf bool coercedTo either int listOf oneOf path inherit (lib.types) attrsOf bool coercedTo either int listOf oneOf path
str submodule; str submodule;
in in
{ }: { { }: rec {
type = let sectionType = let
section = attrsOf relation; relation = oneOf [
relation = either (attrsOf value) value; (listOf (attrsOf value))
(attrsOf value)
value
];
value = either (listOf atom) atom; value = either (listOf atom) atom;
atom = oneOf [int str bool]; atom = oneOf [int str bool];
in submodule { in attrsOf relation;
freeformType = attrsOf section;
type = submodule {
freeformType = attrsOf sectionType;
options = { options = {
include = mkOption { include = mkOption {
default = [ ]; default = [ ];
@ -71,6 +76,9 @@ in
${name} = { ${name} = {
${indent (concatStringsSep "\n" (mapAttrsToList formatValue relation))} ${indent (concatStringsSep "\n" (mapAttrsToList formatValue relation))}
}'' }''
else if isList relation
then
concatMapStringsSep "\n" (formatRelation name) relation
else formatValue name relation; else formatValue name relation;
formatValue = name: value: formatValue = name: value:
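
In other words, the relaxed relation type lets one section carry several relations with the same name. A hedged sketch of what the Heimdal kdc.database list built in heimdal.nix above would come out as after this formatter (the store path is illustrative):

  kdc.database = [
    { dbname = "/var/lib/heimdal/heimdal"; acl_file = "/nix/store/...-PVV.NTNU.NO.acl"; }
  ];

should render in kdc.conf roughly as:

  [kdc]
    database = {
      dbname = /var/lib/heimdal/heimdal
      acl_file = /nix/store/...-PVV.NTNU.NO.acl
    }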

View File

@ -0,0 +1,78 @@
{ config, pkgs, lib, ... }:
let
realm = "PVV.NTNU.NO";
cfg = config.security.krb5;
in
{
security.krb5 = {
enable = true;
# NOTE: This has a small edit that moves an include header to $dev/include.
# It is required in order to build smbk5pwd, because of some nested includes.
# We should open an issue upstream (heimdal, not nixpkgs), but this patch
# will do for now.
# package = pkgs.heimdal;
package = pkgs.callPackage ./package.nix {
inherit (pkgs.apple_sdk.frameworks)
CoreFoundation Security SystemConfiguration;
};
settings = {
logging.kdc = "CONSOLE";
realms.${realm} = {
admin_server = "localhost";
kdc = [ "localhost" ];
};
kadmin.default_keys = lib.concatStringsSep " " [
"aes256-cts-hmac-sha1-96:pw-salt"
"aes128-cts-hmac-sha1-96:pw-salt"
];
libdefaults.default_etypes = lib.concatStringsSep " " [
"aes256-cts-hmac-sha1-96"
"aes128-cts-hmac-sha1-96"
];
libdefaults = {
default_realm = realm;
};
domain_realm = {
"pvv.ntnu.no" = realm;
".pvv.ntnu.no" = realm;
};
};
};
services.kerberos_server = {
enable = true;
settings = {
realms.${realm} = {
dbname = "/var/heimdal/heimdal";
mkey = "/var/heimdal/mkey";
};
# kadmin.default_keys = lib.concatStringsSep " " [
# "aes256-cts-hmac-sha1-96:pw-salt"
# "aes128-cts-hmac-sha1-96:pw-salt"
# ];
# libdefaults.default_etypes = lib.concatStringsSep " " [
# "aes256-cts-hmac-sha1-96"
# "aes128-cts-hmac-sha1-96"
# ];
# password_quality.min_length = 8;
};
};
# NOTE: These changes are part of nixpkgs-unstable, but not 23.11.
# The package override needs these changes.
# systemd.services = {
# kdc.serviceConfig.ExecStart = lib.mkForce "${cfg.package}/libexec/kadmind --config-file=/etc/heimdal-kdc/kdc.conf";
# kpasswdd.serviceConfig.ExecStart = lib.mkForce "${cfg.package}/libexec/kpasswdd";
# kadmind.serviceConfig.ExecStart = lib.mkForce "${cfg.package}/libexec/kdc --config-file=/etc/heimdal-kdc/kdc.conf";
# };
}

View File

@ -0,0 +1,180 @@
{ lib
, stdenv
, fetchFromGitHub
, autoreconfHook
, pkg-config
, python3
, perl
, bison
, flex
, texinfo
, perlPackages
, openldap
, libcap_ng
, sqlite
, openssl
, db
, libedit
, pam
, krb5
, libmicrohttpd
, cjson
, CoreFoundation
, Security
, SystemConfiguration
, curl
, jdk
, unzip
, which
, nixosTests
, withCJSON ? true
, withCapNG ? stdenv.isLinux
# libmicrohttpd should theoretically work for darwin as well, but something is broken.
# It affects tests check-bx509d and check-httpkadmind.
, withMicroHTTPD ? stdenv.isLinux
, withOpenLDAP ? true
, withOpenLDAPAsHDBModule ? false
, withOpenSSL ? true
, withSQLite3 ? true
}:
assert lib.assertMsg (withOpenLDAPAsHDBModule -> withOpenLDAP) ''
OpenLDAP needs to be enabled in order to build the OpenLDAP HDB Module.
'';
stdenv.mkDerivation {
pname = "heimdal";
version = "7.8.0-unstable-2023-11-29";
src = fetchFromGitHub {
owner = "heimdal";
repo = "heimdal";
rev = "3253c49544eacb33d5ad2f6f919b0696e5aab794";
hash = "sha256-uljzQBzXrZCZjcIWfioqHN8YsbUUNy14Vo+A3vZIXzM=";
};
outputs = [ "out" "dev" "man" "info" ];
nativeBuildInputs = [
autoreconfHook
pkg-config
python3
perl
bison
flex
texinfo
]
++ (with perlPackages; [ JSON ]);
buildInputs = [ db libedit pam ]
++ lib.optionals (stdenv.isDarwin) [ CoreFoundation Security SystemConfiguration ]
++ lib.optionals (withCJSON) [ cjson ]
++ lib.optionals (withCapNG) [ libcap_ng ]
++ lib.optionals (withMicroHTTPD) [ libmicrohttpd ]
++ lib.optionals (withOpenLDAP) [ openldap ]
++ lib.optionals (withOpenSSL) [ openssl ]
++ lib.optionals (withSQLite3) [ sqlite ];
doCheck = true;
nativeCheckInputs = [
curl
jdk
unzip
which
];
configureFlags = [
"--with-libedit-include=${libedit.dev}/include"
"--with-libedit-lib=${libedit}/lib"
# "--with-berkeley-db-include=${db.dev}/include"
"--with-berkeley-db=${db}/lib"
"--without-x"
"--disable-afs-string-to-key"
] ++ lib.optionals (withCapNG) [
"--with-capng"
] ++ lib.optionals (withCJSON) [
"--with-cjson=${cjson}"
] ++ lib.optionals (withOpenLDAP) [
"--with-openldap=${openldap.dev}"
] ++ lib.optionals (withOpenLDAPAsHDBModule) [
"--enable-hdb-openldap-module"
] ++ lib.optionals (withSQLite3) [
"--with-sqlite3=${sqlite.dev}"
];
# (check-ldap) slapd resides within ${openldap}/libexec,
# which is not part of $PATH by default.
# (check-ldap) prepending ${openldap}/bin to the path to avoid
# using the default installation of openldap on unsandboxed darwin systems,
# which does not support the new mdb backend at the moment (2024-01-13).
# (check-ldap) the bdb backend got deprecated in favour of mdb in openldap 2.5.0,
# but the heimdal tests still seem to expect bdb as the openldap backend.
# This might be fixed upstream in a future update.
patchPhase = ''
runHook prePatch
substituteInPlace tests/ldap/slapd-init.in \
--replace 'SCHEMA_PATHS="' 'SCHEMA_PATHS="${openldap}/etc/schema '
substituteInPlace tests/ldap/check-ldap.in \
--replace 'PATH=' 'PATH=${openldap}/libexec:${openldap}/bin:'
substituteInPlace tests/ldap/slapd.conf \
--replace 'database bdb' 'database mdb'
runHook postPatch
'';
# (test_cc) heimdal uses libroken's implementation of `secure_getenv` on darwin,
# which expects either USER or LOGNAME to be set.
preCheck = lib.optionalString (stdenv.isDarwin) ''
export USER=nix-builder
'';
# We need to build hcrypt for applications like samba
postBuild = ''
(cd include/hcrypto; make -j $NIX_BUILD_CORES)
(cd lib/hcrypto; make -j $NIX_BUILD_CORES)
'';
postInstall = ''
# Install hcrypto
(cd include/hcrypto; make -j $NIX_BUILD_CORES install)
(cd lib/hcrypto; make -j $NIX_BUILD_CORES install)
mkdir -p $dev/bin
mv $out/bin/krb5-config $dev/bin/
# asn1 compilers, move them to $dev
mv $out/libexec/heimdal/* $dev/bin
rmdir $out/libexec/heimdal
cp include/heim_threads.h $dev/include
# compile_et is needed for cross-compiling this package and samba
mv lib/com_err/.libs/compile_et $dev/bin
'';
# Issues with hydra
# In file included from hxtool.c:34:0:
# hx_locl.h:67:25: fatal error: pkcs10_asn1.h: No such file or directory
#enableParallelBuilding = true;
passthru = {
implementation = "heimdal";
tests.nixos = nixosTests.kerberos.heimdal;
};
meta = with lib; {
homepage = "https://www.heimdal.software";
changelog = "https://github.com/heimdal/heimdal/releases";
description = "An implementation of Kerberos 5 (and some more stuff)";
license = licenses.bsd3;
platforms = platforms.unix;
maintainers = with maintainers; [ h7x4 ];
};
}

View File

@ -0,0 +1,178 @@
{ lib
, stdenv
, fetchFromGitHub
, autoreconfHook
, pkg-config
, python3
, perl
, bison
, flex
, texinfo
, perlPackages
, openldap
, libcap_ng
, sqlite
, openssl
, db
, libedit
, pam
, krb5
, libmicrohttpd
, cjson
, CoreFoundation
, Security
, SystemConfiguration
, curl
, jdk
, unzip
, which
, nixosTests
, withCJSON ? true
, withCapNG ? stdenv.isLinux
# libmicrohttpd should theoretically work for darwin as well, but something is broken.
# It affects tests check-bx509d and check-httpkadmind.
, withMicroHTTPD ? stdenv.isLinux
, withOpenLDAP ? true
, withOpenLDAPAsHDBModule ? false
, withOpenSSL ? true
, withSQLite3 ? true
}:
assert lib.assertMsg (withOpenLDAPAsHDBModule -> withOpenLDAP) ''
OpenLDAP needs to be enabled in order to build the OpenLDAP HDB Module.
'';
stdenv.mkDerivation {
pname = "heimdal";
version = "7.8.0-unstable-2023-11-29";
src = fetchFromGitHub {
owner = "heimdal";
repo = "heimdal";
rev = "3253c49544eacb33d5ad2f6f919b0696e5aab794";
hash = "sha256-uljzQBzXrZCZjcIWfioqHN8YsbUUNy14Vo+A3vZIXzM=";
};
outputs = [ "out" "dev" "man" "info" ];
nativeBuildInputs = [
autoreconfHook
pkg-config
python3
perl
bison
flex
texinfo
]
++ (with perlPackages; [ JSON ]);
buildInputs = [ db libedit pam ]
++ lib.optionals (stdenv.isDarwin) [ CoreFoundation Security SystemConfiguration ]
++ lib.optionals (withCJSON) [ cjson ]
++ lib.optionals (withCapNG) [ libcap_ng ]
++ lib.optionals (withMicroHTTPD) [ libmicrohttpd ]
++ lib.optionals (withOpenLDAP) [ openldap ]
++ lib.optionals (withOpenSSL) [ openssl ]
++ lib.optionals (withSQLite3) [ sqlite ];
doCheck = true;
nativeCheckInputs = [
curl
jdk
unzip
which
];
configureFlags = [
"--with-libedit-include=${libedit.dev}/include"
"--with-libedit-lib=${libedit}/lib"
"--with-berkeley-db-include=${db.dev}/include"
"--with-berkeley-db"
"--without-x"
"--disable-afs-string-to-key"
] ++ lib.optionals (withCapNG) [
"--with-capng"
] ++ lib.optionals (withCJSON) [
"--with-cjson=${cjson}"
] ++ lib.optionals (withOpenLDAP) [
"--with-openldap=${openldap.dev}"
] ++ lib.optionals (withOpenLDAPAsHDBModule) [
"--enable-hdb-openldap-module"
] ++ lib.optionals (withSQLite3) [
"--with-sqlite3=${sqlite.dev}"
];
# (check-ldap) slapd resides within ${openldap}/libexec,
# which is not part of $PATH by default.
# (check-ldap) prepending ${openldap}/bin to the path to avoid
# using the default installation of openldap on unsandboxed darwin systems,
# which does not support the new mdb backend at the moment (2024-01-13).
# (check-ldap) the bdb backend got deprecated in favour of mdb in openldap 2.5.0,
# but the heimdal tests still seem to expect bdb as the openldap backend.
# This might be fixed upstream in a future update.
patchPhase = ''
runHook prePatch
substituteInPlace tests/ldap/slapd-init.in \
--replace 'SCHEMA_PATHS="' 'SCHEMA_PATHS="${openldap}/etc/schema '
substituteInPlace tests/ldap/check-ldap.in \
--replace 'PATH=' 'PATH=${openldap}/libexec:${openldap}/bin:'
substituteInPlace tests/ldap/slapd.conf \
--replace 'database bdb' 'database mdb'
runHook postPatch
'';
# (test_cc) heimdal uses libroken's implementation of `secure_getenv` on darwin,
# which expects either USER or LOGNAME to be set.
preCheck = lib.optionalString (stdenv.isDarwin) ''
export USER=nix-builder
'';
# We need to build hcrypt for applications like samba
postBuild = ''
(cd include/hcrypto; make -j $NIX_BUILD_CORES)
(cd lib/hcrypto; make -j $NIX_BUILD_CORES)
'';
postInstall = ''
# Install hcrypto
(cd include/hcrypto; make -j $NIX_BUILD_CORES install)
(cd lib/hcrypto; make -j $NIX_BUILD_CORES install)
mkdir -p $dev/bin
mv $out/bin/krb5-config $dev/bin/
# asn1 compilers, move them to $dev
mv $out/libexec/heimdal/* $dev/bin
rmdir $out/libexec/heimdal
# compile_et is needed for cross-compiling this package and samba
mv lib/com_err/.libs/compile_et $dev/bin
'';
# Issues with hydra
# In file included from hxtool.c:34:0:
# hx_locl.h:67:25: fatal error: pkcs10_asn1.h: No such file or directory
#enableParallelBuilding = true;
passthru = {
implementation = "heimdal";
tests.nixos = nixosTests.kerberos.heimdal;
};
meta = with lib; {
homepage = "https://www.heimdal.software";
changelog = "https://github.com/heimdal/heimdal/releases";
description = "An implementation of Kerberos 5 (and some more stuff)";
license = licenses.bsd3;
platforms = platforms.unix;
maintainers = with maintainers; [ h7x4 ];
};
}

View File

@ -0,0 +1,115 @@
{ config, pkgs, lib, ... }:
{
services.openldap = let
dn = "dc=kerberos,dc=pvv,dc=ntnu,dc=no";
cfg = config.services.openldap;
in {
enable = true;
# NOTE: this is a custom build of openldap with support for
# perl and kerberos.
package = pkgs.openldap.overrideAttrs (prev: {
# https://github.com/openldap/openldap/blob/master/configure
configureFlags = prev.configureFlags ++ [
# Connect to slapd via UNIX socket
"--enable-local"
# Cyrus SASL
"--enable-spasswd"
# Reverse hostname lookups
"--enable-rlookups"
# perl
"--enable-perl"
];
buildInputs = prev.buildInputs ++ (with pkgs; [
perl
config.security.krb5.package
]);
extraContribModules = prev.extraContribModules ++ [
# https://git.openldap.org/openldap/openldap/-/tree/master/contrib/slapd-modules
"smbk5pwd"
];
});
settings = {
attrs = {
olcLogLevel = [ "stats" "config" "args" ];
# olcAuthzRegexp = ''
# gidNumber=.*\\\+uidNumber=0,cn=peercred,cn=external,cn=auth
# "uid=heimdal,${dn2}"
# '';
# olcSaslSecProps = "minssf=0";
};
children = {
"cn=schema".includes = let
# NOTE: needed for smbk5pwd.so module
# schemaToLdif = name: path: pkgs.runCommandNoCC name {
# buildInputs = with pkgs; [ schema2ldif ];
# } ''
# schema2ldif "${path}" > $out
# '';
# hdb-ldif = schemaToLdif "hdb.ldif" "${pkgs.heimdal.src}/lib/hdb/hdb.schema";
# samba-ldif = schemaToLdif "samba.ldif" "${pkgs.heimdal.src}/tests/ldap/samba.schema";
in [
"${cfg.package}/etc/schema/core.ldif"
"${cfg.package}/etc/schema/cosine.ldif"
"${cfg.package}/etc/schema/nis.ldif"
"${cfg.package}/etc/schema/inetorgperson.ldif"
# "${hdb-ldif}"
# "${samba-ldif}"
];
# NOTE: installation of smbk5pwd.so module
# https://git.openldap.org/openldap/openldap/-/tree/master/contrib/slapd-modules/smbk5pwd
# "cn=module{0}".attrs = {
# objectClass = [ "olcModuleList" ];
# olcModuleLoad = [ "${cfg.package}/lib/modules/smbk5pwd.so" ];
# };
# NOTE: activation of smbk5pwd.so module for {1}mdb
# "olcOverlay={0}smbk5pwd,olcDatabase={1}mdb".attrs = {
# objectClass = [ "olcOverlayConfig" "olcSmbK5PwdConfig" ];
# olcOverlay = "{0}smbk5pwd";
# olcSmbK5PwdEnable = [ "krb5" "samba" ];
# olcSmbK5PwdMustChange = toString (60 * 60 * 24 * 30);
# };
"olcDatabase={1}mdb".attrs = {
objectClass = [ "olcDatabaseConfig" "olcMdbConfig" ];
olcDatabase = "{1}mdb";
olcSuffix = dn;
# TODO: PW is supposed to be a secret, but it's probably fine for testing
olcRootDN = "cn=admin,${dn}";
olcRootPW.path = pkgs.writeText "olcRootPW" "pass";
olcDbDirectory = "/var/lib/openldap/test-db";
olcDbIndex = "objectClass eq";
olcAccess = [
''{0}to attrs=userPassword,shadowLastChange
by dn.exact=cn=admin,${dn} write
by self write
by anonymous auth
by * none''
''{1}to dn.base=""
by * read''
/* allow read on anything else */
# ''{2}to *
# by cn=admin,${dn} write by dn.exact=gidNumber=0+uidNumber=0+cn=peercred,cn=external write
# by * read''
];
};
};
};
};
}

View File

@ -0,0 +1,14 @@
{ ... }:
{
# TODO: This is seemingly required for openldap to authenticate
# against kerberos, but I have no idea how to configure it as
# such. Does it need a keytab? There's a binary "testsaslauthd"
# that comes with `pkgs.cyrus_sasl` which might be useful.
services.saslauthd = {
enable = true;
mechanism = "kerberos5";
# config = ''
# '';
};
}

View File

@ -0,0 +1,37 @@
# Do not modify this file! It was generated by nixos-generate-config
# and may be overwritten by future invocations. Please make changes
# to /etc/nixos/configuration.nix instead.
{ config, lib, pkgs, modulesPath, ... }:
{
imports =
[ (modulesPath + "/installer/scan/not-detected.nix")
];
boot.initrd.availableKernelModules = [ "uhci_hcd" "ehci_pci" "ata_piix" "hpsa" "usb_storage" "usbhid" "sd_mod" "sr_mod" ];
boot.initrd.kernelModules = [ ];
boot.kernelModules = [ "kvm-intel" ];
boot.extraModulePackages = [ ];
fileSystems."/" =
{ device = "/dev/disk/by-uuid/ed9654fe-575a-4fb3-b6ff-1b059479acff";
fsType = "ext4";
};
swapDevices = [ ];
# Enables DHCP on each ethernet and wireless interface. In case of scripted networking
# (the default) this is the recommended approach. When using systemd-networkd it's
# still possible to use this option, but it's recommended to use it in conjunction
# with explicit per-interface declarations with `networking.interfaces.<interface>.useDHCP`.
networking.useDHCP = lib.mkDefault true;
# networking.interfaces.enp14s0f0.useDHCP = lib.mkDefault true;
# networking.interfaces.enp14s0f1.useDHCP = lib.mkDefault true;
# networking.interfaces.enp3s0f0.useDHCP = lib.mkDefault true;
# networking.interfaces.enp3s0f1.useDHCP = lib.mkDefault true;
# networking.interfaces.enp4s0f0.useDHCP = lib.mkDefault true;
# networking.interfaces.enp4s0f1.useDHCP = lib.mkDefault true;
nixpkgs.hostPlatform = lib.mkDefault "x86_64-linux";
hardware.cpu.intel.updateMicrocode = lib.mkDefault config.hardware.enableRedistributableFirmware;
}

View File

@ -1,12 +1,12 @@
{ config, fp, pkgs, values, ... }: { config, pkgs, values, ... }:
{ {
imports = [ imports = [
# Include the results of the hardware scan. # Include the results of the hardware scan.
./hardware-configuration.nix ./hardware-configuration.nix
(fp /base) ../../base.nix
(fp /misc/metrics-exporters.nix) ../../misc/metrics-exporters.nix
(fp /modules/grzegorz.nix) ../../modules/grzegorz.nix
]; ];
boot.loader.systemd-boot.enable = true; boot.loader.systemd-boot.enable = true;

View File

@ -1,55 +1,42 @@
{ config, fp, pkgs, lib, values, ... }: { config, pkgs, values, ... }:
{ {
imports = [ imports = [
# Include the results of the hardware scan. # Include the results of the hardware scan.
./hardware-configuration.nix ./hardware-configuration.nix
(fp /base) ../../base.nix
(fp /misc/metrics-exporters.nix) ../../misc/metrics-exporters.nix
./services/monitoring
./services/nginx ./services/nginx
./services/metrics
]; ];
sops.defaultSopsFile = fp /secrets/ildkule/ildkule.yaml; sops.defaultSopsFile = ../../secrets/ildkule/ildkule.yaml;
sops.age.sshKeyPaths = [ "/etc/ssh/ssh_host_ed25519_key" ]; sops.age.sshKeyPaths = [ "/etc/ssh/ssh_host_ed25519_key" ];
sops.age.keyFile = "/var/lib/sops-nix/key.txt"; sops.age.keyFile = "/var/lib/sops-nix/key.txt";
sops.age.generateKey = true; sops.age.generateKey = true;
boot.loader.grub.device = "/dev/vda"; boot.loader.systemd-boot.enable = true;
boot.tmp.cleanOnBoot = true; boot.loader.efi.canTouchEfiVariables = true;
zramSwap.enable = true;
# Openstack Neutron and systemd-networkd are not best friends, use something else: networking.hostName = "ildkule"; # Define your hostname.
systemd.network.enable = lib.mkForce false;
networking = let
hostConf = values.hosts.ildkule;
in {
hostName = "ildkule";
tempAddresses = "disabled";
useDHCP = lib.mkForce true;
search = values.defaultNetworkConfig.domains; systemd.network.networks."30-ens18" = values.defaultNetworkConfig // {
nameservers = values.defaultNetworkConfig.dns; matchConfig.Name = "ens18";
defaultGateway.address = hostConf.ipv4_internal_gw; address = with values.hosts.ildkule; [ (ipv4 + "/25") (ipv6 + "/64") ];
interfaces."ens4" = {
ipv4.addresses = [
{ address = hostConf.ipv4; prefixLength = 32; }
{ address = hostConf.ipv4_internal; prefixLength = 24; }
];
ipv6.addresses = [
{ address = hostConf.ipv6; prefixLength = 64; }
];
};
}; };
# List packages installed in system profile # List packages installed in system profile
environment.systemPackages = with pkgs; [ environment.systemPackages = with pkgs; [
]; ];
# No devices with SMART # List services that you want to enable:
services.smartd.enable = false;
system.stateVersion = "23.11"; # Did you read the comment? # This value determines the NixOS release from which the default
# settings for stateful data, like file locations and database versions
# on your system were taken. It's perfectly fine and recommended to leave
# this value at the release version of the first install of this system.
# Before changing this value read the documentation for this option
# (e.g. man configuration.nix or on https://nixos.org/nixos/options.html).
system.stateVersion = "21.11"; # Did you read the comment?
} }

View File

@ -1,16 +1,37 @@
{ modulesPath, lib, ... }: # Do not modify this file! It was generated by nixos-generate-config
# and may be overwritten by future invocations. Please make changes
# to /etc/nixos/configuration.nix instead.
{ config, lib, pkgs, modulesPath, ... }:
{ {
imports = [ (modulesPath + "/profiles/qemu-guest.nix") ]; imports =
boot.initrd.availableKernelModules = [ "ata_piix" "uhci_hcd" "xen_blkfront" "vmw_pvscsi" ]; [ (modulesPath + "/profiles/qemu-guest.nix")
boot.initrd.kernelModules = [ "nvme" ]; ];
fileSystems."/" = {
device = "/dev/disk/by-uuid/e35eb4ce-aac3-4f91-8383-6e7cd8bbf942"; boot.initrd.availableKernelModules = [ "ata_piix" "uhci_hcd" "virtio_pci" "sr_mod" "virtio_blk" ];
fsType = "ext4"; boot.initrd.kernelModules = [ ];
}; boot.kernelModules = [ ];
fileSystems."/data" = { boot.extraModulePackages = [ ];
device = "/dev/disk/by-uuid/0a4c1234-02d3-4b53-aeca-d95c4c8d534b";
fileSystems."/" =
{ device = "/dev/disk/by-uuid/afe70fe4-681a-4675-8cbd-e5d08cdcf5b5";
fsType = "ext4"; fsType = "ext4";
}; };
fileSystems."/boot" =
{ device = "/dev/disk/by-uuid/B71A-E5CD";
fsType = "vfat";
};
swapDevices = [ ];
# Enables DHCP on each ethernet and wireless interface. In case of scripted networking
# (the default) this is the recommended approach. When using systemd-networkd it's
# still possible to use this option, but it's recommended to use it in conjunction
# with explicit per-interface declarations with `networking.interfaces.<interface>.useDHCP`.
networking.useDHCP = lib.mkDefault true; networking.useDHCP = lib.mkDefault true;
# networking.interfaces.ens18.useDHCP = lib.mkDefault true;
nixpkgs.hostPlatform = lib.mkDefault "x86_64-linux";
hardware.cpu.intel.updateMicrocode = lib.mkDefault config.hardware.enableRedistributableFirmware;
} }

View File

@ -2,9 +2,8 @@
{ {
imports = [ imports = [
./prometheus
./grafana.nix ./grafana.nix
./loki.nix ./loki.nix
./prometheus
./uptime-kuma.nix
]; ];
} }

View File

@ -7,6 +7,7 @@ in {
in { in {
"keys/grafana/secret_key" = { inherit owner group; }; "keys/grafana/secret_key" = { inherit owner group; };
"keys/grafana/admin_password" = { inherit owner group; }; "keys/grafana/admin_password" = { inherit owner group; };
"keys/postgres/grafana" = { inherit owner group; };
}; };
services.grafana = { services.grafana = {
@ -17,7 +18,7 @@ in {
secretFile = path: "$__file{${path}}"; secretFile = path: "$__file{${path}}";
in { in {
server = { server = {
domain = "grafana.pvv.ntnu.no"; domain = "ildkule.pvv.ntnu.no";
http_port = 2342; http_port = 2342;
http_addr = "127.0.0.1"; http_addr = "127.0.0.1";
}; };
@ -26,6 +27,13 @@ in {
secret_key = secretFile config.sops.secrets."keys/grafana/secret_key".path; secret_key = secretFile config.sops.secrets."keys/grafana/secret_key".path;
admin_password = secretFile config.sops.secrets."keys/grafana/admin_password".path; admin_password = secretFile config.sops.secrets."keys/grafana/admin_password".path;
}; };
database = {
type = "postgres";
user = "grafana";
host = "${values.hosts.bicep.ipv4}:5432";
password = secretFile config.sops.secrets."keys/postgres/grafana".path;
};
}; };
provision = { provision = {
@ -34,13 +42,13 @@ in {
{ {
name = "Ildkule Prometheus"; name = "Ildkule Prometheus";
type = "prometheus"; type = "prometheus";
url = "http://${config.services.prometheus.listenAddress}:${toString config.services.prometheus.port}"; url = ("http://${config.services.prometheus.listenAddress}:${toString config.services.prometheus.port}");
isDefault = true; isDefault = true;
} }
{ {
name = "Ildkule loki"; name = "Ildkule loki";
type = "loki"; type = "loki";
url = "http://${config.services.loki.configuration.server.http_listen_address}:${toString config.services.loki.configuration.server.http_listen_port}"; url = ("http://${config.services.loki.configuration.server.http_listen_address}:${toString config.services.loki.configuration.server.http_listen_port}");
} }
]; ];
dashboards.settings.providers = [ dashboards.settings.providers = [
@ -75,12 +83,6 @@ in {
url = "https://grafana.com/api/dashboards/240/revisions/3/download"; url = "https://grafana.com/api/dashboards/240/revisions/3/download";
options.path = dashboards/go-processes.json; options.path = dashboards/go-processes.json;
} }
{
name = "Gitea Dashbaord";
type = "file";
url = "https://grafana.com/api/dashboards/17802/revisions/3/download";
options.path = dashboards/gitea-dashbaord.json;
}
]; ];
}; };
@ -89,7 +91,6 @@ in {
services.nginx.virtualHosts.${cfg.settings.server.domain} = { services.nginx.virtualHosts.${cfg.settings.server.domain} = {
enableACME = true; enableACME = true;
forceSSL = true; forceSSL = true;
kTLS = true;
locations = { locations = {
"/" = { "/" = {
proxyPass = "http://127.0.0.1:${toString cfg.settings.server.http_port}"; proxyPass = "http://127.0.0.1:${toString cfg.settings.server.http_port}";

Some files were not shown because too many files have changed in this diff.