Compare commits


90 Commits

SHA1 Message Date
54a6b687dd nixfmt 2026-02-20 18:12:39 +01:00
eedb94b998 flake.lock: bump dibbler 2026-02-19 20:56:04 +01:00
18167dca0a update README to reflect added host 2026-02-14 19:12:41 +01:00
b5fecc94a7 hosts: add skrot
Co-authored-by: System administrator <root@skrot.pvv.ntnu.no>
Reviewed-on: #124
Co-authored-by: Vegard Bieker Matthey <VegardMatthey@protonmail.com>
Co-committed-by: Vegard Bieker Matthey <VegardMatthey@protonmail.com>
2026-02-14 18:53:54 +01:00
0d40c7d7a7 base/acme: use different email alias for account 2026-02-13 19:45:45 +09:00
b327582236 kommode/gitea: use redis for sessions and queue 2026-02-13 18:55:42 +09:00
7e39bf3ba2 bicep/matrix/ooye: add rsync pull target for principal backups 2026-02-13 18:26:55 +09:00
5bb0cd0465 kommode/gitea: set default theme 2026-02-13 14:32:36 +09:00
9efda802cb kommode/gitea: move ui configuration to customization 2026-02-13 14:23:48 +09:00
3c08be3d73 kommode/gitea: configure redis cache 2026-02-13 03:50:21 +09:00
b1a2836b5d kommode/gitea: custom emoji 2026-02-13 03:38:45 +09:00
ba1f30f737 kommode/gitea: configure more meta fields 2026-02-13 03:13:49 +09:00
c455c5a7e3 bicep/matrix/livekit: fix matrix domain in livekit, allow dan's server as well 2026-02-11 22:58:19 +01:00
35907be4f2 update sops keys for skrott 2026-02-07 22:17:09 +01:00
210f74dc59 secrets: sops updatekeys 2026-02-08 05:19:26 +09:00
d35de940c1 update gpg install cmd for secrets 2026-02-07 21:12:03 +01:00
daa4b9e271 bekkalokk/mediawiki: adjust umask 2026-02-07 01:46:55 +09:00
12eb0b3f53 bekkalokk/mediawiki: allow uploading more filetypes 2026-02-07 00:56:38 +09:00
02bdb8d45b kommode/gitea/web: use default login shell 2026-02-05 13:25:06 +09:00
a5143c0aaa bekkalokk/nettsiden: fix gallery rsync target 2026-02-05 13:19:29 +09:00
561404cd87 bump dibbler 2026-02-04 04:11:56 +01:00
3338b4cd61 gluttony: fix ipv4 addr 2026-02-03 21:05:53 +01:00
2354dcf578 gluttony: update disk id 2026-02-03 16:18:43 +01:00
304304185c base: add lsof to list of default installed packages 2026-02-02 23:59:35 +09:00
b712f3cda3 temmie/userweb: add a few more packages 2026-01-31 21:53:12 +09:00
cc272a724c temmie/userweb: add directory index search path 2026-01-31 21:30:23 +09:00
fcaa97884e temmie/userweb: add a bunch more normal packages 2026-01-31 21:20:26 +09:00
11f2cf504f temmie/userweb: add a bunch more perl packages 2026-01-31 20:31:03 +09:00
7ab16bc949 temmie/userweb: restrict log access 2026-01-31 19:08:02 +09:00
c4d5cfde56 temmie/userweb: add legacy-cgi to the python package set 2026-01-31 18:53:44 +09:00
100d09f6b7 temmie/userweb: get first iteration working 2026-01-31 18:41:17 +09:00
3b0742bfac temmie: combine homedirs in overlayfs 2026-01-31 18:41:17 +09:00
3ba1ea2e4f flake.lock: bump 2026-01-31 13:44:39 +09:00
91de031896 treewide: limit rsync pull target access to principal 2026-01-31 11:14:18 +09:00
c3ce6a40ea ildkule/grafana: update a bunch of dashboards 2026-01-31 01:07:26 +09:00
beee0ddc75 ildkule/grafana: remove dashboard for gogs 2026-01-31 00:58:34 +09:00
359f599655 bekkalokk/snappymail: add rsync pull target for principal 2026-01-31 00:19:09 +09:00
5b1c6f16d1 bekkalokk/vaultwarden: add rsync pull target for principal 2026-01-31 00:18:57 +09:00
cec69d89a8 bicep/{postgres,mysql}: fix old backup deletion (again) 2026-01-30 13:26:10 +09:00
af0bf7b254 bicep/{postgres,mysql}: fix old backup deletion 2026-01-29 14:57:46 +09:00
bcf8b1607f bicep/{postgres,mysql}: use hardlink for latest backup file 2026-01-29 14:53:07 +09:00
1d46fd1ec6 bicep/{postgres,mysql}: keep multiple backups, point at latest with symlink 2026-01-29 14:16:34 +09:00
bac53be707 bicep/{postgres,mysql}: use zstd for backup compression 2026-01-29 13:50:35 +09:00
f08bd96b74 bicep/{postgres,mysql}: move backups to /data 2026-01-29 13:41:06 +09:00
25f2a13391 packages/mediawiki-extensions: bump all 2026-01-29 13:34:42 +09:00
8774c81d23 bicep/{postgres,mysql}: custom backup units 2026-01-29 13:32:28 +09:00
d6eca5c4e3 bicep/{postgres,mysql}: split config into several files 2026-01-29 13:18:25 +09:00
49d1122ee5 bicep/mysql: enable slow query logs 2026-01-28 14:55:52 +09:00
31bbf4b25f bicep/synapse: enable auto-compressor timer 2026-01-28 14:50:57 +09:00
2f7e1439d0 bicep/mysql: pin version, upgrade from 11.4 -> 11.8 2026-01-28 14:01:14 +09:00
fa31a84bd2 bicep/postgres: upgrade from 15 -> 18 2026-01-28 14:00:25 +09:00
b77c8eb5c0 modules/rsync-pull-targets: fix multiple pull targets with same user 2026-01-27 21:10:17 +09:00
949661113e bicep/mysql: move backup dir 2026-01-27 20:47:40 +09:00
f442c4d65f bicep/minecraft-heatmap: gate remaining config behind cfg.enable 2026-01-27 20:44:20 +09:00
690aee634b bicep/postgres: gate remaining config behind cfg.enable 2026-01-27 20:44:20 +09:00
2ed1c83858 bicep/{postgres,mysql}: add rsync pull targets for backups 2026-01-27 20:39:12 +09:00
d43de08a3b flake.lock: bump 2026-01-27 19:44:45 +09:00
e8c7f177e8 kommode: use disko to configure disks 2026-01-27 19:00:12 +09:00
fb59a242fb kommode/gitea: add rsync pull target for gitea dump dir 2026-01-27 18:55:25 +09:00
65d095feb1 bekkalokk/mediawiki, bicep/matrix/synapse: add keys for rsync targets 2026-01-27 18:55:03 +09:00
8273d98788 flake.nix: add disko to default devshell 2026-01-27 18:35:18 +09:00
8a84069dcf bicep/mysql: use BindPaths to access dataDir 2026-01-27 17:23:38 +09:00
cda84be5b0 bekkalokk/well-known: add note about bug bounty program to security.txt 2026-01-27 17:11:07 +09:00
79a46ce3f6 bicep/element: set default country code 2026-01-27 04:11:40 +09:00
19e45be83a .mailmap: further dedup 2026-01-27 04:07:25 +09:00
a8892e2fb2 hosts/various: bump stateVersion 2026-01-27 04:00:48 +09:00
a149f97ac0 bicep: bump stateVersion from 22.11 -> 25.11 2026-01-27 03:59:40 +09:00
e76c656378 bekkalokk: bump stateVersion from 22.11 -> 25.11 2026-01-27 03:52:34 +09:00
5877ef60b1 modules/rsync-pull-targets: leave TODO about assertion 2026-01-27 00:27:00 +09:00
73456de527 bekkalokk/mediawiki, bicep/matrix/synapse: leave principal rsync target stubs 2026-01-27 00:26:42 +09:00
2f8e9ea190 modules/rsync-pull-targets: init, migrate bekkalokk/website/fetch-gallery 2026-01-26 23:57:20 +09:00
c3c98392ad bicep/hookshot: add passkey to sops 2026-01-26 21:52:58 +09:00
e01fd902eb bekkalokk/mediawiki: move secret.key to sops 2026-01-26 17:55:55 +09:00
ce8d759f79 skrott: yeet 700MB worth of firmware, leave raspberry-specific firmware be 2026-01-26 17:09:18 +09:00
ea6296f47a base/vm: disable graphics for vms by default 2026-01-26 17:08:35 +09:00
c28fc3f229 ildkule/prometheus: add temmie,gluttony, re-enable lupine-2 2026-01-26 17:04:55 +09:00
c124183d95 ildkule/prometheus: scrape skrott 2026-01-26 17:04:52 +09:00
d7bb316056 skrott: yeetus ncdu 2026-01-26 15:45:10 +09:00
c78c29aaa6 skrott: don't pull in nixpkgs/nixpkgs-unstable source tarballs 2026-01-26 15:43:23 +09:00
7d451f1db5 base/auto-upgrade: don't install flake-inputs.json when disabled 2026-01-26 15:42:56 +09:00
1d57cec04d base/acme: remove deprecated argument 2026-01-26 15:07:40 +09:00
f50372fabd .sops.yaml: remove yet more remains of jokum 2026-01-26 13:53:30 +09:00
0f355046de .sops.yaml: add skrott 2026-01-26 13:53:16 +09:00
285f5b6a84 flake.nix: point skrott-x86_64 at correct nixosConfiguration, add -sd variants 2026-01-26 13:46:15 +09:00
20eec03cd4 bakke: fix eval warnings about kernel packages 2026-01-26 13:46:14 +09:00
fffdf77d6f skrott: disable more stuff 2026-01-26 13:46:13 +09:00
42bbb1eca1 flake.nix: make native skrott default, misc cleaning 2026-01-26 13:28:42 +09:00
34fdc9159c bekkalokk/mediawiki: remove nonused module import 2026-01-26 13:19:48 +09:00
1b6ff9876d Remove global packages from users, skrott: remove neovim properly 2026-01-26 13:16:06 +09:00
0206c159a2 skrott: cross compile and further minimize 2026-01-26 13:15:46 +09:00
162 changed files with 13771 additions and 25391 deletions

View File

@@ -23,3 +23,9 @@ Adrian Gunnar Lauterer <adriangl@pvv.ntnu.no> Adrian Gunnar Lauterer <adrian@lau
Fredrik Robertsen <frero@pvv.ntnu.no> frero <frero@pvv.ntnu.no>
Fredrik Robertsen <frero@pvv.ntnu.no> fredrikr79 <fredrikrobertsen7@gmail.com>
Fredrik Robertsen <frero@pvv.ntnu.no> fredrik <fredrikr79@pm.me>
Vegard Bieker Matthey <vegardbm@pvv.ntnu.no> Vegard Matthey <VegardMatthey@protonmail.com>
Vegard Bieker Matthey <vegardbm@pvv.ntnu.no> Vegard Bieker Matthey <VegardMatthey@protonmail.com>
Albert Bayazidi <albertba@pvv.ntnu.no> Albert <albert.bayazidi@gmail.com>

View File

@@ -20,7 +20,9 @@ keys:
- &host_lupine-3 age1j2u876z8hu87q5npfxzzpfgllyw8ypj66d7cgelmzmnrf3xud34qzkntp9
- &host_lupine-4 age1t8zlawqkmhye737pn8yx0z3p9cl947d9ktv2cajdc6hnvn52d3fsc59s2k
- &host_lupine-5 age199zkqq4jp4yc3d0hx2q0ksxdtp42xhmjsqwyngh8tswuck34ke3smrfyqu
- &host_skrott age1lpkju2e053aaddpgsr4ef83epclf4c9tp4m98d35ft2fswr8p4tq2ua0mf
- &host_ustetind age1hffjafs4slznksefmtqrlj7rdaqgzqncn4un938rhr053237ry8s3rs0v8
- &host_skrot age1hzkvnktkr8t5gvtq0ccw69e44z5z6wf00n3xhk3hj24emf07je5s6q2evr
creation_rules:
# Global secrets
@@ -137,10 +139,24 @@ creation_rules:
- path_regex: secrets/skrott/[^/]+\.yaml$
key_groups:
- age:
- *host_skrott
- *user_danio
- *user_felixalb
- *user_pederbs_sopp
- *user_pederbs_nord
- *user_pederbs_bjarte
- *user_vegardbm
pgp:
- *user_oysteikt
- path_regex: secrets/skrot/[^/]+\.yaml$
key_groups:
- age:
- *host_skrot
- *user_danio
- *user_felixalb
- *user_pederbs_sopp
- *user_pederbs_nord
- *user_pederbs_bjarte
- *user_vegardbm
pgp:
- *user_oysteikt

View File

@@ -43,7 +43,7 @@ revert the changes on the next nightly rebuild (tends to happen when everybody i
| [kommode][kom] | Virtual | Gitea + Gitea pages |
| [lupine][lup] | Physical | Gitea CI/CD runners |
| shark | Virtual | Test host for authentication, absolutely horrendous |
| [skrott][skr] | Physical | Kiosk, snacks and soda |
| [skrot/skrott][skr] | Physical | Kiosk, snacks and soda |
| [wenche][wen] | Virtual | Nix-builders, general purpose compute |
## Documentation

View File

@@ -1,4 +1,9 @@
{ lib, config, inputs, ... }:
{
lib,
config,
inputs,
...
}:
{
nix = {
gc = {
@@ -11,16 +16,21 @@
allow-dirty = true;
auto-allocate-uids = true;
builders-use-substitutes = true;
experimental-features = [ "nix-command" "flakes" "auto-allocate-uids" ];
experimental-features = [
"nix-command"
"flakes"
"auto-allocate-uids"
];
log-lines = 50;
use-xdg-base-directories = true;
};
/* This makes commandline tools like
** nix run nixpkgs#hello
** and nix-shell -p hello
** use the same channel the system
** was built with
/*
This makes commandline tools like
** nix run nixpkgs#hello
** and nix-shell -p hello
** use the same channel the system
** was built with
*/
registry = lib.mkMerge [
{
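The comment above describes the flake-registry pinning idea; a minimal sketch of the pattern (assumed option names from standard NixOS, not this repo's exact code):

```nix
{ inputs, ... }:
{
  # Pin the CLI's "nixpkgs" to the flake input the system was built from,
  # so `nix run nixpkgs#hello` and `nix-shell -p hello` resolve to the
  # same channel as the system itself.
  nix.registry.nixpkgs.flake = inputs.nixpkgs;
  nix.nixPath = [ "nixpkgs=${inputs.nixpkgs}" ];
}
```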

View File

@@ -13,9 +13,15 @@
# Debug and find files
file
# Process json data
jq
# Check computer specs
lshw
# Check who is keeping open files
lsof
# Scan for open ports with netstat
net-tools
@@ -54,6 +60,8 @@
programs.nano.enable = true;
# Same reasoning as nano
programs.vim.enable = true;
# Same reasoning as vim
programs.neovim.enable = true;
# Some people like this shell for some reason
programs.zsh.enable = true;

View File

@@ -2,14 +2,12 @@
{
security.acme = {
acceptTerms = true;
defaults.email = "drift@pvv.ntnu.no";
defaults.email = "acme-drift@pvv.ntnu.no";
};
# Let's not spam LetsEncrypt in `nixos-rebuild build-vm` mode:
virtualisation.vmVariant = {
security.acme.defaults.server = "https://127.0.0.1";
security.acme.preliminarySelfsigned = true;
users.users.root.initialPassword = "root";
};
}
}
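For context, `virtualisation.vmVariant` holds overrides that apply only when the configuration is built with `nixos-rebuild build-vm`; a minimal sketch with a hypothetical override:

```nix
{ lib, ... }:
{
  virtualisation.vmVariant = {
    # Affects only the build-vm image, never the deployed system.
    services.nginx.enable = lib.mkForce false;
  };
}
```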

View File

@@ -1,4 +1,10 @@
{ config, inputs, pkgs, lib, ... }:
{
config,
inputs,
pkgs,
lib,
...
}:
let
inputUrls = lib.mapAttrs (input: value: value.url) (import "${inputs.self}/flake.nix").inputs;
@@ -16,26 +22,34 @@ in
# --update-input is deprecated since nix 2.22, and removed in lix 2.90
# as such we instead use --override-input combined with --refresh
# https://git.lix.systems/lix-project/lix/issues/400
] ++ (lib.pipe inputUrls [
]
++ (lib.pipe inputUrls [
(lib.intersectAttrs {
nixpkgs = { };
nixpkgs-unstable = { };
})
(lib.mapAttrsToList (input: url: ["--override-input" input url]))
(lib.mapAttrsToList (
input: url: [
"--override-input"
input
url
]
))
lib.concatLists
]);
};
# workaround for https://github.com/NixOS/nix/issues/6895
# via https://git.lix.systems/lix-project/lix/issues/400
environment.etc = lib.mkIf (!config.virtualisation.isVmVariant) {
"current-system-flake-inputs.json".source
= pkgs.writers.writeJSON "flake-inputs.json" (
lib.flip lib.mapAttrs inputs (name: input:
# inputs.*.sourceInfo sans outPath, since writeJSON will otherwise serialize sourceInfo like a derivation
lib.removeAttrs (input.sourceInfo or {}) [ "outPath" ]
// { store-path = input.outPath; } # comment this line if you don't want to retain a store reference to the flake inputs
)
);
};
environment.etc =
lib.mkIf (!config.virtualisation.isVmVariant && config.system.autoUpgrade.enable)
{
"current-system-flake-inputs.json".source = pkgs.writers.writeJSON "flake-inputs.json" (
lib.flip lib.mapAttrs inputs (
name: input:
# inputs.*.sourceInfo sans outPath, since writeJSON will otherwise serialize sourceInfo like a derivation
lib.removeAttrs (input.sourceInfo or { }) [ "outPath" ] // { store-path = input.outPath; } # comment this line if you don't want to retain a store reference to the flake inputs
)
);
};
}
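A minimal sketch (with a hypothetical input URL) of what the `lib.pipe` above contributes to the upgrade flags:

```nix
lib.pipe { nixpkgs = "https://example.org/nixexprs.tar.xz"; } [
  (lib.intersectAttrs { nixpkgs = { }; nixpkgs-unstable = { }; })
  (lib.mapAttrsToList (input: url: [ "--override-input" input url ]))
  lib.concatLists
]
# => [ "--override-input" "nixpkgs" "https://example.org/nixexprs.tar.xz" ]
```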

View File

@@ -1,4 +1,4 @@
{ ... }:
{
services.irqbalance.enable = true;
}
}

View File

@@ -1,4 +1,9 @@
{ config, lib, values, ... }:
{
config,
lib,
values,
...
}:
let
cfg = config.services.journald.upload;
in

View File

@@ -1,7 +1,10 @@
{ ... }:
{
systemd.services.logrotate = {
documentation = [ "man:logrotate(8)" "man:logrotate.conf(5)" ];
documentation = [
"man:logrotate(8)"
"man:logrotate.conf(5)"
];
unitConfig.RequiresMountsFor = "/var/log";
serviceConfig.ReadWritePaths = [ "/var/log" ];
};

View File

@@ -11,7 +11,10 @@
};
};
networking.firewall.allowedTCPPorts = lib.mkIf config.services.nginx.enable [ 80 443 ];
networking.firewall.allowedTCPPorts = lib.mkIf config.services.nginx.enable [
80
443
];
services.nginx = {
recommendedTlsSettings = true;

View File

@@ -12,10 +12,9 @@
settings.PermitRootLogin = "yes";
};
users.users."root".openssh.authorizedKeys.keys = [
"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCqVt4LCe0YIttr9swFxjkjn37ZDY9JxwVC+2gvfSINDJorOCtqPjDOTD2fTS1Gz08QCwpnLWq2kyvRchu6WgriAbSACpbZZBgxRaF/FVh3oiMVFGnNKGnv6/fdo/vZtu8mUVuqtmTrgLYpZdbR4oD3XiBlDKs7Cv5hPqt95lnP6MNFvE8mICCfd1PwhsABd2IQ5laz3u77/RXhNFJL0Kf2/+6gk9awcLuwHrPdvq7c3BxRHbc9UMRQENyjyQPa7aLe+uJBFLKP51I8VBuDpDacuibQx7nMt6N2UJ2KWI0JxRMHuJNq4S5jidR82aOw9gzGbTv30SKNLMqsZ0xj4LtdqCXDiZF6Lr09PsJYsvnBUFWa14HGcThKDtgwQwBryNViYmfv//0h9+RLZiU0ab+NEwSs7Zh5iAD+vhx64QqNX3tR7Le4SWXh8W0eShU9N78qYdSkiC3Ui7htxeqOocXM/P4AwbnHsLELIvkHdvgchCPvl8ygZa4WJTEWv16+ICskJcAKWGuqjvXAFuwjJJmPp9xLW9O0DFfQhMELiGamQR9wK07yYQVr34iah6qZO7cwhSKyEPFrVPIaNtfDhsjED639F7vmktf26SWNJHWfW0wOHILjI6TgqUvy0JDd8W8w0CHlAfz6Fs2l99NNgNF8dB3vBASbxS0hu/y0PVu/xQ== openstack-sleipner"
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAICCbgJ0Uwh9VSVhfId7l9i5/jk4CvAK5rbkiab8R+moF root@sleipner"
];
}

View File

@@ -1,4 +1,9 @@
{ config, pkgs, lib, ... }:
{
config,
pkgs,
lib,
...
}:
let
cfg = config.services.postfix;
in

View File

@@ -1,4 +1,9 @@
{ config, lib, values, ... }:
{
config,
lib,
values,
...
}:
let
cfg = config.services.prometheus.exporters.node;
in

View File

@@ -1,4 +1,9 @@
{ config, lib, values, ... }:
{
config,
lib,
values,
...
}:
let
cfg = config.services.prometheus.exporters.systemd;
in

View File

@@ -1,4 +1,9 @@
{ config, lib, values, ... }:
{
config,
lib,
values,
...
}:
let
cfg = config.services.prometheus.exporters.node;
in
@@ -10,29 +15,33 @@ in
http_listen_port = 28183;
grpc_listen_port = 0;
};
clients = [{
url = "http://ildkule.pvv.ntnu.no:3100/loki/api/v1/push";
}];
scrape_configs = [{
job_name = "systemd-journal";
journal = {
max_age = "12h";
labels = {
job = "systemd-journal";
host = config.networking.hostName;
clients = [
{
url = "http://ildkule.pvv.ntnu.no:3100/loki/api/v1/push";
}
];
scrape_configs = [
{
job_name = "systemd-journal";
journal = {
max_age = "12h";
labels = {
job = "systemd-journal";
host = config.networking.hostName;
};
};
};
relabel_configs = [
{
source_labels = [ "__journal__systemd_unit" ];
target_label = "unit";
}
{
source_labels = [ "__journal_priority_keyword" ];
target_label = "level";
}
];
}];
relabel_configs = [
{
source_labels = [ "__journal__systemd_unit" ];
target_label = "unit";
}
{
source_labels = [ "__journal_priority_keyword" ];
target_label = "level";
}
];
}
];
};
};
}

View File

@@ -1,4 +1,9 @@
{ config, pkgs, lib, ... }:
{
config,
pkgs,
lib,
...
}:
{
services.smartd = {
# NOTE: qemu guests tend not to have SMART-reporting disks. Please override for the
@@ -14,9 +19,12 @@
};
};
environment.systemPackages = lib.optionals config.services.smartd.enable (with pkgs; [
smartmontools
]);
environment.systemPackages = lib.optionals config.services.smartd.enable (
with pkgs;
[
smartmontools
]
);
systemd.services.smartd.unitConfig.ConditionVirtualization = "no";
}

View File

@@ -2,7 +2,7 @@
{
# Let's not thermal throttle
services.thermald.enable = lib.mkIf (lib.all (x: x) [
(config.nixpkgs.system == "x86_64-linux")
(!config.boot.isContainer or false)
]) true;
}
(config.nixpkgs.system == "x86_64-linux")
(!config.boot.isContainer or false)
]) true;
}

View File

@@ -1,4 +1,9 @@
{ config, pkgs, lib, ... }:
{
config,
pkgs,
lib,
...
}:
let
cfg = config.services.uptimed;
in
@@ -15,45 +20,48 @@ in
services.uptimed = {
enable = true;
settings = let
stateDir = "/var/lib/uptimed";
in {
PIDFILE = "${stateDir}/pid";
SENDMAIL = lib.mkDefault "${pkgs.system-sendmail}/bin/sendmail -t";
};
settings =
let
stateDir = "/var/lib/uptimed";
in
{
PIDFILE = "${stateDir}/pid";
SENDMAIL = lib.mkDefault "${pkgs.system-sendmail}/bin/sendmail -t";
};
};
systemd.services.uptimed = lib.mkIf (cfg.enable) {
serviceConfig = let
uptimed = pkgs.uptimed.overrideAttrs (prev: {
postPatch = ''
substituteInPlace Makefile.am \
--replace-fail '$(sysconfdir)/uptimed.conf' '/var/lib/uptimed/uptimed.conf'
substituteInPlace src/Makefile.am \
--replace-fail '$(sysconfdir)/uptimed.conf' '/var/lib/uptimed/uptimed.conf'
'';
});
serviceConfig =
let
uptimed = pkgs.uptimed.overrideAttrs (prev: {
postPatch = ''
substituteInPlace Makefile.am \
--replace-fail '$(sysconfdir)/uptimed.conf' '/var/lib/uptimed/uptimed.conf'
substituteInPlace src/Makefile.am \
--replace-fail '$(sysconfdir)/uptimed.conf' '/var/lib/uptimed/uptimed.conf'
'';
});
in {
Type = "notify";
in
{
Type = "notify";
ExecStart = lib.mkForce "${uptimed}/sbin/uptimed -f";
ExecStart = lib.mkForce "${uptimed}/sbin/uptimed -f";
BindReadOnlyPaths = let
configFile = lib.pipe cfg.settings [
(lib.mapAttrsToList
(k: v:
if builtins.isList v
then lib.mapConcatStringsSep "\n" (v': "${k}=${v'}") v
else "${k}=${v}")
)
(lib.concatStringsSep "\n")
(pkgs.writeText "uptimed.conf")
];
in [
"${configFile}:/var/lib/uptimed/uptimed.conf"
];
};
BindReadOnlyPaths =
let
configFile = lib.pipe cfg.settings [
(lib.mapAttrsToList (
k: v: if builtins.isList v then lib.mapConcatStringsSep "\n" (v': "${k}=${v'}") v else "${k}=${v}"
))
(lib.concatStringsSep "\n")
(pkgs.writeText "uptimed.conf")
];
in
[
"${configFile}:/var/lib/uptimed/uptimed.conf"
];
};
};
};
}
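A small sketch of the settings-to-file rendering above, with a hypothetical settings value (list-valued settings expand to one `KEY=value` line per element):

```nix
lib.pipe { PIDFILE = "/var/lib/uptimed/pid"; } [
  (lib.mapAttrsToList (k: v: "${k}=${v}"))
  (lib.concatStringsSep "\n")
]
# => "PIDFILE=/var/lib/uptimed/pid"
```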

View File

@@ -1,8 +1,15 @@
{ config, fp, lib, ... }:
{
sops.defaultSopsFile = let
secretsFilePath = fp /secrets/${config.networking.hostName}/${config.networking.hostName}.yaml;
in lib.mkIf (builtins.pathExists secretsFilePath) secretsFilePath;
config,
fp,
lib,
...
}:
{
sops.defaultSopsFile =
let
secretsFilePath = fp /secrets/${config.networking.hostName}/${config.networking.hostName}.yaml;
in
lib.mkIf (builtins.pathExists secretsFilePath) secretsFilePath;
sops.age = lib.mkIf (config.sops.defaultSopsFile != null) {
sshKeyPaths = lib.mkDefault [ "/etc/ssh/ssh_host_ed25519_key" ];
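For context, `fp` is the path helper defined in flake.nix (`fp = path: ./${path};`), so for a hypothetical host named bicep the guarded expression above reduces to roughly:

```nix
sops.defaultSopsFile =
  lib.mkIf (builtins.pathExists ./secrets/bicep/bicep.yaml)
    ./secrets/bicep/bicep.yaml;
```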

View File

@@ -11,5 +11,6 @@
};
config.virtualisation.vmVariant = {
virtualisation.isVmVariant = true;
virtualisation.graphics = false;
};
}

View File

@@ -151,7 +151,7 @@ is up to date, you can do the following:
```console
# Fetch gpg (unless you have it already)
nix-shell -p gpg
nix shell nixpkgs#gnupg
# Import oysteikts key to the gpg keychain
gpg --import ./keys/oysteikt.pub

flake.lock generated
View File

@@ -2,17 +2,16 @@
"nodes": {
"dibbler": {
"inputs": {
"flake-utils": "flake-utils",
"nixpkgs": [
"nixpkgs"
]
},
"locked": {
"lastModified": 1769362210,
"narHash": "sha256-QCQD7Ofin5UYL0i5Sv34gfJ0p5pv1hwZspE/Ufe84L8=",
"lastModified": 1771267058,
"narHash": "sha256-EEL4SmD1b3BPJPsSJJ4wDTXWMumJqbR+BLzhJJG0skE=",
"ref": "main",
"rev": "1d01e1b2cb8fb2adee96c0b4f065c43c45eae290",
"revCount": 229,
"rev": "e3962d02c78b9c7b4d18148d931a9a4bf22e7902",
"revCount": 254,
"type": "git",
"url": "https://git.pvv.ntnu.no/Projects/dibbler.git"
},
@@ -61,23 +60,6 @@
"type": "github"
}
},
"flake-utils": {
"inputs": {
"systems": "systems"
},
"locked": {
"lastModified": 1731533236,
"narHash": "sha256-l0KFg5HjrsfsO/JpG+r7fRrqm12kzFHyUHqHCVpMMbI=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "11707dc2f618dd54ca8739b309ec4fc024de578b",
"type": "github"
},
"original": {
"id": "flake-utils",
"type": "indirect"
}
},
"gergle": {
"inputs": {
"nixpkgs": [
@@ -192,11 +174,11 @@
]
},
"locked": {
"lastModified": 1768749374,
"narHash": "sha256-dhXYLc64d7TKCnRPW4TlHGl6nLRNdabJB2DpJ8ffUw0=",
"lastModified": 1769500363,
"narHash": "sha256-vFxmdsLBPdTy5j2bf54gbTQi1XnWbZDmeR/BBh8MFrw=",
"ref": "main",
"rev": "040294f2e1df46e33d995add6944b25859654097",
"revCount": 37,
"rev": "2618e434e40e109eaab6a0693313c7e0de7324a3",
"revCount": 47,
"type": "git",
"url": "https://git.pvv.ntnu.no/Projects/minecraft-kartverket.git"
},
@@ -213,11 +195,11 @@
]
},
"locked": {
"lastModified": 1767906352,
"narHash": "sha256-wYsH9MMAPFG3XTL+3DwI39XMG0F2fTmn/5lt265a3Es=",
"lastModified": 1770960722,
"narHash": "sha256-IdhPsWFZUKSJh/nLjGLJvGM5d5Uta+k1FlVYPxTZi0E=",
"ref": "main",
"rev": "d054c5d064b8ed6d53a0adb0cf6c0a72febe212e",
"revCount": 13,
"rev": "c2e4aca7e1ba27cd09eeaeab47010d32a11841b2",
"revCount": 15,
"type": "git",
"url": "https://git.pvv.ntnu.no/Drift/nix-gitea-themes.git"
},
@@ -235,11 +217,11 @@
]
},
"locked": {
"lastModified": 1768955766,
"narHash": "sha256-V9ns1OII7sWSbIDwPkiqmJ3Xu/bHgQzj+asgH9cTpOo=",
"lastModified": 1769018862,
"narHash": "sha256-x3eMpPQhZwEDunyaUos084Hx41XwYTi2uHY4Yc4YNlk=",
"owner": "oddlama",
"repo": "nix-topology",
"rev": "71f27de56a03f6d8a1a72cf4d0dfd780bcc075bc",
"rev": "a15cac71d3399a4c2d1a3482ae62040a3a0aa07f",
"type": "github"
},
"original": {
@@ -251,11 +233,11 @@
},
"nixpkgs": {
"locked": {
"lastModified": 1768877948,
"narHash": "sha256-Bq9Hd6DWCBaZ2GkwvJCWGnpGOchaD6RWPSCFxmSmupw=",
"rev": "43b2e61c9d09cf6c1c9c192fe6da08accc9bfb1d",
"lastModified": 1769724120,
"narHash": "sha256-oQBM04hQk1kotfv4qmIG1tHmuwODd1+hqRJE5TELeCE=",
"rev": "8ec59ed5093c2a742d7744e9ecf58f358aa4a87d",
"type": "tarball",
"url": "https://releases.nixos.org/nixos/25.11-small/nixos-25.11.4368.43b2e61c9d09/nixexprs.tar.xz"
"url": "https://releases.nixos.org/nixos/25.11-small/nixos-25.11.4961.8ec59ed5093c/nixexprs.tar.xz"
},
"original": {
"type": "tarball",
@@ -279,11 +261,11 @@
},
"nixpkgs-unstable": {
"locked": {
"lastModified": 1768886240,
"narHash": "sha256-HUAAI7AF+/Ov1u3Vvjs4DL91zTxMkWLC4xJgQ9QxOUQ=",
"rev": "80e4adbcf8992d3fd27ad4964fbb84907f9478b0",
"lastModified": 1769813739,
"narHash": "sha256-RmNWW1DQczvDwBHu11P0hGwJZxbngdoymVu7qkwq/2M=",
"rev": "16a3cae5c2487b1afa240e5f2c1811f172419558",
"type": "tarball",
"url": "https://releases.nixos.org/nixos/unstable-small/nixos-26.05pre930839.80e4adbcf899/nixexprs.tar.xz"
"url": "https://releases.nixos.org/nixos/unstable-small/nixos-26.05pre937548.16a3cae5c248/nixexprs.tar.xz"
},
"original": {
"type": "tarball",
@@ -318,11 +300,11 @@
]
},
"locked": {
"lastModified": 1768636400,
"narHash": "sha256-AiSKT4/25LS1rUlPduBMogf4EbdMQYDY1rS7AvHFcxk=",
"lastModified": 1769009806,
"narHash": "sha256-52xTtAOc9B+MBRMRZ8HI6ybNsRLMlHHLh+qwAbaJjRY=",
"ref": "main",
"rev": "3a8f82b12a44e6c4ceacd6955a290a52d1ee2856",
"revCount": 573,
"rev": "aa8adfc6a4d5b6222752e2d15d4a6d3b3b85252e",
"revCount": 575,
"type": "git",
"url": "https://git.pvv.ntnu.no/Projects/nettsiden.git"
},
@@ -382,11 +364,11 @@
"rust-overlay": "rust-overlay_3"
},
"locked": {
"lastModified": 1768140181,
"narHash": "sha256-HfZzup5/jlu8X5vMUglTovVTSwhHGHwwV1YOFIL/ksA=",
"lastModified": 1769834595,
"narHash": "sha256-P1jrO7BxHyIKDuOXHuUb7bi4H2TuYnACW5eqf1gG47g=",
"ref": "main",
"rev": "834463ed64773939798589ee6fd4adfe3a97dddd",
"revCount": 43,
"rev": "def4eec2d59a69b4638b3f25d6d713b703b2fa56",
"revCount": 49,
"type": "git",
"url": "https://git.pvv.ntnu.no/Projects/roowho2.git"
},
@@ -446,11 +428,11 @@
]
},
"locked": {
"lastModified": 1767322002,
"narHash": "sha256-yHKXXw2OWfIFsyTjduB4EyFwR0SYYF0hK8xI9z4NIn0=",
"lastModified": 1769309768,
"narHash": "sha256-AbOIlNO+JoqRJkK1VrnDXhxuX6CrdtIu2hSuy4pxi3g=",
"owner": "oxalica",
"repo": "rust-overlay",
"rev": "03c6e38661c02a27ca006a284813afdc461e9f7e",
"rev": "140c9dc582cb73ada2d63a2180524fcaa744fad5",
"type": "github"
},
"original": {
@@ -466,11 +448,11 @@
]
},
"locked": {
"lastModified": 1768863606,
"narHash": "sha256-1IHAeS8WtBiEo5XiyJBHOXMzECD6aaIOJmpQKzRRl64=",
"lastModified": 1769469829,
"narHash": "sha256-wFcr32ZqspCxk4+FvIxIL0AZktRs6DuF8oOsLt59YBU=",
"owner": "Mic92",
"repo": "sops-nix",
"rev": "c7067be8db2c09ab1884de67ef6c4f693973f4a2",
"rev": "c5eebd4eb2e3372fe12a8d70a248a6ee9dd02eff",
"type": "github"
},
"original": {
@@ -479,21 +461,6 @@
"repo": "sops-nix",
"type": "github"
}
},
"systems": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
}
},
"root": "root",

flake.nix
View File

@@ -49,301 +49,403 @@
qotd.inputs.nixpkgs.follows = "nixpkgs";
};
outputs = { self, nixpkgs, nixpkgs-unstable, sops-nix, disko, ... }@inputs:
let
inherit (nixpkgs) lib;
systems = [
"x86_64-linux"
"aarch64-linux"
"aarch64-darwin"
];
forAllSystems = f: lib.genAttrs systems f;
allMachines = builtins.attrNames self.nixosConfigurations;
importantMachines = [
"bekkalokk"
"bicep"
"brzeczyszczykiewicz"
"georg"
"ildkule"
];
in {
inputs = lib.mapAttrs (_: src: src.outPath) inputs;
outputs =
{
self,
nixpkgs,
nixpkgs-unstable,
sops-nix,
disko,
...
}@inputs:
let
inherit (nixpkgs) lib;
systems = [
"x86_64-linux"
"aarch64-linux"
"aarch64-darwin"
];
forAllSystems = f: lib.genAttrs systems f;
allMachines = builtins.attrNames self.nixosConfigurations;
importantMachines = [
"bekkalokk"
"bicep"
"brzeczyszczykiewicz"
"georg"
"ildkule"
];
in
{
inputs = lib.mapAttrs (_: src: src.outPath) inputs;
pkgs = forAllSystems (system: import nixpkgs {
inherit system;
config.allowUnfreePredicate = pkg: builtins.elem (lib.getName pkg)
[
"nvidia-x11"
"nvidia-settings"
];
});
nixosConfigurations = let
nixosConfig =
nixpkgs:
name:
configurationPath:
extraArgs@{
localSystem ? "x86_64-linux", # buildPlatform
crossSystem ? "x86_64-linux", # hostPlatform
specialArgs ? { },
modules ? [ ],
overlays ? [ ],
enableDefaults ? true,
...
}:
let
commonPkgsConfig = {
inherit localSystem crossSystem;
config.allowUnfreePredicate = pkg: builtins.elem (lib.getName pkg)
[
"nvidia-x11"
"nvidia-settings"
];
overlays = (lib.optionals enableDefaults [
# Global overlays go here
inputs.roowho2.overlays.default
]) ++ overlays;
};
pkgs = import nixpkgs commonPkgsConfig;
unstablePkgs = import nixpkgs-unstable commonPkgsConfig;
in
lib.nixosSystem (lib.recursiveUpdate
{
system = crossSystem;
inherit pkgs;
specialArgs = {
inherit inputs unstablePkgs;
values = import ./values.nix;
fp = path: ./${path};
} // specialArgs;
modules = [
{
networking.hostName = lib.mkDefault name;
}
configurationPath
] ++ (lib.optionals enableDefaults [
sops-nix.nixosModules.sops
inputs.roowho2.nixosModules.default
]) ++ modules;
pkgs = forAllSystems (
system:
import nixpkgs {
inherit system;
config.allowUnfreePredicate =
pkg:
builtins.elem (lib.getName pkg) [
"nvidia-x11"
"nvidia-settings"
];
}
(builtins.removeAttrs extraArgs [
"localSystem"
"crossSystem"
"modules"
"overlays"
"specialArgs"
"enableDefaults"
])
);
stableNixosConfig = name: extraArgs:
nixosConfig nixpkgs name ./hosts/${name}/configuration.nix extraArgs;
in {
bakke = stableNixosConfig "bakke" {
modules = [
disko.nixosModules.disko
];
};
bicep = stableNixosConfig "bicep" {
modules = [
inputs.matrix-next.nixosModules.default
inputs.pvv-calendar-bot.nixosModules.default
inputs.minecraft-heatmap.nixosModules.default
self.nixosModules.gickup
self.nixosModules.matrix-ooye
];
overlays = [
inputs.pvv-calendar-bot.overlays.default
inputs.minecraft-heatmap.overlays.default
(final: prev: {
inherit (self.packages.${prev.stdenv.hostPlatform.system}) out-of-your-element;
})
];
};
bekkalokk = stableNixosConfig "bekkalokk" {
overlays = [
(final: prev: {
mediawiki-extensions = final.callPackage ./packages/mediawiki-extensions { };
simplesamlphp = final.callPackage ./packages/simplesamlphp { };
bluemap = final.callPackage ./packages/bluemap.nix { };
})
inputs.pvv-nettsiden.overlays.default
inputs.qotd.overlays.default
];
modules = [
inputs.pvv-nettsiden.nixosModules.default
self.nixosModules.bluemap
inputs.qotd.nixosModules.default
];
};
ildkule = stableNixosConfig "ildkule" { };
#ildkule-unstable = unstableNixosConfig "ildkule" { };
shark = stableNixosConfig "shark" { };
wenche = stableNixosConfig "wenche" { };
temmie = stableNixosConfig "temmie" { };
gluttony = stableNixosConfig "gluttony" { };
nixosConfigurations =
let
nixosConfig =
nixpkgs: name: configurationPath:
extraArgs@{
localSystem ? "x86_64-linux", # buildPlatform
crossSystem ? "x86_64-linux", # hostPlatform
specialArgs ? { },
modules ? [ ],
overlays ? [ ],
enableDefaults ? true,
...
}:
let
commonPkgsConfig = {
inherit localSystem crossSystem;
config.allowUnfreePredicate =
pkg:
builtins.elem (lib.getName pkg) [
"nvidia-x11"
"nvidia-settings"
];
overlays =
(lib.optionals enableDefaults [
# Global overlays go here
inputs.roowho2.overlays.default
])
++ overlays;
};
kommode = stableNixosConfig "kommode" {
overlays = [
inputs.nix-gitea-themes.overlays.default
];
modules = [
inputs.nix-gitea-themes.nixosModules.default
];
};
pkgs = import nixpkgs commonPkgsConfig;
unstablePkgs = import nixpkgs-unstable commonPkgsConfig;
in
lib.nixosSystem (
lib.recursiveUpdate
{
system = crossSystem;
ustetind = stableNixosConfig "ustetind" {
modules = [
"${nixpkgs}/nixos/modules/virtualisation/lxc-container.nix"
];
};
inherit pkgs;
brzeczyszczykiewicz = stableNixosConfig "brzeczyszczykiewicz" {
modules = [
inputs.grzegorz-clients.nixosModules.grzegorz-webui
inputs.gergle.nixosModules.default
inputs.greg-ng.nixosModules.default
];
overlays = [
inputs.greg-ng.overlays.default
inputs.gergle.overlays.default
];
};
georg = stableNixosConfig "georg" {
modules = [
inputs.grzegorz-clients.nixosModules.grzegorz-webui
inputs.gergle.nixosModules.default
inputs.greg-ng.nixosModules.default
];
overlays = [
inputs.greg-ng.overlays.default
inputs.gergle.overlays.default
];
};
skrott = stableNixosConfig "skrott" {
crossSystem = "aarch64-linux";
modules = [
(nixpkgs + "/nixos/modules/installer/sd-card/sd-image-aarch64.nix")
inputs.dibbler.nixosModules.default
];
overlays = [
inputs.dibbler.overlays.default
];
};
}
//
(let
machineNames = map (i: "lupine-${toString i}") (lib.range 1 5);
stableLupineNixosConfig = name: extraArgs:
nixosConfig nixpkgs name ./hosts/lupine/configuration.nix extraArgs;
in lib.genAttrs machineNames (name: stableLupineNixosConfig name {
modules = [{ networking.hostName = name; }];
specialArgs.lupineName = name;
}));
specialArgs = {
inherit inputs unstablePkgs;
values = import ./values.nix;
fp = path: ./${path};
}
// specialArgs;
nixosModules = {
bluemap = ./modules/bluemap.nix;
snakeoil-certs = ./modules/snakeoil-certs.nix;
snappymail = ./modules/snappymail.nix;
robots-txt = ./modules/robots-txt.nix;
gickup = ./modules/gickup;
matrix-ooye = ./modules/matrix-ooye.nix;
};
modules = [
{
networking.hostName = lib.mkDefault name;
}
configurationPath
]
++ (lib.optionals enableDefaults [
sops-nix.nixosModules.sops
inputs.roowho2.nixosModules.default
self.nixosModules.rsync-pull-targets
])
++ modules;
}
(
builtins.removeAttrs extraArgs [
"localSystem"
"crossSystem"
"modules"
"overlays"
"specialArgs"
"enableDefaults"
]
)
);
devShells = forAllSystems (system: {
default = nixpkgs-unstable.legacyPackages.${system}.callPackage ./shell.nix { };
cuda = let
cuda-pkgs = import nixpkgs-unstable {
inherit system;
config = {
allowUnfree = true;
cudaSupport = true;
stableNixosConfig =
name: extraArgs: nixosConfig nixpkgs name ./hosts/${name}/configuration.nix extraArgs;
in
{
bakke = stableNixosConfig "bakke" {
modules = [
inputs.disko.nixosModules.disko
];
};
};
in cuda-pkgs.callPackage ./shells/cuda.nix { };
});
packages = {
"x86_64-linux" = let
pkgs = nixpkgs.legacyPackages."x86_64-linux";
in rec {
default = important-machines;
important-machines = pkgs.linkFarm "important-machines"
(lib.getAttrs importantMachines self.packages.x86_64-linux);
all-machines = pkgs.linkFarm "all-machines"
(lib.getAttrs allMachines self.packages.x86_64-linux);
simplesamlphp = pkgs.callPackage ./packages/simplesamlphp { };
bluemap = pkgs.callPackage ./packages/bluemap.nix { };
out-of-your-element = pkgs.callPackage ./packages/ooye/package.nix { };
}
//
# Mediawiki extensions
(lib.pipe null [
(_: pkgs.callPackage ./packages/mediawiki-extensions { })
(lib.flip builtins.removeAttrs ["override" "overrideDerivation"])
(lib.mapAttrs' (name: lib.nameValuePair "mediawiki-${name}"))
])
//
# Machines
lib.genAttrs allMachines
(machine: self.nixosConfigurations.${machine}.config.system.build.toplevel)
//
# Skrott is exception
{
skrott = self.nixosConfigurations.skrott.config.system.build.sdImage;
}
//
# Nix-topology
(let
topology' = import inputs.nix-topology {
pkgs = import nixpkgs {
system = "x86_64-linux";
bicep = stableNixosConfig "bicep" {
modules = [
inputs.matrix-next.nixosModules.default
inputs.pvv-calendar-bot.nixosModules.default
inputs.minecraft-heatmap.nixosModules.default
self.nixosModules.gickup
self.nixosModules.matrix-ooye
];
overlays = [
inputs.nix-topology.overlays.default
inputs.pvv-calendar-bot.overlays.default
inputs.minecraft-heatmap.overlays.default
(final: prev: {
inherit (nixpkgs-unstable.legacyPackages.x86_64-linux) super-tiny-icons;
inherit (self.packages.${prev.stdenv.hostPlatform.system}) out-of-your-element;
})
];
};
bekkalokk = stableNixosConfig "bekkalokk" {
overlays = [
(final: prev: {
mediawiki-extensions = final.callPackage ./packages/mediawiki-extensions { };
simplesamlphp = final.callPackage ./packages/simplesamlphp { };
bluemap = final.callPackage ./packages/bluemap.nix { };
})
inputs.pvv-nettsiden.overlays.default
inputs.qotd.overlays.default
];
modules = [
inputs.pvv-nettsiden.nixosModules.default
self.nixosModules.bluemap
inputs.qotd.nixosModules.default
];
};
ildkule = stableNixosConfig "ildkule" { };
#ildkule-unstable = unstableNixosConfig "ildkule" { };
skrot = stableNixosConfig "skrot" {
modules = [
inputs.disko.nixosModules.disko
inputs.dibbler.nixosModules.default
];
overlays = [ inputs.dibbler.overlays.default ];
};
shark = stableNixosConfig "shark" { };
wenche = stableNixosConfig "wenche" { };
temmie = stableNixosConfig "temmie" { };
gluttony = stableNixosConfig "gluttony" { };
specialArgs = {
values = import ./values.nix;
kommode = stableNixosConfig "kommode" {
overlays = [
inputs.nix-gitea-themes.overlays.default
];
modules = [
inputs.nix-gitea-themes.nixosModules.default
inputs.disko.nixosModules.disko
];
};
modules = [
./topology
{
nixosConfigurations = lib.mapAttrs (_name: nixosCfg: nixosCfg.extendModules {
modules = [
inputs.nix-topology.nixosModules.default
./topology/service-extractors/greg-ng.nix
./topology/service-extractors/postgresql.nix
./topology/service-extractors/mysql.nix
./topology/service-extractors/gitea-runners.nix
];
}) self.nixosConfigurations;
ustetind = stableNixosConfig "ustetind" {
modules = [
"${nixpkgs}/nixos/modules/virtualisation/lxc-container.nix"
];
};
brzeczyszczykiewicz = stableNixosConfig "brzeczyszczykiewicz" {
modules = [
inputs.grzegorz-clients.nixosModules.grzegorz-webui
inputs.gergle.nixosModules.default
inputs.greg-ng.nixosModules.default
];
overlays = [
inputs.greg-ng.overlays.default
inputs.gergle.overlays.default
];
};
georg = stableNixosConfig "georg" {
modules = [
inputs.grzegorz-clients.nixosModules.grzegorz-webui
inputs.gergle.nixosModules.default
inputs.greg-ng.nixosModules.default
];
overlays = [
inputs.greg-ng.overlays.default
inputs.gergle.overlays.default
];
};
}
// (
let
skrottConfig = {
modules = [
(nixpkgs + "/nixos/modules/installer/sd-card/sd-image-aarch64.nix")
inputs.dibbler.nixosModules.default
];
overlays = [
inputs.dibbler.overlays.default
(final: prev: {
# NOTE: Yeetus (these break crosscompile ¯\_(ツ)_/¯)
atool = prev.emptyDirectory;
micro = prev.emptyDirectory;
ncdu = prev.emptyDirectory;
})
];
};
in
{
skrott = self.nixosConfigurations.skrott-native;
skrott-native = stableNixosConfig "skrott" (
skrottConfig
// {
localSystem = "aarch64-linux";
crossSystem = "aarch64-linux";
}
);
skrott-cross = stableNixosConfig "skrott" (
skrottConfig
// {
localSystem = "x86_64-linux";
crossSystem = "aarch64-linux";
}
);
skrott-x86_64 = stableNixosConfig "skrott" (
skrottConfig
// {
localSystem = "x86_64-linux";
crossSystem = "x86_64-linux";
}
);
}
)
// (
let
machineNames = map (i: "lupine-${toString i}") (lib.range 1 5);
stableLupineNixosConfig =
name: extraArgs: nixosConfig nixpkgs name ./hosts/lupine/configuration.nix extraArgs;
in
lib.genAttrs machineNames (
name:
stableLupineNixosConfig name {
modules = [ { networking.hostName = name; } ];
specialArgs.lupineName = name;
}
];
};
in {
topology = topology'.config.output;
topology-png = pkgs.runCommand "pvv-config-topology-png" {
nativeBuildInputs = [ pkgs.writableTmpDirAsHomeHook ];
} ''
mkdir -p "$out"
for file in '${topology'.config.output}'/*.svg; do
${lib.getExe pkgs.imagemagick} -density 300 -background none "$file" "$out"/"$(basename "''${file%.svg}.png")"
done
'';
)
);
nixosModules = {
bluemap = ./modules/bluemap.nix;
gickup = ./modules/gickup;
matrix-ooye = ./modules/matrix-ooye.nix;
robots-txt = ./modules/robots-txt.nix;
rsync-pull-targets = ./modules/rsync-pull-targets.nix;
snakeoil-certs = ./modules/snakeoil-certs.nix;
snappymail = ./modules/snappymail.nix;
};
devShells = forAllSystems (system: {
default =
let
pkgs = import nixpkgs-unstable {
inherit system;
overlays = [
(final: prev: {
inherit (inputs.disko.packages.${system}) disko;
})
];
};
in
pkgs.callPackage ./shell.nix { };
cuda =
let
cuda-pkgs = import nixpkgs-unstable {
inherit system;
config = {
allowUnfree = true;
cudaSupport = true;
};
};
in
cuda-pkgs.callPackage ./shells/cuda.nix { };
});
packages = {
"x86_64-linux" =
let
system = "x86_64-linux";
pkgs = nixpkgs.legacyPackages.${system};
in
rec {
default = important-machines;
important-machines = pkgs.linkFarm "important-machines" (
lib.getAttrs importantMachines self.packages.${system}
);
all-machines = pkgs.linkFarm "all-machines" (lib.getAttrs allMachines self.packages.${system});
simplesamlphp = pkgs.callPackage ./packages/simplesamlphp { };
bluemap = pkgs.callPackage ./packages/bluemap.nix { };
out-of-your-element = pkgs.callPackage ./packages/ooye/package.nix { };
}
//
# Mediawiki extensions
(lib.pipe null [
(_: pkgs.callPackage ./packages/mediawiki-extensions { })
(lib.flip builtins.removeAttrs [
"override"
"overrideDerivation"
])
(lib.mapAttrs' (name: lib.nameValuePair "mediawiki-${name}"))
])
//
# Machines
lib.genAttrs allMachines (machine: self.nixosConfigurations.${machine}.config.system.build.toplevel)
//
# Skrott is exception
{
skrott = self.packages.${system}.skrott-native-sd;
skrott-native = self.nixosConfigurations.skrott-native.config.system.build.toplevel;
skrott-native-sd = self.nixosConfigurations.skrott-native.config.system.build.sdImage;
skrott-cross = self.nixosConfigurations.skrott-cross.config.system.build.toplevel;
skrott-cross-sd = self.nixosConfigurations.skrott-cross.config.system.build.sdImage;
skrott-x86_64 = self.nixosConfigurations.skrott-x86_64.config.system.build.toplevel;
}
//
# Nix-topology
(
let
topology' = import inputs.nix-topology {
pkgs = import nixpkgs {
inherit system;
overlays = [
inputs.nix-topology.overlays.default
(final: prev: {
inherit (nixpkgs-unstable.legacyPackages.${system}) super-tiny-icons;
})
];
};
specialArgs = {
values = import ./values.nix;
};
modules = [
./topology
{
nixosConfigurations = lib.mapAttrs (
_name: nixosCfg:
nixosCfg.extendModules {
modules = [
inputs.nix-topology.nixosModules.default
./topology/service-extractors/greg-ng.nix
./topology/service-extractors/postgresql.nix
./topology/service-extractors/mysql.nix
./topology/service-extractors/gitea-runners.nix
];
}
) self.nixosConfigurations;
}
];
};
in
{
topology = topology'.config.output;
topology-png =
pkgs.runCommand "pvv-config-topology-png"
{
nativeBuildInputs = [ pkgs.writableTmpDirAsHomeHook ];
}
''
mkdir -p "$out"
for file in '${topology'.config.output}'/*.svg; do
${lib.getExe pkgs.imagemagick} -density 300 -background none "$file" "$out"/"$(basename "''${file%.svg}.png")"
done
'';
}
);
};
};
};
}
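The skrott-native/-cross/-x86_64 trio above instantiates one configuration with different platform pairs; in nixpkgs terms the wrapper boils down to roughly this (a simplified sketch, not the repo's exact code):

```nix
pkgs = import nixpkgs {
  localSystem = "x86_64-linux";   # buildPlatform: where the build runs
  crossSystem = "aarch64-linux";  # hostPlatform: what the result runs on
};
```

Equal localSystem and crossSystem gives a plain native build (skrott-native, skrott-x86_64); differing values give the cross-compiled skrott-cross variant.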

View File

@@ -1,15 +1,23 @@
{ config, pkgs, values, ... }:
{
config,
pkgs,
values,
...
}:
{
imports = [
./hardware-configuration.nix
../../base
./filesystems.nix
];
./hardware-configuration.nix
../../base
./filesystems.nix
];
networking.hostId = "99609ffc";
systemd.network.networks."30-enp2s0" = values.defaultNetworkConfig // {
matchConfig.Name = "enp2s0";
address = with values.hosts.bakke; [ (ipv4 + "/25") (ipv6 + "/64") ];
address = with values.hosts.bakke; [
(ipv4 + "/25")
(ipv6 + "/64")
];
};
# Don't change (even during upgrades) unless you know what you are doing.

View File

@@ -1,17 +1,17 @@
{ config, pkgs, lib, ... }:
{ pkgs, ... }:
{
# Boot drives:
boot.swraid.enable = true;
# ZFS Data pool:
environment.systemPackages = with pkgs; [ zfs ];
boot = {
zfs = {
extraPools = [ "tank" ];
requestEncryptionCredentials = false;
};
supportedFilesystems = [ "zfs" ];
kernelPackages = config.boot.zfs.package.latestCompatibleLinuxPackages;
supportedFilesystems.zfs = true;
# Use stable linux packages, these work with zfs
kernelPackages = pkgs.linuxPackages;
};
services.zfs.autoScrub = {
enable = true;

View File

@@ -1,41 +1,59 @@
# Do not modify this file! It was generated by 'nixos-generate-config'
# and may be overwritten by future invocations. Please make changes
# to /etc/nixos/configuration.nix instead.
{ config, lib, pkgs, modulesPath, ... }:
{
config,
lib,
pkgs,
modulesPath,
...
}:
{
imports =
[ (modulesPath + "/installer/scan/not-detected.nix")
];
imports = [
(modulesPath + "/installer/scan/not-detected.nix")
];
boot.initrd.availableKernelModules = [ "ehci_pci" "ahci" "usbhid" "usb_storage" "sd_mod" ];
boot.initrd.availableKernelModules = [
"ehci_pci"
"ahci"
"usbhid"
"usb_storage"
"sd_mod"
];
boot.initrd.kernelModules = [ ];
boot.kernelModules = [ "kvm-intel" ];
boot.extraModulePackages = [ ];
fileSystems."/" =
{ device = "/dev/disk/by-uuid/0f63c3d2-fc12-4ed5-a5a5-141bfd67a571";
fsType = "btrfs";
options = [ "subvol=root" ];
};
fileSystems."/" = {
device = "/dev/disk/by-uuid/0f63c3d2-fc12-4ed5-a5a5-141bfd67a571";
fsType = "btrfs";
options = [ "subvol=root" ];
};
fileSystems."/home" =
{ device = "/dev/disk/by-uuid/0f63c3d2-fc12-4ed5-a5a5-141bfd67a571";
fsType = "btrfs";
options = [ "subvol=home" ];
};
fileSystems."/home" = {
device = "/dev/disk/by-uuid/0f63c3d2-fc12-4ed5-a5a5-141bfd67a571";
fsType = "btrfs";
options = [ "subvol=home" ];
};
fileSystems."/nix" =
{ device = "/dev/disk/by-uuid/0f63c3d2-fc12-4ed5-a5a5-141bfd67a571";
fsType = "btrfs";
options = [ "subvol=nix" "noatime" ];
};
fileSystems."/nix" = {
device = "/dev/disk/by-uuid/0f63c3d2-fc12-4ed5-a5a5-141bfd67a571";
fsType = "btrfs";
options = [
"subvol=nix"
"noatime"
];
};
fileSystems."/boot" =
{ device = "/dev/sdc2";
fsType = "vfat";
options = [ "fmask=0022" "dmask=0022" ];
};
fileSystems."/boot" = {
device = "/dev/sdc2";
fsType = "vfat";
options = [
"fmask=0022"
"dmask=0022"
];
};
swapDevices = [ ];

View File

@@ -1,4 +1,9 @@
{ fp, pkgs, values, ... }:
{
fp,
pkgs,
values,
...
}:
{
imports = [
./hardware-configuration.nix
@@ -21,12 +26,15 @@
systemd.network.networks."30-enp2s0" = values.defaultNetworkConfig // {
matchConfig.Name = "enp2s0";
address = with values.hosts.bekkalokk; [ (ipv4 + "/25") (ipv6 + "/64") ];
address = with values.hosts.bekkalokk; [
(ipv4 + "/25")
(ipv6 + "/64")
];
};
services.btrfs.autoScrub.enable = true;
# Don't change (even during upgrades) unless you know what you are doing.
# See https://search.nixos.org/options?show=system.stateVersion
system.stateVersion = "22.11";
system.stateVersion = "25.11";
}

View File

@@ -1,31 +1,43 @@
# Do not modify this file! It was generated by 'nixos-generate-config'
# and may be overwritten by future invocations. Please make changes
# to /etc/nixos/configuration.nix instead.
{ config, lib, pkgs, modulesPath, ... }:
{
config,
lib,
pkgs,
modulesPath,
...
}:
{
imports =
[ (modulesPath + "/installer/scan/not-detected.nix")
];
imports = [
(modulesPath + "/installer/scan/not-detected.nix")
];
boot.initrd.availableKernelModules = [ "ehci_pci" "ahci" "usbhid" "usb_storage" "sd_mod" ];
boot.initrd.availableKernelModules = [
"ehci_pci"
"ahci"
"usbhid"
"usb_storage"
"sd_mod"
];
boot.initrd.kernelModules = [ ];
boot.kernelModules = [ "kvm-intel" ];
boot.extraModulePackages = [ ];
fileSystems."/" =
{ device = "/dev/sda1";
fsType = "btrfs";
};
fileSystems."/" = {
device = "/dev/sda1";
fsType = "btrfs";
};
fileSystems."/boot" =
{ device = "/dev/disk/by-uuid/CE63-3B9B";
fsType = "vfat";
};
fileSystems."/boot" = {
device = "/dev/disk/by-uuid/CE63-3B9B";
fsType = "vfat";
};
swapDevices =
[ { device = "/dev/disk/by-uuid/2df10c7b-0dec-45c6-a728-533f7da7f4b9"; }
];
swapDevices = [
{ device = "/dev/disk/by-uuid/2df10c7b-0dec-45c6-a728-533f7da7f4b9"; }
];
# Enables DHCP on each ethernet and wireless interface. In case of scripted networking
# (the default) this is the recommended approach. When using systemd-networkd it's

View File

@@ -1,8 +1,15 @@
{ config, lib, pkgs, inputs, ... }:
{
config,
lib,
pkgs,
inputs,
...
}:
let
vanillaSurvival = "/var/lib/bluemap/vanilla_survival_world";
format = pkgs.formats.hocon { };
in {
in
{
# NOTE: our version of the module gets added in flake.nix
disabledModules = [ "services/web-apps/bluemap.nix" ];
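`disabledModules` is the standard NixOS mechanism for swapping an upstream module out for a local replacement; a minimal sketch of the pattern (in this repo the replacement is wired in as `self.nixosModules.bluemap` in flake.nix):

```nix
{
  # Drop the upstream module from the evaluation...
  disabledModules = [ "services/web-apps/bluemap.nix" ];
  # ...and import a replacement (hypothetical local path).
  imports = [ ./modules/bluemap.nix ];
}
```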
@@ -17,82 +24,88 @@ in {
host = "minecraft.pvv.ntnu.no";
maps = let
inherit (inputs.minecraft-kartverket.packages.${pkgs.stdenv.hostPlatform.system}) bluemap-export;
in {
"verden" = {
extraHoconMarkersFile = "${bluemap-export}/overworld.hocon";
settings = {
world = vanillaSurvival;
dimension = "minecraft:overworld";
name = "Verden";
sorting = 0;
start-pos = {
x = 0;
z = 0;
maps =
let
inherit (inputs.minecraft-kartverket.packages.${pkgs.stdenv.hostPlatform.system}) bluemap-export;
in
{
"verden" = {
extraHoconMarkersFile = "${bluemap-export}/overworld.hocon";
settings = {
world = vanillaSurvival;
dimension = "minecraft:overworld";
name = "Verden";
sorting = 0;
start-pos = {
x = 0;
z = 0;
};
ambient-light = 0.1;
cave-detection-ocean-floor = -5;
};
};
"underverden" = {
extraHoconMarkersFile = "${bluemap-export}/nether.hocon";
settings = {
world = vanillaSurvival;
dimension = "minecraft:the_nether";
name = "Underverden";
sorting = 100;
start-pos = {
x = 0;
z = 0;
};
sky-color = "#290000";
void-color = "#150000";
sky-light = 1;
ambient-light = 0.6;
remove-caves-below-y = -10000;
cave-detection-ocean-floor = -5;
cave-detection-uses-block-light = true;
render-mask = [
{
max-y = 90;
}
];
};
};
"enden" = {
extraHoconMarkersFile = "${bluemap-export}/the-end.hocon";
settings = {
world = vanillaSurvival;
dimension = "minecraft:the_end";
name = "Enden";
sorting = 200;
start-pos = {
x = 0;
z = 0;
};
sky-color = "#080010";
void-color = "#080010";
sky-light = 1;
ambient-light = 0.6;
remove-caves-below-y = -10000;
cave-detection-ocean-floor = -5;
};
ambient-light = 0.1;
cave-detection-ocean-floor = -5;
};
};
"underverden" = {
extraHoconMarkersFile = "${bluemap-export}/nether.hocon";
settings = {
world = vanillaSurvival;
dimension = "minecraft:the_nether";
name = "Underverden";
sorting = 100;
start-pos = {
x = 0;
z = 0;
};
sky-color = "#290000";
void-color = "#150000";
sky-light = 1;
ambient-light = 0.6;
remove-caves-below-y = -10000;
cave-detection-ocean-floor = -5;
cave-detection-uses-block-light = true;
render-mask = [{
max-y = 90;
}];
};
};
"enden" = {
extraHoconMarkersFile = "${bluemap-export}/the-end.hocon";
settings = {
world = vanillaSurvival;
dimension = "minecraft:the_end";
name = "Enden";
sorting = 200;
start-pos = {
x = 0;
z = 0;
};
sky-color = "#080010";
void-color = "#080010";
sky-light = 1;
ambient-light = 0.6;
remove-caves-below-y = -10000;
cave-detection-ocean-floor = -5;
};
};
};
};
systemd.services."render-bluemap-maps" = {
serviceConfig = {
StateDirectory = [ "bluemap/world" ];
ExecStartPre = let
rsyncArgs = lib.cli.toCommandLineShellGNU { } {
archive = true;
compress = true;
verbose = true;
no-owner = true;
no-group = true;
rsh = "${pkgs.openssh}/bin/ssh -o UserKnownHostsFile=%d/ssh-known-hosts -i %d/sshkey";
};
in "${lib.getExe pkgs.rsync} ${rsyncArgs} root@innovation.pvv.ntnu.no:/ ${vanillaSurvival}";
ExecStartPre =
let
rsyncArgs = lib.cli.toCommandLineShellGNU { } {
archive = true;
compress = true;
verbose = true;
no-owner = true;
no-group = true;
rsh = "${pkgs.openssh}/bin/ssh -o UserKnownHostsFile=%d/ssh-known-hosts -i %d/sshkey";
};
in
"${lib.getExe pkgs.rsync} ${rsyncArgs} root@innovation.pvv.ntnu.no:/ ${vanillaSurvival}";
LoadCredential = [
"sshkey:${config.sops.secrets."bluemap/ssh-key".path}"
"ssh-known-hosts:${config.sops.secrets."bluemap/ssh-known-hosts".path}"

View File

@@ -1,8 +1,16 @@
{ config, pkgs, lib, ... }:
{
config,
pkgs,
lib,
...
}:
let
pwAuthScript = pkgs.writeShellApplication {
name = "pwauth";
runtimeInputs = with pkgs; [ coreutils heimdal ];
runtimeInputs = with pkgs; [
coreutils
heimdal
];
text = ''
read -r user1
user2="$(echo -n "$user1" | tr -c -d '0123456789abcdefghijklmnopqrstuvwxyz')"
@@ -33,7 +41,7 @@ let
"metadata/saml20-sp-remote.php" = pkgs.writeText "saml20-sp-remote.php" ''
<?php
${ lib.pipe config.services.idp.sp-remote-metadata [
${lib.pipe config.services.idp.sp-remote-metadata [
(map (url: ''
$metadata['${url}'] = [
'SingleLogoutService' => [
@@ -85,14 +93,20 @@ let
substituteInPlace "$out" \
--replace-warn '$SAML_COOKIE_SECURE' 'true' \
--replace-warn '$SAML_COOKIE_SALT' 'file_get_contents("${config.sops.secrets."idp/cookie_salt".path}")' \
--replace-warn '$SAML_COOKIE_SALT' 'file_get_contents("${
config.sops.secrets."idp/cookie_salt".path
}")' \
--replace-warn '$SAML_ADMIN_NAME' '"Drift"' \
--replace-warn '$SAML_ADMIN_EMAIL' '"drift@pvv.ntnu.no"' \
--replace-warn '$SAML_ADMIN_PASSWORD' 'file_get_contents("${config.sops.secrets."idp/admin_password".path}")' \
--replace-warn '$SAML_ADMIN_PASSWORD' 'file_get_contents("${
config.sops.secrets."idp/admin_password".path
}")' \
--replace-warn '$SAML_TRUSTED_DOMAINS' 'array( "idp.pvv.ntnu.no" )' \
--replace-warn '$SAML_DATABASE_DSN' '"pgsql:host=postgres.pvv.ntnu.no;port=5432;dbname=idp"' \
--replace-warn '$SAML_DATABASE_USERNAME' '"idp"' \
--replace-warn '$SAML_DATABASE_PASSWORD' 'file_get_contents("${config.sops.secrets."idp/postgres_password".path}")' \
--replace-warn '$SAML_DATABASE_PASSWORD' 'file_get_contents("${
config.sops.secrets."idp/postgres_password".path
}")' \
--replace-warn '$CACHE_DIRECTORY' '/var/cache/idp'
'';
@@ -158,23 +172,25 @@ in
services.phpfpm.pools.idp = {
user = "idp";
group = "idp";
settings = let
listenUser = config.services.nginx.user;
listenGroup = config.services.nginx.group;
in {
"pm" = "dynamic";
"pm.max_children" = 32;
"pm.max_requests" = 500;
"pm.start_servers" = 2;
"pm.min_spare_servers" = 2;
"pm.max_spare_servers" = 4;
"listen.owner" = listenUser;
"listen.group" = listenGroup;
settings =
let
listenUser = config.services.nginx.user;
listenGroup = config.services.nginx.group;
in
{
"pm" = "dynamic";
"pm.max_children" = 32;
"pm.max_requests" = 500;
"pm.start_servers" = 2;
"pm.min_spare_servers" = 2;
"pm.max_spare_servers" = 4;
"listen.owner" = listenUser;
"listen.group" = listenGroup;
"catch_workers_output" = true;
"php_admin_flag[log_errors]" = true;
# "php_admin_value[error_log]" = "stderr";
};
"catch_workers_output" = true;
"php_admin_flag[log_errors]" = true;
# "php_admin_value[error_log]" = "stderr";
};
};
services.nginx.virtualHosts."idp.pvv.ntnu.no" = {
@@ -182,7 +198,7 @@ in
enableACME = true;
kTLS = true;
root = "${package}/share/php/simplesamlphp/public";
locations = {
locations = {
# based on https://simplesamlphp.org/docs/stable/simplesamlphp-install.html#configuring-nginx
"/" = {
alias = "${package}/share/php/simplesamlphp/public/";

View File

@@ -1,4 +1,9 @@
{ config, pkgs, lib, ... }:
{
config,
pkgs,
lib,
...
}:
{
security.krb5 = {
enable = true;

View File

@@ -1,4 +1,12 @@
{ pkgs, lib, fp, config, values, pkgs-unstable, ... }: let
{
pkgs,
lib,
fp,
config,
values,
...
}:
let
cfg = config.services.mediawiki;
# "mediawiki"
@@ -9,7 +17,9 @@
simplesamlphp = pkgs.simplesamlphp.override {
extra_files = {
"metadata/saml20-idp-remote.php" = pkgs.writeText "mediawiki-saml20-idp-remote.php" (import ../idp-simplesamlphp/metadata.php.nix);
"metadata/saml20-idp-remote.php" = pkgs.writeText "mediawiki-saml20-idp-remote.php" (
import ../idp-simplesamlphp/metadata.php.nix
);
"config/authsources.php" = ./simplesaml-authsources.php;
@@ -18,35 +28,66 @@
substituteInPlace "$out" \
--replace-warn '$SAML_COOKIE_SECURE' 'true' \
--replace-warn '$SAML_COOKIE_SALT' 'file_get_contents("${config.sops.secrets."mediawiki/simplesamlphp/cookie_salt".path}")' \
--replace-warn '$SAML_COOKIE_SALT' 'file_get_contents("${
config.sops.secrets."mediawiki/simplesamlphp/cookie_salt".path
}")' \
--replace-warn '$SAML_ADMIN_NAME' '"Drift"' \
--replace-warn '$SAML_ADMIN_EMAIL' '"drift@pvv.ntnu.no"' \
--replace-warn '$SAML_ADMIN_PASSWORD' 'file_get_contents("${config.sops.secrets."mediawiki/simplesamlphp/admin_password".path}")' \
--replace-warn '$SAML_ADMIN_PASSWORD' 'file_get_contents("${
config.sops.secrets."mediawiki/simplesamlphp/admin_password".path
}")' \
--replace-warn '$SAML_TRUSTED_DOMAINS' 'array( "wiki.pvv.ntnu.no" )' \
--replace-warn '$SAML_DATABASE_DSN' '"pgsql:host=postgres.pvv.ntnu.no;port=5432;dbname=mediawiki_simplesamlphp"' \
--replace-warn '$SAML_DATABASE_USERNAME' '"mediawiki_simplesamlphp"' \
--replace-warn '$SAML_DATABASE_PASSWORD' 'file_get_contents("${config.sops.secrets."mediawiki/simplesamlphp/postgres_password".path}")' \
--replace-warn '$SAML_DATABASE_PASSWORD' 'file_get_contents("${
config.sops.secrets."mediawiki/simplesamlphp/postgres_password".path
}")' \
--replace-warn '$CACHE_DIRECTORY' '/var/cache/mediawiki/idp'
'';
};
};
in {
in
{
services.idp.sp-remote-metadata = [ "https://wiki.pvv.ntnu.no/simplesaml/" ];
sops.secrets = lib.pipe [
"mediawiki/password"
"mediawiki/postgres_password"
"mediawiki/simplesamlphp/postgres_password"
"mediawiki/simplesamlphp/cookie_salt"
"mediawiki/simplesamlphp/admin_password"
] [
(map (key: lib.nameValuePair key {
owner = user;
group = group;
restartUnits = [ "phpfpm-mediawiki.service" ];
}))
lib.listToAttrs
];
sops.secrets =
lib.pipe
[
"mediawiki/secret-key"
"mediawiki/password"
"mediawiki/postgres_password"
"mediawiki/simplesamlphp/postgres_password"
"mediawiki/simplesamlphp/cookie_salt"
"mediawiki/simplesamlphp/admin_password"
]
[
(map (
key:
lib.nameValuePair key {
owner = user;
group = group;
restartUnits = [ "phpfpm-mediawiki.service" ];
}
))
lib.listToAttrs
];
services.rsync-pull-targets = {
enable = true;
locations.${cfg.uploadsDir} = {
user = "root";
rrsyncArgs.ro = true;
authorizedKeysAttrs = [
"restrict"
"from=\"principal.pvv.ntnu.no,${values.hosts.principal.ipv6},${values.hosts.principal.ipv4}\""
"no-agent-forwarding"
"no-port-forwarding"
"no-pty"
"no-X11-forwarding"
];
publicKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAICHFHa3Iq1oKPhbKCAIHgOoWOTkLmIc7yqxeTbut7ig/ mediawiki rsync backup";
};
};
services.mediawiki = {
enable = true;
@@ -144,6 +185,24 @@ in {
$wgDBserver = "${toString cfg.database.host}";
$wgAllowCopyUploads = true;
# Files
$wgFileExtensions = [
'bmp',
'gif',
'jpeg',
'jpg',
'mp3',
'odg',
'odp',
'ods',
'odt',
'pdf',
'png',
'tiff',
'webm',
'webp',
];
# Misc program paths
$wgFFmpegLocation = '${pkgs.ffmpeg}/bin/ffmpeg';
$wgExiftool = '${pkgs.exiftool}/bin/exiftool';
@@ -179,19 +238,21 @@ in {
# Cache directory for simplesamlphp
# systemd.services.phpfpm-mediawiki.serviceConfig.CacheDirectory = "mediawiki/simplesamlphp";
systemd.tmpfiles.settings."10-mediawiki"."/var/cache/mediawiki/simplesamlphp".d = {
user = "mediawiki";
group = "mediawiki";
mode = "0770";
};
systemd.tmpfiles.settings."10-mediawiki"."/var/cache/mediawiki/simplesamlphp".d =
lib.mkIf cfg.enable
{
user = "mediawiki";
group = "mediawiki";
mode = "0770";
};
users.groups.mediawiki.members = [ "nginx" ];
users.groups.mediawiki.members = lib.mkIf cfg.enable [ "nginx" ];
services.nginx.virtualHosts."wiki.pvv.ntnu.no" = {
services.nginx.virtualHosts."wiki.pvv.ntnu.no" = lib.mkIf cfg.enable {
kTLS = true;
forceSSL = true;
enableACME = true;
locations = {
locations = {
"= /wiki/Main_Page" = lib.mkForce {
return = "301 /wiki/Programvareverkstedet";
};
@@ -217,20 +278,45 @@ in {
"= /PNG/PVV-logo.svg".alias = fp /assets/logo_blue_regular.svg;
"= /PNG/PVV-logo.png".alias = fp /assets/logo_blue_regular.png;
"= /favicon.ico".alias = pkgs.runCommandLocal "mediawiki-favicon.ico" {
buildInputs = with pkgs; [ imagemagick ];
} ''
magick \
${fp /assets/logo_blue_regular.png} \
-resize x64 \
-gravity center \
-crop 64x64+0+0 \
-flatten \
-colors 256 \
-background transparent \
$out
'';
"= /favicon.ico".alias =
pkgs.runCommandLocal "mediawiki-favicon.ico"
{
buildInputs = with pkgs; [ imagemagick ];
}
''
magick \
${fp /assets/logo_blue_regular.png} \
-resize x64 \
-gravity center \
-crop 64x64+0+0 \
-flatten \
-colors 256 \
-background transparent \
$out
'';
};
};
systemd.services.mediawiki-init = lib.mkIf cfg.enable {
after = [ "sops-install-secrets.service" ];
serviceConfig = {
BindReadOnlyPaths = [
"/run/credentials/mediawiki-init.service/secret-key:/var/lib/mediawiki/secret.key"
];
LoadCredential = [ "secret-key:${config.sops.secrets."mediawiki/secret-key".path}" ];
UMask = lib.mkForce "0007";
};
};
systemd.services.phpfpm-mediawiki = lib.mkIf cfg.enable {
after = [ "sops-install-secrets.service" ];
serviceConfig = {
BindReadOnlyPaths = [
"/run/credentials/phpfpm-mediawiki.service/secret-key:/var/lib/mediawiki/secret.key"
];
LoadCredential = [ "secret-key:${config.sops.secrets."mediawiki/secret-key".path}" ];
UMask = lib.mkForce "0007";
};
};
}
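
A note on the recurring services.rsync-pull-targets blocks introduced in this changeset: the module itself is defined elsewhere in the repo and is not part of this diff. Judging from the openssh.authorizedKeys.keys entry it replaces in fetch-gallery.nix further down, each locations.<dir> entry presumably expands to a forced-command authorized_keys line for the configured user, along these lines (hypothetical reconstruction; path and key abbreviated):

    command="rrsync -ro <uploadsDir>",restrict,from="principal.pvv.ntnu.no,…",no-agent-forwarding,no-port-forwarding,no-pty,no-X11-forwarding ssh-ed25519 AAAA… mediawiki rsync backup

so principal.pvv.ntnu.no can pull read-only backups over rsync and do nothing else.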

View File

@@ -11,41 +11,43 @@ in
{
# Source: https://www.pierreblazquez.com/2023/06/17/how-to-harden-apache-php-fpm-daemons-using-systemd/
systemd.services = lib.genAttrs pools (_: {
serviceConfig = let
caps = [
"CAP_NET_BIND_SERVICE"
"CAP_SETGID"
"CAP_SETUID"
"CAP_CHOWN"
"CAP_KILL"
"CAP_IPC_LOCK"
"CAP_DAC_OVERRIDE"
];
in {
AmbientCapabilities = caps;
CapabilityBoundingSet = caps;
DeviceAllow = [ "" ];
LockPersonality = true;
MemoryDenyWriteExecute = false;
NoNewPrivileges = true;
PrivateMounts = true;
ProtectClock = true;
ProtectControlGroups = true;
ProtectHome = true;
ProtectHostname = true;
ProtectKernelLogs = true;
ProtectKernelModules = true;
ProtectKernelTunables = true;
RemoveIPC = true;
UMask = "0077";
RestrictNamespaces = "~mnt";
RestrictRealtime = true;
RestrictSUIDSGID = true;
SystemCallArchitectures = "native";
KeyringMode = "private";
SystemCallFilter = [
"@system-service"
];
};
serviceConfig =
let
caps = [
"CAP_NET_BIND_SERVICE"
"CAP_SETGID"
"CAP_SETUID"
"CAP_CHOWN"
"CAP_KILL"
"CAP_IPC_LOCK"
"CAP_DAC_OVERRIDE"
];
in
{
AmbientCapabilities = caps;
CapabilityBoundingSet = caps;
DeviceAllow = [ "" ];
LockPersonality = true;
MemoryDenyWriteExecute = false;
NoNewPrivileges = true;
PrivateMounts = true;
ProtectClock = true;
ProtectControlGroups = true;
ProtectHome = true;
ProtectHostname = true;
ProtectKernelLogs = true;
ProtectKernelModules = true;
ProtectKernelTunables = true;
RemoveIPC = true;
UMask = "0077";
RestrictNamespaces = "~mnt";
RestrictRealtime = true;
RestrictSUIDSGID = true;
SystemCallArchitectures = "native";
KeyringMode = "private";
SystemCallFilter = [
"@system-service"
];
};
});
}
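
For reference: lib.genAttrs, used above to stamp the same hardened serviceConfig onto every entry in the pools list, builds an attrset from a list of names. A minimal sketch with hypothetical pool names:

    lib.genAttrs [ "phpfpm-idp" "phpfpm-mediawiki" ] (name: {
      serviceConfig.UMask = "0077";
    })
    # == {
    #   phpfpm-idp = { serviceConfig.UMask = "0077"; };
    #   phpfpm-mediawiki = { serviceConfig.UMask = "0077"; };
    # }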

View File

@@ -1,11 +1,18 @@
{ config, pkgs, lib, ... }:
{
config,
pkgs,
lib,
values,
...
}:
let
cfg = config.services.vaultwarden;
domain = "pw.pvv.ntnu.no";
address = "127.0.1.2";
port = 3011;
wsPort = 3012;
in {
in
{
sops.secrets."vaultwarden/environ" = {
owner = "vaultwarden";
group = "vaultwarden";
@@ -99,4 +106,21 @@ in {
];
};
};
services.rsync-pull-targets = {
enable = true;
locations."/var/lib/vaultwarden" = {
user = "root";
rrsyncArgs.ro = true;
authorizedKeysAttrs = [
"restrict"
"from=\"principal.pvv.ntnu.no,${values.hosts.principal.ipv6},${values.hosts.principal.ipv4}\""
"no-agent-forwarding"
"no-port-forwarding"
"no-pty"
"no-X11-forwarding"
];
publicKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIB2cDaW52gBtLVaNqoGijvN2ZAVkAWlII5AXUzT3Dswj vaultwarden rsync backup";
};
};
}

View File

@@ -1,4 +1,10 @@
{ config, values, pkgs, lib, ... }:
{
config,
values,
pkgs,
lib,
...
}:
{
imports = [
./roundcube.nix

View File

@@ -1,4 +1,9 @@
{ config, pkgs, lib, ... }:
{
config,
pkgs,
lib,
...
}:
with lib;
let
@@ -14,14 +19,24 @@ in
services.roundcube = {
enable = true;
package = pkgs.roundcube.withPlugins (plugins: with plugins; [
persistent_login
thunderbird_labels
contextmenu
custom_from
]);
package = pkgs.roundcube.withPlugins (
plugins: with plugins; [
persistent_login
thunderbird_labels
contextmenu
custom_from
]
);
dicts = with pkgs.aspellDicts; [ en en-computers nb nn fr de it ];
dicts = with pkgs.aspellDicts; [
en
en-computers
nb
nn
fr
de
it
];
maxAttachmentSize = 20;
hostName = "roundcubeplaceholder.example.com";
@@ -54,21 +69,23 @@ in
ln -s ${cfg.package} $out/roundcube
'';
extraConfig = ''
location ~ ^/roundcube/(${builtins.concatStringsSep "|" [
# https://wiki.archlinux.org/title/Roundcube
"README"
"INSTALL"
"LICENSE"
"CHANGELOG"
"UPGRADING"
"bin"
"SQL"
".+\\.md"
"\\."
"config"
"temp"
"logs"
]})/? {
location ~ ^/roundcube/(${
builtins.concatStringsSep "|" [
# https://wiki.archlinux.org/title/Roundcube
"README"
"INSTALL"
"LICENSE"
"CHANGELOG"
"UPGRADING"
"bin"
"SQL"
".+\\.md"
"\\."
"config"
"temp"
"logs"
]
})/? {
deny all;
}
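
For reference: the interpolated location key above is plain string concatenation; builtins.concatStringsSep joins the deny-list into one regex alternation:

    builtins.concatStringsSep "|" [ "README" "INSTALL" "bin" ]
    # == "README|INSTALL|bin"

so the rendered nginx block is approximately location ~ ^/roundcube/(README|INSTALL|…|logs)/? { deny all; }, blocking direct HTTP access to Roundcube's internals.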

View File

@@ -1,7 +1,15 @@
{ config, lib, fp, pkgs, ... }:
{
config,
lib,
fp,
pkgs,
values,
...
}:
let
cfg = config.services.snappymail;
in {
in
{
imports = [ (fp /modules/snappymail.nix) ];
services.snappymail = {
@@ -14,5 +22,21 @@ in {
enableACME = true;
kTLS = true;
};
}
services.rsync-pull-targets = {
enable = true;
locations.${cfg.dataDir} = {
user = "root";
rrsyncArgs.ro = true;
authorizedKeysAttrs = [
"restrict"
"from=\"principal.pvv.ntnu.no,${values.hosts.principal.ipv6},${values.hosts.principal.ipv4}\""
"no-agent-forwarding"
"no-port-forwarding"
"no-pty"
"no-X11-forwarding"
];
publicKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIJENMnuNsHEeA91oX+cj7Qpex2defSXP/lxznxCAqV03 snappymail rsync backup";
};
};
}

View File

@@ -1,22 +1,31 @@
{ pkgs, lib, config, ... }:
{
pkgs,
lib,
config,
...
}:
let
format = pkgs.formats.php { };
cfg = config.services.pvv-nettsiden;
in {
in
{
imports = [
./fetch-gallery.nix
];
sops.secrets = lib.genAttrs [
"nettsiden/door_secret"
"nettsiden/mysql_password"
"nettsiden/simplesamlphp/admin_password"
"nettsiden/simplesamlphp/cookie_salt"
] (_: {
owner = config.services.phpfpm.pools.pvv-nettsiden.user;
group = config.services.phpfpm.pools.pvv-nettsiden.group;
restartUnits = [ "phpfpm-pvv-nettsiden.service" ];
});
sops.secrets =
lib.genAttrs
[
"nettsiden/door_secret"
"nettsiden/mysql_password"
"nettsiden/simplesamlphp/admin_password"
"nettsiden/simplesamlphp/cookie_salt"
]
(_: {
owner = config.services.phpfpm.pools.pvv-nettsiden.user;
group = config.services.phpfpm.pools.pvv-nettsiden.group;
restartUnits = [ "phpfpm-pvv-nettsiden.service" ];
});
security.acme.certs."www.pvv.ntnu.no" = {
extraDomainNames = [
@@ -35,48 +44,53 @@ in {
package = pkgs.pvv-nettsiden.override {
extra_files = {
"${pkgs.pvv-nettsiden.passthru.simplesamlphpPath}/metadata/saml20-idp-remote.php" = pkgs.writeText "pvv-nettsiden-saml20-idp-remote.php" (import ../idp-simplesamlphp/metadata.php.nix);
"${pkgs.pvv-nettsiden.passthru.simplesamlphpPath}/config/authsources.php" = pkgs.writeText "pvv-nettsiden-authsources.php" ''
<?php
$config = array(
'admin' => array(
'core:AdminPassword'
),
'default-sp' => array(
'saml:SP',
'entityID' => 'https://${cfg.domainName}/simplesaml/',
'idp' => 'https://idp.pvv.ntnu.no/',
),
);
'';
"${pkgs.pvv-nettsiden.passthru.simplesamlphpPath}/metadata/saml20-idp-remote.php" =
pkgs.writeText "pvv-nettsiden-saml20-idp-remote.php" (import ../idp-simplesamlphp/metadata.php.nix);
"${pkgs.pvv-nettsiden.passthru.simplesamlphpPath}/config/authsources.php" =
pkgs.writeText "pvv-nettsiden-authsources.php" ''
<?php
$config = array(
'admin' => array(
'core:AdminPassword'
),
'default-sp' => array(
'saml:SP',
'entityID' => 'https://${cfg.domainName}/simplesaml/',
'idp' => 'https://idp.pvv.ntnu.no/',
),
);
'';
};
};
domainName = "www.pvv.ntnu.no";
settings = let
includeFromSops = path: format.lib.mkRaw "file_get_contents('${config.sops.secrets."nettsiden/${path}".path}')";
in {
DOOR_SECRET = includeFromSops "door_secret";
settings =
let
includeFromSops =
path: format.lib.mkRaw "file_get_contents('${config.sops.secrets."nettsiden/${path}".path}')";
in
{
DOOR_SECRET = includeFromSops "door_secret";
DB = {
DSN = "mysql:dbname=www-data_nettside;host=mysql.pvv.ntnu.no";
USER = "www-data_nettsi";
PASS = includeFromSops "mysql_password";
};
DB = {
DSN = "mysql:dbname=www-data_nettside;host=mysql.pvv.ntnu.no";
USER = "www-data_nettsi";
PASS = includeFromSops "mysql_password";
};
# TODO: set up postgres session for simplesamlphp
SAML = {
COOKIE_SALT = includeFromSops "simplesamlphp/cookie_salt";
COOKIE_SECURE = true;
ADMIN_NAME = "PVV Drift";
ADMIN_EMAIL = "drift@pvv.ntnu.no";
ADMIN_PASSWORD = includeFromSops "simplesamlphp/admin_password";
TRUSTED_DOMAINS = [
"www.pvv.ntnu.no"
];
# TODO: set up postgres session for simplesamlphp
SAML = {
COOKIE_SALT = includeFromSops "simplesamlphp/cookie_salt";
COOKIE_SECURE = true;
ADMIN_NAME = "PVV Drift";
ADMIN_EMAIL = "drift@pvv.ntnu.no";
ADMIN_PASSWORD = includeFromSops "simplesamlphp/admin_password";
TRUSTED_DOMAINS = [
"www.pvv.ntnu.no"
];
};
};
};
};
services.phpfpm.pools."pvv-nettsiden".settings = {
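
For reference: includeFromSops leans on the PHP settings format's mkRaw, which emits its argument as a raw PHP expression instead of a quoted string. A sketch of the intent (the /run/secrets path is sops-nix's usual layout, shown here as an assumption):

    includeFromSops "mysql_password"
    # renders in the generated settings file as the PHP expression:
    #   file_get_contents('/run/secrets/nettsiden/mysql_password')

so secrets are read at request time rather than copied into the world-readable Nix store.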

View File

@@ -1,15 +1,37 @@
{ pkgs, lib, config, ... }:
{
pkgs,
lib,
config,
values,
...
}:
let
galleryDir = config.services.pvv-nettsiden.settings.GALLERY.DIR;
transferDir = "${config.services.pvv-nettsiden.settings.GALLERY.DIR}-transfer";
in {
in
{
users.users.${config.services.pvv-nettsiden.user} = {
# NOTE: the user unfortunately needs a registered shell for rrsync to function...
# is there anything we can do to remove this?
useDefaultShell = true;
};
# This is pushed from microbel:/var/www/www-gallery/build-gallery.sh
openssh.authorizedKeys.keys = [
''command="${pkgs.rrsync}/bin/rrsync -wo ${transferDir}",restrict,no-agent-forwarding,no-port-forwarding,no-pty,no-X11-forwarding ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIIjHhC2dikhWs/gG+m7qP1eSohWzTehn4ToNzDSOImyR gallery-publish''
];
# This is pushed from microbel:/var/www/www-gallery/build-gallery.sh
services.rsync-pull-targets = {
enable = true;
locations.${transferDir} = {
user = config.services.pvv-nettsiden.user;
rrsyncArgs.wo = true;
authorizedKeysAttrs = [
"restrict"
"from=\"microbel.pvv.ntnu.no,${values.hosts.microbel.ipv6},${values.hosts.microbel.ipv4}\""
"no-agent-forwarding"
"no-port-forwarding"
"no-pty"
"no-X11-forwarding"
];
publicKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIIjHhC2dikhWs/gG+m7qP1eSohWzTehn4ToNzDSOImyR gallery-publish";
};
};
systemd.paths.pvv-nettsiden-gallery-update = {
@@ -22,14 +44,20 @@ in {
};
systemd.services.pvv-nettsiden-gallery-update = {
path = with pkgs; [ imagemagick gnutar gzip ];
path = with pkgs; [
imagemagick
gnutar
gzip
];
script = ''
tar ${lib.cli.toGNUCommandLineShell {} {
extract = true;
file = "${transferDir}/gallery.tar.gz";
directory = ".";
}}
tar ${
lib.cli.toGNUCommandLineShell { } {
extract = true;
file = "${transferDir}/gallery.tar.gz";
directory = ".";
}
}
# Delete files and directories that exist in the gallery but don't exist in the tarball
filesToRemove=$(uniq -u <(sort <(find . -not -path "./.thumbnails*") <(tar -tf ${transferDir}/gallery.tar.gz | sed 's|/$||')))
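
For reference: the filesToRemove pipeline above is a set difference built from coreutils. uniq -u keeps lines that occur exactly once in the sorted concatenation of the two listings, i.e. paths that exist on disk but not in the tarball (everything in the tarball was just extracted, so those paths show up in both lists). A standalone sketch:

    uniq -u <(sort <(printf 'a\nb\nc\n') <(printf 'a\nb\n'))
    # prints: c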

View File

@@ -1,25 +1,28 @@
{ lib, ... }:
{
services.nginx.virtualHosts = lib.genAttrs [
"pvv.ntnu.no"
"www.pvv.ntnu.no"
"pvv.org"
"www.pvv.org"
] (_: {
locations = {
"^~ /.well-known/" = {
alias = (toString ./root) + "/";
};
services.nginx.virtualHosts =
lib.genAttrs
[
"pvv.ntnu.no"
"www.pvv.ntnu.no"
"pvv.org"
"www.pvv.org"
]
(_: {
locations = {
"^~ /.well-known/" = {
alias = (toString ./root) + "/";
};
# Proxy the matrix well-known files
# Host has to be set before proxy_pass
# The header must be set so nginx on the other side routes it to the right place
"^~ /.well-known/matrix/" = {
extraConfig = ''
proxy_set_header Host matrix.pvv.ntnu.no;
proxy_pass https://matrix.pvv.ntnu.no/.well-known/matrix/;
'';
};
};
});
# Proxy the matrix well-known files
# Host has to be set before proxy_pass
# The header must be set so nginx on the other side routes it to the right place
"^~ /.well-known/matrix/" = {
extraConfig = ''
proxy_set_header Host matrix.pvv.ntnu.no;
proxy_pass https://matrix.pvv.ntnu.no/.well-known/matrix/;
'';
};
};
});
}

View File

@@ -6,7 +6,11 @@ Contact: mailto:cert@pvv.ntnu.no
Preferred-Languages: no, en
Expires: 2032-12-31T23:59:59.000Z
# This file was last updated 2024-09-14.
# This file was last updated 2026-02-27.
# You can find a wikipage for our security policies at:
# https://wiki.pvv.ntnu.no/wiki/CERT
# Please note that we are a student organization, and unfortunately we do not
# have a bug bounty program or offer monetary compensation for disclosure of
# security vulnerabilities.

View File

@@ -1,4 +1,9 @@
{ fp, pkgs, values, ... }:
{
fp,
pkgs,
values,
...
}:
{
imports = [
./hardware-configuration.nix
@@ -9,8 +14,8 @@
./services/calendar-bot.nix
#./services/git-mirrors
./services/minecraft-heatmap.nix
./services/mysql.nix
./services/postgres.nix
./services/mysql
./services/postgresql
./services/matrix
];
@@ -19,8 +24,16 @@
systemd.network.networks."30-ens18" = values.defaultNetworkConfig // {
#matchConfig.Name = "enp6s0f0";
matchConfig.Name = "ens18";
address = with values.hosts.bicep; [ (ipv4 + "/25") (ipv6 + "/64") ]
++ (with values.services.turn; [ (ipv4 + "/25") (ipv6 + "/64") ]);
address =
with values.hosts.bicep;
[
(ipv4 + "/25")
(ipv6 + "/64")
]
++ (with values.services.turn; [
(ipv4 + "/25")
(ipv6 + "/64")
]);
};
systemd.network.wait-online = {
anyInterface = true;
@@ -30,5 +43,5 @@
# Don't change (even during upgrades) unless you know what you are doing.
# See https://search.nixos.org/options?show=system.stateVersion
system.stateVersion = "22.11";
system.stateVersion = "25.11";
}

View File

@@ -1,34 +1,49 @@
# Do not modify this file! It was generated by 'nixos-generate-config'
# and may be overwritten by future invocations. Please make changes
# to /etc/nixos/configuration.nix instead.
{ config, lib, pkgs, modulesPath, ... }:
{
config,
lib,
pkgs,
modulesPath,
...
}:
{
imports =
[ (modulesPath + "/profiles/qemu-guest.nix")
];
imports = [
(modulesPath + "/profiles/qemu-guest.nix")
];
boot.initrd.availableKernelModules = [ "ata_piix" "uhci_hcd" "ahci" "sd_mod" "sr_mod" ];
boot.initrd.availableKernelModules = [
"ata_piix"
"uhci_hcd"
"ahci"
"sd_mod"
"sr_mod"
];
boot.initrd.kernelModules = [ ];
boot.kernelModules = [ ];
boot.extraModulePackages = [ ];
fileSystems."/" =
{ device = "/dev/disk/by-uuid/20e06202-7a09-47cc-8ef6-5e7afe19453a";
fsType = "ext4";
};
fileSystems."/" = {
device = "/dev/disk/by-uuid/20e06202-7a09-47cc-8ef6-5e7afe19453a";
fsType = "ext4";
};
# Temporary data disk; 128 GB is not enough, but it will have to do until we can add another disk to the system.
fileSystems."/data" =
{ device = "/dev/disk/by-uuid/c81af266-0781-4084-b8eb-c2587cbcf1ba";
fsType = "ext4";
};
fileSystems."/data" = {
device = "/dev/disk/by-uuid/c81af266-0781-4084-b8eb-c2587cbcf1ba";
fsType = "ext4";
};
fileSystems."/boot" =
{ device = "/dev/disk/by-uuid/198B-E363";
fsType = "vfat";
options = [ "fmask=0022" "dmask=0022" ];
};
fileSystems."/boot" = {
device = "/dev/disk/by-uuid/198B-E363";
fsType = "vfat";
options = [
"fmask=0022"
"dmask=0022"
];
};
swapDevices = [ ];

View File

@@ -1,7 +1,14 @@
{ config, fp, lib, pkgs, ... }:
{
config,
fp,
lib,
pkgs,
...
}:
let
cfg = config.services.pvv-calendar-bot;
in {
in
{
sops.secrets = {
"calendar-bot/matrix_token" = {
sopsFile = fp /secrets/bicep/bicep.yaml;

View File

@@ -1,4 +1,10 @@
{ config, pkgs, lib, fp, ... }:
{
config,
pkgs,
lib,
fp,
...
}:
let
cfg = config.services.gickup;
in
@@ -20,79 +26,88 @@ in
lfs = false;
};
instances = let
defaultGithubConfig = {
settings.token_file = config.sops.secrets."gickup/github-token".path;
};
defaultGitlabConfig = {
# settings.token_file = ...
};
in {
"github:Git-Mediawiki/Git-Mediawiki" = defaultGithubConfig;
"github:NixOS/nixpkgs" = defaultGithubConfig;
"github:go-gitea/gitea" = defaultGithubConfig;
"github:heimdal/heimdal" = defaultGithubConfig;
"github:saltstack/salt" = defaultGithubConfig;
"github:typst/typst" = defaultGithubConfig;
"github:unmojang/FjordLauncher" = defaultGithubConfig;
"github:unmojang/drasl" = defaultGithubConfig;
"github:yushijinhun/authlib-injector" = defaultGithubConfig;
instances =
let
defaultGithubConfig = {
settings.token_file = config.sops.secrets."gickup/github-token".path;
};
defaultGitlabConfig = {
# settings.token_file = ...
};
in
{
"github:Git-Mediawiki/Git-Mediawiki" = defaultGithubConfig;
"github:NixOS/nixpkgs" = defaultGithubConfig;
"github:go-gitea/gitea" = defaultGithubConfig;
"github:heimdal/heimdal" = defaultGithubConfig;
"github:saltstack/salt" = defaultGithubConfig;
"github:typst/typst" = defaultGithubConfig;
"github:unmojang/FjordLauncher" = defaultGithubConfig;
"github:unmojang/drasl" = defaultGithubConfig;
"github:yushijinhun/authlib-injector" = defaultGithubConfig;
"gitlab:mx-puppet/discord/better-discord.js" = defaultGitlabConfig;
"gitlab:mx-puppet/discord/discord-markdown" = defaultGitlabConfig;
"gitlab:mx-puppet/discord/matrix-discord-parser" = defaultGitlabConfig;
"gitlab:mx-puppet/discord/mx-puppet-discord" = defaultGitlabConfig;
"gitlab:mx-puppet/mx-puppet-bridge" = defaultGitlabConfig;
"gitlab:mx-puppet/discord/better-discord.js" = defaultGitlabConfig;
"gitlab:mx-puppet/discord/discord-markdown" = defaultGitlabConfig;
"gitlab:mx-puppet/discord/matrix-discord-parser" = defaultGitlabConfig;
"gitlab:mx-puppet/discord/mx-puppet-discord" = defaultGitlabConfig;
"gitlab:mx-puppet/mx-puppet-bridge" = defaultGitlabConfig;
"any:glibc" = {
settings.url = "https://sourceware.org/git/glibc.git";
};
"any:glibc" = {
settings.url = "https://sourceware.org/git/glibc.git";
};
"any:out-of-your-element" = {
settings.url = "https://gitdab.com/cadence/out-of-your-element.git";
};
"any:out-of-your-element" = {
settings.url = "https://gitdab.com/cadence/out-of-your-element.git";
};
"any:out-of-your-element-module" = {
settings.url = "https://cgit.rory.gay/nix/OOYE-module.git";
"any:out-of-your-element-module" = {
settings.url = "https://cgit.rory.gay/nix/OOYE-module.git";
};
};
};
};
services.cgit = let
domain = "mirrors.pvv.ntnu.no";
in {
${domain} = {
enable = true;
package = pkgs.callPackage (fp /packages/cgit.nix) { };
group = "gickup";
scanPath = "${cfg.dataDir}/linktree";
gitHttpBackend.checkExportOkFiles = false;
settings = {
enable-commit-graph = true;
enable-follow-links = true;
enable-http-clone = true;
enable-remote-branches = true;
clone-url = "https://${domain}/$CGIT_REPO_URL";
remove-suffix = true;
root-title = "PVVSPPP";
root-desc = "PVV Speiler Praktisk og Prominent Programvare";
snapshots = "all";
logo = "/PVV-logo.png";
services.cgit =
let
domain = "mirrors.pvv.ntnu.no";
in
{
${domain} = {
enable = true;
package = pkgs.callPackage (fp /packages/cgit.nix) { };
group = "gickup";
scanPath = "${cfg.dataDir}/linktree";
gitHttpBackend.checkExportOkFiles = false;
settings = {
enable-commit-graph = true;
enable-follow-links = true;
enable-http-clone = true;
enable-remote-branches = true;
clone-url = "https://${domain}/$CGIT_REPO_URL";
remove-suffix = true;
root-title = "PVVSPPP";
root-desc = "PVV Speiler Praktisk og Prominent Programvare";
snapshots = "all";
logo = "/PVV-logo.png";
};
};
};
};
services.nginx.virtualHosts."mirrors.pvv.ntnu.no" = {
forceSSL = true;
enableACME = true;
locations."= /PVV-logo.png".alias = let
small-pvv-logo = pkgs.runCommandLocal "pvv-logo-96x96" {
nativeBuildInputs = [ pkgs.imagemagick ];
} ''
magick '${fp /assets/logo_blue_regular.svg}' -resize 96x96 PNG:"$out"
'';
in toString small-pvv-logo;
locations."= /PVV-logo.png".alias =
let
small-pvv-logo =
pkgs.runCommandLocal "pvv-logo-96x96"
{
nativeBuildInputs = [ pkgs.imagemagick ];
}
''
magick '${fp /assets/logo_blue_regular.svg}' -resize 96x96 PNG:"$out"
'';
in
toString small-pvv-logo;
};
systemd.services."fcgiwrap-cgit-mirrors.pvv.ntnu.no" = {

View File

@@ -1,4 +1,12 @@
{ config, lib, fp, pkgs, secrets, values, ... }:
{
config,
lib,
fp,
pkgs,
secrets,
values,
...
}:
{
sops.secrets."matrix/coturn/static-auth-secret" = {
@@ -127,18 +135,31 @@
};
networking.firewall = {
interfaces.enp6s0f0 = let
range = with config.services.coturn; [ {
from = min-port;
to = max-port;
} ];
in
{
allowedUDPPortRanges = range;
allowedUDPPorts = [ 443 3478 3479 5349 ];
allowedTCPPortRanges = range;
allowedTCPPorts = [ 443 3478 3479 5349 ];
};
interfaces.enp6s0f0 =
let
range = with config.services.coturn; [
{
from = min-port;
to = max-port;
}
];
in
{
allowedUDPPortRanges = range;
allowedUDPPorts = [
443
3478
3479
5349
];
allowedTCPPortRanges = range;
allowedTCPPorts = [
443
3478
3479
5349
];
};
};
}

View File

@@ -1,8 +1,9 @@
{ config, ... }:
{
imports = [
./synapse.nix
./synapse-admin.nix
./synapse-auto-compressor.nix
./synapse.nix
./element.nix
./coturn.nix
./livekit.nix

View File

@@ -1,4 +1,9 @@
{ config, lib, fp, ... }:
{
config,
lib,
fp,
...
}:
let
cfg = config.services.mx-puppet-discord;
@@ -44,7 +49,6 @@ in
];
};
services.mx-puppet-discord.enable = false;
services.mx-puppet-discord.settings = {
bridge = {
@@ -52,16 +56,21 @@ in
domain = "pvv.ntnu.no";
homeserverUrl = "https://matrix.pvv.ntnu.no";
};
provisioning.whitelist = [ "@dandellion:dodsorf\\.as" "@danio:pvv\\.ntnu\\.no"];
provisioning.whitelist = [
"@dandellion:dodsorf\\.as"
"@danio:pvv\\.ntnu\\.no"
];
relay.whitelist = [ ".*" ];
selfService.whitelist = [ "@danio:pvv\\.ntnu\\.no" "@dandellion:dodsorf\\.as" ];
selfService.whitelist = [
"@danio:pvv\\.ntnu\\.no"
"@dandellion:dodsorf\\.as"
];
};
services.mx-puppet-discord.serviceDependencies = [
"matrix-synapse.target"
"nginx.service"
];
services.matrix-synapse-next.settings = {
app_service_config_files = [
config.sops.templates."discord-registration.yaml".path

View File

@@ -1,7 +1,13 @@
{ config, lib, pkgs, ... }:
{
config,
lib,
pkgs,
...
}:
let
synapse-cfg = config.services.matrix-synapse-next;
in {
in
{
services.pvv-matrix-well-known.client = {
"m.homeserver" = {
base_url = "https://matrix.pvv.ntnu.no";
@@ -21,12 +27,12 @@ in {
default_server_config = config.services.pvv-matrix-well-known.client;
disable_3pid_login = true;
# integrations_ui_url = "https://dimension.dodsorf.as/riot";
# integrations_rest_url = "https://dimension.dodsorf.as/api/v1/scalar";
# integrations_widgets_urls = [
# "https://dimension.dodsorf.as/widgets"
# ];
# integration_jitsi_widget_url = "https://dimension.dodsorf.as/widgets/jitsi";
# integrations_ui_url = "https://dimension.dodsorf.as/riot";
# integrations_rest_url = "https://dimension.dodsorf.as/api/v1/scalar";
# integrations_widgets_urls = [
# "https://dimension.dodsorf.as/widgets"
# ];
# integration_jitsi_widget_url = "https://dimension.dodsorf.as/widgets/jitsi";
defaultCountryCode = "NO";
showLabsSettings = true;
features = {
@@ -37,6 +43,7 @@ in {
# element call group calls
feature_group_calls = true;
};
default_country_code = "NO";
default_theme = "dark";
# Servers in this list should provide some sort of valuable scoping
# matrix.org is not useful compared to matrixrooms.info,

View File

@@ -1,4 +1,11 @@
{ config, lib, fp, unstablePkgs, inputs, ... }:
{
config,
lib,
fp,
unstablePkgs,
inputs,
...
}:
let
cfg = config.services.matrix-hookshot;
@@ -14,6 +21,10 @@ in
sopsFile = fp /secrets/bicep/matrix.yaml;
key = "hookshot/hs_token";
};
sops.secrets."matrix/hookshot/passkey" = {
sopsFile = fp /secrets/bicep/matrix.yaml;
key = "hookshot/passkey";
};
sops.templates."hookshot-registration.yaml" = {
owner = config.users.users.matrix-synapse.name;
@@ -44,9 +55,14 @@ in
};
systemd.services.matrix-hookshot = {
serviceConfig.SupplementaryGroups = [
config.users.groups.keys-matrix-registrations.name
];
serviceConfig = {
SupplementaryGroups = [
config.users.groups.keys-matrix-registrations.name
];
LoadCredential = [
"passkey.pem:${config.sops.secrets."matrix/hookshot/passkey".path}"
];
};
};
services.matrix-hookshot = {
@@ -54,6 +70,8 @@ in
package = unstablePkgs.matrix-hookshot;
registrationFile = config.sops.templates."hookshot-registration.yaml".path;
settings = {
passFile = "/run/credentials/matrix-hookshot.service/passkey.pem";
bridge = {
bindAddress = "127.0.0.1";
domain = "pvv.ntnu.no";
@@ -61,6 +79,7 @@ in
mediaUrl = "https://matrix.pvv.ntnu.no";
port = 9993;
};
listeners = [
{
bindAddress = webhookListenAddress;
@@ -73,6 +92,7 @@ in
];
}
];
generic = {
enabled = true;
outbound = true;
@@ -87,7 +107,8 @@ in
};
serviceBots = [
{ localpart = "bot_feeds";
{
localpart = "bot_feeds";
displayname = "Aya";
avatar = ./feeds.png;
prefix = "!aya";
@@ -102,20 +123,44 @@ in
permissions = [
# Users of the PVV Server
{ actor = "pvv.ntnu.no";
services = [ { service = "*"; level = "commands"; } ];
{
actor = "pvv.ntnu.no";
services = [
{
service = "*";
level = "commands";
}
];
}
# Members of Medlem space (for people with their own hs)
{ actor = "!pZOTJQinWyyTWaeOgK:pvv.ntnu.no";
services = [ { service = "*"; level = "commands"; } ];
{
actor = "!pZOTJQinWyyTWaeOgK:pvv.ntnu.no";
services = [
{
service = "*";
level = "commands";
}
];
}
# Members of Drift
{ actor = "!eYgeufLrninXxQpYml:pvv.ntnu.no";
services = [ { service = "*"; level = "admin"; } ];
{
actor = "!eYgeufLrninXxQpYml:pvv.ntnu.no";
services = [
{
service = "*";
level = "admin";
}
];
}
# Dan bootstrap
{ actor = "@dandellion:dodsorf.as";
services = [ { service = "*"; level = "admin"; } ];
{
actor = "@dandellion:dodsorf.as";
services = [
{
service = "*";
level = "admin";
}
];
}
];
};

View File

@@ -1,4 +1,9 @@
{ config, lib, fp, ... }:
{
config,
lib,
fp,
...
}:
let
synapseConfig = config.services.matrix-synapse-next;
matrixDomain = "matrix.pvv.ntnu.no";
@@ -20,10 +25,12 @@ in
};
services.pvv-matrix-well-known.client = lib.mkIf cfg.enable {
"org.matrix.msc4143.rtc_foci" = [{
type = "livekit";
livekit_service_url = "https://${matrixDomain}/livekit/jwt";
}];
"org.matrix.msc4143.rtc_foci" = [
{
type = "livekit";
livekit_service_url = "https://${matrixDomain}/livekit/jwt";
}
];
};
services.livekit = {
@@ -43,7 +50,12 @@ in
keyFile = config.sops.templates."matrix-livekit-keyfile".path;
};
systemd.services.lk-jwt-service.environment.LIVEKIT_FULL_ACCESS_HOMESERVERS = lib.mkIf cfg.enable matrixDomain;
systemd.services.lk-jwt-service.environment.LIVEKIT_FULL_ACCESS_HOMESERVERS = lib.mkIf cfg.enable (
builtins.concatStringsSep "," [
"pvv.ntnu.no"
"dodsorf.as"
]
);
services.nginx.virtualHosts.${matrixDomain} = lib.mkIf cfg.enable {
locations."^~ /livekit/jwt/" = {

View File

@@ -1,4 +1,9 @@
{ config, lib, fp, ... }:
{
config,
lib,
fp,
...
}:
{
sops.secrets."matrix/mjolnir/access_token" = {

View File

@@ -1,4 +1,11 @@
{ config, pkgs, fp, ... }:
{
config,
pkgs,
lib,
values,
fp,
...
}:
let
cfg = config.services.matrix-ooye;
in
@@ -28,6 +35,23 @@ in
};
};
services.rsync-pull-targets = lib.mkIf cfg.enable {
enable = true;
locations."/var/lib/private/matrix-ooye" = {
user = "root";
rrsyncArgs.ro = true;
authorizedKeysAttrs = [
"restrict"
"from=\"principal.pvv.ntnu.no,${values.hosts.principal.ipv6},${values.hosts.principal.ipv4}\""
"no-agent-forwarding"
"no-port-forwarding"
"no-pty"
"no-X11-forwarding"
];
publicKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIE5koYfor5+kKB30Dugj3dAWvmj8h/akQQ2XYDvLobFL matrix_ooye rsync backup";
};
};
services.matrix-ooye = {
enable = true;
homeserver = "https://matrix.pvv.ntnu.no";

View File

@@ -1,4 +1,9 @@
{ lib, buildPythonPackage, fetchFromGitHub, setuptools }:
{
lib,
buildPythonPackage,
fetchFromGitHub,
setuptools,
}:
buildPythonPackage rec {
pname = "matrix-synapse-smtp-auth";

View File

@@ -1,5 +1,9 @@
{ config, lib, pkgs, ... }:
{
config,
lib,
pkgs,
...
}:
# This service requires you to have access to endpoints not available over the internet
# Use an ssh proxy or similar to access this dashboard.

View File

@@ -0,0 +1,61 @@
{
config,
lib,
utils,
...
}:
let
cfg = config.services.synapse-auto-compressor;
in
{
services.synapse-auto-compressor = {
# enable = true;
postgresUrl = "postgresql://matrix-synapse@/synapse?host=/run/postgresql";
};
# NOTE: nixpkgs has some broken asserts, so the entire unit is vendored here
systemd.services.synapse-auto-compressor = {
description = "synapse-auto-compressor";
requires = [
"postgresql.target"
];
inherit (cfg) startAt;
serviceConfig = {
Type = "oneshot";
DynamicUser = true;
User = "matrix-synapse";
PrivateTmp = true;
ExecStart = utils.escapeSystemdExecArgs [
"${cfg.package}/bin/synapse_auto_compressor"
"-p"
cfg.postgresUrl
"-c"
cfg.settings.chunk_size
"-n"
cfg.settings.chunks_to_compress
"-l"
(lib.concatStringsSep "," (map toString cfg.settings.levels))
];
LockPersonality = true;
MemoryDenyWriteExecute = true;
NoNewPrivileges = true;
PrivateDevices = true;
PrivateMounts = true;
PrivateUsers = true;
RemoveIPC = true;
RestrictNamespaces = true;
RestrictRealtime = true;
RestrictSUIDSGID = true;
ProcSubset = "pid";
ProtectProc = "invisible";
ProtectSystem = "strict";
ProtectHome = true;
ProtectHostname = true;
ProtectClock = true;
ProtectKernelTunables = true;
ProtectKernelModules = true;
ProtectKernelLogs = true;
ProtectControlGroups = true;
};
};
}
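
For reference: utils.escapeSystemdExecArgs comes from the NixOS utils module argument. It joins a list into one ExecStart-safe command line, converting numbers with toString and applying systemd quoting. A minimal sketch:

    utils.escapeSystemdExecArgs [ "prog" "-c" 500 ]
    # == "prog -c 500" (arguments containing spaces would be quoted)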

View File

@@ -1,13 +1,23 @@
{ config, lib, fp, pkgs, values, inputs, ... }:
{
config,
lib,
fp,
pkgs,
values,
inputs,
...
}:
let
cfg = config.services.matrix-synapse-next;
matrix-lib = inputs.matrix-next.lib;
imap0Attrs = with lib; f: set:
listToAttrs (imap0 (i: attr: nameValuePair attr (f i attr set.${attr})) (attrNames set));
in {
imap0Attrs =
with lib;
f: set: listToAttrs (imap0 (i: attr: nameValuePair attr (f i attr set.${attr})) (attrNames set));
in
{
sops.secrets."matrix/synapse/signing_key" = {
key = "synapse/signing_key";
sopsFile = fp /secrets/bicep/matrix.yaml;
@@ -23,10 +33,29 @@ in {
owner = config.users.users.matrix-synapse.name;
group = config.users.users.matrix-synapse.group;
content = ''
registration_shared_secret: ${config.sops.placeholder."matrix/synapse/user_registration/registration_shared_secret"}
registration_shared_secret: ${
config.sops.placeholder."matrix/synapse/user_registration/registration_shared_secret"
}
'';
};
services.rsync-pull-targets = {
enable = true;
locations.${cfg.settings.media_store_path} = {
user = "root";
rrsyncArgs.ro = true;
authorizedKeysAttrs = [
"restrict"
"from=\"principal.pvv.ntnu.no,${values.hosts.principal.ipv6},${values.hosts.principal.ipv4}\""
"no-agent-forwarding"
"no-port-forwarding"
"no-pty"
"no-X11-forwarding"
];
publicKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIASnjI9b3j4ZS3BL/D1ggHfws1BkE8iS0v0cGpEmbG+k matrix_media_store rsync backup";
};
};
services.matrix-synapse-next = {
enable = true;
@@ -51,7 +80,7 @@ in {
signing_key_path = config.sops.secrets."matrix/synapse/signing_key".path;
media_store_path = "${cfg.dataDir}/media";
media_store_path = "${cfg.dataDir}/media";
database = {
name = "psycopg2";
@@ -93,7 +122,8 @@ in {
password_config.enabled = true;
modules = [
{ module = "smtp_auth_provider.SMTPAuthProvider";
{
module = "smtp_auth_provider.SMTPAuthProvider";
config = {
smtp_host = "smtp.pvv.ntnu.no";
};
@@ -166,61 +196,79 @@ in {
services.pvv-matrix-well-known.server."m.server" = "matrix.pvv.ntnu.no:443";
services.nginx.virtualHosts."matrix.pvv.ntnu.no" = lib.mkMerge [
{
kTLS = true;
}
{
locations."/_synapse/admin" = {
proxyPass = "http://$synapse_backend";
extraConfig = ''
allow 127.0.0.1;
allow ::1;
allow ${values.hosts.bicep.ipv4};
allow ${values.hosts.bicep.ipv6};
deny all;
'';
};
}
{
locations = let
connectionInfo = w: matrix-lib.workerConnectionResource "metrics" w;
socketAddress = w: let c = connectionInfo w; in "${c.host}:${toString c.port}";
{
kTLS = true;
}
{
locations."/_synapse/admin" = {
proxyPass = "http://$synapse_backend";
extraConfig = ''
allow 127.0.0.1;
allow ::1;
allow ${values.hosts.bicep.ipv4};
allow ${values.hosts.bicep.ipv6};
deny all;
'';
};
}
{
locations =
let
connectionInfo = w: matrix-lib.workerConnectionResource "metrics" w;
socketAddress =
w:
let
c = connectionInfo w;
in
"${c.host}:${toString c.port}";
metricsPath = w: "/metrics/${w.type}/${toString w.index}";
proxyPath = w: "http://${socketAddress w}/_synapse/metrics";
in lib.mapAttrs' (n: v: lib.nameValuePair
(metricsPath v) {
proxyPass = proxyPath v;
metricsPath = w: "/metrics/${w.type}/${toString w.index}";
proxyPath = w: "http://${socketAddress w}/_synapse/metrics";
in
lib.mapAttrs' (
n: v:
lib.nameValuePair (metricsPath v) {
proxyPass = proxyPath v;
extraConfig = ''
allow ${values.hosts.ildkule.ipv4};
allow ${values.hosts.ildkule.ipv6};
deny all;
'';
}
) cfg.workers.instances;
}
{
locations."/metrics/master/1" = {
proxyPass = "http://127.0.0.1:9000/_synapse/metrics";
extraConfig = ''
allow ${values.hosts.ildkule.ipv4};
allow ${values.hosts.ildkule.ipv6};
deny all;
'';
})
cfg.workers.instances;
}
{
locations."/metrics/master/1" = {
proxyPass = "http://127.0.0.1:9000/_synapse/metrics";
extraConfig = ''
allow ${values.hosts.ildkule.ipv4};
allow ${values.hosts.ildkule.ipv6};
deny all;
'';
};
};
locations."/metrics/" = let
endpoints = lib.pipe cfg.workers.instances [
(lib.mapAttrsToList (_: v: v))
(map (w: "${w.type}/${toString w.index}"))
(map (w: "matrix.pvv.ntnu.no/metrics/${w}"))
] ++ [ "matrix.pvv.ntnu.no/metrics/master/1" ];
in {
alias = pkgs.writeTextDir "/config.json"
(builtins.toJSON [
{ targets = endpoints;
labels = { };
}]) + "/";
};
}];
locations."/metrics/" =
let
endpoints =
lib.pipe cfg.workers.instances [
(lib.mapAttrsToList (_: v: v))
(map (w: "${w.type}/${toString w.index}"))
(map (w: "matrix.pvv.ntnu.no/metrics/${w}"))
]
++ [ "matrix.pvv.ntnu.no/metrics/master/1" ];
in
{
alias =
pkgs.writeTextDir "/config.json" (
builtins.toJSON [
{
targets = endpoints;
labels = { };
}
]
)
+ "/";
};
}
];
}
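
For reference: the alias trick at the end deserves a remark. pkgs.writeTextDir "/config.json" txt builds a store directory containing the single file config.json, and appending "/" yields the directory path that nginx's alias expects, so GET /metrics/config.json serves the JSON target list (shaped like Prometheus service-discovery input, presumably scraped from ildkule). Sketch:

    pkgs.writeTextDir "/config.json" (builtins.toJSON [ ])
    # => /nix/store/<hash>-config.json             (a directory)
    #    /nix/store/<hash>-config.json/config.json (the file inside it)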

View File

@@ -1,4 +1,9 @@
{ config, pkgs, lib, ... }:
{
config,
pkgs,
lib,
...
}:
let
cfg = config.services.pvv-matrix-well-known;
format = pkgs.formats.json { };

View File

@@ -1,4 +1,9 @@
{ config, lib, pkgs, ... }:
{
config,
lib,
pkgs,
...
}:
let
cfg = config.services.minecraft-heatmap;
in
@@ -22,28 +27,30 @@ in
};
};
systemd.services.minecraft-heatmap-ingest-logs = {
systemd.services.minecraft-heatmap-ingest-logs = lib.mkIf cfg.enable {
serviceConfig.LoadCredential = [
"sshkey:${config.sops.secrets."minecraft-heatmap/ssh-key/private".path}"
];
preStart = let
knownHostsFile = pkgs.writeText "minecraft-heatmap-known-hosts" ''
innovation.pvv.ntnu.no ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIE9O/y5uqcLKCodg2Q+XfZPH/AoUIyBlDhigImU+4+Kn
innovation.pvv.ntnu.no ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQClR9GvWeVPZHudlnFXhGHUX5sGX9nscsOsotnlQ4uVuGsgvRifsVsuDULlAFXwoV1tYp4vnyXlsVtMddpLI5ANOIDcZ4fgDxpfSQmtHKssNpDcfMhFJbfRVyacipjA4osxTxvLox/yjtVt+URjTHUA1MWzEwc26KfiOvWO5tCBTan7doN/4KOyT05GwBxwzUAwUmoGTacIITck2Y9qp4+xFYqehbXqPdBb15hFyd38OCQhtU1hWV2Yi18+hJ4nyjc/g5pr6mW09ULlFghe/BaTUXrTisYC6bMcJZsTDwsvld9581KPvoNZOTQhZPTEQCZZ1h54fe0ZHuveVB3TIHovZyjoUuaf4uiFOjJVaKRB+Ig+Il6r7tMUn9CyHtus/Nd86E0TFBzoKxM0OFu88oaUlDtZVrUJL5En1lGoimajebb1JPxllFN5hqIT+gVyMY6nRzkcfS7ieny/U4rzXY2rfz98selftgh3LsBywwADv65i+mPw1A/1QdND1R6fV4U=
innovation.pvv.ntnu.no ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBNjl3HfsDqmALWCL9uhz9k93RAD2565ndBqUh4N/rvI7MCwEJ6iRCdDev0YzB1Fpg24oriyYoxZRP24ifC2sQf8=
preStart =
let
knownHostsFile = pkgs.writeText "minecraft-heatmap-known-hosts" ''
innovation.pvv.ntnu.no ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIE9O/y5uqcLKCodg2Q+XfZPH/AoUIyBlDhigImU+4+Kn
innovation.pvv.ntnu.no ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQClR9GvWeVPZHudlnFXhGHUX5sGX9nscsOsotnlQ4uVuGsgvRifsVsuDULlAFXwoV1tYp4vnyXlsVtMddpLI5ANOIDcZ4fgDxpfSQmtHKssNpDcfMhFJbfRVyacipjA4osxTxvLox/yjtVt+URjTHUA1MWzEwc26KfiOvWO5tCBTan7doN/4KOyT05GwBxwzUAwUmoGTacIITck2Y9qp4+xFYqehbXqPdBb15hFyd38OCQhtU1hWV2Yi18+hJ4nyjc/g5pr6mW09ULlFghe/BaTUXrTisYC6bMcJZsTDwsvld9581KPvoNZOTQhZPTEQCZZ1h54fe0ZHuveVB3TIHovZyjoUuaf4uiFOjJVaKRB+Ig+Il6r7tMUn9CyHtus/Nd86E0TFBzoKxM0OFu88oaUlDtZVrUJL5En1lGoimajebb1JPxllFN5hqIT+gVyMY6nRzkcfS7ieny/U4rzXY2rfz98selftgh3LsBywwADv65i+mPw1A/1QdND1R6fV4U=
innovation.pvv.ntnu.no ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBNjl3HfsDqmALWCL9uhz9k93RAD2565ndBqUh4N/rvI7MCwEJ6iRCdDev0YzB1Fpg24oriyYoxZRP24ifC2sQf8=
'';
in
''
mkdir -p '${cfg.minecraftLogsDir}'
"${lib.getExe pkgs.rsync}" \
--archive \
--verbose \
--progress \
--no-owner \
--no-group \
--rsh="${pkgs.openssh}/bin/ssh -o UserKnownHostsFile=\"${knownHostsFile}\" -i \"$CREDENTIALS_DIRECTORY\"/sshkey" \
root@innovation.pvv.ntnu.no:/ \
'${cfg.minecraftLogsDir}'/
'';
in ''
mkdir -p '${cfg.minecraftLogsDir}'
"${lib.getExe pkgs.rsync}" \
--archive \
--verbose \
--progress \
--no-owner \
--no-group \
--rsh="${pkgs.openssh}/bin/ssh -o UserKnownHostsFile=\"${knownHostsFile}\" -i \"$CREDENTIALS_DIRECTORY\"/sshkey" \
root@innovation.pvv.ntnu.no:/ \
'${cfg.minecraftLogsDir}'/
'';
};
}

View File

@@ -1,55 +0,0 @@
{ pkgs, lib, config, values, ... }:
{
sops.secrets."mysql/password" = {
owner = "mysql";
group = "mysql";
};
users.mysql.passwordFile = config.sops.secrets."mysql/password".path;
services.mysql = {
enable = true;
dataDir = "/data/mysql";
package = pkgs.mariadb;
settings = {
mysqld = {
# PVV allows a lot of connections at the same time
max_connect_errors = 10000;
bind-address = values.services.mysql.ipv4;
skip-networking = 0;
# This was needed in order to be able to use all of the old users
# during migration from knakelibrak to bicep in Sep. 2023
secure_auth = 0;
};
};
# Note: This user also has MAX_USER_CONNECTIONS set to 3, and
# a password which can be found in /secrets/ildkule/ildkule.yaml
# We have also changed both the host and auth plugin of this user
# to be 'ildkule.pvv.ntnu.no' and 'mysql_native_password' respectively.
ensureUsers = [{
name = "prometheus_mysqld_exporter";
ensurePermissions = {
"*.*" = "PROCESS, REPLICATION CLIENT, SELECT, SLAVE MONITOR";
};
}];
};
services.mysqlBackup = {
enable = true;
location = "/var/lib/mysql/backups";
};
networking.firewall.allowedTCPPorts = [ 3306 ];
systemd.services.mysql.serviceConfig = {
IPAddressDeny = "any";
IPAddressAllow = [
values.ipv4-space
values.ipv6-space
values.hosts.ildkule.ipv4
values.hosts.ildkule.ipv6
];
};
}

View File

@@ -0,0 +1,91 @@
{
config,
lib,
pkgs,
values,
...
}:
let
cfg = config.services.mysql;
backupDir = "/data/mysql-backups";
in
{
# services.mysqlBackup = lib.mkIf cfg.enable {
# enable = true;
# location = "/var/lib/mysql-backups";
# };
systemd.tmpfiles.settings."10-mysql-backups".${backupDir}.d = {
user = "mysql";
group = "mysql";
mode = "700";
};
services.rsync-pull-targets = lib.mkIf cfg.enable {
enable = true;
locations.${backupDir} = {
user = "root";
rrsyncArgs.ro = true;
authorizedKeysAttrs = [
"restrict"
"from=\"principal.pvv.ntnu.no,${values.hosts.principal.ipv6},${values.hosts.principal.ipv4}\""
"no-agent-forwarding"
"no-port-forwarding"
"no-pty"
"no-X11-forwarding"
];
publicKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIJgj55/7Cnj4cYMJ5sIkl+OwcGeBe039kXJTOf2wvo9j mysql rsync backup";
};
};
# NOTE: instead of having the upstream nixpkgs mysql backup unit trigger
# another unit, it was easier to just make one ourselves.
systemd.services."backup-mysql" = lib.mkIf cfg.enable {
description = "Backup MySQL data";
requires = [ "mysql.service" ];
path = with pkgs; [
cfg.package
coreutils
zstd
];
script =
let
rotations = 2;
in
''
set -euo pipefail
OUT_FILE="$STATE_DIRECTORY/mysql-dump-$(date --iso-8601).sql.zst"
mysqldump --all-databases | zstd --compress -9 --rsyncable -o "$OUT_FILE"
# NOTE: this needs to be a hardlink for rrsync to allow sending it
rm "$STATE_DIRECTORY/mysql-dump-latest.sql.zst" ||:
ln -T "$OUT_FILE" "$STATE_DIRECTORY/mysql-dump-latest.sql.zst"
while [ "$(find "$STATE_DIRECTORY" -type f -printf '.' | wc -c)" -gt ${toString (rotations + 1)} ]; do
rm "$(find "$STATE_DIRECTORY" -type f -printf '%T+ %p\n' | sort | head -n 1 | cut -d' ' -f2)"
done
'';
serviceConfig = {
Type = "oneshot";
User = "mysql";
Group = "mysql";
UMask = "0077";
Nice = 19;
IOSchedulingClass = "best-effort";
IOSchedulingPriority = 7;
StateDirectory = [ "mysql-backups" ];
BindPaths = [ "${backupDir}:/var/lib/mysql-backups" ];
# TODO: hardening
};
startAt = "*-*-* 02:15:00";
};
}
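
Two details of the backup script above, spelled out. The -latest name is created with ln -T as a hardlink because, per the in-code NOTE, rrsync will only send it that way (a symlink would be transferred as a link, not as file contents; this reading is an inference, the diff does not elaborate). The rotation loop counts regular files with find -printf '.' | wc -c, where the hardlink counts as its own directory entry, and while the count exceeds rotations + 1 it removes the oldest file by mtime:

    find "$STATE_DIRECTORY" -type f -printf '%T+ %p\n' | sort | head -n 1 | cut -d' ' -f2
    # %T+ is a sortable timestamp, %p the path; the oldest file sorts first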

View File

@@ -0,0 +1,82 @@
{
config,
pkgs,
lib,
values,
...
}:
let
cfg = config.services.mysql;
dataDir = "/data/mysql";
in
{
imports = [ ./backup.nix ];
sops.secrets."mysql/password" = {
owner = "mysql";
group = "mysql";
};
users.mysql.passwordFile = config.sops.secrets."mysql/password".path;
services.mysql = {
enable = true;
package = pkgs.mariadb_118;
settings = {
mysqld = {
# PVV allows a lot of connections at the same time
max_connect_errors = 10000;
bind-address = values.services.mysql.ipv4;
skip-networking = 0;
# This was needed in order to be able to use all of the old users
# during migration from knakelibrak to bicep in Sep. 2023
secure_auth = 0;
slow-query-log = 1;
slow-query-log-file = "/var/log/mysql/mysql-slow.log";
};
};
# Note: This user also has MAX_USER_CONNECTIONS set to 3, and
# a password which can be found in /secrets/ildkule/ildkule.yaml
# We have also changed both the host and auth plugin of this user
# to be 'ildkule.pvv.ntnu.no' and 'mysql_native_password' respectively.
ensureUsers = [
{
name = "prometheus_mysqld_exporter";
ensurePermissions = {
"*.*" = "PROCESS, REPLICATION CLIENT, SELECT, SLAVE MONITOR";
};
}
];
};
networking.firewall.allowedTCPPorts = lib.mkIf cfg.enable [ 3306 ];
systemd.tmpfiles.settings."10-mysql".${dataDir}.d = lib.mkIf cfg.enable {
inherit (cfg) user group;
mode = "0700";
};
systemd.services.mysql = lib.mkIf cfg.enable {
after = [
"systemd-tmpfiles-setup.service"
"systemd-tmpfiles-resetup.service"
];
serviceConfig = {
BindPaths = [ "${dataDir}:${cfg.dataDir}" ];
LogsDirectory = "mysql";
IPAddressDeny = "any";
IPAddressAllow = [
values.ipv4-space
values.ipv6-space
values.hosts.ildkule.ipv4
values.hosts.ildkule.ipv6
];
};
};
}
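
Note how the old dataDir = "/data/mysql" setting (in the file removed above) is replaced by a bind mount: BindPaths = [ "${dataDir}:${cfg.dataDir}" ] mounts /data/mysql over the module's default data directory inside the unit's mount namespace, so MariaDB keeps its stock paths while the bytes live on the /data disk. A minimal sketch, assuming the module default of /var/lib/mysql:

    systemd.services.mysql.serviceConfig.BindPaths = [ "/data/mysql:/var/lib/mysql" ];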

View File

@@ -0,0 +1,92 @@
{
config,
lib,
pkgs,
values,
...
}:
let
cfg = config.services.postgresql;
backupDir = "/data/postgresql-backups";
in
{
# services.postgresqlBackup = lib.mkIf cfg.enable {
# enable = true;
# location = "/var/lib/postgresql-backups";
# backupAll = true;
# };
systemd.tmpfiles.settings."10-postgresql-backups".${backupDir}.d = {
user = "postgres";
group = "postgres";
mode = "700";
};
services.rsync-pull-targets = lib.mkIf cfg.enable {
enable = true;
locations.${backupDir} = {
user = "root";
rrsyncArgs.ro = true;
authorizedKeysAttrs = [
"restrict"
"from=\"principal.pvv.ntnu.no,${values.hosts.principal.ipv6},${values.hosts.principal.ipv4}\""
"no-agent-forwarding"
"no-port-forwarding"
"no-pty"
"no-X11-forwarding"
];
publicKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIGvO7QX7QmwSiGLXEsaxPIOpAqnJP3M+qqQRe5dzf8gJ postgresql rsync backup";
};
};
# NOTE: instead of having the upstream nixpkgs postgres backup unit trigger
# another unit, it was easier to just make one ourselves.
systemd.services."backup-postgresql" = {
description = "Backup PostgreSQL data";
requires = [ "postgresql.service" ];
path = with pkgs; [
coreutils
zstd
cfg.package
];
script =
let
rotations = 2;
in
''
set -euo pipefail
OUT_FILE="$STATE_DIRECTORY/postgresql-dump-$(date --iso-8601).sql.zst"
pg_dumpall -U postgres | zstd --compress -9 --rsyncable -o "$OUT_FILE"
# NOTE: this needs to be a hardlink for rrsync to allow sending it
rm "$STATE_DIRECTORY/postgresql-dump-latest.sql.zst" ||:
ln -T "$OUT_FILE" "$STATE_DIRECTORY/postgresql-dump-latest.sql.zst"
while [ "$(find "$STATE_DIRECTORY" -type f -printf '.' | wc -c)" -gt ${toString (rotations + 1)} ]; do
rm "$(find "$STATE_DIRECTORY" -type f -printf '%T+ %p\n' | sort | head -n 1 | cut -d' ' -f2)"
done
'';
serviceConfig = {
Type = "oneshot";
User = "postgres";
Group = "postgres";
UMask = "0077";
Nice = 19;
IOSchedulingClass = "best-effort";
IOSchedulingPriority = 7;
StateDirectory = [ "postgresql-backups" ];
BindPaths = [ "${backupDir}:/var/lib/postgresql-backups" ];
# TODO: hardening
};
startAt = "*-*-* 01:15:00";
};
}

View File

@@ -1,8 +1,19 @@
{ config, pkgs, values, ... }:
{
config,
lib,
pkgs,
values,
...
}:
let
cfg = config.services.postgresql;
in
{
imports = [ ./backup.nix ];
services.postgresql = {
enable = true;
package = pkgs.postgresql_15;
package = pkgs.postgresql_18;
enableTCPIP = true;
authentication = ''
@@ -74,13 +85,13 @@
};
};
systemd.tmpfiles.settings."10-postgresql"."/data/postgresql".d = {
systemd.tmpfiles.settings."10-postgresql"."/data/postgresql".d = lib.mkIf cfg.enable {
user = config.systemd.services.postgresql.serviceConfig.User;
group = config.systemd.services.postgresql.serviceConfig.Group;
mode = "0700";
};
systemd.services.postgresql-setup = {
systemd.services.postgresql-setup = lib.mkIf cfg.enable {
after = [
"systemd-tmpfiles-setup.service"
"systemd-tmpfiles-resetup.service"
@@ -95,7 +106,7 @@
};
};
systemd.services.postgresql = {
systemd.services.postgresql = lib.mkIf cfg.enable {
after = [
"systemd-tmpfiles-setup.service"
"systemd-tmpfiles-resetup.service"
@@ -110,18 +121,12 @@
};
};
environment.snakeoil-certs."/etc/certs/postgres" = {
environment.snakeoil-certs."/etc/certs/postgres" = lib.mkIf cfg.enable {
owner = "postgres";
group = "postgres";
subject = "/C=NO/O=Programvareverkstedet/CN=postgres.pvv.ntnu.no/emailAddress=drift@pvv.ntnu.no";
};
networking.firewall.allowedTCPPorts = [ 5432 ];
networking.firewall.allowedUDPPorts = [ 5432 ];
services.postgresqlBackup = {
enable = true;
location = "/var/lib/postgres/backups";
backupAll = true;
};
networking.firewall.allowedTCPPorts = lib.mkIf cfg.enable [ 5432 ];
networking.firewall.allowedUDPPorts = lib.mkIf cfg.enable [ 5432 ];
}

View File

@@ -1,8 +1,14 @@
{ config, pkgs, values, ... }:
{
lib,
config,
pkgs,
values,
...
}:
{
networking.nat = {
enable = true;
internalInterfaces = ["ve-+"];
internalInterfaces = [ "ve-+" ];
externalInterface = "ens3";
# Lazy IPv6 connectivity for the container
enableIPv6 = true;
@@ -10,9 +16,11 @@
containers.bikkje = {
autoStart = true;
config = { config, pkgs, ... }: {
#import packages
packages = with pkgs; [
config =
{ config, pkgs, ... }:
{
#import packages
packages = with pkgs; [
alpine
mutt
mutt-ics
@@ -22,26 +30,66 @@
hexchat
irssi
pidgin
];
];
networking = {
hostName = "bikkje";
firewall = {
enable = true;
# Allow SSH and HTTP, plus the ports used for email and IRC
allowedTCPPorts = [ 80 22 194 994 6665 6666 6667 6668 6669 6697 995 993 25 465 587 110 143 993 995 ];
allowedUDPPorts = [ 80 22 194 994 6665 6666 6667 6668 6669 6697 995 993 25 465 587 110 143 993 995 ];
networking = {
hostName = "bikkje";
firewall = {
enable = true;
# Allow SSH and HTTP, plus the ports used for email and IRC
allowedTCPPorts = [
80
22
194
994
6665
6666
6667
6668
6669
6697
995
993
25
465
587
110
143
993
995
];
allowedUDPPorts = [
80
22
194
994
6665
6666
6667
6668
6669
6697
995
993
25
465
587
110
143
993
995
];
};
# Use systemd-resolved inside the container
# Workaround for bug https://github.com/NixOS/nixpkgs/issues/162686
useHostResolvConf = lib.mkForce false;
};
# Use systemd-resolved inside the container
# Workaround for bug https://github.com/NixOS/nixpkgs/issues/162686
useHostResolvConf = mkForce false;
services.resolved.enable = true;
# Don't change (even during upgrades) unless you know what you are doing.
# See https://search.nixos.org/options?show=system.stateVersion
system.stateVersion = "23.11";
};
services.resolved.enable = true;
# Don't change (even during upgrades) unless you know what you are doing.
# See https://search.nixos.org/options?show=system.stateVersion
system.stateVersion = "23.11";
};
};
};
}
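
The resolv.conf change inside the container is the usual workaround for NixOS/nixpkgs#162686: the host's /etc/resolv.conf is bind-mounted into containers by default, which conflicts with running systemd-resolved inside them. Reduced to its essentials (container name is a placeholder):

    containers.example = {
      autoStart = true;
      config = { lib, ... }: {
        # stop inheriting the host's /etc/resolv.conf ...
        networking.useHostResolvConf = lib.mkForce false;
        # ... and let resolved manage DNS inside the container instead
        services.resolved.enable = true;
        system.stateVersion = "23.11";
      };
    };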

View File

@@ -1,21 +1,30 @@
{ config, fp, pkgs, values, ... }:
{
config,
fp,
pkgs,
values,
...
}:
{
imports = [
# Include the results of the hardware scan.
./hardware-configuration.nix
(fp /base)
# Include the results of the hardware scan.
./hardware-configuration.nix
(fp /base)
./services/grzegorz.nix
];
./services/grzegorz.nix
];
systemd.network.networks."30-eno1" = values.defaultNetworkConfig // {
matchConfig.Name = "eno1";
address = with values.hosts.brzeczyszczykiewicz; [ (ipv4 + "/25") (ipv6 + "/64") ];
address = with values.hosts.brzeczyszczykiewicz; [
(ipv4 + "/25")
(ipv6 + "/64")
];
};
fonts.fontconfig.enable = true;
# Don't change (even during upgrades) unless you know what you are doing.
# See https://search.nixos.org/options?show=system.stateVersion
system.stateVersion = "23.05";
system.stateVersion = "25.11";
}
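
The values.defaultNetworkConfig // { ... } idiom in these per-host network units relies on Nix's update operator, which merges shallowly: top-level keys on the right replace those on the left wholesale. A small illustration of the nested-attribute gotcha this implies:

    let
      defaults = { dns = [ "..." ]; matchConfig.Type = "ether"; };
      host = defaults // { matchConfig.Name = "eno1"; };
    in
    host.matchConfig
    # => { Name = "eno1"; } -- Type is lost, since `//` replaces nested sets whole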

View File

@@ -1,31 +1,45 @@
# Do not modify this file! It was generated by 'nixos-generate-config'
# and may be overwritten by future invocations. Please make changes
# to /etc/nixos/configuration.nix instead.
{ config, lib, pkgs, modulesPath, ... }:
{
config,
lib,
pkgs,
modulesPath,
...
}:
{
imports =
[ (modulesPath + "/installer/scan/not-detected.nix")
];
imports = [
(modulesPath + "/installer/scan/not-detected.nix")
];
boot.initrd.availableKernelModules = [ "xhci_pci" "ehci_pci" "ahci" "usbhid" "usb_storage" "sd_mod" "sr_mod" ];
boot.initrd.availableKernelModules = [
"xhci_pci"
"ehci_pci"
"ahci"
"usbhid"
"usb_storage"
"sd_mod"
"sr_mod"
];
boot.initrd.kernelModules = [ ];
boot.kernelModules = [ "kvm-intel" ];
boot.extraModulePackages = [ ];
fileSystems."/" =
{ device = "/dev/disk/by-uuid/4e8667f8-55de-4103-8369-b94665f42204";
fsType = "ext4";
};
fileSystems."/" = {
device = "/dev/disk/by-uuid/4e8667f8-55de-4103-8369-b94665f42204";
fsType = "ext4";
};
fileSystems."/boot" =
{ device = "/dev/disk/by-uuid/82E3-3D03";
fsType = "vfat";
};
fileSystems."/boot" = {
device = "/dev/disk/by-uuid/82E3-3D03";
fsType = "vfat";
};
swapDevices =
[ { device = "/dev/disk/by-uuid/d0bf9a21-44bc-44a3-ae55-8f0971875883"; }
];
swapDevices = [
{ device = "/dev/disk/by-uuid/d0bf9a21-44bc-44a3-ae55-8f0971875883"; }
];
# Enables DHCP on each ethernet and wireless interface. In case of scripted networking
# (the default) this is the recommended approach. When using systemd-networkd it's

View File

@@ -1,16 +1,25 @@
{ config, fp, pkgs, values, ... }:
{
config,
fp,
pkgs,
values,
...
}:
{
imports = [
# Include the results of the hardware scan.
./hardware-configuration.nix
(fp /base)
# Include the results of the hardware scan.
./hardware-configuration.nix
(fp /base)
(fp /modules/grzegorz.nix)
];
(fp /modules/grzegorz.nix)
];
systemd.network.networks."30-eno1" = values.defaultNetworkConfig // {
matchConfig.Name = "eno1";
address = with values.hosts.georg; [ (ipv4 + "/25") (ipv6 + "/64") ];
address = with values.hosts.georg; [
(ipv4 + "/25")
(ipv6 + "/64")
];
};
services.spotifyd = {
@@ -32,5 +41,5 @@
# Don't change (even during upgrades) unless you know what you are doing.
# See https://search.nixos.org/options?show=system.stateVersion
system.stateVersion = "23.05";
system.stateVersion = "25.11";
}

View File

@@ -1,31 +1,44 @@
# Do not modify this file! It was generated by 'nixos-generate-config'
# and may be overwritten by future invocations. Please make changes
# to /etc/nixos/configuration.nix instead.
{ config, lib, pkgs, modulesPath, ... }:
{
config,
lib,
pkgs,
modulesPath,
...
}:
{
imports =
[ (modulesPath + "/installer/scan/not-detected.nix")
];
imports = [
(modulesPath + "/installer/scan/not-detected.nix")
];
boot.initrd.availableKernelModules = [ "xhci_pci" "ehci_pci" "ahci" "usb_storage" "usbhid" "sd_mod" ];
boot.initrd.availableKernelModules = [
"xhci_pci"
"ehci_pci"
"ahci"
"usb_storage"
"usbhid"
"sd_mod"
];
boot.initrd.kernelModules = [ ];
boot.kernelModules = [ "kvm-intel" ];
boot.extraModulePackages = [ ];
fileSystems."/" =
{ device = "/dev/disk/by-uuid/33825f0d-5a63-40fc-83db-bfa1ebb72ba0";
fsType = "ext4";
};
fileSystems."/" = {
device = "/dev/disk/by-uuid/33825f0d-5a63-40fc-83db-bfa1ebb72ba0";
fsType = "ext4";
};
fileSystems."/boot" =
{ device = "/dev/disk/by-uuid/145E-7362";
fsType = "vfat";
};
fileSystems."/boot" = {
device = "/dev/disk/by-uuid/145E-7362";
fsType = "vfat";
};
swapDevices =
[ { device = "/dev/disk/by-uuid/7ed27e21-3247-44cd-8bcc-5d4a2efebf57"; }
];
swapDevices = [
{ device = "/dev/disk/by-uuid/7ed27e21-3247-44cd-8bcc-5d4a2efebf57"; }
];
# Enables DHCP on each ethernet and wireless interface. In case of scripted networking
# (the default) this is the recommended approach. When using systemd-networkd it's

View File

@@ -31,7 +31,7 @@
};
fileSystems."/boot" = {
device = "/dev/disk/by-uuid/D00A-B488";
device = "/dev/disk/by-uuid/933A-3005";
fsType = "vfat";
options = [
"fmask=0077"

View File

@@ -1,14 +1,21 @@
{ config, fp, pkgs, lib, values, ... }:
{
config,
fp,
pkgs,
lib,
values,
...
}:
{
imports = [
# Include the results of the hardware scan.
./hardware-configuration.nix
(fp /base)
# Include the results of the hardware scan.
./hardware-configuration.nix
(fp /base)
./services/monitoring
./services/nginx
./services/journald-remote.nix
];
./services/monitoring
./services/nginx
./services/journald-remote.nix
];
boot.loader.systemd-boot.enable = false;
boot.loader.grub.device = "/dev/vda";
@@ -17,26 +24,37 @@
# Openstack Neutron and systemd-networkd are not best friends, use something else:
systemd.network.enable = lib.mkForce false;
networking = let
hostConf = values.hosts.ildkule;
in {
tempAddresses = "disabled";
useDHCP = lib.mkForce true;
networking =
let
hostConf = values.hosts.ildkule;
in
{
tempAddresses = "disabled";
useDHCP = lib.mkForce true;
search = values.defaultNetworkConfig.domains;
nameservers = values.defaultNetworkConfig.dns;
defaultGateway.address = hostConf.ipv4_internal_gw;
search = values.defaultNetworkConfig.domains;
nameservers = values.defaultNetworkConfig.dns;
defaultGateway.address = hostConf.ipv4_internal_gw;
interfaces."ens4" = {
ipv4.addresses = [
{ address = hostConf.ipv4; prefixLength = 32; }
{ address = hostConf.ipv4_internal; prefixLength = 24; }
];
ipv6.addresses = [
{ address = hostConf.ipv6; prefixLength = 64; }
];
interfaces."ens4" = {
ipv4.addresses = [
{
address = hostConf.ipv4;
prefixLength = 32;
}
{
address = hostConf.ipv4_internal;
prefixLength = 24;
}
];
ipv6.addresses = [
{
address = hostConf.ipv6;
prefixLength = 64;
}
];
};
};
};
services.qemuGuest.enable = true;

View File

@@ -1,7 +1,12 @@
{ modulesPath, lib, ... }:
{
imports = [ (modulesPath + "/profiles/qemu-guest.nix") ];
boot.initrd.availableKernelModules = [ "ata_piix" "uhci_hcd" "xen_blkfront" "vmw_pvscsi" ];
boot.initrd.availableKernelModules = [
"ata_piix"
"uhci_hcd"
"xen_blkfront"
"vmw_pvscsi"
];
boot.initrd.kernelModules = [ "nvme" ];
fileSystems."/" = {
device = "/dev/disk/by-uuid/e35eb4ce-aac3-4f91-8383-6e7cd8bbf942";

View File

@@ -1,4 +1,9 @@
{ config, lib, values, ... }:
{
config,
lib,
values,
...
}:
let
cfg = config.services.journald.remote;
domainName = "journald.pvv.ntnu.no";
@@ -22,13 +27,15 @@ in
services.journald.remote = {
enable = true;
settings.Remote = let
inherit (config.security.acme.certs.${domainName}) directory;
in {
ServerKeyFile = "/run/credentials/systemd-journal-remote.service/key.pem";
ServerCertificateFile = "/run/credentials/systemd-journal-remote.service/cert.pem";
TrustedCertificateFile = "-";
};
settings.Remote =
let
inherit (config.security.acme.certs.${domainName}) directory;
in
{
ServerKeyFile = "/run/credentials/systemd-journal-remote.service/key.pem";
ServerCertificateFile = "/run/credentials/systemd-journal-remote.service/cert.pem";
TrustedCertificateFile = "-";
};
};
systemd.sockets."systemd-journal-remote" = {
@@ -47,12 +54,14 @@ in
systemd.services."systemd-journal-remote" = {
serviceConfig = {
LoadCredential = let
inherit (config.security.acme.certs.${domainName}) directory;
in [
"key.pem:${directory}/key.pem"
"cert.pem:${directory}/cert.pem"
];
LoadCredential =
let
inherit (config.security.acme.certs.${domainName}) directory;
in
[
"key.pem:${directory}/key.pem"
"cert.pem:${directory}/cert.pem"
];
};
};
}
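
The LoadCredential refactor is the mechanism behind the /run/credentials paths above: systemd copies each listed file into /run/credentials/systemd-journal-remote.service/ at unit start, so the hardened unit never needs read access to the ACME state directory itself. The pairing, reduced:

    systemd.services."systemd-journal-remote".serviceConfig.LoadCredential = [
      # "target-name:source-path" -- source is the host's ACME cert directory
      "key.pem:${config.security.acme.certs."journald.pvv.ntnu.no".directory}/key.pem"
    ];
    # consumed by the unit as:
    #   ServerKeyFile = "/run/credentials/systemd-journal-remote.service/key.pem";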

File diff suppressed because it is too large.

View File

@@ -13,7 +13,7 @@
]
},
"description": "",
"editable": true,
"editable": false,
"gnetId": 11323,
"graphTooltip": 1,
"id": 31,
@@ -1899,7 +1899,7 @@
"dashes": false,
"datasource": "$datasource",
"decimals": 0,
"description": "***System Memory***: Total Memory for the system.\\\n***InnoDB Buffer Pool Data***: InnoDB maintains a storage area called the buffer pool for caching data and indexes in memory.\\\n***TokuDB Cache Size***: Similar in function to the InnoDB Buffer Pool, TokuDB will allocate 50% of the installed RAM for its own cache.\\\n***Key Buffer Size***: Index blocks for MYISAM tables are buffered and are shared by all threads. key_buffer_size is the size of the buffer used for index blocks.\\\n***Adaptive Hash Index Size***: When InnoDB notices that some index values are being accessed very frequently, it builds a hash index for them in memory on top of B-Tree indexes.\\\n ***Query Cache Size***: The query cache stores the text of a SELECT statement together with the corresponding result that was sent to the client. The query cache has huge scalability problems in that only one thread can do an operation in the query cache at the same time.\\\n***InnoDB Dictionary Size***: The data dictionary is InnoDB 's internal catalog of tables. InnoDB stores the data dictionary on disk, and loads entries into memory while the server is running.\\\n***InnoDB Log Buffer Size***: The MySQL InnoDB log buffer allows transactions to run without having to write the log to disk before the transactions commit.",
"description": "***System Memory***: Total Memory for the system.\\\n***InnoDB Buffer Pool Data***: InnoDB maintains a storage area called the buffer pool for caching data and indexes in memory.\\\n***TokuDB Cache Size***: Similar in function to the InnoDB Buffer Pool, TokuDB will allocate 50% of the installed RAM for its own cache.\\\n***Key Buffer Size***: Index blocks for MYISAM tables are buffered and are shared by all threads. key_buffer_size is the size of the buffer used for index blocks.\\\n***Adaptive Hash Index Size***: When InnoDB notices that some index values are being accessed very frequently, it builds a hash index for them in memory on top of B-Tree indexes.\\\n ***Query Cache Size***: The query cache stores the text of a SELECT statement together with the corresponding result that was sent to the client. The query cache has huge scalability problems in that only one thread can do an operation in the query cache at the same time.\\\n***InnoDB Dictionary Size***: The data dictionary is InnoDB s internal catalog of tables. InnoDB stores the data dictionary on disk, and loads entries into memory while the server is running.\\\n***InnoDB Log Buffer Size***: The MySQL InnoDB log buffer allows transactions to run without having to write the log to disk before the transactions commit.",
"editable": true,
"error": false,
"fieldConfig": {
@@ -3690,7 +3690,7 @@
},
"hide": 0,
"includeAll": false,
"label": "Data Source",
"label": "Data source",
"multi": false,
"name": "datasource",
"options": [],
@@ -3713,12 +3713,12 @@
"definition": "label_values(mysql_up, job)",
"hide": 0,
"includeAll": true,
"label": "job",
"label": "Job",
"multi": true,
"name": "job",
"options": [],
"query": "label_values(mysql_up, job)",
"refresh": 1,
"refresh": 2,
"regex": "",
"skipUrlSync": false,
"sort": 0,
@@ -3742,12 +3742,12 @@
"definition": "label_values(mysql_up, instance)",
"hide": 0,
"includeAll": true,
"label": "instance",
"label": "Instance",
"multi": true,
"name": "instance",
"options": [],
"query": "label_values(mysql_up, instance)",
"refresh": 1,
"refresh": 2,
"regex": "",
"skipUrlSync": false,
"sort": 0,

View File

@@ -328,7 +328,7 @@
"rgba(50, 172, 45, 0.97)"
],
"datasource": "${DS_PROMETHEUS}",
"format": "decbytes",
"format": "short",
"gauge": {
"maxValue": 100,
"minValue": 0,
@@ -411,7 +411,7 @@
"rgba(50, 172, 45, 0.97)"
],
"datasource": "${DS_PROMETHEUS}",
"format": "decbytes",
"format": "short",
"gauge": {
"maxValue": 100,
"minValue": 0,
@@ -1410,7 +1410,7 @@
"tableColumn": "",
"targets": [
{
"expr": "pg_settings_seq_page_cost",
"expr": "pg_settings_seq_page_cost{instance=\"$instance\"}",
"format": "time_series",
"intervalFactor": 1,
"refId": "A"
@@ -1872,7 +1872,7 @@
},
"yaxes": [
{
"format": "bytes",
"format": "short",
"label": null,
"logBase": 1,
"max": null,
@@ -1966,7 +1966,7 @@
},
"yaxes": [
{
"format": "bytes",
"format": "short",
"label": null,
"logBase": 1,
"max": null,
@@ -2060,7 +2060,7 @@
},
"yaxes": [
{
"format": "bytes",
"format": "short",
"label": null,
"logBase": 1,
"max": null,
@@ -2251,7 +2251,7 @@
},
"yaxes": [
{
"format": "bytes",
"format": "short",
"label": null,
"logBase": 1,
"max": null,
@@ -2439,7 +2439,7 @@
},
"yaxes": [
{
"format": "bytes",
"format": "short",
"label": null,
"logBase": 1,
"max": null,
@@ -2589,35 +2589,35 @@
"steppedLine": false,
"targets": [
{
"expr": "irate(pg_stat_bgwriter_buffers_backend{instance=\"$instance\"}[5m])",
"expr": "irate(pg_stat_bgwriter_buffers_backend_total{instance=\"$instance\"}[5m])",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "buffers_backend",
"refId": "A"
},
{
"expr": "irate(pg_stat_bgwriter_buffers_alloc{instance=\"$instance\"}[5m])",
"expr": "irate(pg_stat_bgwriter_buffers_alloc_total{instance=\"$instance\"}[5m])",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "buffers_alloc",
"refId": "B"
},
{
"expr": "irate(pg_stat_bgwriter_buffers_backend_fsync{instance=\"$instance\"}[5m])",
"expr": "irate(pg_stat_bgwriter_buffers_backend_fsync_total{instance=\"$instance\"}[5m])",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "backend_fsync",
"refId": "C"
},
{
"expr": "irate(pg_stat_bgwriter_buffers_checkpoint{instance=\"$instance\"}[5m])",
"expr": "irate(pg_stat_bgwriter_buffers_checkpoint_total{instance=\"$instance\"}[5m])",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "buffers_checkpoint",
"refId": "D"
},
{
"expr": "irate(pg_stat_bgwriter_buffers_clean{instance=\"$instance\"}[5m])",
"expr": "irate(pg_stat_bgwriter_buffers_clean_total{instance=\"$instance\"}[5m])",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "buffers_clean",
@@ -2886,14 +2886,14 @@
"steppedLine": false,
"targets": [
{
"expr": "irate(pg_stat_bgwriter_checkpoint_write_time{instance=\"$instance\"}[5m])",
"expr": "irate(pg_stat_bgwriter_checkpoint_write_time_total{instance=\"$instance\"}[5m])",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "write_time - Total amount of time that has been spent in the portion of checkpoint processing where files are written to disk.",
"refId": "B"
},
{
"expr": "irate(pg_stat_bgwriter_checkpoint_sync_time{instance=\"$instance\"}[5m])",
"expr": "irate(pg_stat_bgwriter_checkpoint_sync_time_total{instance=\"$instance\"}[5m])",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "sync_time - Total amount of time that has been spent in the portion of checkpoint processing where files are synchronized to disk.",
@@ -3164,4 +3164,4 @@
"title": "PostgreSQL Database",
"uid": "000000039",
"version": 1
}
}

File diff suppressed because it is too large.

View File

@@ -1,32 +1,43 @@
{ config, pkgs, values, ... }: let
{
config,
pkgs,
values,
...
}:
let
cfg = config.services.grafana;
in {
sops.secrets = let
owner = "grafana";
group = "grafana";
in {
"keys/grafana/secret_key" = { inherit owner group; };
"keys/grafana/admin_password" = { inherit owner group; };
};
in
{
sops.secrets =
let
owner = "grafana";
group = "grafana";
in
{
"keys/grafana/secret_key" = { inherit owner group; };
"keys/grafana/admin_password" = { inherit owner group; };
};
services.grafana = {
enable = true;
settings = let
# See https://grafana.com/docs/grafana/latest/setup-grafana/configure-grafana/#file-provider
secretFile = path: "$__file{${path}}";
in {
server = {
domain = "grafana.pvv.ntnu.no";
http_port = 2342;
http_addr = "127.0.0.1";
};
settings =
let
# See https://grafana.com/docs/grafana/latest/setup-grafana/configure-grafana/#file-provider
secretFile = path: "$__file{${path}}";
in
{
server = {
domain = "grafana.pvv.ntnu.no";
http_port = 2342;
http_addr = "127.0.0.1";
};
security = {
secret_key = secretFile config.sops.secrets."keys/grafana/secret_key".path;
admin_password = secretFile config.sops.secrets."keys/grafana/admin_password".path;
security = {
secret_key = secretFile config.sops.secrets."keys/grafana/secret_key".path;
admin_password = secretFile config.sops.secrets."keys/grafana/admin_password".path;
};
};
};
provision = {
enable = true;
@@ -47,13 +58,13 @@ in {
{
name = "Node Exporter Full";
type = "file";
url = "https://grafana.com/api/dashboards/1860/revisions/29/download";
url = "https://grafana.com/api/dashboards/1860/revisions/42/download";
options.path = dashboards/node-exporter-full.json;
}
{
name = "Matrix Synapse";
type = "file";
url = "https://raw.githubusercontent.com/matrix-org/synapse/develop/contrib/grafana/synapse.json";
url = "https://github.com/element-hq/synapse/raw/refs/heads/develop/contrib/grafana/synapse.json";
options.path = dashboards/synapse.json;
}
{
@@ -65,15 +76,9 @@ in {
{
name = "Postgresql";
type = "file";
url = "https://grafana.com/api/dashboards/9628/revisions/7/download";
url = "https://grafana.com/api/dashboards/9628/revisions/8/download";
options.path = dashboards/postgres.json;
}
{
name = "Go Processes (gogs)";
type = "file";
url = "https://grafana.com/api/dashboards/240/revisions/3/download";
options.path = dashboards/go-processes.json;
}
{
name = "Gitea Dashboard";
type = "file";
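
A note on the secretFile helper carried through this hunk: Grafana's file provider expands $__file{path} when it parses its configuration, so the sops-managed paths are dereferenced at runtime and the rendered grafana.ini never contains the secret values themselves. The shape of the helper:

    settings =
      let
        # https://grafana.com/docs/grafana/latest/setup-grafana/configure-grafana/#file-provider
        secretFile = path: "$__file{${path}}";
      in
      {
        security.admin_password = secretFile config.sops.secrets."keys/grafana/admin_password".path;
      };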

View File

@@ -3,7 +3,8 @@
let
cfg = config.services.loki;
stateDir = "/data/monitoring/loki";
in {
in
{
services.loki = {
enable = true;
configuration = {

View File

@@ -1,6 +1,8 @@
{ config, ... }: let
{ config, ... }:
let
stateDir = "/data/monitoring/prometheus";
in {
in
{
imports = [
./exim.nix
./gitea.nix

View File

@@ -5,9 +5,11 @@
{
job_name = "exim";
scrape_interval = "15s";
static_configs = [{
targets = [ "microbel.pvv.ntnu.no:9636" ];
}];
static_configs = [
{
targets = [ "microbel.pvv.ntnu.no:9636" ];
}
];
}
];
};

View File

@@ -1,16 +1,18 @@
{ ... }:
{
services.prometheus.scrapeConfigs = [{
job_name = "gitea";
scrape_interval = "60s";
scheme = "https";
services.prometheus.scrapeConfigs = [
{
job_name = "gitea";
scrape_interval = "60s";
scheme = "https";
static_configs = [
{
targets = [
"git.pvv.ntnu.no:443"
];
}
];
}];
static_configs = [
{
targets = [
"git.pvv.ntnu.no:443"
];
}
];
}
];
}

View File

@@ -1,4 +1,5 @@
{ config, ... }: let
{ config, ... }:
let
cfg = config.services.prometheus;
mkHostScrapeConfig = name: ports: {
@@ -9,29 +10,98 @@
defaultNodeExporterPort = 9100;
defaultSystemdExporterPort = 9101;
defaultNixosExporterPort = 9102;
in {
services.prometheus.scrapeConfigs = [{
job_name = "base_info";
static_configs = [
(mkHostScrapeConfig "ildkule" [ cfg.exporters.node.port cfg.exporters.systemd.port defaultNixosExporterPort ])
in
{
services.prometheus.scrapeConfigs = [
{
job_name = "base_info";
static_configs = [
(mkHostScrapeConfig "ildkule" [
cfg.exporters.node.port
cfg.exporters.systemd.port
defaultNixosExporterPort
])
(mkHostScrapeConfig "bekkalokk" [ defaultNodeExporterPort defaultSystemdExporterPort defaultNixosExporterPort ])
(mkHostScrapeConfig "bicep" [ defaultNodeExporterPort defaultSystemdExporterPort defaultNixosExporterPort ])
(mkHostScrapeConfig "brzeczyszczykiewicz" [ defaultNodeExporterPort defaultSystemdExporterPort defaultNixosExporterPort ])
(mkHostScrapeConfig "georg" [ defaultNodeExporterPort defaultSystemdExporterPort defaultNixosExporterPort ])
(mkHostScrapeConfig "kommode" [ defaultNodeExporterPort defaultSystemdExporterPort defaultNixosExporterPort ])
(mkHostScrapeConfig "ustetind" [ defaultNodeExporterPort defaultSystemdExporterPort defaultNixosExporterPort ])
(mkHostScrapeConfig "wenche" [ defaultNodeExporterPort defaultSystemdExporterPort defaultNixosExporterPort ])
(mkHostScrapeConfig "bekkalokk" [
defaultNodeExporterPort
defaultSystemdExporterPort
defaultNixosExporterPort
])
(mkHostScrapeConfig "bicep" [
defaultNodeExporterPort
defaultSystemdExporterPort
defaultNixosExporterPort
])
(mkHostScrapeConfig "brzeczyszczykiewicz" [
defaultNodeExporterPort
defaultSystemdExporterPort
defaultNixosExporterPort
])
(mkHostScrapeConfig "georg" [
defaultNodeExporterPort
defaultSystemdExporterPort
defaultNixosExporterPort
])
(mkHostScrapeConfig "gluttony" [
defaultNodeExporterPort
defaultSystemdExporterPort
defaultNixosExporterPort
])
(mkHostScrapeConfig "kommode" [
defaultNodeExporterPort
defaultSystemdExporterPort
defaultNixosExporterPort
])
(mkHostScrapeConfig "lupine-1" [
defaultNodeExporterPort
defaultSystemdExporterPort
defaultNixosExporterPort
])
(mkHostScrapeConfig "lupine-2" [
defaultNodeExporterPort
defaultSystemdExporterPort
defaultNixosExporterPort
])
(mkHostScrapeConfig "lupine-3" [
defaultNodeExporterPort
defaultSystemdExporterPort
defaultNixosExporterPort
])
(mkHostScrapeConfig "lupine-4" [
defaultNodeExporterPort
defaultSystemdExporterPort
defaultNixosExporterPort
])
(mkHostScrapeConfig "lupine-5" [
defaultNodeExporterPort
defaultSystemdExporterPort
defaultNixosExporterPort
])
(mkHostScrapeConfig "temmie" [
defaultNodeExporterPort
defaultSystemdExporterPort
defaultNixosExporterPort
])
(mkHostScrapeConfig "ustetind" [
defaultNodeExporterPort
defaultSystemdExporterPort
defaultNixosExporterPort
])
(mkHostScrapeConfig "wenche" [
defaultNodeExporterPort
defaultSystemdExporterPort
defaultNixosExporterPort
])
(mkHostScrapeConfig "lupine-1" [ defaultNodeExporterPort defaultSystemdExporterPort defaultNixosExporterPort ])
# (mkHostScrapeConfig "lupine-2" [ defaultNodeExporterPort defaultSystemdExporterPort ])
(mkHostScrapeConfig "lupine-3" [ defaultNodeExporterPort defaultSystemdExporterPort defaultNixosExporterPort ])
(mkHostScrapeConfig "lupine-4" [ defaultNodeExporterPort defaultSystemdExporterPort defaultNixosExporterPort ])
(mkHostScrapeConfig "lupine-5" [ defaultNodeExporterPort defaultSystemdExporterPort defaultNixosExporterPort ])
(mkHostScrapeConfig "skrott" [
defaultNodeExporterPort
defaultSystemdExporterPort
])
(mkHostScrapeConfig "hildring" [ defaultNodeExporterPort ])
(mkHostScrapeConfig "isvegg" [ defaultNodeExporterPort ])
(mkHostScrapeConfig "microbel" [ defaultNodeExporterPort ])
];
}];
(mkHostScrapeConfig "hildring" [ defaultNodeExporterPort ])
(mkHostScrapeConfig "isvegg" [ defaultNodeExporterPort ])
(mkHostScrapeConfig "microbel" [ defaultNodeExporterPort ])
];
}
];
}
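
mkHostScrapeConfig's body falls outside this hunk, but its call sites pin down the interface: a host name and a list of exporter ports become one static_configs entry. A plausible reconstruction, offered purely as an assumption (the real helper and any labels it sets may differ):

    mkHostScrapeConfig = name: ports: {
      # hypothetical body; the hunk header only shows the signature
      targets = map (port: "${name}.pvv.ntnu.no:${toString port}") ports;
    };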

View File

@@ -1,40 +1,44 @@
{ ... }:
{
services.prometheus.scrapeConfigs = [{
job_name = "synapse";
scrape_interval = "15s";
scheme = "https";
services.prometheus.scrapeConfigs = [
{
job_name = "synapse";
scrape_interval = "15s";
scheme = "https";
http_sd_configs = [{
url = "https://matrix.pvv.ntnu.no/metrics/config.json";
}];
http_sd_configs = [
{
url = "https://matrix.pvv.ntnu.no/metrics/config.json";
}
];
relabel_configs = [
{
source_labels = [ "__address__" ];
regex = "[^/]+(/.*)";
target_label = "__metrics_path__";
}
{
source_labels = [ "__address__" ];
regex = "([^/]+)/.*";
target_label = "instance";
}
{
source_labels = [ "__address__" ];
regex = "[^/]+\\/+[^/]+/(.*)/\\d+$";
target_label = "job";
}
{
source_labels = [ "__address__" ];
regex = "[^/]+\\/+[^/]+/.*/(\\d+)$";
target_label = "index";
}
{
source_labels = [ "__address__" ];
regex = "([^/]+)/.*";
target_label = "__address__";
}
];
}];
relabel_configs = [
{
source_labels = [ "__address__" ];
regex = "[^/]+(/.*)";
target_label = "__metrics_path__";
}
{
source_labels = [ "__address__" ];
regex = "([^/]+)/.*";
target_label = "instance";
}
{
source_labels = [ "__address__" ];
regex = "[^/]+\\/+[^/]+/(.*)/\\d+$";
target_label = "job";
}
{
source_labels = [ "__address__" ];
regex = "[^/]+\\/+[^/]+/.*/(\\d+)$";
target_label = "index";
}
{
source_labels = [ "__address__" ];
regex = "([^/]+)/.*";
target_label = "__address__";
}
];
}
];
}
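
All five relabel rules slice the same synthetic __address__ of the form HOST:PORT/PATH/JOB/INDEX served by the http_sd endpoint; __address__ itself is rewritten last, so the earlier rules still see the full string. For a hypothetical entry the captures land as follows:

    # __address__ = "matrix.pvv.ntnu.no:443/metrics/synapse/0"     (hypothetical)
    #   __metrics_path__ = "/metrics/synapse/0"      via [^/]+(/.*)
    #   instance         = "matrix.pvv.ntnu.no:443"  via ([^/]+)/.*
    #   job              = "synapse"                 via [^/]+\/+[^/]+/(.*)/\d+$
    #   index            = "0"                       via [^/]+\/+[^/]+/.*/(\d+)$
    #   __address__      = "matrix.pvv.ntnu.no:443"  via ([^/]+)/.*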

View File

@@ -1,36 +1,42 @@
{ config, ... }: let
{ config, ... }:
let
cfg = config.services.prometheus;
in {
in
{
sops = {
secrets."config/mysqld_exporter_password" = { };
templates."mysqld_exporter.conf" = {
restartUnits = [ "prometheus-mysqld-exporter.service" ];
content = let
inherit (config.sops) placeholder;
in ''
[client]
host = mysql.pvv.ntnu.no
port = 3306
user = prometheus_mysqld_exporter
password = ${placeholder."config/mysqld_exporter_password"}
'';
content =
let
inherit (config.sops) placeholder;
in
''
[client]
host = mysql.pvv.ntnu.no
port = 3306
user = prometheus_mysqld_exporter
password = ${placeholder."config/mysqld_exporter_password"}
'';
};
};
services.prometheus = {
scrapeConfigs = [{
job_name = "mysql";
scheme = "http";
metrics_path = cfg.exporters.mysqld.telemetryPath;
static_configs = [
{
targets = [
"localhost:${toString cfg.exporters.mysqld.port}"
];
}
];
}];
scrapeConfigs = [
{
job_name = "mysql";
scheme = "http";
metrics_path = cfg.exporters.mysqld.telemetryPath;
static_configs = [
{
targets = [
"localhost:${toString cfg.exporters.mysqld.port}"
];
}
];
}
];
exporters.mysqld = {
enable = true;

View File

@@ -1,9 +1,17 @@
{ pkgs, lib, config, values, ... }: let
{
pkgs,
lib,
config,
values,
...
}:
let
cfg = config.services.prometheus;
in {
in
{
sops.secrets = {
"keys/postgres/postgres_exporter_env" = {};
"keys/postgres/postgres_exporter_knakelibrak_env" = {};
"keys/postgres/postgres_exporter_env" = { };
"keys/postgres/postgres_exporter_knakelibrak_env" = { };
};
services.prometheus = {
@@ -11,22 +19,26 @@ in {
{
job_name = "postgres";
scrape_interval = "15s";
static_configs = [{
targets = [ "localhost:${toString cfg.exporters.postgres.port}" ];
labels = {
server = "bicep";
};
}];
static_configs = [
{
targets = [ "localhost:${toString cfg.exporters.postgres.port}" ];
labels = {
server = "bicep";
};
}
];
}
{
job_name = "postgres-knakelibrak";
scrape_interval = "15s";
static_configs = [{
targets = [ "localhost:${toString (cfg.exporters.postgres.port + 1)}" ];
labels = {
server = "knakelibrak";
};
}];
static_configs = [
{
targets = [ "localhost:${toString (cfg.exporters.postgres.port + 1)}" ];
labels = {
server = "knakelibrak";
};
}
];
}
];
@@ -37,9 +49,11 @@ in {
};
};
systemd.services.prometheus-postgres-exporter-knakelibrak.serviceConfig = let
localCfg = config.services.prometheus.exporters.postgres;
in lib.recursiveUpdate config.systemd.services.prometheus-postgres-exporter.serviceConfig {
systemd.services.prometheus-postgres-exporter-knakelibrak.serviceConfig =
let
localCfg = config.services.prometheus.exporters.postgres;
in
lib.recursiveUpdate config.systemd.services.prometheus-postgres-exporter.serviceConfig {
EnvironmentFile = config.sops.secrets."keys/postgres/postgres_exporter_knakelibrak_env".path;
ExecStart = ''
${pkgs.prometheus-postgres-exporter}/bin/postgres_exporter \

View File

@@ -1,9 +1,15 @@
{ config, pkgs, lib, ... }:
{
config,
pkgs,
lib,
...
}:
let
cfg = config.services.uptime-kuma;
domain = "status.pvv.ntnu.no";
stateDir = "/data/monitoring/uptime-kuma";
in {
in
{
services.uptime-kuma = {
enable = true;
settings = {

View File

@@ -1,9 +1,15 @@
{ pkgs, values, fp, ... }:
{
pkgs,
values,
fp,
...
}:
{
imports = [
# Include the results of the hardware scan.
./hardware-configuration.nix
(fp /base)
./disks.nix
./services/gitea
./services/nginx.nix
@@ -11,7 +17,10 @@
systemd.network.networks."30-ens18" = values.defaultNetworkConfig // {
matchConfig.Name = "ens18";
address = with values.hosts.kommode; [ (ipv4 + "/25") (ipv6 + "/64") ];
address = with values.hosts.kommode; [
(ipv4 + "/25")
(ipv6 + "/64")
];
};
services.btrfs.autoScrub.enable = true;

hosts/kommode/disks.nix Normal file
View File

@@ -0,0 +1,80 @@
{ lib, ... }:
{
disko.devices = {
disk = {
sda = {
type = "disk";
device = "/dev/sda";
content = {
type = "gpt";
partitions = {
root = {
name = "root";
label = "root";
start = "1MiB";
end = "-5G";
content = {
type = "btrfs";
extraArgs = [ "-f" ]; # Override existing partition
# subvolumes = let
# makeSnapshottable = subvolPath: mountOptions: let
# name = lib.replaceString "/" "-" subvolPath;
# in {
# "@${name}/active" = {
# mountpoint = subvolPath;
# inherit mountOptions;
# };
# "@${name}/snapshots" = {
# mountpoint = "${subvolPath}/.snapshots";
# inherit mountOptions;
# };
# };
# in {
# "@" = { };
# "@/swap" = {
# mountpoint = "/.swapvol";
# swap.swapfile.size = "4G";
# };
# "@/root" = {
# mountpoint = "/";
# mountOptions = [ "compress=zstd" "noatime" ];
# };
# }
# // (makeSnapshottable "/home" [ "compress=zstd" "noatime" ])
# // (makeSnapshottable "/nix" [ "compress=zstd" "noatime" ])
# // (makeSnapshottable "/var/lib" [ "compress=zstd" "noatime" ])
# // (makeSnapshottable "/var/log" [ "compress=zstd" "noatime" ])
# // (makeSnapshottable "/var/cache" [ "compress=zstd" "noatime" ]);
# swap.swapfile.size = "4G";
mountpoint = "/";
};
};
swap = {
name = "swap";
label = "swap";
start = "-5G";
end = "-1G";
content.type = "swap";
};
ESP = {
name = "ESP";
label = "ESP";
start = "-1G";
end = "100%";
type = "EF00";
content = {
type = "filesystem";
format = "vfat";
mountpoint = "/boot";
mountOptions = [ "umask=0077" ];
};
};
};
};
};
};
};
}
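
The start/end values mix absolute and end-relative offsets: negative values count back from the end of the device, so the layout adapts to any disk size. For a hypothetical 100 GiB /dev/sda this resolves to:

    # root  1 MiB  .. -5 GiB  -> ~95 GiB  btrfs, mounted at /
    # swap  -5 GiB .. -1 GiB  ->   4 GiB  swap
    # ESP   -1 GiB .. 100%    ->   1 GiB  vfat, mounted at /boot (umask=0077)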

View File

@@ -1,33 +1,31 @@
# Do not modify this file! It was generated by 'nixos-generate-config'
# and may be overwritten by future invocations. Please make changes
# to /etc/nixos/configuration.nix instead.
{ config, lib, pkgs, modulesPath, ... }:
{
config,
lib,
pkgs,
modulesPath,
...
}:
{
imports =
[ (modulesPath + "/profiles/qemu-guest.nix")
];
imports = [
(modulesPath + "/profiles/qemu-guest.nix")
];
boot.initrd.availableKernelModules = [ "ata_piix" "uhci_hcd" "virtio_pci" "virtio_scsi" "sd_mod" "sr_mod" ];
boot.initrd.availableKernelModules = [
"ata_piix"
"uhci_hcd"
"virtio_pci"
"virtio_scsi"
"sd_mod"
"sr_mod"
];
boot.initrd.kernelModules = [ ];
boot.kernelModules = [ ];
boot.extraModulePackages = [ ];
fileSystems."/" =
{ device = "/dev/disk/by-uuid/d421538f-a260-44ae-8e03-47cac369dcc1";
fsType = "btrfs";
};
fileSystems."/boot" =
{ device = "/dev/disk/by-uuid/86CD-4C23";
fsType = "vfat";
options = [ "fmask=0077" "dmask=0077" ];
};
swapDevices =
[ { device = "/dev/disk/by-uuid/4cfbb41e-801f-40dd-8c58-0a0c1a6025f6"; }
];
# Enables DHCP on each ethernet and wireless interface. In case of scripted networking
# (the default) this is the recommended approach. When using systemd-networkd it's
# still possible to use this option, but it's recommended to use it in conjunction

View File

@@ -1,4 +1,10 @@
{ config, pkgs, lib, fp, ... }:
{
config,
pkgs,
lib,
fp,
...
}:
let
cfg = config.services.gitea;
in
@@ -10,54 +16,117 @@ in
catppuccin = pkgs.gitea-theme-catppuccin;
};
services.gitea.settings = {
ui = {
DEFAULT_THEME = "gitea-auto";
REACTIONS = lib.concatStringsSep "," [
"+1"
"-1"
"laugh"
"confused"
"heart"
"hooray"
"rocket"
"eyes"
"100"
"anger"
"astonished"
"no_good"
"ok_hand"
"pensive"
"pizza"
"point_up"
"sob"
"skull"
"upside_down_face"
"shrug"
"huh"
"bruh"
"okiedokie"
"grr"
];
CUSTOM_EMOJIS = lib.concatStringsSep "," [
"bruh"
"grr"
"huh"
"ohyeah"
];
};
"ui.meta" = {
AUTHOR = "Programvareverkstedet";
DESCRIPTION = "Bokstavelig talt programvareverkstedet";
KEYWORDS = lib.concatStringsSep "," [
"git"
"hackerspace"
"nix"
"open source"
"foss"
"organization"
"software"
"student"
];
};
};
systemd.services.gitea-customization = lib.mkIf cfg.enable {
description = "Install extra customization in gitea's CUSTOM_DIR";
wantedBy = [ "gitea.service" ];
requiredBy = [ "gitea.service" ];
serviceConfig = {
serviceConfig = {
Type = "oneshot";
User = cfg.user;
Group = cfg.group;
};
script = let
logo-svg = fp /assets/logo_blue_regular.svg;
logo-png = fp /assets/logo_blue_regular.png;
script =
let
logo-svg = fp /assets/logo_blue_regular.svg;
logo-png = fp /assets/logo_blue_regular.png;
extraLinks = pkgs.writeText "gitea-extra-links.tmpl" ''
<a class="item" href="https://git.pvv.ntnu.no/Drift/-/projects/4">Tokyo Drift Issues</a>
extraLinks = pkgs.writeText "gitea-extra-links.tmpl" ''
<a class="item" href="https://git.pvv.ntnu.no/Drift/-/projects/4">Tokyo Drift Issues</a>
'';
extraLinksFooter = pkgs.writeText "gitea-extra-links-footer.tmpl" ''
<a class="item" href="https://www.pvv.ntnu.no/">PVV</a>
<a class="item" href="https://wiki.pvv.ntnu.no/">Wiki</a>
<a class="item" href="https://wiki.pvv.ntnu.no/wiki/Tjenester/Kodelager">PVV Gitea Howto</a>
'';
project-labels = (pkgs.formats.yaml { }).generate "gitea-project-labels.yaml" {
labels = lib.importJSON ./labels/projects.json;
};
customTemplates =
pkgs.runCommandLocal "gitea-templates"
{
nativeBuildInputs = with pkgs; [
coreutils
gnused
];
}
''
# Bigger icons
install -Dm444 "${cfg.package.src}/templates/repo/icon.tmpl" "$out/repo/icon.tmpl"
sed -i -e 's/24/60/g' "$out/repo/icon.tmpl"
'';
in
''
install -Dm444 ${logo-svg} ${cfg.customDir}/public/assets/img/logo.svg
install -Dm444 ${logo-png} ${cfg.customDir}/public/assets/img/logo.png
install -Dm444 ${./loading.apng} ${cfg.customDir}/public/assets/img/loading.png
install -Dm444 ${extraLinks} ${cfg.customDir}/templates/custom/extra_links.tmpl
install -Dm444 ${extraLinksFooter} ${cfg.customDir}/templates/custom/extra_links_footer.tmpl
install -Dm444 ${project-labels} ${cfg.customDir}/options/label/project-labels.yaml
install -Dm644 ${./emotes/bruh.png} ${cfg.customDir}/public/assets/img/emoji/bruh.png
install -Dm644 ${./emotes/huh.gif} ${cfg.customDir}/public/assets/img/emoji/huh.png
install -Dm644 ${./emotes/grr.png} ${cfg.customDir}/public/assets/img/emoji/grr.png
install -Dm644 ${./emotes/okiedokie.jpg} ${cfg.customDir}/public/assets/img/emoji/okiedokie.png
"${lib.getExe pkgs.rsync}" -a "${customTemplates}/" ${cfg.customDir}/templates/
'';
extraLinksFooter = pkgs.writeText "gitea-extra-links-footer.tmpl" ''
<a class="item" href="https://www.pvv.ntnu.no/">PVV</a>
<a class="item" href="https://wiki.pvv.ntnu.no/">Wiki</a>
<a class="item" href="https://wiki.pvv.ntnu.no/wiki/Tjenester/Kodelager">PVV Gitea Howto</a>
'';
project-labels = (pkgs.formats.yaml { }).generate "gitea-project-labels.yaml" {
labels = lib.importJSON ./labels/projects.json;
};
customTemplates = pkgs.runCommandLocal "gitea-templates" {
nativeBuildInputs = with pkgs; [
coreutils
gnused
];
} ''
# Bigger icons
install -Dm444 "${cfg.package.src}/templates/repo/icon.tmpl" "$out/repo/icon.tmpl"
sed -i -e 's/24/60/g' "$out/repo/icon.tmpl"
'';
in ''
install -Dm444 ${logo-svg} ${cfg.customDir}/public/assets/img/logo.svg
install -Dm444 ${logo-png} ${cfg.customDir}/public/assets/img/logo.png
install -Dm444 ${./loading.apng} ${cfg.customDir}/public/assets/img/loading.png
install -Dm444 ${extraLinks} ${cfg.customDir}/templates/custom/extra_links.tmpl
install -Dm444 ${extraLinksFooter} ${cfg.customDir}/templates/custom/extra_links_footer.tmpl
install -Dm444 ${project-labels} ${cfg.customDir}/options/label/project-labels.yaml
"${lib.getExe pkgs.rsync}" -a "${customTemplates}/" ${cfg.customDir}/templates/
'';
};
}
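
Each name listed under CUSTOM_EMOJIS is looked up as public/assets/img/emoji/<name>.png inside CUSTOM_DIR, which is presumably why huh.gif is installed under a .png file name above. The pairing, reduced to one emoji:

    services.gitea.settings.ui.CUSTOM_EMOJIS = "bruh";
    systemd.services.gitea-customization.script = ''
      # served for :bruh: -- the lookup path is fixed, extension included
      install -Dm644 ${./emotes/bruh.png} ${cfg.customDir}/public/assets/img/emoji/bruh.png
    '';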

Binary file not shown. (added; 7.3 KiB)

Binary file not shown. (added; 28 KiB)

Binary file not shown. (added; 206 KiB)

Binary file not shown. (added; 145 KiB)

View File

@@ -1,9 +1,17 @@
{ config, values, lib, pkgs, unstablePkgs, ... }:
{
config,
values,
lib,
pkgs,
unstablePkgs,
...
}:
let
cfg = config.services.gitea;
domain = "git.pvv.ntnu.no";
sshPort = 2222;
in {
sshPort = 2222;
in
{
imports = [
./customization
./gpg.nix
@@ -11,19 +19,21 @@ in {
./web-secret-provider
];
sops.secrets = let
defaultConfig = {
owner = "gitea";
group = "gitea";
restartUnits = [ "gitea.service" ];
sops.secrets =
let
defaultConfig = {
owner = "gitea";
group = "gitea";
restartUnits = [ "gitea.service" ];
};
in
{
"gitea/database" = defaultConfig;
"gitea/email-password" = defaultConfig;
"gitea/lfs-jwt-secret" = defaultConfig;
"gitea/oauth2-jwt-secret" = defaultConfig;
"gitea/secret-key" = defaultConfig;
};
in {
"gitea/database" = defaultConfig;
"gitea/email-password" = defaultConfig;
"gitea/lfs-jwt-secret" = defaultConfig;
"gitea/oauth2-jwt-secret" = defaultConfig;
"gitea/secret-key" = defaultConfig;
};
services.gitea = {
enable = true;
@@ -44,7 +54,7 @@ in {
# https://docs.gitea.com/administration/config-cheat-sheet
settings = {
server = {
DOMAIN = domain;
DOMAIN = domain;
ROOT_URL = "https://${domain}/";
PROTOCOL = "http+unix";
SSH_PORT = sshPort;
@@ -83,11 +93,24 @@ in {
AUTO_WATCH_NEW_REPOS = false;
};
admin.DEFAULT_EMAIL_NOTIFICATIONS = "onmention";
session.COOKIE_SECURE = true;
security = {
SECRET_KEY = lib.mkForce "";
SECRET_KEY_URI = "file:${config.sops.secrets."gitea/secret-key".path}";
};
cache = {
ADAPTER = "redis";
HOST = "redis+socket://${config.services.redis.servers.gitea.unixSocket}?db=0";
ITEM_TTL = "72h";
};
session = {
COOKIE_SECURE = true;
PROVIDER = "redis";
PROVIDER_CONFIG = "redis+socket://${config.services.redis.servers.gitea.unixSocket}?db=1";
};
queue = {
TYPE = "redis";
CONN_STR = "redis+socket://${config.services.redis.servers.gitea.unixSocket}?db=2";
};
database.LOG_SQL = false;
repository = {
PREFERRED_LICENSES = lib.concatStringsSep "," [
@@ -128,31 +151,6 @@ in {
AVATAR_MAX_ORIGIN_SIZE = 1024 * 1024 * 2;
};
actions.ENABLED = true;
ui = {
REACTIONS = lib.concatStringsSep "," [
"+1"
"-1"
"laugh"
"confused"
"heart"
"hooray"
"rocket"
"eyes"
"100"
"anger"
"astonished"
"no_good"
"ok_hand"
"pensive"
"pizza"
"point_up"
"sob"
"skull"
"upside_down_face"
"shrug"
];
};
"ui.meta".DESCRIPTION = "Bokstavelig talt programvareverkstedet";
};
dump = {
@@ -164,12 +162,26 @@ in {
environment.systemPackages = [ cfg.package ];
systemd.services.gitea.serviceConfig.CPUSchedulingPolicy = "batch";
systemd.services.gitea = lib.mkIf cfg.enable {
wants = [ "redis-gitea.service" ];
after = [ "redis-gitea.service" ];
systemd.services.gitea.serviceConfig.CacheDirectory = "gitea/repo-archive";
systemd.services.gitea.serviceConfig.BindPaths = [
"%C/gitea/repo-archive:${cfg.stateDir}/data/repo-archive"
];
serviceConfig = {
CPUSchedulingPolicy = "batch";
CacheDirectory = "gitea/repo-archive";
BindPaths = [
"%C/gitea/repo-archive:${cfg.stateDir}/data/repo-archive"
];
};
};
services.redis.servers.gitea = lib.mkIf cfg.enable {
enable = true;
user = config.services.gitea.user;
save = [ ];
openFirewall = false;
port = 5698;
};
services.nginx.virtualHosts."${domain}" = {
forceSSL = true;
@@ -195,30 +207,51 @@ in {
networking.firewall.allowedTCPPorts = [ sshPort ];
services.rsync-pull-targets = {
enable = true;
locations.${cfg.dump.backupDir} = {
user = "root";
rrsyncArgs.ro = true;
authorizedKeysAttrs = [
"restrict"
"from=\"principal.pvv.ntnu.no,${values.hosts.principal.ipv6},${values.hosts.principal.ipv4}\""
"no-agent-forwarding"
"no-port-forwarding"
"no-pty"
"no-X11-forwarding"
];
publicKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIGpMVrOppyqYaDiAhqmAuOaRsubFvcQGBGyz+NHB6+0o gitea rsync backup";
};
};
systemd.services.gitea-dump = {
serviceConfig.ExecStart = let
args = lib.cli.toGNUCommandLineShell { } {
type = cfg.dump.type;
serviceConfig.ExecStart =
let
args = lib.cli.toGNUCommandLineShell { } {
type = cfg.dump.type;
# This should be declarative on nixos, no need to backup.
skip-custom-dir = true;
# This should be declarative on nixos, no need to backup.
skip-custom-dir = true;
# This can be regenerated, no need to backup
skip-index = true;
# This can be regenerated, no need to backup
skip-index = true;
# Logs are stored in the systemd journal
skip-log = true;
};
in lib.mkForce "${lib.getExe cfg.package} ${args}";
# Logs are stored in the systemd journal
skip-log = true;
};
in
lib.mkForce "${lib.getExe cfg.package} ${args}";
# Only keep n backup files at a time
postStop = let
cu = prog: "'${lib.getExe' pkgs.coreutils prog}'";
backupCount = 3;
in ''
for file in $(${cu "ls"} -t1 '${cfg.dump.backupDir}' | ${cu "sort"} --reverse | ${cu "tail"} -n+${toString (backupCount + 1)}); do
${cu "rm"} "$file"
done
postStop =
let
cu = prog: "'${lib.getExe' pkgs.coreutils prog}'";
backupCount = 3;
in
''
for file in $(${cu "ls"} -t1 '${cfg.dump.backupDir}' | ${cu "sort"} --reverse | ${cu "tail"} -n+${toString (backupCount + 1)}); do
${cu "rm"} "$file"
done
'';
};
}
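
Cache, sessions and work queues now share one dedicated redis server over a unix socket, separated only by the ?db=N logical-database suffix, so flushing or inspecting one subsystem cannot clobber another. The three connection strings side by side (sock abbreviates services.redis.servers.gitea.unixSocket):

    cache.HOST              = "redis+socket://${sock}?db=0";
    session.PROVIDER_CONFIG = "redis+socket://${sock}?db=1";
    queue.CONN_STR          = "redis+socket://${sock}?db=2";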

View File

@@ -1,4 +1,9 @@
{ config, pkgs, lib, ... }:
{
config,
pkgs,
lib,
...
}:
let
cfg = config.services.gitea;
GNUPGHOME = "${config.users.users.gitea.home}/gnupg";

View File

@@ -1,4 +1,9 @@
{ config, pkgs, lib, ... }:
{
config,
pkgs,
lib,
...
}:
let
cfg = config.services.gitea;
in
@@ -11,7 +16,7 @@ in
systemd.services.gitea-import-users = lib.mkIf cfg.enable {
enable = true;
preStart=''${pkgs.rsync}/bin/rsync -e "${pkgs.openssh}/bin/ssh -o UserKnownHostsFile=$CREDENTIALS_DIRECTORY/ssh-known-hosts -i $CREDENTIALS_DIRECTORY/sshkey" -a pvv@smtp.pvv.ntnu.no:/etc/passwd /run/gitea-import-users/passwd'';
preStart = ''${pkgs.rsync}/bin/rsync -e "${pkgs.openssh}/bin/ssh -o UserKnownHostsFile=$CREDENTIALS_DIRECTORY/ssh-known-hosts -i $CREDENTIALS_DIRECTORY/sshkey" -a pvv@smtp.pvv.ntnu.no:/etc/passwd /run/gitea-import-users/passwd'';
environment.PASSWD_FILE_PATH = "/run/gitea-import-users/passwd";
serviceConfig = {
ExecStart = pkgs.writers.writePython3 "gitea-import-users" {
@@ -20,12 +25,12 @@ in
];
libraries = with pkgs.python3Packages; [ requests ];
} (builtins.readFile ./gitea-import-users.py);
LoadCredential=[
LoadCredential = [
"sshkey:${config.sops.secrets."gitea/passwd-ssh-key".path}"
"ssh-known-hosts:${config.sops.secrets."gitea/ssh-known-hosts".path}"
];
DynamicUser="yes";
EnvironmentFile=config.sops.secrets."gitea/import-user-env".path;
DynamicUser = "yes";
EnvironmentFile = config.sops.secrets."gitea/import-user-env".path;
RuntimeDirectory = "gitea-import-users";
};
};

Some files were not shown because too many files have changed in this diff.