Compare commits


1 commit

SHA1: 7e7b576540
Message: WIP: deploy pvv-doorbell-bot
Date: 2024-08-24 22:50:26 +02:00
132 changed files with 1051 additions and 6396 deletions
.mailmap
.sops.yaml
README.MD
base.nix
base
flake.lock
flake.nix
hosts
justfile
keys
misc
modules
packages
secrets
shell.nix
shells
users
values.nix

@@ -1,25 +0,0 @@
Daniel Løvbrøtte Olsen <danio@pvv.ntnu.no> <daniel.olsen99@gmail.com>
Daniel Løvbrøtte Olsen <danio@pvv.ntnu.no> Daniel <danio@pvv.ntnu.no>
Daniel Løvbrøtte Olsen <danio@pvv.ntnu.no> Daniel Lovbrotte Olsen <danio@pvv.ntnu.no>
Daniel Løvbrøtte Olsen <danio@pvv.ntnu.no> Daniel Olsen <danio@pvv.ntnu.no>
Daniel Løvbrøtte Olsen <danio@pvv.ntnu.no> danio <danio@pvv.ntnu.no>
Daniel Løvbrøtte Olsen <danio@pvv.ntnu.no> Daniel Olsen <danio@bicep.pvv.ntnu.no>
Øystein Kristoffer Tveit <oysteikt@pvv.ntnu.no> h7x4 <h7x4@nani.wtf>
Øystein Kristoffer Tveit <oysteikt@pvv.ntnu.no> Øystein Tveit <oysteikt@pvv.ntnu.no>
Øystein Kristoffer Tveit <oysteikt@pvv.ntnu.no> oysteikt <oysteikt@pvv.ntnu.no>
Øystein Kristoffer Tveit <oysteikt@pvv.ntnu.no> Øystein <oysteikt@pvv.org>
Øystein Kristoffer Tveit <oysteikt@pvv.ntnu.no> Oystein Kristoffer Tveit <oysteikt@pvv.ntnu.no>
Felix Albrigtsen <felixalb@pvv.ntnu.no> <felix@albrigtsen.it>
Felix Albrigtsen <felixalb@pvv.ntnu.no> <felixalbrigtsen@gmail.com>
Felix Albrigtsen <felixalb@pvv.ntnu.no> felixalb <felixalb@pvv.ntnu.no>
Peder Bergebakken Sundt <pederbs@pvv.ntnu.no> <pbsds@hotmail.com>
Adrian Gunnar Lauterer <adriangl@pvv.ntnu.no> Adrian G L <adrian@lauterer.it>
Adrian Gunnar Lauterer <adriangl@pvv.ntnu.no> Adrian Gunnar Lauterer <adrian@lauterer.it>
Fredrik Robertsen <frero@pvv.ntnu.no> frero <frero@pvv.ntnu.no>
Fredrik Robertsen <frero@pvv.ntnu.no> fredrikr79 <fredrikrobertsen7@gmail.com>

@@ -13,13 +13,6 @@ keys:
- &host_ildkule age1x28hmzvuv6f2n66c0jtqcca3h9rput8d7j5uek6jcpx8n9egd52sqpejq0
- &host_bekkalokk age12nj59tguy9wg882updc2vjdusx5srnxmjyfaqve4zx6jnnsaw3qsyjq6zd
- &host_bicep age1sl43gc9cw939z5tgha2lpwf0xxxgcnlw7w4xem4sqgmt2pt264vq0dmwx2
- &host_ustetind age1hffjafs4slznksefmtqrlj7rdaqgzqncn4un938rhr053237ry8s3rs0v8
- &host_kommode age1mt4d0hg5g76qp7j0884llemy0k2ymr5up8vfudz6vzvsflk5nptqqd32ly
- &host_lupine-1 age1fkrypl6fu4ldsa7te4g3v4qsegnk7sd6qhkquuwzh04vguy96qus08902e
- &host_lupine-2 age1mu0ej57n4s30ghealhyju3enls83qyjua69986la35t2yh0q2s0seruz5n
- &host_lupine-3 age1j2u876z8hu87q5npfxzzpfgllyw8ypj66d7cgelmzmnrf3xud34qzkntp9
- &host_lupine-4 age1t8zlawqkmhye737pn8yx0z3p9cl947d9ktv2cajdc6hnvn52d3fsc59s2k
- &host_lupine-5 age199zkqq4jp4yc3d0hx2q0ksxdtp42xhmjsqwyngh8tswuck34ke3smrfyqu
creation_rules:
# Global secrets
@@ -50,18 +43,6 @@ creation_rules:
pgp:
- *user_oysteikt
- path_regex: secrets/kommode/[^/]+\.yaml$
key_groups:
- age:
- *host_kommode
- *user_danio
- *user_felixalb
- *user_pederbs_sopp
- *user_pederbs_nord
- *user_pederbs_bjarte
pgp:
- *user_oysteikt
- path_regex: secrets/jokum/[^/]+\.yaml$
key_groups:
- age:
@@ -97,31 +78,3 @@ creation_rules:
- *user_pederbs_bjarte
pgp:
- *user_oysteikt
- path_regex: secrets/ustetind/[^/]+\.yaml$
key_groups:
- age:
- *host_ustetind
- *user_danio
- *user_felixalb
- *user_pederbs_sopp
- *user_pederbs_nord
- *user_pederbs_bjarte
pgp:
- *user_oysteikt
- path_regex: secrets/lupine/[^/]+\.yaml$
key_groups:
- age:
- *host_lupine-1
- *host_lupine-2
- *host_lupine-3
- *host_lupine-4
- *host_lupine-5
- *user_danio
- *user_felixalb
- *user_pederbs_sopp
- *user_pederbs_nord
- *user_pederbs_bjarte
pgp:
- *user_oysteikt

@@ -26,14 +26,10 @@ It is probably smart to make a PR first if you are not used to nix yet.
Within 24h all the systems will pull down the new configuration and deploy it.
You can force a machine to update itself before this by running:
`nixos-rebuild switch --update-input nixpkgs --update-input nixpkgs-unstable --no-write-lock-file --refresh --upgrade --flake git+https://git.pvv.ntnu.no/Drift/pvv-nixos-config.git`
`nixos-rebuild switch --update-input nixpkgs --update-input nixpkgs-unstable --no-write-lock-file --refresh --flake git+https://git.pvv.ntnu.no/Drift/pvv-nixos-config.git --upgrade`
as root on the machine.
If you do not want to update all the packages (and maybe have to wait a while!), you can run
`nixos-rebuild switch --override-input nixpkgs nixpkgs --override-input nixpkgs-unstable nixpkgs-unstable --flake git+https://git.pvv.ntnu.no/Drift/pvv-nixos-config.git`
## The secrets section
For secrets not to be shared with the whole world in git - or be world

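For readers less familiar with these flags, here is an annotated sketch of the forced-upgrade command quoted above. The flag descriptions are a hedged summary based on general nix/nixos-rebuild documentation, not something stated in this repository:

# --flake <url>            build this host's configuration from the given flake
# --update-input <name>    fetch a newer revision of that flake input (here nixpkgs and nixpkgs-unstable)
# --no-write-lock-file     do not attempt to write an updated flake.lock back (the flake is a read-only remote)
# --refresh                ignore any locally cached copy of the remote flake
# --upgrade                updates the machine's nix channels; largely redundant for a flake-based rebuild
nixos-rebuild switch --update-input nixpkgs --update-input nixpkgs-unstable \
  --no-write-lock-file --refresh --upgrade \
  --flake git+https://git.pvv.ntnu.no/Drift/pvv-nixos-config.git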
base.nix (new file, 150 lines)

@@ -0,0 +1,150 @@
{ config, lib, pkgs, inputs, values, ... }:
{
imports = [
./users
./modules/snakeoil-certs.nix
];
networking.domain = "pvv.ntnu.no";
networking.useDHCP = false;
# networking.search = [ "pvv.ntnu.no" "pvv.org" ];
# networking.nameservers = lib.mkDefault [ "129.241.0.200" "129.241.0.201" ];
# networking.tempAddresses = lib.mkDefault "disabled";
# networking.defaultGateway = values.hosts.gateway;
systemd.network.enable = true;
services.resolved = {
enable = lib.mkDefault true;
dnssec = "false"; # Supposdly this keeps breaking and the default is to allow downgrades anyways...
};
time.timeZone = "Europe/Oslo";
i18n.defaultLocale = "en_US.UTF-8";
console = {
font = "Lat2-Terminus16";
keyMap = "no";
};
system.autoUpgrade = {
enable = true;
flake = "git+https://git.pvv.ntnu.no/Drift/pvv-nixos-config.git";
flags = [
"--update-input" "nixpkgs"
"--update-input" "nixpkgs-unstable"
"--no-write-lock-file"
];
};
nix.gc.automatic = true;
nix.gc.options = "--delete-older-than 2d";
nix.settings.experimental-features = [ "nix-command" "flakes" ];
/* This makes commandline tools like
** nix run nixpkgs#hello
** and nix-shell -p hello
** use the same channel the system
** was built with
*/
nix.registry = {
nixpkgs.flake = inputs.nixpkgs;
};
nix.nixPath = [ "nixpkgs=${inputs.nixpkgs}" ];
environment.systemPackages = with pkgs; [
file
git
gnupg
htop
nano
ripgrep
rsync
screen
tmux
vim
wget
kitty.terminfo
];
programs.zsh.enable = true;
users.groups."drift".name = "drift";
# Trusted users on the nix builder machines
users.groups."nix-builder-users".name = "nix-builder-users";
# Let's not thermal throttle
services.thermald.enable = lib.mkIf (lib.all (x: x) [
(config.nixpkgs.system == "x86_64-linux")
(!config.boot.isContainer or false)
]) true;
services.openssh = {
enable = true;
extraConfig = ''
PubkeyAcceptedAlgorithms=+ssh-rsa
Match Group wheel
PasswordAuthentication no
Match All
'';
settings.PermitRootLogin = "yes";
};
# nginx return 444 for all nonexistent virtualhosts
systemd.services.nginx.after = [ "generate-snakeoil-certs.service" ];
environment.snakeoil-certs = lib.mkIf config.services.nginx.enable {
"/etc/certs/nginx" = {
owner = "nginx";
group = "nginx";
};
};
services.nginx = {
recommendedTlsSettings = true;
recommendedProxySettings = true;
recommendedOptimisation = true;
recommendedGzipSettings = true;
appendConfig = ''
pcre_jit on;
worker_processes auto;
worker_rlimit_nofile 100000;
'';
eventsConfig = ''
worker_connections 2048;
use epoll;
multi_accept on;
'';
};
systemd.services.nginx.serviceConfig = lib.mkIf config.services.nginx.enable {
LimitNOFILE = 65536;
};
services.nginx.virtualHosts."_" = lib.mkIf config.services.nginx.enable {
sslCertificate = "/etc/certs/nginx.crt";
sslCertificateKey = "/etc/certs/nginx.key";
addSSL = true;
extraConfig = "return 444;";
};
networking.firewall.allowedTCPPorts = lib.mkIf config.services.nginx.enable [ 80 443 ];
security.acme = {
acceptTerms = true;
defaults.email = "drift@pvv.ntnu.no";
};
# Let's not spam LetsEncrypt in `nixos-rebuild build-vm` mode:
virtualisation.vmVariant = {
security.acme.defaults.server = "https://127.0.0.1";
security.acme.preliminarySelfsigned = true;
users.users.root.initialPassword = "root";
};
}

@@ -1,80 +0,0 @@
{ pkgs, lib, fp, ... }:
{
imports = [
(fp /users)
(fp /modules/snakeoil-certs.nix)
./networking.nix
./nix.nix
./vm.nix
./services/acme.nix
./services/uptimed.nix
./services/auto-upgrade.nix
./services/dbus.nix
./services/fwupd.nix
./services/irqbalance.nix
./services/logrotate.nix
./services/nginx.nix
./services/openssh.nix
./services/postfix.nix
./services/smartd.nix
./services/thermald.nix
./services/userborn.nix
./services/userdbd.nix
];
boot.tmp.cleanOnBoot = lib.mkDefault true;
boot.kernelPackages = lib.mkDefault pkgs.linuxPackages_latest;
time.timeZone = "Europe/Oslo";
i18n.defaultLocale = "en_US.UTF-8";
console = {
font = "Lat2-Terminus16";
keyMap = "no";
};
environment.systemPackages = with pkgs; [
file
git
gnupg
htop
nano
ripgrep
rsync
screen
tmux
vim
wget
kitty.terminfo
];
# .bash_profile already works, but let's also use .bashrc like literally every other distro
# https://man.archlinux.org/man/core/bash/bash.1.en#INVOCATION
# home-manager usually handles this for you: https://github.com/nix-community/home-manager/blob/22a36aa709de7dd42b562a433b9cefecf104a6ee/modules/programs/bash.nix#L203-L209
# btw, programs.bash.shellInit just goes into environment.shellInit which in turn goes into /etc/profile, spooky shit
programs.bash.shellInit = ''
if [ -n "''${BASH_VERSION:-}" ]; then
if [[ ! -f ~/.bash_profile && ! -f ~/.bash_login ]]; then
[[ -f ~/.bashrc ]] && . ~/.bashrc
fi
fi
'';
programs.zsh.enable = true;
# security.lockKernelModules = true;
security.protectKernelImage = true;
security.sudo.execWheelOnly = true;
security.sudo.extraConfig = ''
Defaults lecture = never
'';
users.groups."drift".name = "drift";
# Trusted users on the nix builder machines
users.groups."nix-builder-users".name = "nix-builder-users";
}

@@ -1,13 +0,0 @@
{ lib, values, ... }:
{
systemd.network.enable = true;
networking.domain = "pvv.ntnu.no";
networking.useDHCP = false;
# The rest of the networking configuration is usually sourced from /values.nix
services.resolved = {
enable = lib.mkDefault true;
dnssec = "false"; # Supposdly this keeps breaking and the default is to allow downgrades anyways...
};
}

@@ -1,39 +0,0 @@
{ lib, config, inputs, ... }:
{
nix = {
gc = {
automatic = true;
options = "--delete-older-than 2d";
};
optimise.automatic = true;
settings = {
allow-dirty = true;
builders-use-substitutes = true;
experimental-features = [ "nix-command" "flakes" ];
log-lines = 50;
use-xdg-base-directories = true;
};
/* This makes commandline tools like
** nix run nixpkgs#hello
** and nix-shell -p hello
** use the same channel the system
** was built with
*/
registry = lib.mkMerge [
{
"nixpkgs".flake = inputs.nixpkgs;
"nixpkgs-unstable".flake = inputs.nixpkgs-unstable;
}
# We avoid the reference to self in vmVariant to get a stable system .outPath for equivalence testing
(lib.mkIf (!config.virtualisation.isVmVariant) {
"pvv-nix".flake = inputs.self;
})
];
nixPath = [
"nixpkgs=${inputs.nixpkgs}"
"unstable=${inputs.nixpkgs-unstable}"
];
};
}

@@ -1,15 +0,0 @@
{ ... }:
{
security.acme = {
acceptTerms = true;
defaults.email = "drift@pvv.ntnu.no";
};
# Let's not spam LetsEncrypt in `nixos-rebuild build-vm` mode:
virtualisation.vmVariant = {
security.acme.defaults.server = "https://127.0.0.1";
security.acme.preliminarySelfsigned = true;
users.users.root.initialPassword = "root";
};
}

@@ -1,39 +0,0 @@
{ config, inputs, pkgs, lib, ... }:
let
inputUrls = lib.mapAttrs (input: value: value.url) (import "${inputs.self}/flake.nix").inputs;
in
{
system.autoUpgrade = {
enable = true;
flake = "git+https://git.pvv.ntnu.no/Drift/pvv-nixos-config.git";
flags = [
"--refresh"
"--no-write-lock-file"
# --update-input is deprecated since nix 2.22, and removed in lix 2.90
# as such we instead use --override-input combined with --refresh
# https://git.lix.systems/lix-project/lix/issues/400
] ++ (lib.pipe inputUrls [
(lib.intersectAttrs {
nixpkgs = { };
nixpkgs-unstable = { };
})
(lib.mapAttrsToList (input: url: ["--override-input" input url]))
lib.concatLists
]);
};
# workaround for https://github.com/NixOS/nix/issues/6895
# via https://git.lix.systems/lix-project/lix/issues/400
environment.etc = lib.mkIf (!config.virtualisation.isVmVariant) {
"current-system-flake-inputs.json".source
= pkgs.writers.writeJSON "flake-inputs.json" (
lib.flip lib.mapAttrs inputs (name: input:
# inputs.*.sourceInfo sans outPath, since writeJSON will otherwise serialize sourceInfo like a derivation
lib.removeAttrs (input.sourceInfo or {}) [ "outPath" ]
// { store-path = input.outPath; } # comment this line if you don't want to retain a store reference to the flake inputs
)
);
};
}

@@ -1,7 +0,0 @@
{ ... }:
{
services.dbus = {
enable = true;
implementation = "broker";
};
}

@@ -1,4 +0,0 @@
{ ... }:
{
services.fwupd.enable = true;
}

@@ -1,4 +0,0 @@
{ ... }:
{
services.irqbalance.enable = true;
}

@@ -1,8 +0,0 @@
{ ... }:
{
systemd.services.logrotate = {
documentation = [ "man:logrotate(8)" "man:logrotate.conf(5)" ];
unitConfig.RequiresMountsFor = "/var/log";
serviceConfig.ReadWritePaths = [ "/var/log" ];
};
}

@@ -1,48 +0,0 @@
{ config, lib, ... }:
{
# nginx return 444 for all nonexistent virtualhosts
systemd.services.nginx.after = [ "generate-snakeoil-certs.service" ];
environment.snakeoil-certs = lib.mkIf config.services.nginx.enable {
"/etc/certs/nginx" = {
owner = "nginx";
group = "nginx";
};
};
networking.firewall.allowedTCPPorts = lib.mkIf config.services.nginx.enable [ 80 443 ];
services.nginx = {
recommendedTlsSettings = true;
recommendedProxySettings = true;
recommendedOptimisation = true;
recommendedGzipSettings = true;
appendConfig = ''
# pcre_jit on;
worker_processes auto;
worker_rlimit_nofile 100000;
'';
eventsConfig = ''
worker_connections 2048;
use epoll;
# multi_accept on;
'';
};
systemd.services.nginx.serviceConfig = lib.mkIf config.services.nginx.enable {
LimitNOFILE = 65536;
# We use jit my dudes
MemoryDenyWriteExecute = lib.mkForce false;
# What the fuck do we use that where the defaults are not enough???
SystemCallFilter = lib.mkForce null;
};
services.nginx.virtualHosts."_" = lib.mkIf config.services.nginx.enable {
sslCertificate = "/etc/certs/nginx.crt";
sslCertificateKey = "/etc/certs/nginx.key";
addSSL = true;
extraConfig = "return 444;";
};
}

@@ -1,21 +0,0 @@
{ ... }:
{
services.openssh = {
enable = true;
startWhenNeeded = true;
extraConfig = ''
PubkeyAcceptedAlgorithms=+ssh-rsa
Match Group wheel
PasswordAuthentication no
Match All
'';
settings.PermitRootLogin = "yes";
};
users.users."root".openssh.authorizedKeys.keys = [
"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCqVt4LCe0YIttr9swFxjkjn37ZDY9JxwVC+2gvfSINDJorOCtqPjDOTD2fTS1Gz08QCwpnLWq2kyvRchu6WgriAbSACpbZZBgxRaF/FVh3oiMVFGnNKGnv6/fdo/vZtu8mUVuqtmTrgLYpZdbR4oD3XiBlDKs7Cv5hPqt95lnP6MNFvE8mICCfd1PwhsABd2IQ5laz3u77/RXhNFJL0Kf2/+6gk9awcLuwHrPdvq7c3BxRHbc9UMRQENyjyQPa7aLe+uJBFLKP51I8VBuDpDacuibQx7nMt6N2UJ2KWI0JxRMHuJNq4S5jidR82aOw9gzGbTv30SKNLMqsZ0xj4LtdqCXDiZF6Lr09PsJYsvnBUFWa14HGcThKDtgwQwBryNViYmfv//0h9+RLZiU0ab+NEwSs7Zh5iAD+vhx64QqNX3tR7Le4SWXh8W0eShU9N78qYdSkiC3Ui7htxeqOocXM/P4AwbnHsLELIvkHdvgchCPvl8ygZa4WJTEWv16+ICskJcAKWGuqjvXAFuwjJJmPp9xLW9O0DFfQhMELiGamQR9wK07yYQVr34iah6qZO7cwhSKyEPFrVPIaNtfDhsjED639F7vmktf26SWNJHWfW0wOHILjI6TgqUvy0JDd8W8w0CHlAfz6Fs2l99NNgNF8dB3vBASbxS0hu/y0PVu/xQ== openstack-sleipner"
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAICCbgJ0Uwh9VSVhfId7l9i5/jk4CvAK5rbkiab8R+moF root@sleipner"
];
}

@@ -1,23 +0,0 @@
{ config, pkgs, lib, ... }:
let
cfg = config.services.postfix;
in
{
services.postfix = {
enable = true;
hostname = "${config.networking.hostName}.pvv.ntnu.no";
domain = "pvv.ntnu.no";
relayHost = "smtp.pvv.ntnu.no";
relayPort = 465;
config = {
smtp_tls_wrappermode = "yes";
smtp_tls_security_level = "encrypt";
};
# Nothing should be delivered to this machine
destination = [ ];
};
}

@@ -1,20 +0,0 @@
{ config, pkgs, lib, ... }:
{
services.smartd = {
enable = lib.mkDefault true;
notifications = {
mail = {
enable = true;
sender = "root@pvv.ntnu.no";
recipient = "root@pvv.ntnu.no";
};
wall.enable = false;
};
};
environment.systemPackages = lib.optionals config.services.smartd.enable (with pkgs; [
smartmontools
]);
systemd.services.smartd.unitConfig.ConditionVirtualization = "no";
}

@@ -1,8 +0,0 @@
{ config, lib, ... }:
{
# Let's not thermal throttle
services.thermald.enable = lib.mkIf (lib.all (x: x) [
(config.nixpkgs.system == "x86_64-linux")
(!config.boot.isContainer or false)
]) true;
}

@@ -1,59 +0,0 @@
{ config, pkgs, lib, ... }:
let
cfg = config.services.uptimed;
in
{
options.services.uptimed.settings = lib.mkOption {
description = "";
default = { };
type = lib.types.submodule {
freeformType = with lib.types; attrsOf (either str (listOf str));
};
};
config = {
services.uptimed = {
enable = true;
settings = let
stateDir = "/var/lib/uptimed";
in {
PIDFILE = "${stateDir}/pid";
SENDMAIL = lib.mkDefault "${pkgs.system-sendmail}/bin/sendmail -t";
};
};
systemd.services.uptimed = lib.mkIf (cfg.enable) {
serviceConfig = let
uptimed = pkgs.uptimed.overrideAttrs (prev: {
postPatch = ''
substituteInPlace Makefile.am \
--replace-fail '$(sysconfdir)/uptimed.conf' '/var/lib/uptimed/uptimed.conf'
substituteInPlace src/Makefile.am \
--replace-fail '$(sysconfdir)/uptimed.conf' '/var/lib/uptimed/uptimed.conf'
'';
});
in {
Type = "notify";
ExecStart = lib.mkForce "${uptimed}/sbin/uptimed -f";
BindReadOnlyPaths = let
configFile = lib.pipe cfg.settings [
(lib.mapAttrsToList
(k: v:
if builtins.isList v
then lib.concatMapStringsSep "\n" (v': "${k}=${v'}") v
else "${k}=${v}")
)
(lib.concatStringsSep "\n")
(pkgs.writeText "uptimed.conf")
];
in [
"${configFile}:/var/lib/uptimed/uptimed.conf"
];
};
};
};
}

@@ -1,4 +0,0 @@
{ ... }:
{
services.userborn.enable = true;
}

@@ -1,4 +0,0 @@
{ ... }:
{
services.userdbd.enable = true;
}

@@ -1,15 +0,0 @@
{ lib, ... }:
# This enables
# lib.mkIf (!config.virtualisation.isVmVariant) { ... }
{
options.virtualisation.isVmVariant = lib.mkOption {
description = "`true` if system is build with 'nixos-rebuild build-vm'";
type = lib.types.bool;
default = false;
};
config.virtualisation.vmVariant = {
virtualisation.isVmVariant = true;
};
}

flake.lock (generated, 282 changed lines)

@@ -7,11 +7,11 @@
] ]
}, },
"locked": { "locked": {
"lastModified": 1752113600, "lastModified": 1715445235,
"narHash": "sha256-7LYDxKxZgBQ8LZUuolAQ8UkIB+jb4A2UmiR+kzY9CLI=", "narHash": "sha256-SUu+oIWn+xqQIOlwfwNfS9Sek4i1HKsrLJchsDReXwA=",
"owner": "nix-community", "owner": "nix-community",
"repo": "disko", "repo": "disko",
"rev": "79264292b7e3482e5702932949de9cbb69fedf6d", "rev": "159d87ea5b95bbdea46f0288a33c5e1570272725",
"type": "github" "type": "github"
}, },
"original": { "original": {
@@ -20,45 +20,64 @@
"type": "github" "type": "github"
} }
}, },
"gergle": { "fix-python": {
"inputs": { "inputs": {
"flake-utils": "flake-utils",
"nixpkgs": [ "nixpkgs": [
"grzegorz",
"nixpkgs" "nixpkgs"
] ]
}, },
"locked": { "locked": {
"lastModified": 1736621371, "lastModified": 1713887124,
"narHash": "sha256-45UIQSQA7R5iU4YWvilo7mQbhY1Liql9bHBvYa3qRI0=", "narHash": "sha256-hGTSm0p9xXUYDgsAAr/ORZICo6T6u33vLfX3tILikaQ=",
"ref": "refs/heads/main", "owner": "GuillaumeDesforges",
"rev": "3729796c1213fe76e568ac28f1df8de4e596950b", "repo": "fix-python",
"revCount": 20, "rev": "f7f4b33e22414071fc1f9cbf68072c413c3a7fdf",
"type": "git", "type": "github"
"url": "https://git.pvv.ntnu.no/Grzegorz/gergle.git"
}, },
"original": { "original": {
"type": "git", "owner": "GuillaumeDesforges",
"url": "https://git.pvv.ntnu.no/Grzegorz/gergle.git" "repo": "fix-python",
"type": "github"
} }
}, },
"greg-ng": { "flake-utils": {
"inputs": { "inputs": {
"nixpkgs": [ "systems": "systems"
"nixpkgs"
],
"rust-overlay": "rust-overlay"
}, },
"locked": { "locked": {
"lastModified": 1752258704, "lastModified": 1689068808,
"narHash": "sha256-pRK99+MCgkeVptbJxXhVMXIXl8uwSdkZDpQzFi3OgkA=", "narHash": "sha256-6ixXo3wt24N/melDWjq70UuHQLxGV8jZvooRanIHXw0=",
"ref": "refs/heads/main", "owner": "numtide",
"rev": "9ff525339b62855d53a44b4dc0154a33ac19e44d", "repo": "flake-utils",
"revCount": 48, "rev": "919d646de7be200f3bf08cb76ae1f09402b6f9b4",
"type": "git", "type": "github"
"url": "https://git.pvv.ntnu.no/Grzegorz/greg-ng.git"
}, },
"original": { "original": {
"type": "git", "id": "flake-utils",
"url": "https://git.pvv.ntnu.no/Grzegorz/greg-ng.git" "type": "indirect"
}
},
"grzegorz": {
"inputs": {
"fix-python": "fix-python",
"nixpkgs": [
"nixpkgs-unstable"
]
},
"locked": {
"lastModified": 1715364232,
"narHash": "sha256-ZJC3SkanEgbV7p+LFhP+85CviRWOXJNHzZwR/Stb7hE=",
"owner": "Programvareverkstedet",
"repo": "grzegorz",
"rev": "3841cda1cdcac470440b06838d56a2eb2256378c",
"type": "github"
},
"original": {
"owner": "Programvareverkstedet",
"repo": "grzegorz",
"type": "github"
} }
}, },
"grzegorz-clients": { "grzegorz-clients": {
@@ -68,17 +87,17 @@
] ]
}, },
"locked": { "locked": {
"lastModified": 1736178795, "lastModified": 1715384651,
"narHash": "sha256-mPdi8cgvIDYcgG3FRG7A4BOIMu2Jef96TPMnV00uXlM=", "narHash": "sha256-7RhckgUTjqeCjWkhiCc1iB+5CBx9fl80d/3O4Jh+5kM=",
"ref": "refs/heads/master", "owner": "Programvareverkstedet",
"rev": "fde738910de1fd8293535a6382c2f0c2749dd7c1", "repo": "grzegorz-clients",
"revCount": 79, "rev": "738a4f3dd887f7c3612e4e772b83cbfa3cde5693",
"type": "git", "type": "github"
"url": "https://git.pvv.ntnu.no/Grzegorz/grzegorz-clients.git"
}, },
"original": { "original": {
"type": "git", "owner": "Programvareverkstedet",
"url": "https://git.pvv.ntnu.no/Grzegorz/grzegorz-clients.git" "repo": "grzegorz-clients",
"type": "github"
} }
}, },
"matrix-next": { "matrix-next": {
@@ -88,55 +107,20 @@
] ]
}, },
"locked": { "locked": {
"lastModified": 1753216555, "lastModified": 1717234745,
"narHash": "sha256-qfgVfgXjVPV7vEER4PVFiGUOUW08GHH71CVXgYW8EVc=", "narHash": "sha256-MFyKRdw4WQD6V3vRGbP6MYbtJhZp712zwzjW6YiOBYM=",
"owner": "dali99", "owner": "dali99",
"repo": "nixos-matrix-modules", "repo": "nixos-matrix-modules",
"rev": "099db715d1eba526a464f271b05cead5166fd9a9", "rev": "d7dc42c9bbb155c5e4aa2f0985d0df75ce978456",
"type": "github" "type": "github"
}, },
"original": { "original": {
"owner": "dali99", "owner": "dali99",
"ref": "v0.7.1", "ref": "v0.6.0",
"repo": "nixos-matrix-modules", "repo": "nixos-matrix-modules",
"type": "github" "type": "github"
} }
}, },
"minecraft-data": {
"locked": {
"lastModified": 1725277886,
"narHash": "sha256-Fw4VbbE3EfypQWSgPDFfvVH47BHeg3ptsO715NlUM8Q=",
"ref": "refs/heads/master",
"rev": "1b4087bd3322a2e2ba84271c8fcc013e6b641a58",
"revCount": 2,
"type": "git",
"url": "https://git.pvv.ntnu.no/Projects/minecraft-kartverket.git"
},
"original": {
"type": "git",
"url": "https://git.pvv.ntnu.no/Projects/minecraft-kartverket.git"
}
},
"minecraft-heatmap": {
"inputs": {
"nixpkgs": [
"nixpkgs"
]
},
"locked": {
"lastModified": 1756124334,
"narHash": "sha256-DXFmSpgI8FrqcdqY7wg5l/lpssWjslHq5ufvyp/5k4o=",
"ref": "refs/heads/main",
"rev": "83760b1ebcd9722ddf58a4117d29555da65538ad",
"revCount": 13,
"type": "git",
"url": "https://git.pvv.ntnu.no/Projects/minecraft-heatmap.git"
},
"original": {
"type": "git",
"url": "https://git.pvv.ntnu.no/Projects/minecraft-heatmap.git"
}
},
"nix-gitea-themes": { "nix-gitea-themes": {
"inputs": { "inputs": {
"nixpkgs": [ "nixpkgs": [
@@ -144,43 +128,63 @@
] ]
}, },
"locked": { "locked": {
"lastModified": 1743881366, "lastModified": 1714416973,
"narHash": "sha256-ScGA2IHPk9ugf9bqEZnp+YB/OJgrkZblnG/XLEKvJAo=", "narHash": "sha256-aZUcvXjdETUC6wVQpWDVjLUzwpDAEca8yR0ITDeK39o=",
"ref": "refs/heads/main", "ref": "refs/heads/main",
"rev": "db2e4becf1b11e5dfd33de12a90a7d089fcf68ec", "rev": "2b23c0ba8aae68d3cb6789f0f6e4891cef26cc6d",
"revCount": 11, "revCount": 6,
"type": "git", "type": "git",
"url": "https://git.pvv.ntnu.no/Drift/nix-gitea-themes.git" "url": "https://git.pvv.ntnu.no/oysteikt/nix-gitea-themes.git"
}, },
"original": { "original": {
"type": "git", "type": "git",
"url": "https://git.pvv.ntnu.no/Drift/nix-gitea-themes.git" "url": "https://git.pvv.ntnu.no/oysteikt/nix-gitea-themes.git"
} }
}, },
"nixpkgs": { "nixpkgs": {
"locked": { "locked": {
"lastModified": 1752439653, "lastModified": 1719520878,
"narHash": "sha256-mG27U2CFuggpAuozOu/4XAMKaOtJxzJVzdEemjQEBgg=", "narHash": "sha256-5BXzNOl2RVHcfS/oxaZDKOi7gVuTyWPibQG0DHd5sSc=",
"rev": "dfcd5b901dbab46c9c6e80b265648481aafb01f8", "owner": "NixOS",
"type": "tarball", "repo": "nixpkgs",
"url": "https://releases.nixos.org/nixos/25.05-small/nixos-25.05.806304.dfcd5b901dba/nixexprs.tar.xz" "rev": "a44bedbb48c367f0476e6a3a27bf28f6330faf23",
"type": "github"
}, },
"original": { "original": {
"type": "tarball", "id": "nixpkgs",
"url": "https://nixos.org/channels/nixos-25.05-small/nixexprs.tar.xz" "ref": "nixos-24.05-small",
"type": "indirect"
}
},
"nixpkgs-stable": {
"locked": {
"lastModified": 1714858427,
"narHash": "sha256-tCxeDP4C1pWe2rYY3IIhdA40Ujz32Ufd4tcrHPSKx2M=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "b980b91038fc4b09067ef97bbe5ad07eecca1e76",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "release-23.11",
"repo": "nixpkgs",
"type": "github"
} }
}, },
"nixpkgs-unstable": { "nixpkgs-unstable": {
"locked": { "locked": {
"lastModified": 1752439402, "lastModified": 1715435713,
"narHash": "sha256-xDfOnjnKStgsgcn9SFPgOV6qzwac4JvGKYyfR++49Pw=", "narHash": "sha256-lb2HqDQGfTdnCCpc1pgF6fkdgIOuBQ0nP8jjVSfLFqg=",
"rev": "b47d4f01d4213715a1f09b999bab96bb6a5b675e", "owner": "NixOS",
"type": "tarball", "repo": "nixpkgs",
"url": "https://releases.nixos.org/nixos/unstable-small/nixos-25.11pre829909.b47d4f01d421/nixexprs.tar.xz" "rev": "52b40f6c4be12742b1504ca2eb4527e597bf2526",
"type": "github"
}, },
"original": { "original": {
"type": "tarball", "id": "nixpkgs",
"url": "https://nixos.org/channels/nixos-unstable-small/nixexprs.tar.xz" "ref": "nixos-unstable-small",
"type": "indirect"
} }
}, },
"pvv-calendar-bot": { "pvv-calendar-bot": {
@@ -190,11 +194,11 @@
] ]
}, },
"locked": { "locked": {
"lastModified": 1742225512, "lastModified": 1723850344,
"narHash": "sha256-OB0ndlrGLE5wMUeYP4lmxly9JUEpPCeZRQyMzITKCB0=", "narHash": "sha256-aT37O9l9eclWEnqxASVNBL1dKwDHZUOqdbA4VO9DJvw=",
"ref": "refs/heads/main", "ref": "refs/heads/main",
"rev": "c4a6a02c84d8227abf00305dc995d7242176e6f6", "rev": "38b66677ab8c01aee10cd59e745af9ce3ea88092",
"revCount": 21, "revCount": 19,
"type": "git", "type": "git",
"url": "https://git.pvv.ntnu.no/Projects/calendar-bot.git" "url": "https://git.pvv.ntnu.no/Projects/calendar-bot.git"
}, },
@@ -203,6 +207,25 @@
"url": "https://git.pvv.ntnu.no/Projects/calendar-bot.git" "url": "https://git.pvv.ntnu.no/Projects/calendar-bot.git"
} }
}, },
"pvv-doorbell-bot": {
"inputs": {
"nixpkgs": [
"nixpkgs"
]
},
"locked": {
"dirtyRev": "cec320746bbf5b5bc6618a145c1a997ebd0b5196-dirty",
"dirtyShortRev": "cec3207-dirty",
"lastModified": 1724515328,
"narHash": "sha256-Vj3ZJkCaLq+6d1LJtl7Hg5f7XV4NDPeNC1xEyu9QkOI=",
"type": "git",
"url": "file:///home/felixalb/doorbell-matrix-bot"
},
"original": {
"type": "git",
"url": "file:///home/felixalb/doorbell-matrix-bot"
}
},
"pvv-nettsiden": { "pvv-nettsiden": {
"inputs": { "inputs": {
"nixpkgs": [ "nixpkgs": [
@@ -210,11 +233,11 @@
] ]
}, },
"locked": { "locked": {
"lastModified": 1755475409, "lastModified": 1722722932,
"narHash": "sha256-9nzP3rpYNWNXtGQnGUS+WjeDkhFiTOBwxoJL9bMi1w0=", "narHash": "sha256-K81a2GQpY2kRX+C9ek9r91THlZB674CqRTSMMb5IO7E=",
"ref": "refs/heads/main", "ref": "refs/heads/master",
"rev": "617a799ad8e365192084e51de25cb6f8260668ae", "rev": "6580cfe546c902cdf11e17b0b8aa30b3c412bb34",
"revCount": 511, "revCount": 465,
"type": "git", "type": "git",
"url": "https://git.pvv.ntnu.no/Projects/nettsiden.git" "url": "https://git.pvv.ntnu.no/Projects/nettsiden.git"
}, },
@@ -226,53 +249,31 @@
"root": { "root": {
"inputs": { "inputs": {
"disko": "disko", "disko": "disko",
"gergle": "gergle", "grzegorz": "grzegorz",
"greg-ng": "greg-ng",
"grzegorz-clients": "grzegorz-clients", "grzegorz-clients": "grzegorz-clients",
"matrix-next": "matrix-next", "matrix-next": "matrix-next",
"minecraft-data": "minecraft-data",
"minecraft-heatmap": "minecraft-heatmap",
"nix-gitea-themes": "nix-gitea-themes", "nix-gitea-themes": "nix-gitea-themes",
"nixpkgs": "nixpkgs", "nixpkgs": "nixpkgs",
"nixpkgs-unstable": "nixpkgs-unstable", "nixpkgs-unstable": "nixpkgs-unstable",
"pvv-calendar-bot": "pvv-calendar-bot", "pvv-calendar-bot": "pvv-calendar-bot",
"pvv-doorbell-bot": "pvv-doorbell-bot",
"pvv-nettsiden": "pvv-nettsiden", "pvv-nettsiden": "pvv-nettsiden",
"sops-nix": "sops-nix" "sops-nix": "sops-nix"
} }
}, },
"rust-overlay": {
"inputs": {
"nixpkgs": [
"greg-ng",
"nixpkgs"
]
},
"locked": {
"lastModified": 1752201818,
"narHash": "sha256-d8KczaVT8WFEZdWg//tMAbv8EDyn2YTWcJvSY8gqKBU=",
"owner": "oxalica",
"repo": "rust-overlay",
"rev": "bd8f8329780b348fedcd37b53dbbee48c08c496d",
"type": "github"
},
"original": {
"owner": "oxalica",
"repo": "rust-overlay",
"type": "github"
}
},
"sops-nix": { "sops-nix": {
"inputs": { "inputs": {
"nixpkgs": [ "nixpkgs": [
"nixpkgs" "nixpkgs"
] ],
"nixpkgs-stable": "nixpkgs-stable"
}, },
"locked": { "locked": {
"lastModified": 1751606940, "lastModified": 1715244550,
"narHash": "sha256-KrDPXobG7DFKTOteqdSVeL1bMVitDcy7otpVZWDE6MA=", "narHash": "sha256-ffOZL3eaZz5Y1nQ9muC36wBCWwS1hSRLhUzlA9hV2oI=",
"owner": "Mic92", "owner": "Mic92",
"repo": "sops-nix", "repo": "sops-nix",
"rev": "3633fc4acf03f43b260244d94c71e9e14a2f6e0d", "rev": "0dc50257c00ee3c65fef3a255f6564cfbfe6eb7f",
"type": "github" "type": "github"
}, },
"original": { "original": {
@@ -280,6 +281,21 @@
"repo": "sops-nix", "repo": "sops-nix",
"type": "github" "type": "github"
} }
},
"systems": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
} }
}, },
"root": "root", "root": "root",

flake.nix (159 changed lines)

@@ -2,8 +2,8 @@
description = "PVV System flake"; description = "PVV System flake";
inputs = { inputs = {
nixpkgs.url = "https://nixos.org/channels/nixos-25.05-small/nixexprs.tar.xz"; nixpkgs.url = "nixpkgs/nixos-24.05-small";
nixpkgs-unstable.url = "https://nixos.org/channels/nixos-unstable-small/nixexprs.tar.xz"; nixpkgs-unstable.url = "nixpkgs/nixos-unstable-small";
sops-nix.url = "github:Mic92/sops-nix"; sops-nix.url = "github:Mic92/sops-nix";
sops-nix.inputs.nixpkgs.follows = "nixpkgs"; sops-nix.inputs.nixpkgs.follows = "nixpkgs";
@@ -17,34 +17,31 @@
pvv-calendar-bot.url = "git+https://git.pvv.ntnu.no/Projects/calendar-bot.git"; pvv-calendar-bot.url = "git+https://git.pvv.ntnu.no/Projects/calendar-bot.git";
pvv-calendar-bot.inputs.nixpkgs.follows = "nixpkgs"; pvv-calendar-bot.inputs.nixpkgs.follows = "nixpkgs";
matrix-next.url = "github:dali99/nixos-matrix-modules/v0.7.1"; pvv-doorbell-bot.url = "git+https://git.pvv.ntnu.no/Projects/doorbell-matrix-bot.git";
#pvv-doorbell-bot.url = "git+file:///home/felixalb/doorbell-matrix-bot";
pvv-doorbell-bot.inputs.nixpkgs.follows = "nixpkgs";
matrix-next.url = "github:dali99/nixos-matrix-modules/v0.6.0";
matrix-next.inputs.nixpkgs.follows = "nixpkgs"; matrix-next.inputs.nixpkgs.follows = "nixpkgs";
nix-gitea-themes.url = "git+https://git.pvv.ntnu.no/Drift/nix-gitea-themes.git"; nix-gitea-themes.url = "git+https://git.pvv.ntnu.no/oysteikt/nix-gitea-themes.git";
nix-gitea-themes.inputs.nixpkgs.follows = "nixpkgs"; nix-gitea-themes.inputs.nixpkgs.follows = "nixpkgs";
minecraft-heatmap.url = "git+https://git.pvv.ntnu.no/Projects/minecraft-heatmap.git"; grzegorz.url = "github:Programvareverkstedet/grzegorz";
minecraft-heatmap.inputs.nixpkgs.follows = "nixpkgs"; grzegorz.inputs.nixpkgs.follows = "nixpkgs-unstable";
grzegorz-clients.url = "github:Programvareverkstedet/grzegorz-clients";
greg-ng.url = "git+https://git.pvv.ntnu.no/Grzegorz/greg-ng.git";
greg-ng.inputs.nixpkgs.follows = "nixpkgs";
gergle.url = "git+https://git.pvv.ntnu.no/Grzegorz/gergle.git";
gergle.inputs.nixpkgs.follows = "nixpkgs";
grzegorz-clients.url = "git+https://git.pvv.ntnu.no/Grzegorz/grzegorz-clients.git";
grzegorz-clients.inputs.nixpkgs.follows = "nixpkgs"; grzegorz-clients.inputs.nixpkgs.follows = "nixpkgs";
minecraft-data.url = "git+https://git.pvv.ntnu.no/Projects/minecraft-kartverket.git";
}; };
outputs = { self, nixpkgs, nixpkgs-unstable, sops-nix, disko, ... }@inputs: outputs = { self, nixpkgs, nixpkgs-unstable, sops-nix, disko, ... }@inputs:
let let
inherit (nixpkgs) lib; nixlib = nixpkgs.lib;
systems = [ systems = [
"x86_64-linux" "x86_64-linux"
"aarch64-linux" "aarch64-linux"
"aarch64-darwin" "aarch64-darwin"
]; ];
forAllSystems = f: lib.genAttrs systems f; forAllSystems = f: nixlib.genAttrs systems f;
allMachines = builtins.attrNames self.nixosConfigurations; allMachines = builtins.attrNames self.nixosConfigurations;
importantMachines = [ importantMachines = [
"bekkalokk" "bekkalokk"
@@ -54,69 +51,45 @@
"ildkule" "ildkule"
]; ];
in { in {
inputs = lib.mapAttrs (_: src: src.outPath) inputs; inherit inputs;
nixosConfigurations = let nixosConfigurations = let
unstablePkgs = nixpkgs-unstable.legacyPackages.x86_64-linux; unstablePkgs = nixpkgs-unstable.legacyPackages.x86_64-linux;
nixosConfig = nixpkgs: name: config: nixpkgs.lib.nixosSystem (nixpkgs.lib.recursiveUpdate
nixosConfig = rec {
nixpkgs:
name:
configurationPath:
extraArgs:
lib.nixosSystem (lib.recursiveUpdate
(let
system = "x86_64-linux"; system = "x86_64-linux";
in {
inherit system;
specialArgs = { specialArgs = {
inherit unstablePkgs inputs; inherit nixpkgs-unstable inputs;
values = import ./values.nix; values = import ./values.nix;
fp = path: ./${path}; };
} // extraArgs.specialArgs or { };
modules = [ modules = [
configurationPath ./hosts/${name}/configuration.nix
sops-nix.nixosModules.sops sops-nix.nixosModules.sops
] ++ extraArgs.modules or []; ] ++ config.modules or [];
pkgs = import nixpkgs { pkgs = import nixpkgs {
inherit system; inherit system;
extraArgs.allowUnfreePredicate = pkg: builtins.elem (lib.getName pkg)
[
"nvidia-x11"
"nvidia-settings"
];
overlays = [ overlays = [
# Global overlays go here # Global overlays go here
] ++ extraArgs.overlays or [ ]; ] ++ config.overlays or [ ];
}; };
}) }
(builtins.removeAttrs extraArgs [ (removeAttrs config [ "modules" "overlays" ])
"modules"
"overlays"
"specialArgs"
])
); );
stableNixosConfig = name: extraArgs: stableNixosConfig = nixosConfig nixpkgs;
nixosConfig nixpkgs name ./hosts/${name}/configuration.nix extraArgs; unstableNixosConfig = nixosConfig nixpkgs-unstable;
in { in {
bicep = stableNixosConfig "bicep" { bicep = stableNixosConfig "bicep" {
modules = [ modules = [
inputs.matrix-next.nixosModules.default inputs.matrix-next.nixosModules.default
inputs.pvv-calendar-bot.nixosModules.default inputs.pvv-calendar-bot.nixosModules.default
inputs.minecraft-heatmap.nixosModules.default inputs.pvv-doorbell-bot.nixosModules.default
self.nixosModules.gickup
self.nixosModules.matrix-ooye
]; ];
overlays = [ overlays = [
inputs.pvv-calendar-bot.overlays.x86_64-linux.default inputs.pvv-calendar-bot.overlays.x86_64-linux.default
inputs.minecraft-heatmap.overlays.default inputs.pvv-doorbell-bot.overlays.x86_64-linux.default
(final: prev: {
inherit (self.packages.${prev.system}) out-of-your-element;
})
]; ];
}; };
bekkalokk = stableNixosConfig "bekkalokk" { bekkalokk = stableNixosConfig "bekkalokk" {
@@ -125,86 +98,47 @@
heimdal = unstablePkgs.heimdal; heimdal = unstablePkgs.heimdal;
mediawiki-extensions = final.callPackage ./packages/mediawiki-extensions { }; mediawiki-extensions = final.callPackage ./packages/mediawiki-extensions { };
simplesamlphp = final.callPackage ./packages/simplesamlphp { }; simplesamlphp = final.callPackage ./packages/simplesamlphp { };
bluemap = final.callPackage ./packages/bluemap.nix { };
}) })
inputs.nix-gitea-themes.overlays.default
inputs.pvv-nettsiden.overlays.default inputs.pvv-nettsiden.overlays.default
]; ];
modules = [ modules = [
inputs.nix-gitea-themes.nixosModules.default
inputs.pvv-nettsiden.nixosModules.default inputs.pvv-nettsiden.nixosModules.default
]; ];
}; };
bob = stableNixosConfig "bob" {
modules = [
disko.nixosModules.disko
{ disko.devices.disk.disk1.device = "/dev/vda"; }
];
};
ildkule = stableNixosConfig "ildkule" { }; ildkule = stableNixosConfig "ildkule" { };
#ildkule-unstable = unstableNixosConfig "ildkule" { }; #ildkule-unstable = unstableNixosConfig "ildkule" { };
shark = stableNixosConfig "shark" { }; shark = stableNixosConfig "shark" { };
wenche = stableNixosConfig "wenche" { };
kommode = stableNixosConfig "kommode" {
overlays = [
inputs.nix-gitea-themes.overlays.default
];
modules = [
inputs.nix-gitea-themes.nixosModules.default
];
};
ustetind = stableNixosConfig "ustetind" {
modules = [
"${nixpkgs}/nixos/modules/virtualisation/lxc-container.nix"
];
};
brzeczyszczykiewicz = stableNixosConfig "brzeczyszczykiewicz" { brzeczyszczykiewicz = stableNixosConfig "brzeczyszczykiewicz" {
modules = [ modules = [
inputs.grzegorz.nixosModules.grzegorz-kiosk
inputs.grzegorz-clients.nixosModules.grzegorz-webui inputs.grzegorz-clients.nixosModules.grzegorz-webui
inputs.gergle.nixosModules.default
inputs.greg-ng.nixosModules.default
];
overlays = [
inputs.greg-ng.overlays.default
inputs.gergle.overlays.default
]; ];
}; };
georg = stableNixosConfig "georg" { georg = stableNixosConfig "georg" {
modules = [ modules = [
inputs.grzegorz.nixosModules.grzegorz-kiosk
inputs.grzegorz-clients.nixosModules.grzegorz-webui inputs.grzegorz-clients.nixosModules.grzegorz-webui
inputs.gergle.nixosModules.default
inputs.greg-ng.nixosModules.default
];
overlays = [
inputs.greg-ng.overlays.default
inputs.gergle.overlays.default
]; ];
}; };
} buskerud = stableNixosConfig "buskerud" { };
// };
(let
machineNames = map (i: "lupine-${toString i}") (lib.range 1 5);
stableLupineNixosConfig = name: extraArgs:
nixosConfig nixpkgs name ./hosts/lupine/configuration.nix extraArgs;
in lib.genAttrs machineNames (name: stableLupineNixosConfig name {
modules = [{ networking.hostName = name; }];
specialArgs.lupineName = name;
}));
nixosModules = { nixosModules = {
snakeoil-certs = ./modules/snakeoil-certs.nix; snakeoil-certs = ./modules/snakeoil-certs.nix;
snappymail = ./modules/snappymail.nix; snappymail = ./modules/snappymail.nix;
robots-txt = ./modules/robots-txt.nix;
gickup = ./modules/gickup;
matrix-ooye = ./modules/matrix-ooye.nix;
}; };
devShells = forAllSystems (system: { devShells = forAllSystems (system: {
default = nixpkgs-unstable.legacyPackages.${system}.callPackage ./shell.nix { }; default = nixpkgs.legacyPackages.${system}.callPackage ./shell.nix { };
cuda = let
cuda-pkgs = import nixpkgs-unstable {
inherit system;
config = {
allowUnfree = true;
cudaSupport = true;
};
};
in cuda-pkgs.callPackage ./shells/cuda.nix { };
}); });
packages = { packages = {
@@ -213,20 +147,19 @@
in rec { in rec {
default = important-machines; default = important-machines;
important-machines = pkgs.linkFarm "important-machines" important-machines = pkgs.linkFarm "important-machines"
(lib.getAttrs importantMachines self.packages.x86_64-linux); (nixlib.getAttrs importantMachines self.packages.x86_64-linux);
all-machines = pkgs.linkFarm "all-machines" all-machines = pkgs.linkFarm "all-machines"
(lib.getAttrs allMachines self.packages.x86_64-linux); (nixlib.getAttrs allMachines self.packages.x86_64-linux);
simplesamlphp = pkgs.callPackage ./packages/simplesamlphp { }; simplesamlphp = pkgs.callPackage ./packages/simplesamlphp { };
out-of-your-element = pkgs.callPackage ./packages/out-of-your-element.nix { };
} // } //
(lib.pipe null [ (nixlib.pipe null [
(_: pkgs.callPackage ./packages/mediawiki-extensions { }) (_: pkgs.callPackage ./packages/mediawiki-extensions { })
(lib.flip builtins.removeAttrs ["override" "overrideDerivation"]) (nixlib.flip builtins.removeAttrs ["override" "overrideDerivation"])
(lib.mapAttrs' (name: lib.nameValuePair "mediawiki-${name}")) (nixlib.mapAttrs' (name: nixlib.nameValuePair "mediawiki-${name}"))
]) ])
// lib.genAttrs allMachines // nixlib.genAttrs allMachines
(machine: self.nixosConfigurations.${machine}.config.system.build.toplevel); (machine: self.nixosConfigurations.${machine}.config.system.build.toplevel);
}; };
}; };

@@ -1,24 +1,22 @@
{ fp, pkgs, values, ... }: { pkgs, values, ... }:
{ {
imports = [ imports = [
./hardware-configuration.nix ./hardware-configuration.nix
(fp /base) ../../base.nix
(fp /misc/metrics-exporters.nix) ../../misc/metrics-exporters.nix
./services/bluemap/default.nix ./services/gitea/default.nix
./services/idp-simplesamlphp ./services/idp-simplesamlphp
./services/kerberos ./services/kerberos
./services/mediawiki ./services/mediawiki
./services/nginx.nix ./services/nginx.nix
./services/phpfpm.nix
./services/vaultwarden.nix ./services/vaultwarden.nix
./services/webmail ./services/webmail
./services/website ./services/website
./services/well-known
]; ];
sops.defaultSopsFile = fp /secrets/bekkalokk/bekkalokk.yaml; sops.defaultSopsFile = ../../secrets/bekkalokk/bekkalokk.yaml;
sops.age.sshKeyPaths = [ "/etc/ssh/ssh_host_ed25519_key" ]; sops.age.sshKeyPaths = [ "/etc/ssh/ssh_host_ed25519_key" ];
sops.age.keyFile = "/var/lib/sops-nix/key.txt"; sops.age.keyFile = "/var/lib/sops-nix/key.txt";
sops.age.generateKey = true; sops.age.generateKey = true;
@@ -33,8 +31,6 @@
address = with values.hosts.bekkalokk; [ (ipv4 + "/25") (ipv6 + "/64") ]; address = with values.hosts.bekkalokk; [ (ipv4 + "/25") (ipv6 + "/64") ];
}; };
services.btrfs.autoScrub.enable = true;
# Do not change, even during upgrades. # Do not change, even during upgrades.
# See https://search.nixos.org/options?show=system.stateVersion # See https://search.nixos.org/options?show=system.stateVersion
system.stateVersion = "22.11"; system.stateVersion = "22.11";

@@ -1,85 +0,0 @@
{ config, lib, pkgs, inputs, ... }:
let
vanillaSurvival = "/var/lib/bluemap/vanilla_survival_world";
in {
imports = [
./module.nix # From danio, pending upstreaming
];
disabledModules = [ "services/web-apps/bluemap.nix" ];
sops.secrets."bluemap/ssh-key" = { };
sops.secrets."bluemap/ssh-known-hosts" = { };
services.bluemap = {
enable = true;
package = pkgs.callPackage ./package.nix { };
eula = true;
onCalendar = "*-*-* 05:45:00"; # a little over an hour after auto-upgrade
host = "minecraft.pvv.ntnu.no";
maps = {
"verden" = {
settings = {
world = vanillaSurvival;
sorting = 0;
ambient-light = 0.1;
cave-detection-ocean-floor = -5;
marker-sets = inputs.minecraft-data.map-markers.vanillaSurvival.verden;
};
};
"underverden" = {
settings = {
world = "${vanillaSurvival}/DIM-1";
sorting = 100;
sky-color = "#290000";
void-color = "#150000";
ambient-light = 0.6;
world-sky-light = 0;
remove-caves-below-y = -10000;
cave-detection-ocean-floor = -5;
cave-detection-uses-block-light = true;
max-y = 90;
marker-sets = inputs.minecraft-data.map-markers.vanillaSurvival.underverden;
};
};
"enden" = {
settings = {
world = "${vanillaSurvival}/DIM1";
sorting = 200;
sky-color = "#080010";
void-color = "#080010";
ambient-light = 0.6;
world-sky-light = 0;
remove-caves-below-y = -10000;
cave-detection-ocean-floor = -5;
};
};
};
};
services.nginx.virtualHosts."minecraft.pvv.ntnu.no" = {
enableACME = true;
forceSSL = true;
};
# TODO: render somewhere else lmao
systemd.services."render-bluemap-maps" = {
preStart = ''
mkdir -p /var/lib/bluemap/world
${pkgs.rsync}/bin/rsync \
-e "${pkgs.openssh}/bin/ssh -o UserKnownHostsFile=$CREDENTIALS_DIRECTORY/ssh-known-hosts -i $CREDENTIALS_DIRECTORY/sshkey" \
-avz --no-owner --no-group \
root@innovation.pvv.ntnu.no:/ \
${vanillaSurvival}
'';
serviceConfig = {
LoadCredential = [
"sshkey:${config.sops.secrets."bluemap/ssh-key".path}"
"ssh-known-hosts:${config.sops.secrets."bluemap/ssh-known-hosts".path}"
];
};
};
}

@@ -1,351 +0,0 @@
{ config, lib, pkgs, ... }:
let
cfg = config.services.bluemap;
format = pkgs.formats.hocon { };
coreConfig = format.generate "core.conf" cfg.coreSettings;
webappConfig = format.generate "webapp.conf" cfg.webappSettings;
webserverConfig = format.generate "webserver.conf" cfg.webserverSettings;
storageFolder = pkgs.linkFarm "storage"
(lib.attrsets.mapAttrs' (name: value:
lib.nameValuePair "${name}.conf"
(format.generate "${name}.conf" value))
cfg.storage);
mapsFolder = pkgs.linkFarm "maps"
(lib.attrsets.mapAttrs' (name: value:
lib.nameValuePair "${name}.conf"
(format.generate "${name}.conf" value.settings))
cfg.maps);
webappConfigFolder = pkgs.linkFarm "bluemap-config" {
"maps" = mapsFolder;
"storages" = storageFolder;
"core.conf" = coreConfig;
"webapp.conf" = webappConfig;
"webserver.conf" = webserverConfig;
"packs" = cfg.resourcepacks;
};
renderConfigFolder = name: value: pkgs.linkFarm "bluemap-${name}-config" {
"maps" = pkgs.linkFarm "maps" {
"${name}.conf" = (format.generate "${name}.conf" value.settings);
};
"storages" = storageFolder;
"core.conf" = coreConfig;
"webapp.conf" = format.generate "webapp.conf" (cfg.webappSettings // { "update-settings-file" = false; });
"webserver.conf" = webserverConfig;
"packs" = value.resourcepacks;
};
inherit (lib) mkOption;
in {
options.services.bluemap = {
enable = lib.mkEnableOption "bluemap";
package = lib.mkPackageOption pkgs "bluemap" { };
eula = mkOption {
type = lib.types.bool;
description = ''
By changing this option to true you confirm that you own a copy of Minecraft Java Edition,
and that you agree to Minecraft's EULA.
'';
default = false;
};
defaultWorld = mkOption {
type = lib.types.path;
description = ''
The world used by the default map ruleset.
If you configure your own maps you do not need to set this.
'';
example = lib.literalExpression "\${config.services.minecraft.dataDir}/world";
};
enableRender = mkOption {
type = lib.types.bool;
description = "Enable rendering";
default = true;
};
webRoot = mkOption {
type = lib.types.path;
default = "/var/lib/bluemap/web";
description = "The directory for saving and serving the webapp and the maps";
};
enableNginx = mkOption {
type = lib.types.bool;
default = true;
description = "Enable configuring a virtualHost for serving the bluemap webapp";
};
host = mkOption {
type = lib.types.str;
default = "bluemap.${config.networking.domain}";
defaultText = lib.literalExpression "bluemap.\${config.networking.domain}";
description = "Domain to configure nginx for";
};
onCalendar = mkOption {
type = lib.types.str;
description = ''
How often to trigger rendering the map,
in the format of a systemd timer onCalendar configuration.
See {manpage}`systemd.timer(5)`.
'';
default = "*-*-* 03:10:00";
};
coreSettings = mkOption {
type = lib.types.submodule {
freeformType = format.type;
options = {
data = mkOption {
type = lib.types.path;
description = "Folder for where bluemap stores its data";
default = "/var/lib/bluemap";
};
metrics = lib.mkEnableOption "Sending usage metrics containing the version of bluemap in use";
};
};
description = "Settings for the core.conf file, [see upstream docs](https://github.com/BlueMap-Minecraft/BlueMap/blob/master/BlueMapCommon/src/main/resources/de/bluecolored/bluemap/config/core.conf).";
};
webappSettings = mkOption {
type = lib.types.submodule {
freeformType = format.type;
};
default = {
enabled = true;
webroot = cfg.webRoot;
};
defaultText = lib.literalExpression ''
{
enabled = true;
webroot = config.services.bluemap.webRoot;
}
'';
description = "Settings for the webapp.conf file, see [upstream docs](https://github.com/BlueMap-Minecraft/BlueMap/blob/master/BlueMapCommon/src/main/resources/de/bluecolored/bluemap/config/webapp.conf).";
};
webserverSettings = mkOption {
type = lib.types.submodule {
freeformType = format.type;
options = {
enabled = mkOption {
type = lib.types.bool;
description = ''
Enable bluemap's built-in webserver.
Disabled by default in nixos for use of nginx directly.
'';
default = false;
};
};
};
default = { };
description = ''
Settings for the webserver.conf file, usually not required.
[See upstream docs](https://github.com/BlueMap-Minecraft/BlueMap/blob/master/BlueMapCommon/src/main/resources/de/bluecolored/bluemap/config/webserver.conf).
'';
};
maps = mkOption {
type = lib.types.attrsOf (lib.types.submodule {
options = {
resourcepacks = mkOption {
type = lib.types.path;
default = cfg.resourcepacks;
defaultText = lib.literalExpression "config.services.bluemap.resourcepacks";
description = "A set of resourcepacks/mods/bluemap-addons to extract models from loaded in alphabetical order";
};
settings = mkOption {
type = (lib.types.submodule {
freeformType = format.type;
options = {
world = mkOption {
type = lib.types.path;
description = "Path to world folder containing the dimension to render";
};
};
});
description = ''
Settings for files in `maps/`.
See the default for an example with good options for the different world types.
For valid values [consult upstream docs](https://github.com/BlueMap-Minecraft/BlueMap/blob/master/BlueMapCommon/src/main/resources/de/bluecolored/bluemap/config/maps/map.conf).
'';
};
};
});
default = {
"overworld".settings = {
world = "${cfg.defaultWorld}";
ambient-light = 0.1;
cave-detection-ocean-floor = -5;
};
"nether".settings = {
world = "${cfg.defaultWorld}/DIM-1";
sorting = 100;
sky-color = "#290000";
void-color = "#150000";
ambient-light = 0.6;
world-sky-light = 0;
remove-caves-below-y = -10000;
cave-detection-ocean-floor = -5;
cave-detection-uses-block-light = true;
max-y = 90;
};
"end".settings = {
world = "${cfg.defaultWorld}/DIM1";
sorting = 200;
sky-color = "#080010";
void-color = "#080010";
ambient-light = 0.6;
world-sky-light = 0;
remove-caves-below-y = -10000;
cave-detection-ocean-floor = -5;
};
};
defaultText = lib.literalExpression ''
{
"overworld".settings = {
world = "''${cfg.defaultWorld}";
ambient-light = 0.1;
cave-detection-ocean-floor = -5;
};
"nether".settings = {
world = "''${cfg.defaultWorld}/DIM-1";
sorting = 100;
sky-color = "#290000";
void-color = "#150000";
ambient-light = 0.6;
world-sky-light = 0;
remove-caves-below-y = -10000;
cave-detection-ocean-floor = -5;
cave-detection-uses-block-light = true;
max-y = 90;
};
"end".settings = {
world = "''${cfg.defaultWorld}/DIM1";
sorting = 200;
sky-color = "#080010";
void-color = "#080010";
ambient-light = 0.6;
world-sky-light = 0;
remove-caves-below-y = -10000;
cave-detection-ocean-floor = -5;
};
};
'';
description = ''
map-specific configuration.
These correspond to views in the webapp and are usually
different dimension of a world or different render settings of the same dimension.
If you set anything in this option you must configure all dimensions yourself!
'';
};
storage = mkOption {
type = lib.types.attrsOf (lib.types.submodule {
freeformType = format.type;
options = {
storage-type = mkOption {
type = lib.types.enum [ "FILE" "SQL" ];
description = "Type of storage config";
default = "FILE";
};
};
});
description = ''
Where the rendered map will be stored.
Unless you are doing something advanced you should probably leave this alone and configure webRoot instead.
[See upstream docs](https://github.com/BlueMap-Minecraft/BlueMap/tree/master/BlueMapCommon/src/main/resources/de/bluecolored/bluemap/config/storages)
'';
default = {
"file" = {
root = "${cfg.webRoot}/maps";
};
};
defaultText = lib.literalExpression ''
{
"file" = {
root = "''${config.services.bluemap.webRoot}/maps";
};
}
'';
};
resourcepacks = mkOption {
type = lib.types.path;
default = pkgs.linkFarm "resourcepacks" { };
description = ''
A set of resourcepacks/mods to extract models from loaded in alphabetical order.
Can be overridden on a per-map basis with `services.bluemap.maps.<name>.resourcepacks`.
'';
};
};
config = lib.mkIf cfg.enable {
assertions =
[ { assertion = config.services.bluemap.eula;
message = ''
You have enabled bluemap but have not accepted minecraft's EULA.
You can achieve this through setting `services.bluemap.eula = true`
'';
}
];
services.bluemap.coreSettings.accept-download = cfg.eula;
systemd.services."render-bluemap-maps" = lib.mkIf cfg.enableRender {
serviceConfig = {
Type = "oneshot";
Group = "nginx";
UMask = "026";
};
script = ''
# If web folder doesnt exist generate it
test -f "${cfg.webRoot}" || ${lib.getExe cfg.package} -c ${webappConfigFolder} -gs
# Render each minecraft map
${lib.strings.concatStringsSep "\n" (lib.attrsets.mapAttrsToList
(name: value: "${lib.getExe cfg.package} -c ${renderConfigFolder name value} -r")
cfg.maps)}
# Generate updated webapp
${lib.getExe cfg.package} -c ${webappConfigFolder} -gs
'';
};
systemd.timers."render-bluemap-maps" = lib.mkIf cfg.enableRender {
wantedBy = [ "timers.target" ];
timerConfig = {
OnCalendar = cfg.onCalendar;
Persistent = true;
Unit = "render-bluemap-maps.service";
};
};
services.nginx.virtualHosts = lib.mkIf cfg.enableNginx {
"${cfg.host}" = {
root = config.services.bluemap.webRoot;
locations = {
"~* ^/maps/[^/]*/tiles/".extraConfig = ''
error_page 404 = @empty;
'';
"@empty".return = "204";
};
};
};
};
meta = {
maintainers = with lib.maintainers; [ dandellion h7x4 ];
};
}

@@ -1,30 +0,0 @@
{ lib, stdenvNoCC, fetchurl, makeWrapper, jre }:
stdenvNoCC.mkDerivation rec {
pname = "bluemap";
version = "5.7";
src = fetchurl {
url = "https://github.com/BlueMap-Minecraft/BlueMap/releases/download/v${version}/BlueMap-${version}-cli.jar";
hash = "sha256-8udZYJgrr4bi2mjRYrASd8JwUoUVZW1tZpOLRgafAIw=";
};
dontUnpack = true;
nativeBuildInputs = [ makeWrapper ];
installPhase = ''
runHook preInstall
makeWrapper ${jre}/bin/java $out/bin/bluemap --add-flags "-jar $src"
runHook postInstall
'';
meta = {
description = "3D minecraft map renderer";
homepage = "https://bluemap.bluecolored.de/";
sourceProvenance = with lib.sourceTypes; [ binaryBytecode ];
license = lib.licenses.mit;
maintainers = with lib.maintainers; [ dandellion h7x4 ];
mainProgram = "bluemap";
};
}

@@ -15,8 +15,8 @@ let
enable = true; enable = true;
name = "git-runner-${name}"; url = "https://git.pvv.ntnu.no"; name = "git-runner-${name}"; url = "https://git.pvv.ntnu.no";
labels = [ labels = [
"debian-latest:docker://node:current-bookworm" "debian-latest:docker://node:18-bullseye"
"ubuntu-latest:docker://node:current-bookworm" "ubuntu-latest:docker://node:18-bullseye"
]; ];
tokenFile = config.sops.secrets."gitea/runners/${name}".path; tokenFile = config.sops.secrets."gitea/runners/${name}".path;
}; };
@@ -27,15 +27,5 @@ lib.mkMerge [
(mkRunner "alpha") (mkRunner "alpha")
(mkRunner "beta") (mkRunner "beta")
(mkRunner "epsilon") (mkRunner "epsilon")
{ { virtualisation.podman.enable = true; }
virtualisation.podman = {
enable = true;
defaultNetwork.settings.dns_enabled = true;
autoPrune.enable = true;
};
networking.dhcpcd.IPv6rs = false;
networking.firewall.interfaces."podman+".allowedUDPPorts = [53 5353];
}
] ]

@@ -1,35 +1,29 @@
{ config, values, lib, unstablePkgs, ... }: { config, values, pkgs, lib, ... }:
let let
cfg = config.services.gitea; cfg = config.services.gitea;
domain = "git.pvv.ntnu.no"; domain = "git.pvv.ntnu.no";
sshPort = 2222; sshPort = 2222;
in { in {
imports = [ imports = [
./customization ./ci.nix
./gpg.nix ./import-users.nix
./import-users
./web-secret-provider
]; ];
sops.secrets = let sops.secrets = {
defaultConfig = { "gitea/database" = {
owner = "gitea";
group = "gitea";
};
"gitea/email-password" = {
owner = "gitea"; owner = "gitea";
group = "gitea"; group = "gitea";
}; };
in {
"gitea/database" = defaultConfig;
"gitea/email-password" = defaultConfig;
"gitea/lfs-jwt-secret" = defaultConfig;
"gitea/oauth2-jwt-secret" = defaultConfig;
"gitea/secret-key" = defaultConfig;
}; };
services.gitea = { services.gitea = {
enable = true; enable = true;
appName = "PVV Git"; appName = "PVV Git";
package = unstablePkgs.gitea;
database = { database = {
type = "postgres"; type = "postgres";
host = "postgres.pvv.ntnu.no"; host = "postgres.pvv.ntnu.no";
@@ -47,19 +41,9 @@ in {
ROOT_URL = "https://${domain}/"; ROOT_URL = "https://${domain}/";
PROTOCOL = "http+unix"; PROTOCOL = "http+unix";
SSH_PORT = sshPort; SSH_PORT = sshPort;
LANDING_PAGE = "explore";
START_SSH_SERVER = true; START_SSH_SERVER = true;
START_LFS_SERVER = true; START_LFS_SERVER = true;
LFS_JWT_SECRET = lib.mkForce ""; LANDING_PAGE = "explore";
LFS_JWT_SECRET_URI = "file:${config.sops.secrets."gitea/lfs-jwt-secret".path}";
};
oauth2 = {
JWT_SECRET = lib.mkForce "";
JWT_SECRET_URI = "file:${config.sops.secrets."gitea/oauth2-jwt-secret".path}";
};
"git.timeout" = {
MIGRATE = 3600;
MIRROR = 1800;
}; };
mailer = { mailer = {
ENABLED = true; ENABLED = true;
@@ -70,23 +54,13 @@ in {
USER = "gitea@pvv.ntnu.no"; USER = "gitea@pvv.ntnu.no";
SUBJECT_PREFIX = "[pvv-git]"; SUBJECT_PREFIX = "[pvv-git]";
}; };
metrics = {
ENABLED = true;
ENABLED_ISSUE_BY_LABEL = true;
ENABLED_ISSUE_BY_REPOSITORY = true;
};
indexer.REPO_INDEXER_ENABLED = true; indexer.REPO_INDEXER_ENABLED = true;
service = { service = {
DISABLE_REGISTRATION = true; DISABLE_REGISTRATION = true;
ENABLE_NOTIFY_MAIL = true; ENABLE_NOTIFY_MAIL = true;
AUTO_WATCH_NEW_REPOS = false;
}; };
admin.DEFAULT_EMAIL_NOTIFICATIONS = "onmention"; admin.DEFAULT_EMAIL_NOTIFICATIONS = "onmention";
session.COOKIE_SECURE = true; session.COOKIE_SECURE = true;
security = {
SECRET_KEY = lib.mkForce "";
SECRET_KEY_URI = "file:${config.sops.secrets."gitea/secret-key".path}";
};
database.LOG_SQL = false; database.LOG_SQL = false;
repository = { repository = {
PREFERRED_LICENSES = lib.concatStringsSep "," [ PREFERRED_LICENSES = lib.concatStringsSep "," [
@@ -123,65 +97,54 @@ in {
ENABLE_FEDERATED_AVATAR = false; ENABLE_FEDERATED_AVATAR = false;
}; };
actions.ENABLED = true; actions.ENABLED = true;
ui = {
REACTIONS = lib.concatStringsSep "," [
"+1"
"-1"
"laugh"
"confused"
"heart"
"hooray"
"rocket"
"eyes"
"100"
"anger"
"astonished"
"no_good"
"ok_hand"
"pensive"
"pizza"
"point_up"
"sob"
"skull"
"upside_down_face"
"shrug"
];
};
"ui.meta".DESCRIPTION = "Bokstavelig talt programvareverkstedet"; "ui.meta".DESCRIPTION = "Bokstavelig talt programvareverkstedet";
}; };
dump = {
enable = true;
interval = "weekly";
type = "tar.gz";
};
}; };
environment.systemPackages = [ cfg.package ]; environment.systemPackages = [ cfg.package ];
systemd.services.gitea.serviceConfig.CPUSchedulingPolicy = "batch";
services.nginx.virtualHosts."${domain}" = { services.nginx.virtualHosts."${domain}" = {
forceSSL = true; forceSSL = true;
enableACME = true; enableACME = true;
kTLS = true; kTLS = true;
locations = { locations."/" = {
"/" = { proxyPass = "http://unix:${cfg.settings.server.HTTP_ADDR}";
proxyPass = "http://unix:${cfg.settings.server.HTTP_ADDR}"; extraConfig = ''
extraConfig = '' client_max_body_size 512M;
client_max_body_size 512M; '';
'';
};
"/metrics" = {
proxyPass = "http://unix:${cfg.settings.server.HTTP_ADDR}";
extraConfig = ''
allow ${values.hosts.ildkule.ipv4}/32;
allow ${values.hosts.ildkule.ipv6}/128;
deny all;
'';
};
}; };
}; };
networking.firewall.allowedTCPPorts = [ sshPort ]; networking.firewall.allowedTCPPorts = [ sshPort ];
# Extra customization
services.gitea-themes.monokai = pkgs.gitea-theme-monokai;
systemd.services.install-gitea-customization = {
description = "Install extra customization in gitea's CUSTOM_DIR";
wantedBy = [ "gitea.service" ];
requiredBy = [ "gitea.service" ];
serviceConfig = {
Type = "oneshot";
User = cfg.user;
Group = cfg.group;
};
script = let
logo-svg = ../../../../assets/logo_blue_regular.svg;
logo-png = ../../../../assets/logo_blue_regular.png;
extraLinks = pkgs.writeText "gitea-extra-links.tmpl" ''
<a class="item" href="https://www.pvv.ntnu.no/">PVV</a>
<a class="item" href="https://wiki.pvv.ntnu.no/">Wiki</a>
<a class="item" href="https://git.pvv.ntnu.no/Drift/-/projects/4">Tokyo Drift Issues</a>
'';
in ''
install -Dm444 ${logo-svg} ${cfg.customDir}/public/assets/img/logo.svg
install -Dm444 ${logo-png} ${cfg.customDir}/public/assets/img/logo.png
install -Dm444 ${./loading.apng} ${cfg.customDir}/public/assets/img/loading.png
install -Dm444 ${extraLinks} ${cfg.customDir}/templates/custom/extra_links.tmpl
'';
};
} }

@@ -0,0 +1,94 @@
import requests
import secrets
import os

EMAIL_DOMAIN = os.getenv('EMAIL_DOMAIN')
if EMAIL_DOMAIN is None:
    EMAIL_DOMAIN = 'pvv.ntnu.no'

API_TOKEN = os.getenv('API_TOKEN')
if API_TOKEN is None:
    raise Exception('API_TOKEN not set')

GITEA_API_URL = os.getenv('GITEA_API_URL')
if GITEA_API_URL is None:
    GITEA_API_URL = 'https://git.pvv.ntnu.no/api/v1'

BANNED_SHELLS = [
    "/usr/bin/nologin",
    "/usr/sbin/nologin",
    "/sbin/nologin",
    "/bin/false",
    "/bin/msgsh",
]

existing_users = {}


# This function should only ever be called when adding users
# from the passwd file
def add_user(username, name):
    user = {
        "full_name": name,
        "username": username,
        "login_name": username,
        "source_id": 1,  # 1 = SMTP
    }

    if username not in existing_users:
        user["password"] = secrets.token_urlsafe(32)
        user["must_change_password"] = False
        user["visibility"] = "private"
        user["email"] = username + '@' + EMAIL_DOMAIN

        r = requests.post(GITEA_API_URL + '/admin/users', json=user,
                          headers={'Authorization': 'token ' + API_TOKEN})
        if r.status_code != 201:
            print('ERR: Failed to create user ' + username + ': ' + r.text)
            return
        print('Created user ' + username)
        existing_users[username] = user
    else:
        user["visibility"] = existing_users[username]["visibility"]

        r = requests.patch(GITEA_API_URL + f'/admin/users/{username}',
                           json=user,
                           headers={'Authorization': 'token ' + API_TOKEN})
        if r.status_code != 200:
            print('ERR: Failed to update user ' + username + ': ' + r.text)
            return
        print('Updated user ' + username)


def main():
    # Fetch existing users
    r = requests.get(GITEA_API_URL + '/admin/users',
                     headers={'Authorization': 'token ' + API_TOKEN})
    if r.status_code != 200:
        raise Exception('Failed to get users: ' + r.text)
    for user in r.json():
        existing_users[user['login']] = user

    # Read the file, add each user
    with open("/tmp/passwd-import", 'r') as f:
        for line in f.readlines():
            uid = int(line.split(':')[2])
            if uid < 1000:
                continue
            # The shell is the last passwd field; strip the trailing newline
            # so the comparison against BANNED_SHELLS actually matches.
            shell = line.split(':')[-1].strip()
            if shell in BANNED_SHELLS:
                continue
            username = line.split(':')[0]
            name = line.split(':')[4].split(',')[0]
            add_user(username, name)


if __name__ == '__main__':
    main()

@@ -11,13 +11,9 @@ in
systemd.services.gitea-import-users = lib.mkIf cfg.enable { systemd.services.gitea-import-users = lib.mkIf cfg.enable {
enable = true; enable = true;
preStart=''${pkgs.rsync}/bin/rsync -e "${pkgs.openssh}/bin/ssh -o UserKnownHostsFile=$CREDENTIALS_DIRECTORY/ssh-known-hosts -i $CREDENTIALS_DIRECTORY/sshkey" -a pvv@smtp.pvv.ntnu.no:/etc/passwd /run/gitea-import-users/passwd''; preStart=''${pkgs.rsync}/bin/rsync -e "${pkgs.openssh}/bin/ssh -o UserKnownHostsFile=$CREDENTIALS_DIRECTORY/ssh-known-hosts -i $CREDENTIALS_DIRECTORY/sshkey" -a pvv@smtp.pvv.ntnu.no:/etc/passwd /tmp/passwd-import'';
environment.PASSWD_FILE_PATH = "/run/gitea-import-users/passwd";
serviceConfig = { serviceConfig = {
ExecStart = pkgs.writers.writePython3 "gitea-import-users" { ExecStart = pkgs.writers.writePython3 "gitea-import-users" {
flakeIgnore = [
"E501" # Line over 80 chars lol
];
libraries = with pkgs.python3Packages; [ requests ]; libraries = with pkgs.python3Packages; [ requests ];
} (builtins.readFile ./gitea-import-users.py); } (builtins.readFile ./gitea-import-users.py);
LoadCredential=[ LoadCredential=[
@@ -26,7 +22,6 @@ in
]; ];
DynamicUser="yes"; DynamicUser="yes";
EnvironmentFile=config.sops.secrets."gitea/import-user-env".path; EnvironmentFile=config.sops.secrets."gitea/import-user-env".path;
RuntimeDirectory = "gitea-import-users";
}; };
}; };

@@ -202,12 +202,6 @@ in
rewrite ^/simplesaml/(.*)$ /$1 redirect; rewrite ^/simplesaml/(.*)$ /$1 redirect;
return 404; return 404;
''; '';
"/robots.txt" = {
root = pkgs.writeTextDir "robots.txt" ''
User-agent: *
Disallow: /
'';
};
}; };
}; };
}; };

@@ -1,4 +1,4 @@
{ pkgs, lib, fp, config, values, pkgs-unstable, ... }: let { pkgs, lib, config, values, pkgs-unstable, ... }: let
cfg = config.services.mediawiki; cfg = config.services.mediawiki;
# "mediawiki" # "mediawiki"
@@ -61,6 +61,7 @@ in {
user = "mediawiki"; user = "mediawiki";
passwordFile = config.sops.secrets."mediawiki/postgres_password".path; passwordFile = config.sops.secrets."mediawiki/postgres_password".path;
createLocally = false; createLocally = false;
# TODO: create a normal database and copy over old data when the service is production ready
name = "mediawiki"; name = "mediawiki";
}; };
@@ -209,16 +210,16 @@ in {
''; '';
}; };
"= /PNG/PVV-logo.svg".alias = fp /assets/logo_blue_regular.svg; "= /PNG/PVV-logo.svg".alias = ../../../../assets/logo_blue_regular.svg;
"= /PNG/PVV-logo.png".alias = fp /assets/logo_blue_regular.png; "= /PNG/PVV-logo.png".alias = ../../../../assets/logo_blue_regular.png;
"= /favicon.ico".alias = pkgs.runCommandLocal "mediawiki-favicon.ico" { "= /favicon.ico".alias = pkgs.runCommandLocal "mediawiki-favicon.ico" {
buildInputs = with pkgs; [ imagemagick ]; buildInputs = with pkgs; [ imagemagick ];
} '' } ''
magick \ convert \
${fp /assets/logo_blue_regular.png} \
-resize x64 \ -resize x64 \
-gravity center \ -gravity center \
-crop 64x64+0+0 \ -crop 64x64+0+0 \
${../../../../assets/logo_blue_regular.png} \
-flatten \ -flatten \
-colors 256 \ -colors 256 \
-background transparent \ -background transparent \

@@ -1,51 +0,0 @@
{ lib, ... }:
let
pools = map (pool: "phpfpm-${pool}") [
"idp"
"mediawiki"
"pvv-nettsiden"
"roundcube"
"snappymail"
];
in
{
# Source: https://www.pierreblazquez.com/2023/06/17/how-to-harden-apache-php-fpm-daemons-using-systemd/
systemd.services = lib.genAttrs pools (_: {
serviceConfig = let
caps = [
"CAP_NET_BIND_SERVICE"
"CAP_SETGID"
"CAP_SETUID"
"CAP_CHOWN"
"CAP_KILL"
"CAP_IPC_LOCK"
"CAP_DAC_OVERRIDE"
];
in {
AmbientCapabilities = caps;
CapabilityBoundingSet = caps;
DeviceAllow = [ "" ];
LockPersonality = true;
MemoryDenyWriteExecute = false;
NoNewPrivileges = true;
PrivateMounts = true;
ProtectClock = true;
ProtectControlGroups = true;
ProtectHome = true;
ProtectHostname = true;
ProtectKernelLogs = true;
ProtectKernelModules = true;
ProtectKernelTunables = true;
RemoveIPC = true;
UMask = "0077";
RestrictNamespaces = "~mnt";
RestrictRealtime = true;
RestrictSUIDSGID = true;
SystemCallArchitectures = "native";
KeyringMode = "private";
SystemCallFilter = [
"@system-service"
];
};
});
}
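Since lib.genAttrs simply maps each pool name to the same hardened serviceConfig, the module above is equivalent to writing the override per pool by hand; for a single pool, a trimmed expansion looks roughly like this:

# Illustration only: equivalent hand-written form for one pool, with the option
# list shortened; the module above generates this for every pool in the list.
systemd.services."phpfpm-mediawiki".serviceConfig = {
  AmbientCapabilities = [ "CAP_NET_BIND_SERVICE" "CAP_SETGID" "CAP_SETUID" ];
  NoNewPrivileges = true;
  ProtectHome = true;
  ProtectKernelModules = true;
  SystemCallArchitectures = "native";
  UMask = "0077";
  # ...and the rest of the hardening options listed above
};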

@@ -65,38 +65,4 @@ in {
proxyWebsockets = true; proxyWebsockets = true;
}; };
}; };
systemd.services.vaultwarden = lib.mkIf cfg.enable {
serviceConfig = {
AmbientCapabilities = [ "" ];
CapabilityBoundingSet = [ "" ];
DeviceAllow = [ "" ];
LockPersonality = true;
NoNewPrivileges = true;
# MemoryDenyWriteExecute = true;
PrivateMounts = true;
PrivateUsers = true;
ProcSubset = "pid";
ProtectClock = true;
ProtectControlGroups = true;
ProtectHostname = true;
ProtectKernelLogs = true;
ProtectKernelModules = true;
ProtectKernelTunables = true;
RestrictAddressFamilies = [
"AF_INET"
"AF_INET6"
"AF_UNIX"
];
RemoveIPC = true;
RestrictNamespaces = true;
RestrictRealtime = true;
RestrictSUIDSGID = true;
SystemCallArchitectures = "native";
SystemCallFilter = [
"@system-service"
"~@privileged"
];
};
};
} }

@@ -6,11 +6,6 @@ let
domain = "webmail.pvv.ntnu.no"; domain = "webmail.pvv.ntnu.no";
in in
{ {
sops.secrets."roundcube/postgres_password" = {
owner = "nginx";
group = "nginx";
};
services.roundcube = { services.roundcube = {
enable = true; enable = true;
@@ -21,15 +16,10 @@ in
custom_from custom_from
]); ]);
dicts = with pkgs.aspellDicts; [ en en-computers nb nn fr de it ]; dicts = with pkgs.aspellDicts; [ en en-science en-computers nb nn fr de it ];
maxAttachmentSize = 20; maxAttachmentSize = 20;
hostName = "roundcubeplaceholder.example.com"; hostName = "roundcubeplaceholder.example.com";
database = {
host = "postgres.pvv.ntnu.no";
passwordFile = config.sops.secrets."roundcube/postgres_password".path;
};
extraConfig = '' extraConfig = ''
$config['enable_installer'] = false; $config['enable_installer'] = false;
$config['default_host'] = "ssl://imap.pvv.ntnu.no"; $config['default_host'] = "ssl://imap.pvv.ntnu.no";

@@ -1,8 +1,8 @@
{ config, lib, fp, pkgs, ... }: { config, lib, pkgs, ... }:
let let
cfg = config.services.snappymail; cfg = config.services.snappymail;
in { in {
imports = [ (fp /modules/snappymail.nix) ]; imports = [ ../../../../modules/snappymail.nix ];
services.snappymail = { services.snappymail = {
enable = true; enable = true;

@@ -67,12 +67,7 @@ in {
ADMIN_NAME = "PVV Drift"; ADMIN_NAME = "PVV Drift";
ADMIN_EMAIL = "drift@pvv.ntnu.no"; ADMIN_EMAIL = "drift@pvv.ntnu.no";
ADMIN_PASSWORD = includeFromSops "simplesamlphp/admin_password"; ADMIN_PASSWORD = includeFromSops "simplesamlphp/admin_password";
TRUSTED_DOMAINS = [ TRUSTED_DOMAINS = [ cfg.domainName ];
"www.pvv.ntnu.no"
"pvv.ntnu.no"
"www.pvv.org"
"pvv.org"
];
}; };
}; };
}; };
@@ -121,6 +116,16 @@ in {
"/drift".return = "301 https://wiki.pvv.ntnu.no/wiki/Drift"; "/drift".return = "301 https://wiki.pvv.ntnu.no/wiki/Drift";
"/diverse/abuse.php".return = "301 https://wiki.pvv.ntnu.no/wiki/CERT/Abuse"; "/diverse/abuse.php".return = "301 https://wiki.pvv.ntnu.no/wiki/CERT/Abuse";
"/nerds/".return = "301 https://wiki.pvv.ntnu.no/wiki/Nerdepizza"; "/nerds/".return = "301 https://wiki.pvv.ntnu.no/wiki/Nerdepizza";
# Proxy the matrix well-known files
# Host has to be set before proxy_pass
# The header must be set so nginx on the other side routes it to the right place
"^~ /.well-known/matrix/" = {
extraConfig = ''
proxy_set_header Host matrix.pvv.ntnu.no;
proxy_pass https://matrix.pvv.ntnu.no/.well-known/matrix/;
'';
};
}; };
}; };
} }

@@ -53,7 +53,7 @@ in {
echo "Creating thumbnail for $fname" echo "Creating thumbnail for $fname"
mkdir -p $(dirname ".thumbnails/$fname") mkdir -p $(dirname ".thumbnails/$fname")
magick -define jpeg:size=200x200 "$fname" -thumbnail 300 -auto-orient ".thumbnails/$fname.png" ||: convert -define jpeg:size=200x200 "$fname" -thumbnail 300 -auto-orient ".thumbnails/$fname.png" ||:
touch -m -d "$(date -R -r "$fname")" ".thumbnails/$fname.png" touch -m -d "$(date -R -r "$fname")" ".thumbnails/$fname.png"
done <<< "$images" done <<< "$images"
''; '';
@@ -62,33 +62,6 @@ in {
WorkingDirectory = galleryDir; WorkingDirectory = galleryDir;
User = config.services.pvv-nettsiden.user; User = config.services.pvv-nettsiden.user;
Group = config.services.pvv-nettsiden.group; Group = config.services.pvv-nettsiden.group;
AmbientCapabilities = [ "" ];
CapabilityBoundingSet = [ "" ];
DeviceAllow = [ "" ];
LockPersonality = true;
MemoryDenyWriteExecute = true;
NoNewPrivileges = true; # disable for third party rotate scripts
PrivateDevices = true;
PrivateNetwork = true; # disable for mail delivery
PrivateTmp = true;
ProtectClock = true;
ProtectControlGroups = true;
ProtectHome = true; # disable for userdir logs
ProtectHostname = true;
ProtectKernelLogs = true;
ProtectKernelModules = true;
ProtectKernelTunables = true;
ProtectProc = "invisible";
ProtectSystem = "full";
RestrictNamespaces = true;
RestrictRealtime = true;
RestrictSUIDSGID = true; # disable for creating setgid directories
SocketBindDeny = [ "any" ];
SystemCallArchitectures = "native";
SystemCallFilter = [
"@system-service"
];
}; };
}; };
} }

@@ -1,18 +0,0 @@
{ ... }:
{
services.nginx.virtualHosts."www.pvv.ntnu.no".locations = {
"^~ /.well-known/" = {
alias = (toString ./root) + "/";
};
# Proxy the matrix well-known files
# Host has to be set before proxy_pass
# The header must be set so nginx on the other side routes it to the right place
"^~ /.well-known/matrix/" = {
extraConfig = ''
proxy_set_header Host matrix.pvv.ntnu.no;
proxy_pass https://matrix.pvv.ntnu.no/.well-known/matrix/;
'';
};
};
}

@@ -1,31 +0,0 @@
<?xml version="1.0"?>
<clientConfig version="1.1">
<emailProvider id="pvv.ntnu.no">
<domain>pvv.ntnu.no</domain>
<domain>pvv.org</domain>
<displayName>Programvareverkstedet</displayName>
<incomingServer type="imap">
<hostname>imap.pvv.ntnu.no</hostname>
<port>993</port>
<socketType>SSL</socketType>
<username>%EMAILLOCALPART%</username>
<authentication>password-cleartext</authentication>
</incomingServer>
<outgoingServer type="smtp">
<hostname>smtp.pvv.ntnu.no</hostname>
<port>587</port>
<socketType>STARTTLS</socketType>
<username>%EMAILLOCALPART%</username>
<authentication>password-cleartext</authentication>
<useGlobalPreferredServer>true</useGlobalPreferredServer>
</outgoingServer>
<documentation url="https://www.pvv.ntnu.no/pvv/Drift/Mail/IMAP_POP3">
<descr lang="en">Setup programvareverkstedet email user with IMAP or POP3</descr>
<descr lang="nb">Sett opp programvareverkstedet email bruker med IMAP eller POP3</descr>
</documentation>
</emailProvider>
</clientConfig>

@@ -1,12 +0,0 @@
Contact: mailto:drift@pvv.ntnu.no
Contact: mailto:cert@pvv.ntnu.no
# drift@pvv.ntnu.no is read by more people and has a quicker reaction time,
# but cert@pvv.ntnu.no can be used for more severe issues.
Preferred-Languages: no, en
Expires: 2032-12-31T23:59:59.000Z
# This file was last updated 2024-09-14.
# You can find a wikipage for our security policies at:
# https://wiki.pvv.ntnu.no/wiki/CERT

hosts/bicep/acmeCert.nix (new file, +24 lines)

@@ -0,0 +1,24 @@
{ values, ... }:
{
users.groups.acme.members = [ "nginx" ];
security.acme.certs."postgres.pvv.ntnu.no" = {
group = "acme";
extraDomainNames = [
# "postgres.pvv.org"
"bicep.pvv.ntnu.no"
# "bicep.pvv.org"
# values.hosts.bicep.ipv4
# values.hosts.bicep.ipv6
];
};
services.nginx = {
enable = true;
virtualHosts."postgres.pvv.ntnu.no" = {
forceSSL = true;
enableACME = true;
# useACMEHost = "postgres.pvv.ntnu.no";
};
};
}

@@ -1,22 +1,24 @@
{ fp, pkgs, values, ... }: { pkgs, values, ... }:
{ {
imports = [ imports = [
./hardware-configuration.nix ./hardware-configuration.nix
(fp /base) ../../base.nix
(fp /misc/metrics-exporters.nix) ../../misc/metrics-exporters.nix
./services/nginx ./services/nginx
./acmeCert.nix
./services/calendar-bot.nix ./services/calendar-bot.nix
./services/git-mirrors ./services/doorbell-bot.nix
./services/minecraft-heatmap.nix ./services/mysql.nix
./services/mysql.nix ./services/mysql.nix
./services/postgres.nix ./services/postgres.nix
./services/matrix ./services/matrix
]; ];
sops.defaultSopsFile = fp /secrets/bicep/bicep.yaml; sops.defaultSopsFile = ../../secrets/bicep/bicep.yaml;
sops.age.sshKeyPaths = [ "/etc/ssh/ssh_host_ed25519_key" ]; sops.age.sshKeyPaths = [ "/etc/ssh/ssh_host_ed25519_key" ];
sops.age.keyFile = "/var/lib/sops-nix/key.txt"; sops.age.keyFile = "/var/lib/sops-nix/key.txt";
sops.age.generateKey = true; sops.age.generateKey = true;
@@ -35,9 +37,6 @@
anyInterface = true; anyInterface = true;
}; };
# There are no smart devices
services.smartd.enable = false;
# Do not change, even during upgrades. # Do not change, even during upgrades.
# See https://search.nixos.org/options?show=system.stateVersion # See https://search.nixos.org/options?show=system.stateVersion
system.stateVersion = "22.11"; system.stateVersion = "22.11";

@@ -1,16 +1,16 @@
{ config, fp, lib, pkgs, ... }: { config, lib, pkgs, ... }:
let let
cfg = config.services.pvv-calendar-bot; cfg = config.services.pvv-calendar-bot;
in { in {
sops.secrets = { sops.secrets = {
"calendar-bot/matrix_token" = { "calendar-bot/matrix_token" = {
sopsFile = fp /secrets/bicep/bicep.yaml; sopsFile = ../../../secrets/bicep/bicep.yaml;
key = "calendar-bot/matrix_token"; key = "calendar-bot/matrix_token";
owner = cfg.user; owner = cfg.user;
group = cfg.group; group = cfg.group;
}; };
"calendar-bot/mysql_password" = { "calendar-bot/mysql_password" = {
sopsFile = fp /secrets/bicep/bicep.yaml; sopsFile = ../../../secrets/bicep/bicep.yaml;
key = "calendar-bot/mysql_password"; key = "calendar-bot/mysql_password";
owner = cfg.user; owner = cfg.user;
group = cfg.group; group = cfg.group;

@@ -0,0 +1,16 @@
{ config, lib, pkgs, ... }:
let
cfg = config.services.pvv-doorbell-bot;
in {
sops.secrets."doorbell-bot/config-json" = {
owner = cfg.user;
group = cfg.group;
};
services.pvv-doorbell-bot = {
enable = true;
settings = {
configFile = config.sops.secrets."doorbell-bot/config-json".path;
};
};
}
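The services.pvv-doorbell-bot module itself is not included in this compare, so only the options consumed above (enable, user, group, settings.configFile) are known; a purely hypothetical option skeleton consistent with that call site could look like:

# Hypothetical sketch of the module interface implied by the file above;
# the real module is not part of this diff.
{ lib, ... }:
{
  options.services.pvv-doorbell-bot = {
    enable = lib.mkEnableOption "PVV doorbell bot";
    user = lib.mkOption {
      type = lib.types.str;
      default = "pvv-doorbell-bot";
    };
    group = lib.mkOption {
      type = lib.types.str;
      default = "pvv-doorbell-bot";
    };
    settings.configFile = lib.mkOption {
      type = lib.types.path;
      description = "Path to the bot's JSON config file (e.g. a sops secret).";
    };
  };
}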

@@ -1,100 +0,0 @@
{ config, pkgs, lib, fp, ... }:
let
cfg = config.services.gickup;
in
{
sops.secrets."gickup/github-token" = {
owner = "gickup";
};
services.gickup = {
enable = true;
dataDir = "/data/gickup";
destinationSettings = {
structured = true;
zip = false;
keep = 10;
bare = true;
lfs = false;
};
instances = let
defaultGithubConfig = {
settings.token_file = config.sops.secrets."gickup/github-token".path;
};
defaultGitlabConfig = {
# settings.token_file = ...
};
in {
"github:Git-Mediawiki/Git-Mediawiki" = defaultGithubConfig;
"github:NixOS/nixpkgs" = defaultGithubConfig;
"github:go-gitea/gitea" = defaultGithubConfig;
"github:heimdal/heimdal" = defaultGithubConfig;
"github:saltstack/salt" = defaultGithubConfig;
"github:typst/typst" = defaultGithubConfig;
"github:unmojang/FjordLauncher" = defaultGithubConfig;
"github:unmojang/drasl" = defaultGithubConfig;
"github:yushijinhun/authlib-injector" = defaultGithubConfig;
"gitlab:mx-puppet/discord/better-discord.js" = defaultGitlabConfig;
"gitlab:mx-puppet/discord/discord-markdown" = defaultGitlabConfig;
"gitlab:mx-puppet/discord/matrix-discord-parser" = defaultGitlabConfig;
"gitlab:mx-puppet/discord/mx-puppet-discord" = defaultGitlabConfig;
"gitlab:mx-puppet/mx-puppet-bridge" = defaultGitlabConfig;
"any:glibc" = {
settings.url = "https://sourceware.org/git/glibc.git";
};
"any:out-of-your-element" = {
settings.url = "https://gitdab.com/cadence/out-of-your-element.git";
};
"any:out-of-your-element-module" = {
settings.url = "https://cgit.rory.gay/nix/OOYE-module.git";
};
};
};
services.cgit = let
domain = "mirrors.pvv.ntnu.no";
in {
${domain} = {
enable = true;
package = pkgs.callPackage (fp /packages/cgit.nix) { };
group = "gickup";
scanPath = "${cfg.dataDir}/linktree";
settings = {
enable-commit-graph = true;
enable-follow-links = true;
enable-http-clone = true;
enable-remote-branches = true;
clone-url = "https://${domain}/$CGIT_REPO_URL";
remove-suffix = true;
root-title = "PVVSPPP";
root-desc = "PVV Speiler Praktisk og Prominent Programvare";
snapshots = "all";
logo = "/PVV-logo.png";
};
};
};
services.nginx.virtualHosts."mirrors.pvv.ntnu.no" = {
forceSSL = true;
enableACME = true;
locations."= /PVV-logo.png".alias = let
small-pvv-logo = pkgs.runCommandLocal "pvv-logo-96x96" {
nativeBuildInputs = [ pkgs.imagemagick ];
} ''
magick '${fp /assets/logo_blue_regular.svg}' -resize 96x96 PNG:"$out"
'';
in toString small-pvv-logo;
};
systemd.services."fcgiwrap-cgit-mirrors.pvv.ntnu.no" = {
serviceConfig.BindReadOnlyPaths = [ cfg.dataDir ];
};
}

@@ -1,14 +1,14 @@
{ config, lib, fp, pkgs, secrets, values, ... }: { config, lib, pkgs, secrets, ... }:
{ {
sops.secrets."matrix/synapse/turnconfig" = { sops.secrets."matrix/synapse/turnconfig" = {
sopsFile = fp /secrets/bicep/matrix.yaml; sopsFile = ../../../../secrets/bicep/matrix.yaml;
key = "synapse/turnconfig"; key = "synapse/turnconfig";
owner = config.users.users.matrix-synapse.name; owner = config.users.users.matrix-synapse.name;
group = config.users.users.matrix-synapse.group; group = config.users.users.matrix-synapse.group;
}; };
sops.secrets."matrix/coturn/static-auth-secret" = { sops.secrets."matrix/coturn/static-auth-secret" = {
sopsFile = fp /secrets/bicep/matrix.yaml; sopsFile = ../../../../secrets/bicep/matrix.yaml;
key = "coturn/static-auth-secret"; key = "coturn/static-auth-secret";
owner = config.users.users.turnserver.name; owner = config.users.users.turnserver.name;
group = config.users.users.turnserver.group; group = config.users.users.turnserver.group;
@@ -48,9 +48,6 @@
users.users.turnserver.extraGroups = [ "acme" ]; users.users.turnserver.extraGroups = [ "acme" ];
# It needs this to be allowed to access the files with the acme group
systemd.services.coturn.serviceConfig.PrivateUsers = lib.mkForce false;
systemd.services."acme-${config.services.coturn.realm}".serviceConfig = { systemd.services."acme-${config.services.coturn.realm}".serviceConfig = {
AmbientCapabilities = [ "CAP_NET_BIND_SERVICE" ]; AmbientCapabilities = [ "CAP_NET_BIND_SERVICE" ];
}; };
@@ -63,14 +60,12 @@
pkey = "${config.security.acme.certs.${realm}.directory}/key.pem"; pkey = "${config.security.acme.certs.${realm}.directory}/key.pem";
use-auth-secret = true; use-auth-secret = true;
# World readable, but I don't think it's that bad
static-auth-secret-file = config.sops.secrets."matrix/coturn/static-auth-secret".path; static-auth-secret-file = config.sops.secrets."matrix/coturn/static-auth-secret".path;
secure-stun = true; secure-stun = true;
listening-ips = [ listening-ips = [ "129.241.210.213" "2001:700:300:1900::213" ];
values.services.turn.ipv4
values.services.turn.ipv6
];
tls-listening-port = 443; tls-listening-port = 443;
alt-tls-listening-port = 5349; alt-tls-listening-port = 5349;

@@ -9,9 +9,7 @@
./coturn.nix ./coturn.nix
./mjolnir.nix ./mjolnir.nix
# ./discord.nix ./discord.nix
./out-of-your-element.nix
./hookshot
]; ];

@@ -1,4 +1,4 @@
{ config, lib, fp, ... }: { config, lib, ... }:
let let
cfg = config.services.mx-puppet-discord; cfg = config.services.mx-puppet-discord;
@@ -6,46 +6,19 @@ in
{ {
users.groups.keys-matrix-registrations = { }; users.groups.keys-matrix-registrations = { };
sops.secrets."matrix/discord/as_token" = { sops.secrets."matrix/registrations/mx-puppet-discord" = {
sopsFile = fp /secrets/bicep/matrix.yaml; sopsFile = ../../../../secrets/bicep/matrix.yaml;
key = "discord/as_token"; key = "registrations/mx-puppet-discord";
};
sops.secrets."matrix/discord/hs_token" = {
sopsFile = fp /secrets/bicep/matrix.yaml;
key = "discord/hs_token";
};
sops.templates."discord-registration.yaml" = {
owner = config.users.users.matrix-synapse.name; owner = config.users.users.matrix-synapse.name;
group = config.users.groups.keys-matrix-registrations.name; group = config.users.groups.keys-matrix-registrations.name;
content = ''
as_token: "${config.sops.placeholder."matrix/discord/as_token"}"
hs_token: "${config.sops.placeholder."matrix/discord/hs_token"}"
id: discord-puppet
namespaces:
users:
- exclusive: true
regex: '@_discordpuppet_.*'
rooms: []
aliases:
- exclusive: true
regex: '#_discordpuppet_.*'
protocols: []
rate_limited: false
sender_localpart: _discordpuppet_bot
url: 'http://localhost:8434'
de.sorunome.msc2409.push_ephemeral: true
'';
}; };
systemd.services.mx-puppet-discord = { systemd.services.mx-puppet-discord = {
serviceConfig.SupplementaryGroups = [ serviceConfig.SupplementaryGroups = [ config.users.groups.keys-matrix-registrations.name ];
config.users.groups.keys-matrix-registrations.name
];
}; };
services.mx-puppet-discord.enable = false; services.mx-puppet-discord.enable = true;
services.mx-puppet-discord.settings = { services.mx-puppet-discord.settings = {
bridge = { bridge = {
bindAddress = "localhost"; bindAddress = "localhost";
@@ -56,16 +29,11 @@ in
relay.whitelist = [ ".*" ]; relay.whitelist = [ ".*" ];
selfService.whitelist = [ "@danio:pvv\\.ntnu\\.no" "@dandellion:dodsorf\\.as" ]; selfService.whitelist = [ "@danio:pvv\\.ntnu\\.no" "@dandellion:dodsorf\\.as" ];
}; };
services.mx-puppet-discord.serviceDependencies = [ services.mx-puppet-discord.serviceDependencies = [ "matrix-synapse.target" "nginx.service" ];
"matrix-synapse.target"
"nginx.service"
];
services.matrix-synapse-next.settings = { services.matrix-synapse-next.settings = {
app_service_config_files = [ app_service_config_files = [ config.sops.secrets."matrix/registrations/mx-puppet-discord".path ];
config.sops.templates."discord-registration.yaml".path
];
use_appservice_legacy_authorization = true; use_appservice_legacy_authorization = true;
}; };

@@ -1,135 +0,0 @@
{ config, lib, fp, unstablePkgs, inputs, ... }:
let
cfg = config.services.matrix-hookshot;
webhookListenAddress = "127.0.0.1";
webhookListenPort = 8435;
in
{
sops.secrets."matrix/hookshot/as_token" = {
sopsFile = fp /secrets/bicep/matrix.yaml;
key = "hookshot/as_token";
};
sops.secrets."matrix/hookshot/hs_token" = {
sopsFile = fp /secrets/bicep/matrix.yaml;
key = "hookshot/hs_token";
};
sops.templates."hookshot-registration.yaml" = {
owner = config.users.users.matrix-synapse.name;
group = config.users.groups.keys-matrix-registrations.name;
content = ''
id: matrix-hookshot
as_token: "${config.sops.placeholder."matrix/hookshot/as_token"}"
hs_token: "${config.sops.placeholder."matrix/hookshot/hs_token"}"
namespaces:
rooms: []
users:
- regex: "@_webhooks_.*:pvv.ntnu.no"
exclusive: true
- regex: "@bot_feeds:pvv.ntnu.no"
exclusive: true
aliases: []
sender_localpart: hookshot
url: "http://${cfg.settings.bridge.bindAddress}:${toString cfg.settings.bridge.port}"
rate_limited: false
# If enabling encryption
de.sorunome.msc2409.push_ephemeral: true
push_ephemeral: true
org.matrix.msc3202: true
'';
};
systemd.services.matrix-hookshot = {
serviceConfig.SupplementaryGroups = [
config.users.groups.keys-matrix-registrations.name
];
};
services.matrix-hookshot = {
enable = true;
package = unstablePkgs.matrix-hookshot;
registrationFile = config.sops.templates."hookshot-registration.yaml".path;
settings = {
bridge = {
bindAddress = "127.0.0.1";
domain = "pvv.ntnu.no";
url = "https://matrix.pvv.ntnu.no";
mediaUrl = "https://matrix.pvv.ntnu.no";
port = 9993;
};
listeners = [
{
bindAddress = webhookListenAddress;
port = webhookListenPort;
resources = [
"webhooks"
# "metrics"
# "provisioning"
"widgets"
];
}
];
generic = {
enabled = true;
outbound = true;
urlPrefix = "https://hookshot.pvv.ntnu.no/webhook/";
userIdPrefix = "_webhooks_";
allowJsTransformationFunctions = false;
waitForComplete = false;
};
feeds = {
enabled = true;
pollIntervalSeconds = 600;
};
serviceBots = [
{ localpart = "bot_feeds";
displayname = "Aya";
avatar = ./feeds.png;
prefix = "!aya";
service = "feeds";
}
];
permissions = [
# Users of the PVV Server
{ actor = "pvv.ntnu.no";
services = [ { service = "*"; level = "commands"; } ];
}
# Members of Medlem space (for people with their own hs)
{ actor = "!pZOTJQinWyyTWaeOgK:pvv.ntnu.no";
services = [ { service = "*"; level = "commands"; } ];
}
# Members of Drift
{ actor = "!eYgeufLrninXxQpYml:pvv.ntnu.no";
services = [ { service = "*"; level = "admin"; } ];
}
# Dan bootstrap
{ actor = "@dandellion:dodsorf.as";
services = [ { service = "*"; level = "admin"; } ];
}
];
};
};
services.matrix-hookshot.serviceDependencies = [
"matrix-synapse.target"
"nginx.service"
];
services.matrix-synapse-next.settings = {
app_service_config_files = [
config.sops.templates."hookshot-registration.yaml".path
];
};
services.nginx.virtualHosts."hookshot.pvv.ntnu.no" = {
enableACME = true;
locations."/" = {
proxyPass = "http://${webhookListenAddress}:${toString webhookListenPort}";
};
};
}

Binary file not shown.


@@ -1,8 +1,8 @@
{ config, lib, fp, ... }: { config, lib, ... }:
{ {
sops.secrets."matrix/mjolnir/access_token" = { sops.secrets."matrix/mjolnir/access_token" = {
sopsFile = fp /secrets/bicep/matrix.yaml; sopsFile = ../../../../secrets/bicep/matrix.yaml;
key = "mjolnir/access_token"; key = "mjolnir/access_token";
owner = config.users.users.mjolnir.name; owner = config.users.users.mjolnir.name;
group = config.users.users.mjolnir.group; group = config.users.users.mjolnir.group;
@@ -11,7 +11,7 @@
services.mjolnir = { services.mjolnir = {
enable = true; enable = true;
pantalaimon.enable = false; pantalaimon.enable = false;
homeserverUrl = "https://matrix.pvv.ntnu.no"; homeserverUrl = "http://127.0.0.1:8008";
accessTokenFile = config.sops.secrets."matrix/mjolnir/access_token".path; accessTokenFile = config.sops.secrets."matrix/mjolnir/access_token".path;
managementRoom = "!gsdeCoWjvYRBrzuiRq:pvv.ntnu.no"; managementRoom = "!gsdeCoWjvYRBrzuiRq:pvv.ntnu.no";
protectedRooms = map (a: "https://matrix.to/#/${a}") [ protectedRooms = map (a: "https://matrix.to/#/${a}") [

@@ -1,66 +0,0 @@
{ config, pkgs, fp, ... }:
let
cfg = config.services.matrix-ooye;
in
{
users.groups.keys-matrix-registrations = { };
sops.secrets = {
"matrix/ooye/as_token" = {
sopsFile = fp /secrets/bicep/matrix.yaml;
key = "ooye/as_token";
};
"matrix/ooye/hs_token" = {
sopsFile = fp /secrets/bicep/matrix.yaml;
key = "ooye/hs_token";
};
"matrix/ooye/discord_token" = {
sopsFile = fp /secrets/bicep/matrix.yaml;
key = "ooye/discord_token";
};
"matrix/ooye/discord_client_secret" = {
sopsFile = fp /secrets/bicep/matrix.yaml;
key = "ooye/discord_client_secret";
};
};
services.matrix-ooye = {
enable = true;
homeserver = "https://matrix.pvv.ntnu.no";
homeserverName = "pvv.ntnu.no";
discordTokenPath = config.sops.secrets."matrix/ooye/discord_token".path;
discordClientSecretPath = config.sops.secrets."matrix/ooye/discord_client_secret".path;
bridgeOrigin = "https://ooye.pvv.ntnu.no";
enableSynapseIntegration = false;
};
systemd.services."matrix-synapse" = {
after = [
"matrix-ooye-pre-start.service"
"network-online.target"
];
requires = [ "matrix-ooye-pre-start.service" ];
serviceConfig = {
LoadCredential = [
"matrix-ooye-registration:/var/lib/matrix-ooye/registration.yaml"
];
ExecStartPre = [
"+${pkgs.coreutils}/bin/cp /run/credentials/matrix-synapse.service/matrix-ooye-registration ${config.services.matrix-synapse-next.dataDir}/ooye-registration.yaml"
"+${pkgs.coreutils}/bin/chown matrix-synapse:keys-matrix-registrations ${config.services.matrix-synapse-next.dataDir}/ooye-registration.yaml"
];
};
};
services.matrix-synapse-next.settings = {
app_service_config_files = [
"${config.services.matrix-synapse-next.dataDir}/ooye-registration.yaml"
];
};
services.nginx.virtualHosts."ooye.pvv.ntnu.no" = {
forceSSL = true;
enableACME = true;
locations."/".proxyPass = "http://localhost:${cfg.socket}";
};
}

@@ -1,4 +1,4 @@
{ config, lib, fp, pkgs, values, inputs, ... }: { config, lib, pkgs, values, inputs, ... }:
let let
cfg = config.services.matrix-synapse-next; cfg = config.services.matrix-synapse-next;
@@ -10,18 +10,23 @@ let
in { in {
sops.secrets."matrix/synapse/signing_key" = { sops.secrets."matrix/synapse/signing_key" = {
key = "synapse/signing_key"; key = "synapse/signing_key";
sopsFile = fp /secrets/bicep/matrix.yaml; sopsFile = ../../../../secrets/bicep/matrix.yaml;
owner = config.users.users.matrix-synapse.name; owner = config.users.users.matrix-synapse.name;
group = config.users.users.matrix-synapse.group; group = config.users.users.matrix-synapse.group;
}; };
sops.secrets."matrix/synapse/user_registration" = { sops.secrets."matrix/synapse/user_registration" = {
sopsFile = fp /secrets/bicep/matrix.yaml; sopsFile = ../../../../secrets/bicep/matrix.yaml;
key = "synapse/signing_key"; key = "synapse/signing_key";
owner = config.users.users.matrix-synapse.name; owner = config.users.users.matrix-synapse.name;
group = config.users.users.matrix-synapse.group; group = config.users.users.matrix-synapse.group;
}; };
sops.secrets."matrix/sliding-sync/env" = {
sopsFile = ../../../../secrets/bicep/matrix.yaml;
key = "sliding-sync/env";
};
services.matrix-synapse-next = { services.matrix-synapse-next = {
enable = true; enable = true;
@@ -38,6 +43,8 @@ in {
workers.eventPersisters = 2; workers.eventPersisters = 2;
workers.useUserDirectoryWorker = true; workers.useUserDirectoryWorker = true;
enableSlidingSync = true;
enableNginx = true; enableNginx = true;
settings = { settings = {
@@ -130,6 +137,9 @@ in {
}; };
}; };
services.matrix-synapse.sliding-sync.environmentFile = config.sops.secrets."matrix/sliding-sync/env".path;
services.redis.servers."".enable = true; services.redis.servers."".enable = true;
services.nginx.virtualHosts."matrix.pvv.ntnu.no" = lib.mkMerge [ services.nginx.virtualHosts."matrix.pvv.ntnu.no" = lib.mkMerge [
@@ -147,18 +157,6 @@ in {
''; '';
}; };
} }
{
locations."/_synapse/admin" = {
proxyPass = "http://$synapse_backend";
extraConfig = ''
allow 127.0.0.1;
allow ::1;
allow ${values.hosts.bicep.ipv4};
allow ${values.hosts.bicep.ipv6};
deny all;
'';
};
}
{ {
locations = let locations = let
connectionInfo = w: matrix-lib.workerConnectionResource "metrics" w; connectionInfo = w: matrix-lib.workerConnectionResource "metrics" w;
@@ -172,6 +170,8 @@ in {
extraConfig = '' extraConfig = ''
allow ${values.hosts.ildkule.ipv4}; allow ${values.hosts.ildkule.ipv4};
allow ${values.hosts.ildkule.ipv6}; allow ${values.hosts.ildkule.ipv6};
allow ${values.hosts.ildkule.ipv4_global};
allow ${values.hosts.ildkule.ipv6_global};
deny all; deny all;
''; '';
}) })
@@ -183,6 +183,8 @@ in {
extraConfig = '' extraConfig = ''
allow ${values.hosts.ildkule.ipv4}; allow ${values.hosts.ildkule.ipv4};
allow ${values.hosts.ildkule.ipv6}; allow ${values.hosts.ildkule.ipv6};
allow ${values.hosts.ildkule.ipv4_global};
allow ${values.hosts.ildkule.ipv6_global};
deny all; deny all;
''; '';
}; };

@@ -1,49 +0,0 @@
{ config, lib, pkgs, ... }:
let
cfg = config.services.minecraft-heatmap;
in
{
sops.secrets."minecraft-heatmap/ssh-key/private" = {
mode = "600";
};
sops.secrets."minecraft-heatmap/postgres-passwd" = {
mode = "600";
};
services.minecraft-heatmap = {
enable = true;
database = {
host = "postgres.pvv.ntnu.no";
port = 5432;
name = "minecraft_heatmap";
user = "minecraft_heatmap";
passwordFile = config.sops.secrets."minecraft-heatmap/postgres-passwd".path;
};
};
systemd.services.minecraft-heatmap-ingest-logs = {
serviceConfig.LoadCredential = [
"sshkey:${config.sops.secrets."minecraft-heatmap/ssh-key/private".path}"
];
preStart = let
knownHostsFile = pkgs.writeText "minecraft-heatmap-known-hosts" ''
innovation.pvv.ntnu.no ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIE9O/y5uqcLKCodg2Q+XfZPH/AoUIyBlDhigImU+4+Kn
innovation.pvv.ntnu.no ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQClR9GvWeVPZHudlnFXhGHUX5sGX9nscsOsotnlQ4uVuGsgvRifsVsuDULlAFXwoV1tYp4vnyXlsVtMddpLI5ANOIDcZ4fgDxpfSQmtHKssNpDcfMhFJbfRVyacipjA4osxTxvLox/yjtVt+URjTHUA1MWzEwc26KfiOvWO5tCBTan7doN/4KOyT05GwBxwzUAwUmoGTacIITck2Y9qp4+xFYqehbXqPdBb15hFyd38OCQhtU1hWV2Yi18+hJ4nyjc/g5pr6mW09ULlFghe/BaTUXrTisYC6bMcJZsTDwsvld9581KPvoNZOTQhZPTEQCZZ1h54fe0ZHuveVB3TIHovZyjoUuaf4uiFOjJVaKRB+Ig+Il6r7tMUn9CyHtus/Nd86E0TFBzoKxM0OFu88oaUlDtZVrUJL5En1lGoimajebb1JPxllFN5hqIT+gVyMY6nRzkcfS7ieny/U4rzXY2rfz98selftgh3LsBywwADv65i+mPw1A/1QdND1R6fV4U=
innovation.pvv.ntnu.no ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBNjl3HfsDqmALWCL9uhz9k93RAD2565ndBqUh4N/rvI7MCwEJ6iRCdDev0YzB1Fpg24oriyYoxZRP24ifC2sQf8=
'';
in ''
mkdir -p '${cfg.minecraftLogsDir}'
"${lib.getExe pkgs.rsync}" \
--archive \
--verbose \
--progress \
--no-owner \
--no-group \
--rsh="${pkgs.openssh}/bin/ssh -o UserKnownHostsFile=\"${knownHostsFile}\" -i \"$CREDENTIALS_DIRECTORY\"/sshkey" \
root@innovation.pvv.ntnu.no:/ \
'${cfg.minecraftLogsDir}'/
'';
};
}

@@ -1,4 +1,7 @@
{ config, pkgs, ... }: { config, pkgs, ... }:
let
sslCert = config.security.acme.certs."postgres.pvv.ntnu.no";
in
{ {
services.postgresql = { services.postgresql = {
enable = true; enable = true;
@@ -76,16 +79,12 @@
systemd.services.postgresql.serviceConfig = { systemd.services.postgresql.serviceConfig = {
LoadCredential = [ LoadCredential = [
"cert:/etc/certs/postgres.crt" "cert:${sslCert.directory}/cert.pem"
"key:/etc/certs/postgres.key" "key:${sslCert.directory}/key.pem"
]; ];
}; };
environment.snakeoil-certs."/etc/certs/postgres" = { users.groups.acme.members = [ "postgres" ];
owner = "postgres";
group = "postgres";
subject = "/C=NO/O=Programvareverkstedet/CN=postgres.pvv.ntnu.no/emailAddress=drift@pvv.ntnu.no";
};
networking.firewall.allowedTCPPorts = [ 5432 ]; networking.firewall.allowedTCPPorts = [ 5432 ];
networking.firewall.allowedUDPPorts = [ 5432 ]; networking.firewall.allowedUDPPorts = [ 5432 ];

@@ -0,0 +1,46 @@
{ config, pkgs, values, ... }:
{
imports = [
# Include the results of the hardware scan.
./hardware-configuration.nix
../../base.nix
../../misc/metrics-exporters.nix
./disks.nix
../../misc/builder.nix
];
sops.defaultSopsFile = ../../secrets/bob/bob.yaml;
sops.age.sshKeyPaths = [ "/etc/ssh/ssh_host_ed25519_key" ];
sops.age.keyFile = "/var/lib/sops-nix/key.txt";
sops.age.generateKey = true;
boot.loader.grub = {
enable = true;
efiSupport = true;
efiInstallAsRemovable = true;
};
networking.hostName = "bob"; # Define your hostname.
systemd.network.networks."30-all" = values.defaultNetworkConfig // {
matchConfig.Name = "en*";
DHCP = "yes";
gateway = [ ];
};
# List packages installed in system profile
environment.systemPackages = with pkgs; [
];
# List services that you want to enable:
# This value determines the NixOS release from which the default
# settings for stateful data, like file locations and database versions
# on your system were taken. It's perfectly fine and recommended to leave
# this value at the release version of the first install of this system.
# Before changing this value read the documentation for this option
# (e.g. man configuration.nix or on https://nixos.org/nixos/options.html).
system.stateVersion = "23.05"; # Did you read the comment?
}

hosts/bob/disks.nix (new file, +39 lines)

@@ -0,0 +1,39 @@
# Example of a BIOS-compatible GPT partition layout
{ lib, ... }:
{
disko.devices = {
disk.disk1 = {
device = lib.mkDefault "/dev/sda";
type = "disk";
content = {
type = "gpt";
partitions = {
boot = {
name = "boot";
size = "1M";
type = "EF02";
};
esp = {
name = "ESP";
size = "500M";
type = "EF00";
content = {
type = "filesystem";
format = "vfat";
mountpoint = "/boot";
};
};
root = {
name = "root";
size = "100%";
content = {
type = "filesystem";
format = "ext4";
mountpoint = "/";
};
};
};
};
};
};
}
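This partition layout only takes effect when the disko NixOS module is imported for the host; the diff does not show the flake, so the wiring below is a hedged sketch (input pins, host attribute, and file names are assumptions):

# Hypothetical flake excerpt; only hosts/bob/disks.nix above is taken from the diff.
{
  inputs.nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable";
  inputs.disko.url = "github:nix-community/disko";

  outputs = { self, nixpkgs, disko, ... }: {
    nixosConfigurations.bob = nixpkgs.lib.nixosSystem {
      system = "x86_64-linux";
      modules = [
        disko.nixosModules.disko
        ./hosts/bob/configuration.nix  # assumed path; it already imports ./disks.nix
      ];
    };
  };
}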

@@ -8,32 +8,17 @@
[ (modulesPath + "/profiles/qemu-guest.nix") [ (modulesPath + "/profiles/qemu-guest.nix")
]; ];
boot.initrd.availableKernelModules = [ "ata_piix" "uhci_hcd" "virtio_pci" "virtio_scsi" "sd_mod" "sr_mod" ]; boot.initrd.availableKernelModules = [ "ata_piix" "uhci_hcd" "virtio_pci" "virtio_blk" ];
boot.initrd.kernelModules = [ ]; boot.initrd.kernelModules = [ ];
boot.kernelModules = [ ]; boot.kernelModules = [ ];
boot.extraModulePackages = [ ]; boot.extraModulePackages = [ ];
fileSystems."/" =
{ device = "/dev/disk/by-uuid/d421538f-a260-44ae-8e03-47cac369dcc1";
fsType = "btrfs";
};
fileSystems."/boot" =
{ device = "/dev/disk/by-uuid/86CD-4C23";
fsType = "vfat";
options = [ "fmask=0077" "dmask=0077" ];
};
swapDevices =
[ { device = "/dev/disk/by-uuid/4cfbb41e-801f-40dd-8c58-0a0c1a6025f6"; }
];
# Enables DHCP on each ethernet and wireless interface. In case of scripted networking # Enables DHCP on each ethernet and wireless interface. In case of scripted networking
# (the default) this is the recommended approach. When using systemd-networkd it's # (the default) this is the recommended approach. When using systemd-networkd it's
# still possible to use this option, but it's recommended to use it in conjunction # still possible to use this option, but it's recommended to use it in conjunction
# with explicit per-interface declarations with `networking.interfaces.<interface>.useDHCP`. # with explicit per-interface declarations with `networking.interfaces.<interface>.useDHCP`.
networking.useDHCP = lib.mkDefault true; networking.useDHCP = lib.mkDefault true;
# networking.interfaces.ens18.useDHCP = lib.mkDefault true; # networking.interfaces.ens3.useDHCP = lib.mkDefault true;
nixpkgs.hostPlatform = lib.mkDefault "x86_64-linux"; nixpkgs.hostPlatform = lib.mkDefault "x86_64-linux";
} }

@@ -1,10 +1,10 @@
{ config, fp, pkgs, values, ... }: { config, pkgs, values, ... }:
{ {
imports = [ imports = [
# Include the results of the hardware scan. # Include the results of the hardware scan.
./hardware-configuration.nix ./hardware-configuration.nix
(fp /base) ../../base.nix
(fp /misc/metrics-exporters.nix) ../../misc/metrics-exporters.nix
./services/grzegorz.nix ./services/grzegorz.nix
]; ];

@@ -1,6 +1,6 @@
{ config, fp, ... }: { config, ... }:
{ {
imports = [ (fp /modules/grzegorz.nix) ]; imports = [ ../../../modules/grzegorz.nix ];
services.nginx.virtualHosts."${config.networking.fqdn}" = { services.nginx.virtualHosts."${config.networking.fqdn}" = {
serverAliases = [ serverAliases = [

@@ -0,0 +1,38 @@
{ config, pkgs, values, ... }:
{
imports = [
./hardware-configuration.nix
../../base.nix
../../misc/metrics-exporters.nix
./services/libvirt.nix
];
# buskerud does not support efi?
# boot.loader.systemd-boot.enable = true;
# boot.loader.efi.canTouchEfiVariables = true;
boot.loader.grub.enable = true;
boot.loader.grub.device = "/dev/sdb";
networking.hostName = "buskerud";
networking.search = [ "pvv.ntnu.no" "pvv.org" ];
networking.nameservers = [ "129.241.0.200" "129.241.0.201" ];
networking.tempAddresses = "disabled";
systemd.network.networks."enp3s0f0" = values.defaultNetworkConfig // {
matchConfig.Name = "enp3s0f0";
address = with values.hosts.buskerud; [ (ipv4 + "/25") (ipv6 + "/64") ];
};
# List packages installed in system profile
environment.systemPackages = with pkgs; [
];
# This value determines the NixOS release from which the default
# settings for stateful data, like file locations and database versions
# on your system were taken. It's perfectly fine and recommended to leave
# this value at the release version of the first install of this system.
# Before changing this value read the documentation for this option
# (e.g. man configuration.nix or on https://nixos.org/nixos/options.html).
system.stateVersion = "23.05"; # Did you read the comment?
}

@@ -8,32 +8,29 @@
[ (modulesPath + "/installer/scan/not-detected.nix") [ (modulesPath + "/installer/scan/not-detected.nix")
]; ];
boot.initrd.availableKernelModules = [ "xhci_pci" "ahci" "usbhid" "sd_mod" ]; boot.initrd.availableKernelModules = [ "uhci_hcd" "ehci_pci" "ata_piix" "hpsa" "usb_storage" "usbhid" "sd_mod" "sr_mod" ];
boot.initrd.kernelModules = [ ]; boot.initrd.kernelModules = [ ];
boot.kernelModules = [ "kvm-intel" ]; boot.kernelModules = [ "kvm-intel" ];
boot.extraModulePackages = [ ]; boot.extraModulePackages = [ ];
fileSystems."/" = fileSystems."/" =
{ device = "/dev/disk/by-uuid/a949e2e8-d973-4925-83e4-bcd815e65af7"; { device = "/dev/disk/by-uuid/ed9654fe-575a-4fb3-b6ff-1b059479acff";
fsType = "ext4"; fsType = "ext4";
}; };
fileSystems."/boot" = swapDevices = [ ];
{ device = "/dev/disk/by-uuid/81D6-38D3";
fsType = "vfat";
options = [ "fmask=0077" "dmask=0077" ];
};
swapDevices =
[ { device = "/dev/disk/by-uuid/82c2d7fa-7cd0-4398-8cf6-c892bc56264b"; }
];
# Enables DHCP on each ethernet and wireless interface. In case of scripted networking # Enables DHCP on each ethernet and wireless interface. In case of scripted networking
# (the default) this is the recommended approach. When using systemd-networkd it's # (the default) this is the recommended approach. When using systemd-networkd it's
# still possible to use this option, but it's recommended to use it in conjunction # still possible to use this option, but it's recommended to use it in conjunction
# with explicit per-interface declarations with `networking.interfaces.<interface>.useDHCP`. # with explicit per-interface declarations with `networking.interfaces.<interface>.useDHCP`.
networking.useDHCP = lib.mkDefault true; networking.useDHCP = lib.mkDefault true;
# networking.interfaces.enp0s31f6.useDHCP = lib.mkDefault true; # networking.interfaces.enp14s0f0.useDHCP = lib.mkDefault true;
# networking.interfaces.enp14s0f1.useDHCP = lib.mkDefault true;
# networking.interfaces.enp3s0f0.useDHCP = lib.mkDefault true;
# networking.interfaces.enp3s0f1.useDHCP = lib.mkDefault true;
# networking.interfaces.enp4s0f0.useDHCP = lib.mkDefault true;
# networking.interfaces.enp4s0f1.useDHCP = lib.mkDefault true;
nixpkgs.hostPlatform = lib.mkDefault "x86_64-linux"; nixpkgs.hostPlatform = lib.mkDefault "x86_64-linux";
hardware.cpu.intel.updateMicrocode = lib.mkDefault config.hardware.enableRedistributableFirmware; hardware.cpu.intel.updateMicrocode = lib.mkDefault config.hardware.enableRedistributableFirmware;

@@ -0,0 +1,10 @@
{ config, pkgs, lib, ... }:
{
virtualisation.libvirtd.enable = true;
programs.dconf.enable = true;
boot.kernelModules = [ "kvm-intel" ];
# On a gui-enabled machine, connect with:
# $ virt-manager --connect "qemu+ssh://buskerud/system?socket=/var/run/libvirt/libvirt-sock"
}

@@ -1,12 +1,12 @@
{ config, fp, pkgs, values, ... }: { config, pkgs, values, ... }:
{ {
imports = [ imports = [
# Include the results of the hardware scan. # Include the results of the hardware scan.
./hardware-configuration.nix ./hardware-configuration.nix
(fp /base) ../../base.nix
(fp /misc/metrics-exporters.nix) ../../misc/metrics-exporters.nix
(fp /modules/grzegorz.nix) ../../modules/grzegorz.nix
]; ];
boot.loader.systemd-boot.enable = true; boot.loader.systemd-boot.enable = true;
@@ -25,26 +25,6 @@
# List services that you want to enable: # List services that you want to enable:
services.spotifyd = {
enable = true;
settings.global = {
device_name = "georg";
use_mpris = false;
#dbus_type = "system";
#zeroconf_port = 1234;
};
};
networking.firewall.allowedTCPPorts = [
# config.services.spotifyd.settings.zeroconf_port
5353 # spotifyd is its own mDNS service wtf
];
# This value determines the NixOS release from which the default # This value determines the NixOS release from which the default
# settings for stateful data, like file locations and database versions # settings for stateful data, like file locations and database versions
# on your system were taken. It's perfectly fine and recommended to leave # on your system were taken. It's perfectly fine and recommended to leave

@@ -1,16 +1,16 @@
{ config, fp, pkgs, lib, values, ... }: { config, pkgs, values, ... }:
{ {
imports = [ imports = [
# Include the results of the hardware scan. # Include the results of the hardware scan.
./hardware-configuration.nix ./hardware-configuration.nix
(fp /base) ../../base.nix
(fp /misc/metrics-exporters.nix) ../../misc/metrics-exporters.nix
./services/monitoring ./services/monitoring
./services/nginx ./services/nginx
]; ];
sops.defaultSopsFile = fp /secrets/ildkule/ildkule.yaml; sops.defaultSopsFile = ../../secrets/ildkule/ildkule.yaml;
sops.age.sshKeyPaths = [ "/etc/ssh/ssh_host_ed25519_key" ]; sops.age.sshKeyPaths = [ "/etc/ssh/ssh_host_ed25519_key" ];
sops.age.keyFile = "/var/lib/sops-nix/key.txt"; sops.age.keyFile = "/var/lib/sops-nix/key.txt";
sops.age.generateKey = true; sops.age.generateKey = true;
@@ -19,37 +19,33 @@
boot.tmp.cleanOnBoot = true; boot.tmp.cleanOnBoot = true;
zramSwap.enable = true; zramSwap.enable = true;
# Openstack Neutron and systemd-networkd are not best friends, use something else: networking.hostName = "ildkule"; # Define your hostname.
systemd.network.enable = lib.mkForce false;
networking = let
hostConf = values.hosts.ildkule;
in {
hostName = "ildkule";
tempAddresses = "disabled";
useDHCP = lib.mkForce true;
search = values.defaultNetworkConfig.domains; # Main connection, using the global/floating IP, for communications with the world
nameservers = values.defaultNetworkConfig.dns; systemd.network.networks."30-ntnu-global" = values.openstackGlobalNetworkConfig // {
defaultGateway.address = hostConf.ipv4_internal_gw; matchConfig.Name = "ens4";
interfaces."ens4" = { # Add the global addresses in addition to the local address learned from DHCP
ipv4.addresses = [ addresses = [
{ address = hostConf.ipv4; prefixLength = 32; } { addressConfig.Address = "${values.hosts.ildkule.ipv4_global}/32"; }
{ address = hostConf.ipv4_internal; prefixLength = 24; } { addressConfig.Address = "${values.hosts.ildkule.ipv6_global}/128"; }
]; ];
ipv6.addresses = [ };
{ address = hostConf.ipv6; prefixLength = 64; }
]; # Secondary connection only for use within the university network
}; systemd.network.networks."40-ntnu-internal" = values.openstackLocalNetworkConfig // {
matchConfig.Name = "ens3";
# Add the ntnu-internal addresses in addition to the local address learned from DHCP
addresses = [
{ addressConfig.Address = "${values.hosts.ildkule.ipv4}/32"; }
{ addressConfig.Address = "${values.hosts.ildkule.ipv6}/128"; }
];
}; };
# List packages installed in system profile # List packages installed in system profile
environment.systemPackages = with pkgs; [ environment.systemPackages = with pkgs; [
]; ];
# No devices with SMART
services.smartd.enable = false;
system.stateVersion = "23.11"; # Did you read the comment? system.stateVersion = "23.11"; # Did you read the comment?
} }

@@ -3,14 +3,7 @@
imports = [ (modulesPath + "/profiles/qemu-guest.nix") ]; imports = [ (modulesPath + "/profiles/qemu-guest.nix") ];
boot.initrd.availableKernelModules = [ "ata_piix" "uhci_hcd" "xen_blkfront" "vmw_pvscsi" ]; boot.initrd.availableKernelModules = [ "ata_piix" "uhci_hcd" "xen_blkfront" "vmw_pvscsi" ];
boot.initrd.kernelModules = [ "nvme" ]; boot.initrd.kernelModules = [ "nvme" ];
fileSystems."/" = { fileSystems."/" = { device = "/dev/vda1"; fsType = "ext4"; };
device = "/dev/disk/by-uuid/e35eb4ce-aac3-4f91-8383-6e7cd8bbf942";
fsType = "ext4";
};
fileSystems."/data" = {
device = "/dev/disk/by-uuid/0a4c1234-02d3-4b53-aeca-d95c4c8d534b";
fsType = "ext4";
};
networking.useDHCP = lib.mkDefault true; networking.useDHCP = lib.mkDefault true;
} }

File diff suppressed because it is too large

@@ -56,12 +56,13 @@ in {
url = "https://raw.githubusercontent.com/matrix-org/synapse/develop/contrib/grafana/synapse.json"; url = "https://raw.githubusercontent.com/matrix-org/synapse/develop/contrib/grafana/synapse.json";
options.path = dashboards/synapse.json; options.path = dashboards/synapse.json;
} }
{ # TODO: enable once https://github.com/NixOS/nixpkgs/pull/242365 gets merged
name = "MySQL"; # {
type = "file"; # name = "MySQL";
url = "https://raw.githubusercontent.com/prometheus/mysqld_exporter/main/mysqld-mixin/dashboards/mysql-overview.json"; # type = "file";
options.path = dashboards/mysql.json; # url = "https://raw.githubusercontent.com/prometheus/mysqld_exporter/main/mysqld-mixin/dashboards/mysql-overview.json";
} # options.path = dashboards/mysql.json;
# }
{ {
name = "Postgresql"; name = "Postgresql";
type = "file"; type = "file";
@@ -74,12 +75,6 @@ in {
url = "https://grafana.com/api/dashboards/240/revisions/3/download"; url = "https://grafana.com/api/dashboards/240/revisions/3/download";
options.path = dashboards/go-processes.json; options.path = dashboards/go-processes.json;
} }
{
name = "Gitea Dashboard";
type = "file";
url = "https://grafana.com/api/dashboards/17802/revisions/3/download";
options.path = dashboards/gitea-dashboard.json;
}
]; ];
}; };

@@ -2,7 +2,6 @@
let let
cfg = config.services.loki; cfg = config.services.loki;
stateDir = "/data/monitoring/loki";
in { in {
services.loki = { services.loki = {
enable = true; enable = true;
@@ -17,7 +16,7 @@ in {
ingester = { ingester = {
wal = { wal = {
enabled = true; enabled = true;
dir = "${stateDir}/wal"; dir = "/var/lib/loki/wal";
}; };
lifecycler = { lifecycler = {
address = "127.0.0.1"; address = "127.0.0.1";
@@ -49,12 +48,12 @@ in {
storage_config = { storage_config = {
boltdb_shipper = { boltdb_shipper = {
active_index_directory = "${stateDir}/boltdb-shipper-index"; active_index_directory = "/var/lib/loki/boltdb-shipper-index";
cache_location = "${stateDir}/boltdb-shipper-cache"; cache_location = "/var/lib/loki/boltdb-shipper-cache";
cache_ttl = "24h"; cache_ttl = "24h";
}; };
filesystem = { filesystem = {
directory = "${stateDir}/chunks"; directory = "/var/lib/loki/chunks";
}; };
}; };
@@ -65,14 +64,14 @@ in {
}; };
compactor = { compactor = {
working_directory = "${stateDir}/compactor"; working_directory = "/var/lib/loki/compactor";
}; };
# ruler = { # ruler = {
# storage = { # storage = {
# type = "local"; # type = "local";
# local = { # local = {
# directory = "${stateDir}/rules"; # directory = "/var/lib/loki/rules";
# }; # };
# }; # };
# rule_path = "/etc/loki/rules"; # rule_path = "/etc/loki/rules";

@@ -1,26 +1,18 @@
{ config, ... }: let { config, ... }: {
stateDir = "/data/monitoring/prometheus";
in {
imports = [ imports = [
./exim.nix ./gogs.nix
./gitea.nix
./machines.nix
./matrix-synapse.nix ./matrix-synapse.nix
./mysqld.nix # TODO: enable once https://github.com/NixOS/nixpkgs/pull/242365 gets merged
# ./mysqld.nix
./node.nix
./postgres.nix ./postgres.nix
]; ];
services.prometheus = { services.prometheus = {
enable = true; enable = true;
listenAddress = "127.0.0.1"; listenAddress = "127.0.0.1";
port = 9001; port = 9001;
ruleFiles = [ rules/synapse-v2.rules ]; ruleFiles = [ rules/synapse-v2.rules ];
}; };
fileSystems."/var/lib/prometheus2" = {
device = stateDir;
options = [ "bind" ];
};
} }

@@ -1,14 +0,0 @@
{ ... }:
{
services.prometheus = {
scrapeConfigs = [
{
job_name = "exim";
scrape_interval = "15s";
static_configs = [{
targets = [ "microbel.pvv.ntnu.no:9636" ];
}];
}
];
};
}

@@ -1,16 +0,0 @@
{ ... }:
{
services.prometheus.scrapeConfigs = [{
job_name = "gitea";
scrape_interval = "60s";
scheme = "https";
static_configs = [
{
targets = [
"git.pvv.ntnu.no:443"
];
}
];
}];
}

@@ -0,0 +1,16 @@
{ config, ... }: let
cfg = config.services.prometheus;
in {
services.prometheus.scrapeConfigs = [{
job_name = "git-gogs";
scheme = "https";
metrics_path = "/-/metrics";
static_configs = [
{
targets = [
"essendrop.pvv.ntnu.no:443"
];
}
];
}];
}

@@ -1,36 +0,0 @@
{ config, ... }: let
cfg = config.services.prometheus;
mkHostScrapeConfig = name: ports: {
labels.hostname = name;
targets = map (port: "${name}.pvv.ntnu.no:${toString port}") ports;
};
defaultNodeExporterPort = 9100;
defaultSystemdExporterPort = 9101;
in {
services.prometheus.scrapeConfigs = [{
job_name = "base_info";
static_configs = [
(mkHostScrapeConfig "ildkule" [ cfg.exporters.node.port cfg.exporters.systemd.port ])
(mkHostScrapeConfig "bekkalokk" [ defaultNodeExporterPort defaultSystemdExporterPort ])
(mkHostScrapeConfig "bicep" [ defaultNodeExporterPort defaultSystemdExporterPort ])
(mkHostScrapeConfig "brzeczyszczykiewicz" [ defaultNodeExporterPort defaultSystemdExporterPort ])
(mkHostScrapeConfig "georg" [ defaultNodeExporterPort defaultSystemdExporterPort ])
(mkHostScrapeConfig "kommode" [ defaultNodeExporterPort defaultSystemdExporterPort ])
(mkHostScrapeConfig "ustetind" [ defaultNodeExporterPort defaultSystemdExporterPort ])
(mkHostScrapeConfig "wenche" [ defaultNodeExporterPort defaultSystemdExporterPort ])
(mkHostScrapeConfig "lupine-1" [ defaultNodeExporterPort defaultSystemdExporterPort ])
# (mkHostScrapeConfig "lupine-2" [ defaultNodeExporterPort defaultSystemdExporterPort ])
(mkHostScrapeConfig "lupine-3" [ defaultNodeExporterPort defaultSystemdExporterPort ])
(mkHostScrapeConfig "lupine-4" [ defaultNodeExporterPort defaultSystemdExporterPort ])
(mkHostScrapeConfig "lupine-5" [ defaultNodeExporterPort defaultSystemdExporterPort ])
(mkHostScrapeConfig "hildring" [ defaultNodeExporterPort ])
(mkHostScrapeConfig "isvegg" [ defaultNodeExporterPort ])
(mkHostScrapeConfig "microbel" [ defaultNodeExporterPort ])
];
}];
}
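
For reference, mkHostScrapeConfig above is a pure function; evaluating it for a single host yields a plain static_configs entry, e.g.:

  # mkHostScrapeConfig "bekkalokk" [ 9100 9101 ] evaluates to:
  {
    labels.hostname = "bekkalokk";
    targets = [ "bekkalokk.pvv.ntnu.no:9100" "bekkalokk.pvv.ntnu.no:9101" ];
  }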

@@ -1,22 +1,7 @@
{ config, ... }: let { config, ... }: let
cfg = config.services.prometheus; cfg = config.services.prometheus;
in { in {
sops = { sops.secrets."config/mysqld_exporter" = { };
secrets."config/mysqld_exporter_password" = { };
templates."mysqld_exporter.conf" = {
restartUnits = [ "prometheus-mysqld-exporter.service" ];
content = let
inherit (config.sops) placeholder;
in ''
[client]
host = bicep.pvv.ntnu.no
port = 3306
user = prometheus_mysqld_exporter
password = ${placeholder."config/mysqld_exporter_password"}
'';
};
};
services.prometheus = { services.prometheus = {
scrapeConfigs = [{ scrapeConfigs = [{
@@ -34,7 +19,7 @@ in {
exporters.mysqld = { exporters.mysqld = {
enable = true; enable = true;
configFile = config.sops.templates."mysqld_exporter.conf".path; configFilePath = config.sops.secrets."config/mysqld_exporter".path;
}; };
}; };
} }
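
The left-hand column relies on the sops-nix template mechanism: a template's content may reference config.sops.placeholder.<secret>, and sops-nix substitutes the decrypted value when it renders the file at activation time, exposing the result at config.sops.templates.<name>.path. A minimal sketch of the pattern (secret and file names are illustrative, not taken from this repository):

  { config, ... }: {
    sops.secrets."example/password" = { };
    sops.templates."example.conf" = {
      content = ''
        password = ${config.sops.placeholder."example/password"}
      '';
    };
    # Consumers point at the rendered file:
    #   someService.configFile = config.sops.templates."example.conf".path;
  }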

@@ -0,0 +1,22 @@
{ config, ... }: let
cfg = config.services.prometheus;
in {
services.prometheus.scrapeConfigs = [{
job_name = "node";
static_configs = [
{
targets = [
"ildkule.pvv.ntnu.no:${toString cfg.exporters.node.port}"
"microbel.pvv.ntnu.no:9100"
"isvegg.pvv.ntnu.no:9100"
"knakelibrak.pvv.ntnu.no:9100"
"hildring.pvv.ntnu.no:9100"
"bicep.pvv.ntnu.no:9100"
"essendrop.pvv.ntnu.no:9100"
"andresbu.pvv.ntnu.no:9100"
"bekkalokk.pvv.ntnu.no:9100"
];
}
];
}];
}

@@ -2,7 +2,6 @@
let let
cfg = config.services.uptime-kuma; cfg = config.services.uptime-kuma;
domain = "status.pvv.ntnu.no"; domain = "status.pvv.ntnu.no";
stateDir = "/data/monitoring/uptime-kuma";
in { in {
services.uptime-kuma = { services.uptime-kuma = {
enable = true; enable = true;
@@ -18,9 +17,4 @@ in {
kTLS = true; kTLS = true;
locations."/".proxyPass = "http://${cfg.settings.HOST}:${cfg.settings.PORT}"; locations."/".proxyPass = "http://${cfg.settings.HOST}:${cfg.settings.PORT}";
}; };
fileSystems."/var/lib/uptime-kuma" = {
device = stateDir;
options = [ "bind" ];
};
} }

@@ -1,34 +0,0 @@
{ pkgs, values, fp, ... }:
{
imports = [
# Include the results of the hardware scan.
./hardware-configuration.nix
(fp /base)
(fp /misc/metrics-exporters.nix)
./services/gitea
./services/nginx.nix
];
sops.defaultSopsFile = fp /secrets/kommode/kommode.yaml;
sops.age.sshKeyPaths = [ "/etc/ssh/ssh_host_ed25519_key" ];
sops.age.keyFile = "/var/lib/sops-nix/key.txt";
sops.age.generateKey = true;
boot.loader.systemd-boot.enable = true;
boot.loader.efi.canTouchEfiVariables = true;
networking.hostName = "kommode"; # Define your hostname.
systemd.network.networks."30-ens18" = values.defaultNetworkConfig // {
matchConfig.Name = "ens18";
address = with values.hosts.kommode; [ (ipv4 + "/25") (ipv6 + "/64") ];
};
services.btrfs.autoScrub.enable = true;
environment.systemPackages = with pkgs; [];
system.stateVersion = "24.11";
}

@@ -1,57 +0,0 @@
{ config, pkgs, lib, fp, ... }:
let
cfg = config.services.gitea;
in
{
services.gitea-themes = {
monokai = pkgs.gitea-theme-monokai;
earl-grey = pkgs.gitea-theme-earl-grey;
pitch-black = pkgs.gitea-theme-pitch-black;
catppuccin = pkgs.gitea-theme-catppuccin;
};
systemd.services.gitea-customization = lib.mkIf cfg.enable {
description = "Install extra customization in gitea's CUSTOM_DIR";
wantedBy = [ "gitea.service" ];
requiredBy = [ "gitea.service" ];
serviceConfig = {
Type = "oneshot";
User = cfg.user;
Group = cfg.group;
};
script = let
logo-svg = fp /assets/logo_blue_regular.svg;
logo-png = fp /assets/logo_blue_regular.png;
extraLinks = pkgs.writeText "gitea-extra-links.tmpl" ''
<a class="item" href="https://www.pvv.ntnu.no/">PVV</a>
<a class="item" href="https://wiki.pvv.ntnu.no/">Wiki</a>
<a class="item" href="https://git.pvv.ntnu.no/Drift/-/projects/4">Tokyo Drift Issues</a>
'';
project-labels = (pkgs.formats.yaml { }).generate "gitea-project-labels.yaml" {
labels = lib.importJSON ./labels/projects.json;
};
customTemplates = pkgs.runCommandLocal "gitea-templates" {
nativeBuildInputs = with pkgs; [
coreutils
gnused
];
} ''
# Bigger icons
install -Dm444 "${cfg.package.src}/templates/repo/icon.tmpl" "$out/repo/icon.tmpl"
sed -i -e 's/24/48/g' "$out/repo/icon.tmpl"
'';
in ''
install -Dm444 ${logo-svg} ${cfg.customDir}/public/assets/img/logo.svg
install -Dm444 ${logo-png} ${cfg.customDir}/public/assets/img/logo.png
install -Dm444 ${./loading.apng} ${cfg.customDir}/public/assets/img/loading.png
install -Dm444 ${extraLinks} ${cfg.customDir}/templates/custom/extra_links.tmpl
install -Dm444 ${project-labels} ${cfg.customDir}/options/label/project-labels.yaml
"${lib.getExe pkgs.rsync}" -a "${customTemplates}/" ${cfg.customDir}/templates/
'';
};
}

@@ -1,116 +0,0 @@
[
{
"name": "art",
"exclusive": false,
"color": "#006b75",
"description": "Requires some creativity"
},
{
"name": "big",
"exclusive": false,
"color": "#754bc4",
"description": "This is gonna take a while"
},
{
"name": "blocked",
"exclusive": false,
"color": "#850021",
"description": "This issue/PR depends on one or more other issues/PRs"
},
{
"name": "bug",
"exclusive": false,
"color": "#f05048",
"description": "Something brokey"
},
{
"name": "ci-cd",
"exclusive": false,
"color": "#d1ff78",
"description": "Continuous integrals and continuous derivation"
},
{
"name": "crash report",
"exclusive": false,
"color": "#ed1111",
"description": "Report an oopsie"
},
{
"name": "disputed",
"exclusive": false,
"color": "#5319e7",
"description": "Kranglefanter"
},
{
"name": "documentation",
"exclusive": false,
"color": "#fbca04",
"description": "Documentation changes required"
},
{
"name": "duplicate",
"exclusive": false,
"color": "#cccccc",
"description": "This issue or pull request already exists"
},
{
"name": "feature request",
"exclusive": false,
"color": "#0052cc",
"description": ""
},
{
"name": "good first issue",
"exclusive": false,
"color": "#009800",
"description": "Get your hands dirty with a new project here"
},
{
"name": "me gusta",
"exclusive": false,
"color": "#30ff36",
"description": "( ͡° ͜ʖ ͡°)"
},
{
"name": "packaging",
"exclusive": false,
"color": "#bf642b",
"description": ""
},
{
"name": "question",
"exclusive": false,
"color": "#cc317c",
"description": ""
},
{
"name": "security",
"exclusive": false,
"color": "#ed1111",
"description": "Skommel"
},
{
"name": "techdebt spring cleaning",
"exclusive": false,
"color": "#8c6217",
"description": "The code is smelly 👃"
},
{
"name": "testing",
"exclusive": false,
"color": "#52b373",
"description": "Poke it and see if it explodes"
},
{
"name": "ui/ux",
"exclusive": false,
"color": "#f28852",
"description": "User complaints about ergonomics and economics and whatever"
},
{
"name": "wontfix",
"exclusive": false,
"color": "#ffffff",
"description": "Nei, vil ikke"
}
]

@@ -1,38 +0,0 @@
{ config, pkgs, lib, ... }:
let
cfg = config.services.gitea;
GNUPGHOME = "${config.users.users.gitea.home}/gnupg";
in
{
sops.secrets."gitea/gpg-signing-key" = {
owner = cfg.user;
inherit (cfg) group;
};
systemd.services.gitea.environment = { inherit GNUPGHOME; };
systemd.tmpfiles.settings."20-gitea-gnugpg".${GNUPGHOME}.d = {
inherit (cfg) user group;
mode = "700";
};
systemd.services.gitea-ensure-gnupg-homedir = {
description = "Import gpg key for gitea";
environment = { inherit GNUPGHOME; };
serviceConfig = {
Type = "oneshot";
User = cfg.user;
PrivateNetwork = true;
};
script = ''
${lib.getExe pkgs.gnupg} --import ${config.sops.secrets."gitea/gpg-signing-key".path}
'';
};
services.gitea.settings."repository.signing" = {
SIGNING_KEY = "0549C43374D2253C";
SIGNING_NAME = "PVV Git";
SIGNING_EMAIL = "gitea@git.pvv.ntnu.no";
INITIAL_COMMIT = "always";
};
}
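
services.gitea.settings maps nested attribute sets onto sections of Gitea's app.ini, so the block above ends up as an INI section. A rough illustration using lib.generators.toINI (the gitea module's exact serialization may differ slightly):

  lib.generators.toINI { } {
    "repository.signing" = {
      SIGNING_KEY = "0549C43374D2253C";
      SIGNING_NAME = "PVV Git";
    };
  }
  # evaluates to approximately:
  #   [repository.signing]
  #   SIGNING_KEY=0549C43374D2253C
  #   SIGNING_NAME=PVV Git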

@@ -1,204 +0,0 @@
import requests
import secrets
import os
EMAIL_DOMAIN = os.getenv('EMAIL_DOMAIN')
if EMAIL_DOMAIN is None:
EMAIL_DOMAIN = 'pvv.ntnu.no'
API_TOKEN = os.getenv('API_TOKEN')
if API_TOKEN is None:
raise Exception('API_TOKEN not set')
GITEA_API_URL = os.getenv('GITEA_API_URL')
if GITEA_API_URL is None:
GITEA_API_URL = 'https://git.pvv.ntnu.no/api/v1'
PASSWD_FILE_PATH = os.getenv('PASSWD_FILE_PATH')
if PASSWD_FILE_PATH is None:
PASSWD_FILE_PATH = '/tmp/passwd-import'
def gitea_list_all_users() -> dict[str, dict[str, any]] | None:
r = requests.get(
GITEA_API_URL + '/admin/users',
headers={'Authorization': 'token ' + API_TOKEN}
)
if r.status_code != 200:
print('Failed to get users:', r.text)
return None
return {user['login']: user for user in r.json()}
def gitea_create_user(username: str, userdata: dict[str, any]) -> bool:
r = requests.post(
GITEA_API_URL + '/admin/users',
json=userdata,
headers={'Authorization': 'token ' + API_TOKEN},
)
if r.status_code != 201:
print(f'ERR: Failed to create user {username}:', r.text)
return False
return True
def gitea_edit_user(username: str, userdata: dict[str, any]) -> bool:
r = requests.patch(
GITEA_API_URL + f'/admin/users/{username}',
json=userdata,
headers={'Authorization': 'token ' + API_TOKEN},
)
if r.status_code != 200:
print(f'ERR: Failed to update user {username}:', r.text)
return False
return True
def gitea_list_teams_for_organization(org: str) -> dict[str, any] | None:
r = requests.get(
GITEA_API_URL + f'/orgs/{org}/teams',
headers={'Authorization': 'token ' + API_TOKEN},
)
if r.status_code != 200:
print(f"ERR: Failed to list teams for {org}:", r.text)
return None
return {team['name']: team for team in r.json()}
def gitea_add_user_to_organization_team(username: str, team_id: int) -> bool:
r = requests.put(
GITEA_API_URL + f'/teams/{team_id}/members/{username}',
headers={'Authorization': 'token ' + API_TOKEN},
)
if r.status_code != 204:
print(f'ERR: Failed to add user {username} to org team {team_id}:', r.text)
return False
return True
# If a passwd user has one of the following shells,
# it is most likely not a PVV user, but rather a system user.
# Users with these shells should thus be ignored.
BANNED_SHELLS = [
"/usr/bin/nologin",
"/usr/sbin/nologin",
"/sbin/nologin",
"/bin/false",
"/bin/msgsh",
]
# Reads out a passwd-file line for line, and filters out
# real PVV users (as opposed to system users meant for daemons and such)
def passwd_file_parser(passwd_path):
with open(passwd_path, 'r') as f:
for line in f.readlines():
uid = int(line.split(':')[2])
if uid < 1000:
continue
shell = line.split(':')[-1].strip()
if shell in BANNED_SHELLS:
continue
username = line.split(':')[0]
name = line.split(':')[4].split(',')[0]
yield (username, name)
# This function either creates a new user in gitea
# and fills it out with some default information if
# it does not exist, or ensures that the default information
# is correct if the user already exists. All user information
# (including non-default fields) is pulled from gitea and added
# to the `existing_users` dict
def add_or_patch_gitea_user(
username: str,
name: str,
existing_users: dict[str, dict[str, any]],
) -> None:
user = {
"full_name": name,
"username": username,
"login_name": username,
"source_id": 1, # 1 = SMTP
}
if username not in existing_users:
user["password"] = secrets.token_urlsafe(32)
user["must_change_password"] = False
user["visibility"] = "private"
user["email"] = username + '@' + EMAIL_DOMAIN
if not gitea_create_user(username, user):
return
print('Created user', username)
existing_users[username] = user
else:
user["visibility"] = existing_users[username]["visibility"]
if not gitea_edit_user(username, user):
return
print('Updated user', username)
# This function adds a user to a gitea team (part of organization)
# if the user is not already part of said team.
def ensure_gitea_user_is_part_of_team(
username: str,
org: str,
team_name: str,
) -> None:
teams = gitea_list_teams_for_organization(org)
if teams is None:
return
if team_name not in teams:
print(f'ERR: could not find team "{team_name}" in organization "{org}"')
return
gitea_add_user_to_organization_team(username, teams[team_name]['id'])
print(f'User {username} is now part of {org}/{team_name}')
# List of teams that all users should be part of by default
COMMON_USER_TEAMS = [
("Projects", "Members"),
("Grzegorz", "Members"),
("Kurs", "Members"),
]
def main():
existing_users = gitea_list_all_users()
if existing_users is None:
exit(1)
print(f"Reading passwd entries from {PASSWD_FILE_PATH}")
for username, name in passwd_file_parser(PASSWD_FILE_PATH):
print(f"Processing {username}")
add_or_patch_gitea_user(username, name, existing_users)
for org, team_name in COMMON_USER_TEAMS:
ensure_gitea_user_is_part_of_team(username, org, team_name)
print()
if __name__ == '__main__':
main()
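
A script like this would typically be wired up the same way gitea-web-secret-provider.py is further down, i.e. wrapped with pkgs.writers.writePython3 and handed its environment by a oneshot unit. A hypothetical sketch (the unit name, script filename and secret path are assumptions, not taken from this diff):

  { config, pkgs, ... }: let
    importUsersScript = pkgs.writers.writePython3 "gitea-import-users" {
      libraries = with pkgs.python3Packages; [ requests ];
      flakeIgnore = [ "E501" ];
    } (builtins.readFile ./gitea-import-users.py);  # hypothetical filename
  in {
    sops.secrets."gitea/import-user-token" = { };  # hypothetical secret name
    systemd.services.gitea-import-users = {
      description = "Sync passwd users into gitea";
      serviceConfig.Type = "oneshot";
      environment = {
        GITEA_API_URL = "https://git.pvv.ntnu.no/api/v1";
        PASSWD_FILE_PATH = "/tmp/passwd-import";
      };
      script = ''
        export API_TOKEN="$(cat ${config.sops.secrets."gitea/import-user-token".path})"
        ${importUsersScript}
      '';
    };
  }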

@@ -1,117 +0,0 @@
{ config, pkgs, lib, ... }:
let
organizations = [
"Drift"
"Projects"
"Grzegorz"
"Kurs"
];
giteaCfg = config.services.gitea;
giteaWebSecretProviderScript = pkgs.writers.writePython3 "gitea-web-secret-provider" {
libraries = with pkgs.python3Packages; [ requests ];
flakeIgnore = [
"E501" # Line over 80 chars lol
"E201" # "whitespace after {"
"E202" # "whitespace after }"
"E251" # unexpected spaces around keyword / parameter equals
"W391" # Newline at end of file
];
makeWrapperArgs = [
"--prefix PATH : ${(lib.makeBinPath [ pkgs.openssh ])}"
];
} (builtins.readFile ./gitea-web-secret-provider.py);
in
{
users.groups."gitea-web" = { };
users.users."gitea-web" = {
group = "gitea-web";
isSystemUser = true;
shell = pkgs.bash;
};
sops.secrets."gitea/web-secret-provider/token" = {
owner = "gitea-web";
group = "gitea-web";
restartUnits = [
"gitea-web-secret-provider@"
] ++ (map (org: "gitea-web-secret-provider@${org}") organizations);
};
systemd.slices.system-giteaweb = {
description = "Gitea web directories";
};
# https://www.freedesktop.org/software/systemd/man/latest/systemd.unit.html#Specifiers
# %i - instance name (after the @)
# %d - secrets directory
systemd.services."gitea-web-secret-provider@" = {
description = "Ensure all repos in %i has an SSH key to push web content";
requires = [ "gitea.service" "network.target" ];
serviceConfig = {
Slice = "system-giteaweb.slice";
Type = "oneshot";
ExecStart = let
args = lib.cli.toGNUCommandLineShell { } {
org = "%i";
token-path = "%d/token";
api-url = "${giteaCfg.settings.server.ROOT_URL}api/v1";
key-dir = "/var/lib/gitea-web/keys/%i";
authorized-keys-path = "/var/lib/gitea-web/authorized_keys.d/%i";
rrsync-script = pkgs.writeShellScript "rrsync-chown" ''
mkdir -p "$1"
${lib.getExe pkgs.rrsync} -wo "$1"
${pkgs.coreutils}/bin/chown -R gitea-web:gitea-web "$1"
'';
web-dir = "/var/lib/gitea-web/web";
};
in "${giteaWebSecretProviderScript} ${args}";
User = "gitea-web";
Group = "gitea-web";
StateDirectory = "gitea-web";
StateDirectoryMode = "0750";
LoadCredential = [
"token:${config.sops.secrets."gitea/web-secret-provider/token".path}"
];
NoNewPrivileges = true;
PrivateTmp = true;
PrivateDevices = true;
ProtectSystem = true;
ProtectHome = true;
ProtectControlGroups = true;
ProtectKernelModules = true;
ProtectKernelTunables = true;
RestrictAddressFamilies = [ "AF_INET" "AF_INET6" ];
RestrictRealtime = true;
RestrictSUIDSGID = true;
MemoryDenyWriteExecute = true;
LockPersonality = true;
};
};
systemd.timers."gitea-web-secret-provider@" = {
description = "Ensure all repos in %i has an SSH key to push web content";
timerConfig = {
RandomizedDelaySec = "1h";
Persistent = true;
Unit = "gitea-web-secret-provider@%i.service";
OnCalendar = "daily";
};
};
systemd.targets.timers.wants = map (org: "gitea-web-secret-provider@${org}.timer") organizations;
services.openssh.authorizedKeysFiles = map (org: "/var/lib/gitea-web/authorized_keys.d/${org}") organizations;
users.users.nginx.extraGroups = [ "gitea-web" ];
services.nginx.virtualHosts."pages.pvv.ntnu.no" = {
kTLS = true;
forceSSL = true;
enableACME = true;
root = "/var/lib/gitea-web/web";
};
}
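
Two details of the module above are easy to miss: the per-organization units are ordinary systemd template instances (%i expands to the organization name, %d to the credential directory populated by LoadCredential), and they are instantiated simply by listing the timers. The map on the timers target evaluates to:

  map (org: "gitea-web-secret-provider@${org}.timer") [ "Drift" "Projects" "Grzegorz" "Kurs" ]
  # evaluates to:
  #   [
  #     "gitea-web-secret-provider@Drift.timer"
  #     "gitea-web-secret-provider@Projects.timer"
  #     "gitea-web-secret-provider@Grzegorz.timer"
  #     "gitea-web-secret-provider@Kurs.timer"
  #   ]

Each of those timers starts the matching gitea-web-secret-provider@<org>.service instance on its OnCalendar schedule.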

@@ -1,126 +0,0 @@
import argparse
import hashlib
import os
import requests
import subprocess
from pathlib import Path
def parse_args():
parser = argparse.ArgumentParser(description="Generate SSH keys for Gitea repositories and add them as secrets")
parser.add_argument("--org", required=True, type=str, help="The organization to generate keys for")
parser.add_argument("--token-path", metavar='PATH', required=True, type=Path, help="Path to a file containing the Gitea API token")
parser.add_argument("--api-url", metavar='URL', type=str, help="The URL of the Gitea API", default="https://git.pvv.ntnu.no/api/v1")
parser.add_argument("--key-dir", metavar='PATH', type=Path, help="The directory to store the generated keys in", default="/run/gitea-web-secret-provider")
parser.add_argument("--authorized-keys-path", metavar='PATH', type=Path, help="The path to the resulting authorized_keys file", default="/etc/ssh/authorized_keys.d/gitea-web-secret-provider")
parser.add_argument("--rrsync-script", metavar='PATH', type=Path, help="The path to a rrsync script, taking the destination path as its single argument")
parser.add_argument("--web-dir", metavar='PATH', type=Path, help="The directory to sync the repositories to", default="/var/www")
parser.add_argument("--force", action="store_true", help="Overwrite existing keys")
return parser.parse_args()
def add_secret(args: argparse.Namespace, token: str, repo: str, name: str, secret: str):
result = requests.put(
f"{args.api_url}/repos/{args.org}/{repo}/actions/secrets/{name}",
json = { 'data': secret },
headers = { 'Authorization': 'token ' + token },
)
if result.status_code not in (201, 204):
raise Exception(f"Failed to add secret: {result.json()}")
def get_org_repo_list(args: argparse.Namespace, token: str):
result = requests.get(
f"{args.api_url}/orgs/{args.org}/repos",
headers = { 'Authorization': 'token ' + token },
)
results = [repo["name"] for repo in result.json()]
target = int(result.headers['X-Total-Count'])
i = 2
while len(results) < target:
result = requests.get(
f"{args.api_url}/orgs/{args.org}/repos",
params = { 'page': i },
headers = { 'Authorization': 'token ' + token },
)
results += [repo["name"] for repo in result.json()]
i += 1
return results
def generate_ssh_key(args: argparse.Namespace, repository: str):
keyname = hashlib.sha256(args.org.encode() + repository.encode()).hexdigest()
key_path = args.key_dir / keyname
if not key_path.is_file() or args.force:
subprocess.run(
[
"ssh-keygen",
*("-t", "ed25519"),
*("-f", key_path),
*("-N", ""),
*("-C", f"{args.org}/{repository}"),
],
check=True,
stdin=subprocess.DEVNULL,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
print(f"Generated SSH key for `{args.org}/{repository}`")
with open(key_path, "r") as f:
private_key = f.read()
pub_key_path = args.key_dir / (keyname + '.pub')
with open(pub_key_path, "r") as f:
public_key = f.read()
return private_key, public_key
SSH_OPTS = ",".join([
"restrict",
"no-agent-forwarding",
"no-port-forwarding",
"no-pty",
"no-X11-forwarding",
])
def generate_authorized_keys(args: argparse.Namespace, repo_public_keys: list[tuple[str, str]]):
lines = []
for repo, public_key in repo_public_keys:
command = f"{args.rrsync_script} {args.web_dir}/{args.org}/{repo}"
lines.append(f'command="{command}",{SSH_OPTS} {public_key}')
with open(args.authorized_keys_path, "w") as f:
f.writelines(lines)
def main():
args = parse_args()
with open(args.token_path, "r") as f:
token = f.read().strip()
os.makedirs(args.key_dir, 0o700, exist_ok=True)
os.makedirs(args.authorized_keys_path.parent, 0o700, exist_ok=True)
repos = get_org_repo_list(args, token)
print(f'Found {len(repos)} repositories in `{args.org}`')
repo_public_keys = []
for repo in repos:
print(f"Locating key for `{args.org}/{repo}`")
private_key, public_key = generate_ssh_key(args, repo)
add_secret(args, token, repo, "WEB_SYNC_SSH_KEY", private_key)
repo_public_keys.append((repo, public_key))
generate_authorized_keys(args, repo_public_keys)
print(f"Wrote authorized_keys file to `{args.authorized_keys_path}`")
if __name__ == "__main__":
main()

@@ -1,4 +0,0 @@
{ ... }:
{
services.nginx.enable = true;
}

@@ -1,35 +0,0 @@
{ fp, values, lupineName, ... }:
{
imports = [
./hardware-configuration/${lupineName}.nix
(fp /base)
(fp /misc/metrics-exporters.nix)
./services/gitea-runner.nix
];
sops.defaultSopsFile = fp /secrets/lupine/lupine.yaml;
sops.age.sshKeyPaths = [ "/etc/ssh/ssh_host_ed25519_key" ];
sops.age.keyFile = "/var/lib/sops-nix/key.txt";
sops.age.generateKey = true;
boot.loader.systemd-boot.enable = true;
boot.loader.efi.canTouchEfiVariables = true;
systemd.network.networks."30-enp0s31f6" = values.defaultNetworkConfig // {
matchConfig.Name = "enp0s31f6";
address = with values.hosts.${lupineName}; [ (ipv4 + "/25") (ipv6 + "/64") ];
networkConfig.LLDP = false;
};
systemd.network.wait-online = {
anyInterface = true;
};
# There are no smart devices
services.smartd.enable = false;
# Do not change, even during upgrades.
# See https://search.nixos.org/options?show=system.stateVersion
system.stateVersion = "25.05";
}

@@ -1,40 +0,0 @@
# Do not modify this file! It was generated by nixos-generate-config
# and may be overwritten by future invocations. Please make changes
# to /etc/nixos/configuration.nix instead.
{ config, lib, pkgs, modulesPath, ... }:
{
imports =
[ (modulesPath + "/installer/scan/not-detected.nix")
];
boot.initrd.availableKernelModules = [ "xhci_pci" "ahci" "usbhid" "sd_mod" ];
boot.initrd.kernelModules = [ ];
boot.kernelModules = [ "kvm-intel" ];
boot.extraModulePackages = [ ];
fileSystems."/" =
{ device = "/dev/disk/by-uuid/aa81d439-800b-403d-ac10-9d2aac3619d0";
fsType = "ext4";
};
fileSystems."/boot" =
{ device = "/dev/disk/by-uuid/4A34-6AE5";
fsType = "vfat";
options = [ "fmask=0077" "dmask=0077" ];
};
swapDevices =
[ { device = "/dev/disk/by-uuid/efb7cd0c-c1ae-4a86-8bc2-8e7fd0066650"; }
];
# Enables DHCP on each ethernet and wireless interface. In case of scripted networking
# (the default) this is the recommended approach. When using systemd-networkd it's
# still possible to use this option, but it's recommended to use it in conjunction
# with explicit per-interface declarations with `networking.interfaces.<interface>.useDHCP`.
networking.useDHCP = lib.mkDefault true;
# networking.interfaces.enp0s31f6.useDHCP = lib.mkDefault true;
nixpkgs.hostPlatform = lib.mkDefault "x86_64-linux";
hardware.cpu.intel.updateMicrocode = lib.mkDefault config.hardware.enableRedistributableFirmware;
}

@@ -1,40 +0,0 @@
# Do not modify this file! It was generated by nixos-generate-config
# and may be overwritten by future invocations. Please make changes
# to /etc/nixos/configuration.nix instead.
{ config, lib, pkgs, modulesPath, ... }:
{
imports =
[ (modulesPath + "/installer/scan/not-detected.nix")
];
boot.initrd.availableKernelModules = [ "xhci_pci" "ahci" "usbhid" "sd_mod" ];
boot.initrd.kernelModules = [ ];
boot.kernelModules = [ "kvm-intel" ];
boot.extraModulePackages = [ ];
fileSystems."/" =
{ device = "/dev/disk/by-uuid/39ba059b-3205-4701-a832-e72c0122cb88";
fsType = "ext4";
};
fileSystems."/boot" =
{ device = "/dev/disk/by-uuid/63FA-297B";
fsType = "vfat";
options = [ "fmask=0077" "dmask=0077" ];
};
swapDevices =
[ { device = "/dev/disk/by-uuid/9c72eb54-ea8c-4b09-808a-8be9b9a33869"; }
];
# Enables DHCP on each ethernet and wireless interface. In case of scripted networking
# (the default) this is the recommended approach. When using systemd-networkd it's
# still possible to use this option, but it's recommended to use it in conjunction
# with explicit per-interface declarations with `networking.interfaces.<interface>.useDHCP`.
networking.useDHCP = lib.mkDefault true;
# networking.interfaces.enp0s31f6.useDHCP = lib.mkDefault true;
nixpkgs.hostPlatform = lib.mkDefault "x86_64-linux";
hardware.cpu.intel.updateMicrocode = lib.mkDefault config.hardware.enableRedistributableFirmware;
}

@@ -1,34 +0,0 @@
# Do not modify this file! It was generated by nixos-generate-config
# and may be overwritten by future invocations. Please make changes
# to /etc/nixos/configuration.nix instead.
{ config, lib, pkgs, modulesPath, ... }:
{
imports =
[ (modulesPath + "/installer/scan/not-detected.nix")
];
boot.initrd.availableKernelModules = [ "xhci_pci" "ahci" "usbhid" "sd_mod" ];
boot.initrd.kernelModules = [ ];
boot.kernelModules = [ "kvm-intel" ];
boot.extraModulePackages = [ ];
fileSystems."/" =
{ device = "/dev/disk/by-uuid/c7bbb293-a0a3-4995-8892-0ec63e8c67dd";
fsType = "ext4";
};
swapDevices =
[ { device = "/dev/disk/by-uuid/a86ffda8-8ecb-42a1-bf9f-926072e90ca5"; }
];
# Enables DHCP on each ethernet and wireless interface. In case of scripted networking
# (the default) this is the recommended approach. When using systemd-networkd it's
# still possible to use this option, but it's recommended to use it in conjunction
# with explicit per-interface declarations with `networking.interfaces.<interface>.useDHCP`.
networking.useDHCP = lib.mkDefault true;
# networking.interfaces.enp0s31f6.useDHCP = lib.mkDefault true;
nixpkgs.hostPlatform = lib.mkDefault "x86_64-linux";
hardware.cpu.intel.updateMicrocode = lib.mkDefault config.hardware.enableRedistributableFirmware;
}

@@ -1,40 +0,0 @@
# Do not modify this file! It was generated by nixos-generate-config
# and may be overwritten by future invocations. Please make changes
# to /etc/nixos/configuration.nix instead.
{ config, lib, pkgs, modulesPath, ... }:
{
imports =
[ (modulesPath + "/installer/scan/not-detected.nix")
];
boot.initrd.availableKernelModules = [ "xhci_pci" "ahci" "usbhid" "sd_mod" ];
boot.initrd.kernelModules = [ ];
boot.kernelModules = [ "kvm-intel" ];
boot.extraModulePackages = [ ];
fileSystems."/" =
{ device = "/dev/disk/by-uuid/5f8418ad-8ec1-4f9e-939e-f3a4c36ef343";
fsType = "ext4";
};
fileSystems."/boot" =
{ device = "/dev/disk/by-uuid/F372-37DF";
fsType = "vfat";
options = [ "fmask=0077" "dmask=0077" ];
};
swapDevices =
[ { device = "/dev/disk/by-uuid/27bf292d-bbb3-48c4-a86e-456e0f1f648f"; }
];
# Enables DHCP on each ethernet and wireless interface. In case of scripted networking
# (the default) this is the recommended approach. When using systemd-networkd it's
# still possible to use this option, but it's recommended to use it in conjunction
# with explicit per-interface declarations with `networking.interfaces.<interface>.useDHCP`.
networking.useDHCP = lib.mkDefault true;
# networking.interfaces.enp0s31f6.useDHCP = lib.mkDefault true;
nixpkgs.hostPlatform = lib.mkDefault "x86_64-linux";
hardware.cpu.intel.updateMicrocode = lib.mkDefault config.hardware.enableRedistributableFirmware;
}

@@ -1,45 +0,0 @@
{ config, lupineName, ... }:
{
# This is unfortunately state, and has to be generated one at a time :(
# To do that, comment out all except one of the runners, fill in its token
# inside the sops file, rebuild the system, and only after this runner has
# successfully registered will gitea give you the next token.
# - oysteikt Sep 2023
sops = {
secrets."gitea/runners/token" = {
key = "gitea/runners/${lupineName}";
};
templates."gitea-runner-envfile" = {
restartUnits = [
"gitea-runner-${lupineName}.service"
];
content = ''
TOKEN="${config.sops.placeholder."gitea/runners/token"}"
'';
};
};
services.gitea-actions-runner.instances = {
${lupineName} = {
enable = true;
name = "git-runner-${lupineName}";
url = "https://git.pvv.ntnu.no";
labels = [
"debian-latest:docker://node:current-bookworm"
"ubuntu-latest:docker://node:current-bookworm"
];
tokenFile = config.sops.templates."gitea-runner-envfile".path;
};
};
virtualisation.podman = {
enable = true;
defaultNetwork.settings.dns_enabled = true;
autoPrune.enable = true;
};
networking.dhcpcd.IPv6rs = false;
networking.firewall.interfaces."podman+".allowedUDPPorts = [53 5353];
}

@@ -1,13 +1,13 @@
{ config, fp, pkgs, values, ... }: { config, pkgs, values, ... }:
{ {
imports = [ imports = [
# Include the results of the hardware scan. # Include the results of the hardware scan.
./hardware-configuration.nix ./hardware-configuration.nix
(fp /base) ../../base.nix
(fp /misc/metrics-exporters.nix) ../../misc/metrics-exporters.nix
]; ];
sops.defaultSopsFile = fp /secrets/shark/shark.yaml; sops.defaultSopsFile = ../../secrets/shark/shark.yaml;
sops.age.sshKeyPaths = [ "/etc/ssh/ssh_host_ed25519_key" ]; sops.age.sshKeyPaths = [ "/etc/ssh/ssh_host_ed25519_key" ];
sops.age.keyFile = "/var/lib/sops-nix/key.txt"; sops.age.keyFile = "/var/lib/sops-nix/key.txt";
sops.age.generateKey = true; sops.age.generateKey = true;
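
The fp argument that replaces the ../../ relative paths here looks like a small flake-root path helper passed in via specialArgs; its definition is not part of this diff. A hypothetical sketch of such a helper:

  # e.g. in flake.nix (assumed, not shown in this compare):
  #   specialArgs.fp = path: self + toString path;
  # so that
  #   fp /secrets/shark/shark.yaml
  # resolves to <flake root>/secrets/shark/shark.yaml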

@@ -1,44 +0,0 @@
{ config, fp, pkgs, lib, values, ... }:
{
imports = [
(fp /base)
(fp /misc/metrics-exporters.nix)
./services/gitea-runners.nix
];
sops.defaultSopsFile = fp /secrets/ustetind/ustetind.yaml;
sops.age.sshKeyPaths = [ "/etc/ssh/ssh_host_ed25519_key" ];
sops.age.keyFile = "/var/lib/sops-nix/key.txt";
sops.age.generateKey = true;
networking.hostName = "ustetind";
networking.useHostResolvConf = lib.mkForce false;
systemd.network.networks = {
"30-lxc-eth" = values.defaultNetworkConfig // {
matchConfig = {
Type = "ether";
Kind = "veth";
Name = [
"eth*"
];
};
address = with values.hosts.ustetind; [ (ipv4 + "/25") (ipv6 + "/64") ];
};
"40-podman-veth" = values.defaultNetworkConfig // {
matchConfig = {
Type = "ether";
Kind = "veth";
Name = [
"veth*"
];
};
DHCP = "yes";
};
};
system.stateVersion = "24.11";
}

@@ -1,39 +0,0 @@
{ config, fp, pkgs, values, lib, ... }:
{
imports = [
# Include the results of the hardware scan.
./hardware-configuration.nix
(fp /base)
(fp /misc/metrics-exporters.nix)
(fp /misc/builder.nix)
];
sops.defaultSopsFile = fp /secrets/wenche/wenche.yaml;
sops.age.sshKeyPaths = [ "/etc/ssh/ssh_host_ed25519_key" ];
sops.age.keyFile = "/var/lib/sops-nix/key.txt";
sops.age.generateKey = true;
boot.loader.grub.device = "/dev/sda";
networking.hostName = "wenche"; # Define your hostname.
systemd.network.networks."30-ens18" = values.defaultNetworkConfig // {
matchConfig.Name = "ens18";
address = with values.hosts.wenche; [ (ipv4 + "/25") (ipv6 + "/64") ];
};
hardware.graphics.enable = true;
services.xserver.videoDrivers = [ "nvidia" ];
hardware.nvidia = {
modesetting.enable = true;
open = false;
package = config.boot.kernelPackages.nvidiaPackages.production;
};
# List packages installed in system profile
environment.systemPackages = with pkgs; [
];
system.stateVersion = "24.11"; # Did you read the comment?
}

@@ -1,27 +0,0 @@
{ config, lib, pkgs, modulesPath, ... }:
{
imports =
[ (modulesPath + "/profiles/qemu-guest.nix")
];
boot.initrd.availableKernelModules = [ "ata_piix" "uhci_hcd" "virtio_pci" "virtio_scsi" "sd_mod" "sr_mod" ];
boot.initrd.kernelModules = [ ];
boot.kernelModules = [ "nvidia" ];
boot.extraModulePackages = [ ];
fileSystems."/" =
{ device = "/dev/disk/by-uuid/4e8ecdd2-d453-4fff-b952-f06da00f3b85";
fsType = "ext4";
};
swapDevices = [ {
device = "/var/lib/swapfile";
size = 16*1024;
} ];
networking.useDHCP = lib.mkDefault false;
# networking.interfaces.ens18.useDHCP = lib.mkDefault true;
nixpkgs.hostPlatform = lib.mkDefault "x86_64-linux";
}

@@ -1,56 +1,25 @@
set positional-arguments # makes variables accessible as $1 $2 $@
export GUM_FILTER_HEIGHT := "15" export GUM_FILTER_HEIGHT := "15"
nom := `if [[ -t 1 ]] && command -v nom >/dev/null; then echo nom; else echo nix; fi` nom := `if command -v nom >/dev/null; then echo nom; else echo nix; fi`
nix_eval_opts := "--log-format raw --option warn-dirty false"
@_default: @_default:
just "$(gum choose --ordered --header "Pick a recipie..." $(just --summary --unsorted))" just "$(gum choose --ordered --header "Pick a recipie..." $(just --summary --unsorted))"
check *_: check:
nix flake check --keep-going "$@" nix flake check --keep-going
build-machine machine=`just _a_machine` *_: build-machine machine=`just _a_machine`:
{{nom}} build .#nixosConfigurations.{{ machine }}.config.system.build.toplevel "${@:2}" {{nom}} build .#nixosConfigurations.{{ machine }}.config.system.build.toplevel
run-vm machine=`just _a_machine` *_: run-vm machine=`just _a_machine`:
nixos-rebuild build-vm --flake .#{{ machine }} "${@:2}" nixos-rebuild build-vm --flake .#{{ machine }}
QEMU_NET_OPTS="hostfwd=tcp::8080-:80,hostfwd=tcp::8081-:443,hostfwd=tcp::2222-:22" ./result/bin/run-*-vm QEMU_NET_OPTS="hostfwd=tcp::8080-:80,hostfwd=tcp::8081-:443,hostfwd=tcp::2222-:22" ./result/bin/run-*-vm
@update-inputs *_: @update-inputs:
@git reset flake.lock nix eval .#inputs --apply builtins.attrNames --json \
@git restore flake.lock | jq '.[]' -r \
nix eval {{nix_eval_opts}} --file flake.nix --apply 'x: builtins.attrNames x.inputs' --json \ | gum choose --no-limit --height=15 \
| { printf "%s\n" --commit-lock-file; jq '.[]' -r | grep -vxF "self" ||:; } \ | xargs nix flake update --commit-lock-file
| gum choose --no-limit --header "Choose extra arguments:" \
| tee >(xargs -d'\n' echo + nix flake update "$@" >&2) \
| xargs -d'\n' nix flake update "$@"
@repl $machine=`just _a_machine` *_:
set -v; nixos-rebuild --flake .#"$machine" repl "${@:2}"
@eval $machine=`just _a_machine` $attrpath="system.build.toplevel.outPath" *_:
set -v; nix eval {{nix_eval_opts}} ".#nixosConfigurations.\"$machine\".config.$attrpath" --show-trace "${@:3}"
@eval-vm $machine=`just _a_machine` $attrpath="system.build.toplevel.outPath" *_:
just eval "$machine" "virtualisation.vmVariant.$attrpath" "${@:3}"
# helpers
[no-exit-message]
_a_machine: _a_machine:
#!/usr/bin/env -S sh -euo pipefail nix eval .#nixosConfigurations --apply builtins.attrNames --json | jq .[] -r | gum filter
machines="$(
nix eval {{nix_eval_opts}} .#nixosConfigurations --apply builtins.attrNames --json | jq .[] -r
)"
[ -n "$machines" ] || { echo >&2 "ERROR: no machines found"; false; }
if [ -s .direnv/vars/last-machine.txt ]; then
machines="$(
grep <<<"$machines" -xF "$(cat .direnv/vars/last-machine.txt)" ||:
grep <<<"$machines" -xFv "$(cat .direnv/vars/last-machine.txt)" ||:
)"
fi
choice="$(gum filter <<<"$machines")"
mkdir -p .direnv/vars
cat <<<"$choice" >.direnv/vars/last-machine.txt
cat <<<"$choice"

Some files were not shown because too many files have changed in this diff