Compare commits

..

8 Commits

26d68fba3e  update README to add host
    All checks were successful: Eval nix flake / evals (push), Successful in 8m55s
    2026-02-14 19:10:43 +01:00

3b15d1c2c4  fix skrot and skrott conflict
    All checks were successful: Eval nix flake / evals (push), Successful in 8m49s; Eval nix flake / evals (pull_request), Successful in 8m49s
    2026-02-14 18:22:27 +01:00

036f0e1701  fix ttyUSB0 things  [System administrator]
    Some checks failed: Eval nix flake / evals (push), Failing after 6m45s; Eval nix flake / evals (pull_request), Failing after 6m44s
    2026-02-13 19:02:02 +01:00

c1ada2f94d  fix sops  [System administrator]
    Some checks failed: Eval nix flake / evals (push), Failing after 5m54s
    2026-02-13 17:57:44 +01:00

9d7cadbcbe  other decrypt yaml file thing
    Some checks failed: Eval nix flake / evals (push), Failing after 5m59s
    2026-02-10 21:02:54 +01:00

9048261756  sopsing  [System administrator]
    Some checks failed: Eval nix flake / evals (push), Failing after 5m49s
    2026-02-10 20:53:05 +01:00

7ba8b47d7d  setup nix for skrot  [System administrator]
    All checks were successful: Eval nix flake / evals (push), Successful in 8m58s
    2026-02-10 15:12:53 +01:00

f88b81672a  skrot: init
    All checks were successful: Eval nix flake / evals (push), Successful in 10m15s
    2026-02-08 00:05:58 +01:00
134 changed files with 2453 additions and 3932 deletions

View File

@@ -43,7 +43,7 @@ revert the changes on the next nightly rebuild (tends to happen when everybody i
| [kommode][kom] | Virtual | Gitea + Gitea pages |
| [lupine][lup] | Physical | Gitea CI/CD runners |
| shark | Virtual | Test host for authentication, absolutely horrendous |
| [skrot/skrott][skr] | Physical | Kiosk, snacks and soda |
| [skrott/skrot][skr] | Physical | Kiosk, snacks and soda |
| [wenche][wen] | Virtual | Nix-builders, general purpose compute |
## Documentation

View File

@@ -1,9 +1,4 @@
{
lib,
config,
inputs,
...
}:
{ lib, config, inputs, ... }:
{
nix = {
gc = {
@@ -16,21 +11,16 @@
allow-dirty = true;
auto-allocate-uids = true;
builders-use-substitutes = true;
experimental-features = [
"nix-command"
"flakes"
"auto-allocate-uids"
];
experimental-features = [ "nix-command" "flakes" "auto-allocate-uids" ];
log-lines = 50;
use-xdg-base-directories = true;
};
/*
This makes commandline tools like
** nix run nixpkgs#hello
** and nix-shell -p hello
** use the same channel the system
** was built with
/* This makes commandline tools like
** nix run nixpkgs#hello
** and nix-shell -p hello
** use the same channel the system
** was built with
*/
registry = lib.mkMerge [
{
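For reference, a minimal sketch of the registry pinning that the comment above describes, assuming the relevant flake input is exposed as `inputs.nixpkgs` (a simplified stand-in, not the repo's exact lib.mkMerge contents):

  # Pin both the flake registry entry and the legacy NIX_PATH channel to the same
  # nixpkgs the system was built from, so `nix run nixpkgs#hello` and
  # `nix-shell -p hello` resolve against the identical package set.
  nix.registry.nixpkgs.flake = inputs.nixpkgs;
  nix.nixPath = [ "nixpkgs=flake:nixpkgs" ];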

View File

@@ -2,7 +2,7 @@
{
security.acme = {
acceptTerms = true;
defaults.email = "acme-drift@pvv.ntnu.no";
defaults.email = "drift@pvv.ntnu.no";
};
# Let's not spam LetsEncrypt in `nixos-rebuild build-vm` mode:
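One hedged way to implement the comment above (an assumption for illustration, not necessarily this module's actual approach): point ACME at the Let's Encrypt staging endpoint whenever the configuration is built as a VM variant, e.g.

  # Hypothetical sketch, assuming `lib` and `config` are in scope and that the
  # repo's `virtualisation.isVmVariant` flag is set by the build-vm wrapper:
  security.acme.defaults.server = lib.mkIf config.virtualisation.isVmVariant
    "https://acme-staging-v02.api.letsencrypt.org/directory";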

View File

@@ -1,10 +1,4 @@
{
config,
inputs,
pkgs,
lib,
...
}:
{ config, inputs, pkgs, lib, ... }:
let
inputUrls = lib.mapAttrs (input: value: value.url) (import "${inputs.self}/flake.nix").inputs;
@@ -22,34 +16,26 @@ in
# --update-input is deprecated since nix 2.22, and removed in lix 2.90
# as such we instead use --override-input combined with --refresh
# https://git.lix.systems/lix-project/lix/issues/400
]
++ (lib.pipe inputUrls [
] ++ (lib.pipe inputUrls [
(lib.intersectAttrs {
nixpkgs = { };
nixpkgs-unstable = { };
})
(lib.mapAttrsToList (
input: url: [
"--override-input"
input
url
]
))
(lib.mapAttrsToList (input: url: ["--override-input" input url]))
lib.concatLists
]);
};
# workaround for https://github.com/NixOS/nix/issues/6895
# via https://git.lix.systems/lix-project/lix/issues/400
environment.etc =
lib.mkIf (!config.virtualisation.isVmVariant && config.system.autoUpgrade.enable)
{
"current-system-flake-inputs.json".source = pkgs.writers.writeJSON "flake-inputs.json" (
lib.flip lib.mapAttrs inputs (
name: input:
# inputs.*.sourceInfo sans outPath, since writeJSON will otherwise serialize sourceInfo like a derivation
lib.removeAttrs (input.sourceInfo or { }) [ "outPath" ] // { store-path = input.outPath; } # comment this line if you don't want to retain a store reference to the flake inputs
)
);
};
environment.etc = lib.mkIf (!config.virtualisation.isVmVariant && config.system.autoUpgrade.enable) {
"current-system-flake-inputs.json".source
= pkgs.writers.writeJSON "flake-inputs.json" (
lib.flip lib.mapAttrs inputs (name: input:
# inputs.*.sourceInfo sans outPath, since writeJSON will otherwise serialize sourceInfo like a derivation
lib.removeAttrs (input.sourceInfo or {}) [ "outPath" ]
// { store-path = input.outPath; } # comment this line if you don't want to retain a store reference to the flake inputs
)
);
};
}
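To make the intersectAttrs/mapAttrsToList/concatLists pipe above concrete, here is a hedged example of the extra flags it contributes to system.autoUpgrade.flags, with hypothetical input URLs standing in for whatever flake.nix actually pins:

  # Assuming inputUrls contains, say,
  #   nixpkgs          = "github:NixOS/nixpkgs/nixos-25.05"
  #   nixpkgs-unstable = "github:NixOS/nixpkgs/nixos-unstable"
  # the pipe keeps only those two inputs and flattens them into
  [
    "--override-input" "nixpkgs"          "github:NixOS/nixpkgs/nixos-25.05"
    "--override-input" "nixpkgs-unstable" "github:NixOS/nixpkgs/nixos-unstable"
  ]
  # which, together with --refresh, stands in for the removed --update-input flag.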

View File

@@ -1,4 +1,4 @@
{ ... }:
{
services.irqbalance.enable = true;
}
}

View File

@@ -1,9 +1,4 @@
{
config,
lib,
values,
...
}:
{ config, lib, values, ... }:
let
cfg = config.services.journald.upload;
in

View File

@@ -1,10 +1,7 @@
{ ... }:
{
systemd.services.logrotate = {
documentation = [
"man:logrotate(8)"
"man:logrotate.conf(5)"
];
documentation = [ "man:logrotate(8)" "man:logrotate.conf(5)" ];
unitConfig.RequiresMountsFor = "/var/log";
serviceConfig.ReadWritePaths = [ "/var/log" ];
};

View File

@@ -11,10 +11,7 @@
};
};
networking.firewall.allowedTCPPorts = lib.mkIf config.services.nginx.enable [
80
443
];
networking.firewall.allowedTCPPorts = lib.mkIf config.services.nginx.enable [ 80 443 ];
services.nginx = {
recommendedTlsSettings = true;

View File

@@ -12,9 +12,10 @@
settings.PermitRootLogin = "yes";
};
users.users."root".openssh.authorizedKeys.keys = [
"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCqVt4LCe0YIttr9swFxjkjn37ZDY9JxwVC+2gvfSINDJorOCtqPjDOTD2fTS1Gz08QCwpnLWq2kyvRchu6WgriAbSACpbZZBgxRaF/FVh3oiMVFGnNKGnv6/fdo/vZtu8mUVuqtmTrgLYpZdbR4oD3XiBlDKs7Cv5hPqt95lnP6MNFvE8mICCfd1PwhsABd2IQ5laz3u77/RXhNFJL0Kf2/+6gk9awcLuwHrPdvq7c3BxRHbc9UMRQENyjyQPa7aLe+uJBFLKP51I8VBuDpDacuibQx7nMt6N2UJ2KWI0JxRMHuJNq4S5jidR82aOw9gzGbTv30SKNLMqsZ0xj4LtdqCXDiZF6Lr09PsJYsvnBUFWa14HGcThKDtgwQwBryNViYmfv//0h9+RLZiU0ab+NEwSs7Zh5iAD+vhx64QqNX3tR7Le4SWXh8W0eShU9N78qYdSkiC3Ui7htxeqOocXM/P4AwbnHsLELIvkHdvgchCPvl8ygZa4WJTEWv16+ICskJcAKWGuqjvXAFuwjJJmPp9xLW9O0DFfQhMELiGamQR9wK07yYQVr34iah6qZO7cwhSKyEPFrVPIaNtfDhsjED639F7vmktf26SWNJHWfW0wOHILjI6TgqUvy0JDd8W8w0CHlAfz6Fs2l99NNgNF8dB3vBASbxS0hu/y0PVu/xQ== openstack-sleipner"
users.users."root".openssh.authorizedKeys.keys = [
"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCqVt4LCe0YIttr9swFxjkjn37ZDY9JxwVC+2gvfSINDJorOCtqPjDOTD2fTS1Gz08QCwpnLWq2kyvRchu6WgriAbSACpbZZBgxRaF/FVh3oiMVFGnNKGnv6/fdo/vZtu8mUVuqtmTrgLYpZdbR4oD3XiBlDKs7Cv5hPqt95lnP6MNFvE8mICCfd1PwhsABd2IQ5laz3u77/RXhNFJL0Kf2/+6gk9awcLuwHrPdvq7c3BxRHbc9UMRQENyjyQPa7aLe+uJBFLKP51I8VBuDpDacuibQx7nMt6N2UJ2KWI0JxRMHuJNq4S5jidR82aOw9gzGbTv30SKNLMqsZ0xj4LtdqCXDiZF6Lr09PsJYsvnBUFWa14HGcThKDtgwQwBryNViYmfv//0h9+RLZiU0ab+NEwSs7Zh5iAD+vhx64QqNX3tR7Le4SWXh8W0eShU9N78qYdSkiC3Ui7htxeqOocXM/P4AwbnHsLELIvkHdvgchCPvl8ygZa4WJTEWv16+ICskJcAKWGuqjvXAFuwjJJmPp9xLW9O0DFfQhMELiGamQR9wK07yYQVr34iah6qZO7cwhSKyEPFrVPIaNtfDhsjED639F7vmktf26SWNJHWfW0wOHILjI6TgqUvy0JDd8W8w0CHlAfz6Fs2l99NNgNF8dB3vBASbxS0hu/y0PVu/xQ== openstack-sleipner"
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAICCbgJ0Uwh9VSVhfId7l9i5/jk4CvAK5rbkiab8R+moF root@sleipner"
];
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAICCbgJ0Uwh9VSVhfId7l9i5/jk4CvAK5rbkiab8R+moF root@sleipner"
];
}

View File

@@ -1,9 +1,4 @@
{
config,
pkgs,
lib,
...
}:
{ config, pkgs, lib, ... }:
let
cfg = config.services.postfix;
in

View File

@@ -1,9 +1,4 @@
{
config,
lib,
values,
...
}:
{ config, lib, values, ... }:
let
cfg = config.services.prometheus.exporters.node;
in

View File

@@ -1,9 +1,4 @@
{
config,
lib,
values,
...
}:
{ config, lib, values, ... }:
let
cfg = config.services.prometheus.exporters.systemd;
in

View File

@@ -1,9 +1,4 @@
{
config,
lib,
values,
...
}:
{ config, lib, values, ... }:
let
cfg = config.services.prometheus.exporters.node;
in
@@ -15,33 +10,29 @@ in
http_listen_port = 28183;
grpc_listen_port = 0;
};
clients = [
{
url = "http://ildkule.pvv.ntnu.no:3100/loki/api/v1/push";
}
];
scrape_configs = [
{
job_name = "systemd-journal";
journal = {
max_age = "12h";
labels = {
job = "systemd-journal";
host = config.networking.hostName;
};
clients = [{
url = "http://ildkule.pvv.ntnu.no:3100/loki/api/v1/push";
}];
scrape_configs = [{
job_name = "systemd-journal";
journal = {
max_age = "12h";
labels = {
job = "systemd-journal";
host = config.networking.hostName;
};
relabel_configs = [
{
source_labels = [ "__journal__systemd_unit" ];
target_label = "unit";
}
{
source_labels = [ "__journal_priority_keyword" ];
target_label = "level";
}
];
}
];
};
relabel_configs = [
{
source_labels = [ "__journal__systemd_unit" ];
target_label = "unit";
}
{
source_labels = [ "__journal_priority_keyword" ];
target_label = "level";
}
];
}];
};
};
}

View File

@@ -1,9 +1,4 @@
{
config,
pkgs,
lib,
...
}:
{ config, pkgs, lib, ... }:
{
services.smartd = {
# NOTE: qemu guests tend not to have SMART-reporting disks. Please override for the
@@ -19,12 +14,9 @@
};
};
environment.systemPackages = lib.optionals config.services.smartd.enable (
with pkgs;
[
smartmontools
]
);
environment.systemPackages = lib.optionals config.services.smartd.enable (with pkgs; [
smartmontools
]);
systemd.services.smartd.unitConfig.ConditionVirtualization = "no";
}

View File

@@ -2,7 +2,7 @@
{
# Let's not thermal throttle
services.thermald.enable = lib.mkIf (lib.all (x: x) [
(config.nixpkgs.system == "x86_64-linux")
(!config.boot.isContainer or false)
]) true;
}
(config.nixpkgs.system == "x86_64-linux")
(!config.boot.isContainer or false)
]) true;
}
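The `lib.all (x: x) [ ... ]` guard above is just a logical AND over a list of booleans; a small standalone illustration of the idiom:

  # lib.all pred xs is true only when pred holds for every element, so with the
  # identity function it ANDs the listed conditions:
  lib.all (x: x) [ true true ]    # -> true:  the mkIf enables thermald
  lib.all (x: x) [ true false ]   # -> false: the option is left untouched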

View File

@@ -1,9 +1,4 @@
{
config,
pkgs,
lib,
...
}:
{ config, pkgs, lib, ... }:
let
cfg = config.services.uptimed;
in
@@ -20,48 +15,45 @@ in
services.uptimed = {
enable = true;
settings =
let
stateDir = "/var/lib/uptimed";
in
{
PIDFILE = "${stateDir}/pid";
SENDMAIL = lib.mkDefault "${pkgs.system-sendmail}/bin/sendmail -t";
};
settings = let
stateDir = "/var/lib/uptimed";
in {
PIDFILE = "${stateDir}/pid";
SENDMAIL = lib.mkDefault "${pkgs.system-sendmail}/bin/sendmail -t";
};
};
systemd.services.uptimed = lib.mkIf (cfg.enable) {
serviceConfig =
let
uptimed = pkgs.uptimed.overrideAttrs (prev: {
postPatch = ''
substituteInPlace Makefile.am \
--replace-fail '$(sysconfdir)/uptimed.conf' '/var/lib/uptimed/uptimed.conf'
substituteInPlace src/Makefile.am \
--replace-fail '$(sysconfdir)/uptimed.conf' '/var/lib/uptimed/uptimed.conf'
'';
});
serviceConfig = let
uptimed = pkgs.uptimed.overrideAttrs (prev: {
postPatch = ''
substituteInPlace Makefile.am \
--replace-fail '$(sysconfdir)/uptimed.conf' '/var/lib/uptimed/uptimed.conf'
substituteInPlace src/Makefile.am \
--replace-fail '$(sysconfdir)/uptimed.conf' '/var/lib/uptimed/uptimed.conf'
'';
});
in
{
Type = "notify";
in {
Type = "notify";
ExecStart = lib.mkForce "${uptimed}/sbin/uptimed -f";
ExecStart = lib.mkForce "${uptimed}/sbin/uptimed -f";
BindReadOnlyPaths =
let
configFile = lib.pipe cfg.settings [
(lib.mapAttrsToList (
k: v: if builtins.isList v then lib.mapConcatStringsSep "\n" (v': "${k}=${v'}") v else "${k}=${v}"
))
(lib.concatStringsSep "\n")
(pkgs.writeText "uptimed.conf")
];
in
[
"${configFile}:/var/lib/uptimed/uptimed.conf"
];
};
BindReadOnlyPaths = let
configFile = lib.pipe cfg.settings [
(lib.mapAttrsToList
(k: v:
if builtins.isList v
then lib.mapConcatStringsSep "\n" (v': "${k}=${v'}") v
else "${k}=${v}")
)
(lib.concatStringsSep "\n")
(pkgs.writeText "uptimed.conf")
];
in [
"${configFile}:/var/lib/uptimed/uptimed.conf"
];
};
};
};
}
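A short, hedged illustration of what the mapAttrsToList/concatStringsSep pipe above renders into the bind-mounted uptimed.conf, using simplified stand-in values:

  # Given (hypothetically)
  #   cfg.settings = {
  #     PIDFILE  = "/var/lib/uptimed/pid";
  #     SENDMAIL = "/run/current-system/sw/bin/sendmail -t";
  #   };
  # each attribute becomes one KEY=value line, so the generated file reads
  #   PIDFILE=/var/lib/uptimed/pid
  #   SENDMAIL=/run/current-system/sw/bin/sendmail -t
  # and is mounted read-only over /var/lib/uptimed/uptimed.conf.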

View File

@@ -1,15 +1,8 @@
{ config, fp, lib, ... }:
{
config,
fp,
lib,
...
}:
{
sops.defaultSopsFile =
let
secretsFilePath = fp /secrets/${config.networking.hostName}/${config.networking.hostName}.yaml;
in
lib.mkIf (builtins.pathExists secretsFilePath) secretsFilePath;
sops.defaultSopsFile = let
secretsFilePath = fp /secrets/${config.networking.hostName}/${config.networking.hostName}.yaml;
in lib.mkIf (builtins.pathExists secretsFilePath) secretsFilePath;
sops.age = lib.mkIf (config.sops.defaultSopsFile != null) {
sshKeyPaths = lib.mkDefault [ "/etc/ssh/ssh_host_ed25519_key" ];

flake.lock (generated): 16 changed lines
View File

@@ -7,11 +7,11 @@
]
},
"locked": {
"lastModified": 1771267058,
"narHash": "sha256-EEL4SmD1b3BPJPsSJJ4wDTXWMumJqbR+BLzhJJG0skE=",
"lastModified": 1770133120,
"narHash": "sha256-RuAWONXb+U3omSsuIPCrPcgj0XYqv+2djG0cnPGEyKg=",
"ref": "main",
"rev": "e3962d02c78b9c7b4d18148d931a9a4bf22e7902",
"revCount": 254,
"rev": "3123b8b474319bc75ee780e0357dcdea69dc85e6",
"revCount": 244,
"type": "git",
"url": "https://git.pvv.ntnu.no/Projects/dibbler.git"
},
@@ -195,11 +195,11 @@
]
},
"locked": {
"lastModified": 1770960722,
"narHash": "sha256-IdhPsWFZUKSJh/nLjGLJvGM5d5Uta+k1FlVYPxTZi0E=",
"lastModified": 1767906352,
"narHash": "sha256-wYsH9MMAPFG3XTL+3DwI39XMG0F2fTmn/5lt265a3Es=",
"ref": "main",
"rev": "c2e4aca7e1ba27cd09eeaeab47010d32a11841b2",
"revCount": 15,
"rev": "d054c5d064b8ed6d53a0adb0cf6c0a72febe212e",
"revCount": 13,
"type": "git",
"url": "https://git.pvv.ntnu.no/Drift/nix-gitea-themes.git"
},

flake.nix: 723 changed lines
View File

@@ -49,403 +49,348 @@
qotd.inputs.nixpkgs.follows = "nixpkgs";
};
outputs =
{
self,
nixpkgs,
nixpkgs-unstable,
sops-nix,
disko,
...
}@inputs:
let
inherit (nixpkgs) lib;
systems = [
"x86_64-linux"
"aarch64-linux"
"aarch64-darwin"
];
forAllSystems = f: lib.genAttrs systems f;
allMachines = builtins.attrNames self.nixosConfigurations;
importantMachines = [
"bekkalokk"
"bicep"
"brzeczyszczykiewicz"
"georg"
"ildkule"
];
in
{
inputs = lib.mapAttrs (_: src: src.outPath) inputs;
outputs = { self, nixpkgs, nixpkgs-unstable, sops-nix, disko, ... }@inputs:
let
inherit (nixpkgs) lib;
systems = [
"x86_64-linux"
"aarch64-linux"
"aarch64-darwin"
];
forAllSystems = f: lib.genAttrs systems f;
allMachines = builtins.attrNames self.nixosConfigurations;
importantMachines = [
"bekkalokk"
"bicep"
"brzeczyszczykiewicz"
"georg"
"ildkule"
];
in {
inputs = lib.mapAttrs (_: src: src.outPath) inputs;
pkgs = forAllSystems (
system:
import nixpkgs {
inherit system;
config.allowUnfreePredicate =
pkg:
builtins.elem (lib.getName pkg) [
"nvidia-x11"
"nvidia-settings"
];
pkgs = forAllSystems (system: import nixpkgs {
inherit system;
config.allowUnfreePredicate = pkg: builtins.elem (lib.getName pkg)
[
"nvidia-x11"
"nvidia-settings"
];
});
nixosConfigurations = let
nixosConfig =
nixpkgs:
name:
configurationPath:
extraArgs@{
localSystem ? "x86_64-linux", # buildPlatform
crossSystem ? "x86_64-linux", # hostPlatform
specialArgs ? { },
modules ? [ ],
overlays ? [ ],
enableDefaults ? true,
...
}:
let
commonPkgsConfig = {
inherit localSystem crossSystem;
config.allowUnfreePredicate = pkg: builtins.elem (lib.getName pkg)
[
"nvidia-x11"
"nvidia-settings"
];
overlays = (lib.optionals enableDefaults [
# Global overlays go here
inputs.roowho2.overlays.default
]) ++ overlays;
};
pkgs = import nixpkgs commonPkgsConfig;
unstablePkgs = import nixpkgs-unstable commonPkgsConfig;
in
lib.nixosSystem (lib.recursiveUpdate
{
system = crossSystem;
inherit pkgs;
specialArgs = {
inherit inputs unstablePkgs;
values = import ./values.nix;
fp = path: ./${path};
} // specialArgs;
modules = [
{
networking.hostName = lib.mkDefault name;
}
configurationPath
] ++ (lib.optionals enableDefaults [
sops-nix.nixosModules.sops
inputs.roowho2.nixosModules.default
self.nixosModules.rsync-pull-targets
]) ++ modules;
}
(builtins.removeAttrs extraArgs [
"localSystem"
"crossSystem"
"modules"
"overlays"
"specialArgs"
"enableDefaults"
])
);
nixosConfigurations =
let
nixosConfig =
nixpkgs: name: configurationPath:
extraArgs@{
localSystem ? "x86_64-linux", # buildPlatform
crossSystem ? "x86_64-linux", # hostPlatform
specialArgs ? { },
modules ? [ ],
overlays ? [ ],
enableDefaults ? true,
...
}:
let
commonPkgsConfig = {
inherit localSystem crossSystem;
config.allowUnfreePredicate =
pkg:
builtins.elem (lib.getName pkg) [
"nvidia-x11"
"nvidia-settings"
];
overlays =
(lib.optionals enableDefaults [
# Global overlays go here
inputs.roowho2.overlays.default
])
++ overlays;
};
stableNixosConfig = name: extraArgs:
nixosConfig nixpkgs name ./hosts/${name}/configuration.nix extraArgs;
in {
bakke = stableNixosConfig "bakke" {
modules = [
inputs.disko.nixosModules.disko
];
};
bicep = stableNixosConfig "bicep" {
modules = [
inputs.matrix-next.nixosModules.default
inputs.pvv-calendar-bot.nixosModules.default
inputs.minecraft-heatmap.nixosModules.default
self.nixosModules.gickup
self.nixosModules.matrix-ooye
];
overlays = [
inputs.pvv-calendar-bot.overlays.default
inputs.minecraft-heatmap.overlays.default
(final: prev: {
inherit (self.packages.${prev.stdenv.hostPlatform.system}) out-of-your-element;
})
];
};
bekkalokk = stableNixosConfig "bekkalokk" {
overlays = [
(final: prev: {
mediawiki-extensions = final.callPackage ./packages/mediawiki-extensions { };
simplesamlphp = final.callPackage ./packages/simplesamlphp { };
bluemap = final.callPackage ./packages/bluemap.nix { };
})
inputs.pvv-nettsiden.overlays.default
inputs.qotd.overlays.default
];
modules = [
inputs.pvv-nettsiden.nixosModules.default
self.nixosModules.bluemap
inputs.qotd.nixosModules.default
];
};
ildkule = stableNixosConfig "ildkule" { };
#ildkule-unstable = unstableNixosConfig "ildkule" { };
skrot = stableNixosConfig "skrot" {
modules = [
inputs.disko.nixosModules.disko
inputs.dibbler.nixosModules.default
];
overlays = [inputs.dibbler.overlays.default];
};
shark = stableNixosConfig "shark" { };
wenche = stableNixosConfig "wenche" { };
temmie = stableNixosConfig "temmie" { };
gluttony = stableNixosConfig "gluttony" { };
pkgs = import nixpkgs commonPkgsConfig;
unstablePkgs = import nixpkgs-unstable commonPkgsConfig;
in
lib.nixosSystem (
lib.recursiveUpdate
{
system = crossSystem;
inherit pkgs;
specialArgs = {
inherit inputs unstablePkgs;
values = import ./values.nix;
fp = path: ./${path};
}
// specialArgs;
modules = [
{
networking.hostName = lib.mkDefault name;
}
configurationPath
]
++ (lib.optionals enableDefaults [
sops-nix.nixosModules.sops
inputs.roowho2.nixosModules.default
self.nixosModules.rsync-pull-targets
])
++ modules;
}
(
builtins.removeAttrs extraArgs [
"localSystem"
"crossSystem"
"modules"
"overlays"
"specialArgs"
"enableDefaults"
]
)
);
stableNixosConfig =
name: extraArgs: nixosConfig nixpkgs name ./hosts/${name}/configuration.nix extraArgs;
in
{
bakke = stableNixosConfig "bakke" {
modules = [
inputs.disko.nixosModules.disko
];
};
bicep = stableNixosConfig "bicep" {
modules = [
inputs.matrix-next.nixosModules.default
inputs.pvv-calendar-bot.nixosModules.default
inputs.minecraft-heatmap.nixosModules.default
self.nixosModules.gickup
self.nixosModules.matrix-ooye
];
overlays = [
inputs.pvv-calendar-bot.overlays.default
inputs.minecraft-heatmap.overlays.default
(final: prev: {
inherit (self.packages.${prev.stdenv.hostPlatform.system}) out-of-your-element;
})
];
};
bekkalokk = stableNixosConfig "bekkalokk" {
overlays = [
(final: prev: {
mediawiki-extensions = final.callPackage ./packages/mediawiki-extensions { };
simplesamlphp = final.callPackage ./packages/simplesamlphp { };
bluemap = final.callPackage ./packages/bluemap.nix { };
})
inputs.pvv-nettsiden.overlays.default
inputs.qotd.overlays.default
];
modules = [
inputs.pvv-nettsiden.nixosModules.default
self.nixosModules.bluemap
inputs.qotd.nixosModules.default
];
};
ildkule = stableNixosConfig "ildkule" { };
#ildkule-unstable = unstableNixosConfig "ildkule" { };
skrot = stableNixosConfig "skrot" {
modules = [
inputs.disko.nixosModules.disko
inputs.dibbler.nixosModules.default
];
overlays = [ inputs.dibbler.overlays.default ];
};
shark = stableNixosConfig "shark" { };
wenche = stableNixosConfig "wenche" { };
temmie = stableNixosConfig "temmie" { };
gluttony = stableNixosConfig "gluttony" { };
kommode = stableNixosConfig "kommode" {
overlays = [
inputs.nix-gitea-themes.overlays.default
];
modules = [
inputs.nix-gitea-themes.nixosModules.default
inputs.disko.nixosModules.disko
];
};
ustetind = stableNixosConfig "ustetind" {
modules = [
"${nixpkgs}/nixos/modules/virtualisation/lxc-container.nix"
];
};
brzeczyszczykiewicz = stableNixosConfig "brzeczyszczykiewicz" {
modules = [
inputs.grzegorz-clients.nixosModules.grzegorz-webui
inputs.gergle.nixosModules.default
inputs.greg-ng.nixosModules.default
];
overlays = [
inputs.greg-ng.overlays.default
inputs.gergle.overlays.default
];
};
georg = stableNixosConfig "georg" {
modules = [
inputs.grzegorz-clients.nixosModules.grzegorz-webui
inputs.gergle.nixosModules.default
inputs.greg-ng.nixosModules.default
];
overlays = [
inputs.greg-ng.overlays.default
inputs.gergle.overlays.default
];
};
}
// (
let
skrottConfig = {
modules = [
(nixpkgs + "/nixos/modules/installer/sd-card/sd-image-aarch64.nix")
inputs.dibbler.nixosModules.default
];
overlays = [
inputs.dibbler.overlays.default
(final: prev: {
# NOTE: Yeetus (these break crosscompile ¯\_(ツ)_/¯)
atool = prev.emptyDirectory;
micro = prev.emptyDirectory;
ncdu = prev.emptyDirectory;
})
];
};
in
{
skrott = self.nixosConfigurations.skrott-native;
skrott-native = stableNixosConfig "skrott" (
skrottConfig
// {
localSystem = "aarch64-linux";
crossSystem = "aarch64-linux";
}
);
skrott-cross = stableNixosConfig "skrott" (
skrottConfig
// {
localSystem = "x86_64-linux";
crossSystem = "aarch64-linux";
}
);
skrott-x86_64 = stableNixosConfig "skrott" (
skrottConfig
// {
localSystem = "x86_64-linux";
crossSystem = "x86_64-linux";
}
);
}
)
// (
let
machineNames = map (i: "lupine-${toString i}") (lib.range 1 5);
stableLupineNixosConfig =
name: extraArgs: nixosConfig nixpkgs name ./hosts/lupine/configuration.nix extraArgs;
in
lib.genAttrs machineNames (
name:
stableLupineNixosConfig name {
modules = [ { networking.hostName = name; } ];
specialArgs.lupineName = name;
}
)
);
nixosModules = {
bluemap = ./modules/bluemap.nix;
gickup = ./modules/gickup;
matrix-ooye = ./modules/matrix-ooye.nix;
robots-txt = ./modules/robots-txt.nix;
rsync-pull-targets = ./modules/rsync-pull-targets.nix;
snakeoil-certs = ./modules/snakeoil-certs.nix;
snappymail = ./modules/snappymail.nix;
kommode = stableNixosConfig "kommode" {
overlays = [
inputs.nix-gitea-themes.overlays.default
];
modules = [
inputs.nix-gitea-themes.nixosModules.default
inputs.disko.nixosModules.disko
];
};
devShells = forAllSystems (system: {
default =
let
pkgs = import nixpkgs-unstable {
inherit system;
overlays = [
(final: prev: {
inherit (inputs.disko.packages.${system}) disko;
})
];
};
in
pkgs.callPackage ./shell.nix { };
cuda =
let
cuda-pkgs = import nixpkgs-unstable {
inherit system;
config = {
allowUnfree = true;
cudaSupport = true;
};
};
in
cuda-pkgs.callPackage ./shells/cuda.nix { };
ustetind = stableNixosConfig "ustetind" {
modules = [
"${nixpkgs}/nixos/modules/virtualisation/lxc-container.nix"
];
};
brzeczyszczykiewicz = stableNixosConfig "brzeczyszczykiewicz" {
modules = [
inputs.grzegorz-clients.nixosModules.grzegorz-webui
inputs.gergle.nixosModules.default
inputs.greg-ng.nixosModules.default
];
overlays = [
inputs.greg-ng.overlays.default
inputs.gergle.overlays.default
];
};
georg = stableNixosConfig "georg" {
modules = [
inputs.grzegorz-clients.nixosModules.grzegorz-webui
inputs.gergle.nixosModules.default
inputs.greg-ng.nixosModules.default
];
overlays = [
inputs.greg-ng.overlays.default
inputs.gergle.overlays.default
];
};
}
//
(let
skrottConfig = {
modules = [
(nixpkgs + "/nixos/modules/installer/sd-card/sd-image-aarch64.nix")
inputs.dibbler.nixosModules.default
];
overlays = [
inputs.dibbler.overlays.default
(final: prev: {
# NOTE: Yeetus (these break crosscompile ¯\_(ツ)_/¯)
atool = prev.emptyDirectory;
micro = prev.emptyDirectory;
ncdu = prev.emptyDirectory;
})
];
};
in {
skrott = self.nixosConfigurations.skrott-native;
skrott-native = stableNixosConfig "skrott" (skrottConfig // {
localSystem = "aarch64-linux";
crossSystem = "aarch64-linux";
});
skrott-cross = stableNixosConfig "skrott" (skrottConfig // {
localSystem = "x86_64-linux";
crossSystem = "aarch64-linux";
});
skrott-x86_64 = stableNixosConfig "skrott" (skrottConfig // {
localSystem = "x86_64-linux";
crossSystem = "x86_64-linux";
});
})
//
(let
machineNames = map (i: "lupine-${toString i}") (lib.range 1 5);
stableLupineNixosConfig = name: extraArgs:
nixosConfig nixpkgs name ./hosts/lupine/configuration.nix extraArgs;
in lib.genAttrs machineNames (name: stableLupineNixosConfig name {
modules = [{ networking.hostName = name; }];
specialArgs.lupineName = name;
}));
packages = {
"x86_64-linux" =
let
system = "x86_64-linux";
pkgs = nixpkgs.legacyPackages.${system};
in
rec {
default = important-machines;
important-machines = pkgs.linkFarm "important-machines" (
lib.getAttrs importantMachines self.packages.${system}
);
all-machines = pkgs.linkFarm "all-machines" (lib.getAttrs allMachines self.packages.${system});
simplesamlphp = pkgs.callPackage ./packages/simplesamlphp { };
bluemap = pkgs.callPackage ./packages/bluemap.nix { };
out-of-your-element = pkgs.callPackage ./packages/ooye/package.nix { };
}
//
# Mediawiki extensions
(lib.pipe null [
(_: pkgs.callPackage ./packages/mediawiki-extensions { })
(lib.flip builtins.removeAttrs [
"override"
"overrideDerivation"
])
(lib.mapAttrs' (name: lib.nameValuePair "mediawiki-${name}"))
])
//
# Machines
lib.genAttrs allMachines (machine: self.nixosConfigurations.${machine}.config.system.build.toplevel)
//
# Skrott is an exception
{
skrott = self.packages.${system}.skrott-native-sd;
skrott-native = self.nixosConfigurations.skrott-native.config.system.build.toplevel;
skrott-native-sd = self.nixosConfigurations.skrott-native.config.system.build.sdImage;
skrott-cross = self.nixosConfigurations.skrott-cross.config.system.build.toplevel;
skrott-cross-sd = self.nixosConfigurations.skrott-cross.config.system.build.sdImage;
skrott-x86_64 = self.nixosConfigurations.skrott-x86_64.config.system.build.toplevel;
}
//
# Nix-topology
(
let
topology' = import inputs.nix-topology {
pkgs = import nixpkgs {
inherit system;
overlays = [
inputs.nix-topology.overlays.default
(final: prev: {
inherit (nixpkgs-unstable.legacyPackages.${system}) super-tiny-icons;
})
];
};
specialArgs = {
values = import ./values.nix;
};
modules = [
./topology
{
nixosConfigurations = lib.mapAttrs (
_name: nixosCfg:
nixosCfg.extendModules {
modules = [
inputs.nix-topology.nixosModules.default
./topology/service-extractors/greg-ng.nix
./topology/service-extractors/postgresql.nix
./topology/service-extractors/mysql.nix
./topology/service-extractors/gitea-runners.nix
];
}
) self.nixosConfigurations;
}
];
};
in
{
topology = topology'.config.output;
topology-png =
pkgs.runCommand "pvv-config-topology-png"
{
nativeBuildInputs = [ pkgs.writableTmpDirAsHomeHook ];
}
''
mkdir -p "$out"
for file in '${topology'.config.output}'/*.svg; do
${lib.getExe pkgs.imagemagick} -density 300 -background none "$file" "$out"/"$(basename "''${file%.svg}.png")"
done
'';
}
);
};
nixosModules = {
bluemap = ./modules/bluemap.nix;
gickup = ./modules/gickup;
matrix-ooye = ./modules/matrix-ooye.nix;
robots-txt = ./modules/robots-txt.nix;
rsync-pull-targets = ./modules/rsync-pull-targets.nix;
snakeoil-certs = ./modules/snakeoil-certs.nix;
snappymail = ./modules/snappymail.nix;
};
devShells = forAllSystems (system: {
default = let
pkgs = import nixpkgs-unstable {
inherit system;
overlays = [
(final: prev: {
inherit (inputs.disko.packages.${system}) disko;
})
];
};
in pkgs.callPackage ./shell.nix { };
cuda = let
cuda-pkgs = import nixpkgs-unstable {
inherit system;
config = {
allowUnfree = true;
cudaSupport = true;
};
};
in cuda-pkgs.callPackage ./shells/cuda.nix { };
});
packages = {
"x86_64-linux" = let
system = "x86_64-linux";
pkgs = nixpkgs.legacyPackages.${system};
in rec {
default = important-machines;
important-machines = pkgs.linkFarm "important-machines"
(lib.getAttrs importantMachines self.packages.${system});
all-machines = pkgs.linkFarm "all-machines"
(lib.getAttrs allMachines self.packages.${system});
simplesamlphp = pkgs.callPackage ./packages/simplesamlphp { };
bluemap = pkgs.callPackage ./packages/bluemap.nix { };
out-of-your-element = pkgs.callPackage ./packages/ooye/package.nix { };
}
//
# Mediawiki extensions
(lib.pipe null [
(_: pkgs.callPackage ./packages/mediawiki-extensions { })
(lib.flip builtins.removeAttrs ["override" "overrideDerivation"])
(lib.mapAttrs' (name: lib.nameValuePair "mediawiki-${name}"))
])
//
# Machines
lib.genAttrs allMachines
(machine: self.nixosConfigurations.${machine}.config.system.build.toplevel)
//
# Skrott is an exception
{
skrott = self.packages.${system}.skrott-native-sd;
skrott-native = self.nixosConfigurations.skrott-native.config.system.build.toplevel;
skrott-native-sd = self.nixosConfigurations.skrott-native.config.system.build.sdImage;
skrott-cross = self.nixosConfigurations.skrott-cross.config.system.build.toplevel;
skrott-cross-sd = self.nixosConfigurations.skrott-cross.config.system.build.sdImage;
skrott-x86_64 = self.nixosConfigurations.skrott-x86_64.config.system.build.toplevel;
}
//
# Nix-topology
(let
topology' = import inputs.nix-topology {
pkgs = import nixpkgs {
inherit system;
overlays = [
inputs.nix-topology.overlays.default
(final: prev: {
inherit (nixpkgs-unstable.legacyPackages.${system}) super-tiny-icons;
})
];
};
specialArgs = {
values = import ./values.nix;
};
modules = [
./topology
{
nixosConfigurations = lib.mapAttrs (_name: nixosCfg: nixosCfg.extendModules {
modules = [
inputs.nix-topology.nixosModules.default
./topology/service-extractors/greg-ng.nix
./topology/service-extractors/postgresql.nix
./topology/service-extractors/mysql.nix
./topology/service-extractors/gitea-runners.nix
];
}) self.nixosConfigurations;
}
];
};
in {
topology = topology'.config.output;
topology-png = pkgs.runCommand "pvv-config-topology-png" {
nativeBuildInputs = [ pkgs.writableTmpDirAsHomeHook ];
} ''
mkdir -p "$out"
for file in '${topology'.config.output}'/*.svg; do
${lib.getExe pkgs.imagemagick} -density 300 -background none "$file" "$out"/"$(basename "''${file%.svg}.png")"
done
'';
});
};
};
}

View File

@@ -1,23 +1,15 @@
{
config,
pkgs,
values,
...
}:
{ config, pkgs, values, ... }:
{
imports = [
./hardware-configuration.nix
../../base
./filesystems.nix
];
./hardware-configuration.nix
../../base
./filesystems.nix
];
networking.hostId = "99609ffc";
systemd.network.networks."30-enp2s0" = values.defaultNetworkConfig // {
matchConfig.Name = "enp2s0";
address = with values.hosts.bakke; [
(ipv4 + "/25")
(ipv6 + "/64")
];
address = with values.hosts.bakke; [ (ipv4 + "/25") (ipv6 + "/64") ];
};
# Don't change (even during upgrades) unless you know what you are doing.

View File

@@ -1,4 +1,4 @@
{ pkgs, ... }:
{ pkgs,... }:
{
# Boot drives:
boot.swraid.enable = true;

View File

@@ -1,59 +1,41 @@
# Do not modify this file! It was generated by 'nixos-generate-config'
# and may be overwritten by future invocations. Please make changes
# to /etc/nixos/configuration.nix instead.
{
config,
lib,
pkgs,
modulesPath,
...
}:
{ config, lib, pkgs, modulesPath, ... }:
{
imports = [
(modulesPath + "/installer/scan/not-detected.nix")
];
imports =
[ (modulesPath + "/installer/scan/not-detected.nix")
];
boot.initrd.availableKernelModules = [
"ehci_pci"
"ahci"
"usbhid"
"usb_storage"
"sd_mod"
];
boot.initrd.availableKernelModules = [ "ehci_pci" "ahci" "usbhid" "usb_storage" "sd_mod" ];
boot.initrd.kernelModules = [ ];
boot.kernelModules = [ "kvm-intel" ];
boot.extraModulePackages = [ ];
fileSystems."/" = {
device = "/dev/disk/by-uuid/0f63c3d2-fc12-4ed5-a5a5-141bfd67a571";
fsType = "btrfs";
options = [ "subvol=root" ];
};
fileSystems."/" =
{ device = "/dev/disk/by-uuid/0f63c3d2-fc12-4ed5-a5a5-141bfd67a571";
fsType = "btrfs";
options = [ "subvol=root" ];
};
fileSystems."/home" = {
device = "/dev/disk/by-uuid/0f63c3d2-fc12-4ed5-a5a5-141bfd67a571";
fsType = "btrfs";
options = [ "subvol=home" ];
};
fileSystems."/home" =
{ device = "/dev/disk/by-uuid/0f63c3d2-fc12-4ed5-a5a5-141bfd67a571";
fsType = "btrfs";
options = [ "subvol=home" ];
};
fileSystems."/nix" = {
device = "/dev/disk/by-uuid/0f63c3d2-fc12-4ed5-a5a5-141bfd67a571";
fsType = "btrfs";
options = [
"subvol=nix"
"noatime"
];
};
fileSystems."/nix" =
{ device = "/dev/disk/by-uuid/0f63c3d2-fc12-4ed5-a5a5-141bfd67a571";
fsType = "btrfs";
options = [ "subvol=nix" "noatime" ];
};
fileSystems."/boot" = {
device = "/dev/sdc2";
fsType = "vfat";
options = [
"fmask=0022"
"dmask=0022"
];
};
fileSystems."/boot" =
{ device = "/dev/sdc2";
fsType = "vfat";
options = [ "fmask=0022" "dmask=0022" ];
};
swapDevices = [ ];

View File

@@ -1,9 +1,4 @@
{
fp,
pkgs,
values,
...
}:
{ fp, pkgs, values, ... }:
{
imports = [
./hardware-configuration.nix
@@ -26,10 +21,7 @@
systemd.network.networks."30-enp2s0" = values.defaultNetworkConfig // {
matchConfig.Name = "enp2s0";
address = with values.hosts.bekkalokk; [
(ipv4 + "/25")
(ipv6 + "/64")
];
address = with values.hosts.bekkalokk; [ (ipv4 + "/25") (ipv6 + "/64") ];
};
services.btrfs.autoScrub.enable = true;

View File

@@ -1,43 +1,31 @@
# Do not modify this file! It was generated by 'nixos-generate-config'
# and may be overwritten by future invocations. Please make changes
# to /etc/nixos/configuration.nix instead.
{
config,
lib,
pkgs,
modulesPath,
...
}:
{ config, lib, pkgs, modulesPath, ... }:
{
imports = [
(modulesPath + "/installer/scan/not-detected.nix")
];
imports =
[ (modulesPath + "/installer/scan/not-detected.nix")
];
boot.initrd.availableKernelModules = [
"ehci_pci"
"ahci"
"usbhid"
"usb_storage"
"sd_mod"
];
boot.initrd.availableKernelModules = [ "ehci_pci" "ahci" "usbhid" "usb_storage" "sd_mod" ];
boot.initrd.kernelModules = [ ];
boot.kernelModules = [ "kvm-intel" ];
boot.extraModulePackages = [ ];
fileSystems."/" = {
device = "/dev/sda1";
fsType = "btrfs";
};
fileSystems."/" =
{ device = "/dev/sda1";
fsType = "btrfs";
};
fileSystems."/boot" = {
device = "/dev/disk/by-uuid/CE63-3B9B";
fsType = "vfat";
};
fileSystems."/boot" =
{ device = "/dev/disk/by-uuid/CE63-3B9B";
fsType = "vfat";
};
swapDevices = [
{ device = "/dev/disk/by-uuid/2df10c7b-0dec-45c6-a728-533f7da7f4b9"; }
];
swapDevices =
[ { device = "/dev/disk/by-uuid/2df10c7b-0dec-45c6-a728-533f7da7f4b9"; }
];
# Enables DHCP on each ethernet and wireless interface. In case of scripted networking
# (the default) this is the recommended approach. When using systemd-networkd it's

View File

@@ -1,15 +1,8 @@
{
config,
lib,
pkgs,
inputs,
...
}:
{ config, lib, pkgs, inputs, ... }:
let
vanillaSurvival = "/var/lib/bluemap/vanilla_survival_world";
format = pkgs.formats.hocon { };
in
{
in {
# NOTE: our version of the module gets added in flake.nix
disabledModules = [ "services/web-apps/bluemap.nix" ];
@@ -24,88 +17,82 @@ in
host = "minecraft.pvv.ntnu.no";
maps =
let
inherit (inputs.minecraft-kartverket.packages.${pkgs.stdenv.hostPlatform.system}) bluemap-export;
in
{
"verden" = {
extraHoconMarkersFile = "${bluemap-export}/overworld.hocon";
settings = {
world = vanillaSurvival;
dimension = "minecraft:overworld";
name = "Verden";
sorting = 0;
start-pos = {
x = 0;
z = 0;
};
ambient-light = 0.1;
cave-detection-ocean-floor = -5;
};
};
"underverden" = {
extraHoconMarkersFile = "${bluemap-export}/nether.hocon";
settings = {
world = vanillaSurvival;
dimension = "minecraft:the_nether";
name = "Underverden";
sorting = 100;
start-pos = {
x = 0;
z = 0;
};
sky-color = "#290000";
void-color = "#150000";
sky-light = 1;
ambient-light = 0.6;
remove-caves-below-y = -10000;
cave-detection-ocean-floor = -5;
cave-detection-uses-block-light = true;
render-mask = [
{
max-y = 90;
}
];
};
};
"enden" = {
extraHoconMarkersFile = "${bluemap-export}/the-end.hocon";
settings = {
world = vanillaSurvival;
dimension = "minecraft:the_end";
name = "Enden";
sorting = 200;
start-pos = {
x = 0;
z = 0;
};
sky-color = "#080010";
void-color = "#080010";
sky-light = 1;
ambient-light = 0.6;
remove-caves-below-y = -10000;
cave-detection-ocean-floor = -5;
maps = let
inherit (inputs.minecraft-kartverket.packages.${pkgs.stdenv.hostPlatform.system}) bluemap-export;
in {
"verden" = {
extraHoconMarkersFile = "${bluemap-export}/overworld.hocon";
settings = {
world = vanillaSurvival;
dimension = "minecraft:overworld";
name = "Verden";
sorting = 0;
start-pos = {
x = 0;
z = 0;
};
ambient-light = 0.1;
cave-detection-ocean-floor = -5;
};
};
"underverden" = {
extraHoconMarkersFile = "${bluemap-export}/nether.hocon";
settings = {
world = vanillaSurvival;
dimension = "minecraft:the_nether";
name = "Underverden";
sorting = 100;
start-pos = {
x = 0;
z = 0;
};
sky-color = "#290000";
void-color = "#150000";
sky-light = 1;
ambient-light = 0.6;
remove-caves-below-y = -10000;
cave-detection-ocean-floor = -5;
cave-detection-uses-block-light = true;
render-mask = [{
max-y = 90;
}];
};
};
"enden" = {
extraHoconMarkersFile = "${bluemap-export}/the-end.hocon";
settings = {
world = vanillaSurvival;
dimension = "minecraft:the_end";
name = "Enden";
sorting = 200;
start-pos = {
x = 0;
z = 0;
};
sky-color = "#080010";
void-color = "#080010";
sky-light = 1;
ambient-light = 0.6;
remove-caves-below-y = -10000;
cave-detection-ocean-floor = -5;
};
};
};
};
systemd.services."render-bluemap-maps" = {
serviceConfig = {
StateDirectory = [ "bluemap/world" ];
ExecStartPre =
let
rsyncArgs = lib.cli.toCommandLineShellGNU { } {
archive = true;
compress = true;
verbose = true;
no-owner = true;
no-group = true;
rsh = "${pkgs.openssh}/bin/ssh -o UserKnownHostsFile=%d/ssh-known-hosts -i %d/sshkey";
};
in
"${lib.getExe pkgs.rsync} ${rsyncArgs} root@innovation.pvv.ntnu.no:/ ${vanillaSurvival}";
ExecStartPre = let
rsyncArgs = lib.cli.toCommandLineShellGNU { } {
archive = true;
compress = true;
verbose = true;
no-owner = true;
no-group = true;
rsh = "${pkgs.openssh}/bin/ssh -o UserKnownHostsFile=%d/ssh-known-hosts -i %d/sshkey";
};
in "${lib.getExe pkgs.rsync} ${rsyncArgs} root@innovation.pvv.ntnu.no:/ ${vanillaSurvival}";
LoadCredential = [
"sshkey:${config.sops.secrets."bluemap/ssh-key".path}"
"ssh-known-hosts:${config.sops.secrets."bluemap/ssh-known-hosts".path}"

View File

@@ -1,16 +1,8 @@
{
config,
pkgs,
lib,
...
}:
{ config, pkgs, lib, ... }:
let
pwAuthScript = pkgs.writeShellApplication {
name = "pwauth";
runtimeInputs = with pkgs; [
coreutils
heimdal
];
runtimeInputs = with pkgs; [ coreutils heimdal ];
text = ''
read -r user1
user2="$(echo -n "$user1" | tr -c -d '0123456789abcdefghijklmnopqrstuvwxyz')"
@@ -41,7 +33,7 @@ let
"metadata/saml20-sp-remote.php" = pkgs.writeText "saml20-sp-remote.php" ''
<?php
${lib.pipe config.services.idp.sp-remote-metadata [
${ lib.pipe config.services.idp.sp-remote-metadata [
(map (url: ''
$metadata['${url}'] = [
'SingleLogoutService' => [
@@ -93,20 +85,14 @@ let
substituteInPlace "$out" \
--replace-warn '$SAML_COOKIE_SECURE' 'true' \
--replace-warn '$SAML_COOKIE_SALT' 'file_get_contents("${
config.sops.secrets."idp/cookie_salt".path
}")' \
--replace-warn '$SAML_COOKIE_SALT' 'file_get_contents("${config.sops.secrets."idp/cookie_salt".path}")' \
--replace-warn '$SAML_ADMIN_NAME' '"Drift"' \
--replace-warn '$SAML_ADMIN_EMAIL' '"drift@pvv.ntnu.no"' \
--replace-warn '$SAML_ADMIN_PASSWORD' 'file_get_contents("${
config.sops.secrets."idp/admin_password".path
}")' \
--replace-warn '$SAML_ADMIN_PASSWORD' 'file_get_contents("${config.sops.secrets."idp/admin_password".path}")' \
--replace-warn '$SAML_TRUSTED_DOMAINS' 'array( "idp.pvv.ntnu.no" )' \
--replace-warn '$SAML_DATABASE_DSN' '"pgsql:host=postgres.pvv.ntnu.no;port=5432;dbname=idp"' \
--replace-warn '$SAML_DATABASE_USERNAME' '"idp"' \
--replace-warn '$SAML_DATABASE_PASSWORD' 'file_get_contents("${
config.sops.secrets."idp/postgres_password".path
}")' \
--replace-warn '$SAML_DATABASE_PASSWORD' 'file_get_contents("${config.sops.secrets."idp/postgres_password".path}")' \
--replace-warn '$CACHE_DIRECTORY' '/var/cache/idp'
'';
@@ -172,25 +158,23 @@ in
services.phpfpm.pools.idp = {
user = "idp";
group = "idp";
settings =
let
listenUser = config.services.nginx.user;
listenGroup = config.services.nginx.group;
in
{
"pm" = "dynamic";
"pm.max_children" = 32;
"pm.max_requests" = 500;
"pm.start_servers" = 2;
"pm.min_spare_servers" = 2;
"pm.max_spare_servers" = 4;
"listen.owner" = listenUser;
"listen.group" = listenGroup;
settings = let
listenUser = config.services.nginx.user;
listenGroup = config.services.nginx.group;
in {
"pm" = "dynamic";
"pm.max_children" = 32;
"pm.max_requests" = 500;
"pm.start_servers" = 2;
"pm.min_spare_servers" = 2;
"pm.max_spare_servers" = 4;
"listen.owner" = listenUser;
"listen.group" = listenGroup;
"catch_workers_output" = true;
"php_admin_flag[log_errors]" = true;
# "php_admin_value[error_log]" = "stderr";
};
"catch_workers_output" = true;
"php_admin_flag[log_errors]" = true;
# "php_admin_value[error_log]" = "stderr";
};
};
services.nginx.virtualHosts."idp.pvv.ntnu.no" = {
@@ -198,7 +182,7 @@ in
enableACME = true;
kTLS = true;
root = "${package}/share/php/simplesamlphp/public";
locations = {
locations = {
# based on https://simplesamlphp.org/docs/stable/simplesamlphp-install.html#configuring-nginx
"/" = {
alias = "${package}/share/php/simplesamlphp/public/";

View File

@@ -1,9 +1,4 @@
{
config,
pkgs,
lib,
...
}:
{ config, pkgs, lib, ... }:
{
security.krb5 = {
enable = true;

View File

@@ -1,12 +1,4 @@
{
pkgs,
lib,
fp,
config,
values,
...
}:
let
{ pkgs, lib, fp, config, values, ... }: let
cfg = config.services.mediawiki;
# "mediawiki"
@@ -17,9 +9,7 @@ let
simplesamlphp = pkgs.simplesamlphp.override {
extra_files = {
"metadata/saml20-idp-remote.php" = pkgs.writeText "mediawiki-saml20-idp-remote.php" (
import ../idp-simplesamlphp/metadata.php.nix
);
"metadata/saml20-idp-remote.php" = pkgs.writeText "mediawiki-saml20-idp-remote.php" (import ../idp-simplesamlphp/metadata.php.nix);
"config/authsources.php" = ./simplesaml-authsources.php;
@@ -28,49 +18,36 @@ let
substituteInPlace "$out" \
--replace-warn '$SAML_COOKIE_SECURE' 'true' \
--replace-warn '$SAML_COOKIE_SALT' 'file_get_contents("${
config.sops.secrets."mediawiki/simplesamlphp/cookie_salt".path
}")' \
--replace-warn '$SAML_COOKIE_SALT' 'file_get_contents("${config.sops.secrets."mediawiki/simplesamlphp/cookie_salt".path}")' \
--replace-warn '$SAML_ADMIN_NAME' '"Drift"' \
--replace-warn '$SAML_ADMIN_EMAIL' '"drift@pvv.ntnu.no"' \
--replace-warn '$SAML_ADMIN_PASSWORD' 'file_get_contents("${
config.sops.secrets."mediawiki/simplesamlphp/admin_password".path
}")' \
--replace-warn '$SAML_ADMIN_PASSWORD' 'file_get_contents("${config.sops.secrets."mediawiki/simplesamlphp/admin_password".path}")' \
--replace-warn '$SAML_TRUSTED_DOMAINS' 'array( "wiki.pvv.ntnu.no" )' \
--replace-warn '$SAML_DATABASE_DSN' '"pgsql:host=postgres.pvv.ntnu.no;port=5432;dbname=mediawiki_simplesamlphp"' \
--replace-warn '$SAML_DATABASE_USERNAME' '"mediawiki_simplesamlphp"' \
--replace-warn '$SAML_DATABASE_PASSWORD' 'file_get_contents("${
config.sops.secrets."mediawiki/simplesamlphp/postgres_password".path
}")' \
--replace-warn '$SAML_DATABASE_PASSWORD' 'file_get_contents("${config.sops.secrets."mediawiki/simplesamlphp/postgres_password".path}")' \
--replace-warn '$CACHE_DIRECTORY' '/var/cache/mediawiki/idp'
'';
};
};
in
{
in {
services.idp.sp-remote-metadata = [ "https://wiki.pvv.ntnu.no/simplesaml/" ];
sops.secrets =
lib.pipe
[
"mediawiki/secret-key"
"mediawiki/password"
"mediawiki/postgres_password"
"mediawiki/simplesamlphp/postgres_password"
"mediawiki/simplesamlphp/cookie_salt"
"mediawiki/simplesamlphp/admin_password"
]
[
(map (
key:
lib.nameValuePair key {
owner = user;
group = group;
restartUnits = [ "phpfpm-mediawiki.service" ];
}
))
lib.listToAttrs
];
sops.secrets = lib.pipe [
"mediawiki/secret-key"
"mediawiki/password"
"mediawiki/postgres_password"
"mediawiki/simplesamlphp/postgres_password"
"mediawiki/simplesamlphp/cookie_salt"
"mediawiki/simplesamlphp/admin_password"
] [
(map (key: lib.nameValuePair key {
owner = user;
group = group;
restartUnits = [ "phpfpm-mediawiki.service" ];
}))
lib.listToAttrs
];
services.rsync-pull-targets = {
enable = true;
@@ -238,13 +215,11 @@ in
# Cache directory for simplesamlphp
# systemd.services.phpfpm-mediawiki.serviceConfig.CacheDirectory = "mediawiki/simplesamlphp";
systemd.tmpfiles.settings."10-mediawiki"."/var/cache/mediawiki/simplesamlphp".d =
lib.mkIf cfg.enable
{
user = "mediawiki";
group = "mediawiki";
mode = "0770";
};
systemd.tmpfiles.settings."10-mediawiki"."/var/cache/mediawiki/simplesamlphp".d = lib.mkIf cfg.enable {
user = "mediawiki";
group = "mediawiki";
mode = "0770";
};
users.groups.mediawiki.members = lib.mkIf cfg.enable [ "nginx" ];
@@ -252,7 +227,7 @@ in
kTLS = true;
forceSSL = true;
enableACME = true;
locations = {
locations = {
"= /wiki/Main_Page" = lib.mkForce {
return = "301 /wiki/Programvareverkstedet";
};
@@ -278,22 +253,19 @@ in
"= /PNG/PVV-logo.svg".alias = fp /assets/logo_blue_regular.svg;
"= /PNG/PVV-logo.png".alias = fp /assets/logo_blue_regular.png;
"= /favicon.ico".alias =
pkgs.runCommandLocal "mediawiki-favicon.ico"
{
buildInputs = with pkgs; [ imagemagick ];
}
''
magick \
${fp /assets/logo_blue_regular.png} \
-resize x64 \
-gravity center \
-crop 64x64+0+0 \
-flatten \
-colors 256 \
-background transparent \
$out
'';
"= /favicon.ico".alias = pkgs.runCommandLocal "mediawiki-favicon.ico" {
buildInputs = with pkgs; [ imagemagick ];
} ''
magick \
${fp /assets/logo_blue_regular.png} \
-resize x64 \
-gravity center \
-crop 64x64+0+0 \
-flatten \
-colors 256 \
-background transparent \
$out
'';
};
};
@@ -301,9 +273,7 @@ in
systemd.services.mediawiki-init = lib.mkIf cfg.enable {
after = [ "sops-install-secrets.service" ];
serviceConfig = {
BindReadOnlyPaths = [
"/run/credentials/mediawiki-init.service/secret-key:/var/lib/mediawiki/secret.key"
];
BindReadOnlyPaths = [ "/run/credentials/mediawiki-init.service/secret-key:/var/lib/mediawiki/secret.key" ];
LoadCredential = [ "secret-key:${config.sops.secrets."mediawiki/secret-key".path}" ];
UMask = lib.mkForce "0007";
};
@@ -312,9 +282,7 @@ in
systemd.services.phpfpm-mediawiki = lib.mkIf cfg.enable {
after = [ "sops-install-secrets.service" ];
serviceConfig = {
BindReadOnlyPaths = [
"/run/credentials/phpfpm-mediawiki.service/secret-key:/var/lib/mediawiki/secret.key"
];
BindReadOnlyPaths = [ "/run/credentials/phpfpm-mediawiki.service/secret-key:/var/lib/mediawiki/secret.key" ];
LoadCredential = [ "secret-key:${config.sops.secrets."mediawiki/secret-key".path}" ];
UMask = lib.mkForce "0007";
};

View File

@@ -11,43 +11,41 @@ in
{
# Source: https://www.pierreblazquez.com/2023/06/17/how-to-harden-apache-php-fpm-daemons-using-systemd/
systemd.services = lib.genAttrs pools (_: {
serviceConfig =
let
caps = [
"CAP_NET_BIND_SERVICE"
"CAP_SETGID"
"CAP_SETUID"
"CAP_CHOWN"
"CAP_KILL"
"CAP_IPC_LOCK"
"CAP_DAC_OVERRIDE"
];
in
{
AmbientCapabilities = caps;
CapabilityBoundingSet = caps;
DeviceAllow = [ "" ];
LockPersonality = true;
MemoryDenyWriteExecute = false;
NoNewPrivileges = true;
PrivateMounts = true;
ProtectClock = true;
ProtectControlGroups = true;
ProtectHome = true;
ProtectHostname = true;
ProtectKernelLogs = true;
ProtectKernelModules = true;
ProtectKernelTunables = true;
RemoveIPC = true;
UMask = "0077";
RestrictNamespaces = "~mnt";
RestrictRealtime = true;
RestrictSUIDSGID = true;
SystemCallArchitectures = "native";
KeyringMode = "private";
SystemCallFilter = [
"@system-service"
];
};
serviceConfig = let
caps = [
"CAP_NET_BIND_SERVICE"
"CAP_SETGID"
"CAP_SETUID"
"CAP_CHOWN"
"CAP_KILL"
"CAP_IPC_LOCK"
"CAP_DAC_OVERRIDE"
];
in {
AmbientCapabilities = caps;
CapabilityBoundingSet = caps;
DeviceAllow = [ "" ];
LockPersonality = true;
MemoryDenyWriteExecute = false;
NoNewPrivileges = true;
PrivateMounts = true;
ProtectClock = true;
ProtectControlGroups = true;
ProtectHome = true;
ProtectHostname = true;
ProtectKernelLogs = true;
ProtectKernelModules = true;
ProtectKernelTunables = true;
RemoveIPC = true;
UMask = "0077";
RestrictNamespaces = "~mnt";
RestrictRealtime = true;
RestrictSUIDSGID = true;
SystemCallArchitectures = "native";
KeyringMode = "private";
SystemCallFilter = [
"@system-service"
];
};
});
}

View File

@@ -1,18 +1,11 @@
{
config,
pkgs,
lib,
values,
...
}:
{ config, pkgs, lib, values, ... }:
let
cfg = config.services.vaultwarden;
domain = "pw.pvv.ntnu.no";
address = "127.0.1.2";
port = 3011;
wsPort = 3012;
in
{
in {
sops.secrets."vaultwarden/environ" = {
owner = "vaultwarden";
group = "vaultwarden";

View File

@@ -1,10 +1,4 @@
{
config,
values,
pkgs,
lib,
...
}:
{ config, values, pkgs, lib, ... }:
{
imports = [
./roundcube.nix

View File

@@ -1,9 +1,4 @@
{
config,
pkgs,
lib,
...
}:
{ config, pkgs, lib, ... }:
with lib;
let
@@ -19,24 +14,14 @@ in
services.roundcube = {
enable = true;
package = pkgs.roundcube.withPlugins (
plugins: with plugins; [
persistent_login
thunderbird_labels
contextmenu
custom_from
]
);
package = pkgs.roundcube.withPlugins (plugins: with plugins; [
persistent_login
thunderbird_labels
contextmenu
custom_from
]);
dicts = with pkgs.aspellDicts; [
en
en-computers
nb
nn
fr
de
it
];
dicts = with pkgs.aspellDicts; [ en en-computers nb nn fr de it ];
maxAttachmentSize = 20;
hostName = "roundcubeplaceholder.example.com";
@@ -69,23 +54,21 @@ in
ln -s ${cfg.package} $out/roundcube
'';
extraConfig = ''
location ~ ^/roundcube/(${
builtins.concatStringsSep "|" [
# https://wiki.archlinux.org/title/Roundcube
"README"
"INSTALL"
"LICENSE"
"CHANGELOG"
"UPGRADING"
"bin"
"SQL"
".+\\.md"
"\\."
"config"
"temp"
"logs"
]
})/? {
location ~ ^/roundcube/(${builtins.concatStringsSep "|" [
# https://wiki.archlinux.org/title/Roundcube
"README"
"INSTALL"
"LICENSE"
"CHANGELOG"
"UPGRADING"
"bin"
"SQL"
".+\\.md"
"\\."
"config"
"temp"
"logs"
]})/? {
deny all;
}

View File

@@ -1,15 +1,7 @@
{
config,
lib,
fp,
pkgs,
values,
...
}:
{ config, lib, fp, pkgs, values, ... }:
let
cfg = config.services.snappymail;
in
{
in {
imports = [ (fp /modules/snappymail.nix) ];
services.snappymail = {

View File

@@ -1,31 +1,22 @@
{
pkgs,
lib,
config,
...
}:
{ pkgs, lib, config, ... }:
let
format = pkgs.formats.php { };
cfg = config.services.pvv-nettsiden;
in
{
in {
imports = [
./fetch-gallery.nix
];
sops.secrets =
lib.genAttrs
[
"nettsiden/door_secret"
"nettsiden/mysql_password"
"nettsiden/simplesamlphp/admin_password"
"nettsiden/simplesamlphp/cookie_salt"
]
(_: {
owner = config.services.phpfpm.pools.pvv-nettsiden.user;
group = config.services.phpfpm.pools.pvv-nettsiden.group;
restartUnits = [ "phpfpm-pvv-nettsiden.service" ];
});
sops.secrets = lib.genAttrs [
"nettsiden/door_secret"
"nettsiden/mysql_password"
"nettsiden/simplesamlphp/admin_password"
"nettsiden/simplesamlphp/cookie_salt"
] (_: {
owner = config.services.phpfpm.pools.pvv-nettsiden.user;
group = config.services.phpfpm.pools.pvv-nettsiden.group;
restartUnits = [ "phpfpm-pvv-nettsiden.service" ];
});
security.acme.certs."www.pvv.ntnu.no" = {
extraDomainNames = [
@@ -44,53 +35,48 @@ in
package = pkgs.pvv-nettsiden.override {
extra_files = {
"${pkgs.pvv-nettsiden.passthru.simplesamlphpPath}/metadata/saml20-idp-remote.php" =
pkgs.writeText "pvv-nettsiden-saml20-idp-remote.php" (import ../idp-simplesamlphp/metadata.php.nix);
"${pkgs.pvv-nettsiden.passthru.simplesamlphpPath}/config/authsources.php" =
pkgs.writeText "pvv-nettsiden-authsources.php" ''
<?php
$config = array(
'admin' => array(
'core:AdminPassword'
),
'default-sp' => array(
'saml:SP',
'entityID' => 'https://${cfg.domainName}/simplesaml/',
'idp' => 'https://idp.pvv.ntnu.no/',
),
);
'';
"${pkgs.pvv-nettsiden.passthru.simplesamlphpPath}/metadata/saml20-idp-remote.php" = pkgs.writeText "pvv-nettsiden-saml20-idp-remote.php" (import ../idp-simplesamlphp/metadata.php.nix);
"${pkgs.pvv-nettsiden.passthru.simplesamlphpPath}/config/authsources.php" = pkgs.writeText "pvv-nettsiden-authsources.php" ''
<?php
$config = array(
'admin' => array(
'core:AdminPassword'
),
'default-sp' => array(
'saml:SP',
'entityID' => 'https://${cfg.domainName}/simplesaml/',
'idp' => 'https://idp.pvv.ntnu.no/',
),
);
'';
};
};
domainName = "www.pvv.ntnu.no";
settings =
let
includeFromSops =
path: format.lib.mkRaw "file_get_contents('${config.sops.secrets."nettsiden/${path}".path}')";
in
{
DOOR_SECRET = includeFromSops "door_secret";
settings = let
includeFromSops = path: format.lib.mkRaw "file_get_contents('${config.sops.secrets."nettsiden/${path}".path}')";
in {
DOOR_SECRET = includeFromSops "door_secret";
DB = {
DSN = "mysql:dbname=www-data_nettside;host=mysql.pvv.ntnu.no";
USER = "www-data_nettsi";
PASS = includeFromSops "mysql_password";
};
# TODO: set up postgres session for simplesamlphp
SAML = {
COOKIE_SALT = includeFromSops "simplesamlphp/cookie_salt";
COOKIE_SECURE = true;
ADMIN_NAME = "PVV Drift";
ADMIN_EMAIL = "drift@pvv.ntnu.no";
ADMIN_PASSWORD = includeFromSops "simplesamlphp/admin_password";
TRUSTED_DOMAINS = [
"www.pvv.ntnu.no"
];
};
DB = {
DSN = "mysql:dbname=www-data_nettside;host=mysql.pvv.ntnu.no";
USER = "www-data_nettsi";
PASS = includeFromSops "mysql_password";
};
# TODO: set up postgres session for simplesamlphp
SAML = {
COOKIE_SALT = includeFromSops "simplesamlphp/cookie_salt";
COOKIE_SECURE = true;
ADMIN_NAME = "PVV Drift";
ADMIN_EMAIL = "drift@pvv.ntnu.no";
ADMIN_PASSWORD = includeFromSops "simplesamlphp/admin_password";
TRUSTED_DOMAINS = [
"www.pvv.ntnu.no"
];
};
};
};
services.phpfpm.pools."pvv-nettsiden".settings = {

View File

@@ -1,15 +1,8 @@
{
pkgs,
lib,
config,
values,
...
}:
{ pkgs, lib, config, values, ... }:
let
galleryDir = config.services.pvv-nettsiden.settings.GALLERY.DIR;
transferDir = "${config.services.pvv-nettsiden.settings.GALLERY.DIR}-transfer";
in
{
in {
users.users.${config.services.pvv-nettsiden.user} = {
# NOTE: the user unfortunately needs a registered shell for rrsync to function...
# is there anything we can do to remove this?
@@ -44,20 +37,14 @@ in
};
systemd.services.pvv-nettsiden-gallery-update = {
path = with pkgs; [
imagemagick
gnutar
gzip
];
path = with pkgs; [ imagemagick gnutar gzip ];
script = ''
tar ${
lib.cli.toGNUCommandLineShell { } {
extract = true;
file = "${transferDir}/gallery.tar.gz";
directory = ".";
}
}
tar ${lib.cli.toGNUCommandLineShell {} {
extract = true;
file = "${transferDir}/gallery.tar.gz";
directory = ".";
}}
# Delete files and directories that exists in the gallery that don't exist in the tarball
filesToRemove=$(uniq -u <(sort <(find . -not -path "./.thumbnails*") <(tar -tf ${transferDir}/gallery.tar.gz | sed 's|/$||')))

View File

@@ -1,28 +1,25 @@
{ lib, ... }:
{
services.nginx.virtualHosts =
lib.genAttrs
[
"pvv.ntnu.no"
"www.pvv.ntnu.no"
"pvv.org"
"www.pvv.org"
]
(_: {
locations = {
"^~ /.well-known/" = {
alias = (toString ./root) + "/";
};
services.nginx.virtualHosts = lib.genAttrs [
"pvv.ntnu.no"
"www.pvv.ntnu.no"
"pvv.org"
"www.pvv.org"
] (_: {
locations = {
"^~ /.well-known/" = {
alias = (toString ./root) + "/";
};
# Proxy the matrix well-known files
# Host has to be set before proxy_pass
# The header must be set so nginx on the other side routes it to the right place
"^~ /.well-known/matrix/" = {
extraConfig = ''
proxy_set_header Host matrix.pvv.ntnu.no;
proxy_pass https://matrix.pvv.ntnu.no/.well-known/matrix/;
'';
};
};
});
# Proxy the matrix well-known files
# Host has to be set before proxy_pass
# The header must be set so nginx on the other side routes it to the right place
"^~ /.well-known/matrix/" = {
extraConfig = ''
proxy_set_header Host matrix.pvv.ntnu.no;
proxy_pass https://matrix.pvv.ntnu.no/.well-known/matrix/;
'';
};
};
});
}

View File

@@ -1,9 +1,4 @@
{
fp,
pkgs,
values,
...
}:
{ fp, pkgs, values, ... }:
{
imports = [
./hardware-configuration.nix
@@ -24,16 +19,8 @@
systemd.network.networks."30-ens18" = values.defaultNetworkConfig // {
#matchConfig.Name = "enp6s0f0";
matchConfig.Name = "ens18";
address =
with values.hosts.bicep;
[
(ipv4 + "/25")
(ipv6 + "/64")
]
++ (with values.services.turn; [
(ipv4 + "/25")
(ipv6 + "/64")
]);
address = with values.hosts.bicep; [ (ipv4 + "/25") (ipv6 + "/64") ]
++ (with values.services.turn; [ (ipv4 + "/25") (ipv6 + "/64") ]);
};
systemd.network.wait-online = {
anyInterface = true;

View File

@@ -1,49 +1,34 @@
# Do not modify this file! It was generated by 'nixos-generate-config'
# and may be overwritten by future invocations. Please make changes
# to /etc/nixos/configuration.nix instead.
{
config,
lib,
pkgs,
modulesPath,
...
}:
{ config, lib, pkgs, modulesPath, ... }:
{
imports = [
(modulesPath + "/profiles/qemu-guest.nix")
];
imports =
[ (modulesPath + "/profiles/qemu-guest.nix")
];
boot.initrd.availableKernelModules = [
"ata_piix"
"uhci_hcd"
"ahci"
"sd_mod"
"sr_mod"
];
boot.initrd.availableKernelModules = [ "ata_piix" "uhci_hcd" "ahci" "sd_mod" "sr_mod" ];
boot.initrd.kernelModules = [ ];
boot.kernelModules = [ ];
boot.extraModulePackages = [ ];
fileSystems."/" = {
device = "/dev/disk/by-uuid/20e06202-7a09-47cc-8ef6-5e7afe19453a";
fsType = "ext4";
};
fileSystems."/" =
{ device = "/dev/disk/by-uuid/20e06202-7a09-47cc-8ef6-5e7afe19453a";
fsType = "ext4";
};
# temp data disk; only 128gb, which is not enough until we can add another disk to the system.
fileSystems."/data" = {
device = "/dev/disk/by-uuid/c81af266-0781-4084-b8eb-c2587cbcf1ba";
fsType = "ext4";
};
fileSystems."/data" =
{ device = "/dev/disk/by-uuid/c81af266-0781-4084-b8eb-c2587cbcf1ba";
fsType = "ext4";
};
fileSystems."/boot" = {
device = "/dev/disk/by-uuid/198B-E363";
fsType = "vfat";
options = [
"fmask=0022"
"dmask=0022"
];
};
fileSystems."/boot" =
{ device = "/dev/disk/by-uuid/198B-E363";
fsType = "vfat";
options = [ "fmask=0022" "dmask=0022" ];
};
swapDevices = [ ];

View File

@@ -1,14 +1,7 @@
{
config,
fp,
lib,
pkgs,
...
}:
{ config, fp, lib, pkgs, ... }:
let
cfg = config.services.pvv-calendar-bot;
in
{
in {
sops.secrets = {
"calendar-bot/matrix_token" = {
sopsFile = fp /secrets/bicep/bicep.yaml;

View File

@@ -1,10 +1,4 @@
{
config,
pkgs,
lib,
fp,
...
}:
{ config, pkgs, lib, fp, ... }:
let
cfg = config.services.gickup;
in
@@ -26,88 +20,79 @@ in
lfs = false;
};
instances =
let
defaultGithubConfig = {
settings.token_file = config.sops.secrets."gickup/github-token".path;
};
defaultGitlabConfig = {
# settings.token_file = ...
};
in
{
"github:Git-Mediawiki/Git-Mediawiki" = defaultGithubConfig;
"github:NixOS/nixpkgs" = defaultGithubConfig;
"github:go-gitea/gitea" = defaultGithubConfig;
"github:heimdal/heimdal" = defaultGithubConfig;
"github:saltstack/salt" = defaultGithubConfig;
"github:typst/typst" = defaultGithubConfig;
"github:unmojang/FjordLauncher" = defaultGithubConfig;
"github:unmojang/drasl" = defaultGithubConfig;
"github:yushijinhun/authlib-injector" = defaultGithubConfig;
"gitlab:mx-puppet/discord/better-discord.js" = defaultGitlabConfig;
"gitlab:mx-puppet/discord/discord-markdown" = defaultGitlabConfig;
"gitlab:mx-puppet/discord/matrix-discord-parser" = defaultGitlabConfig;
"gitlab:mx-puppet/discord/mx-puppet-discord" = defaultGitlabConfig;
"gitlab:mx-puppet/mx-puppet-bridge" = defaultGitlabConfig;
"any:glibc" = {
settings.url = "https://sourceware.org/git/glibc.git";
};
"any:out-of-your-element" = {
settings.url = "https://gitdab.com/cadence/out-of-your-element.git";
};
"any:out-of-your-element-module" = {
settings.url = "https://cgit.rory.gay/nix/OOYE-module.git";
};
instances = let
defaultGithubConfig = {
settings.token_file = config.sops.secrets."gickup/github-token".path;
};
};
defaultGitlabConfig = {
# settings.token_file = ...
};
in {
"github:Git-Mediawiki/Git-Mediawiki" = defaultGithubConfig;
"github:NixOS/nixpkgs" = defaultGithubConfig;
"github:go-gitea/gitea" = defaultGithubConfig;
"github:heimdal/heimdal" = defaultGithubConfig;
"github:saltstack/salt" = defaultGithubConfig;
"github:typst/typst" = defaultGithubConfig;
"github:unmojang/FjordLauncher" = defaultGithubConfig;
"github:unmojang/drasl" = defaultGithubConfig;
"github:yushijinhun/authlib-injector" = defaultGithubConfig;
services.cgit =
let
domain = "mirrors.pvv.ntnu.no";
in
{
${domain} = {
enable = true;
package = pkgs.callPackage (fp /packages/cgit.nix) { };
group = "gickup";
scanPath = "${cfg.dataDir}/linktree";
gitHttpBackend.checkExportOkFiles = false;
settings = {
enable-commit-graph = true;
enable-follow-links = true;
enable-http-clone = true;
enable-remote-branches = true;
clone-url = "https://${domain}/$CGIT_REPO_URL";
remove-suffix = true;
root-title = "PVVSPPP";
root-desc = "PVV Speiler Praktisk og Prominent Programvare";
snapshots = "all";
logo = "/PVV-logo.png";
};
"gitlab:mx-puppet/discord/better-discord.js" = defaultGitlabConfig;
"gitlab:mx-puppet/discord/discord-markdown" = defaultGitlabConfig;
"gitlab:mx-puppet/discord/matrix-discord-parser" = defaultGitlabConfig;
"gitlab:mx-puppet/discord/mx-puppet-discord" = defaultGitlabConfig;
"gitlab:mx-puppet/mx-puppet-bridge" = defaultGitlabConfig;
"any:glibc" = {
settings.url = "https://sourceware.org/git/glibc.git";
};
"any:out-of-your-element" = {
settings.url = "https://gitdab.com/cadence/out-of-your-element.git";
};
"any:out-of-your-element-module" = {
settings.url = "https://cgit.rory.gay/nix/OOYE-module.git";
};
};
};
services.cgit = let
domain = "mirrors.pvv.ntnu.no";
in {
${domain} = {
enable = true;
package = pkgs.callPackage (fp /packages/cgit.nix) { };
group = "gickup";
scanPath = "${cfg.dataDir}/linktree";
gitHttpBackend.checkExportOkFiles = false;
settings = {
enable-commit-graph = true;
enable-follow-links = true;
enable-http-clone = true;
enable-remote-branches = true;
clone-url = "https://${domain}/$CGIT_REPO_URL";
remove-suffix = true;
root-title = "PVVSPPP";
root-desc = "PVV Speiler Praktisk og Prominent Programvare";
snapshots = "all";
logo = "/PVV-logo.png";
};
};
};
services.nginx.virtualHosts."mirrors.pvv.ntnu.no" = {
forceSSL = true;
enableACME = true;
locations."= /PVV-logo.png".alias =
let
small-pvv-logo =
pkgs.runCommandLocal "pvv-logo-96x96"
{
nativeBuildInputs = [ pkgs.imagemagick ];
}
''
magick '${fp /assets/logo_blue_regular.svg}' -resize 96x96 PNG:"$out"
'';
in
toString small-pvv-logo;
locations."= /PVV-logo.png".alias = let
small-pvv-logo = pkgs.runCommandLocal "pvv-logo-96x96" {
nativeBuildInputs = [ pkgs.imagemagick ];
} ''
magick '${fp /assets/logo_blue_regular.svg}' -resize 96x96 PNG:"$out"
'';
in toString small-pvv-logo;
};
systemd.services."fcgiwrap-cgit-mirrors.pvv.ntnu.no" = {

View File

@@ -1,12 +1,4 @@
{
config,
lib,
fp,
pkgs,
secrets,
values,
...
}:
{ config, lib, fp, pkgs, secrets, values, ... }:
{
sops.secrets."matrix/coturn/static-auth-secret" = {
@@ -135,31 +127,18 @@
};
networking.firewall = {
interfaces.enp6s0f0 =
let
range = with config.services.coturn; [
{
from = min-port;
to = max-port;
}
];
in
{
allowedUDPPortRanges = range;
allowedUDPPorts = [
443
3478
3479
5349
];
allowedTCPPortRanges = range;
allowedTCPPorts = [
443
3478
3479
5349
];
};
interfaces.enp6s0f0 = let
range = with config.services.coturn; [ {
from = min-port;
to = max-port;
} ];
in
{
allowedUDPPortRanges = range;
allowedUDPPorts = [ 443 3478 3479 5349 ];
allowedTCPPortRanges = range;
allowedTCPPorts = [ 443 3478 3479 5349 ];
};
};
}

View File

@@ -1,9 +1,4 @@
{
config,
lib,
fp,
...
}:
{ config, lib, fp, ... }:
let
cfg = config.services.mx-puppet-discord;
@@ -49,6 +44,7 @@ in
];
};
services.mx-puppet-discord.enable = false;
services.mx-puppet-discord.settings = {
bridge = {
@@ -56,21 +52,16 @@ in
domain = "pvv.ntnu.no";
homeserverUrl = "https://matrix.pvv.ntnu.no";
};
provisioning.whitelist = [
"@dandellion:dodsorf\\.as"
"@danio:pvv\\.ntnu\\.no"
];
provisioning.whitelist = [ "@dandellion:dodsorf\\.as" "@danio:pvv\\.ntnu\\.no"];
relay.whitelist = [ ".*" ];
selfService.whitelist = [
"@danio:pvv\\.ntnu\\.no"
"@dandellion:dodsorf\\.as"
];
selfService.whitelist = [ "@danio:pvv\\.ntnu\\.no" "@dandellion:dodsorf\\.as" ];
};
services.mx-puppet-discord.serviceDependencies = [
"matrix-synapse.target"
"nginx.service"
];
services.matrix-synapse-next.settings = {
app_service_config_files = [
config.sops.templates."discord-registration.yaml".path

View File

@@ -1,13 +1,7 @@
{
config,
lib,
pkgs,
...
}:
{ config, lib, pkgs, ... }:
let
synapse-cfg = config.services.matrix-synapse-next;
in
{
in {
services.pvv-matrix-well-known.client = {
"m.homeserver" = {
base_url = "https://matrix.pvv.ntnu.no";
@@ -27,12 +21,12 @@ in
default_server_config = config.services.pvv-matrix-well-known.client;
disable_3pid_login = true;
# integrations_ui_url = "https://dimension.dodsorf.as/riot";
# integrations_rest_url = "https://dimension.dodsorf.as/api/v1/scalar";
# integrations_widgets_urls = [
# "https://dimension.dodsorf.as/widgets"
# ];
# integration_jitsi_widget_url = "https://dimension.dodsorf.as/widgets/jitsi";
# integrations_ui_url = "https://dimension.dodsorf.as/riot";
# integrations_rest_url = "https://dimension.dodsorf.as/api/v1/scalar";
# integrations_widgets_urls = [
# "https://dimension.dodsorf.as/widgets"
# ];
# integration_jitsi_widget_url = "https://dimension.dodsorf.as/widgets/jitsi";
defaultCountryCode = "NO";
showLabsSettings = true;
features = {

View File

@@ -1,11 +1,4 @@
{
config,
lib,
fp,
unstablePkgs,
inputs,
...
}:
{ config, lib, fp, unstablePkgs, inputs, ... }:
let
cfg = config.services.matrix-hookshot;
@@ -107,8 +100,7 @@ in
};
serviceBots = [
{
localpart = "bot_feeds";
{ localpart = "bot_feeds";
displayname = "Aya";
avatar = ./feeds.png;
prefix = "!aya";
@@ -123,44 +115,20 @@ in
permissions = [
# Users of the PVV Server
{
actor = "pvv.ntnu.no";
services = [
{
service = "*";
level = "commands";
}
];
{ actor = "pvv.ntnu.no";
services = [ { service = "*"; level = "commands"; } ];
}
# Members of Medlem space (for people with their own hs)
{
actor = "!pZOTJQinWyyTWaeOgK:pvv.ntnu.no";
services = [
{
service = "*";
level = "commands";
}
];
{ actor = "!pZOTJQinWyyTWaeOgK:pvv.ntnu.no";
services = [ { service = "*"; level = "commands"; } ];
}
# Members of Drift
{
actor = "!eYgeufLrninXxQpYml:pvv.ntnu.no";
services = [
{
service = "*";
level = "admin";
}
];
{ actor = "!eYgeufLrninXxQpYml:pvv.ntnu.no";
services = [ { service = "*"; level = "admin"; } ];
}
# Dan bootstrap
{
actor = "@dandellion:dodsorf.as";
services = [
{
service = "*";
level = "admin";
}
];
{ actor = "@dandellion:dodsorf.as";
services = [ { service = "*"; level = "admin"; } ];
}
];
};

View File

@@ -1,9 +1,4 @@
{
config,
lib,
fp,
...
}:
{ config, lib, fp, ... }:
let
synapseConfig = config.services.matrix-synapse-next;
matrixDomain = "matrix.pvv.ntnu.no";
@@ -25,12 +20,10 @@ in
};
services.pvv-matrix-well-known.client = lib.mkIf cfg.enable {
"org.matrix.msc4143.rtc_foci" = [
{
type = "livekit";
livekit_service_url = "https://${matrixDomain}/livekit/jwt";
}
];
"org.matrix.msc4143.rtc_foci" = [{
type = "livekit";
livekit_service_url = "https://${matrixDomain}/livekit/jwt";
}];
};
services.livekit = {
@@ -50,12 +43,7 @@ in
keyFile = config.sops.templates."matrix-livekit-keyfile".path;
};
systemd.services.lk-jwt-service.environment.LIVEKIT_FULL_ACCESS_HOMESERVERS = lib.mkIf cfg.enable (
builtins.concatStringsSep "," [
"pvv.ntnu.no"
"dodsorf.as"
]
);
systemd.services.lk-jwt-service.environment.LIVEKIT_FULL_ACCESS_HOMESERVERS = lib.mkIf cfg.enable matrixDomain;
services.nginx.virtualHosts.${matrixDomain} = lib.mkIf cfg.enable {
locations."^~ /livekit/jwt/" = {

View File

@@ -1,9 +1,4 @@
{
config,
lib,
fp,
...
}:
{ config, lib, fp, ... }:
{
sops.secrets."matrix/mjolnir/access_token" = {

View File

@@ -1,11 +1,4 @@
{
config,
pkgs,
lib,
values,
fp,
...
}:
{ config, pkgs, fp, ... }:
let
cfg = config.services.matrix-ooye;
in
@@ -35,23 +28,6 @@ in
};
};
services.rsync-pull-targets = lib.mkIf cfg.enable {
enable = true;
locations."/var/lib/private/matrix-ooye" = {
user = "root";
rrsyncArgs.ro = true;
authorizedKeysAttrs = [
"restrict"
"from=\"principal.pvv.ntnu.no,${values.hosts.principal.ipv6},${values.hosts.principal.ipv4}\""
"no-agent-forwarding"
"no-port-forwarding"
"no-pty"
"no-X11-forwarding"
];
publicKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIE5koYfor5+kKB30Dugj3dAWvmj8h/akQQ2XYDvLobFL matrix_ooye rsync backup";
};
};
services.matrix-ooye = {
enable = true;
homeserver = "https://matrix.pvv.ntnu.no";

View File

@@ -1,9 +1,4 @@
{
lib,
buildPythonPackage,
fetchFromGitHub,
setuptools,
}:
{ lib, buildPythonPackage, fetchFromGitHub, setuptools }:
buildPythonPackage rec {
pname = "matrix-synapse-smtp-auth";

View File

@@ -1,9 +1,5 @@
{
config,
lib,
pkgs,
...
}:
{ config, lib, pkgs, ... }:
# This service requires you to have access to endpoints not available over the internet
# Use an ssh proxy or similar to access this dashboard.

View File

@@ -1,9 +1,4 @@
{
config,
lib,
utils,
...
}:
{ config, lib, utils, ... }:
let
cfg = config.services.synapse-auto-compressor;
in

View File

@@ -1,23 +1,13 @@
{
config,
lib,
fp,
pkgs,
values,
inputs,
...
}:
{ config, lib, fp, pkgs, values, inputs, ... }:
let
cfg = config.services.matrix-synapse-next;
matrix-lib = inputs.matrix-next.lib;
imap0Attrs =
with lib;
f: set: listToAttrs (imap0 (i: attr: nameValuePair attr (f i attr set.${attr})) (attrNames set));
in
{
imap0Attrs = with lib; f: set:
listToAttrs (imap0 (i: attr: nameValuePair attr (f i attr set.${attr})) (attrNames set));
in {
sops.secrets."matrix/synapse/signing_key" = {
key = "synapse/signing_key";
sopsFile = fp /secrets/bicep/matrix.yaml;
@@ -33,9 +23,7 @@ in
owner = config.users.users.matrix-synapse.name;
group = config.users.users.matrix-synapse.group;
content = ''
registration_shared_secret: ${
config.sops.placeholder."matrix/synapse/user_registration/registration_shared_secret"
}
registration_shared_secret: ${config.sops.placeholder."matrix/synapse/user_registration/registration_shared_secret"}
'';
};
@@ -80,7 +68,7 @@ in
signing_key_path = config.sops.secrets."matrix/synapse/signing_key".path;
media_store_path = "${cfg.dataDir}/media";
media_store_path = "${cfg.dataDir}/media";
database = {
name = "psycopg2";
@@ -122,8 +110,7 @@ in
password_config.enabled = true;
modules = [
{
module = "smtp_auth_provider.SMTPAuthProvider";
{ module = "smtp_auth_provider.SMTPAuthProvider";
config = {
smtp_host = "smtp.pvv.ntnu.no";
};
@@ -196,79 +183,61 @@ in
services.pvv-matrix-well-known.server."m.server" = "matrix.pvv.ntnu.no:443";
services.nginx.virtualHosts."matrix.pvv.ntnu.no" = lib.mkMerge [
{
kTLS = true;
}
{
locations."/_synapse/admin" = {
proxyPass = "http://$synapse_backend";
extraConfig = ''
allow 127.0.0.1;
allow ::1;
allow ${values.hosts.bicep.ipv4};
allow ${values.hosts.bicep.ipv6};
deny all;
'';
};
}
{
locations =
let
connectionInfo = w: matrix-lib.workerConnectionResource "metrics" w;
socketAddress =
w:
let
c = connectionInfo w;
in
"${c.host}:${toString c.port}";
{
kTLS = true;
}
{
locations."/_synapse/admin" = {
proxyPass = "http://$synapse_backend";
extraConfig = ''
allow 127.0.0.1;
allow ::1;
allow ${values.hosts.bicep.ipv4};
allow ${values.hosts.bicep.ipv6};
deny all;
'';
};
}
{
locations = let
connectionInfo = w: matrix-lib.workerConnectionResource "metrics" w;
socketAddress = w: let c = connectionInfo w; in "${c.host}:${toString c.port}";
metricsPath = w: "/metrics/${w.type}/${toString w.index}";
proxyPath = w: "http://${socketAddress w}/_synapse/metrics";
in
lib.mapAttrs' (
n: v:
lib.nameValuePair (metricsPath v) {
proxyPass = proxyPath v;
extraConfig = ''
allow ${values.hosts.ildkule.ipv4};
allow ${values.hosts.ildkule.ipv6};
deny all;
'';
}
) cfg.workers.instances;
}
{
locations."/metrics/master/1" = {
proxyPass = "http://127.0.0.1:9000/_synapse/metrics";
metricsPath = w: "/metrics/${w.type}/${toString w.index}";
proxyPath = w: "http://${socketAddress w}/_synapse/metrics";
in lib.mapAttrs' (n: v: lib.nameValuePair
(metricsPath v) {
proxyPass = proxyPath v;
extraConfig = ''
allow ${values.hosts.ildkule.ipv4};
allow ${values.hosts.ildkule.ipv6};
deny all;
'';
};
})
cfg.workers.instances;
}
{
locations."/metrics/master/1" = {
proxyPass = "http://127.0.0.1:9000/_synapse/metrics";
extraConfig = ''
allow ${values.hosts.ildkule.ipv4};
allow ${values.hosts.ildkule.ipv6};
deny all;
'';
};
locations."/metrics/" =
let
endpoints =
lib.pipe cfg.workers.instances [
(lib.mapAttrsToList (_: v: v))
(map (w: "${w.type}/${toString w.index}"))
(map (w: "matrix.pvv.ntnu.no/metrics/${w}"))
]
++ [ "matrix.pvv.ntnu.no/metrics/master/1" ];
in
{
alias =
pkgs.writeTextDir "/config.json" (
builtins.toJSON [
{
targets = endpoints;
labels = { };
}
]
)
+ "/";
};
}
];
locations."/metrics/" = let
endpoints = lib.pipe cfg.workers.instances [
(lib.mapAttrsToList (_: v: v))
(map (w: "${w.type}/${toString w.index}"))
(map (w: "matrix.pvv.ntnu.no/metrics/${w}"))
] ++ [ "matrix.pvv.ntnu.no/metrics/master/1" ];
in {
alias = pkgs.writeTextDir "/config.json"
(builtins.toJSON [
{ targets = endpoints;
labels = { };
}]) + "/";
};
}];
}
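The `/metrics/` location above serves a small service-discovery file for Prometheus: `pkgs.writeTextDir` creates a directory containing config.json, and the trailing `+ "/"` turns it into an nginx alias. Given the worker instances on this host, `builtins.toJSON` emits something along these lines (formatted for readability, worker names illustrative):

  [
    {
      "targets": [
        "matrix.pvv.ntnu.no/metrics/generic_worker/0",
        "matrix.pvv.ntnu.no/metrics/federation_sender/0",
        "matrix.pvv.ntnu.no/metrics/master/1"
      ],
      "labels": {}
    }
  ]

The Prometheus side (see the synapse scrape config further down) consumes this file via http_sd_configs and splits each target back into a host and a per-worker metrics path.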

View File

@@ -1,9 +1,4 @@
{
config,
pkgs,
lib,
...
}:
{ config, pkgs, lib, ... }:
let
cfg = config.services.pvv-matrix-well-known;
format = pkgs.formats.json { };

View File

@@ -1,9 +1,4 @@
{
config,
lib,
pkgs,
...
}:
{ config, lib, pkgs, ... }:
let
cfg = config.services.minecraft-heatmap;
in
@@ -32,25 +27,23 @@ in
"sshkey:${config.sops.secrets."minecraft-heatmap/ssh-key/private".path}"
];
preStart =
let
knownHostsFile = pkgs.writeText "minecraft-heatmap-known-hosts" ''
innovation.pvv.ntnu.no ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIE9O/y5uqcLKCodg2Q+XfZPH/AoUIyBlDhigImU+4+Kn
innovation.pvv.ntnu.no ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQClR9GvWeVPZHudlnFXhGHUX5sGX9nscsOsotnlQ4uVuGsgvRifsVsuDULlAFXwoV1tYp4vnyXlsVtMddpLI5ANOIDcZ4fgDxpfSQmtHKssNpDcfMhFJbfRVyacipjA4osxTxvLox/yjtVt+URjTHUA1MWzEwc26KfiOvWO5tCBTan7doN/4KOyT05GwBxwzUAwUmoGTacIITck2Y9qp4+xFYqehbXqPdBb15hFyd38OCQhtU1hWV2Yi18+hJ4nyjc/g5pr6mW09ULlFghe/BaTUXrTisYC6bMcJZsTDwsvld9581KPvoNZOTQhZPTEQCZZ1h54fe0ZHuveVB3TIHovZyjoUuaf4uiFOjJVaKRB+Ig+Il6r7tMUn9CyHtus/Nd86E0TFBzoKxM0OFu88oaUlDtZVrUJL5En1lGoimajebb1JPxllFN5hqIT+gVyMY6nRzkcfS7ieny/U4rzXY2rfz98selftgh3LsBywwADv65i+mPw1A/1QdND1R6fV4U=
innovation.pvv.ntnu.no ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBNjl3HfsDqmALWCL9uhz9k93RAD2565ndBqUh4N/rvI7MCwEJ6iRCdDev0YzB1Fpg24oriyYoxZRP24ifC2sQf8=
'';
in
''
mkdir -p '${cfg.minecraftLogsDir}'
"${lib.getExe pkgs.rsync}" \
--archive \
--verbose \
--progress \
--no-owner \
--no-group \
--rsh="${pkgs.openssh}/bin/ssh -o UserKnownHostsFile=\"${knownHostsFile}\" -i \"$CREDENTIALS_DIRECTORY\"/sshkey" \
root@innovation.pvv.ntnu.no:/ \
'${cfg.minecraftLogsDir}'/
preStart = let
knownHostsFile = pkgs.writeText "minecraft-heatmap-known-hosts" ''
innovation.pvv.ntnu.no ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIE9O/y5uqcLKCodg2Q+XfZPH/AoUIyBlDhigImU+4+Kn
innovation.pvv.ntnu.no ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQClR9GvWeVPZHudlnFXhGHUX5sGX9nscsOsotnlQ4uVuGsgvRifsVsuDULlAFXwoV1tYp4vnyXlsVtMddpLI5ANOIDcZ4fgDxpfSQmtHKssNpDcfMhFJbfRVyacipjA4osxTxvLox/yjtVt+URjTHUA1MWzEwc26KfiOvWO5tCBTan7doN/4KOyT05GwBxwzUAwUmoGTacIITck2Y9qp4+xFYqehbXqPdBb15hFyd38OCQhtU1hWV2Yi18+hJ4nyjc/g5pr6mW09ULlFghe/BaTUXrTisYC6bMcJZsTDwsvld9581KPvoNZOTQhZPTEQCZZ1h54fe0ZHuveVB3TIHovZyjoUuaf4uiFOjJVaKRB+Ig+Il6r7tMUn9CyHtus/Nd86E0TFBzoKxM0OFu88oaUlDtZVrUJL5En1lGoimajebb1JPxllFN5hqIT+gVyMY6nRzkcfS7ieny/U4rzXY2rfz98selftgh3LsBywwADv65i+mPw1A/1QdND1R6fV4U=
innovation.pvv.ntnu.no ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBNjl3HfsDqmALWCL9uhz9k93RAD2565ndBqUh4N/rvI7MCwEJ6iRCdDev0YzB1Fpg24oriyYoxZRP24ifC2sQf8=
'';
in ''
mkdir -p '${cfg.minecraftLogsDir}'
"${lib.getExe pkgs.rsync}" \
--archive \
--verbose \
--progress \
--no-owner \
--no-group \
--rsh="${pkgs.openssh}/bin/ssh -o UserKnownHostsFile=\"${knownHostsFile}\" -i \"$CREDENTIALS_DIRECTORY\"/sshkey" \
root@innovation.pvv.ntnu.no:/ \
'${cfg.minecraftLogsDir}'/
'';
};
}

View File

@@ -1,10 +1,4 @@
{
config,
lib,
pkgs,
values,
...
}:
{ config, lib, pkgs, values, ... }:
let
cfg = config.services.mysql;
backupDir = "/data/mysql-backups";
@@ -16,10 +10,10 @@ in
# };
systemd.tmpfiles.settings."10-mysql-backups".${backupDir}.d = {
user = "mysql";
group = "mysql";
mode = "700";
};
user = "mysql";
group = "mysql";
mode = "700";
};
services.rsync-pull-targets = lib.mkIf cfg.enable {
enable = true;
@@ -50,25 +44,23 @@ in
zstd
];
script =
let
rotations = 2;
in
''
set -euo pipefail
script = let
rotations = 2;
in ''
set -euo pipefail
OUT_FILE="$STATE_DIRECTORY/mysql-dump-$(date --iso-8601).sql.zst"
OUT_FILE="$STATE_DIRECTORY/mysql-dump-$(date --iso-8601).sql.zst"
mysqldump --all-databases | zstd --compress -9 --rsyncable -o "$OUT_FILE"
mysqldump --all-databases | zstd --compress -9 --rsyncable -o "$OUT_FILE"
# NOTE: this needs to be a hardlink for rrsync to allow sending it
rm "$STATE_DIRECTORY/mysql-dump-latest.sql.zst" ||:
ln -T "$OUT_FILE" "$STATE_DIRECTORY/mysql-dump-latest.sql.zst"
# NOTE: this needs to be a hardlink for rrsync to allow sending it
rm "$STATE_DIRECTORY/mysql-dump-latest.sql.zst" ||:
ln -T "$OUT_FILE" "$STATE_DIRECTORY/mysql-dump-latest.sql.zst"
while [ "$(find "$STATE_DIRECTORY" -type f -printf '.' | wc -c)" -gt ${toString (rotations + 1)} ]; do
rm "$(find "$STATE_DIRECTORY" -type f -printf '%T+ %p\n' | sort | head -n 1 | cut -d' ' -f2)"
done
'';
while [ "$(find "$STATE_DIRECTORY" -type f -printf '.' | wc -c)" -gt ${toString (rotations + 1)} ]; do
rm "$(find "$STATE_DIRECTORY" -type f -printf '%T+ %p\n' | sort | head -n 1 | cut -d' ' -f2)"
done
'';
serviceConfig = {
Type = "oneshot";

View File

@@ -1,10 +1,4 @@
{
config,
pkgs,
lib,
values,
...
}:
{ config, pkgs, lib, values, ... }:
let
cfg = config.services.mysql;
dataDir = "/data/mysql";
@@ -42,14 +36,12 @@ in
# a password which can be found in /secrets/ildkule/ildkule.yaml
# We have also changed both the host and auth plugin of this user
# to be 'ildkule.pvv.ntnu.no' and 'mysql_native_password' respectively.
ensureUsers = [
{
name = "prometheus_mysqld_exporter";
ensurePermissions = {
"*.*" = "PROCESS, REPLICATION CLIENT, SELECT, SLAVE MONITOR";
};
}
];
ensureUsers = [{
name = "prometheus_mysqld_exporter";
ensurePermissions = {
"*.*" = "PROCESS, REPLICATION CLIENT, SELECT, SLAVE MONITOR";
};
}];
};
networking.firewall.allowedTCPPorts = lib.mkIf cfg.enable [ 3306 ];

View File

@@ -1,10 +1,4 @@
{
config,
lib,
pkgs,
values,
...
}:
{ config, lib, pkgs, values, ... }:
let
cfg = config.services.postgresql;
backupDir = "/data/postgresql-backups";
@@ -17,10 +11,10 @@ in
# };
systemd.tmpfiles.settings."10-postgresql-backups".${backupDir}.d = {
user = "postgres";
group = "postgres";
mode = "700";
};
user = "postgres";
group = "postgres";
mode = "700";
};
services.rsync-pull-targets = lib.mkIf cfg.enable {
enable = true;
@@ -51,25 +45,23 @@ in
cfg.package
];
script =
let
rotations = 2;
in
''
set -euo pipefail
script = let
rotations = 2;
in ''
set -euo pipefail
OUT_FILE="$STATE_DIRECTORY/postgresql-dump-$(date --iso-8601).sql.zst"
OUT_FILE="$STATE_DIRECTORY/postgresql-dump-$(date --iso-8601).sql.zst"
pg_dumpall -U postgres | zstd --compress -9 --rsyncable -o "$OUT_FILE"
pg_dumpall -U postgres | zstd --compress -9 --rsyncable -o "$OUT_FILE"
# NOTE: this needs to be a hardlink for rrsync to allow sending it
rm "$STATE_DIRECTORY/postgresql-dump-latest.sql.zst" ||:
ln -T "$OUT_FILE" "$STATE_DIRECTORY/postgresql-dump-latest.sql.zst"
# NOTE: this needs to be a hardlink for rrsync to allow sending it
rm "$STATE_DIRECTORY/postgresql-dump-latest.sql.zst" ||:
ln -T "$OUT_FILE" "$STATE_DIRECTORY/postgresql-dump-latest.sql.zst"
while [ "$(find "$STATE_DIRECTORY" -type f -printf '.' | wc -c)" -gt ${toString (rotations + 1)} ]; do
rm "$(find "$STATE_DIRECTORY" -type f -printf '%T+ %p\n' | sort | head -n 1 | cut -d' ' -f2)"
done
'';
while [ "$(find "$STATE_DIRECTORY" -type f -printf '.' | wc -c)" -gt ${toString (rotations + 1)} ]; do
rm "$(find "$STATE_DIRECTORY" -type f -printf '%T+ %p\n' | sort | head -n 1 | cut -d' ' -f2)"
done
'';
serviceConfig = {
Type = "oneshot";

View File

@@ -1,10 +1,4 @@
{
config,
lib,
pkgs,
values,
...
}:
{ config, lib, pkgs, values, ... }:
let
cfg = config.services.postgresql;
in

View File

@@ -1,14 +1,8 @@
{
lib,
config,
pkgs,
values,
...
}:
{ config, pkgs, values, ... }:
{
networking.nat = {
enable = true;
internalInterfaces = [ "ve-+" ];
internalInterfaces = ["ve-+"];
externalInterface = "ens3";
# Lazy IPv6 connectivity for the container
enableIPv6 = true;
@@ -16,11 +10,9 @@
containers.bikkje = {
autoStart = true;
config =
{ config, pkgs, ... }:
{
#import packages
packages = with pkgs; [
config = { config, pkgs, ... }: {
#import packages
packages = with pkgs; [
alpine
mutt
mutt-ics
@@ -30,66 +22,26 @@
hexchat
irssi
pidgin
];
];
networking = {
hostName = "bikkje";
firewall = {
enable = true;
# Allow SSH and HTTP and ports for email and irc
allowedTCPPorts = [
80
22
194
994
6665
6666
6667
6668
6669
6697
995
993
25
465
587
110
143
993
995
];
allowedUDPPorts = [
80
22
194
994
6665
6666
6667
6668
6669
6697
995
993
25
465
587
110
143
993
995
];
};
# Use systemd-resolved inside the container
# Workaround for bug https://github.com/NixOS/nixpkgs/issues/162686
useHostResolvConf = lib.mkForce false;
networking = {
hostName = "bikkje";
firewall = {
enable = true;
# Allow SSH and HTTP and ports for email and irc
allowedTCPPorts = [ 80 22 194 994 6665 6666 6667 6668 6669 6697 995 993 25 465 587 110 143 993 995 ];
allowedUDPPorts = [ 80 22 194 994 6665 6666 6667 6668 6669 6697 995 993 25 465 587 110 143 993 995 ];
};
services.resolved.enable = true;
# Don't change (even during upgrades) unless you know what you are doing.
# See https://search.nixos.org/options?show=system.stateVersion
system.stateVersion = "23.11";
# Use systemd-resolved inside the container
# Workaround for bug https://github.com/NixOS/nixpkgs/issues/162686
useHostResolvConf = mkForce false;
};
services.resolved.enable = true;
# Don't change (even during upgrades) unless you know what you are doing.
# See https://search.nixos.org/options?show=system.stateVersion
system.stateVersion = "23.11";
};
};
}
};

View File

@@ -1,25 +1,16 @@
{
config,
fp,
pkgs,
values,
...
}:
{ config, fp, pkgs, values, ... }:
{
imports = [
# Include the results of the hardware scan.
./hardware-configuration.nix
(fp /base)
# Include the results of the hardware scan.
./hardware-configuration.nix
(fp /base)
./services/grzegorz.nix
];
./services/grzegorz.nix
];
systemd.network.networks."30-eno1" = values.defaultNetworkConfig // {
matchConfig.Name = "eno1";
address = with values.hosts.brzeczyszczykiewicz; [
(ipv4 + "/25")
(ipv6 + "/64")
];
address = with values.hosts.brzeczyszczykiewicz; [ (ipv4 + "/25") (ipv6 + "/64") ];
};
fonts.fontconfig.enable = true;

View File

@@ -1,45 +1,31 @@
# Do not modify this file! It was generated by 'nixos-generate-config'
# and may be overwritten by future invocations. Please make changes
# to /etc/nixos/configuration.nix instead.
{
config,
lib,
pkgs,
modulesPath,
...
}:
{ config, lib, pkgs, modulesPath, ... }:
{
imports = [
(modulesPath + "/installer/scan/not-detected.nix")
];
imports =
[ (modulesPath + "/installer/scan/not-detected.nix")
];
boot.initrd.availableKernelModules = [
"xhci_pci"
"ehci_pci"
"ahci"
"usbhid"
"usb_storage"
"sd_mod"
"sr_mod"
];
boot.initrd.availableKernelModules = [ "xhci_pci" "ehci_pci" "ahci" "usbhid" "usb_storage" "sd_mod" "sr_mod" ];
boot.initrd.kernelModules = [ ];
boot.kernelModules = [ "kvm-intel" ];
boot.extraModulePackages = [ ];
fileSystems."/" = {
device = "/dev/disk/by-uuid/4e8667f8-55de-4103-8369-b94665f42204";
fsType = "ext4";
};
fileSystems."/" =
{ device = "/dev/disk/by-uuid/4e8667f8-55de-4103-8369-b94665f42204";
fsType = "ext4";
};
fileSystems."/boot" = {
device = "/dev/disk/by-uuid/82E3-3D03";
fsType = "vfat";
};
fileSystems."/boot" =
{ device = "/dev/disk/by-uuid/82E3-3D03";
fsType = "vfat";
};
swapDevices = [
{ device = "/dev/disk/by-uuid/d0bf9a21-44bc-44a3-ae55-8f0971875883"; }
];
swapDevices =
[ { device = "/dev/disk/by-uuid/d0bf9a21-44bc-44a3-ae55-8f0971875883"; }
];
# Enables DHCP on each ethernet and wireless interface. In case of scripted networking
# (the default) this is the recommended approach. When using systemd-networkd it's

View File

@@ -1,25 +1,16 @@
{
config,
fp,
pkgs,
values,
...
}:
{ config, fp, pkgs, values, ... }:
{
imports = [
# Include the results of the hardware scan.
./hardware-configuration.nix
(fp /base)
# Include the results of the hardware scan.
./hardware-configuration.nix
(fp /base)
(fp /modules/grzegorz.nix)
];
(fp /modules/grzegorz.nix)
];
systemd.network.networks."30-eno1" = values.defaultNetworkConfig // {
matchConfig.Name = "eno1";
address = with values.hosts.georg; [
(ipv4 + "/25")
(ipv6 + "/64")
];
address = with values.hosts.georg; [ (ipv4 + "/25") (ipv6 + "/64") ];
};
services.spotifyd = {

View File

@@ -1,44 +1,31 @@
# Do not modify this file! It was generated by 'nixos-generate-config'
# and may be overwritten by future invocations. Please make changes
# to /etc/nixos/configuration.nix instead.
{
config,
lib,
pkgs,
modulesPath,
...
}:
{ config, lib, pkgs, modulesPath, ... }:
{
imports = [
(modulesPath + "/installer/scan/not-detected.nix")
];
imports =
[ (modulesPath + "/installer/scan/not-detected.nix")
];
boot.initrd.availableKernelModules = [
"xhci_pci"
"ehci_pci"
"ahci"
"usb_storage"
"usbhid"
"sd_mod"
];
boot.initrd.availableKernelModules = [ "xhci_pci" "ehci_pci" "ahci" "usb_storage" "usbhid" "sd_mod" ];
boot.initrd.kernelModules = [ ];
boot.kernelModules = [ "kvm-intel" ];
boot.extraModulePackages = [ ];
fileSystems."/" = {
device = "/dev/disk/by-uuid/33825f0d-5a63-40fc-83db-bfa1ebb72ba0";
fsType = "ext4";
};
fileSystems."/" =
{ device = "/dev/disk/by-uuid/33825f0d-5a63-40fc-83db-bfa1ebb72ba0";
fsType = "ext4";
};
fileSystems."/boot" = {
device = "/dev/disk/by-uuid/145E-7362";
fsType = "vfat";
};
fileSystems."/boot" =
{ device = "/dev/disk/by-uuid/145E-7362";
fsType = "vfat";
};
swapDevices = [
{ device = "/dev/disk/by-uuid/7ed27e21-3247-44cd-8bcc-5d4a2efebf57"; }
];
swapDevices =
[ { device = "/dev/disk/by-uuid/7ed27e21-3247-44cd-8bcc-5d4a2efebf57"; }
];
# Enables DHCP on each ethernet and wireless interface. In case of scripted networking
# (the default) this is the recommended approach. When using systemd-networkd it's

View File

@@ -1,21 +1,14 @@
{
config,
fp,
pkgs,
lib,
values,
...
}:
{ config, fp, pkgs, lib, values, ... }:
{
imports = [
# Include the results of the hardware scan.
./hardware-configuration.nix
(fp /base)
# Include the results of the hardware scan.
./hardware-configuration.nix
(fp /base)
./services/monitoring
./services/nginx
./services/journald-remote.nix
];
./services/monitoring
./services/nginx
./services/journald-remote.nix
];
boot.loader.systemd-boot.enable = false;
boot.loader.grub.device = "/dev/vda";
@@ -24,37 +17,26 @@
# Openstack Neutron and systemd-networkd are not best friends, use something else:
systemd.network.enable = lib.mkForce false;
networking =
let
hostConf = values.hosts.ildkule;
in
{
tempAddresses = "disabled";
useDHCP = lib.mkForce true;
networking = let
hostConf = values.hosts.ildkule;
in {
tempAddresses = "disabled";
useDHCP = lib.mkForce true;
search = values.defaultNetworkConfig.domains;
nameservers = values.defaultNetworkConfig.dns;
defaultGateway.address = hostConf.ipv4_internal_gw;
search = values.defaultNetworkConfig.domains;
nameservers = values.defaultNetworkConfig.dns;
defaultGateway.address = hostConf.ipv4_internal_gw;
interfaces."ens4" = {
ipv4.addresses = [
{
address = hostConf.ipv4;
prefixLength = 32;
}
{
address = hostConf.ipv4_internal;
prefixLength = 24;
}
];
ipv6.addresses = [
{
address = hostConf.ipv6;
prefixLength = 64;
}
];
};
interfaces."ens4" = {
ipv4.addresses = [
{ address = hostConf.ipv4; prefixLength = 32; }
{ address = hostConf.ipv4_internal; prefixLength = 24; }
];
ipv6.addresses = [
{ address = hostConf.ipv6; prefixLength = 64; }
];
};
};
services.qemuGuest.enable = true;

View File

@@ -1,12 +1,7 @@
{ modulesPath, lib, ... }:
{
imports = [ (modulesPath + "/profiles/qemu-guest.nix") ];
boot.initrd.availableKernelModules = [
"ata_piix"
"uhci_hcd"
"xen_blkfront"
"vmw_pvscsi"
];
boot.initrd.availableKernelModules = [ "ata_piix" "uhci_hcd" "xen_blkfront" "vmw_pvscsi" ];
boot.initrd.kernelModules = [ "nvme" ];
fileSystems."/" = {
device = "/dev/disk/by-uuid/e35eb4ce-aac3-4f91-8383-6e7cd8bbf942";

View File

@@ -1,9 +1,4 @@
{
config,
lib,
values,
...
}:
{ config, lib, values, ... }:
let
cfg = config.services.journald.remote;
domainName = "journald.pvv.ntnu.no";
@@ -27,15 +22,13 @@ in
services.journald.remote = {
enable = true;
settings.Remote =
let
inherit (config.security.acme.certs.${domainName}) directory;
in
{
ServerKeyFile = "/run/credentials/systemd-journal-remote.service/key.pem";
ServerCertificateFile = "/run/credentials/systemd-journal-remote.service/cert.pem";
TrustedCertificateFile = "-";
};
settings.Remote = let
inherit (config.security.acme.certs.${domainName}) directory;
in {
ServerKeyFile = "/run/credentials/systemd-journal-remote.service/key.pem";
ServerCertificateFile = "/run/credentials/systemd-journal-remote.service/cert.pem";
TrustedCertificateFile = "-";
};
};
systemd.sockets."systemd-journal-remote" = {
@@ -54,14 +47,12 @@ in
systemd.services."systemd-journal-remote" = {
serviceConfig = {
LoadCredential =
let
inherit (config.security.acme.certs.${domainName}) directory;
in
[
"key.pem:${directory}/key.pem"
"cert.pem:${directory}/cert.pem"
];
LoadCredential = let
inherit (config.security.acme.certs.${domainName}) directory;
in [
"key.pem:${directory}/key.pem"
"cert.pem:${directory}/cert.pem"
];
};
};
}
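A note on the credential wiring above: `LoadCredential` makes systemd copy the ACME files into the unit's private credentials directory at start-up, which is why the `Remote` settings earlier in this file can point at fixed paths:

  key.pem:${directory}/key.pem    ->  /run/credentials/systemd-journal-remote.service/key.pem
  cert.pem:${directory}/cert.pem  ->  /run/credentials/systemd-journal-remote.service/cert.pem

This keeps the private key readable only by the journal-remote service without adding the service user to the ACME group.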

View File

@@ -1,44 +1,33 @@
{
config,
pkgs,
values,
...
}:
let
{ config, pkgs, values, ... }: let
cfg = config.services.grafana;
in
{
sops.secrets =
let
owner = "grafana";
group = "grafana";
in
{
"keys/grafana/secret_key" = { inherit owner group; };
"keys/grafana/admin_password" = { inherit owner group; };
};
in {
sops.secrets = let
owner = "grafana";
group = "grafana";
in {
"keys/grafana/secret_key" = { inherit owner group; };
"keys/grafana/admin_password" = { inherit owner group; };
};
services.grafana = {
enable = true;
settings =
let
# See https://grafana.com/docs/grafana/latest/setup-grafana/configure-grafana/#file-provider
secretFile = path: "$__file{${path}}";
in
{
server = {
domain = "grafana.pvv.ntnu.no";
http_port = 2342;
http_addr = "127.0.0.1";
};
security = {
secret_key = secretFile config.sops.secrets."keys/grafana/secret_key".path;
admin_password = secretFile config.sops.secrets."keys/grafana/admin_password".path;
};
settings = let
# See https://grafana.com/docs/grafana/latest/setup-grafana/configure-grafana/#file-provider
secretFile = path: "$__file{${path}}";
in {
server = {
domain = "grafana.pvv.ntnu.no";
http_port = 2342;
http_addr = "127.0.0.1";
};
security = {
secret_key = secretFile config.sops.secrets."keys/grafana/secret_key".path;
admin_password = secretFile config.sops.secrets."keys/grafana/admin_password".path;
};
};
provision = {
enable = true;
datasources.settings.datasources = [
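For clarity, the `secretFile` helper above merely wraps a path in Grafana's file-provider syntax, so after sops-nix has materialised the secrets the rendered grafana.ini contains lines roughly like this (the /run/secrets path is the usual sops-nix location, shown here as an assumption):

  [security]
  secret_key = $__file{/run/secrets/keys/grafana/secret_key}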

View File

@@ -3,8 +3,7 @@
let
cfg = config.services.loki;
stateDir = "/data/monitoring/loki";
in
{
in {
services.loki = {
enable = true;
configuration = {

View File

@@ -1,8 +1,6 @@
{ config, ... }:
let
{ config, ... }: let
stateDir = "/data/monitoring/prometheus";
in
{
in {
imports = [
./exim.nix
./gitea.nix

View File

@@ -5,11 +5,9 @@
{
job_name = "exim";
scrape_interval = "15s";
static_configs = [
{
targets = [ "microbel.pvv.ntnu.no:9636" ];
}
];
static_configs = [{
targets = [ "microbel.pvv.ntnu.no:9636" ];
}];
}
];
};

View File

@@ -1,18 +1,16 @@
{ ... }:
{
services.prometheus.scrapeConfigs = [
{
job_name = "gitea";
scrape_interval = "60s";
scheme = "https";
services.prometheus.scrapeConfigs = [{
job_name = "gitea";
scrape_interval = "60s";
scheme = "https";
static_configs = [
{
targets = [
"git.pvv.ntnu.no:443"
];
}
];
}
];
static_configs = [
{
targets = [
"git.pvv.ntnu.no:443"
];
}
];
}];
}

View File

@@ -1,5 +1,4 @@
{ config, ... }:
let
{ config, ... }: let
cfg = config.services.prometheus;
mkHostScrapeConfig = name: ports: {
@@ -10,98 +9,32 @@ let
defaultNodeExporterPort = 9100;
defaultSystemdExporterPort = 9101;
defaultNixosExporterPort = 9102;
in
{
services.prometheus.scrapeConfigs = [
{
job_name = "base_info";
static_configs = [
(mkHostScrapeConfig "ildkule" [
cfg.exporters.node.port
cfg.exporters.systemd.port
defaultNixosExporterPort
])
in {
services.prometheus.scrapeConfigs = [{
job_name = "base_info";
static_configs = [
(mkHostScrapeConfig "ildkule" [ cfg.exporters.node.port cfg.exporters.systemd.port defaultNixosExporterPort ])
(mkHostScrapeConfig "bekkalokk" [
defaultNodeExporterPort
defaultSystemdExporterPort
defaultNixosExporterPort
])
(mkHostScrapeConfig "bicep" [
defaultNodeExporterPort
defaultSystemdExporterPort
defaultNixosExporterPort
])
(mkHostScrapeConfig "brzeczyszczykiewicz" [
defaultNodeExporterPort
defaultSystemdExporterPort
defaultNixosExporterPort
])
(mkHostScrapeConfig "georg" [
defaultNodeExporterPort
defaultSystemdExporterPort
defaultNixosExporterPort
])
(mkHostScrapeConfig "gluttony" [
defaultNodeExporterPort
defaultSystemdExporterPort
defaultNixosExporterPort
])
(mkHostScrapeConfig "kommode" [
defaultNodeExporterPort
defaultSystemdExporterPort
defaultNixosExporterPort
])
(mkHostScrapeConfig "lupine-1" [
defaultNodeExporterPort
defaultSystemdExporterPort
defaultNixosExporterPort
])
(mkHostScrapeConfig "lupine-2" [
defaultNodeExporterPort
defaultSystemdExporterPort
defaultNixosExporterPort
])
(mkHostScrapeConfig "lupine-3" [
defaultNodeExporterPort
defaultSystemdExporterPort
defaultNixosExporterPort
])
(mkHostScrapeConfig "lupine-4" [
defaultNodeExporterPort
defaultSystemdExporterPort
defaultNixosExporterPort
])
(mkHostScrapeConfig "lupine-5" [
defaultNodeExporterPort
defaultSystemdExporterPort
defaultNixosExporterPort
])
(mkHostScrapeConfig "temmie" [
defaultNodeExporterPort
defaultSystemdExporterPort
defaultNixosExporterPort
])
(mkHostScrapeConfig "ustetind" [
defaultNodeExporterPort
defaultSystemdExporterPort
defaultNixosExporterPort
])
(mkHostScrapeConfig "wenche" [
defaultNodeExporterPort
defaultSystemdExporterPort
defaultNixosExporterPort
])
(mkHostScrapeConfig "bekkalokk" [ defaultNodeExporterPort defaultSystemdExporterPort defaultNixosExporterPort ])
(mkHostScrapeConfig "bicep" [ defaultNodeExporterPort defaultSystemdExporterPort defaultNixosExporterPort ])
(mkHostScrapeConfig "brzeczyszczykiewicz" [ defaultNodeExporterPort defaultSystemdExporterPort defaultNixosExporterPort ])
(mkHostScrapeConfig "georg" [ defaultNodeExporterPort defaultSystemdExporterPort defaultNixosExporterPort ])
(mkHostScrapeConfig "gluttony" [ defaultNodeExporterPort defaultSystemdExporterPort defaultNixosExporterPort ])
(mkHostScrapeConfig "kommode" [ defaultNodeExporterPort defaultSystemdExporterPort defaultNixosExporterPort ])
(mkHostScrapeConfig "lupine-1" [ defaultNodeExporterPort defaultSystemdExporterPort defaultNixosExporterPort ])
(mkHostScrapeConfig "lupine-2" [ defaultNodeExporterPort defaultSystemdExporterPort defaultNixosExporterPort ])
(mkHostScrapeConfig "lupine-3" [ defaultNodeExporterPort defaultSystemdExporterPort defaultNixosExporterPort ])
(mkHostScrapeConfig "lupine-4" [ defaultNodeExporterPort defaultSystemdExporterPort defaultNixosExporterPort ])
(mkHostScrapeConfig "lupine-5" [ defaultNodeExporterPort defaultSystemdExporterPort defaultNixosExporterPort ])
(mkHostScrapeConfig "temmie" [ defaultNodeExporterPort defaultSystemdExporterPort defaultNixosExporterPort ])
(mkHostScrapeConfig "ustetind" [ defaultNodeExporterPort defaultSystemdExporterPort defaultNixosExporterPort ])
(mkHostScrapeConfig "wenche" [ defaultNodeExporterPort defaultSystemdExporterPort defaultNixosExporterPort ])
(mkHostScrapeConfig "skrott" [
defaultNodeExporterPort
defaultSystemdExporterPort
])
(mkHostScrapeConfig "skrott" [ defaultNodeExporterPort defaultSystemdExporterPort ])
(mkHostScrapeConfig "hildring" [ defaultNodeExporterPort ])
(mkHostScrapeConfig "isvegg" [ defaultNodeExporterPort ])
(mkHostScrapeConfig "microbel" [ defaultNodeExporterPort ])
];
}
];
(mkHostScrapeConfig "hildring" [ defaultNodeExporterPort ])
(mkHostScrapeConfig "isvegg" [ defaultNodeExporterPort ])
(mkHostScrapeConfig "microbel" [ defaultNodeExporterPort ])
];
}];
}

View File

@@ -1,44 +1,40 @@
{ ... }:
{
services.prometheus.scrapeConfigs = [
{
job_name = "synapse";
scrape_interval = "15s";
scheme = "https";
services.prometheus.scrapeConfigs = [{
job_name = "synapse";
scrape_interval = "15s";
scheme = "https";
http_sd_configs = [
{
url = "https://matrix.pvv.ntnu.no/metrics/config.json";
}
];
http_sd_configs = [{
url = "https://matrix.pvv.ntnu.no/metrics/config.json";
}];
relabel_configs = [
{
source_labels = [ "__address__" ];
regex = "[^/]+(/.*)";
target_label = "__metrics_path__";
}
{
source_labels = [ "__address__" ];
regex = "([^/]+)/.*";
target_label = "instance";
}
{
source_labels = [ "__address__" ];
regex = "[^/]+\\/+[^/]+/(.*)/\\d+$";
target_label = "job";
}
{
source_labels = [ "__address__" ];
regex = "[^/]+\\/+[^/]+/.*/(\\d+)$";
target_label = "index";
}
{
source_labels = [ "__address__" ];
regex = "([^/]+)/.*";
target_label = "__address__";
}
];
}
];
relabel_configs = [
{
source_labels = [ "__address__" ];
regex = "[^/]+(/.*)";
target_label = "__metrics_path__";
}
{
source_labels = [ "__address__" ];
regex = "([^/]+)/.*";
target_label = "instance";
}
{
source_labels = [ "__address__" ];
regex = "[^/]+\\/+[^/]+/(.*)/\\d+$";
target_label = "job";
}
{
source_labels = [ "__address__" ];
regex = "[^/]+\\/+[^/]+/.*/(\\d+)$";
target_label = "index";
}
{
source_labels = [ "__address__" ];
regex = "([^/]+)/.*";
target_label = "__address__";
}
];
}];
}
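To make the relabelling above concrete, a hypothetical target discovered from bicep's config.json (worker name illustrative) is rewritten as follows:

  __address__       = "matrix.pvv.ntnu.no/metrics/generic_worker/1"   # as discovered
  __metrics_path__  = "/metrics/generic_worker/1"
  instance          = "matrix.pvv.ntnu.no"
  job               = "generic_worker"
  index             = "1"
  __address__       = "matrix.pvv.ntnu.no"                            # host actually scraped

so Prometheus ends up scraping https://matrix.pvv.ntnu.no/metrics/generic_worker/1 while labelling the resulting series with the worker type and index.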

View File

@@ -1,42 +1,36 @@
{ config, ... }:
let
{ config, ... }: let
cfg = config.services.prometheus;
in
{
in {
sops = {
secrets."config/mysqld_exporter_password" = { };
templates."mysqld_exporter.conf" = {
restartUnits = [ "prometheus-mysqld-exporter.service" ];
content =
let
inherit (config.sops) placeholder;
in
''
[client]
host = mysql.pvv.ntnu.no
port = 3306
user = prometheus_mysqld_exporter
password = ${placeholder."config/mysqld_exporter_password"}
'';
content = let
inherit (config.sops) placeholder;
in ''
[client]
host = mysql.pvv.ntnu.no
port = 3306
user = prometheus_mysqld_exporter
password = ${placeholder."config/mysqld_exporter_password"}
'';
};
};
services.prometheus = {
scrapeConfigs = [
{
job_name = "mysql";
scheme = "http";
metrics_path = cfg.exporters.mysqld.telemetryPath;
static_configs = [
{
targets = [
"localhost:${toString cfg.exporters.mysqld.port}"
];
}
];
}
];
scrapeConfigs = [{
job_name = "mysql";
scheme = "http";
metrics_path = cfg.exporters.mysqld.telemetryPath;
static_configs = [
{
targets = [
"localhost:${toString cfg.exporters.mysqld.port}"
];
}
];
}];
exporters.mysqld = {
enable = true;

View File

@@ -1,17 +1,9 @@
{
pkgs,
lib,
config,
values,
...
}:
let
{ pkgs, lib, config, values, ... }: let
cfg = config.services.prometheus;
in
{
in {
sops.secrets = {
"keys/postgres/postgres_exporter_env" = { };
"keys/postgres/postgres_exporter_knakelibrak_env" = { };
"keys/postgres/postgres_exporter_env" = {};
"keys/postgres/postgres_exporter_knakelibrak_env" = {};
};
services.prometheus = {
@@ -19,26 +11,22 @@ in
{
job_name = "postgres";
scrape_interval = "15s";
static_configs = [
{
targets = [ "localhost:${toString cfg.exporters.postgres.port}" ];
labels = {
server = "bicep";
};
}
];
static_configs = [{
targets = [ "localhost:${toString cfg.exporters.postgres.port}" ];
labels = {
server = "bicep";
};
}];
}
{
job_name = "postgres-knakelibrak";
scrape_interval = "15s";
static_configs = [
{
targets = [ "localhost:${toString (cfg.exporters.postgres.port + 1)}" ];
labels = {
server = "knakelibrak";
};
}
];
static_configs = [{
targets = [ "localhost:${toString (cfg.exporters.postgres.port + 1)}" ];
labels = {
server = "knakelibrak";
};
}];
}
];
@@ -49,11 +37,9 @@ in
};
};
systemd.services.prometheus-postgres-exporter-knakelibrak.serviceConfig =
let
localCfg = config.services.prometheus.exporters.postgres;
in
lib.recursiveUpdate config.systemd.services.prometheus-postgres-exporter.serviceConfig {
systemd.services.prometheus-postgres-exporter-knakelibrak.serviceConfig = let
localCfg = config.services.prometheus.exporters.postgres;
in lib.recursiveUpdate config.systemd.services.prometheus-postgres-exporter.serviceConfig {
EnvironmentFile = config.sops.secrets."keys/postgres/postgres_exporter_knakelibrak_env".path;
ExecStart = ''
${pkgs.prometheus-postgres-exporter}/bin/postgres_exporter \

View File

@@ -1,15 +1,9 @@
{
config,
pkgs,
lib,
...
}:
{ config, pkgs, lib, ... }:
let
cfg = config.services.uptime-kuma;
domain = "status.pvv.ntnu.no";
stateDir = "/data/monitoring/uptime-kuma";
in
{
in {
services.uptime-kuma = {
enable = true;
settings = {

View File

@@ -1,9 +1,4 @@
{
pkgs,
values,
fp,
...
}:
{ pkgs, values, fp, ... }:
{
imports = [
# Include the results of the hardware scan.
@@ -17,10 +12,7 @@
systemd.network.networks."30-ens18" = values.defaultNetworkConfig // {
matchConfig.Name = "ens18";
address = with values.hosts.kommode; [
(ipv4 + "/25")
(ipv6 + "/64")
];
address = with values.hosts.kommode; [ (ipv4 + "/25") (ipv6 + "/64") ];
};
services.btrfs.autoScrub.enable = true;

View File

@@ -1,27 +1,14 @@
# Do not modify this file! It was generated by 'nixos-generate-config'
# and may be overwritten by future invocations. Please make changes
# to /etc/nixos/configuration.nix instead.
{
config,
lib,
pkgs,
modulesPath,
...
}:
{ config, lib, pkgs, modulesPath, ... }:
{
imports = [
(modulesPath + "/profiles/qemu-guest.nix")
];
imports =
[ (modulesPath + "/profiles/qemu-guest.nix")
];
boot.initrd.availableKernelModules = [
"ata_piix"
"uhci_hcd"
"virtio_pci"
"virtio_scsi"
"sd_mod"
"sr_mod"
];
boot.initrd.availableKernelModules = [ "ata_piix" "uhci_hcd" "virtio_pci" "virtio_scsi" "sd_mod" "sr_mod" ];
boot.initrd.kernelModules = [ ];
boot.kernelModules = [ ];
boot.extraModulePackages = [ ];

View File

@@ -1,10 +1,4 @@
{
config,
pkgs,
lib,
fp,
...
}:
{ config, pkgs, lib, fp, ... }:
let
cfg = config.services.gitea;
in
@@ -16,117 +10,54 @@ in
catppuccin = pkgs.gitea-theme-catppuccin;
};
services.gitea.settings = {
ui = {
DEFAULT_THEME = "gitea-auto";
REACTIONS = lib.concatStringsSep "," [
"+1"
"-1"
"laugh"
"confused"
"heart"
"hooray"
"rocket"
"eyes"
"100"
"anger"
"astonished"
"no_good"
"ok_hand"
"pensive"
"pizza"
"point_up"
"sob"
"skull"
"upside_down_face"
"shrug"
"huh"
"bruh"
"okiedokie"
"grr"
];
CUSTOM_EMOJIS = lib.concatStringsSep "," [
"bruh"
"grr"
"huh"
"ohyeah"
];
};
"ui.meta" = {
AUTHOR = "Programvareverkstedet";
DESCRIPTION = "Bokstavelig talt programvareverkstedet";
KEYWORDS = lib.concatStringsSep "," [
"git"
"hackerspace"
"nix"
"open source"
"foss"
"organization"
"software"
"student"
];
};
};
systemd.services.gitea-customization = lib.mkIf cfg.enable {
description = "Install extra customization in gitea's CUSTOM_DIR";
wantedBy = [ "gitea.service" ];
requiredBy = [ "gitea.service" ];
serviceConfig = {
serviceConfig = {
Type = "oneshot";
User = cfg.user;
Group = cfg.group;
};
script =
let
logo-svg = fp /assets/logo_blue_regular.svg;
logo-png = fp /assets/logo_blue_regular.png;
script = let
logo-svg = fp /assets/logo_blue_regular.svg;
logo-png = fp /assets/logo_blue_regular.png;
extraLinks = pkgs.writeText "gitea-extra-links.tmpl" ''
<a class="item" href="https://git.pvv.ntnu.no/Drift/-/projects/4">Tokyo Drift Issues</a>
'';
extraLinksFooter = pkgs.writeText "gitea-extra-links-footer.tmpl" ''
<a class="item" href="https://www.pvv.ntnu.no/">PVV</a>
<a class="item" href="https://wiki.pvv.ntnu.no/">Wiki</a>
<a class="item" href="https://wiki.pvv.ntnu.no/wiki/Tjenester/Kodelager">PVV Gitea Howto</a>
'';
project-labels = (pkgs.formats.yaml { }).generate "gitea-project-labels.yaml" {
labels = lib.importJSON ./labels/projects.json;
};
customTemplates =
pkgs.runCommandLocal "gitea-templates"
{
nativeBuildInputs = with pkgs; [
coreutils
gnused
];
}
''
# Bigger icons
install -Dm444 "${cfg.package.src}/templates/repo/icon.tmpl" "$out/repo/icon.tmpl"
sed -i -e 's/24/60/g' "$out/repo/icon.tmpl"
'';
in
''
install -Dm444 ${logo-svg} ${cfg.customDir}/public/assets/img/logo.svg
install -Dm444 ${logo-png} ${cfg.customDir}/public/assets/img/logo.png
install -Dm444 ${./loading.apng} ${cfg.customDir}/public/assets/img/loading.png
install -Dm444 ${extraLinks} ${cfg.customDir}/templates/custom/extra_links.tmpl
install -Dm444 ${extraLinksFooter} ${cfg.customDir}/templates/custom/extra_links_footer.tmpl
install -Dm444 ${project-labels} ${cfg.customDir}/options/label/project-labels.yaml
install -Dm644 ${./emotes/bruh.png} ${cfg.customDir}/public/assets/img/emoji/bruh.png
install -Dm644 ${./emotes/huh.gif} ${cfg.customDir}/public/assets/img/emoji/huh.png
install -Dm644 ${./emotes/grr.png} ${cfg.customDir}/public/assets/img/emoji/grr.png
install -Dm644 ${./emotes/okiedokie.jpg} ${cfg.customDir}/public/assets/img/emoji/okiedokie.png
"${lib.getExe pkgs.rsync}" -a "${customTemplates}/" ${cfg.customDir}/templates/
extraLinks = pkgs.writeText "gitea-extra-links.tmpl" ''
<a class="item" href="https://git.pvv.ntnu.no/Drift/-/projects/4">Tokyo Drift Issues</a>
'';
extraLinksFooter = pkgs.writeText "gitea-extra-links-footer.tmpl" ''
<a class="item" href="https://www.pvv.ntnu.no/">PVV</a>
<a class="item" href="https://wiki.pvv.ntnu.no/">Wiki</a>
<a class="item" href="https://wiki.pvv.ntnu.no/wiki/Tjenester/Kodelager">PVV Gitea Howto</a>
'';
project-labels = (pkgs.formats.yaml { }).generate "gitea-project-labels.yaml" {
labels = lib.importJSON ./labels/projects.json;
};
customTemplates = pkgs.runCommandLocal "gitea-templates" {
nativeBuildInputs = with pkgs; [
coreutils
gnused
];
} ''
# Bigger icons
install -Dm444 "${cfg.package.src}/templates/repo/icon.tmpl" "$out/repo/icon.tmpl"
sed -i -e 's/24/60/g' "$out/repo/icon.tmpl"
'';
in ''
install -Dm444 ${logo-svg} ${cfg.customDir}/public/assets/img/logo.svg
install -Dm444 ${logo-png} ${cfg.customDir}/public/assets/img/logo.png
install -Dm444 ${./loading.apng} ${cfg.customDir}/public/assets/img/loading.png
install -Dm444 ${extraLinks} ${cfg.customDir}/templates/custom/extra_links.tmpl
install -Dm444 ${extraLinksFooter} ${cfg.customDir}/templates/custom/extra_links_footer.tmpl
install -Dm444 ${project-labels} ${cfg.customDir}/options/label/project-labels.yaml
"${lib.getExe pkgs.rsync}" -a "${customTemplates}/" ${cfg.customDir}/templates/
'';
};
}

Binary files not shown: 4 image files (only "before" versions listed; sizes 7.3 KiB, 28 KiB, 206 KiB, 145 KiB).

View File

@@ -1,17 +1,9 @@
{
config,
values,
lib,
pkgs,
unstablePkgs,
...
}:
{ config, values, lib, pkgs, unstablePkgs, ... }:
let
cfg = config.services.gitea;
domain = "git.pvv.ntnu.no";
sshPort = 2222;
in
{
sshPort = 2222;
in {
imports = [
./customization
./gpg.nix
@@ -19,21 +11,19 @@ in
./web-secret-provider
];
sops.secrets =
let
defaultConfig = {
owner = "gitea";
group = "gitea";
restartUnits = [ "gitea.service" ];
};
in
{
"gitea/database" = defaultConfig;
"gitea/email-password" = defaultConfig;
"gitea/lfs-jwt-secret" = defaultConfig;
"gitea/oauth2-jwt-secret" = defaultConfig;
"gitea/secret-key" = defaultConfig;
sops.secrets = let
defaultConfig = {
owner = "gitea";
group = "gitea";
restartUnits = [ "gitea.service" ];
};
in {
"gitea/database" = defaultConfig;
"gitea/email-password" = defaultConfig;
"gitea/lfs-jwt-secret" = defaultConfig;
"gitea/oauth2-jwt-secret" = defaultConfig;
"gitea/secret-key" = defaultConfig;
};
services.gitea = {
enable = true;
@@ -54,7 +44,7 @@ in
# https://docs.gitea.com/administration/config-cheat-sheet
settings = {
server = {
DOMAIN = domain;
DOMAIN = domain;
ROOT_URL = "https://${domain}/";
PROTOCOL = "http+unix";
SSH_PORT = sshPort;
@@ -93,24 +83,11 @@ in
AUTO_WATCH_NEW_REPOS = false;
};
admin.DEFAULT_EMAIL_NOTIFICATIONS = "onmention";
session.COOKIE_SECURE = true;
security = {
SECRET_KEY = lib.mkForce "";
SECRET_KEY_URI = "file:${config.sops.secrets."gitea/secret-key".path}";
};
cache = {
ADAPTER = "redis";
HOST = "redis+socket://${config.services.redis.servers.gitea.unixSocket}?db=0";
ITEM_TTL = "72h";
};
session = {
COOKIE_SECURE = true;
PROVIDER = "redis";
PROVIDER_CONFIG = "redis+socket://${config.services.redis.servers.gitea.unixSocket}?db=1";
};
queue = {
TYPE = "redis";
CONN_STR = "redis+socket://${config.services.redis.servers.gitea.unixSocket}?db=2";
};
database.LOG_SQL = false;
repository = {
PREFERRED_LICENSES = lib.concatStringsSep "," [
@@ -151,6 +128,31 @@ in
AVATAR_MAX_ORIGIN_SIZE = 1024 * 1024 * 2;
};
actions.ENABLED = true;
ui = {
REACTIONS = lib.concatStringsSep "," [
"+1"
"-1"
"laugh"
"confused"
"heart"
"hooray"
"rocket"
"eyes"
"100"
"anger"
"astonished"
"no_good"
"ok_hand"
"pensive"
"pizza"
"point_up"
"sob"
"skull"
"upside_down_face"
"shrug"
];
};
"ui.meta".DESCRIPTION = "Bokstavelig talt programvareverkstedet";
};
dump = {
@@ -162,26 +164,12 @@ in
environment.systemPackages = [ cfg.package ];
systemd.services.gitea = lib.mkIf cfg.enable {
wants = [ "redis-gitea.service" ];
after = [ "redis-gitea.service" ];
systemd.services.gitea.serviceConfig.CPUSchedulingPolicy = "batch";
serviceConfig = {
CPUSchedulingPolicy = "batch";
CacheDirectory = "gitea/repo-archive";
BindPaths = [
"%C/gitea/repo-archive:${cfg.stateDir}/data/repo-archive"
];
};
};
services.redis.servers.gitea = lib.mkIf cfg.enable {
enable = true;
user = config.services.gitea.user;
save = [ ];
openFirewall = false;
port = 5698;
};
systemd.services.gitea.serviceConfig.CacheDirectory = "gitea/repo-archive";
systemd.services.gitea.serviceConfig.BindPaths = [
"%C/gitea/repo-archive:${cfg.stateDir}/data/repo-archive"
];
services.nginx.virtualHosts."${domain}" = {
forceSSL = true;
@@ -225,33 +213,29 @@ in
};
systemd.services.gitea-dump = {
serviceConfig.ExecStart =
let
args = lib.cli.toGNUCommandLineShell { } {
type = cfg.dump.type;
serviceConfig.ExecStart = let
args = lib.cli.toGNUCommandLineShell { } {
type = cfg.dump.type;
# This should be declarative on nixos, no need to backup.
skip-custom-dir = true;
# This should be declarative on nixos, no need to backup.
skip-custom-dir = true;
# This can be regenerated, no need to backup
skip-index = true;
# This can be regenerated, no need to backup
skip-index = true;
# Logs are stored in the systemd journal
skip-log = true;
};
in
lib.mkForce "${lib.getExe cfg.package} ${args}";
# Logs are stored in the systemd journal
skip-log = true;
};
in lib.mkForce "${lib.getExe cfg.package} ${args}";
# Only keep n backup files at a time
postStop =
let
cu = prog: "'${lib.getExe' pkgs.coreutils prog}'";
backupCount = 3;
in
''
for file in $(${cu "ls"} -t1 '${cfg.dump.backupDir}' | ${cu "sort"} --reverse | ${cu "tail"} -n+${toString (backupCount + 1)}); do
${cu "rm"} "$file"
done
postStop = let
cu = prog: "'${lib.getExe' pkgs.coreutils prog}'";
backupCount = 3;
in ''
for file in $(${cu "ls"} -t1 '${cfg.dump.backupDir}' | ${cu "sort"} --reverse | ${cu "tail"} -n+${toString (backupCount + 1)}); do
${cu "rm"} "$file"
done
'';
};
}

View File

@@ -1,9 +1,4 @@
{
config,
pkgs,
lib,
...
}:
{ config, pkgs, lib, ... }:
let
cfg = config.services.gitea;
GNUPGHOME = "${config.users.users.gitea.home}/gnupg";

View File

@@ -1,9 +1,4 @@
{
config,
pkgs,
lib,
...
}:
{ config, pkgs, lib, ... }:
let
cfg = config.services.gitea;
in
@@ -16,7 +11,7 @@ in
systemd.services.gitea-import-users = lib.mkIf cfg.enable {
enable = true;
preStart = ''${pkgs.rsync}/bin/rsync -e "${pkgs.openssh}/bin/ssh -o UserKnownHostsFile=$CREDENTIALS_DIRECTORY/ssh-known-hosts -i $CREDENTIALS_DIRECTORY/sshkey" -a pvv@smtp.pvv.ntnu.no:/etc/passwd /run/gitea-import-users/passwd'';
preStart=''${pkgs.rsync}/bin/rsync -e "${pkgs.openssh}/bin/ssh -o UserKnownHostsFile=$CREDENTIALS_DIRECTORY/ssh-known-hosts -i $CREDENTIALS_DIRECTORY/sshkey" -a pvv@smtp.pvv.ntnu.no:/etc/passwd /run/gitea-import-users/passwd'';
environment.PASSWD_FILE_PATH = "/run/gitea-import-users/passwd";
serviceConfig = {
ExecStart = pkgs.writers.writePython3 "gitea-import-users" {
@@ -25,12 +20,12 @@ in
];
libraries = with pkgs.python3Packages; [ requests ];
} (builtins.readFile ./gitea-import-users.py);
LoadCredential = [
LoadCredential=[
"sshkey:${config.sops.secrets."gitea/passwd-ssh-key".path}"
"ssh-known-hosts:${config.sops.secrets."gitea/ssh-known-hosts".path}"
];
DynamicUser = "yes";
EnvironmentFile = config.sops.secrets."gitea/import-user-env".path;
DynamicUser="yes";
EnvironmentFile=config.sops.secrets."gitea/import-user-env".path;
RuntimeDirectory = "gitea-import-users";
};
};

View File

@@ -1,9 +1,4 @@
{
config,
pkgs,
lib,
...
}:
{ config, pkgs, lib, ... }:
let
organizations = [
"Drift"
@@ -41,8 +36,7 @@ in
group = "gitea-web";
restartUnits = [
"gitea-web-secret-provider@"
]
++ (map (org: "gitea-web-secret-provider@${org}") organizations);
] ++ (map (org: "gitea-web-secret-provider@${org}") organizations);
};
systemd.slices.system-giteaweb = {
@@ -54,30 +48,25 @@ in
# %d - secrets directory
systemd.services."gitea-web-secret-provider@" = {
description = "Ensure all repos in %i has an SSH key to push web content";
requires = [
"gitea.service"
"network.target"
];
requires = [ "gitea.service" "network.target" ];
serviceConfig = {
Slice = "system-giteaweb.slice";
Type = "oneshot";
ExecStart =
let
args = lib.cli.toGNUCommandLineShell { } {
org = "%i";
token-path = "%d/token";
api-url = "${giteaCfg.settings.server.ROOT_URL}api/v1";
key-dir = "/var/lib/gitea-web/keys/%i";
authorized-keys-path = "/var/lib/gitea-web/authorized_keys.d/%i";
rrsync-script = pkgs.writeShellScript "rrsync-chown" ''
mkdir -p "$1"
${lib.getExe pkgs.rrsync} -wo "$1"
${pkgs.coreutils}/bin/chown -R gitea-web:gitea-web "$1"
'';
web-dir = "/var/lib/gitea-web/web";
};
in
"${giteaWebSecretProviderScript} ${args}";
ExecStart = let
args = lib.cli.toGNUCommandLineShell { } {
org = "%i";
token-path = "%d/token";
api-url = "${giteaCfg.settings.server.ROOT_URL}api/v1";
key-dir = "/var/lib/gitea-web/keys/%i";
authorized-keys-path = "/var/lib/gitea-web/authorized_keys.d/%i";
rrsync-script = pkgs.writeShellScript "rrsync-chown" ''
mkdir -p "$1"
${lib.getExe pkgs.rrsync} -wo "$1"
${pkgs.coreutils}/bin/chown -R gitea-web:gitea-web "$1"
'';
web-dir = "/var/lib/gitea-web/web";
};
in "${giteaWebSecretProviderScript} ${args}";
User = "gitea-web";
Group = "gitea-web";
@@ -96,10 +85,7 @@ in
ProtectControlGroups = true;
ProtectKernelModules = true;
ProtectKernelTunables = true;
RestrictAddressFamilies = [
"AF_INET"
"AF_INET6"
];
RestrictAddressFamilies = [ "AF_INET" "AF_INET6" ];
RestrictRealtime = true;
RestrictSUIDSGID = true;
MemoryDenyWriteExecute = true;
@@ -119,9 +105,7 @@ in
systemd.targets.timers.wants = map (org: "gitea-web-secret-provider@${org}.timer") organizations;
services.openssh.authorizedKeysFiles = map (
org: "/var/lib/gitea-web/authorized_keys.d/${org}"
) organizations;
services.openssh.authorizedKeysFiles = map (org: "/var/lib/gitea-web/authorized_keys.d/${org}") organizations;
users.users.nginx.extraGroups = [ "gitea-web" ];
services.nginx.virtualHosts."pages.pvv.ntnu.no" = {
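For context on the hunk above: gitea-web-secret-provider@ is a systemd template unit, so %i expands to the instance name (the organization) and %d to that instance's credentials directory, and the timers line instantiates one timer per organization. A small Nix illustration of that mapping ("Drift" appears in the diff; "Projects" is a made-up second organization for the example):

# Illustrative only; evaluates to the per-instance timer names, where %i = the org.
map (org: "gitea-web-secret-provider@${org}.timer") [ "Drift" "Projects" ]
# => [ "gitea-web-secret-provider@Drift.timer" "gitea-web-secret-provider@Projects.timer" ]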

View File

@@ -1,9 +1,4 @@
{
fp,
values,
lupineName,
...
}:
{ fp, values, lupineName, ... }:
{
imports = [
./hardware-configuration/${lupineName}.nix
@@ -17,10 +12,7 @@
systemd.network.networks."30-enp0s31f6" = values.defaultNetworkConfig // {
matchConfig.Name = "enp0s31f6";
address = with values.hosts.${lupineName}; [
(ipv4 + "/25")
(ipv6 + "/64")
];
address = with values.hosts.${lupineName}; [ (ipv4 + "/25") (ipv6 + "/64") ];
networkConfig.LLDP = false;
};
systemd.network.wait-online = {

View File

@@ -1,46 +1,32 @@
# Do not modify this file! It was generated by 'nixos-generate-config'
# and may be overwritten by future invocations. Please make changes
# to /etc/nixos/configuration.nix instead.
{
config,
lib,
pkgs,
modulesPath,
...
}:
{ config, lib, pkgs, modulesPath, ... }:
{
imports = [
(modulesPath + "/installer/scan/not-detected.nix")
];
imports =
[ (modulesPath + "/installer/scan/not-detected.nix")
];
boot.initrd.availableKernelModules = [
"xhci_pci"
"ahci"
"usbhid"
"sd_mod"
];
boot.initrd.availableKernelModules = [ "xhci_pci" "ahci" "usbhid" "sd_mod" ];
boot.initrd.kernelModules = [ ];
boot.kernelModules = [ "kvm-intel" ];
boot.extraModulePackages = [ ];
fileSystems."/" = {
device = "/dev/disk/by-uuid/a949e2e8-d973-4925-83e4-bcd815e65af7";
fsType = "ext4";
};
fileSystems."/" =
{ device = "/dev/disk/by-uuid/a949e2e8-d973-4925-83e4-bcd815e65af7";
fsType = "ext4";
};
fileSystems."/boot" = {
device = "/dev/disk/by-uuid/81D6-38D3";
fsType = "vfat";
options = [
"fmask=0077"
"dmask=0077"
fileSystems."/boot" =
{ device = "/dev/disk/by-uuid/81D6-38D3";
fsType = "vfat";
options = [ "fmask=0077" "dmask=0077" ];
};
swapDevices =
[ { device = "/dev/disk/by-uuid/82c2d7fa-7cd0-4398-8cf6-c892bc56264b"; }
];
};
swapDevices = [
{ device = "/dev/disk/by-uuid/82c2d7fa-7cd0-4398-8cf6-c892bc56264b"; }
];
# Enables DHCP on each ethernet and wireless interface. In case of scripted networking
# (the default) this is the recommended approach. When using systemd-networkd it's

View File

@@ -1,46 +1,32 @@
# Do not modify this file! It was generated by 'nixos-generate-config'
# and may be overwritten by future invocations. Please make changes
# to /etc/nixos/configuration.nix instead.
{
config,
lib,
pkgs,
modulesPath,
...
}:
{ config, lib, pkgs, modulesPath, ... }:
{
imports = [
(modulesPath + "/installer/scan/not-detected.nix")
];
imports =
[ (modulesPath + "/installer/scan/not-detected.nix")
];
boot.initrd.availableKernelModules = [
"xhci_pci"
"ahci"
"usbhid"
"sd_mod"
];
boot.initrd.availableKernelModules = [ "xhci_pci" "ahci" "usbhid" "sd_mod" ];
boot.initrd.kernelModules = [ ];
boot.kernelModules = [ "kvm-intel" ];
boot.extraModulePackages = [ ];
fileSystems."/" = {
device = "/dev/disk/by-uuid/aa81d439-800b-403d-ac10-9d2aac3619d0";
fsType = "ext4";
};
fileSystems."/" =
{ device = "/dev/disk/by-uuid/aa81d439-800b-403d-ac10-9d2aac3619d0";
fsType = "ext4";
};
fileSystems."/boot" = {
device = "/dev/disk/by-uuid/4A34-6AE5";
fsType = "vfat";
options = [
"fmask=0077"
"dmask=0077"
fileSystems."/boot" =
{ device = "/dev/disk/by-uuid/4A34-6AE5";
fsType = "vfat";
options = [ "fmask=0077" "dmask=0077" ];
};
swapDevices =
[ { device = "/dev/disk/by-uuid/efb7cd0c-c1ae-4a86-8bc2-8e7fd0066650"; }
];
};
swapDevices = [
{ device = "/dev/disk/by-uuid/efb7cd0c-c1ae-4a86-8bc2-8e7fd0066650"; }
];
# Enables DHCP on each ethernet and wireless interface. In case of scripted networking
# (the default) this is the recommended approach. When using systemd-networkd it's

View File

@@ -1,46 +1,32 @@
# Do not modify this file! It was generated by 'nixos-generate-config'
# and may be overwritten by future invocations. Please make changes
# to /etc/nixos/configuration.nix instead.
{
config,
lib,
pkgs,
modulesPath,
...
}:
{ config, lib, pkgs, modulesPath, ... }:
{
imports = [
(modulesPath + "/installer/scan/not-detected.nix")
];
imports =
[ (modulesPath + "/installer/scan/not-detected.nix")
];
boot.initrd.availableKernelModules = [
"xhci_pci"
"ahci"
"usbhid"
"sd_mod"
];
boot.initrd.availableKernelModules = [ "xhci_pci" "ahci" "usbhid" "sd_mod" ];
boot.initrd.kernelModules = [ ];
boot.kernelModules = [ "kvm-intel" ];
boot.extraModulePackages = [ ];
fileSystems."/" = {
device = "/dev/disk/by-uuid/39ba059b-3205-4701-a832-e72c0122cb88";
fsType = "ext4";
};
fileSystems."/" =
{ device = "/dev/disk/by-uuid/39ba059b-3205-4701-a832-e72c0122cb88";
fsType = "ext4";
};
fileSystems."/boot" = {
device = "/dev/disk/by-uuid/63FA-297B";
fsType = "vfat";
options = [
"fmask=0077"
"dmask=0077"
fileSystems."/boot" =
{ device = "/dev/disk/by-uuid/63FA-297B";
fsType = "vfat";
options = [ "fmask=0077" "dmask=0077" ];
};
swapDevices =
[ { device = "/dev/disk/by-uuid/9c72eb54-ea8c-4b09-808a-8be9b9a33869"; }
];
};
swapDevices = [
{ device = "/dev/disk/by-uuid/9c72eb54-ea8c-4b09-808a-8be9b9a33869"; }
];
# Enables DHCP on each ethernet and wireless interface. In case of scripted networking
# (the default) this is the recommended approach. When using systemd-networkd it's

View File

@@ -1,37 +1,26 @@
# Do not modify this file! It was generated by 'nixos-generate-config'
# and may be overwritten by future invocations. Please make changes
# to /etc/nixos/configuration.nix instead.
{
config,
lib,
pkgs,
modulesPath,
...
}:
{ config, lib, pkgs, modulesPath, ... }:
{
imports = [
(modulesPath + "/installer/scan/not-detected.nix")
];
imports =
[ (modulesPath + "/installer/scan/not-detected.nix")
];
boot.initrd.availableKernelModules = [
"xhci_pci"
"ahci"
"usbhid"
"sd_mod"
];
boot.initrd.availableKernelModules = [ "xhci_pci" "ahci" "usbhid" "sd_mod" ];
boot.initrd.kernelModules = [ ];
boot.kernelModules = [ "kvm-intel" ];
boot.extraModulePackages = [ ];
fileSystems."/" = {
device = "/dev/disk/by-uuid/c7bbb293-a0a3-4995-8892-0ec63e8c67dd";
fsType = "ext4";
};
fileSystems."/" =
{ device = "/dev/disk/by-uuid/c7bbb293-a0a3-4995-8892-0ec63e8c67dd";
fsType = "ext4";
};
swapDevices = [
{ device = "/dev/disk/by-uuid/a86ffda8-8ecb-42a1-bf9f-926072e90ca5"; }
];
swapDevices =
[ { device = "/dev/disk/by-uuid/a86ffda8-8ecb-42a1-bf9f-926072e90ca5"; }
];
# Enables DHCP on each ethernet and wireless interface. In case of scripted networking
# (the default) this is the recommended approach. When using systemd-networkd it's

View File

@@ -1,46 +1,32 @@
# Do not modify this file! It was generated by 'nixos-generate-config'
# and may be overwritten by future invocations. Please make changes
# to /etc/nixos/configuration.nix instead.
{
config,
lib,
pkgs,
modulesPath,
...
}:
{ config, lib, pkgs, modulesPath, ... }:
{
imports = [
(modulesPath + "/installer/scan/not-detected.nix")
];
imports =
[ (modulesPath + "/installer/scan/not-detected.nix")
];
boot.initrd.availableKernelModules = [
"xhci_pci"
"ahci"
"usbhid"
"sd_mod"
];
boot.initrd.availableKernelModules = [ "xhci_pci" "ahci" "usbhid" "sd_mod" ];
boot.initrd.kernelModules = [ ];
boot.kernelModules = [ "kvm-intel" ];
boot.extraModulePackages = [ ];
fileSystems."/" = {
device = "/dev/disk/by-uuid/5f8418ad-8ec1-4f9e-939e-f3a4c36ef343";
fsType = "ext4";
};
fileSystems."/" =
{ device = "/dev/disk/by-uuid/5f8418ad-8ec1-4f9e-939e-f3a4c36ef343";
fsType = "ext4";
};
fileSystems."/boot" = {
device = "/dev/disk/by-uuid/F372-37DF";
fsType = "vfat";
options = [
"fmask=0077"
"dmask=0077"
fileSystems."/boot" =
{ device = "/dev/disk/by-uuid/F372-37DF";
fsType = "vfat";
options = [ "fmask=0077" "dmask=0077" ];
};
swapDevices =
[ { device = "/dev/disk/by-uuid/27bf292d-bbb3-48c4-a86e-456e0f1f648f"; }
];
};
swapDevices = [
{ device = "/dev/disk/by-uuid/27bf292d-bbb3-48c4-a86e-456e0f1f648f"; }
];
# Enables DHCP on each ethernet and wireless interface. In case of scripted networking
# (the default) this is the recommended approach. When using systemd-networkd it's

View File

@@ -67,8 +67,5 @@
networking.dhcpcd.IPv6rs = false;
networking.firewall.interfaces."podman+".allowedUDPPorts = [
53
5353
];
networking.firewall.interfaces."podman+".allowedUDPPorts = [53 5353];
}

View File

@@ -1,23 +1,14 @@
{
config,
fp,
pkgs,
values,
...
}:
{ config, fp, pkgs, values, ... }:
{
imports = [
# Include the results of the hardware scan.
./hardware-configuration.nix
(fp /base)
];
# Include the results of the hardware scan.
./hardware-configuration.nix
(fp /base)
];
systemd.network.networks."30-ens18" = values.defaultNetworkConfig // {
matchConfig.Name = "ens18";
address = with values.hosts.shark; [
(ipv4 + "/25")
(ipv6 + "/64")
];
address = with values.hosts.shark; [ (ipv4 + "/25") (ipv6 + "/64") ];
};
services.qemuGuest.enable = true;

View File

@@ -1,44 +1,31 @@
# Do not modify this file! It was generated by 'nixos-generate-config'
# and may be overwritten by future invocations. Please make changes
# to /etc/nixos/configuration.nix instead.
{
config,
lib,
pkgs,
modulesPath,
...
}:
{ config, lib, pkgs, modulesPath, ... }:
{
imports = [
(modulesPath + "/profiles/qemu-guest.nix")
];
imports =
[ (modulesPath + "/profiles/qemu-guest.nix")
];
boot.initrd.availableKernelModules = [
"ata_piix"
"uhci_hcd"
"virtio_pci"
"virtio_scsi"
"sd_mod"
"sr_mod"
];
boot.initrd.availableKernelModules = [ "ata_piix" "uhci_hcd" "virtio_pci" "virtio_scsi" "sd_mod" "sr_mod" ];
boot.initrd.kernelModules = [ ];
boot.kernelModules = [ ];
boot.extraModulePackages = [ ];
fileSystems."/" = {
device = "/dev/disk/by-uuid/224c45db-9fdc-45d4-b3ad-aaf20b3efa8a";
fsType = "ext4";
};
fileSystems."/" =
{ device = "/dev/disk/by-uuid/224c45db-9fdc-45d4-b3ad-aaf20b3efa8a";
fsType = "ext4";
};
fileSystems."/boot" = {
device = "/dev/disk/by-uuid/CC37-F5FE";
fsType = "vfat";
};
fileSystems."/boot" =
{ device = "/dev/disk/by-uuid/CC37-F5FE";
fsType = "vfat";
};
swapDevices = [
{ device = "/dev/disk/by-uuid/a1ce3234-78b1-4565-9643-f4a05004424f"; }
];
swapDevices =
[ { device = "/dev/disk/by-uuid/a1ce3234-78b1-4565-9643-f4a05004424f"; }
];
# Enables DHCP on each ethernet and wireless interface. In case of scripted networking
# (the default) this is the recommended approach. When using systemd-networkd it's

View File

@@ -1,22 +1,11 @@
{
config,
lib,
pkgs,
modulesPath,
...
}:
{ config, lib, pkgs, modulesPath, ... }:
{
imports = [
(modulesPath + "/installer/scan/not-detected.nix")
];
imports =
[ (modulesPath + "/installer/scan/not-detected.nix")
];
boot.initrd.availableKernelModules = [
"xhci_pci"
"ahci"
"usbhid"
"sd_mod"
];
boot.initrd.availableKernelModules = [ "xhci_pci" "ahci" "usbhid" "sd_mod" ];
boot.initrd.kernelModules = [ ];
boot.kernelModules = [ "kvm-amd" ];
boot.extraModulePackages = [ ];

View File

@@ -1,13 +1,4 @@
{
config,
pkgs,
lib,
modulesPath,
fp,
values,
...
}:
{
{ config, pkgs, lib, modulesPath, fp, values, ... }: {
imports = [
(modulesPath + "/profiles/perlless.nix")
@@ -73,18 +64,14 @@
defaultGateway6 = values.hosts.gateway6;
interfaces.eth0 = {
useDHCP = false;
ipv4.addresses = [
{
address = values.hosts.skrott.ipv4;
prefixLength = 25;
}
];
ipv6.addresses = [
{
address = values.hosts.skrott.ipv6;
prefixLength = 25;
}
];
ipv4.addresses = [{
address = values.hosts.skrott.ipv4;
prefixLength = 25;
}];
ipv6.addresses = [{
address = values.hosts.skrott.ipv6;
prefixLength = 25;
}];
};
};

View File

@@ -1,10 +1,4 @@
{
config,
fp,
pkgs,
values,
...
}:
{ config, fp, pkgs, values, ... }:
{
imports = [
# Include the results of the hardware scan.
@@ -17,10 +11,7 @@
systemd.network.networks."30-ens18" = values.defaultNetworkConfig // {
matchConfig.Name = "ens18";
address = with values.hosts.temmie; [
(ipv4 + "/25")
(ipv6 + "/64")
];
address = with values.hosts.temmie; [ (ipv4 + "/25") (ipv6 + "/64") ];
};
services.nginx.enable = false;

View File

@@ -1,44 +1,28 @@
# Do not modify this file! It was generated by 'nixos-generate-config'
# and may be overwritten by future invocations. Please make changes
# to /etc/nixos/configuration.nix instead.
{
config,
lib,
pkgs,
modulesPath,
...
}:
{ config, lib, pkgs, modulesPath, ... }:
{
imports = [
(modulesPath + "/profiles/qemu-guest.nix")
];
imports =
[ (modulesPath + "/profiles/qemu-guest.nix")
];
boot.initrd.availableKernelModules = [
"ata_piix"
"uhci_hcd"
"virtio_pci"
"virtio_scsi"
"sd_mod"
"sr_mod"
];
boot.initrd.availableKernelModules = [ "ata_piix" "uhci_hcd" "virtio_pci" "virtio_scsi" "sd_mod" "sr_mod" ];
boot.initrd.kernelModules = [ ];
boot.kernelModules = [ ];
boot.extraModulePackages = [ ];
fileSystems."/" = {
device = "/dev/disk/by-uuid/c3aed415-0054-4ac5-8d29-75a99cc26451";
fsType = "btrfs";
};
fileSystems."/" =
{ device = "/dev/disk/by-uuid/c3aed415-0054-4ac5-8d29-75a99cc26451";
fsType = "btrfs";
};
fileSystems."/boot" = {
device = "/dev/disk/by-uuid/A367-83FD";
fsType = "vfat";
options = [
"fmask=0022"
"dmask=0022"
];
};
fileSystems."/boot" =
{ device = "/dev/disk/by-uuid/A367-83FD";
fsType = "vfat";
options = [ "fmask=0022" "dmask=0022" ];
};
swapDevices = [ ];

View File

@@ -1,19 +1,7 @@
{ lib, values, ... }:
let
# See microbel:/etc/exports
letters = [
"a"
"b"
"c"
"d"
"h"
"i"
"j"
"k"
"l"
"m"
"z"
];
letters = [ "a" "b" "c" "d" "h" "i" "j" "k" "l" "m" "z" ];
in
{
systemd.targets."pvv-homedirs" = {
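The hunk above only compacts the letter list, but the pattern it feeds is worth a sketch: a list like this is typically fanned out with map/listToAttrs into one mount or unit per letter. The following is a hypothetical sketch only; the export and mount paths are assumptions, not read from microbel:/etc/exports:

# Hypothetical sketch; device and mount paths are assumed for illustration.
lib.listToAttrs (map (letter: {
  name = "/home/pvv/${letter}";
  value = {
    device = "microbel.pvv.ntnu.no:/export/home/${letter}"; # assumed export
    fsType = "nfs";
    options = [ "noauto" "x-systemd.automount" ];
  };
}) letters)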

Some files were not shown because too many files have changed in this diff