From a6a0b16300b32e279737a4f8f6407097fa1c0765 Mon Sep 17 00:00:00 2001 From: Peder Bergebakken Sundt Date: Mon, 17 Oct 2022 22:40:48 +0200 Subject: [PATCH] Initial commit --- .editorconfig | 9 + .gitignore | 1 + README.md | 12 + configuration.nix | 498 ++ overlays/default.nix | 51 + overlays/kukkee/default.nix | 84 + overlays/kukkee/node-composition.nix | 17 + overlays/kukkee/node-env.nix | 598 +++ overlays/kukkee/node-packages.nix | 6713 ++++++++++++++++++++++++++ overlays/kukkee/update-node-deps.sh | 22 + overlays/kukkee/update-version.sh | 17 + overlays/kukkee/update.sh | 9 + overlays/rallly/default.nix | 0 overlays/trivial-gradios/default.nix | 39 + profiles/code-remote/default.nix | 183 + profiles/nas/default.nix | 1278 +++++ profiles/nas/modules/kukkee.nix | 160 + profiles/nas/modules/webhook.nix | 140 + profiles/websites/default.nix | 254 + profiles/websites/services/pdoc.nix | 273 ++ users/default.nix | 31 + users/pbsds/default.nix | 33 + users/pbsds/home/default.nix | 282 ++ users/pbsds/home/modules/jump.nix | 71 + users/pbsds/home/modules/micro.nix | 109 + 25 files changed, 10884 insertions(+) create mode 100644 .editorconfig create mode 100644 .gitignore create mode 100644 README.md create mode 100644 configuration.nix create mode 100644 overlays/default.nix create mode 100644 overlays/kukkee/default.nix create mode 100644 overlays/kukkee/node-composition.nix create mode 100644 overlays/kukkee/node-env.nix create mode 100644 overlays/kukkee/node-packages.nix create mode 100755 overlays/kukkee/update-node-deps.sh create mode 100755 overlays/kukkee/update-version.sh create mode 100755 overlays/kukkee/update.sh create mode 100644 overlays/rallly/default.nix create mode 100644 overlays/trivial-gradios/default.nix create mode 100644 profiles/code-remote/default.nix create mode 100644 profiles/nas/default.nix create mode 100644 profiles/nas/modules/kukkee.nix create mode 100644 profiles/nas/modules/webhook.nix create mode 100644 profiles/websites/default.nix create mode 100644 profiles/websites/services/pdoc.nix create mode 100644 users/default.nix create mode 100644 users/pbsds/default.nix create mode 100644 users/pbsds/home/default.nix create mode 100644 users/pbsds/home/modules/jump.nix create mode 100644 users/pbsds/home/modules/micro.nix diff --git a/.editorconfig b/.editorconfig new file mode 100644 index 0000000..ef7458a --- /dev/null +++ b/.editorconfig @@ -0,0 +1,9 @@ +root = true + +[*] +end_of_line = lf +insert_final_newline = true + +[*.nix] +indent_size = 2 +indent_style = space diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..b2be92b --- /dev/null +++ b/.gitignore @@ -0,0 +1 @@ +result diff --git a/README.md b/README.md new file mode 100644 index 0000000..d010f9a --- /dev/null +++ b/README.md @@ -0,0 +1,12 @@ +# Initial setup + +``` +nixos-generate-config +``` + +# TODO: + +* [ ] Multiple user profiles, headless, nixpkgs-dev, desktop, hpc, pvv, etc +* [ ] Split stuff into multiple files +* [ ] Some system for multiple hosts with different configs +* [ ] Make a flake diff --git a/configuration.nix b/configuration.nix new file mode 100644 index 0000000..30efedb --- /dev/null +++ b/configuration.nix @@ -0,0 +1,498 @@ +{ config, pkgs, lib, ... 
}: + +{ + #nixpkgs.overlays = overlays; + nixpkgs.config.allowUnfreePredicate = (pkg: true); + nixpkgs.config.allowUnfree = true; + system.autoUpgrade.enable = true; # daily nixos-rebuild switch, no reboot by default + + nix.distributedBuilds = true; + # useful when the builder has a faster internet connection than i do + nix.extraOptions = '' + experimental-features = nix-command flakes + builders-use-substitutes = true + ''; + nix.buildMachines = [ + /**/ + { + system = "x86_64-linux"; # can be a list + hostName = "rocm.pbsds.net"; + sshUser = "pbsds"; + maxJobs = 2; + speedFactor = 2; + #supportedFeatures = [ "nixos-test" "benchmark" "big-parallel" "kvm" ]; + #mandatoryFeatures = [ ]; + } + /**/ + /**/ + { + system = "x86_64-linux"; # can be a list + hostName = "isvegg.pvv.ntnu.no"; + sshUser = "pederbs"; + maxJobs = 1; + speedFactor = 1; + #supportedFeatures = [ "nixos-test" "benchmark" "big-parallel" "kvm" ]; + #mandatoryFeatures = [ ]; + } + /**/ + ]; + + # deduplicate with hardlinks, expensive. Alternative: nix-store --optimise + nix.settings.auto-optimise-store = true; + #nix.optimize.automatic = true; # periodic optimization + nix.gc = { + automatic = true; + dates = "weekly"; + options = "--delete-older-than 30d"; + }; + + + # How to override package used by module + # https://github.com/NixOS/nixpkgs/issues/55366 + + imports = [ + ./hardware-configuration.nix # results of hardware scan + ./profiles/nas # add NAS services + ./profiles/websites + ./profiles/code-remote + ./users + + + + ]; + disabledModules = [ + "services/misc/jellyfin.nix" + "services/web-apps/invidious.nix" + ]; + services.jellyfin.package = pkgs.unstable.jellyfin; + services.invidious.package = pkgs.unstable.invidious; + + nixpkgs.overlays = [ + (import ./overlays) + ]; + + + + # Allow unstable packages. + nixpkgs.config.packageOverrides = pkgs: { + unstable = import { + config = config.nixpkgs.config; + }; + }; + + # enable opengl (headless) + hardware.opengl.enable = true; + #hardware.opengl.extraPackages = [ pkgs.mesa.drivers ]; + hardware.opengl.extraPackages = with pkgs; [ mesa.drivers vaapiIntel libvdpau-va-gl vaapiVdpau intel-ocl ]; + + + # run/build weird binaries + boot.binfmt.emulatedSystems = [ + "wasm32-wasi" + "x86_64-windows" + "aarch64-linux" + "riscv64-linux" + ]; + + # Bootloader + + boot.loader.grub.enable = true; + boot.loader.grub.device = "/dev/sda"; + boot.loader.grub.useOSProber = true; + + + # Virtualization + + #services.docker.enable = true; + virtualisation = { + podman.enable = true; + podman.dockerCompat = true; # alias docker to podman + oci-containers.backend = "podman"; + }; + + + # Networking + + networking = { + # Enable networking + networkmanager.enable = true; + #wireless.enable = true; # Enables wireless support via wpa_supplicant. + + hostName = "noximilien"; # Define your hostname. + domain = "pbsds.net"; + + interfaces.eno1.ipv4.addresses = [ + { address = "192.168.1.9"; prefixLength = 24; } + ]; + + nameservers = [ + "192.168.1.254" + "8.8.8.8" + ]; + defaultGateway = { + address = "192.168.1.254"; + interface = "eno1"; + }; + + #useDHCP = true; + + # Configure network proxy if necessary + #proxy.default = "http://user:password@proxy:port/"; + #proxy.noProxy = "127.0.0.1,localhost,internal.domain"; + }; + + # Open ports in the firewall. + #networking.firewall.allowedTCPPorts = [ ... ]; + #networking.firewall.allowedUDPPorts = [ ... ]; + # Or disable the firewall altogether. 
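+  # A possible middle ground (sketch only, kept commented out): leave the
+  # firewall enabled and open just what nginx and the tunnels need. The
+  # port list here is an illustrative guess, not taken from this config:
+  #networking.firewall.allowedTCPPorts = [ 80 443 ];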
+ networking.firewall.enable = false; # default is true, TEMP + + + # NFS mounts + + fileSystems = let + mkMount = mountpoint: server: subdir: { + "${mountpoint}${subdir}" = { + device = "${server}${subdir}"; + fsType = "nfs"; + #options = [ "nfsvers=4.2" ]; + }; + }; + # TODO: combine nameValuePair and listToAttrs + joinSets = sets: builtins.foldl' (l: r: l // r) {} sets; + in joinSets ( + (map (mkMount "/mnt/reidun" "192.168.1.3:/Reidun/shared") [ + "" + "/Backups" + "/Comics" + "/Downloads" + "/Games" + "/Games/Installable" + "/Games/Portable" + "/Games/ROMs" + "/ISO" + "/Images" + "/Images/Collections" + "/Images/Memes" + "/Images/Pictures" + "/Images/Wallpapers" + "/Music" + "/Music/Albums" + "/Music/Kancolle" + "/Music/OST" + "/Music/Old" + "/Music/Touhou" + "/Music/Vocaloid" + "/Music/dojin.co" + "/Various" + "/Various/Zotero" + "/Various/resilio" + "/Video" + "/Video/Anime" + "/Video/Concerts" + "/Video/Documentaries" + "/Video/Movies" + "/Video/Musicvideos" + "/Video/Series" + "/Video/Talks" + "/Work" + "/Work/Documents" + #"/Work/FL Studio" # broken, maybe due to the space? + "/Work/Programming" + "/Work/School" + "/pub" + ]) ++ (map (mkMount "/mnt/meconium" "192.168.1.3:/Meconium" ) [ + "" + "/beets_music" + ]) + ); + + + # Time zone and internationalisation properties. + + time.timeZone = "Europe/Oslo"; + i18n.defaultLocale = "en_US.utf8"; + i18n.extraLocaleSettings = { + LC_ADDRESS = "nb_NO.utf8"; + LC_IDENTIFICATION = "nb_NO.utf8"; + LC_MEASUREMENT = "nb_NO.utf8"; + LC_MONETARY = "nb_NO.utf8"; + LC_NAME = "nb_NO.utf8"; + LC_NUMERIC = "nb_NO.utf8"; + LC_PAPER = "nb_NO.utf8"; + LC_TELEPHONE = "nb_NO.utf8"; + LC_TIME = "nb_NO.utf8"; + }; + services.xserver = { + # Configure X11 keymap + layout = "no"; + xkbVariant = ""; + }; + console.keyMap = "no";# Configure console keymap + + + + # Installed system packages + # $ nix search FOOBAR + + environment.systemPackages = with pkgs; [ + lsof + lshw + htop + file + tmux + #parallel # already provided by moreutils + pwgen + git + nmap + rsync + bind.dnsutils + graphviz + dialog + cowsay + gnused + gnumake + coreutils-full + moreutils + binutils + diffutils + findutils + usbutils + bash-completion + curl + wget + strace + + zip + unrar + unzip + atool + p7zip + bzip2 + gzip + atool + + micro + aspell + aspellDicts.en + aspellDicts.nb + vimv + dos2unix + #rmate # TODO: add to nixpkgs + pandoc + cargo + cargo-edit + sqlite + #sshuttle + visidata + + weston + cage + vimix-gtk-themes + flat-remix-icon-theme + xclip + feh + + sshfs + + glances + zenith + fzf + tealdeer #tldr + entr + axel aria + bat + xe # xargs alternative + sd # sed alternative + fd # find alternative + silver-searcher # `ag` + ripgrep + jq + yq + htmlq + sysz + du-dust # du alternative + ncdu # Disk usage analyzer with an ncurses interface + + gh + hub + + nix-output-monitor + nix-prefetch + nix-top + #nix-index + nix-tree + nixfmt + alejandra + + ]; + + # TODO: make this root only? + programs.bash.shellInit = '' + if command -v fzf-share >/dev/null; then + source "$(fzf-share)/key-bindings.bash" + source "$(fzf-share)/completion.bash" + fi + ''; + + # TODO: make this root only? 
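+  # One way to answer the TODO above (untested sketch): gate the snippet on
+  # the effective UID so it only applies to root, e.g.
+  #   programs.bash.interactiveShellInit = ''
+  #     if [ "$EUID" -eq 0 ]; then alias ed=micro; fi
+  #   '';
+  # or keep per-user shell tweaks in home-manager instead.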
+ programs.bash.shellAliases = { + ed = "micro"; # TODO: ${EDITOR:-micro} + }; + environment.variables = { + EDITOR = "micro"; + }; + + programs.dconf.enable = true; + + + # System fonts + # Nice to have when X-forwading on headless machines + + fonts.fonts = with pkgs; [ + noto-fonts # includes Cousine + noto-fonts-cjk + noto-fonts-emoji + noto-fonts-extra + dejavu_fonts + ]; + + # Some programs need SUID wrappers, can be configured further or are + # started in user sessions. + #programs.mtr.enable = true; + #programs.gnupg.agent = { + # enable = true; + # enableSSHSupport = true; + #}; + + + # OpenSSH + + services.openssh.enable = true; + services.openssh.forwardX11 = true; + + + # AutoSSH reverse tunnels + + services.autossh.sessions = let + mkSshSession = user: name: host: rport: monitoringPort: { + user = user; # local user + name = "ssh-reverse-tunnel-${name}-${toString rport}"; + monitoringPort = monitoringPort; + extraArguments = lib.concatStringsSep " " [ + "-N" # no remote command + "-o ServerAliveInterval=10" # check if still alive + "-o ServerAliveCountMax=3" # check if still alive + "-o ExitOnForwardFailure=yes" # reverse tunnel critical + "-R ${toString rport}:127.0.0.1:22" # reverse tunnel + host + ]; + }; + in [ + #(mkSshSession "pbsds" "p7pi" "pi@p7.pbsds.net" 10023 20000) # no mutual signature algorithm + (mkSshSession "pbsds" "pbuntu" "pbsds@pbuntu.pbsds.net -p 23" 10023 20002) + (mkSshSession "pbsds" "hildring" "pederbs@hildring.pvv.ntnu.no" 25775 20004) + ]; + + + + # auto domain update + # TODO: use the dyndns endpoint + curl instead + + /**/ + systemd.services.domeneshop-updater = { + description = "domene.shop domain updater"; + #after = [ "something?.service" ]; + #wants = [ "something?.service" ]; + serviceConfig = let + env = pkgs.python3.withPackages (ps: with ps; [ pkgs.python3Packages.domeneshop httpx toml ]); + prog = pkgs.writeScript "domain-updater.py" '' + #!${env}/bin/python + from domeneshop import Client + import os, httpx, pprint, toml + + def get_pub_ip() -> str: + for endpoint, getter in { + "http://myip.tf": lambda resp: resp.text, + "https://ipinfo.io/json": lambda resp: resp.json()["ip"], + "https://api.ipify.org": lambda resp: resp.text, + "http://ip.42.pl/raw": lambda resp: resp.text, + }.items(): + resp = httpx.get(endpoint) + if not resp.is_success: continue + try: + return resp.json()["ip"] + except: + pass + else: + raise Exception("Could not find external IP") + + # https://www.domeneshop.no/admin?view=api + with open("/var/lib/secrets/domeneshop.toml") as f: + c = toml.load(f) + DOMENESHOP_TOKEN = os.environ.get("DOMENESHOP_TOKEN", c["secrets"]["DOMENESHOP_TOKEN"]) + DOMENESHOP_SECRET = os.environ.get("DOMENESHOP_SECRET", c["secrets"]["DOMENESHOP_SECRET"]) + IP_ADDRESS = get_pub_ip() # TODO: both ipv4 and ipv6 + DOMAINS = { + "pbsds.net": { + "olavtr": ["A"], + }, + } + + client = Client(DOMENESHOP_TOKEN, DOMENESHOP_SECRET) + for domain in client.get_domains(): + if domain["domain"] not in DOMAINS: + continue + RECORDS = DOMAINS[domain["domain"]] + for record in client.get_records(domain["id"]): + if record["host"] in RECORDS \ + and record["type"] in RECORDS[record["host"]]: + print("Found: ", end="") + pprint.pprint(record) + if record["data"] != IP_ADDRESS: + record["data"] = IP_ADDRESS + print("Push: ", end="") + pprint.pprint(record) + client.modify_record(domain_id=domain["id"], record_id=record.pop("id"), record=record) + else: + print("Nothing done") + RECORDS[record["host"]].remove(record["type"]) + for k, v in 
list(RECORDS.items()): + if not v: RECORDS.pop(k) + if not RECORDS: DOMAINS.pop(domain["domain"]) + if DOMAINS: + print("ERROR: The following records were not found:") + pprint.pprint(DOMAINS) + exit(1) + else: + print("Success") + ''; + in { + User = "domeneshop"; + Group = "domeneshop"; + DynamicUser = true; + ExecStart = prog; + PrivateTmp = true; + }; + }; + systemd.timers.domeneshop-updater = let interval = "1d"; in { + description = "Update domene.shop every ${interval}"; + wantedBy = [ "timers.target" ]; + timerConfig = { + OnBootSec = "5m"; + OnUnitInactiveSec = interval; + Unit = "domeneshop-updater.service"; + }; + }; + /**/ + + + # This value determines the NixOS release from which the default + # settings for stateful data, like file locations and database versions + # on your system were taken. It‘s perfectly fine and recommended to leave + # this value at the release version of the first install of this system. + # Before changing this value read the documentation for this option + # (e.g. man configuration.nix or on https://nixos.org/nixos/options.html). + system.stateVersion = "22.05"; # Did you read the comment? + +} diff --git a/overlays/default.nix b/overlays/default.nix new file mode 100644 index 0000000..f191ba4 --- /dev/null +++ b/overlays/default.nix @@ -0,0 +1,51 @@ +# https://nixos.wiki/wiki/Overlays +self: super: # final: prev: +let + # WARNING: this works for nixos-rebuild, but not for the nix-build trick shown on the bottom + testing = import (fetchTarball { + name = "pr-180823"; + url = "https://github.com/r-ryantm/nixpkgs/archive/cfe56470cb641985d43adba690d5bca5453110fe.tar.gz"; + sha256 = "0rbncjp2a99l6i4z7w2m86l40m33b3dl9qficfny47kqcfpgyx0b"; + }) { + #config = super.config; + }; + + overridePythonPackages = old: { + overrides = self: super: { + + pdoc = self.callPackage /home/pbsds/repos/nixpkgs-pdoc/pkgs/development/python-modules/pdoc {}; + + domeneshop = self.callPackage /home/pbsds/repos/nixpkgs-domemeshop/pkgs/development/python-modules/domeneshop {}; + + shap = self.callPackage /home/pbsds/repos/nixpkgs-catboost/pkgs/development/python-modules/shap {}; + catboost = self.callPackage /home/pbsds/repos/nixpkgs-catboost/pkgs/development/python-modules/catboost {}; + analytics-python = self.callPackage /home/pbsds/repos/nixpkgs-gradio/pkgs/development/python-modules/analytics-python {}; + ffmpy = self.callPackage /home/pbsds/repos/nixpkgs-gradio/pkgs/development/python-modules/ffmpy {}; + markdown-it-py = self.callPackage /home/pbsds/repos/nixpkgs-gradio/pkgs/development/python-modules/markdown-it-py {}; + gradio = self.callPackage /home/pbsds/repos/nixpkgs-gradio/pkgs/development/python-modules/gradio {}; + + trivial-gradios = self.callPackage ./trivial-gradios {}; + + }; + }; +in { # "final" and "prev" + + #kukkee = super.callPackage ./kukkee {}; + #rallly = super.callPackage ./rallly {}; + + #inherit (testing) polaris polaris-web; + polaris = super.callPackage /home/pbsds/repos/nixpkgs-polaris/pkgs/servers/polaris {}; + polaris-web = super.callPackage /home/pbsds/repos/nixpkgs-polaris/pkgs/servers/polaris/web.nix {}; + + mapcrafter = super.callPackage /home/pbsds/repos/nixpkgs-mapcrafter/pkgs/tools/games/minecraft/mapcrafter/default.nix {}; + mapcrafter-world112 = super.callPackage /home/pbsds/repos/nixpkgs-mapcrafter/pkgs/tools/games/minecraft/mapcrafter/default.nix {world="world112";}; + + #python3.pkgs = super.python3.pkgs.override overridePythonPackages; + python3Packages = super.python3Packages.override overridePythonPackages; + +} + +# How to 
test: +# nix-build -E 'with import { overlays = [ (import ./. ) ]; }; MY_PACKAGE' + +# warning: using testing or unstable here (^) will infinitely recurse. diff --git a/overlays/kukkee/default.nix b/overlays/kukkee/default.nix new file mode 100644 index 0000000..edde918 --- /dev/null +++ b/overlays/kukkee/default.nix @@ -0,0 +1,84 @@ +# nix-build -E 'with import {}; callPackage ./default.nix {}' +{ lib +, stdenv +, pkgs +, fetchFromGitHub +, bash +, nodejs +, nodePackages +}: + +let + nodeDependencies = (import ./node-composition.nix { + inherit pkgs nodejs; + inherit (stdenv.hostPlatform) system; + }).nodeDependencies.override (old: { + # access to path '/nix/store/...-source' is forbidden in restricted mode + #src = src; + #dontNpmInstall = true; + }); +in stdenv.mkDerivation rec { + pname = "kukkee"; + #version = "0.1.0"; + version = "unstable-2022-06-19-270c8ed"; + + src = fetchFromGitHub { + owner = "AnandBaburajan"; + repo = "Kukkee"; + #rev = "v${version}"; + rev = "270c8ed421c8f1100a845958430e1ebe61d86d5a"; + sha256 = "CtbTKUZEPjwbLRYuC44JaeZn0Rjyn4h6tsBEWWQWJmA="; + }; + + buildInputs = [ + nodeDependencies + ]; + + buildPhase = '' + runHook preBuild + + #export PATH="${nodeDependencies}/bin:${nodejs}/bin:$PATH" + ln -s ${nodeDependencies}/lib/node_modules . + next build + + runHook postBuild + ''; + + + installPhase = '' + runHook preInstall + + # FIXME: is to possible for next.js to not run from a ".next" directory? + mkdir -p $out/share/kukkee + cp -a public .next $out/share/kukkee/ + ln -s ${nodeDependencies}/lib/node_modules $out/share/kukkee/ + + # create next.js entrypoint + mkdir $out/bin + + cat < $out/bin/kukkee + #!${bash}/bin/bash + export PATH="${nodeDependencies}/bin:\$PATH" + exec -a kukkee next start $out/share/kukkee "\$@" + EOF + + chmod +x $out/bin/kukkee + + runHook postInstall + ''; + + passthru.updateScript = ./update.sh; + + meta = with lib; { + description = "Self-hosted Doodle alternative: a meeting poll tool."; + longDescription = '' + The free and open source meeting poll tool. + Never ask “what time works for you all?” again. + A self-hosted Doodle alternative. + ''; + homepage = "https://kukkee.com/"; + license = licenses.mit; + platforms = platforms.unix; + maintainers = with maintainers; [ pbsds ]; + }; +} diff --git a/overlays/kukkee/node-composition.nix b/overlays/kukkee/node-composition.nix new file mode 100644 index 0000000..08f947e --- /dev/null +++ b/overlays/kukkee/node-composition.nix @@ -0,0 +1,17 @@ +# This file has been generated by node2nix 1.11.1. Do not edit! + +{pkgs ? import { + inherit system; + }, system ? builtins.currentSystem, nodejs ? pkgs."nodejs-14_x"}: + +let + nodeEnv = import ./node-env.nix { + inherit (pkgs) stdenv lib python2 runCommand writeTextFile writeShellScript; + inherit pkgs nodejs; + libtool = if pkgs.stdenv.isDarwin then pkgs.darwin.cctools else null; + }; +in +import ./node-packages.nix { + inherit (pkgs) fetchurl nix-gitignore stdenv lib fetchgit; + inherit nodeEnv; +} diff --git a/overlays/kukkee/node-env.nix b/overlays/kukkee/node-env.nix new file mode 100644 index 0000000..2590dd2 --- /dev/null +++ b/overlays/kukkee/node-env.nix @@ -0,0 +1,598 @@ +# This file originates from node2nix + +{lib, stdenv, nodejs, python2, pkgs, libtool, runCommand, writeTextFile, writeShellScript}: + +let + # Workaround to cope with utillinux in Nixpkgs 20.09 and util-linux in Nixpkgs master + utillinux = if pkgs ? utillinux then pkgs.utillinux else pkgs.util-linux; + + python = if nodejs ? 
python then nodejs.python else python2; + + # Create a tar wrapper that filters all the 'Ignoring unknown extended header keyword' noise + tarWrapper = runCommand "tarWrapper" {} '' + mkdir -p $out/bin + + cat > $out/bin/tar <> $out/nix-support/hydra-build-products + ''; + }; + + # Common shell logic + installPackage = writeShellScript "install-package" '' + installPackage() { + local packageName=$1 src=$2 + + local strippedName + + local DIR=$PWD + cd $TMPDIR + + unpackFile $src + + # Make the base dir in which the target dependency resides first + mkdir -p "$(dirname "$DIR/$packageName")" + + if [ -f "$src" ] + then + # Figure out what directory has been unpacked + packageDir="$(find . -maxdepth 1 -type d | tail -1)" + + # Restore write permissions to make building work + find "$packageDir" -type d -exec chmod u+x {} \; + chmod -R u+w "$packageDir" + + # Move the extracted tarball into the output folder + mv "$packageDir" "$DIR/$packageName" + elif [ -d "$src" ] + then + # Get a stripped name (without hash) of the source directory. + # On old nixpkgs it's already set internally. + if [ -z "$strippedName" ] + then + strippedName="$(stripHash $src)" + fi + + # Restore write permissions to make building work + chmod -R u+w "$strippedName" + + # Move the extracted directory into the output folder + mv "$strippedName" "$DIR/$packageName" + fi + + # Change to the package directory to install dependencies + cd "$DIR/$packageName" + } + ''; + + # Bundle the dependencies of the package + # + # Only include dependencies if they don't exist. They may also be bundled in the package. + includeDependencies = {dependencies}: + lib.optionalString (dependencies != []) ( + '' + mkdir -p node_modules + cd node_modules + '' + + (lib.concatMapStrings (dependency: + '' + if [ ! -e "${dependency.packageName}" ]; then + ${composePackage dependency} + fi + '' + ) dependencies) + + '' + cd .. + '' + ); + + # Recursively composes the dependencies of a package + composePackage = { name, packageName, src, dependencies ? [], ... }@args: + builtins.addErrorContext "while evaluating node package '${packageName}'" '' + installPackage "${packageName}" "${src}" + ${includeDependencies { inherit dependencies; }} + cd .. 
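+      # scoped packages ("@scope/name") unpack one level deeper, so an extra
+      # "cd .." is emitted for them below: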
+ ${lib.optionalString (builtins.substring 0 1 packageName == "@") "cd .."} + ''; + + pinpointDependencies = {dependencies, production}: + let + pinpointDependenciesFromPackageJSON = writeTextFile { + name = "pinpointDependencies.js"; + text = '' + var fs = require('fs'); + var path = require('path'); + + function resolveDependencyVersion(location, name) { + if(location == process.env['NIX_STORE']) { + return null; + } else { + var dependencyPackageJSON = path.join(location, "node_modules", name, "package.json"); + + if(fs.existsSync(dependencyPackageJSON)) { + var dependencyPackageObj = JSON.parse(fs.readFileSync(dependencyPackageJSON)); + + if(dependencyPackageObj.name == name) { + return dependencyPackageObj.version; + } + } else { + return resolveDependencyVersion(path.resolve(location, ".."), name); + } + } + } + + function replaceDependencies(dependencies) { + if(typeof dependencies == "object" && dependencies !== null) { + for(var dependency in dependencies) { + var resolvedVersion = resolveDependencyVersion(process.cwd(), dependency); + + if(resolvedVersion === null) { + process.stderr.write("WARNING: cannot pinpoint dependency: "+dependency+", context: "+process.cwd()+"\n"); + } else { + dependencies[dependency] = resolvedVersion; + } + } + } + } + + /* Read the package.json configuration */ + var packageObj = JSON.parse(fs.readFileSync('./package.json')); + + /* Pinpoint all dependencies */ + replaceDependencies(packageObj.dependencies); + if(process.argv[2] == "development") { + replaceDependencies(packageObj.devDependencies); + } + replaceDependencies(packageObj.optionalDependencies); + + /* Write the fixed package.json file */ + fs.writeFileSync("package.json", JSON.stringify(packageObj, null, 2)); + ''; + }; + in + '' + node ${pinpointDependenciesFromPackageJSON} ${if production then "production" else "development"} + + ${lib.optionalString (dependencies != []) + '' + if [ -d node_modules ] + then + cd node_modules + ${lib.concatMapStrings (dependency: pinpointDependenciesOfPackage dependency) dependencies} + cd .. + fi + ''} + ''; + + # Recursively traverses all dependencies of a package and pinpoints all + # dependencies in the package.json file to the versions that are actually + # being used. + + pinpointDependenciesOfPackage = { packageName, dependencies ? [], production ? true, ... }@args: + '' + if [ -d "${packageName}" ] + then + cd "${packageName}" + ${pinpointDependencies { inherit dependencies production; }} + cd .. + ${lib.optionalString (builtins.substring 0 1 packageName == "@") "cd .."} + fi + ''; + + # Extract the Node.js source code which is used to compile packages with + # native bindings + nodeSources = runCommand "node-sources" {} '' + tar --no-same-owner --no-same-permissions -xf ${nodejs.src} + mv node-* $out + ''; + + # Script that adds _integrity fields to all package.json files to prevent NPM from consulting the cache (that is empty) + addIntegrityFieldsScript = writeTextFile { + name = "addintegrityfields.js"; + text = '' + var fs = require('fs'); + var path = require('path'); + + function augmentDependencies(baseDir, dependencies) { + for(var dependencyName in dependencies) { + var dependency = dependencies[dependencyName]; + + // Open package.json and augment metadata fields + var packageJSONDir = path.join(baseDir, "node_modules", dependencyName); + var packageJSONPath = path.join(packageJSONDir, "package.json"); + + if(fs.existsSync(packageJSONPath)) { // Only augment packages that exist. 
Sometimes we may have production installs in which development dependencies can be ignored + console.log("Adding metadata fields to: "+packageJSONPath); + var packageObj = JSON.parse(fs.readFileSync(packageJSONPath)); + + if(dependency.integrity) { + packageObj["_integrity"] = dependency.integrity; + } else { + packageObj["_integrity"] = "sha1-000000000000000000000000000="; // When no _integrity string has been provided (e.g. by Git dependencies), add a dummy one. It does not seem to harm and it bypasses downloads. + } + + if(dependency.resolved) { + packageObj["_resolved"] = dependency.resolved; // Adopt the resolved property if one has been provided + } else { + packageObj["_resolved"] = dependency.version; // Set the resolved version to the version identifier. This prevents NPM from cloning Git repositories. + } + + if(dependency.from !== undefined) { // Adopt from property if one has been provided + packageObj["_from"] = dependency.from; + } + + fs.writeFileSync(packageJSONPath, JSON.stringify(packageObj, null, 2)); + } + + // Augment transitive dependencies + if(dependency.dependencies !== undefined) { + augmentDependencies(packageJSONDir, dependency.dependencies); + } + } + } + + if(fs.existsSync("./package-lock.json")) { + var packageLock = JSON.parse(fs.readFileSync("./package-lock.json")); + + if(![1, 2].includes(packageLock.lockfileVersion)) { + process.stderr.write("Sorry, I only understand lock file versions 1 and 2!\n"); + process.exit(1); + } + + if(packageLock.dependencies !== undefined) { + augmentDependencies(".", packageLock.dependencies); + } + } + ''; + }; + + # Reconstructs a package-lock file from the node_modules/ folder structure and package.json files with dummy sha1 hashes + reconstructPackageLock = writeTextFile { + name = "addintegrityfields.js"; + text = '' + var fs = require('fs'); + var path = require('path'); + + var packageObj = JSON.parse(fs.readFileSync("package.json")); + + var lockObj = { + name: packageObj.name, + version: packageObj.version, + lockfileVersion: 1, + requires: true, + dependencies: {} + }; + + function augmentPackageJSON(filePath, dependencies) { + var packageJSON = path.join(filePath, "package.json"); + if(fs.existsSync(packageJSON)) { + var packageObj = JSON.parse(fs.readFileSync(packageJSON)); + dependencies[packageObj.name] = { + version: packageObj.version, + integrity: "sha1-000000000000000000000000000=", + dependencies: {} + }; + processDependencies(path.join(filePath, "node_modules"), dependencies[packageObj.name].dependencies); + } + } + + function processDependencies(dir, dependencies) { + if(fs.existsSync(dir)) { + var files = fs.readdirSync(dir); + + files.forEach(function(entry) { + var filePath = path.join(dir, entry); + var stats = fs.statSync(filePath); + + if(stats.isDirectory()) { + if(entry.substr(0, 1) == "@") { + // When we encounter a namespace folder, augment all packages belonging to the scope + var pkgFiles = fs.readdirSync(filePath); + + pkgFiles.forEach(function(entry) { + if(stats.isDirectory()) { + var pkgFilePath = path.join(filePath, entry); + augmentPackageJSON(pkgFilePath, dependencies); + } + }); + } else { + augmentPackageJSON(filePath, dependencies); + } + } + }); + } + } + + processDependencies("node_modules", lockObj.dependencies); + + fs.writeFileSync("package-lock.json", JSON.stringify(lockObj, null, 2)); + ''; + }; + + prepareAndInvokeNPM = {packageName, bypassCache, reconstructLock, npmFlags, production}: + let + forceOfflineFlag = if bypassCache then "--offline" else "--registry 
http://www.example.com"; + in + '' + # Pinpoint the versions of all dependencies to the ones that are actually being used + echo "pinpointing versions of dependencies..." + source $pinpointDependenciesScriptPath + + # Patch the shebangs of the bundled modules to prevent them from + # calling executables outside the Nix store as much as possible + patchShebangs . + + # Deploy the Node.js package by running npm install. Since the + # dependencies have been provided already by ourselves, it should not + # attempt to install them again, which is good, because we want to make + # it Nix's responsibility. If it needs to install any dependencies + # anyway (e.g. because the dependency parameters are + # incomplete/incorrect), it fails. + # + # The other responsibilities of NPM are kept -- version checks, build + # steps, postprocessing etc. + + export HOME=$TMPDIR + cd "${packageName}" + runHook preRebuild + + ${lib.optionalString bypassCache '' + ${lib.optionalString reconstructLock '' + if [ -f package-lock.json ] + then + echo "WARNING: Reconstruct lock option enabled, but a lock file already exists!" + echo "This will most likely result in version mismatches! We will remove the lock file and regenerate it!" + rm package-lock.json + else + echo "No package-lock.json file found, reconstructing..." + fi + + node ${reconstructPackageLock} + ''} + + node ${addIntegrityFieldsScript} + ''} + + npm ${forceOfflineFlag} --nodedir=${nodeSources} ${npmFlags} ${lib.optionalString production "--production"} rebuild + + if [ "''${dontNpmInstall-}" != "1" ] + then + # NPM tries to download packages even when they already exist if npm-shrinkwrap is used. + rm -f npm-shrinkwrap.json + + npm ${forceOfflineFlag} --nodedir=${nodeSources} ${npmFlags} ${lib.optionalString production "--production"} install + fi + ''; + + # Builds and composes an NPM package including all its dependencies + buildNodePackage = + { name + , packageName + , version ? null + , dependencies ? [] + , buildInputs ? [] + , production ? true + , npmFlags ? "" + , dontNpmInstall ? false + , bypassCache ? false + , reconstructLock ? false + , preRebuild ? "" + , dontStrip ? true + , unpackPhase ? "true" + , buildPhase ? "true" + , meta ? {} + , ... 
}@args: + + let + extraArgs = removeAttrs args [ "name" "dependencies" "buildInputs" "dontStrip" "dontNpmInstall" "preRebuild" "unpackPhase" "buildPhase" "meta" ]; + in + stdenv.mkDerivation ({ + name = "${name}${if version == null then "" else "-${version}"}"; + buildInputs = [ tarWrapper python nodejs ] + ++ lib.optional (stdenv.isLinux) utillinux + ++ lib.optional (stdenv.isDarwin) libtool + ++ buildInputs; + + inherit nodejs; + + inherit dontStrip; # Stripping may fail a build for some package deployments + inherit dontNpmInstall preRebuild unpackPhase buildPhase; + + compositionScript = composePackage args; + pinpointDependenciesScript = pinpointDependenciesOfPackage args; + + passAsFile = [ "compositionScript" "pinpointDependenciesScript" ]; + + installPhase = '' + source ${installPackage} + + # Create and enter a root node_modules/ folder + mkdir -p $out/lib/node_modules + cd $out/lib/node_modules + + # Compose the package and all its dependencies + source $compositionScriptPath + + ${prepareAndInvokeNPM { inherit packageName bypassCache reconstructLock npmFlags production; }} + + # Create symlink to the deployed executable folder, if applicable + if [ -d "$out/lib/node_modules/.bin" ] + then + ln -s $out/lib/node_modules/.bin $out/bin + + # Patch the shebang lines of all the executables + ls $out/bin/* | while read i + do + file="$(readlink -f "$i")" + chmod u+rwx "$file" + patchShebangs "$file" + done + fi + + # Create symlinks to the deployed manual page folders, if applicable + if [ -d "$out/lib/node_modules/${packageName}/man" ] + then + mkdir -p $out/share + for dir in "$out/lib/node_modules/${packageName}/man/"* + do + mkdir -p $out/share/man/$(basename "$dir") + for page in "$dir"/* + do + ln -s $page $out/share/man/$(basename "$dir") + done + done + fi + + # Run post install hook, if provided + runHook postInstall + ''; + + meta = { + # default to Node.js' platforms + platforms = nodejs.meta.platforms; + } // meta; + } // extraArgs); + + # Builds a node environment (a node_modules folder and a set of binaries) + buildNodeDependencies = + { name + , packageName + , version ? null + , src + , dependencies ? [] + , buildInputs ? [] + , production ? true + , npmFlags ? "" + , dontNpmInstall ? false + , bypassCache ? false + , reconstructLock ? false + , dontStrip ? true + , unpackPhase ? "true" + , buildPhase ? "true" + , ... }@args: + + let + extraArgs = removeAttrs args [ "name" "dependencies" "buildInputs" ]; + in + stdenv.mkDerivation ({ + name = "node-dependencies-${name}${if version == null then "" else "-${version}"}"; + + buildInputs = [ tarWrapper python nodejs ] + ++ lib.optional (stdenv.isLinux) utillinux + ++ lib.optional (stdenv.isDarwin) libtool + ++ buildInputs; + + inherit dontStrip; # Stripping may fail a build for some package deployments + inherit dontNpmInstall unpackPhase buildPhase; + + includeScript = includeDependencies { inherit dependencies; }; + pinpointDependenciesScript = pinpointDependenciesOfPackage args; + + passAsFile = [ "includeScript" "pinpointDependenciesScript" ]; + + installPhase = '' + source ${installPackage} + + mkdir -p $out/${packageName} + cd $out/${packageName} + + source $includeScriptPath + + # Create fake package.json to make the npm commands work properly + cp ${src}/package.json . + chmod 644 package.json + ${lib.optionalString bypassCache '' + if [ -f ${src}/package-lock.json ] + then + cp ${src}/package-lock.json . 
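+            # the copy inherits the read-only mode of the Nix store,
+            # so restore write permission for the later npm steps: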
+ chmod 644 package-lock.json + fi + ''} + + # Go to the parent folder to make sure that all packages are pinpointed + cd .. + ${lib.optionalString (builtins.substring 0 1 packageName == "@") "cd .."} + + ${prepareAndInvokeNPM { inherit packageName bypassCache reconstructLock npmFlags production; }} + + # Expose the executables that were installed + cd .. + ${lib.optionalString (builtins.substring 0 1 packageName == "@") "cd .."} + + mv ${packageName} lib + ln -s $out/lib/node_modules/.bin $out/bin + ''; + } // extraArgs); + + # Builds a development shell + buildNodeShell = + { name + , packageName + , version ? null + , src + , dependencies ? [] + , buildInputs ? [] + , production ? true + , npmFlags ? "" + , dontNpmInstall ? false + , bypassCache ? false + , reconstructLock ? false + , dontStrip ? true + , unpackPhase ? "true" + , buildPhase ? "true" + , ... }@args: + + let + nodeDependencies = buildNodeDependencies args; + extraArgs = removeAttrs args [ "name" "dependencies" "buildInputs" "dontStrip" "dontNpmInstall" "unpackPhase" "buildPhase" ]; + in + stdenv.mkDerivation ({ + name = "node-shell-${name}${if version == null then "" else "-${version}"}"; + + buildInputs = [ python nodejs ] ++ lib.optional (stdenv.isLinux) utillinux ++ buildInputs; + buildCommand = '' + mkdir -p $out/bin + cat > $out/bin/shell < ]; + #home-manager.useUserPackages = true; # install to /etc instead of ~/.nix-profile, needed for containers + #home-manager.useGlobalPkgs = true; # brrr + #home-manager.users.${config.services.code-server.user} = { pkgs, config, ... }: { + # programs.git.enable = true; + # programs.git.userName = "Theoharis Theoharis"; + # programs.git.userEmail = "theotheo@ntnu.no"; + #}; + + services.code-server = { + enable = true; + host = "0.0.0.0"; # container + port = 53754; + # if you don't care about security: https://argon2.online/ + hashedPassword = "$argon2i$v=19$m=16,t=2,p=1$MHh5UGNtU1lWR1UySnhIZw$ITg8U7Gq2CXByuOOnrKVUg"; + package = pkgs.vscode-with-extensions.override { + vscode = pkgs.code-server.overrideAttrs (old: { + passthru.executableName = "code-server"; + passthru.longName = "Visual Studio Code Server"; + }); + #vscodeExtensions = vscode-extensions; [ + vscodeExtensions = with (import {}).vscode-extensions; [ + shd101wyy.markdown-preview-enhanced + sanaajani.taskrunnercode + tomoki1207.pdf + ] ++ pkgs.vscode-utils.extensionsFromVscodeMarketplace [ + { + name = "new-railscasts"; + publisher = "carakan"; + version = "1.0.68"; + sha256 = "sha256-uZCAurvZu7QHjTR6ukmYbsI58GpfTo3shdoX/MH2ElA="; + } + { + name = "theme-railscasts"; + publisher = "PaulOlteanu"; + version = "4.0.1"; + sha256 = "sha256-67RNcMr+hvzn2FvapkHLd8OdEBAz8w4cwsGlu0tbCNY="; + } + { + name = "trailscasts"; + publisher = "mksafi"; + version = "1.2.3"; + sha256 = "sha256-mZ9I1BYf8x3lpl5/2sojk+5GMfhDqRBzs6nFkumlPKg="; + } + { + name = "vscode-theme-railscasts-plus"; + publisher = "marlosirapuan"; + version = "0.0.6"; + sha256 = "sha256-8GyyxDeehFo/lGSmA6dfXZ3DMZ/B632ax+9q3+irjws="; + } + { + name = "theme-railscast-next"; + publisher = "edus44"; + version = "0.0.2"; + sha256 = "sha256-RYk6X4iKoEQlKSVhydnwWQJqt884+HC9DZN2aqIbfNI="; + } + { # best, but no markdown + name = "railscasts"; + publisher = "mrded"; + version = "0.0.4"; + sha256 = "sha256-vjfoeRW+rmYlzSuEbYJqg41r03zSfbfuNCfAhHYyjDc="; + } + { + name = "beardedtheme"; + publisher = "BeardedBear"; + version = "7.4.0"; + sha256 = "sha256-8FY9my7v7bcfD0LH5AVNGI2dF1qMLnVp2LR/CiP01NQ="; + } + ]; + }; + extraPackages = with pkgs; [ + (writeShellScriptBin 
"pandoc" '' + export XDG_DATA_HOME=${pandoc-lua-filters}/share + exec ${pandoc}/bin/pandoc "$@" + '') + + (texlive.combine { + inherit (texlive) + scheme-small + titlesec + fontaxes + supertabular + xtab + # boxed quotes + mdframed + zref + needspace + soul + atkinson + ; + }) + + pandoc-imagine + haskellPackages.pandoc-crossref + #haskellPackages.pandoc-plot + #pandoc-plantuml-filter nodePackages.mermaid-cli + + bash + git + bat + gnumake + boxes + graphviz + #python3Packages.cairosvg + + (python3.withPackages (ps: with ps; [ + numpy + matplotlib + #python-lsp-server + ])) + + ]; + }; + + networking.firewall = { + enable = true; + allowedTCPPorts = [ + config.services.code-server.port + ]; + }; + + }; + }; + + services.nginx.virtualHosts.${mkDomain "code-server"} = { + forceSSL = true; # addSSL = true; + enableACME = true; #useACMEHost = acmeDomain; + locations."/" = { + #proxyPass = "http://127.0.0.1:${toString cnt.services.code-server.port}"; + #proxyPass = "http://10.240.100.3:${toString cnt.services.code-server.port}"; + proxyPass = "http://${config.containers.code-server-theo.localAddress}:${toString cnt.services.code-server.port}"; + proxyWebsockets = true; + }; + }; + + +} diff --git a/profiles/nas/default.nix b/profiles/nas/default.nix new file mode 100644 index 0000000..160a851 --- /dev/null +++ b/profiles/nas/default.nix @@ -0,0 +1,1278 @@ +{ config, pkgs, ... }: + +let + lib = pkgs.lib; + #webserver = "nginx" # httpd caddy lighttpd + domain = "${config.networking.hostName}.${config.networking.domain}"; + mkDomain = subname: "${subname}.${domain}"; + subdomains = lib.sort (x: y: x + #/home/pbsds/repos/nixpkgs-polaris/nixos/modules/services/misc/polaris.nix + ]; + + + # ACME + # regenerate certs with: + # systemctl clean --what=state acme-noximilien.pbsds.net.service + + security.acme.acceptTerms = true; + security.acme.defaults.email = "pbsds+acme@hotmail.com"; + #security.acme.defaults.renewInterval = "daily"; + #security.acme.defaults.reloadServices + + # https://www.xf.is/2020/06/30/list-of-free-acme-ssl-providers/ + #security.acme.defaults.server = "https://acme-staging-v02.api.letsencrypt.org/directory"; # STAGING + #security.acme.defaults.server = "https://api.buypass.com/acme/directory"; # no wildcards, rate limit: 20 domains/week, 5 duplicate certs / week + #security.acme.defaults.server = "https://api.test4.buypass.no/acme/directory"; # STAGING. no wildcards, rate limit: 20 domains/week, 5 duplicate certs / week + + # DNS-based ACME: + # - https://go-acme.github.io/lego/dns/domeneshop/ + # - https://nixos.org/manual/nixos/stable/index.html#module-security-acme-config-dns-with-vhosts + #security.acme.defaults.dnsProvider = "domeneshop"; + #security.acme.defaults.credentialsFile = "/var/lib/secrets/domeneshop.key"; # TODO: this file must be made by hand, containing env variables. + + + + services.nginx.enable = true; + + + # Website tunnel + # TODO: move to web profile? 
+ + services.nginx.virtualHosts.${domain} = { + forceSSL = true; # addSSL = true; + enableACME = true; + #acmeRoot = null; # use DNS + default = true; + serverAliases = map mkDomain [ + "www" + #"*" # requires DNS ACME + ]; + # The alternative to ^ is: config.security.acme.certs."${acmeDomain}".extraDomainNames = [ (mkDomain "foo") ]; + # TODO: 'nox' alias for everything + locations."/" = { + proxyPass = "http://pbuntu.pbsds.net"; + proxyWebsockets = true; + }; + }; + #services.nginx.virtualHosts.${mkDomain "www"} = { + # addSSL = true; + # useACMEHost = acmeDomain; #enableACME = true; + # locations."/" = { + # proxyPass = "http://pbuntu.pbsds.net"; + # proxyWebsockets = true; + # }; + #}; + + + # service index + # TODO: move to web profile? + + services.nginx.virtualHosts.${mkDomain "index"} = { + forceSSL = true; # addSSL = true; + enableACME = true; #useACMEHost = acmeDomain; + root = let + getName = domain: head (lib.splitString "." domain); + getDomain = domain: concatStringsSep "." (tail (lib.splitString "." domain)); + custom = rec { + index = "This page"; + links = "Linktree"; + element = pkgs.element-web.meta.description; + refleksjon = "My dad is a cheapskate"; + roroslyd = "My dad is a cheapskate"; + www = "wwwwwwwwwwwwwww"; + noximilien = www; + shlink = "Url shortener"; + }; + getDesc = domain: let + name = getName domain; + in if lib.hasAttr name custom + then custom.${name} + else if lib.hasAttr name pkgs.python3Packages + then pkgs.python3Packages.${name}.meta.description + else if lib.hasAttr name pkgs + then pkgs.${name}.meta.description + else if lib.hasAttrByPath [name "package"] config.services + then config.services.${name}.package.meta.description + else ""; + mkRow = domain: ''${getName domain}.${getDomain domain}${getDesc domain}''; + in pkgs.writeTextDir "index.html" '' + + +
urldescription + ${lib.concatStringsSep "\n" (map mkRow subdomains)} +
+ ''; + }; + + + # webdav + # Simple WebDAV server + + services.webdav = { + enable = true; + # the webdav user uid:gid is fixed + settings = { + address = "127.0.0.1"; + port = 9568; + prefix = "/"; + scope = "/mnt/reidun/pub"; + modify = false; + auth = true; + users = [ + { + username = "zotero"; + password = "{bcrypt}$2y$10$9zzZuwd2AvNZXb8WCG/bM..ibOroNnX0sN94UTAV.Jco9LnZ8Whs2"; + #prefix = "/zotero/"; + scope = "/mnt/reidun/Various/Zotero"; + modify = true; + } + /** / + { + username = "guest"; + password = "hunter2"; + } + /**/ + ]; + }; + }; + services.nginx.virtualHosts.${mkDomain "webdav"} = lib.mkIf config.services.webdav.enable { + forceSSL = true; # addSSL = true; + enableACME = true; #useACMEHost = acmeDomain; + locations."/" = { + proxyPass = "http://127.0.0.1:${toString config.services.webdav.settings.port}"; + #proxyWebsockets = true; + extraConfig = '' + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header REMOTE-HOST $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header Host $host; + proxy_redirect off; + + client_max_body_size 2G; + ''; + }; + }; + + + # Home assistant + + services.home-assistant = { + #enable = true; + config = { + # https://www.home-assistant.io/integrations/default_config/ + default_config = {}; + # https://www.home-assistant.io/integrations/esphome/ + #esphome = {}; + # https://www.home-assistant.io/integrations/met/ + #met = {}; + }; + }; + + + # Flexget + # Multipurpose automation tool for all of your media + + services.flexget = { + enable = true; + user = "flexget"; # The user under which to run flexget. + homeDir = "/var/lib/flexget"; + interval = "30m"; + config = '' + tasks: + shanaproject: + rss: 'https://www.shanaproject.com/feeds/secure/user/35853/J98B7OXAHO/' + accept_all: yes + no_entries_ok: yes + transmission: + host: 192.168.1.3 + port: 9091 + path: '/Reidun/shared/Downloads/shana project/' + username: pbsds + password: spismeg + ''; + }; + users.groups."${config.services.flexget.user}" = lib.mkIf config.services.flexget.enable { }; + users.users."${config.services.flexget.user}" = lib.mkIf config.services.flexget.enable { + isSystemUser = true; + createHome = true; + home = config.services.flexget.homeDir; + group = "${config.services.flexget.user}"; + }; + + + # Graphana + # Gorgeous metric viz, dashboards & editors for Graphite, InfluxDB & OpenTSDB + + services.grafana = rec { + #enable = true; + #addr = "127.0.0.1"; + addr = "0.0.0.0"; + port = 3000; + domain = mkDomain "grafana"; + #rootUrl = "https://${domain}/grafana/"; # Not needed if it is `https://your.domain/` + }; + services.nginx.virtualHosts."${config.services.grafana.domain}" = lib.mkIf config.services.grafana.enable { + forceSSL = true; # addSSL = true; + enableACME = true; #useACMEHost = acmeDomain; + #locations."/grafana/" = { + locations."/" = { + proxyPass = "http://127.0.0.1:${toString config.services.grafana.port}"; + proxyWebsockets = true; + }; + }; + + + # OwnCast + # self-hosted video live streaming solution + + services.owncast = { + # the default /admin account is admin:abc123, don't enable if you don't intend to change it! 
+ enable = true; + port = 3456; # default is 8080 + rtmp-port = 1935; # remember to punch a TCP hole in the NAT + #listen = "0.0.0.0"; # default is "127.0.0.1" + openFirewall = true; # the rtmp port, and the http port if listen != "127.0.0.1" + }; + services.nginx.virtualHosts.${mkDomain "owncast"} = lib.mkIf config.services.owncast.enable { + forceSSL = true; # addSSL = true; + enableACME = true; #useACMEHost = acmeDomain; + locations."/" = { + proxyPass = "http://127.0.0.1:${toString config.services.owncast.port}"; + proxyWebsockets = true; + }; + }; + + + # Cryptpad + # A collaborative office suite that is end-to-end encrypted and open-source. + + services.cryptpad = { + #enable = true; # current node version used is marked insecure + # reference: https://github.com/xwiki-labs/cryptpad/blob/main/config/config.example.js + configFile = toFile "cryptpad-config.js" '' + module.exports = { + httpUnsafeOrigin: 'http://localhost:3457', + httpSafeOrigin: 'https://${mkDomain "cryptpad"}', + httpAddress: '127.0.0.1', + httpPort: 3457, + + //adminKeys: [ // can be found on the settings page for registered users + // "[cryptpad-user1@my.awesome.website/YZgXQxKR0Rcb6r6CmxHPdAGLVludrAF2lEnkbx1vVOo=]", + //], + + // storage + //inactiveTime: 90, // days + //archiveRetentionTime: 15, // days + //accountRetentionTime: 365, // days, default is never + //maxUploadSize: 20 * 1024 * 1024, // bytes + //premiumUploadSize: 100 * 1024 * 1024, // bytes, (users with a plan in their customLimit) + + filePath: './datastore/', + archivePath: './data/archive', // recovery in the event of accidental deletion + pinPath: './data/pins', // content stored indefinetly + taskPath: './data/tasks', // scheduled tasks + blockPath: './block', // users' authenticated blocks + blobPath: './blob', // uploaded encrypted blobs + blobStagingPath: './data/blobstage', // incomplete blobs + decreePath: './data/decrees', // undocumented + logPath: false, // logging of events, may be set to false + logToStdout: true, + logLevel: 'info', // silly, verbose, debug, feedback, info, warn, error + logFeedback: false, // data collection + verbose: false, // logging + installMethod: 'nixpkgs', // telemetry for devs + }; + ''; + }; + services.nginx.virtualHosts.${mkDomain "cryptpad"} = lib.mkIf config.services.cryptpad.enable { + forceSSL = true; # addSSL = true; + enableACME = true; #useACMEHost = acmeDomain; + locations."/" = { + proxyPass = "http://127.0.0.1:3457"; + proxyWebsockets = true; + }; + }; + + + + # Jellyfin + + services.jellyfin = { + enable = true; # don't enable unless you intend to first-time-setup the admin user + # from https://jellyfin.org/docs/general/networking/index.html: + # - 8096/tcp is used by default for HTTP traffic. You can change this in the dashboard. + # - 8920/tcp is used by default for HTTPS traffic. You can change this in the dashboard. + # - 1900/udp is used for service auto-discovery. This is not configurable. + # - 7359/udp is also used for auto-discovery. This is not configurable. + openFirewall = false; # I do it manually below: + # TODO: configure initial collections and extensions + }; + # firewall + networking.firewall = lib.mkIf config.service.jellyfin.enable { + # TODO: does this overwrite rules set by other stuff? should i use ++ ? + #allowedTCPPorts = [ 8096 8920 ]; + allowedUDPPorts = [ 1900 7359 ]; # TODO: Only if behind a NAT? 
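+    # Note on the overwrite question above: allowedTCPPorts/allowedUDPPorts
+    # are list-typed options, so definitions from different modules are
+    # concatenated by the module system rather than overwritten; no ++ needed.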
+ }; + services.nginx.virtualHosts.${mkDomain "jellyfin"} = lib.mkIf config.services.jellyfin.enable { + forceSSL = true; # addSSL = true; + enableACME = true; #useACMEHost = acmeDomain; + locations."/" = { + proxyPass = "http://127.0.0.1:8096"; + proxyWebsockets = true; + }; + }; + # Hardware acceleration + nixpkgs.config.packageOverrides = pkgs: { + vaapiIntel = pkgs.vaapiIntel.override { enableHybridCodec = true; }; + }; + hardware.opengl = { + enable = true; + extraPackages = with pkgs; [ + intel-media-driver + vaapiIntel + vaapiVdpau + libvdpau-va-gl + ]; + }; + # Allow Jellyfin access to VAAPI + users.users.${config.services.jellyfin.user}.extraGroups = [ "video" "render" ]; + systemd.services.jellyfin.serviceConfig.PrivateDevices = lib.mkForce false; + systemd.services.jellyfin.serviceConfig.DeviceAllow = lib.mkForce [ "/dev/dri/renderD128" ]; + + + # Navidrome + # Music Server and Streamer compatible with Subsonic/Airsonic + + services.navidrome = { + enable = true; + settings = { + # default hostname:port = "127.0.0.1:4533" + MusicFolder = "/mnt/reidun/Music/Albums"; + #MusicFolder = pkgs.linkFarm "navidrome-music-library" [ + # { name = "Albums"; path = "/mnt/reidun/Music/Albums"; } + # { name = "OST"; path = "/mnt/reidun/Music/OST"; } + # { name = "dojin.co"; path = "/mnt/reidun/Music/dojin.co"; } + # { name = "Touhou"; path = "/mnt/reidun/Music/Touhou"; } + # { name = "Kancolle"; path = "/mnt/reidun/Music/Kancolle"; } + # { name = "Vocaloid"; path = "/mnt/reidun/Music/Vocaloid"; } + #]; + + UIWelcomeMessage = "Spis meg"; + DefaultTheme = "Spotify-ish"; + }; + }; + services.nginx.virtualHosts.${mkDomain "navidrome"} = lib.mkIf config.services.navidrome.enable { + forceSSL = true; # addSSL = true; + enableACME = true; #useACMEHost = acmeDomain; + locations."/" = { + proxyPass = "http://127.0.0.1:4533"; + proxyWebsockets = true; + }; + }; + + + # Polaris + # Self-host your music collection, and access it from any computer and mobile device + + services.polaris = { + enable = true; + #user = "pbsds"; + #group = "users"; + port = 7890; + package = pkgs.unstable.polaris; # instead of my overlay, TODO: move that overlay here + settings = { + settings.reindex_every_n_seconds = 7*24*60*60; # weekly, default is 1800, i.e. 
hourly + settings.album_art_pattern = + "([Cc]over|COVER|[Ff]older|FOLDER|[Ff]ront|FRONT)\.(jpeg|JPEG|jpg|JPG|png|PNG|bmp|BMP|gif|GIF)"; + #"(?i)(cover|folder|front)\.(jpeg|jpg|png|bmp|gif)"; + mount_dirs = [ + { source = "/mnt/reidun/Music/Albums"; name = "Albums"; } + { source = "/mnt/reidun/Music/dojin.co"; name = "dojin.co"; } + { source = "/mnt/reidun/Music/Vocaloid"; name = "Vocaloid"; } + { source = "/mnt/reidun/Music/Touhou"; name = "Touhou"; } + { source = "/mnt/reidun/Music/OST"; name = "OST"; } + { source = "/mnt/reidun/Music/Kancolle"; name = "Kancolle"; } + { source = "/mnt/reidun/Downloads/music"; name = "Downloads"; } + ]; + }; + }; + services.nginx.virtualHosts.${mkDomain "polaris"} = lib.mkIf config.services.polaris.enable { + forceSSL = true; # addSSL = true; + enableACME = true; #useACMEHost = acmeDomain; + locations."/" = { + proxyPass = "http://127.0.0.1:${toString config.services.polaris.port}"; + proxyWebsockets = true; + }; + }; + + + # Hydra + # Nix-based continuous build system + # https://github.com/NixOS/hydra + # https://nixos.wiki/wiki/Hydra + # sudo -u hydra hydra-create-user 'admin' --full-name '' --email-address '' --password-prompt --role admin + # https://blog.matejc.com/blogs/myblog/nixos-hydra-nginx + services.hydra = { + enable = true; + hydraURL = "https://${mkDomain "hydra"}"; + #smtpHost = ; + listenHost = "localhost"; + port = 4758; + notificationSender = "hydra@${domain}"; # Sender email address used for email notifications. + #buildMachinesFiles = []; + #useSubstitutes = true; + #debugServer = true; + #logo = /some/path.png; + #minimumDiskFree = 0; # Minimum disk space (GiB) determining if queue runner runs or not. + #minimumDiskFreeEvaluator = 0; # Minimum disk space (GiB) determining if evaluator runs or not. + }; + services.nginx.virtualHosts.${mkDomain "hydra"} = lib.mkIf config.services.hydra.enable { + forceSSL = true; # addSSL = true; + enableACME = true; #useACMEHost = acmeDomain; + locations."/" = { + proxyPass = "http://127.0.0.1:${toString config.services.hydra.port}"; + proxyWebsockets = true; + extraConfig = '' + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + add_header Front-End-Https on; + ''; + }; + }; + + + # Sourcegraph + # Understand, fix, and automate across your codebase with this code intelligence platform + + /** / + # First user regitration becomes admin + # data can be destryoed with `nixos-container destroy sourcegraph` + virtualisation.oci-containers.containers."sourcegraph" = { + autoStart = true; + #image = "sourcegraph/server:3.41.0"; + #image = "sourcegraph/server:latest"; + image = "sourcegraph/server:insiders"; + environment = {}; + ports = [ + "127.0.0.1:7080:7080/tcp" # webui? + "127.0.0.1:3370:3370/tcp" # admin? 
(graphana and stuff) + ]; + volumes = [ + "/var/lib/sourcegraph/config:/etc/sourcegraph" + "/var/lib/sourcegraph/data:/var/opt/sourcegraph" + ]; + }; + systemd.services."create-sourcegraph-volume-dirs" = { + wantedBy = [ "${config.virtualisation.oci-containers.backend}-sourcegraph.service" ]; + serviceConfig.Type = "oneshot"; + script = '' + mkdir -p /var/lib/sourcegraph/config + mkdir -p /var/lib/sourcegraph/data + ''; + }; + services.nginx.virtualHosts.${mkDomain "sourcegraph"} + = lib.mkIf config.virtualisation.oci-containers.containers."sourcegraph".autoStart { + forceSSL = true; # addSSL = true; + enableACME = true; #useACMEHost = acmeDomain; + locations."/" = { + proxyPass = "http://127.0.0.1:7080"; + proxyWebsockets = true; + }; + #locations."/graphana/" = { + # proxyPass = "http://127.0.0.1:3370"; + # proxyWebsockets = true; + #}; + }; + /**/ + + + + # Shlink + # URL shortener with REST API and command line interface + # manage with https://app.shlink.io/ + # TODO: self-host shlink web client? https://shlink.io/documentation/shlink-web-client/ + + /** / + # data can be destryoed with `nixos-container destroy shlink` + virtualisation.oci-containers.containers."shlink" = { + autoStart = true; + image = "shlinkio/shlink:stable"; + # https://shlink.io/documentation/install-docker-image/ + environment = { + "DEFAULT_DOMAIN" = mkDomain "shlink"; + "IS_HTTPS_ENABLED" = "true"; + "TIMEZONE" = "Europe/Oslo"; + #"GEOLITE_LICENSE_KEY" = ; # https://shlink.io/documentation/geolite-license-key/ + + # TODO: use postgres? default is sqlite3? + }; + ports = [ + "127.0.0.1:5757:8080/tcp" # webui + ]; + volumes = [ + "/var/lib/shlink/database.sqlite:/etc/shlink/data/database.sqlite" + # TODO: where is the sqlite file? + ]; + }; + systemd.services."create-shlink-volume-dirs" = { + wantedBy = [ "${config.virtualisation.oci-containers.backend}-shlink.service" ]; + serviceConfig.Type = "oneshot"; + script = '' + mkdir -p /var/lib/shlink + touch /var/lib/shlink/database.sqlite + ''; + }; + services.nginx.virtualHosts.${mkDomain "shlink"} + = lib.mkIf config.virtualisation.oci-containers.containers."shlink".autoStart { + forceSSL = true; # addSSL = true; + enableACME = true; #useACMEHost = acmeDomain; + locations."/" = { + proxyPass = "http://127.0.0.1:5757"; + proxyWebsockets = true; + }; + }; + programs.bash.shellAliases = { + shlink = "docker exec -it shlink shlink"; + }; + /**/ + + # Resilio Sync + # Automatically sync files via secure, distributed technology + + services.resilio = { + #enable = true; + #downloadLimit = 0; + #uploadLimit = 0; + #directoryRoot = "/media" # Default directory to add folders in the web UI. + #storagePath = "/var/lib/resilio-sync/"; # Where BitTorrent Sync will store it's database files + httpLogin = ""; + httpPass = ""; + deviceName = "${config.networking.hostName}"; + #apiKey = ; # API key, which enables the developer API. 
+ #httpListenPort = 9000; + #httpListenAddr = "[::1]"; + enableWebUI = false; # default is false + }; + services.nginx.virtualHosts.${mkDomain "resilio"} = let + cfg = config.services.resilio; + in lib.mkIf (cfg.enable && cfg.enableWebUI) { + forceSSL = true; # addSSL = true; + enableACME = true; #useACMEHost = acmeDomain; + locations."/" = { + proxyPass = "http://127.0.0.1:${cfg.httpListenPort}"; + proxyWebsockets = true; + }; + }; + + + # Webhook + # incoming webhook server that executes shell commands + + /**/ + services.webhook = { + enable = true; + #listenHost = "0.0.0.0"; # default is "127.0.0.1" + listenPort = 7777; # default is 8080 + urlPrefix = "spismeg"; # default is "hooks" + #httpMethods = [ "GET" "POST" ]; # default is [ "POST" ] + settings = [ + { + id = "webhook-id"; + execute-command = pkgs.writeShellScript "webhook-handler.sh" '' + echo foobar; + ''; + } + ]; + }; + services.nginx.virtualHosts.${mkDomain "webhook"} = lib.mkIf config.services.webhook.enable { + forceSSL = true; # addSSL = true; + enableACME = true; #useACMEHost = acmeDomain; + locations."/" = { + proxyPass = "http://127.0.0.1:${toString config.services.webhook.listenPort}"; + proxyWebsockets = true; + }; + }; + /**/ + + + # Gitea + # Git with a cup of tea + + services.gitea = rec { + enable = true; + disableRegistration = true; # disable after initial deploy + #https://docs.gitea.io/en-us/config-cheat-sheet/ + #settings = { + # "cron.sync_external_users" = { + # RUN_AT_START = true; + # SCHEDULE = "@every 24h"; + # UPDATE_EXISTING = true; + # }; + # mailer = { + # ENABLED = true; + # MAILER_TYPE = "sendmail"; + # FROM = "do-not-reply@example.org"; + # SENDMAIL_PATH = "${pkgs.system-sendmail}/bin/sendmail"; + # }; + # other = { + # SHOW_FOOTER_VERSION = false; + # }; + #}; + #appName = "gitea: spis meg"; + appName = "gitea: private instance"; + domain = mkDomain "gitea"; + #ssh.enable # default is true + rootUrl = "https://${domain}/"; + #ssh.clonePort # default is 22 + #log.level = "Debug"; # default is "Info" + #lfs.enable = true; # default is false + httpPort = 9675; # default is 3000 + httpAddress = "127.0.0.1"; # default is "0.0.0.0" + #extraConfig + #database.type # default is "sqlite3" + cookieSecure = true; # default is false, only send cookies over https + #stateDir # default is "/var/lib/gitea" + #mailerPasswordFile # Path to a file containing the SMTP password + #repositoryRoot # default is "${config.services.gitea.stateDir}/repositories" + #log.rootPath # TODO: move? 
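+    # The staticRootPath below overlays extra themes onto gitea's bundled static
+    # files: symlinkJoin merges the package's data directory with a linkFarm that
+    # drops theme-<name>.css files under public/css, and each theme is then made
+    # selectable by listing it in settings.ui.THEMES (ui.DEFAULT_THEME picks one).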
+ #lfs.contentDir + #dump.enable # default is false + staticRootPath = pkgs.symlinkJoin { + name = "gitea-static-root-data"; + paths = let + giteaModern = pkgs.fetchFromGitea { # https://codeberg.org/Freeplay/Gitea-Modern + domain = "codeberg.org"; + owner = "Freeplay"; + repo = "Gitea-Modern"; + rev = "0c0a05e6f0496521c166402dd56441a714487fd8"; + sha256 = "q14E5ni2BvpGsmGOHWQgbCqD4lBh4bFtBFtIyNfAf0Q="; + }; + giteaEarlGray = pkgs.fetchFromGitHub { # https://github.com/acoolstraw/earl-grey + owner = "acoolstraw"; + repo = "earl-grey"; + rev = "a6ca3dd3b9e6b48f6e45032b2aa691c2f16dc9bc"; + sha256 = "55Piafc7kQ5hybwHQczx36AP+kX1AtWugxERYNdmqWk="; + }; + in [ + config.services.gitea.package.data + (pkgs.linkFarm "gitea-custom-dir" [ + { name = "public/css/theme-gitea-modern.css"; path = "${giteaModern}/Gitea/theme-gitea-modern.css"; } + { name = "public/css/theme-earl-grey.css"; path = "${giteaEarlGray}/theme-earl-grey.css"; } + ]) + ]; + }; + settings = { + # https://docs.gitea.io/en-us/config-cheat-sheet/ + ui.THEMES = "gitea,arc-green,earl-grey,gitea-modern"; + ui.DEFAULT_THEME = "earl-grey"; + }; + }; + services.nginx.virtualHosts.${mkDomain "gitea"} = lib.mkIf config.services.gitea.enable { + forceSSL = true; # addSSL = true; + enableACME = true; #useACMEHost = acmeDomain; + locations."/" = { + proxyPass = "http://127.0.0.1:${toString config.services.gitea.httpPort}"; + proxyWebsockets = true; + }; + }; + + + # TODO: mailcatcher + # TODO: configure stuff to send its shit here + + + # ntopng + # High-speed web-based traffic analysis and flow collection tool + # WARNING: default username and password is admin:admin + + services.ntopng = { + enable = true; # also enables redis for persistent data storage + httpPort = 3987; # HTTP port of embedded web server + #interfaces = [ "any" ]; + #extraConfig = "; + #redis.address = ""; + #redis.createInstance = "ntopng"; + }; + services.nginx.virtualHosts.${mkDomain "ntopng"} = lib.mkIf config.services.ntopng.enable { + forceSSL = true; # addSSL = true; + enableACME = true; #useACMEHost = acmeDomain; + locations."/" = { + proxyPass = "http://127.0.0.1:${toString config.services.ntopng.httpPort}"; + proxyWebsockets = true; + }; + }; + + + # TODO: kukkee or rallly + # https://noted.lol/2-self-hosted-alternatives-to-doodle-meeting-scheduling/ + #https://rallly.co/ + + + # Kukkee + # Self-hosted Doodle alternative: a meeting poll tool + + /** / + services.kukkee = { + #enable = true; + port = 5666; + baseUrl = "https://${mkDomain "kukkee"}"; + #mongodb.enable = false; + }; + services.nginx.virtualHosts.${mkDomain "kukkee"} = lib.mkIf config.services.kukkee.enable { + forceSSL = true; # addSSL = true; + enableACME = true; #useACMEHost = acmeDomain; + locations."/" = { + proxyPass = "http://127.0.0.1:${toString config.services.kukkee.port}"; + proxyWebsockets = true; + }; + }; + /**/ + + + # Nitter + # Alternative Twitter front-end + + services.nitter = { + enable = true; + package = pkgs.unstable.nitter; + #openFirewall + #config.base64Media = false; # Use base64 encoding for proxied media URLs. + server.title = "Pjitter"; + server.address = "127.0.0.1"; + server.hostname = mkDomain "nitter"; + server.https = true; # Secure cookies + server.port = 4965; + #preferences.autoplayGifs = ; # default is true + #preferences.bidiSupport = ; # Support bidirectional text (makes clicking on tweets harder). default is false + #preferences.hideBanner = ; # Hide profile banner. default is false + #preferences.hidePins = ; # Hide pinned tweets. 
default is false + #preferences.hideReplies = ; # Hide tweet replies. default is false + #preferences.hideTweetStats = ; # Hide tweet stats (replies, retweets, likes). default is false + preferences.hlsPlayback = true; # Enable HLS video streaming (requires JavaScript). default is false + preferences.infiniteScroll = true; # Infinite scrolling (requires JavaScript, experimental!). default is false + #preferences.mp4Playback = ; # Enable MP4 video playback. default is true + #preferences.muteVideos = ; # Mute videos by default. default is false + #preferences.proxyVideos = ; # Proxy video streaming through the server (might be slow). default is true + preferences.replaceInstagram = "bibliogram.art"; # Replace Instagram links with links to this instance (blank to disable). default is "" + preferences.replaceTwitter = mkDomain "nitter"; # Replace Twitter links with links to this instance (blank to disable). default is "" + preferences.replaceYouTube = lib.mkIf config.services.invidious.enable (mkDomain "invidious"); # Replace YouTube links with links to this instance (blank to disable). default is "" + settings = lib.mkIf config.services.libreddit.enable { + Preferences.replaceReddit = (mkDomain "libreddit"); # Replace Reddit links with links to this instance (blank to disable). default is "" + }; + #preferences.stickyProfile = ; # Make profile sidebar stick to top. default is true + preferences.theme = "Twitter Dark"; # Instance theme. default is "Nitter" + }; + services.nginx.virtualHosts.${mkDomain "nitter"} = lib.mkIf config.services.nitter.enable { + forceSSL = true; # addSSL = true; + enableACME = true; #useACMEHost = acmeDomain; + locations."/" = { + proxyPass = "http://127.0.0.1:${toString config.services.nitter.server.port}"; + proxyWebsockets = true; + }; + }; + + + # Invidious + # An open source alternative front-end to YouTube + + services.invidious = { + enable = true; + domain = mkDomain "invidious"; + port = 4765; + settings = { + host_binding = "127.0.0.1"; + external_port = 443; + https_only = true; + statistics_enabled = false; # api endpoint required for public instances + registration_enabled = false; + login_enabled = false; + #admins = ["pbsds"]; + banner = "spis meg"; + default_user_preferences = { + #feed_menu = ["Popular", "Trending", "Subscriptions", "Playlists"] + feed_menu = ["Trending" "Subscriptions" "Playlists"]; + default_home = "Trending"; + }; + }; + }; + services.nginx.virtualHosts.${mkDomain "invidious"} = lib.mkIf config.services.invidious.enable { + forceSSL = true; # addSSL = true; + enableACME = true; #useACMEHost = acmeDomain; + locations."/" = { + proxyPass = "http://127.0.0.1:${toString config.services.invidious.port}"; + proxyWebsockets = true; + }; + }; + + + # Libreddit + # Private front-end for Reddit + + services.libreddit = { + enable = true; + address = "127.0.0.1"; + port = 4876; + }; + systemd.services.libreddit.environment = lib.mkIf config.services.libreddit.enable { + # https://github.com/spikecodes/libreddit#change-default-settings= + # TODO: merge my module addition + LIBREDDIT_DEFAULT_THEME = "gold"; + }; + services.nginx.virtualHosts.${mkDomain "libreddit"} = lib.mkIf config.services.libreddit.enable { + forceSSL = true; # addSSL = true; + enableACME = true; #useACMEHost = acmeDomain; + locations."/" = { + proxyPass = "http://127.0.0.1:${toString config.services.libreddit.port}"; + proxyWebsockets = true; + }; + }; + + + # paperless-ngx + # A supercharged version of paperless: scan, index, and archive all of your physical documents + 
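+  # first run: create the admin user, e.g.
+  #   sudo -u paperless /var/lib/paperless/paperless-manage createsuperuser
+  # (or point the passwordfile option below at a secret)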
+ services.paperless = { + enable = true; + #package = pkgs.paperless-ngx; + #port = 28981; + #address = "localhost"; + #passwordfile = null; # file contining the superuser 'admin' password, optionally set with `${datadir}/paperless-manage createsuperuser` + #datadir = "/var/lib/paperless"; + #mediadir = "${datadir}/media"; + #consumptiondir = "${datadir}/consume"; # Directory from which new documents are imported. (TODO: zotero) + #extraconfig = {}; + #consumptiondirispublic = false; # Whether all users can write to the consumption dir + }; + services.nginx.virtualHosts.${mkDomain "paperless"} = lib.mkIf config.services.paperless.enable { + forceSSL = true; # addSSL = true; + enableACME = true; #useACMEHost = acmeDomain; + locations."/" = { + proxyPass = "http://127.0.0.1:${toString config.services.paperless.port}"; + proxyWebsockets = true; + }; + }; + + + # Netdata + # Real-time performance monitoring tool + + services.netdata = { + enable = true; + #python.enable = false; # default is true + #python.extraPackages = ps: []; + #config = { # https://github.com/netdata/netdata/blob/master/daemon/config/README.md + # hostname = ""; + # port = 19999; + #}; + #configDir = {}; + #extraPluginPaths = []; + }; + services.nginx.virtualHosts.${mkDomain "netdata"} = lib.mkIf config.services.netdata.enable { + forceSSL = true; # addSSL = true; + enableACME = true; #useACMEHost = acmeDomain; + locations."/" = { + proxyPass = "http://127.0.0.1:19999"; + proxyWebsockets = true; + }; + }; + + + # upterm + # Secure terminal-session sharing + + services.uptermd = { + enable = false; + openFirewall = true; + #listenAddress # default is "[::]"; + #port = 2222; # default is 2222, uses ssh + #extraFlags + #hostKey = null; + }; + + + # thelunge + # The self-hosted Web IRC client + + services.thelounge = { + # configure user accounts by using the 'thelounge' command, or by adding entries to /var/lib/thelounge/users + enable = true; + public = false; + port = 5876; + # theLoungePlugins.themes is view of nodePackages_latest.thelounge-theme-* + # theLoungePlugins.plugins is view of nodePackages_latest.thelounge-plugin-* + plugins = with pkgs.theLoungePlugins; + (with lib; attrValues (filterAttrs (name: _: name != "recurseForDerivations") themes)) + ++ [ + #plugins.giphy + #plugins.shortcuts + plugins.closepms + ]; + extraConfig.theme = "One Dark"; + extraConfig.fileUpload.enable = true; + extraConfig.fileUpload.baseUrl = "${mkDomain "thelounge"}"; + }; + services.nginx.virtualHosts.${mkDomain "thelounge"} = lib.mkIf config.services.thelounge.enable { + forceSSL = true; # addSSL = true; + enableACME = true; #useACMEHost = acmeDomain; + locations."/" = { + proxyPass = "http://127.0.0.1:${toString config.services.thelounge.port}"; + proxyWebsockets = true; + }; + }; + + + # Convos + # The simplest way to use IRC in your browser + + services.convos = { + enable = false; # user registration is borken. new major version (7) in unstable. 
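+    # when convos 7 lands this could follow the same pattern as nitter above,
+    # i.e. package = pkgs.unstable.convos (assuming the module exposes a package option, untested)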
+ reverseProxy = true; + listenAddress = "127.0.0.1"; + listenPort = 44649; + }; + services.nginx.virtualHosts.${mkDomain "convos"} = lib.mkIf config.services.convos.enable { + forceSSL = true; # addSSL = true; + enableACME = true; #useACMEHost = acmeDomain; + locations."/" = { + proxyPass = "http://127.0.0.1:${toString config.services.convos.listenPort}"; + proxyWebsockets = true; + extraConfig = '' + #proxy_redirect off; + client_max_body_size 0; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection "upgrade"; + proxy_set_header Host $host; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Request-Base "$scheme://$host/"; + #proxy_set_header X-Real-IP $remote_addr; + #proxy_set_header REMOTE-HOST $remote_addr; + ''; + }; + }; + + + # Roundcube + # Open Source Webmail Software + + services.roundcube = { + enable = true; + hostName = mkDomain "roundcube"; + plugins = [ + "archive" + "zipdownload" + "managesieve" + ]; + extraConfig = '' + $config['product_name'] = 'Spis meg'; + $config['skin_logo'] = [ + #'elastic:login' => 'https://links.pbsds.net/img/piuy_render.png', + #'elastic:*[small]' => 'https://links.pbsds.net/img/piuy_render.png', + 'elastic:*' => 'https://links.pbsds.net/img/piuy_render.png', + #'elastic:*' => 'https://links.pbsds.net/img/nox.png', + ]; + #$config['blankpage_url'] = '/something.html' # TODO <- + $config['default_host'] = [ + 'tls://imap.fyrkat.no' => 'Fyrkat', + 'tls://imap.pvv.ntnu.no' => 'PVV', + 'tls://imap.nvg.ntnu.no' => 'NVG', + ]; + $config['smtp_server'] = [ + 'imap.fyrkat.no' => 'tls://smtp.fyrkat.no', + 'imap.pvv.ntnu.no' => 'tls://smtp.pvv.ntnu.no', + 'imap.nvg.ntnu.no' => 'tls://smtp.nvg.ntnu.no', + ]; + # plugins/managesieve/config.inc.php.dist + $config['managesieve_host'] = 'tls://%h'; + ''; + }; + services.nginx.virtualHosts.${mkDomain "roundcube"} = lib.mkIf config.services.roundcube.enable { + forceSSL = true; # addSSL = true; + enableACME = true; #useACMEHost = acmeDomain; + locations."/skins/elastic/images/logo.svg" = { + #alias = "/path/to/file"; + #return = "302 https://links.pbsds.net/img/piuy_render.png"; + return = "302 https://links.pbsds.net/img/nox.png"; + }; + }; + + + # Galene + # Videoconferencing server that is easy to deploy, written in Go + + services.galene = { + #enable = true; + insecure = true; # reverse proxy instead, but can i feed it the acme cert? + httpAddress = "127.0.0.1"; + httpPort = 3975; + }; + services.nginx.virtualHosts.${mkDomain "galene"} = lib.mkIf config.services.galene.enable { + forceSSL = true; # addSSL = true; + enableACME = true; #useACMEHost = acmeDomain; + locations."/" = { + proxyPass = "http://127.0.0.1:${toString config.services.galene.httpPort}"; + proxyWebsockets = true; + }; + }; + #networking.firewall = lib.mkIf config.service.jellyfin.enable { + # allowedTCPPorts = [ 1194 ]; + # allowedUDPPorts = [ 1194 ]; # TODO: Only if behind a NAT? 
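+  # FIXME: the mkIf above probably meant config.services.galene.enable
+  # ("services", plural - and galene rather than jellyfin)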
+ #}; + + + # Jitsi meet + + services.jitsi-meet = { + #enable = true; + hostName = mkDomain "jitsi-meet"; + config = { + # https://github.com/jitsi/jitsi-meet/blob/master/config.js + #enableWelcomePage = false; + defaultLang = "nb"; + }; + interfaceConfig = { + # https://github.com/jitsi/jitsi-meet/blob/master/interface_config.js" + APP_NAME = "Spis meg"; + + # SHOW_JITSI_WATERMARK = false; + # SHOW_WATERMARK_FOR_GUESTS = false; + }; + jibri.enable = false; # record in a headless chrome instance + nginx.enable = true; # force ssl, acme, lots of routing rules + }; + + + # Rocketchat + # A self-hosted discord/slack alternative + # TODO, docker exists, but no nixos module + + + # Mattermost + # Open-source, self-hosted Slack-alternative + + services.mattermost = { + enable = true; + # will create and use a psql db + listenAddress = "[::1]:8065"; + siteName = "Spis meg"; + siteUrl = "https://${mkDomain "mattermost"}"; + #mutableConfig = true; # default is false, if true, see also "preferNixConfig" + extraConfig = { + # https://docs.mattermost.com/configure/configuration-settings.html#reporting + # TODO: smtp + }; + matterircd = { + #enable = true; # default is false + parameters = [ + "-mmserver chat.example.com" + "-bind [::]:6667" + ]; + }; + }; + services.nginx.virtualHosts.${mkDomain "mattermost"} = lib.mkIf config.services.mattermost.enable { + forceSSL = true; # addSSL = true; + enableACME = true; #useACMEHost = acmeDomain; + locations."/" = { + proxyPass = "http://${config.services.mattermost.listenAddress}"; + proxyWebsockets = true; + }; + }; + + + # hedgedoc + # Realtime collaborative markdown notes on all platforms + + services.hedgedoc = { + #enable = true; # FIXME: make it load + configuration.host = "127.0.0.1"; + configuration.port = 44776; + configuration.db.dialect = "sqlite"; + configuration.db.storage = "${config.services.hedgedoc.workDir}/db.hedgedoc.sqlite"; + configuration.domain = mkDomain "hedgedoc"; + configuration.allowAnonymous = true; + configuration.allowEmailRegister = false; # default is true + configuration.allowAnonymousEdits = false; # default is false + configuration.protocolUseSSL = true; # https prefix + configuration.useSSL = false; # nginx terminates ssl + #configuration.csp = {TODO}; # content security policy + #configuration.useCDN = true; + #configuration.debug = true; + # there are also a metric fuckton of integration services, like github, twitter, minio, mattermost, dropbox etc. 
+ # there are also auth options, like ldap, saml and oauth2 + }; + services.nginx.virtualHosts.${mkDomain "hedgedoc"} = lib.mkIf config.services.hedgedoc.enable { + forceSSL = true; # addSSL = true; + enableACME = true; #useACMEHost = acmeDomain; + locations."/" = { + proxyPass = "http://127.0.0.1:${toString config.services.hedgedoc.configuration.port}"; + proxyWebsockets = true; + # TODO: proxy headers: + # https://docs.hedgedoc.org/guides/reverse-proxy/ + }; + }; + + + # Cinny + # Yet another Matrix client for the web + + services.nginx.virtualHosts.${mkDomain "cinny"} = { + forceSSL = true; # addSSL = true; + enableACME = true; #useACMEHost = acmeDomain; + root = pkgs.unstable.cinny.override { + conf = { + defaultHomeserver = 0; + homeserverList = [ + "pvv.ntnu.no" + "matrix.org" + "dodsorf.as" + ]; + }; + }; + }; + + + # Element-web + # A glossy Matrix collaboration client for the web + + services.nginx.virtualHosts.${mkDomain "element"} = { + forceSSL = true; # addSSL = true; + enableACME = true; #useACMEHost = acmeDomain; + root = pkgs.element-web.override { + conf = { + # https://github.com/vector-im/element-web/blob/develop/docs/config.md + # https://github.com/vector-im/element-web/blob/develop/config.sample.json + # https://github.com/vector-im/element-web/blob/develop/docs/labs.md + brand = "spis meg"; + default_country_code = "NO"; + default_server_config."m.homeserver" = { + server_name = "pvv.ntnu.no"; + base_url = "https://matrix.pvv.ntnu.no"; + }; + roomDirectory.servers = [ + "pvv.ntnu.no" + "matrix.org" + "nixos.org" + "agdersam.no" + "trygve.me" + "utwente.io" + ]; + disable_guests = true; + showLabsSettings = true; + features.feature_pinning = "labs"; + features.feature_custom_status = "labs"; + features.feature_custom_tags = "labs"; + features.feature_state_counters = "labs"; + features.feature_latex_maths = "labs"; + setting_defaults.breadcrumbs = true; + UIFeature.urlPreviews = true; + UIFeature.shareQrCode = true; + UIFeature.registration = false; + }; + }; + }; + + + # vaultwarden + # Unofficial Bitwarden compatible server written in Rust + + services.vaultwarden = { + enable = true; + config = { + # https://github.com/dani-garcia/vaultwarden/blob/1.24.0/.env.template + # camelCase is converted to UPPER_SNAKE_CASE + domain = "https://${mkDomain "vaultwarden"}"; # port is supported + signupsAllowed = false; + # rocket is the http library + rocketAddress = "127.0.0.1"; + rocketPort = 8222; + #rocketWorkers = 10; + rocketLog = "critical"; + }; + #dbBackend = "sqlite"; + # backupDir = ""; # TODO + }; + services.nginx.virtualHosts.${mkDomain "vaultwarden"} = lib.mkIf config.services.vaultwarden.enable { + forceSSL = true; # addSSL = true; + enableACME = true; #useACMEHost = acmeDomain; + locations."/" = { + proxyPass = "http://127.0.0.1:${toString config.services.vaultwarden.config.rocketPort}"; + proxyWebsockets = true; + }; + }; + + + + # matrix-synapse + /** / + services.matrix-synapse = { + enable = true; + settings = { + server_name = "pbsds.net" + public_baseurl = mkDomain "matrix"; + url_preview_enabled = false; + max_upload_size = "100M"; + trusted_key_servers = [ + {server_name = "matrix.org";} + {server_name = "dodsorf.as";} + {server_name = "pvv.ntnu.no";} + ]; + listeners = [ + { + bind_addresses = [ + "127.0.0.1" + ]; + port = 8008; + resources = [ + { + compress = true; + names = [ + "client" + ]; + } + { + compress = false; + names = [ + "federation" + ]; + } + ]; + tls = false; + type = "http"; + x_forwarded = true; + } + ]; + }; + }; + 
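+  # FIXME before enabling: the vhost below misspells the service ("matrix-synaps")
+  # and these values live under settings, e.g.
+  #   (builtins.elemAt config.services.matrix-synapse.settings.listeners 0).port
+  #   config.services.matrix-synapse.settings.max_upload_size
+  # (builtins.elemAt takes the list first, then the index; also note server_name
+  # above is missing its ";")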
services.nginx.virtualHosts.${mkDomain "matrix"} = lib.mkIf config.services.matrix-synapse.enable { + forceSSL = true; # addSSL = true; + enableACME = true; #useACMEHost = acmeDomain; + locations."/_matrix" = { + proxyPass = "http://127.0.0.1:${toString (builtins.elemAt 0 config.services.matrix-synaps.listeners).port}"; + #proxyWebsockets = true; + extraConfig = '' + client_max_body_size ${config.services.matrix-synaps.max_upload_size}; + ''; + }; + locations."/_synapse/client" = { + proxyPass = "http://127.0.0.1:${toString (builtins.elemAt 0 config.services.matrix-synaps.listeners).port}/_synapse/client"; + #proxyWebsockets = true; + }; + }; + /**/ + + +} diff --git a/profiles/nas/modules/kukkee.nix b/profiles/nas/modules/kukkee.nix new file mode 100644 index 0000000..38ef174 --- /dev/null +++ b/profiles/nas/modules/kukkee.nix @@ -0,0 +1,160 @@ +{ lib, pkgs, config, ... }: + +let + cfg = config.services.kukkee; +in with builtins; { + options.services.kukkee = with lib; { + + enable = mkEnableOption "kukkee service"; + + package = mkPackageOption pkgs "kukkee" { }; + + user = mkOption { + type = types.str; + default = "kukkee"; + description = "User under which Kukkee runs."; + }; + + group = mkOption { + type = types.str; + default = "kukkee"; + description = "Group under which Kukkee runs."; + }; + + listen = mkOption { + type = types.str; + default = "127.0.0.1"; + description = "Which address to listen on."; + }; + + port = mkOption { + type = types.port; + default = 3000; + description = "Which port Kukkee should listen to for HTTP."; + }; + + extraArgs = mkOption { + type = types.listOf types.str; + default = []; + description = "Extra command-line arguments for the next.js runtime."; + }; + + baseUrl = mkOption { + type = types.str; + default = "http://localhost:${cfg.port}"; + description = "The base URL for the site"; + }; + + openFirewall = mkOption { + type = types.bool; + default = false; + description = '' + Open the configured port in the firewall for the Kukkee server. + Preferably the Kukkee server is instead put behind a reverse proxy. + ''; + }; + + mongodb.enable = mkOption { + type = types.bool; + default = true; + description = "Whether to configure a local MongoDB instance."; + }; + + mongodb.uri = mkOption { + type = types.str; + default = "mongodb://127.0.0.1:27017/kukkeePolls"; + example = "mongodb+srv://:@.aewjs.mongodb.net/?retryWrites=true&w=majority"; + description = '' + Mongodb connection string. MongoDB databases are normally + created automatically upon first write. 
+ ''; + }; + + }; + + config = lib.mkMerge [ + (lib.mkIf (cfg.enable && cfg.mongodb.enable) { + services.mongodb.enable = true; + systemd.services.kukkee.after = [ "mongodb.service" ]; + systemd.services.kukkee.requires = [ "mongodb.service" ]; + }) + (lib.mkIf cfg.enable { + systemd.services.kukkee = { + description = "Kukkee Server"; + after = [ "network.target" ]; + wantedBy = [ "multi-user.target" ]; + + environment = { + # https://github.com/AnandBaburajan/Kukkee/blob/270c8ed421c8f1100a845958430e1ebe61d86d5a/.env.example + NEXT_MONGODB_URI = cfg.mongodb.uri; + NEXT_PUBLIC_BASE_URL = cfg.baseUrl; + NEXT_PUBLIC_ENCRYPTION_KEY = "2a148b84dcec756c59ab96d450a79372"; # TODO + NEXT_PUBLIC_ENCRYPTION_IV = "0d88ec0887f614b6"; # TODO + }; + + serviceConfig = let + args = map lib.strings.escapeShellArg [ + "--hostname" cfg.listen + "--port" cfg.port + ] ++ cfg.extraArgs; + in rec { + User = cfg.user; + Group = cfg.group; + ExecStart = "${cfg.package}/bin/kukkee " + (lib.strings.concatStringsSep " " args); + Restart = "on-failure"; + + # Security options: + + NoNewPrivileges = true; + AmbientCapabilities = ""; + CapabilityBoundingSet = ""; + + DeviceAllow = ""; + LockPersonality = true; + PrivateTmp = true; + PrivateDevices = true; + PrivateUsers = true; + + ProtectClock = true; + ProtectControlGroups = true; + ProtectHostname = true; + ProtectKernelLogs = true; + ProtectKernelModules = true; + ProtectKernelTunables = true; + + RemoveIPC = true; + + RestrictNamespaces = true; + RestrictAddressFamilies = [ "AF_NETLINK" "AF_INET" "AF_INET6" "AF_UNIX" ]; + RestrictRealtime = true; + RestrictSUIDSGID = true; + + SystemCallArchitectures = "native"; + SystemCallErrorNumber = "EPERM"; + SystemCallFilter = [ + "@system-service" + "~@cpu-emulation" "~@debug" "~@keyring" "~@memlock" "~@obsolete" "~@privileged" "~@setuid" + ]; + }; + }; + + users.users = lib.mkIf (cfg.user == "kukkee") { + kukkee = { + group = cfg.group; + isSystemUser = true; + }; + }; + + users.groups = lib.mkIf (cfg.group == "kukkee") { + kukkee = {}; + }; + + networking.firewall = lib.mkIf cfg.openFirewall { + allowedTCPPorts = [ cfg.port ]; + }; + }) + ]; + + meta.maintainers = with lib.maintainers; [ pbsds ]; +} + diff --git a/profiles/nas/modules/webhook.nix b/profiles/nas/modules/webhook.nix new file mode 100644 index 0000000..70052e2 --- /dev/null +++ b/profiles/nas/modules/webhook.nix @@ -0,0 +1,140 @@ +{ lib, pkgs, config, ... }: + +#with builtins; + +let + lib_ = lib; +in +let + cfg = config.services.webhook; + hooksFormat = pkgs.formats.json {}; + lib = lib_ // { mdDoc = x: x; }; # HACK + +in { + options.services.webhook = with lib; { + + enable = mkEnableOption "webhook service"; + + package = mkPackageOption pkgs "webhook" { }; + + user = mkOption { + type = types.str; + default = "webhook"; + description = lib.mdDoc "User under which Webhook runs."; + }; + + group = mkOption { + type = types.str; + default = "webhook"; + description = lib.mdDoc "Group under which Webhook runs."; + }; + + listenHost = mkOption { + type = types.str; + default = "127.0.0.1"; + description = lib.mdDoc "Which address Webhook should listen to for HTTP."; + }; + + listenPort = mkOption { + type = types.port; + default = 8080; + description = lib.mdDoc "Which port Webhook should listen to for HTTP."; + }; + + openFirewall = mkOption { + type = types.bool; + default = false; + description = lib.mdDoc '' + Open the configured ports in the firewall for the Webhook server. + Preferably the Webhook server is instead put behind a reverse proxy. 
+ ''; + }; + + urlPrefix = mkOption { + type = types.str; + default = "hooks"; + description = lib.mdDoc '' + Url prefix to use for served hooks. + `http://listen:port/PREFIX/:hook-id` + ''; + }; + + httpMethods = mkOption { + type = types.listOf types.str; + default = ["POST"]; + defaultText = literalExpression ''["POST"]''; + description = lib.mdDoc "Default allowed HTTP methods"; + }; + + verbose = mkOption { + type = types.bool; + default = true; + description = lib.mdDoc "Whether to log events or not."; + }; + + extraArgs = mkOption { + type = types.listOf types.str; + default = []; + description = lib.mdDoc '' + Extra command-line arguments. + If you want to set CORS headers, you can set [ "-header" "name=value" ] + to the appropriate CORS headers to passed along with each response. + ''; + }; + + settings = mkOption { + type = hooksFormat.type; + default = []; + example = lib.literalExpression '' + [ + { + id = "my-webhook"; + execute-command = pkgs.writeShellScript "handle-my-webhook.sh" '${""}' + echo "foobar" + '${""}'; + } + ] + ''; + description = lib.mdDoc '' + The configured hooks for Webhook to serve. + Here is a collection of hook examples: + + ''; + }; + + }; + + config = lib.mkIf cfg.enable { + + systemd.services.webhook = { + description = lib.mdDoc "Webhook Server"; + after = [ "network.target" ]; + wantedBy = [ "multi-user.target" ]; + + serviceConfig = let + args = [ + "-ip" cfg.listenHost + "-port" cfg.listenPort + "-http-methods" (lib.strings.concatStringsSep "," cfg.httpMethods) + "-urlprefix" cfg.urlPrefix + "-hooks" (hooksFormat.generate "hooks.json" cfg.settings) + ] ++ lib.optional cfg.verbose "-verbose" + ++ cfg.extraArgs; + in rec { + User = cfg.user; + Group = cfg.group; + DynamicUser = cfg.user == "webhook"; + ExecStart = "${cfg.package}/bin/webhook " + (lib.strings.escapeShellArgs args); + Restart = "on-failure"; + }; + }; + + networking.firewall = lib.mkIf cfg.openFirewall { + allowedTCPPorts = [ cfg.listenPort ]; + }; + + }; + + meta.maintainers = with lib.maintainers; [ pbsds ]; +} + diff --git a/profiles/websites/default.nix b/profiles/websites/default.nix new file mode 100644 index 0000000..f5ba87a --- /dev/null +++ b/profiles/websites/default.nix @@ -0,0 +1,254 @@ +{ config, pkgs, ... 
}: + +let + lib = pkgs.lib; + domain = "${config.networking.hostName}.${config.networking.domain}"; + mkDomain = subname: "${subname}.${domain}"; + +in { + + #services.nginx.enable = true; + + imports = [ + ./services/pdoc.nix + ]; + + + # links.pbsds.net + + services.nginx.virtualHosts."links.pbsds.net" = let + links-pbsds-net = pkgs.fetchFromGitea rec { + name = repo; + domain = "gitea.noximilien.pbsds.net"; + owner = "pbsds"; + repo = "links.pbsds.net"; + rev = "fd980f4610f8027b4fc89c506542009f09504085"; + hash = "sha256-Iz/lfLkdCLJyyZ/PM9+VCkCG5lYSb9/i4x0ZhranBxc="; + }; + in { + #serverAliases = map mkDomain [ "links" ]; + forceSSL = true; # addSSL = true; + enableACME = true; #useACMEHost = acmeDomain; + root = "${links-pbsds-net}"; + }; + + # refleksjon.no + + services.nginx.virtualHosts.${mkDomain "refleksjon"} = let + refleksjon-net = pkgs.fetchFromGitea rec { + name = repo; + domain = "gitea.noximilien.pbsds.net"; + owner = "pbsds"; + repo = "refleksjon.net"; + rev = "c1b91e369bf411e44534334595d4481cb59bd129"; + sha256 = "O+lNqD2LuESKM+S+AljF2SzIxzK05xdZqiLhylTQ2ls="; + }; + in { + forceSSL = true; # addSSL = true; + enableACME = true; #useACMEHost = acmeDomain; + root = "${refleksjon-net}/www.refleksjon.net"; + }; + + + # roroslyd.no + + services.nginx.virtualHosts.${mkDomain "roroslyd"} = let + roroslyd-no = pkgs.fetchFromGitea rec { + name = repo; + domain = "gitea.noximilien.pbsds.net"; + owner = "pbsds"; + repo = "roroslyd.no"; + #rev = "v${version}"; + rev = "fb7b0a7e70754cf368de7d7c469dabe71b2f1c78"; + sha256 = "Rud5bBUuPgIC5UAGtyuYhUtXhN174UCWDoLUWWc/n6U="; + }; + in { + forceSSL = true; # addSSL = true; + enableACME = true; #useACMEHost = acmeDomain; + root = "${roroslyd-no}/www.roroslyd.no"; + }; + + # trivial gradios + + /** / + systemd.services.trivial-gradios-heritage-graph = { + description = pkgs.python3Packages.trivial-gradios.meta.description; + after = [ "network.target" ]; + wantedBy = [ "multi-user.target" ]; + serviceConfig = rec { + User = "trivial-gradios"; + Group = "trivial-gradios"; + DynamicUser = true; + StateDirectory = "trivial-gradios-heritage-graph"; + WorkingDirectory = "/var/lib/${StateDirectory}"; + ExecStart = "${pkgs.python3Packages.trivial-gradios}/bin/trivial-gradios-heritage-graph --port 37001"; + Restart = "on-failure"; + }; + }; + services.nginx.virtualHosts.${mkDomain "gradio"} = { + forceSSL = true; # addSSL = true; + enableACME = true; #useACMEHost = acmeDomain; + locations."/" = { + root = pkgs.writeTextDir "index.html" '' + + + +
+          <table>
+            <tr><th>name</th><th>description</th></tr>
+            <tr>
+              <td>heritage-graph</td>
+              <td>A simple tool to create a directed ancestry graph.</td>
+            </tr>
+          </table>
+ ''; + }; + locations."/heritage-graph/" = { + proxyPass = "http://127.0.0.1:37001"; + proxyWebsockets = true; + extraConfig = '' + rewrite ^/heritage-graph(/.*)$ $1 break; + ''; + }; + + }; + /**/ + + + # CensorDodge + # A lightweight and customisable web proxy + /** / + services.phpfpm.pools.censordodge = { + user = "censordodge"; + group = "censordodge"; + settings = { + "listen.owner" = config.services.nginx.user; + "listen.group" = config.services.nginx.group; + "pm" = "dynamic"; + "pm.max_children" = "32"; + "pm.start_servers" = "2"; + "pm.min_spare_servers" = "2"; + "pm.max_spare_servers" = "4"; + "pm.max_requests" = "500"; + }; + }; + services.nginx.virtualHosts.${mkDomain "censordodge"} = { + forceSSL = true; # addSSL = true; + enableACME = true; #useACMEHost = acmeDomain; + root = pkgs.fetchFromGitHub { + owner = "ryanmab"; + repo = "CensorDodge"; + rev = "2480e8269190ca8618e41dc581f9d55f4ce9f333"; + sha256 = "8R3lyxF22HXui4pJytMcqwwa5TDXIJb6fWII934IhEA="; + }; + extraConfig = '' + index index.php; + ''; + locations."/".extraConfig = '' + try_files $uri $uri/ /index.php?$args; + ''; + locations."~ \.php$".extraConfig = '' + include ${config.services.nginx.package}/conf/fastcgi.conf; + fastcgi_pass unix:${config.services.phpfpm.pools.censordodge.socket}; + fastcgi_buffers 16 16k; + fastcgi_buffer_size 32k; + ''; + }; + users.users.censordodge = { + isSystemUser = true; + group = "censordodge"; + }; + users.groups.censordodge = {}; + /**/ + + + # OpenSpeedtTest + # Pure HTML5 Network Performance Estimation Tool + + /** / + services.nginx.virtualHosts.${mkDomain "openspeedtest"} = let + cfg = config.services.nginx.virtualHosts.${mkDomain "openspeedtest"}; + openspeedtest = pkgs.fetchFromGitHub rec { + name = "${owner}-unstable-2022-07-02"; + + owner = "openspeedtest"; + repo = "Speed-Test"; + #rev = "v${version}"; + rev = "59eb7367ede5555f7516ebb8eeeb65245bc5a6e5"; + sha256 = "yzvulzgBUri+sU9WxZrLKH/T+mlZu9G2zucv8t/fZdY="; + postFetch = '' + rm $out/README.md + rm $out/License.md + rm $out/.gitignore + rm $out/hosted.html + ''; + }; + in { + forceSSL = true; # addSSL = true; + enableACME = true; #useACMEHost = acmeDomain; + http2 = false; + root = "${openspeedtest}"; + extraConfig = '' + #access_log off; + #error_log /dev/null; #Disable this for Windows Nginx. + #log_not_found off; + gzip off; + fastcgi_read_timeout 999; + server_tokens off; + tcp_nodelay on; + tcp_nopush on; + sendfile on; + open_file_cache max=200000 inactive=20s; + open_file_cache_valid 30s; + open_file_cache_min_uses 2; + open_file_cache_errors off; + ''; + + locations."/".extraConfig = lib.mkIf false '' + if_modified_since off; + expires off; + etag off; + + if ($request_method != OPTIONS ) { + add_header 'Access-Control-Allow-Origin' "*" always; + add_header 'Access-Control-Allow-Headers' 'Accept,Authorization,Cache-Control,Content-Type,DNT,If-Modified-Since,Keep-Alive,Origin,User-Agent,X-Mx-ReqToken,X-Requested-With' always; + add_header 'Access-Control-Allow-Methods' 'GET, POST, OPTIONS' always; + #Very Very Important! You SHOULD send no-store from server for Google Chrome. 
+ add_header 'Cache-Control' 'no-store, no-cache, max-age=0, no-transform'; + add_header 'Last-Modified' $date_gmt; + } + if ($request_method = OPTIONS ) { + add_header 'Access-Control-Allow-Origin' "$http_origin" always; + add_header 'Access-Control-Allow-Headers' 'Accept,Authorization,Cache-Control,Content-Type,DNT,If-Modified-Since,Keep-Alive,Origin,User-Agent,X-Mx-ReqToken,X-Requested-With' always; + add_header 'Access-Control-Allow-Methods' "GET, POST, OPTIONS" always; + add_header 'Access-Control-Allow-Credentials' "true"; + return 204; + } + ''; + # IF and Only if you Enabled HTTP2 otherwise never enable the following + # HTTP2 will return 200 withot waiting for upload to complete. it's smart but we don't need that to happen here when testing upload speed on HTTP2. + locations."/upload.bin".extraConfig = '' + #proxy_set_header Host $host; + proxy_pass http://127.0.0.1:80/upload.bin; + ''; + locations."~* ^.+\.(?:css|cur|js|jpe?g|gif|htc|ico|png|html|xml|otf|ttf|eot|woff|woff2|svg)$".extraConfig = lib.mkIf false '' + #access_log off; + expires 365d; + add_header 'Cache-Control' public; + add_header 'Vary' Accept-Encoding; + tcp_nodelay off; + open_file_cache max=3000 inactive=120s; + open_file_cache_valid 45s; + open_file_cache_min_uses 2; + open_file_cache_errors off; + gzip on; + gzip_disable "msie6"; + gzip_vary on; + gzip_proxied any; + gzip_comp_level 6; + gzip_buffers 16 8k; + gzip_http_version 1.1; + gzip_types text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript application/javascript; + ''; + }; + /**/ + + +} diff --git a/profiles/websites/services/pdoc.nix b/profiles/websites/services/pdoc.nix new file mode 100644 index 0000000..fd987dd --- /dev/null +++ b/profiles/websites/services/pdoc.nix @@ -0,0 +1,273 @@ +{ config, pkgs, ... 
}: + +let + lib = pkgs.lib; + domain = "${config.networking.hostName}.${config.networking.domain}"; + mkDomain = subname: "${subname}.${domain}"; + + # pdoc data + pdoc-builtins = [ + "builtins" + "os" + "array" + "sys" + "time" + "traceback" + "pathlib" + "itertools" + "functools" + "unittest" + "argparse" + "asyncio" + "textwrap" + "collections" + "configparser" + "concurrent" + "contextlib" + "operator" + "pickle" # TODO: marsmellow or whatever + "copy" + "ctypes" + "pprint" + "shlex" + "re" + "abc" + "ast" + "random" + "shutil" + "sqlite3" + "subprocess" + "statistics" + "string" + "tarfile" + "typing" + "uuid" + "warnings" + "wave" + "dataclasses" + "glob" + "gzip" + "inspect" + "json" + "base64" + "zipfile" + ]; + pdoc-modules = [ + {name="more-itertools"; literal="more_itertools";} + "altair" + "pygal" + "vispy" + #"ggplot" + "seaborn" + "bokeh" + "plotly" + "tabulate" + "wavefile" + "moderngl" + "pydantic" + "typer" + "ptpython" + "colorama" + {name="pyjwt"; literal="jwt";} + "zipp" + "aiofiles" + "aafigure" + "urllib3" + "tesserocr" + "trio" + "starlette" + "pyverilog" + "nixpkgs" + "wavedrom" + "httpx" + "pyquery" + "mpv" + {name="beautifulsoup4"; literal="bs4";} + "hid" + #{name="hidapi"; literal="hid";} + "sanic" + "paramiko" + "pydub" + "aiohttp" + "rtoml" + "redis" + "numpy" + "domeneshop" + "munch" + "migen" + "amaranth" + "click" + "attrs" + "graphviz" + "baron" + "redbaron" + "fastapi" + "pytest" + #"pyglet" # pyglet.com fails, windows only + #"pygame" # pygame.movie fails on pdoc3, pdoc hangs + "plotly" + "peewee" + "parsel" + "pandas" + "mutmut" + "mlflow" + "meshio" + #"einops" # depends on tensorflow, which is broken ATM + "aiodns" + "json5" + "seaborn" + "matplotlib" + "dash" + "rarfile" + "pyramid" + "pygtail" + "codecov" + "nbconvert" + "humanfriendly" + "pendulum" + "jsonpickle" + "cachetools" + "wrapt" + "lxml" + "chardet" + "yarl" + "frozenlist" + "itsdangerous" + "xmltodict" + {name="cached-property"; literal="cached_property";} + "toolz" + "aioitertools" + "coconut" + "asyncpg" #"aiopg" + {name="libsass"; literal="sass";} + {name="pytorch"; literal="torch";} + {name="pytorch-lightning"; literal="pytorch_lightning";} + {name="pillow"; literal="PIL";} + "trio" + "tqdm" + "rich" + "pudb" + "pony" + "mido" + "jedi" + "h5py" + "atom" + "toml" + {name="pyyaml"; literal="yaml";} + "jinja2" + "requests" + "h5py" + "imageio" + "pygments" + "trimesh" + #"faiss" + #"geomloss" + #"mesh_to_sdf" + #"pyrender" + ]; + toName = x: if builtins.isString x then x else x.name; + toLiteral = x: if builtins.isString x then x else x.literal; + + + mkPdoc = use-pdoc3: isBuiltin: pkg: let + description = if isBuiltin + then "builtin" + else pkgs.python3Packages.${toName pkg}.meta.description; + version = if isBuiltin + then "-" + else pkgs.python3Packages.${toName pkg}.version; + homepage = if isBuiltin + then "https://docs.python.org/3/library/${toLiteral pkg}.html" + else pkgs.python3Packages.${toName pkg}.meta.homepage or "-"; + doc = pkgs.runCommand "pdoc${if use-pdoc3 then "3" else ""}-${toName pkg}-docs" { + nativeBuildInputs = (if use-pdoc3 + then [pkgs.python3Packages.pdoc3] + else [pkgs.python3Packages.pdoc]) + ++ lib.optional (!isBuiltin) (builtins.getAttr (toName pkg) pkgs.python3Packages); + NAME = toName pkg; + LITERAL = toLiteral pkg; + # TODO: license + # TODO: build html with something better than bash + } '' + ( timeout 900s ${if !use-pdoc3 + then ''pdoc --no-search --math --no-browser --output-directory $out "$LITERAL"'' + else ''pdoc3 --skip-errors --output-dir $out 
--html "$LITERAL"'' + } 2>&1 | tee $LITERAL.log ) || true + mkdir -p $out + cp $LITERAL.log $out + test -f $out/index.html && rm -v $out/index.html + + function write { + { printf "%s" "$@"; echo; } >> $out/index.part-"$LITERAL".html + } + + write "" + if test -f $out/"$LITERAL".html; then + write "$NAME" + elif test -d $out/"$LITERAL"; then + write "$NAME" + else + write "$NAME" + fi + write "${version}" + if test -s $out/$LITERAL.log; then + write "log" + else + write "-" + fi + write "${lib.escapeXML description}" + ${if homepage == "-" then '' + write "n/a" + '' else '' + write "${homepage}" + ''} + write "" + ''; + fallback = pkgs.writeTextDir "index.part-${toLiteral pkg}.html" '' + + ${toLiteral pkg} + ${version} + ⨯ + ${lib.escapeXML description} + ${if homepage == "-" then + "n/a" + else + ''${homepage}'' + } + + ''; + in if (builtins.tryEval doc.outPath).success + then doc + else fallback; + mkPdocs = use-pdoc3: pkgs.symlinkJoin { + name = "pdoc-docs"; + paths = (map (mkPdoc use-pdoc3 true) pdoc-builtins) ++ (map (mkPdoc use-pdoc3 false) pdoc-modules); + # note: globs are sorted + postBuild = '' + echo "" >> $out/index.html + echo "" >> $out/index.html + cat $out/index.part-*.html >> $out/index.html + rm $out/index.part-*.html + echo "
<tr><th>name</th><th>version</th><th>log</th><th>description</th><th>homepage</th></tr>
" >> $out/index.html + ''; + }; + +in { + + # lib.filter (x: lib.isDerivation x && (builtins.tryEval x.outPath).success) (lib.attrValues linuxPackages_latest)) + + # Pdoc + # Auto-generate API documentation for Python projects. + + services.nginx.virtualHosts.${mkDomain "pdoc"} = { + forceSSL = true; # addSSL = true; + enableACME = true; #useACMEHost = acmeDomain; + root = mkPdocs false; + }; + services.nginx.virtualHosts.${mkDomain "pdoc3"} = { + forceSSL = true; # addSSL = true; + enableACME = true; #useACMEHost = acmeDomain; + root = mkPdocs true; + }; + +} diff --git a/users/default.nix b/users/default.nix new file mode 100644 index 0000000..4046d92 --- /dev/null +++ b/users/default.nix @@ -0,0 +1,31 @@ +{ config, pkgs, lib, ... }: + +{ + # User accounts + # Don't forget to set a password with ‘passwd’! + + imports = [ + + ./pbsds + ]; + + home-manager.useGlobalPkgs = true; + + # TODO: nas stuff + # TODO: can uid mapping be done at nfs level? + users.users.pbsds.uid = 1001; + users.groups.pbsds.gid = 1001; + + users.users.jornane = { + isNormalUser = true; + uid = 1002; + description = "jornane"; + extraGroups = [ "networkmanager" "wheel" ]; # TODO: NAS stuff + + openssh.authorizedKeys.keys = [ + "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIDhGRFktIRrppVVILraEn5eTrANBIBMcpNT4qvNcd7Ut" + "ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAA/UAomSQjOHb4Im0TV70f7Jb/JpsQDd7YKHCXBmjmsrXi2dJVpw/tn+FzP4d2XJXm38hVN89yG+PQwZhf3PSHBaB4DXqFnVLFNWXTRyPPnc1U9uTEvLnZPpVJ/iv2zuS73QHrDcBdIubtdnsr239dJizUJJKwSMHszhOAN4AMYS9WivozdmyS+3o4p8mSp+tOWhq0hmVewnH0teTaHASpvE0V65xW9RGc5AWx0PgkGTXScOOf4/N8oXILn6mepODstlRKCZnBsC/LaXgJsk2+BX/Q/t4V0ytHh9iYblSavNjZZXRvygvkmV/eYAJAJ+igHubs2fEDsXfRj9J0O6JWjAmsELObCYGRbg9QkvaRq5EQgDoSW64iQUmbfB8NmYyXxg4fh0xBUrX87YkYvtHznWzD8hZkqRfj4K9Ixfk1Bsaxb5ubU3/mjGLOpZZ47zEqoen43rUxLq+eeMEQGDbq3mAcA6uX73MvBTzERrfh93rojwlUHEUDoUYyq7aN6Y9vF8/gy3KT2+pvAoUy4NDImSmJTwVcFJ+qUsAaGMECKiznte3Qn8TiD5G9nqeqCoA9edegM2N0z+ovsiXRxVqQDPh3cz/VPSsTKa8jNxhFpw4Q6KzDrtQOKXDkrhSKTxozVLYw2rYCkd2odOhjIJiN63UTNSm2z37ckKbOCqDy6LwW2ls4OzH/LOz2QDkMCwe7MYMrC66wanDhsRUZwlbSEs8aB25NB6OGg61hId3SLS8HzJ+4dmbHhciZm0oJlKRSMAqMLO6o9OVguJOl1td71rhnqAbp4UuaMqm5Zzut1ET+zkYB4t2voTuMhSrEJn1RS4hxR2rWt5jF9Nn67Mu70c0K2DE7FXqldGALC87GO7PHLTnNRg3o8FCkmVYlHNUEqHR56Incg5sC6KS9G1RL3KEHzjgzz8RjFXR5p1Qj+ZZjObVuENdWuqk7gQaxsyocCCB4pbBtF9AYDOIIGCn8rJSKUFvD8KIaTpWFsFoUXanSnAiSCT98GhfIBLbgAt4yJmegRKOML/cxplCh0z9MkNlfPdVU+LI/2RSj3NJpxd/KuR1l73IpgVNcbumXefAY95ztB/w067ZHCFlO0r+Q42NacthsMDc4Ffd+grLpo7KSmDRc+L9YdRNDgLZMbfIimHYIRRMdvEMEICXe1tUvtKBSfU1goTSXXYK2fLOBfOFIXCQponfgZ04klRjgpzCtv8juCOOrHU6r/FpIRkDNbwjWm9i8yBacZGT30bwjK8UW6JSFvDDu747f0ztKyQew8hEivOyqGDwZyrhFImasulsS0/7DB07oUQtaXJ7J8ucGsarttt02D6K8yuCh5bqEVk5Fy4Xlw==" + ]; + }; + +} diff --git a/users/pbsds/default.nix b/users/pbsds/default.nix new file mode 100644 index 0000000..38830ef --- /dev/null +++ b/users/pbsds/default.nix @@ -0,0 +1,33 @@ +{ config, pkgs, lib, ... 
}: + +{ + + users.users.pbsds = { + isNormalUser = true; + description = "pbsds"; + extraGroups = [ "pbsds" "networkmanager" "wheel" "nixbld" ]; # TODO: NAS stuff + initialHashedPassword = "$6$yNgxTHcP1UYkNwuZ$1sBehnKgPjVnDe0tSV8kyfynWpfjDzuohZX6SoTrMnYFa3/aiMOtI6JppYevl.M6qYhBIT0XBvL6TqSSFWn8B/"; + + openssh.authorizedKeys.keys = [ + "ecdsa-sha2-nistp521 AAAAE2VjZHNhLXNoYTItbmlzdHA1MjEAAAAIbmlzdHA1MjEAAACFBAClYXCu7CyStjJ0AqZadUWfuA0h/3kC5FUJk4nTsR0nLXSNp26ETKYY9dID1JQCNgPDDZU3EKCNCpK+nZ/Q09L+agH5XtP6MRIDs0+aXZXy0rcLjS2fgx4ZgIiDGfBlaMCIrM/mdRzVmrYbeMotmkdsLSQ/lFBvX1IuzvUSnyYmRPCXxA== pederbs@hildring" + "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDnVaayewel9GWGUYpTdLqfBnYnaBM10Vfq9fxeb9odwjf6pWe78il/5BCgW5EOadR/PeRv/ZYYnIT1uKEJOZkhjY2E6P2/B/JgzwPTwsrrjQsDHd5VjZty097dmf6vj0LXeJHmP9yogjPjGaSxktqyZi2CTFskRfZBPeCsoRMG+Z5bCMOHpXolvGCVWBNRcT3ITVYAAFL7HNPhcN3f5JkQgu0N+ySlMclNNSbHXXv1OIcLMKto6ZDx4DHp7NmU9uSbv8ERAfmoLCgdz1zOg0eVw9Kxs+XpUy3YFDdmPrny/Vq2LCDHljUWtjJI1uBoPF/ngavV+AuX5FHU9OSKNu7H pbsds@knut.pbsds.net" + "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC+qv5MogWwOgctQfQeHxUHF2ij6UA8BR4DLXtZClnw6A1CtOjAtZeAW62C8q9OKaIKDO0hqd2vLBkgEno4smqBDJ2ThwKuXrhiHqJzCkXZqIKKx79mpTo7aRpFgkJ7328Ee+tbqa65coL98WRhLnDg69NDaOfSCmH85/D0kuyTG7mYIMdBtFXB/IU0QC9USCSGcUGSnQAEx8S0vaXL7JP043kfEfeqwsea598qX+LFa2UfGwgLBpiWi4QEfYy6fviz2TFkbRYKQImybidzUHZkljjPupqu8U4dIx/jsJM/vew717xZPCU0ZCho77TIU+bYSitD5mjnzuD7LrAdbFgnhkD2sQlD/hUW40kPVT/Tq3DrpDRKC9tniiTaIQV1Pe0k82XwYrvV/hTl8T1ed6TuzhmUggqowAbJRbaBIa1zI672AFFQM8OBIN59ZlLy3V2RZW4fvQk2/xMRdVBT0W5Upx+9rCbH9LCGWL8gNNA/PRJ0L9Ts6cq8kf4tFhFQQrk= pbsds@bjarte" + "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDlLTAf5ObSpUU490M/l6+s5m0rxayPeaH23RLvIyoCqGftf/3Yi2iHP8wusBWGrEkXg8Po9YKh2CztflqJBnhsv/HaGYRXNsz3oVf2bSURUepZBkUXkg+T1x9OGG8pfvde8ROWZ8KxwLbAKghHUusyAvtJE9ktDxLpajomXDQlo+v7Hj2v4tMKCG/vHPxf/ni3Icl/8Rwo4zjuxl1MxLftPZv9rxCFv06ujuW6f6Mu5q+damt6ReH7RpOzs1rtDjPSnrRCboY4IbT5P4v6cZCr5hgAblKXHfOzPO9WM7O9tugJeE7eJK6Ps8gvWSHs/48SONSpjcYX3NzsRfxp6RRyD0yGrTDP/Ly6TNZzwZdKPO6GkRbLFXAxSn+ex/zW//R4ECQmof3KPYyjpt7yygICSdRlRocpz5aYxytFqBhelEbQqSZTP8q3HdxqGUplAgaCc0bK+m2ob5cirx3kHK2TyQ2dyCZgOML7AjD3GaclxPjkfEipL3/uFkq6EdsdQFs= pbsds@Svanbjorg" + "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCo5/9uHIKhhpVbcLSKslj9wdBiV4YaY/tydTfypNZBMMP2U54640t8JvHHkxCZRur8AqYupellxAqkmKn516Ut0WvfQcNgF7ieI66JHkK1j7kSFHHG1nkJHslwCh2PeYtfx5zHZZq8X9v/UjVGY182BC4BHC5zixmNiUvvc+N24BRT4NwslFmMYVcTdoNBSJXPgte4uUd+FZrAnHQrjYdJVANgI4i1d11mxlDFgJrPJj30KaIDxHAsAWgCEqGLMDO9N1cpGGbXVeXfoGvv+vdCXgbyA8BK7wWwXvy5HlvhpEJo8g84r6uKMMkEf+K1MpTiaNjdu+7/sKD/ZOyDB4RgCBs0DskouWRi+xfxABaKBj6706Z3hpj+GfpuSXrHKgGYXIL4cZHaAlz8GVsN1mUL4eJ12Sk14Od2QUHbzp7TDz5eaWuczPs5W9qXwNDMZcmBBZ3mkt9ZYPvAPRjeLpAKhhA9xPL3hbob5hhAENTWsFRFJEgpm8l362XFIOLHr/M= pbsds@rocm" + "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDHV25/jfk0upLl6JOq1tu1n+VNkMr0OOu8nZa3NBZQfqrGiLQuTTFycBd5hhoWBaZewb0R8jm/GESE4gfeiLtObe0bKXo8SVty5hNrIq06BbICXByJR99ux17psaNyp/dvZO7gkjKm3m30q7TfbZANlIwhv0XmqCz8S31ocJddFznWyK3nFau/Lvzpupi0Y+7yHkmcKiWYzZsjluQF90M5X5nIf2x4jj7WY0IkR2l41MOLk4NCQNIor6EyAXnHs78JBS3kY5p2x7t/cBpMDBmbgZePdfjGv/L4vFgYiG1wTZT77PWPA93GHueZWDGUkIvKbNriP/U+bShKnGjIfZttjerhzsFE1V/RctCFToqHkW39439nCj6eFpgUiLHkx/mAUPz/whKP+9x5I3/DQkgYZ7qA424Msdz2wXWNi3465Mtrf5XPsjWNReEWt9I29W19K5OLO9QQVrkgdioSCvxoHLvQypPscPkLVF8srzCVA6npUOrOuJ5zZcK1ax2/0v8= pbsds@frotbjoff" + #"ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABgQC8S44NzMNlCGpQ0aMqpv4YCbp4esYKLejsFRtCCA3oSgz+zq0Rbem1S1/vQehC44Ps1JPljiJgb8rj0VFUcuqnDtJP6kYRvvUDBaM7QO8Z4mZjOKYQo/MoidaPYEHakPB4fk4fdDU1u090VvTgPJvNe0UmPoTHbedk4u+OMuvMr8T56OPmwZPrCyRLtc4O+cYoig/cB+Y7DlwNI9wBx3xhShb5tuML+ZR1XyBYgprwoZML5l2pzEeK7dXdmkc4QT4TM13EcNOopsNZymH/xOCsY/yVqVJJC2Smp6mkIk+Or0zdlzxXFp4u3MS4bg5pzFVFfsqJAQGB7laMxtakMbn0if54MOA34hEAdmzdBCc+g9suuqFhA9WPqMsVlxx9khTue0MNoUVflUkm4B51aPbnPe+aycxdqMgfONroOjtBAQYfGnlRUP1qR3AD9Y2ND/NhGA9f8gTKPBRam+lRDWEGQO9HmWQdpeZbfWEyJa82HZcTCIhQyQukfa5PIzwtops= pbsds@pbsds-optiplex7060" + ]; + + #EDITOR = "micro"; + + #packages = with pkgs; [ + # + #]; + }; + users.groups.pbsds = {}; + + + + home-manager.users.pbsds = import ./home; + +} diff --git a/users/pbsds/home/default.nix b/users/pbsds/home/default.nix new file mode 100644 index 0000000..7076b79 --- /dev/null +++ b/users/pbsds/home/default.nix @@ -0,0 +1,282 @@ +# https://nix-community.github.io/home-manager/options.html +{ pkgs, config, ... }: + +{ + imports = [ + ./modules/jump.nix + ./modules/micro.nix + ]; + + nixpkgs.config.allowUnfree = true; + + home.stateVersion = "22.05"; + home.sessionVariables = { + EDITOR = "micro"; + }; + programs.bash.initExtra = '' + if [ "$COLORTERM" = "truecolor" ] || [ "$TERM" == "xterm" ]; then + export TERM=xterm-256color + export MICRO_TRUECOLOR=1 + fi + + parse_git_branch() { + git branch 2> /dev/null | sed -e '/^[^*]/d' -e 's/* \(.*\)/ (\1)/' + } + export PS1='\[\033[01;32m\]\u@\h\[\033[01;37m\] \[\033[01;34m\]\W\[\033[33m\]$(parse_git_branch)\[\033[01;32m\]\$\[\033[00m\] ' + + # ssh autocomplete + if test -f ~/.ssh/config; then + complete -W "$(cat ~/.ssh/config | grep '^Host ' | cut -b 6- | grep -v "\*")" ssh + complete -W "$(cat ~/.ssh/config | grep '^Host ' | cut -b 6- | grep -v "\*")" rssh + complete -W "$(cat ~/.ssh/config | grep '^Host ' | cut -b 6- | grep -v "\*")" vpn + complete -W "$(cat ~/.ssh/config | grep '^Host ' | cut -b 6- | grep -v "\*")" lvpn + complete -W "$(cat ~/.ssh/config | grep '^Host ' | cut -b 6- | grep -v "\*")" dvpn + complete -W "$(cat ~/.ssh/config | grep '^Host ' | cut -b 6- | grep -v "\*")" scp + complete -W "$(cat ~/.ssh/config | grep '^Host ' | cut -b 6- | grep -v "\*")" remote-init + fi + + # remote-exec and tldr + complete -F _command remote + complete -F _command remote-quick + #complete -F _command tldr + + function atom_nix { + nix-shell -p atom --run "atom $(printf "%q " "$@") --in-process-gpu --no-sandbox" + } + ''; + # TODO: split ^ + + + home.packages = with pkgs; [ + rsync + bind.dnsutils + xclip + + zip + unrar + unzip + atool + p7zip + bzip2 + gzip + atool + + micro + aspell + aspellDicts.en + aspellDicts.nb + vimv + dos2unix + + pandoc + graphviz + vgmstream + gallery-dl + yt-dlp + ffmpeg-full + + git + curl + wget + + visidata + + lolcat + toilet + boxes + tewisay + ponysay + + #tldr + entr + axel aria + bat + xe # xargs alternative + sd # sed alternative + fd # find alternative + silver-searcher # `ag` + ripgrep + gron + jq + yq + htmlq + sysz + du-dust # du alternative + ncdu # Disk usage analyzer with an ncurses interface + + mesa-demos + cage + + gh + hub + + librespeed-cli + + nix-template + nix-output-monitor + nixpkgs-review + manix + + (python3.withPackages (python-packages: with python-packages; [ + requests + numpy + scipy + ptpython + poetry + rich + matplotlib + more-itertools + toml + pyyaml + virtualenv + ])) + + ]; + home.shellAliases = { + ip = "ip -br -color"; + watch = "watch -c "; + hman = "man -H "; + #igrep = "grep -i"; + #flexget = "ssh -t 
knut.pbsds.net sudo -u flexget flexget"; + flexget = "sudo --user=flexget flexget -c /var/lib/flexget/flexget.yml"; + tmux = "systemd-run --scope --user tmux"; + ed = "$EDITOR"; # ed is the standard editor + de = "$EDITOR"; + dush = "du -shc * | sort -h"; + dushd = "du -shc * .[!.]?* | sort -h"; + diff = "diff -u --color"; + sudo = "sudo "; + xargs = "xargs "; + dc = "cd"; + #sl = "ls"; + sl = "exa"; + rssh = "ssh -l root"; + + # TODO: wayland detection + clip = "xclip -sel clip -t text/plain -rmlastnl -i"; + + # git gud + gs = "git status"; + gb = "git blame"; + gl = "git log --oneline --color | head -n 30"; + glg = "git log --all --decorate --oneline --graph"; + gpra = "git pull --rebase --autostash"; + gd = "git diff"; + gdwd = "git diff --word-diff"; + gdwdr = "git diff --word-diff --word-diff-regex=."; + gds = "git diff --staged"; + gdswd = "git diff --staged --word-diff"; + gdswdr = "git diff --staged --word-diff --word-diff-regex=."; + gcp = "git cherry-pick"; + gca = "git commit --amend"; + gcara = "git commit --amend --reset-author"; + gpo = "git push origin"; + gpasr = "git pull --autostash --rebase"; + #gfr = "git fetch origin master && git rebase FETCH_HEAD"; + gfr = "git pull --rebase"; + gp = "git pull --rebase --autostash"; + + python = "ptpython"; # this has too many problems... + cpython = "python"; + + pwd-fqdn = ''echo "$(whoami)@$(hostname -f):$(printf "%q" "$(realpath .)/")"''; + + http-server = "${pkgs.python3}/bin/python -m http.server"; + + manix-fzf = ''manix "" 2>/dev/null | grep '^# ' | sed 's/^# \(.*\) (.*/\1/;s/ (.*//;s/^# //' | fzf --preview="manix '{}'" | xargs manix''; + }; + programs.bash.enable = true; + #programs.bash.enableCompletion = true; + programs.bash.shellOptions = [ + # Append to history file rather than replacing it. + "histappend" + # check the window size after each command and, if + # necessary, update the values of LINES and COLUMNS. + "checkwinsize" + # Extended globbing. + "extglob" + "globstar" + # Warn if closing shell with running jobs. + "checkjobs" + ]; + programs.fzf.enable = true; # TODO: does this conflict with system-wide setup? 
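+  # git diffs are rendered through delta (a syntax-highlighting pager) below;
+  # exa.enableAliases additionally shadows ls/ll/la with exa, matching the
+  # manual sl = "exa" alias above.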
+ #programs.git.gitui.enable = true; + programs.git.enable = true; + programs.git.delta.enable = true; + #programs.git.lfs.enable = true; + #programs.git.signing + #programs.git.userName = "pbsds" + programs.git.userName = "Peder Bergebakken Sundt"; + programs.git.userEmail = "pbsds@hotmail.com"; + programs.git.ignores = [ "result" "__pycache__" ]; + programs.exa.enable = true; + programs.exa.enableAliases = true; + programs.direnv.enable = true; + programs.just.enable = true; + #programs.mpv.bindings + #programs.mpv.config + + /**/ + # TODO: upstream this + programs.micro.enable = true; + programs.micro.trueColor = true; + programs.micro.settings = { + colorscheme = "railscast"; + rmtrailingws = true; + tabstospaces = true; + }; + #xdg.configFile."micro/bindings.json".source = (pkgs.formats.json {}).generate "micro-bindings" { + programs.micro.bindings = { + "Alt-/" = "lua:comment.comment"; + "Alt-d" = "SpawnMultiCursor"; + "Alt-j" = "lua:joinLines.joinLines"; + "Alt-l" = "command:lower"; + "Alt-u" = "command:upper"; + "AltLeft" = "PreviousTab"; + "AltRight" = "NextTab"; + "Ctrl-j" = "EndOfLine,CursorRight,OutdentLine,OutdentLine,OutdentLine,OutdentLine,OutdentLine,OutdentLine,OutdentLine,OutdentLine,OutdentLine,OutdentLine,OutdentLine,OutdentLine,OutdentLine,OutdentLine,Backspace"; + "CtrlDown" = "None"; + "CtrlUnderscore" = "lua:comment.comment"; + "CtrlUp" = "None"; + "Escape" = "RemoveAllMultiCursors"; + "Shift-PageDown" = "SelectPageDown"; + "Shift-PageUp" = "SelectPageUp"; + }; + programs.micro.ensurePlugins = [ + "aspell" + "detectindent" + "editorconfig" + "joinLines" + "manipulator" + "quoter" + ]; + /**/ + + programs.nix-index.enable = true; + programs.tealdeer.enable = true; + + xdg.enable = true; + #xdg.desktopEntries + + gtk.enable = true; # TODO: only if programs.dconf is enabled + gtk.theme.name = "vimix-dark-ruby"; + gtk.theme.package = pkgs.vimix-gtk-themes; + gtk.iconTheme.name = "Flat-Remix-Blue-Dark"; + gtk.iconTheme.package = pkgs.flat-remix-icon-theme; + + programs.jump.enableBash = true; + + programs.beets = { + enable = true; + settings = { + directory = "/mnt/meconium/beets_preprocessed/data"; + #library = "/mnt/meconium/beets_preprocessed/library.db"; + library = "${config.xdg.configHome}/beets/library_preprocessed.db"; + + #directory = "/mnt/meconium/beets_music/library"; + #library = "${config.xdg.configHome}/beets/library_meconium.db"; + ##library = "/mnt/meconium/beets_music/data.db"; + }; + }; + +} diff --git a/users/pbsds/home/modules/jump.nix b/users/pbsds/home/modules/jump.nix new file mode 100644 index 0000000..7c7bf7f --- /dev/null +++ b/users/pbsds/home/modules/jump.nix @@ -0,0 +1,71 @@ +{ config, lib, pkgs, ... }: + +with lib; + +let + + cfg = config.programs.jump; + + enabled = cfg.enableBash || cfg.enableZsh; + +in { + meta.maintainers = [ hm.maintainers.pbsds ]; + + options.programs.jump = { + # Jumping around with symbolic links + # Based on http://jeroenjanssens.com/2013/08/16/quickly-navigate-your-filesystem-from-the-command-line.html + + enableBash = mkEnableOption "jump - Quickly Navigate your Filesystem"; + enableZsh = mkEnableOption "jump - Quickly Navigate your Filesystem"; + + marksPath = mkOption { + type = types.str; + default = "$HOME/.marks"; + description = '' + Where the jump marks are stored + ''; + }; + + }; + + config = mkIf enabled { + #home.packages = [ cfg.package ]; + + home.sessionVariables = { _JUMP_MARKPATH = cfg.marksPath; }; + + programs = let + rcScript = '' + function jump { + pushd . 
> /dev/null + cd -P "$_JUMP_MARKPATH/$1" 2>/dev/null || echo "No such mark: $1" + } + function mark { + mkdir -p "$_JUMP_MARKPATH" && + test ! -L "$_JUMP_MARKPATH/$1" \ + && ln -s "$(pwd)" "$_JUMP_MARKPATH/$1" \ + || echo "mark already exists!" + } + function unmark { + test ! -z "$1" \ + && rm -i "$_JUMP_MARKPATH/$1" + } + function marks { + #ls -l "$_JUMP_MARKPATH" | sed 's/ / /g' | cut -d' ' -f9- | sed 's/ -/\t-/g' && echo + ls --color=always -l "$_JUMP_MARKPATH" | tr -s ' ' | cut -d' ' -f9- | sed -e 's/ -> /§/g' | column -t -s '§' -o ' -> ' + } + _complete_jump_marks() { + local curw=''${COMP_WORDS[COMP_CWORD]} + local wordlist=$(find $_JUMP_MARKPATH -type l -printf "%f\n") + COMPREPLY=($(compgen -W ''\'''${wordlist[@]}' -- "$curw")) + return 0 + } + complete -F _complete_jump_marks jump unmark + ''; + in { + # TODO: fish + bash.initExtra = mkIf cfg.enableBash rcScript; + zsh.initExtra = mkIf cfg.enableZsh rcScript; + }; + + }; +} diff --git a/users/pbsds/home/modules/micro.nix b/users/pbsds/home/modules/micro.nix new file mode 100644 index 0000000..378cd92 --- /dev/null +++ b/users/pbsds/home/modules/micro.nix @@ -0,0 +1,109 @@ +{ config, lib, pkgs, ... }: + +with lib; + +let + + cfg = config.programs.micro; + + jsonFormat = pkgs.formats.json { }; + +in { + meta.maintainers = [ hm.maintainers.mforster maintainers.pbsds ]; + + options = { + programs.micro = { + enable = mkEnableOption "micro, a terminal-based text editor"; + + trueColor = mkOption { + type = types.bool; + default = true; + description = + "Enables support for the whole color range, should the terminal allow."; + }; + + settings = mkOption { + type = jsonFormat.type; + default = { }; + example = literalExpression '' + { + autosu = false; + cursorline = false; + } + ''; + description = '' + Configuration written to + $XDG_CONFIG_HOME/micro/settings.json. See + + for supported values. + ''; + }; + + bindings = mkOption { + type = jsonFormat.type; + default = { }; + example = literalExpression '' + { + "Alt-d" = "SpawnMultiCursor"; + "Escape" = "RemoveAllMultiCursors"; + "CtrlDown" = "None"; + "CtrlUp" = "None"; + "Shift-PageDown" = "SelectPageDown"; + "Shift-PageUp" = "SelectPageUp"; + } + ''; + description = '' + Configuration written to + $XDG_CONFIG_HOME/micro/bindings.json. See + + for supported values. + ''; + }; + + ensurePlugins = mkOption { + type = types.listOf types.str; + default = [ ]; + example = literalExpression '' + [ + "aspell" + ] + ''; + description = '' + Install micro plugins during activation. See + + for a listing of available plugins. + ''; + + }; + }; + }; + + config = mkIf cfg.enable { + home.packages = [ pkgs.micro ]; + + home.sessionVariables = mkIf cfg.trueColor { MICRO_TRUECOLOR = "1"; }; + + xdg.configFile."micro/settings.json".source = + jsonFormat.generate "micro-settings" cfg.settings; + + xdg.configFile."micro/bindings.json".source = + jsonFormat.generate "micro-bindings" cfg.bindings; + + home.activation = let + mkInstall = pluginName: '' + if ! test -d ${config.xdg.configHome}/micro/plug/${ + lib.escapeShellArg pluginName + }; then + (set -x + $DRY_RUN_CMD ${pkgs.micro}/bin/micro -plugin install ${ + lib.escapeShellArg pluginName + } + ) + fi + ''; + installs = lib.concatStringsSep "\n" (map mkInstall cfg.ensurePlugins); + in mkIf (cfg.ensurePlugins != [ ]) { + microPluginSetup = lib.hm.dag.entryAfter [ "writeBoundary" ] installs; + }; + }; +}