split nas and websites into modules

This commit is contained in:
2023-02-25 00:03:29 +01:00
parent 019c139a5c
commit db4b4d4b45
56 changed files with 1631 additions and 1532 deletions
File diff suppressed because it is too large Load Diff
+49
View File
@@ -0,0 +1,49 @@
{ config, pkgs, lib, mkDomain, ... }:
{
  # CensorDodge
  # A lightweight and customisable web proxy
  #
  # The whole deployment below is toggled OFF with the `/** /` comment trick;
  # change the opener to an empty comment to re-enable it.
  /** /
  # Dedicated PHP-FPM pool so the proxy runs as its own system user.
  services.phpfpm.pools.censordodge = {
    user = "censordodge";
    group = "censordodge";
    settings = {
      "listen.owner" = config.services.nginx.user;
      "listen.group" = config.services.nginx.group;
      "pm" = "dynamic";
      "pm.max_children" = "32";
      "pm.start_servers" = "2";
      "pm.min_spare_servers" = "2";
      "pm.max_spare_servers" = "4";
      "pm.max_requests" = "500";
    };
  };
  services.nginx.virtualHosts.${mkDomain "censordodge"} = {
    forceSSL = true; # addSSL = true;
    enableACME = true; #useACMEHost = acmeDomain;
    # Serve the CensorDodge sources straight from the fetched tree.
    root = pkgs.fetchFromGitHub {
      owner = "ryanmab";
      repo = "CensorDodge";
      rev = "2480e8269190ca8618e41dc581f9d55f4ce9f333";
      sha256 = "8R3lyxF22HXui4pJytMcqwwa5TDXIJb6fWII934IhEA=";
    };
    extraConfig = ''
      index index.php;
    '';
    locations."/".extraConfig = ''
      try_files $uri $uri/ /index.php?$args;
    '';
    # FIX: in a double-quoted Nix string, "\." collapses to just ".", so the
    # previous regex matched ANY character before "php"; "\\." emits the
    # escaped dot nginx actually needs.
    locations."~ \\.php$".extraConfig = ''
      include ${config.services.nginx.package}/conf/fastcgi.conf;
      fastcgi_pass unix:${config.services.phpfpm.pools.censordodge.socket};
      fastcgi_buffers 16 16k;
      fastcgi_buffer_size 32k;
    '';
  };
  users.users.censordodge = {
    isSystemUser = true;
    group = "censordodge";
  };
  users.groups.censordodge = {};
  /**/
}
+21
View File
@@ -0,0 +1,21 @@
{ config, pkgs, lib, mkDomain, ... }:
let
  # Static Cinny bundle with our preferred homeserver list baked in.
  cinnyRoot = pkgs.unstable.cinny.override {
    conf = {
      defaultHomeserver = 0;
      homeserverList = [
        "pvv.ntnu.no"
        "matrix.org"
        "dodsorf.as"
      ];
    };
  };
in
{
  # Cinny
  # Yet another Matrix client for the web
  services.nginx.virtualHosts.${mkDomain "cinny"} = {
    forceSSL = true;
    enableACME = true;
    root = cinnyRoot;
  };
}
+31
View File
@@ -0,0 +1,31 @@
{ config, pkgs, lib, mkDomain, ... }:
let
  cfg = config.services.convos;
in
{
  # Convos
  # The simplest way to use IRC in your browser
  services.convos = {
    enable = false; # user registration is broken; new major version (7) in unstable.
    reverseProxy = true;
    listenAddress = "127.0.0.1";
    listenPort = 44649;
  };
  # Only materialize the vhost while the service itself is on.
  services.nginx.virtualHosts.${mkDomain "convos"} = lib.mkIf cfg.enable {
    forceSSL = true;
    enableACME = true;
    locations."/" = {
      proxyPass = "http://127.0.0.1:${toString cfg.listenPort}";
      proxyWebsockets = true;
      extraConfig = ''
        #proxy_redirect off;
        client_max_body_size 0;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
        proxy_set_header Host $host;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Request-Base "$scheme://$host/";
        #proxy_set_header X-Real-IP $remote_addr;
        #proxy_set_header REMOTE-HOST $remote_addr;
      '';
    };
  };
}
+57
View File
@@ -0,0 +1,57 @@
{ config, pkgs, lib, mkDomain, ... }:
{
  # Cryptpad
  # A collaborative office suite that is end-to-end encrypted and open-source.
  # TODO: https://github.com/NixOS/nixpkgs/pull/180066
  /**/
  services.cryptpad = {
    #enable = true; # current node version used is marked insecure
    # reference: https://github.com/xwiki-labs/cryptpad/blob/main/config/config.example.js
    # FIX: `toFile` is not in scope in a module — it must be `builtins.toFile`
    # (previously this failed evaluation with "undefined variable 'toFile'").
    configFile = builtins.toFile "cryptpad-config.js" ''
      module.exports = {
      httpUnsafeOrigin: 'http://localhost:3457',
      httpSafeOrigin: 'https://${mkDomain "cryptpad"}',
      httpAddress: '127.0.0.1',
      httpPort: 3457,
      //adminKeys: [ // can be found on the settings page for registered users
      //  "[cryptpad-user1@my.awesome.website/YZgXQxKR0Rcb6r6CmxHPdAGLVludrAF2lEnkbx1vVOo=]",
      //],
      // storage
      //inactiveTime: 90, // days
      //archiveRetentionTime: 15, // days
      //accountRetentionTime: 365, // days, default is never
      //maxUploadSize: 20 * 1024 * 1024, // bytes
      //premiumUploadSize: 100 * 1024 * 1024, // bytes, (users with a plan in their customLimit)
      filePath: './datastore/',
      archivePath: './data/archive', // recovery in the event of accidental deletion
      pinPath: './data/pins', // content stored indefinetly
      taskPath: './data/tasks', // scheduled tasks
      blockPath: './block', // users' authenticated blocks
      blobPath: './blob', // uploaded encrypted blobs
      blobStagingPath: './data/blobstage', // incomplete blobs
      decreePath: './data/decrees', // undocumented
      logPath: false, // logging of events, may be set to false
      logToStdout: true,
      logLevel: 'info', // silly, verbose, debug, feedback, info, warn, error
      logFeedback: false, // data collection
      verbose: false, // logging
      installMethod: 'nixpkgs', // telemetry for devs
      };
    '';
  };
  # Reverse proxy; only present while the service is enabled.
  services.nginx.virtualHosts.${mkDomain "cryptpad"} = lib.mkIf config.services.cryptpad.enable {
    forceSSL = true; # addSSL = true;
    enableACME = true; #useACMEHost = acmeDomain;
    locations."/" = {
      proxyPass = "http://127.0.0.1:3457";
      proxyWebsockets = true;
    };
  };
  /**/
}
+64
View File
@@ -0,0 +1,64 @@
{ config, pkgs, lib, ... }:
let
  # Prefix a subdomain onto this host's FQDN: mkDomain "foo" -> "foo.<fqdn>"
  mkDomain = subname: "${subname}.${config.networking.fqdn}";
in
{
  # Expose the helper to all other website modules.
  _module.args.mkDomain = mkDomain;
  # Sorted, deduplicated list of every name served by nginx (vhost attr names
  # plus their serverAliases); consumed by the index-page module.
  # FIX: applied the long-standing "TODO: deduplicate" with lib.unique.
  _module.args.allSubdomains = lib.unique (lib.sort (x: y: x < y) (
    lib.flatten (
      lib.mapAttrsToList
        (k: v: [ k ] ++ v.serverAliases)
        config.services.nginx.virtualHosts
    )
  ));

  security.acme.acceptTerms = true;
  security.acme.defaults.email = "pbsds+acme@hotmail.com";
  #security.acme.defaults.renewInterval = "daily";
  #security.acme.defaults.reloadServices
  # https://www.xf.is/2020/06/30/list-of-free-acme-ssl-providers/
  #security.acme.defaults.server = "https://acme-staging-v02.api.letsencrypt.org/directory"; # STAGING
  #security.acme.defaults.server = "https://api.buypass.com/acme/directory"; # no wildcards, rate limit: 20 domains/week, 5 duplicate certs / week
  #security.acme.defaults.server = "https://api.test4.buypass.no/acme/directory"; # STAGING. no wildcards, rate limit: 20 domains/week, 5 duplicate certs / week
  # DNS-based ACME:
  # - https://go-acme.github.io/lego/dns/domeneshop/
  # - https://nixos.org/manual/nixos/stable/index.html#module-security-acme-config-dns-with-vhosts
  #security.acme.defaults.dnsProvider = "domeneshop";
  #security.acme.defaults.credentialsFile = "/var/lib/secrets/domeneshop.key"; # TODO: this file must be made by hand, containing env variables.

  services.nginx.enable = true;
  networking.firewall.allowedTCPPorts = [ 80 443 ];

  # Website tunnel
  # TODO: remove
  services.nginx.virtualHosts.${config.networking.fqdn} = {
    forceSSL = true; # addSSL = true;
    enableACME = true;
    #acmeRoot = null; # use DNS
    default = true;
    serverAliases = map mkDomain [
      "www"
      #"*" # requires DNS ACME
    ];
    # The alternative to ^ is: config.security.acme.certs."${acmeDomain}".extraDomainNames = [ (mkDomain "foo") ];
    # TODO: 'nox' alias for everything
    locations."/" = {
      proxyPass = "http://pbuntu.pbsds.net";
      proxyWebsockets = true;
    };
  };
  #services.nginx.virtualHosts.${mkDomain "www"} = {
  #  addSSL = true;
  #  useACMEHost = acmeDomain; #enableACME = true;
  #  locations."/" = {
  #    proxyPass = "http://pbuntu.pbsds.net";
  #    proxyWebsockets = true;
  #  };
  #};
}
+43
View File
@@ -0,0 +1,43 @@
{ config, pkgs, lib, mkDomain, ... }:
let
  # Element configuration, baked into the static bundle at build time.
  # https://github.com/vector-im/element-web/blob/develop/docs/config.md
  # https://github.com/vector-im/element-web/blob/develop/config.sample.json
  # https://github.com/vector-im/element-web/blob/develop/docs/labs.md
  elementConf = {
    brand = "spis meg";
    default_country_code = "NO";
    default_server_config."m.homeserver" = {
      server_name = "pvv.ntnu.no";
      base_url = "https://matrix.pvv.ntnu.no";
    };
    roomDirectory.servers = [
      "pvv.ntnu.no"
      "matrix.org"
      "nixos.org"
      "agdersam.no"
      "trygve.me"
      "utwente.io"
    ];
    disable_guests = true;
    showLabsSettings = true;
    features = {
      feature_pinning = "labs";
      feature_custom_status = "labs";
      feature_custom_tags = "labs";
      feature_state_counters = "labs";
      feature_latex_maths = "labs";
    };
    setting_defaults.breadcrumbs = true;
    UIFeature = {
      urlPreviews = true;
      shareQrCode = true;
      registration = false;
    };
  };
in
{
  # Element-web
  # A glossy Matrix collaboration client for the web
  services.nginx.virtualHosts.${mkDomain "element"} = {
    forceSSL = true;
    enableACME = true;
    root = pkgs.element-web.override { conf = elementConf; };
  };
}
+35
View File
@@ -0,0 +1,35 @@
{ config, pkgs, lib, mkDomain, ... }:
{
  # Flexget
  # Multipurpose automation tool for all of your media
  services.flexget = {
    enable = true;
    user = "flexget"; # The user under which to run flexget.
    homeDir = "/var/lib/flexget";
    interval = "30m"; # how often the feed is polled
    # Flexget YAML: pull the ShanaProject RSS feed and hand every entry to the
    # transmission daemon on the NAS at 192.168.1.3.
    # SECURITY NOTE(review): the transmission username/password are embedded
    # here in plain text and will end up world-readable in the nix store —
    # consider a secrets mechanism (e.g. a credentials file).
    config = ''
      tasks:
        shanaproject:
          rss: 'https://www.shanaproject.com/feeds/secure/user/35853/J98B7OXAHO/'
          accept_all: yes
          no_entries_ok: yes
          transmission:
            host: 192.168.1.3
            port: 9091
            path: '/Reidun/shared/Downloads/shana project/'
            username: pbsds
            password: spismeg
    '';
  };
  # The module does not create the user/group itself, so do it here, but only
  # while the service is enabled.
  users.groups."${config.services.flexget.user}" = lib.mkIf config.services.flexget.enable { };
  users.users."${config.services.flexget.user}" = lib.mkIf config.services.flexget.enable {
    isSystemUser = true;
    createHome = true;
    home = config.services.flexget.homeDir;
    group = "${config.services.flexget.user}";
  };
}
+25
View File
@@ -0,0 +1,25 @@
{ config, pkgs, lib, mkDomain, ... }:
{
  # Galene
  # Videoconferencing server that is easy to deploy, written in Go
  services.galene = {
    #enable = true;
    # Plain HTTP on loopback; nginx terminates TLS in front of it.
    insecure = true; # reverse proxy instead, but can i feed it the acme cert?
    httpAddress = "127.0.0.1";
    httpPort = 3975;
  };
  # Reverse proxy, only materialized while the service is enabled.
  services.nginx.virtualHosts.${mkDomain "galene"} = lib.mkIf config.services.galene.enable {
    forceSSL = true; # addSSL = true;
    enableACME = true; #useACMEHost = acmeDomain;
    locations."/" = {
      proxyPass = "http://127.0.0.1:${toString config.services.galene.httpPort}";
      proxyWebsockets = true;
    };
  };
  # Galene's TURN port, should it ever bypass the proxy.
  # (was `config.service.jellyfin.enable` — a copy/paste leftover; it would be
  # `config.services.galene.enable` if re-enabled)
  #networking.firewall = lib.mkIf config.services.galene.enable {
  #  allowedTCPPorts = [ 1194 ];
  #  allowedUDPPorts = [ 1194 ]; # TODO: Only if behind a NAT?
  #};
}
+84
View File
@@ -0,0 +1,84 @@
{ config, pkgs, lib, mkDomain, ... }:
{
  # Gitea
  # Git with a cup of tea
  services.gitea = rec {
    enable = true;

    # https://docs.gitea.io/en-us/config-cheat-sheet/
    # FIX: all `settings` entries use attrpath syntax so Nix merges them.
    # The previous revision mixed `settings.service.X = …` with a separate
    # `settings = { ui = … }` attribute in the same attrset, which is an
    # evaluation error ("attribute 'settings' already defined").
    settings.service.DISABLE_REGISTRATION = true; # disable after initial deploy
    settings.session.COOKIE_SECURE = true; # default is false, only send cookies over https
    settings.ui.THEMES = "gitea,arc-green,earl-grey,gitea-modern";
    settings.ui.DEFAULT_THEME = "earl-grey";
    #settings = {
    #  "cron.sync_external_users" = {
    #    RUN_AT_START = true;
    #    SCHEDULE = "@every 24h";
    #    UPDATE_EXISTING = true;
    #  };
    #  mailer = {
    #    ENABLED = true;
    #    MAILER_TYPE = "sendmail";
    #    FROM = "do-not-reply@example.org";
    #    SENDMAIL_PATH = "${pkgs.system-sendmail}/bin/sendmail";
    #  };
    #  other = {
    #    SHOW_FOOTER_VERSION = false;
    #  };
    #};

    #appName = "gitea: spis meg";
    appName = "gitea: private instance";
    domain = mkDomain "gitea";
    #ssh.enable # default is true
    rootUrl = "https://${domain}/"; # `domain` via rec
    #ssh.clonePort # default is 22
    #log.level = "Debug"; # default is "Info"
    #lfs.enable = true; # default is false
    httpPort = 9675; # default is 3000
    httpAddress = "127.0.0.1"; # default is "0.0.0.0"
    #extraConfig
    #database.type # default is "sqlite3"
    #stateDir # default is "/var/lib/gitea"
    #mailerPasswordFile # Path to a file containing the SMTP password
    #repositoryRoot # default is "${config.services.gitea.stateDir}/repositories"
    #log.rootPath # TODO: move?
    #lfs.contentDir
    #dump.enable # default is false

    # Bundle extra CSS themes on top of gitea's own static data.
    staticRootPath = pkgs.symlinkJoin {
      name = "gitea-static-root-data";
      paths = let
        giteaModern = pkgs.fetchFromGitea { # https://codeberg.org/Freeplay/Gitea-Modern
          domain = "codeberg.org";
          owner = "Freeplay";
          repo = "Gitea-Modern";
          rev = "0c0a05e6f0496521c166402dd56441a714487fd8";
          sha256 = "q14E5ni2BvpGsmGOHWQgbCqD4lBh4bFtBFtIyNfAf0Q=";
        };
        giteaEarlGray = pkgs.fetchFromGitHub { # https://github.com/acoolstraw/earl-grey
          owner = "acoolstraw";
          repo = "earl-grey";
          rev = "a6ca3dd3b9e6b48f6e45032b2aa691c2f16dc9bc";
          sha256 = "55Piafc7kQ5hybwHQczx36AP+kX1AtWugxERYNdmqWk=";
        };
      in [
        config.services.gitea.package.data
        (pkgs.linkFarm "gitea-custom-dir" [
          { name = "public/css/theme-gitea-modern.css"; path = "${giteaModern}/Gitea/theme-gitea-modern.css"; }
          { name = "public/css/theme-earl-grey.css"; path = "${giteaEarlGray}/theme-earl-grey.css"; }
        ])
      ];
    };
  };
  # Reverse proxy, only while the service is enabled.
  services.nginx.virtualHosts.${mkDomain "gitea"} = lib.mkIf config.services.gitea.enable {
    forceSSL = true; # addSSL = true;
    enableACME = true; #useACMEHost = acmeDomain;
    locations."/" = {
      proxyPass = "http://127.0.0.1:${toString config.services.gitea.httpPort}";
      proxyWebsockets = true;
    };
  };
}
+24
View File
@@ -0,0 +1,24 @@
{ config, pkgs, lib, mkDomain, ... }:
{
  # Grafana
  # Gorgeous metric viz, dashboards & editors for Graphite, InfluxDB & OpenTSDB
  services.grafana = {
    #enable = true;
    #addr = "127.0.0.1";
    settings.server.http_addr = "0.0.0.0";
    settings.server.http_port = 3000;
    settings.server.domain = mkDomain "grafana";
    #settings.server.root_url = "https://${config.services.grafana.settings.server.domain}/grafana/"; # Not needed if it is `https://your.domain/`
  };
  services.nginx.virtualHosts."${config.services.grafana.settings.server.domain}" = lib.mkIf config.services.grafana.enable {
    forceSSL = true; # addSSL = true;
    enableACME = true; #useACMEHost = acmeDomain;
    #locations."/grafana/" = {
    locations."/" = {
      # FIX: the legacy `services.grafana.port` option no longer exists after
      # the move to the `settings` freeform config; the listen port now lives
      # at settings.server.http_port.
      proxyPass = "http://127.0.0.1:${toString config.services.grafana.settings.server.http_port}";
      proxyWebsockets = true;
    };
  };
}
+34
View File
@@ -0,0 +1,34 @@
{ config, pkgs, lib, mkDomain, ... }:
let
  cfg = config.services.hedgedoc;
in
{
  # hedgedoc
  # Realtime collaborative markdown notes on all platforms
  services.hedgedoc = {
    #enable = true; # FIXME: make it load
    settings = {
      host = "127.0.0.1";
      port = 44776;
      db = {
        dialect = "sqlite";
        storage = "${cfg.workDir}/db.hedgedoc.sqlite";
      };
      domain = mkDomain "hedgedoc";
      allowAnonymous = true;
      allowEmailRegister = false; # default is true
      allowAnonymousEdits = false; # default is false
      protocolUseSSL = true; # https prefix in generated links
      useSSL = false; # nginx terminates ssl
      #csp = {TODO}; # content security policy
      #useCDN = true;
      #debug = true;
      # there are also a metric fuckton of integration services, like github,
      # twitter, minio, mattermost, dropbox etc., plus auth backends such as
      # ldap, saml and oauth2.
    };
  };
  services.nginx.virtualHosts.${mkDomain "hedgedoc"} = lib.mkIf cfg.enable {
    forceSSL = true;
    enableACME = true;
    locations."/" = {
      proxyPass = "http://127.0.0.1:${toString cfg.settings.port}";
      proxyWebsockets = true;
      # TODO: proxy headers:
      # https://docs.hedgedoc.org/guides/reverse-proxy/
    };
  };
}
+19
View File
@@ -0,0 +1,19 @@
{ config, pkgs, lib, mkDomain, ... }:
{
  # home-assistant
  # Open-source home automation hub; currently disabled (enable is commented).
  services.home-assistant = {
    #enable = true;
    config = {
      # https://www.home-assistant.io/integrations/default_config/
      default_config = {};
      # https://www.home-assistant.io/integrations/esphome/
      #esphome = {};
      # https://www.home-assistant.io/integrations/met/
      #met = {};
    };
  };
  # TODO: nginx reverse-proxy vhost, like the other services in this tree
}
+38
View File
@@ -0,0 +1,38 @@
{ config, pkgs, lib, mkDomain, ... }:
{
  # Hydra — Nix-based continuous build system
  # https://github.com/NixOS/hydra
  # https://nixos.wiki/wiki/Hydra
  # https://blog.matejc.com/blogs/myblog/nixos-hydra-nginx
  # First-time admin setup:
  #   sudo -u hydra hydra-create-user 'admin' --full-name '<NAME>' --email-address '<EMAIL>' --password-prompt --role admin
  services.hydra = {
    enable = true;
    hydraURL = "https://${mkDomain "hydra"}";
    #smtpHost = ;
    listenHost = "localhost";
    port = 4758;
    # Sender address used for email notifications.
    notificationSender = "hydra@${config.networking.fqdn}";
    #buildMachinesFiles = [];
    #useSubstitutes = true;
    #debugServer = true;
    #logo = /some/path.png;
    #minimumDiskFree = 0; # GiB free below which the queue runner pauses
    #minimumDiskFreeEvaluator = 0; # GiB free below which the evaluator pauses
  };
  services.nginx.virtualHosts.${mkDomain "hydra"} = lib.mkIf config.services.hydra.enable {
    forceSSL = true;
    enableACME = true;
    locations."/" = {
      proxyPass = "http://127.0.0.1:${toString config.services.hydra.port}";
      proxyWebsockets = true;
      extraConfig = ''
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        add_header Front-End-Https on;
      '';
    };
  };
}
+46
View File
@@ -0,0 +1,46 @@
{ config, pkgs, lib, mkDomain, allSubdomains, ... }:
let
  # TODO: support fully qualified urls as well
  customDescriptions = rec {
    index = "This page";
    links = "Linktree";
    element = pkgs.element-web.meta.description;
    refleksjon = "My dad is a cheapskate";
    roroslyd = "My dad is a cheapskate";
    www = "wwwwwwwwwwwwwww";
    ${config.networking.hostName} = www;
    shlink = "Url shortener";
  };
in
{
  # Landing page: a table of every endpoint hosted with nginx.
  # It assumes the lowest subdomain equals a package name in pkgs or python3.pkgs.
  services.nginx.virtualHosts.${mkDomain "index"} = {
    forceSSL = true; # addSSL = true;
    enableACME = true; #useACMEHost = acmeDomain;
    root = with lib; let
      # "foo.bar.baz" -> "foo"
      getName = domain: head (lib.splitString "." domain);
      # "foo.bar.baz" -> "bar.baz"
      getDomain = domain: concatStringsSep "." (tail (lib.splitString "." domain));
      # Best-effort description: explicit override first, then package metadata.
      getDesc = domain: let
        name = getName domain;
      in if lib.hasAttr name customDescriptions
        then customDescriptions.${name}
        else if lib.hasAttr name pkgs.python3Packages
        then pkgs.python3Packages.${name}.meta.description
        else if lib.hasAttr name pkgs
        then pkgs.${name}.meta.description
        else if lib.hasAttrByPath [name "package"] config.services
        then config.services.${name}.package.meta.description
        else "";
      # FIX: inside a ''…'' string, \" is emitted literally (backslash + quote),
      # which previously produced `style=\"…\"` and broke the attribute; plain
      # quotes need no escaping here.
      mkRow = domain: ''<tr><td><a href="//${domain}">${getName domain}<span style="opacity: 0.65;">.${getDomain domain}</span></a><td>${getDesc domain}'';
    in pkgs.writeTextDir "index.html" ''
      <!DOCTYPE html>
      <table>
      <tr><th>url<th>description
      ${lib.concatStringsSep "\n" (map mkRow allSubdomains)}
      </table>
    '';
  };
}
+35
View File
@@ -0,0 +1,35 @@
{ config, pkgs, lib, mkDomain, ... }:
let
  cfg = config.services.invidious;
in
{
  # Invidious
  # An open source alternative front-end to YouTube
  services.invidious = {
    enable = true;
    domain = mkDomain "invidious";
    port = 4765;
    settings = {
      host_binding = "127.0.0.1";
      external_port = 443; # the port the public sees (nginx)
      https_only = true;
      statistics_enabled = false; # api endpoint required for public instances
      registration_enabled = false;
      login_enabled = false;
      #admins = ["pbsds"];
      banner = "spis meg";
      default_user_preferences = {
        #feed_menu = ["Popular", "Trending", "Subscriptions", "Playlists"]
        feed_menu = ["Trending" "Subscriptions" "Playlists"];
        default_home = "Trending";
      };
    };
  };
  services.nginx.virtualHosts.${mkDomain "invidious"} = lib.mkIf cfg.enable {
    forceSSL = true;
    enableACME = true;
    locations."/" = {
      proxyPass = "http://127.0.0.1:${toString cfg.port}";
      proxyWebsockets = true;
    };
  };
}
+51
View File
@@ -0,0 +1,51 @@
{ config, pkgs, lib, mkDomain, ... }:
{
  # Jellyfin — media server
  services.jellyfin = {
    enable = true; # don't enable unless you intend to first-time-setup the admin user
    # from https://jellyfin.org/docs/general/networking/index.html:
    # - 8096/tcp is used by default for HTTP traffic. You can change this in the dashboard.
    # - 8920/tcp is used by default for HTTPS traffic. You can change this in the dashboard.
    # - 1900/udp is used for service auto-discovery. This is not configurable.
    # - 7359/udp is also used for auto-discovery. This is not configurable.
    openFirewall = false; # I do it manually below:
    # TODO: configure initial collections and extensions
  };
  # firewall - not needed? (traffic goes through the nginx vhost below)
  /*
  networking.firewall = lib.mkIf config.services.jellyfin.enable {
    # TODO: does this overwrite rules set by other stuff? should i use ++ ?
    #allowedTCPPorts = [ 8096 8920 ];
    allowedUDPPorts = [ 1900 7359 ]; # TODO: Only if behind a NAT?
  };
  */
  # Reverse proxy onto jellyfin's default HTTP port.
  services.nginx.virtualHosts.${mkDomain "jellyfin"} = lib.mkIf config.services.jellyfin.enable {
    forceSSL = true; # addSSL = true;
    enableACME = true; #useACMEHost = acmeDomain;
    locations."/" = {
      proxyPass = "http://127.0.0.1:8096";
      proxyWebsockets = true;
    };
  };
  # Hardware acceleration (VAAPI / OpenCL transcode)
  # https://nixos.wiki/wiki/Jellyfin
  nixpkgs.config.packageOverrides = pkgs: {
    vaapiIntel = pkgs.vaapiIntel.override { enableHybridCodec = true; };
  };
  hardware.opengl = {
    enable = true;
    extraPackages = with pkgs; [
      intel-media-driver
      vaapiIntel
      vaapiVdpau
      libvdpau-va-gl
      intel-compute-runtime # OpenCL filter support (hardware tonemapping and subtitle burn-in)
    ];
  };
  # Allow Jellyfin access to VAAPI: loosen the unit's device sandboxing so the
  # render node is reachable.
  users.users.${config.services.jellyfin.user}.extraGroups = [ "video" "render" ];
  systemd.services.jellyfin.serviceConfig.PrivateDevices = lib.mkForce false;
  systemd.services.jellyfin.serviceConfig.DeviceAllow = lib.mkForce [ "/dev/dri/renderD128" ];
}
+23
View File
@@ -0,0 +1,23 @@
{ config, pkgs, lib, mkDomain, ... }:
{
  # Jitsi meet — video conferencing
  services.jitsi-meet = {
    #enable = true;
    hostName = mkDomain "jitsi-meet";
    # https://github.com/jitsi/jitsi-meet/blob/master/config.js
    config = {
      #enableWelcomePage = false;
      defaultLang = "nb";
    };
    # https://github.com/jitsi/jitsi-meet/blob/master/interface_config.js
    interfaceConfig = {
      APP_NAME = "Spis meg";
      # SHOW_JITSI_WATERMARK = false;
      # SHOW_WATERMARK_FOR_GUESTS = false;
    };
    # record in a headless chrome instance
    jibri.enable = false;
    # force ssl, acme, lots of routing rules
    nginx.enable = true;
  };
}
+33
View File
@@ -0,0 +1,33 @@
{ config, pkgs, lib, mkDomain, ... }:
{
  # Kukkee
  # Self-hosted Doodle alternative: a meeting poll tool
  #
  # NOTE(review): the `/** /` opener below starts a Nix block comment that is
  # only terminated inside the final closing marker, so EVERYTHING in between
  # (overlay, import, service and vhost config) is currently disabled. The
  # second `/** /` marker does NOT close the comment (it contains no `*`
  # immediately followed by `/`) — confirm the all-disabled state is intended.
  /** /
  nixpkgs.overlays = [
    (final: prev: {
      kukkee = prev.callPackage ./pkg { };
    })
  ];
  imports = [ ./module ];
  /** /
  services.kukkee = {
    #enable = true;
    port = 5666;
    baseUrl = "https://${mkDomain "kukkee"}";
    #mongodb.enable = false;
  };
  services.nginx.virtualHosts.${mkDomain "kukkee"} = lib.mkIf config.services.kukkee.enable {
    forceSSL = true; # addSSL = true;
    enableACME = true; #useACMEHost = acmeDomain;
    locations."/" = {
      proxyPass = "http://127.0.0.1:${toString config.services.kukkee.port}";
      proxyWebsockets = true;
    };
  };
  /**/
}
+84
View File
@@ -0,0 +1,84 @@
# nix-build -E 'with import <nixpkgs> {}; callPackage ./default.nix {}'
{ lib
, stdenv
, pkgs
, fetchFromGitHub
, bash
, nodejs
, nodePackages
}:
let
  # node2nix-generated dependency closure for the Next.js app.
  nodeDependencies = (import ./node-composition.nix {
    inherit pkgs nodejs;
    inherit (stdenv.hostPlatform) system;
  }).nodeDependencies.override (old: {
    # access to path '/nix/store/...-source' is forbidden in restricted mode
    #src = src;
    #dontNpmInstall = true;
  });
in stdenv.mkDerivation rec {
  pname = "kukkee";
  #version = "0.1.0";
  version = "unstable-2022-06-19-270c8ed";
  src = fetchFromGitHub {
    owner = "AnandBaburajan";
    repo = "Kukkee";
    #rev = "v${version}";
    rev = "270c8ed421c8f1100a845958430e1ebe61d86d5a";
    sha256 = "CtbTKUZEPjwbLRYuC44JaeZn0Rjyn4h6tsBEWWQWJmA=";
  };
  buildInputs = [
    nodeDependencies
  ];
  # Link the prebuilt node_modules into the source tree and run the Next.js
  # production build.
  buildPhase = ''
    runHook preBuild
    #export PATH="${nodeDependencies}/bin:${nodejs}/bin:$PATH"
    ln -s ${nodeDependencies}/lib/node_modules .
    next build
    runHook postBuild
  '';
  # Install the built app plus a small launcher script under $out/bin.
  installPhase = ''
    runHook preInstall
    # FIXME: is to possible for next.js to not run from a ".next" directory?
    mkdir -p $out/share/kukkee
    cp -a public .next $out/share/kukkee/
    ln -s ${nodeDependencies}/lib/node_modules $out/share/kukkee/
    # create next.js entrypoint
    mkdir $out/bin
    cat <<EOF > $out/bin/kukkee
    #!${bash}/bin/bash
    export PATH="${nodeDependencies}/bin:\$PATH"
    exec -a kukkee next start $out/share/kukkee "\$@"
    EOF
    chmod +x $out/bin/kukkee
    runHook postInstall
  '';
  passthru.updateScript = ./update.sh;
  meta = with lib; {
    description = "Self-hosted Doodle alternative: a meeting poll tool.";
    longDescription = ''
      The free and open source meeting poll tool.
      Never ask what time works for you all? again.
      A self-hosted Doodle alternative.
    '';
    homepage = "https://kukkee.com/";
    license = licenses.mit;
    platforms = platforms.unix;
    maintainers = with maintainers; [ pbsds ];
  };
}
@@ -0,0 +1,17 @@
# This file has been generated by node2nix 1.11.1. Do not edit!
# This file has been generated by node2nix 1.11.1. Do not edit!
# (Regenerate with node2nix instead of patching by hand.)
# NOTE(review): nodejs-14_x is end-of-life — consider regenerating against a
# newer nodejs attribute.
{pkgs ? import <nixpkgs> {
    inherit system;
  }, system ? builtins.currentSystem, nodejs ? pkgs."nodejs-14_x"}:
let
  nodeEnv = import ./node-env.nix {
    inherit (pkgs) stdenv lib python2 runCommand writeTextFile writeShellScript;
    inherit pkgs nodejs;
    libtool = if pkgs.stdenv.isDarwin then pkgs.darwin.cctools else null;
  };
in
import ./node-packages.nix {
  inherit (pkgs) fetchurl nix-gitignore stdenv lib fetchgit;
  inherit nodeEnv;
}
+598
View File
@@ -0,0 +1,598 @@
# This file originates from node2nix
{lib, stdenv, nodejs, python2, pkgs, libtool, runCommand, writeTextFile, writeShellScript}:
let
# Workaround to cope with utillinux in Nixpkgs 20.09 and util-linux in Nixpkgs master
utillinux = if pkgs ? utillinux then pkgs.utillinux else pkgs.util-linux;
python = if nodejs ? python then nodejs.python else python2;
# Create a tar wrapper that filters all the 'Ignoring unknown extended header keyword' noise
tarWrapper = runCommand "tarWrapper" {} ''
mkdir -p $out/bin
cat > $out/bin/tar <<EOF
#! ${stdenv.shell} -e
$(type -p tar) "\$@" --warning=no-unknown-keyword --delay-directory-restore
EOF
chmod +x $out/bin/tar
'';
# Function that generates a TGZ file from a NPM project
buildNodeSourceDist =
{ name, version, src, ... }:
stdenv.mkDerivation {
name = "node-tarball-${name}-${version}";
inherit src;
buildInputs = [ nodejs ];
buildPhase = ''
export HOME=$TMPDIR
tgzFile=$(npm pack | tail -n 1) # Hooks to the pack command will add output (https://docs.npmjs.com/misc/scripts)
'';
installPhase = ''
mkdir -p $out/tarballs
mv $tgzFile $out/tarballs
mkdir -p $out/nix-support
echo "file source-dist $out/tarballs/$tgzFile" >> $out/nix-support/hydra-build-products
'';
};
# Common shell logic
installPackage = writeShellScript "install-package" ''
installPackage() {
local packageName=$1 src=$2
local strippedName
local DIR=$PWD
cd $TMPDIR
unpackFile $src
# Make the base dir in which the target dependency resides first
mkdir -p "$(dirname "$DIR/$packageName")"
if [ -f "$src" ]
then
# Figure out what directory has been unpacked
packageDir="$(find . -maxdepth 1 -type d | tail -1)"
# Restore write permissions to make building work
find "$packageDir" -type d -exec chmod u+x {} \;
chmod -R u+w "$packageDir"
# Move the extracted tarball into the output folder
mv "$packageDir" "$DIR/$packageName"
elif [ -d "$src" ]
then
# Get a stripped name (without hash) of the source directory.
# On old nixpkgs it's already set internally.
if [ -z "$strippedName" ]
then
strippedName="$(stripHash $src)"
fi
# Restore write permissions to make building work
chmod -R u+w "$strippedName"
# Move the extracted directory into the output folder
mv "$strippedName" "$DIR/$packageName"
fi
# Change to the package directory to install dependencies
cd "$DIR/$packageName"
}
'';
# Bundle the dependencies of the package
#
# Only include dependencies if they don't exist. They may also be bundled in the package.
includeDependencies = {dependencies}:
lib.optionalString (dependencies != []) (
''
mkdir -p node_modules
cd node_modules
''
+ (lib.concatMapStrings (dependency:
''
if [ ! -e "${dependency.packageName}" ]; then
${composePackage dependency}
fi
''
) dependencies)
+ ''
cd ..
''
);
# Recursively composes the dependencies of a package
composePackage = { name, packageName, src, dependencies ? [], ... }@args:
builtins.addErrorContext "while evaluating node package '${packageName}'" ''
installPackage "${packageName}" "${src}"
${includeDependencies { inherit dependencies; }}
cd ..
${lib.optionalString (builtins.substring 0 1 packageName == "@") "cd .."}
'';
pinpointDependencies = {dependencies, production}:
let
pinpointDependenciesFromPackageJSON = writeTextFile {
name = "pinpointDependencies.js";
text = ''
var fs = require('fs');
var path = require('path');
function resolveDependencyVersion(location, name) {
if(location == process.env['NIX_STORE']) {
return null;
} else {
var dependencyPackageJSON = path.join(location, "node_modules", name, "package.json");
if(fs.existsSync(dependencyPackageJSON)) {
var dependencyPackageObj = JSON.parse(fs.readFileSync(dependencyPackageJSON));
if(dependencyPackageObj.name == name) {
return dependencyPackageObj.version;
}
} else {
return resolveDependencyVersion(path.resolve(location, ".."), name);
}
}
}
function replaceDependencies(dependencies) {
if(typeof dependencies == "object" && dependencies !== null) {
for(var dependency in dependencies) {
var resolvedVersion = resolveDependencyVersion(process.cwd(), dependency);
if(resolvedVersion === null) {
process.stderr.write("WARNING: cannot pinpoint dependency: "+dependency+", context: "+process.cwd()+"\n");
} else {
dependencies[dependency] = resolvedVersion;
}
}
}
}
/* Read the package.json configuration */
var packageObj = JSON.parse(fs.readFileSync('./package.json'));
/* Pinpoint all dependencies */
replaceDependencies(packageObj.dependencies);
if(process.argv[2] == "development") {
replaceDependencies(packageObj.devDependencies);
}
replaceDependencies(packageObj.optionalDependencies);
/* Write the fixed package.json file */
fs.writeFileSync("package.json", JSON.stringify(packageObj, null, 2));
'';
};
in
''
node ${pinpointDependenciesFromPackageJSON} ${if production then "production" else "development"}
${lib.optionalString (dependencies != [])
''
if [ -d node_modules ]
then
cd node_modules
${lib.concatMapStrings (dependency: pinpointDependenciesOfPackage dependency) dependencies}
cd ..
fi
''}
'';
# Recursively traverses all dependencies of a package and pinpoints all
# dependencies in the package.json file to the versions that are actually
# being used.
pinpointDependenciesOfPackage = { packageName, dependencies ? [], production ? true, ... }@args:
''
if [ -d "${packageName}" ]
then
cd "${packageName}"
${pinpointDependencies { inherit dependencies production; }}
cd ..
${lib.optionalString (builtins.substring 0 1 packageName == "@") "cd .."}
fi
'';
# Extract the Node.js source code which is used to compile packages with
# native bindings
nodeSources = runCommand "node-sources" {} ''
tar --no-same-owner --no-same-permissions -xf ${nodejs.src}
mv node-* $out
'';
# Script that adds _integrity fields to all package.json files to prevent NPM from consulting the cache (that is empty)
# Walks package-lock.json (v1/v2 only) and stamps _integrity/_resolved/_from
# into each installed package's package.json. Run from the package root.
addIntegrityFieldsScript = writeTextFile {
  name = "addintegrityfields.js";
  text = ''
    var fs = require('fs');
    var path = require('path');

    function augmentDependencies(baseDir, dependencies) {
        for(var dependencyName in dependencies) {
            var dependency = dependencies[dependencyName];

            // Open package.json and augment metadata fields
            var packageJSONDir = path.join(baseDir, "node_modules", dependencyName);
            var packageJSONPath = path.join(packageJSONDir, "package.json");

            if(fs.existsSync(packageJSONPath)) { // Only augment packages that exist. Sometimes we may have production installs in which development dependencies can be ignored
                console.log("Adding metadata fields to: "+packageJSONPath);
                var packageObj = JSON.parse(fs.readFileSync(packageJSONPath));

                if(dependency.integrity) {
                    packageObj["_integrity"] = dependency.integrity;
                } else {
                    packageObj["_integrity"] = "sha1-000000000000000000000000000="; // When no _integrity string has been provided (e.g. by Git dependencies), add a dummy one. It does not seem to harm and it bypasses downloads.
                }

                if(dependency.resolved) {
                    packageObj["_resolved"] = dependency.resolved; // Adopt the resolved property if one has been provided
                } else {
                    packageObj["_resolved"] = dependency.version; // Set the resolved version to the version identifier. This prevents NPM from cloning Git repositories.
                }

                if(dependency.from !== undefined) { // Adopt from property if one has been provided
                    packageObj["_from"] = dependency.from;
                }

                fs.writeFileSync(packageJSONPath, JSON.stringify(packageObj, null, 2));
            }

            // Augment transitive dependencies
            if(dependency.dependencies !== undefined) {
                augmentDependencies(packageJSONDir, dependency.dependencies);
            }
        }
    }

    if(fs.existsSync("./package-lock.json")) {
        var packageLock = JSON.parse(fs.readFileSync("./package-lock.json"));

        if(![1, 2].includes(packageLock.lockfileVersion)) {
           process.stderr.write("Sorry, I only understand lock file versions 1 and 2!\n");
           process.exit(1);
        }

        if(packageLock.dependencies !== undefined) {
            augmentDependencies(".", packageLock.dependencies);
        }
    }
  '';
};
# Reconstructs a package-lock file from the node_modules/ folder structure and package.json files with dummy sha1 hashes
# NOTE(review): this file is vendored node2nix output; manual fixes here are
# overwritten on regeneration.
reconstructPackageLock = writeTextFile {
  # was "addintegrityfields.js" — a copy-paste from the script above; give
  # this derivation its own name so store paths are distinguishable
  name = "reconstructpackagelock.js";
  text = ''
    var fs = require('fs');
    var path = require('path');

    var packageObj = JSON.parse(fs.readFileSync("package.json"));

    var lockObj = {
      name: packageObj.name,
      version: packageObj.version,
      lockfileVersion: 1,
      requires: true,
      dependencies: {}
    };

    function augmentPackageJSON(filePath, dependencies) {
      var packageJSON = path.join(filePath, "package.json");
      if(fs.existsSync(packageJSON)) {
        var packageObj = JSON.parse(fs.readFileSync(packageJSON));
        dependencies[packageObj.name] = {
          version: packageObj.version,
          integrity: "sha1-000000000000000000000000000=",
          dependencies: {}
        };
        processDependencies(path.join(filePath, "node_modules"), dependencies[packageObj.name].dependencies);
      }
    }

    function processDependencies(dir, dependencies) {
      if(fs.existsSync(dir)) {
        var files = fs.readdirSync(dir);
        files.forEach(function(entry) {
          var filePath = path.join(dir, entry);
          var stats = fs.statSync(filePath);
          if(stats.isDirectory()) {
            if(entry.substr(0, 1) == "@") {
              // When we encounter a namespace folder, augment all packages belonging to the scope
              var pkgFiles = fs.readdirSync(filePath);
              pkgFiles.forEach(function(entry) {
                // Bugfix: stat the scoped entry itself — the original tested the
                // outer folder's `stats`, which is always a directory at this point
                var pkgFilePath = path.join(filePath, entry);
                if(fs.statSync(pkgFilePath).isDirectory()) {
                  augmentPackageJSON(pkgFilePath, dependencies);
                }
              });
            } else {
              augmentPackageJSON(filePath, dependencies);
            }
          }
        });
      }
    }

    processDependencies("node_modules", lockObj.dependencies);

    fs.writeFileSync("package-lock.json", JSON.stringify(lockObj, null, 2));
  '';
};
# Shell snippet that pinpoints dependency versions and runs `npm rebuild` /
# `npm install` inside an already-composed package directory, with the
# network effectively disabled so Nix stays the sole provider of packages.
prepareAndInvokeNPM = {packageName, bypassCache, reconstructLock, npmFlags, production}:
  let
    # --offline when we bypass npm's cache; otherwise point the registry at a
    # non-existent host so any accidental download attempt fails loudly
    forceOfflineFlag = if bypassCache then "--offline" else "--registry http://www.example.com";
  in
  ''
    # Pinpoint the versions of all dependencies to the ones that are actually being used
    echo "pinpointing versions of dependencies..."
    source $pinpointDependenciesScriptPath

    # Patch the shebangs of the bundled modules to prevent them from
    # calling executables outside the Nix store as much as possible
    patchShebangs .

    # Deploy the Node.js package by running npm install. Since the
    # dependencies have been provided already by ourselves, it should not
    # attempt to install them again, which is good, because we want to make
    # it Nix's responsibility. If it needs to install any dependencies
    # anyway (e.g. because the dependency parameters are
    # incomplete/incorrect), it fails.
    #
    # The other responsibilities of NPM are kept -- version checks, build
    # steps, postprocessing etc.

    export HOME=$TMPDIR
    cd "${packageName}"
    runHook preRebuild

    ${lib.optionalString bypassCache ''
      ${lib.optionalString reconstructLock ''
        if [ -f package-lock.json ]
        then
            echo "WARNING: Reconstruct lock option enabled, but a lock file already exists!"
            echo "This will most likely result in version mismatches! We will remove the lock file and regenerate it!"
            rm package-lock.json
        else
            echo "No package-lock.json file found, reconstructing..."
        fi

        node ${reconstructPackageLock}
      ''}

      node ${addIntegrityFieldsScript}
    ''}

    npm ${forceOfflineFlag} --nodedir=${nodeSources} ${npmFlags} ${lib.optionalString production "--production"} rebuild

    if [ "''${dontNpmInstall-}" != "1" ]
    then
        # NPM tries to download packages even when they already exist if npm-shrinkwrap is used.
        rm -f npm-shrinkwrap.json

        npm ${forceOfflineFlag} --nodedir=${nodeSources} ${npmFlags} ${lib.optionalString production "--production"} install
    fi
  '';
# Builds and composes an NPM package including all its dependencies
# Produces $out/lib/node_modules/<packageName> plus bin/man symlinks.
# Extra attributes in `args` are forwarded verbatim to mkDerivation.
buildNodePackage =
  { name
  , packageName
  , version ? null
  , dependencies ? []
  , buildInputs ? []
  , production ? true # if true, npm is invoked with --production (skips devDependencies)
  , npmFlags ? ""
  , dontNpmInstall ? false
  , bypassCache ? false
  , reconstructLock ? false
  , preRebuild ? ""
  , dontStrip ? true
  , unpackPhase ? "true"
  , buildPhase ? "true"
  , meta ? {}
  , ... }@args:

  let
    # everything not consumed here is passed straight through to mkDerivation
    extraArgs = removeAttrs args [ "name" "dependencies" "buildInputs" "dontStrip" "dontNpmInstall" "preRebuild" "unpackPhase" "buildPhase" "meta" ];
  in
  stdenv.mkDerivation ({
    name = "${name}${if version == null then "" else "-${version}"}";
    buildInputs = [ tarWrapper python nodejs ]
      ++ lib.optional (stdenv.isLinux) utillinux
      ++ lib.optional (stdenv.isDarwin) libtool
      ++ buildInputs;

    inherit nodejs;

    inherit dontStrip; # Stripping may fail a build for some package deployments
    inherit dontNpmInstall preRebuild unpackPhase buildPhase;

    compositionScript = composePackage args;
    pinpointDependenciesScript = pinpointDependenciesOfPackage args;

    # expose the generated scripts as files, referenced via *Path variables
    passAsFile = [ "compositionScript" "pinpointDependenciesScript" ];

    installPhase = ''
      source ${installPackage}

      # Create and enter a root node_modules/ folder
      mkdir -p $out/lib/node_modules
      cd $out/lib/node_modules

      # Compose the package and all its dependencies
      source $compositionScriptPath

      ${prepareAndInvokeNPM { inherit packageName bypassCache reconstructLock npmFlags production; }}

      # Create symlink to the deployed executable folder, if applicable
      if [ -d "$out/lib/node_modules/.bin" ]
      then
          ln -s $out/lib/node_modules/.bin $out/bin

          # Patch the shebang lines of all the executables
          ls $out/bin/* | while read i
          do
              file="$(readlink -f "$i")"
              chmod u+rwx "$file"
              patchShebangs "$file"
          done
      fi

      # Create symlinks to the deployed manual page folders, if applicable
      if [ -d "$out/lib/node_modules/${packageName}/man" ]
      then
          mkdir -p $out/share
          for dir in "$out/lib/node_modules/${packageName}/man/"*
          do
              mkdir -p $out/share/man/$(basename "$dir")
              for page in "$dir"/*
              do
                  ln -s $page $out/share/man/$(basename "$dir")
              done
          done
      fi

      # Run post install hook, if provided
      runHook postInstall
    '';

    meta = {
      # default to Node.js' platforms
      platforms = nodejs.meta.platforms;
    } // meta;
  } // extraArgs);
# Builds a node environment (a node_modules folder and a set of binaries)
# Unlike buildNodePackage this only deploys the dependency closure of `src`'s
# package.json; the resulting tree is meant for development shells.
buildNodeDependencies =
  { name
  , packageName
  , version ? null
  , src
  , dependencies ? []
  , buildInputs ? []
  , production ? true
  , npmFlags ? ""
  , dontNpmInstall ? false
  , bypassCache ? false
  , reconstructLock ? false
  , dontStrip ? true
  , unpackPhase ? "true"
  , buildPhase ? "true"
  , ... }@args:

  let
    # remaining attributes are forwarded verbatim to mkDerivation
    extraArgs = removeAttrs args [ "name" "dependencies" "buildInputs" ];
  in
    stdenv.mkDerivation ({
      name = "node-dependencies-${name}${if version == null then "" else "-${version}"}";

      buildInputs = [ tarWrapper python nodejs ]
        ++ lib.optional (stdenv.isLinux) utillinux
        ++ lib.optional (stdenv.isDarwin) libtool
        ++ buildInputs;

      inherit dontStrip; # Stripping may fail a build for some package deployments
      inherit dontNpmInstall unpackPhase buildPhase;

      includeScript = includeDependencies { inherit dependencies; };
      pinpointDependenciesScript = pinpointDependenciesOfPackage args;

      # expose the generated scripts as files, referenced via *Path variables
      passAsFile = [ "includeScript" "pinpointDependenciesScript" ];

      installPhase = ''
        source ${installPackage}

        mkdir -p $out/${packageName}
        cd $out/${packageName}

        source $includeScriptPath

        # Create fake package.json to make the npm commands work properly
        cp ${src}/package.json .
        chmod 644 package.json
        ${lib.optionalString bypassCache ''
          if [ -f ${src}/package-lock.json ]
          then
              cp ${src}/package-lock.json .
              chmod 644 package-lock.json
          fi
        ''}

        # Go to the parent folder to make sure that all packages are pinpointed
        cd ..
        ${lib.optionalString (builtins.substring 0 1 packageName == "@") "cd .."}

        ${prepareAndInvokeNPM { inherit packageName bypassCache reconstructLock npmFlags production; }}

        # Expose the executables that were installed
        cd ..
        ${lib.optionalString (builtins.substring 0 1 packageName == "@") "cd .."}

        mv ${packageName} lib
        ln -s $out/lib/node_modules/.bin $out/bin
      '';
    } // extraArgs);
# Builds a development shell
# Wraps buildNodeDependencies and exposes the result through NODE_PATH/PATH
# in the shellHook; $out only contains a small launcher script.
buildNodeShell =
  { name
  , packageName
  , version ? null
  , src
  , dependencies ? []
  , buildInputs ? []
  , production ? true
  , npmFlags ? ""
  , dontNpmInstall ? false
  , bypassCache ? false
  , reconstructLock ? false
  , dontStrip ? true
  , unpackPhase ? "true"
  , buildPhase ? "true"
  , ... }@args:

  let
    nodeDependencies = buildNodeDependencies args;
    # remaining attributes are forwarded verbatim to mkDerivation
    extraArgs = removeAttrs args [ "name" "dependencies" "buildInputs" "dontStrip" "dontNpmInstall" "unpackPhase" "buildPhase" ];
  in
  stdenv.mkDerivation ({
    name = "node-shell-${name}${if version == null then "" else "-${version}"}";

    buildInputs = [ python nodejs ] ++ lib.optional (stdenv.isLinux) utillinux ++ buildInputs;
    buildCommand = ''
      mkdir -p $out/bin
      cat > $out/bin/shell <<EOF
      #! ${stdenv.shell} -e
      $shellHook
      exec ${stdenv.shell}
      EOF
      chmod +x $out/bin/shell
    '';

    # Provide the dependencies in a development shell through the NODE_PATH environment variable
    inherit nodeDependencies;

    shellHook = lib.optionalString (dependencies != []) ''
      export NODE_PATH=${nodeDependencies}/lib/node_modules
      export PATH="${nodeDependencies}/bin:$PATH"
    '';
  } // extraArgs);
in
{
  # Public entry points; makeOverridable lets callers tweak arguments via .override
  buildNodeSourceDist = lib.makeOverridable buildNodeSourceDist;
  buildNodePackage = lib.makeOverridable buildNodePackage;
  buildNodeDependencies = lib.makeOverridable buildNodeDependencies;
  buildNodeShell = lib.makeOverridable buildNodeShell;
}
File diff suppressed because it is too large Load Diff
+22
View File
@@ -0,0 +1,22 @@
#!/usr/bin/env nix-shell
#!nix-shell -i bash -p nodePackages.node2nix nix
# Regenerate the node2nix expressions (node-packages.nix / node-composition.nix)
# for Kukkee from the package[-lock].json of the package's fetched source.
set -euo pipefail
set -x
cd "$(dirname "${BASH_SOURCE[0]}")"

# Build only the .src attribute so we can read its package.json / lock file.
#store_src="$(nix-build . -A kukkee.src --no-out-link)"
store_src="$(nix-build -E 'with import <nixpkgs> {}; (callPackage ./default.nix {}).src' --no-out-link)"

node2nix \
	--nodejs-14 \
	--development \
	--input "$store_src"/package.json \
	--lock "$store_src"/package-lock.json \
	--output ./node-packages.nix \
	--composition ./node-composition.nix

#	--node-env ../../development/node-packages/node-env.nix \
+17
View File
@@ -0,0 +1,17 @@
#!/usr/bin/env nix-shell
#!nix-shell -i bash -p common-updater-scripts curl jq gnused nix coreutils
# Query the latest Kukkee release tag on GitHub.
# NOTE(review): the actual version-bump steps below are commented out, so right
# now this only fetches the tag (shown via `set -x`) and exits without using it.
set -euo pipefail
set -x
#cd "$(dirname "${BASH_SOURCE[0]}")"/../../..

latestVersion="$(curl -s "https://api.github.com/repos/AnandBaburajan/Kukkee/releases?per_page=1" | jq -r ".[0].tag_name")"

#currentVersion=$(nix-instantiate --eval -E "with import ./. {}; kukkee.version or (lib.getVersion kukkee)" | tr -d '"')
#if [[ "$currentVersion" == "$latestVersion" ]]; then
#    echo "Package is up-to-date: $currentVersion"
#    exit 0
#fi
#update-source-version kukkee "$latestVersion"
+9
View File
@@ -0,0 +1,9 @@
#!/usr/bin/env bash
# Full update: bump the package version, then regenerate the node2nix deps.
set -euo pipefail
cd "$(dirname "${BASH_SOURCE[0]}")"
./update-version.sh
./update-node-deps.sh
+25
View File
@@ -0,0 +1,25 @@
{ config, pkgs, lib, mkDomain, ... }:
let
  cfg = config.services.libreddit;
in
{
  # Libreddit
  # Private front-end for Reddit
  services.libreddit = {
    enable = true;
    address = "127.0.0.1";
    port = 4876;
  };

  # https://github.com/spikecodes/libreddit#change-default-settings=
  # TODO: merge my module addition
  systemd.services.libreddit.environment = lib.mkIf cfg.enable {
    LIBREDDIT_DEFAULT_THEME = "gold";
  };

  services.nginx.virtualHosts.${mkDomain "libreddit"} = lib.mkIf cfg.enable {
    forceSSL = true; # addSSL = true;
    enableACME = true; #useACMEHost = acmeDomain;
    locations."/" = {
      proxyPass = "http://127.0.0.1:${toString cfg.port}";
      proxyWebsockets = true;
    };
  };
}
+21
View File
@@ -0,0 +1,21 @@
{ config, pkgs, lib, mkDomain, ... }:
{
  # links.pbsds.net
  # Static links page, served directly out of a pinned checkout of the repo.
  services.nginx.virtualHosts."links.pbsds.net" =
    let
      src = pkgs.fetchFromGitea {
        name = "links.pbsds.net";
        domain = "gitea.noximilien.pbsds.net";
        owner = "pbsds";
        repo = "links.pbsds.net";
        rev = "61cd605f198a22db87af087fda34c378b03d4306";
        hash = "sha256-dx19aTy8K9xkL+cO4r4huYKrlVBZMUkDcbjqxLKe8W4=";
      };
    in
    {
      #serverAliases = map mkDomain [ "links" ];
      forceSSL = true; # addSSL = true;
      enableACME = true; #useACMEHost = acmeDomain;
      root = "${src}";
    };
}
+60
View File
@@ -0,0 +1,60 @@
{ config, pkgs, lib, mkDomain, ... }:
{
  # matrix-synapse
  # Matrix homeserver, reverse-proxied through nginx.
  /**/
  services.matrix-synapse = {
    #enable = true;
    settings = {
      # Fix: the original line lacked the terminating semicolon (syntax error)
      server_name = config.networking.domain;
      # Fix: public_baseurl must be a full base URL, not a bare hostname
      public_baseurl = "https://${mkDomain "matrix"}/";
      url_preview_enabled = false;
      max_upload_size = "100M";
      trusted_key_servers = [
        { server_name = "matrix.org"; }
        { server_name = "dodsorf.as"; }
        { server_name = "pvv.ntnu.no"; }
      ];
      listeners = [
        {
          bind_addresses = [ "127.0.0.1" ];
          port = 8008;
          resources = [
            { compress = true;  names = [ "client" ]; }
            { compress = false; names = [ "federation" ]; }
          ];
          tls = false;
          type = "http";
          x_forwarded = true;
        }
      ];
    };
  };

  services.nginx.virtualHosts.${mkDomain "matrix"} =
    let
      # Fixes vs. original: option path is services.matrix-synapse (not
      # "matrix-synaps"), listeners/max_upload_size live under .settings, and
      # builtins.elemAt takes (list, index) — the arguments were reversed.
      cfg = config.services.matrix-synapse;
      listenerPort = (builtins.elemAt cfg.settings.listeners 0).port;
    in
    lib.mkIf cfg.enable {
      forceSSL = true; # addSSL = true;
      enableACME = true; #useACMEHost = acmeDomain;
      locations."/_matrix" = {
        proxyPass = "http://127.0.0.1:${toString listenerPort}";
        #proxyWebsockets = true;
        extraConfig = ''
          client_max_body_size ${cfg.settings.max_upload_size};
        '';
      };
      locations."/_synapse/client" = {
        proxyPass = "http://127.0.0.1:${toString listenerPort}/_synapse/client";
        #proxyWebsockets = true;
      };
    };
  /**/
}
+34
View File
@@ -0,0 +1,34 @@
{ config, pkgs, lib, mkDomain, ... }:
let
  cfg = config.services.mattermost;
in
{
  # Mattermost
  # Open-source, self-hosted Slack-alternative
  services.mattermost = {
    enable = true;
    # will create and use a psql db
    listenAddress = "[::1]:8065";
    siteName = "Spis meg";
    siteUrl = "https://${mkDomain "mattermost"}";
    #mutableConfig = true; # default is false, if true, see also "preferNixConfig"
    extraConfig = {
      # https://docs.mattermost.com/configure/configuration-settings.html#reporting
      # TODO: smtp
    };
    matterircd = {
      #enable = true; # default is false
      # NOTE(review): "chat.example.com" looks like a placeholder — confirm before enabling
      parameters = [
        "-mmserver chat.example.com"
        "-bind [::]:6667"
      ];
    };
  };

  services.nginx.virtualHosts.${mkDomain "mattermost"} = lib.mkIf cfg.enable {
    forceSSL = true; # addSSL = true;
    enableACME = true; #useACMEHost = acmeDomain;
    locations."/" = {
      proxyPass = "http://${cfg.listenAddress}";
      proxyWebsockets = true;
    };
  };
}
+33
View File
@@ -0,0 +1,33 @@
{ config, pkgs, lib, mkDomain, ... }:
let
  cfg = config.services.navidrome;
in
{
  # Navidrome
  # Music Server and Streamer compatible with Subsonic/Airsonic
  services.navidrome = {
    enable = true;
    settings = {
      # default hostname:port = "127.0.0.1:4533"
      MusicFolder = "/mnt/reidun/Music/Albums";
      #MusicFolder = pkgs.linkFarm "navidrome-music-library" [
      #  { name = "Albums"; path = "/mnt/reidun/Music/Albums"; }
      #  { name = "OST"; path = "/mnt/reidun/Music/OST"; }
      #  { name = "dojin.co"; path = "/mnt/reidun/Music/dojin.co"; }
      #  { name = "Touhou"; path = "/mnt/reidun/Music/Touhou"; }
      #  { name = "Kancolle"; path = "/mnt/reidun/Music/Kancolle"; }
      #  { name = "Vocaloid"; path = "/mnt/reidun/Music/Vocaloid"; }
      #];
      UIWelcomeMessage = "Spis meg";
      DefaultTheme = "Spotify-ish";
    };
  };

  services.nginx.virtualHosts.${mkDomain "navidrome"} = lib.mkIf cfg.enable {
    forceSSL = true; # addSSL = true;
    enableACME = true; #useACMEHost = acmeDomain;
    locations."/" = {
      # navidrome's default listen address (no Port/Address set above)
      proxyPass = "http://127.0.0.1:4533";
      proxyWebsockets = true;
    };
  };
}
+25
View File
@@ -0,0 +1,25 @@
{ config, pkgs, lib, mkDomain, ... }:
let
  cfg = config.services.netdata;
in
{
  # Netdata
  # Real-time performance monitoring tool
  services.netdata = {
    enable = true;
    #python.enable = false; # default is true
    #python.extraPackages = ps: [];
    #config = { # https://github.com/netdata/netdata/blob/master/daemon/config/README.md
    #  hostname = "";
    #  port = 19999;
    #};
    #configDir = {};
    #extraPluginPaths = [];
  };

  services.nginx.virtualHosts.${mkDomain "netdata"} = lib.mkIf cfg.enable {
    forceSSL = true; # addSSL = true;
    enableACME = true; #useACMEHost = acmeDomain;
    locations."/" = {
      # netdata's default web port (no port is set in the config above)
      proxyPass = "http://127.0.0.1:19999";
      proxyWebsockets = true;
    };
  };
}
+45
View File
@@ -0,0 +1,45 @@
{ config, pkgs, lib, mkDomain, ... }:
let
  cfg = config.services.nitter;
in
{
  # Nitter
  # Alternative Twitter front-end
  services.nitter = {
    enable = true;
    package = pkgs.unstable.nitter;
    #openFirewall
    #config.base64Media = false; # Use base64 encoding for proxied media URLs.

    server = {
      title = "Pjitter";
      address = "127.0.0.1";
      hostname = mkDomain "nitter";
      https = true; # Secure cookies
      port = 4965;
    };

    preferences = {
      #autoplayGifs = ; # default is true
      #bidiSupport = ; # Support bidirectional text (makes clicking on tweets harder). default is false
      #hideBanner = ; # Hide profile banner. default is false
      #hidePins = ; # Hide pinned tweets. default is false
      #hideReplies = ; # Hide tweet replies. default is false
      #hideTweetStats = ; # Hide tweet stats (replies, retweets, likes). default is false
      hlsPlayback = true; # Enable HLS video streaming (requires JavaScript). default is false
      infiniteScroll = true; # Infinite scrolling (requires JavaScript, experimental!). default is false
      #mp4Playback = ; # Enable MP4 video playback. default is true
      #muteVideos = ; # Mute videos by default. default is false
      #proxyVideos = ; # Proxy video streaming through the server (might be slow). default is true
      replaceInstagram = "bibliogram.art"; # Replace Instagram links with links to this instance (blank to disable). default is ""
      replaceTwitter = mkDomain "nitter"; # Replace Twitter links with links to this instance (blank to disable). default is ""
      replaceYouTube = lib.mkIf config.services.invidious.enable (mkDomain "invidious"); # Replace YouTube links with links to this instance (blank to disable). default is ""
      #stickyProfile = ; # Make profile sidebar stick to top. default is true
      theme = "Twitter Dark"; # Instance theme. default is "Nitter"
    };

    # NOTE(review): set through `settings` (raw [Preferences] section) rather
    # than `preferences` — presumably because the module lacks a replaceReddit
    # option; confirm against the module's option list.
    settings = lib.mkIf config.services.libreddit.enable {
      Preferences.replaceReddit = (mkDomain "libreddit"); # Replace Reddit links with links to this instance (blank to disable). default is ""
    };
  };

  services.nginx.virtualHosts.${mkDomain "nitter"} = lib.mkIf cfg.enable {
    forceSSL = true; # addSSL = true;
    enableACME = true; #useACMEHost = acmeDomain;
    locations."/" = {
      proxyPass = "http://127.0.0.1:${toString cfg.server.port}";
      proxyWebsockets = true;
    };
  };
}
+24
View File
@@ -0,0 +1,24 @@
{ config, pkgs, lib, mkDomain, ... }:
let
  cfg = config.services.ntopng;
in
{
  # ntopng
  # High-speed web-based traffic analysis and flow collection tool
  # WARNING: default username and password is admin:admin
  services.ntopng = {
    enable = true; # also enables redis for persistent data storage
    httpPort = 3987; # HTTP port of embedded web server
    #interfaces = [ "any" ];
    #extraConfig = ";
    #redis.address = "";
    #redis.createInstance = "ntopng";
  };

  services.nginx.virtualHosts.${mkDomain "ntopng"} = lib.mkIf cfg.enable {
    forceSSL = true; # addSSL = true;
    enableACME = true; #useACMEHost = acmeDomain;
    locations."/" = {
      proxyPass = "http://127.0.0.1:${toString cfg.httpPort}";
      proxyWebsockets = true;
    };
  };
}
+94
View File
@@ -0,0 +1,94 @@
{ config, pkgs, lib, mkDomain, ... }:
{
  # OpenSpeedTest
  # Pure HTML5 Network Performance Estimation Tool
  # NOTE: the entire vhost below is disabled via the `/** /` block-comment
  # toggle (delete the inner space to re-enable).
  # NOTE(review): when re-enabling, the "\." escapes in the regex location
  # collapse to "." in a double-quoted Nix string — they would need "\\.".
  /** /
  services.nginx.virtualHosts.${mkDomain "openspeedtest"} = let
    cfg = config.services.nginx.virtualHosts.${mkDomain "openspeedtest"};
    openspeedtest = pkgs.fetchFromGitHub rec {
      name = "${owner}-unstable-2022-07-02";
      owner = "openspeedtest";
      repo = "Speed-Test";
      #rev = "v${version}";
      rev = "59eb7367ede5555f7516ebb8eeeb65245bc5a6e5";
      sha256 = "yzvulzgBUri+sU9WxZrLKH/T+mlZu9G2zucv8t/fZdY=";
      postFetch = ''
        rm $out/README.md
        rm $out/License.md
        rm $out/.gitignore
        rm $out/hosted.html
      '';
    };
  in {
    forceSSL = true; # addSSL = true;
    enableACME = true; #useACMEHost = acmeDomain;
    http2 = false;
    root = "${openspeedtest}";
    extraConfig = ''
      #access_log off;
      #error_log /dev/null; #Disable this for Windows Nginx.
      #log_not_found off;
      gzip off;
      fastcgi_read_timeout 999;
      server_tokens off;
      tcp_nodelay on;
      tcp_nopush on;
      sendfile on;
      open_file_cache max=200000 inactive=20s;
      open_file_cache_valid 30s;
      open_file_cache_min_uses 2;
      open_file_cache_errors off;
    '';
    locations."/".extraConfig = lib.mkIf false ''
      if_modified_since off;
      expires off;
      etag off;
      if ($request_method != OPTIONS ) {
        add_header 'Access-Control-Allow-Origin' "*" always;
        add_header 'Access-Control-Allow-Headers' 'Accept,Authorization,Cache-Control,Content-Type,DNT,If-Modified-Since,Keep-Alive,Origin,User-Agent,X-Mx-ReqToken,X-Requested-With' always;
        add_header 'Access-Control-Allow-Methods' 'GET, POST, OPTIONS' always;
        #Very Very Important! You SHOULD send no-store from server for Google Chrome.
        add_header 'Cache-Control' 'no-store, no-cache, max-age=0, no-transform';
        add_header 'Last-Modified' $date_gmt;
      }
      if ($request_method = OPTIONS ) {
        add_header 'Access-Control-Allow-Origin' "$http_origin" always;
        add_header 'Access-Control-Allow-Headers' 'Accept,Authorization,Cache-Control,Content-Type,DNT,If-Modified-Since,Keep-Alive,Origin,User-Agent,X-Mx-ReqToken,X-Requested-With' always;
        add_header 'Access-Control-Allow-Methods' "GET, POST, OPTIONS" always;
        add_header 'Access-Control-Allow-Credentials' "true";
        return 204;
      }
    '';
    # IF and Only if you Enabled HTTP2 otherwise never enable the following
    # HTTP2 will return 200 withot waiting for upload to complete. it's smart but we don't need that to happen here when testing upload speed on HTTP2.
    locations."/upload.bin".extraConfig = ''
      #proxy_set_header Host $host;
      proxy_pass http://127.0.0.1:80/upload.bin;
    '';
    locations."~* ^.+\.(?:css|cur|js|jpe?g|gif|htc|ico|png|html|xml|otf|ttf|eot|woff|woff2|svg)$".extraConfig = lib.mkIf false ''
      #access_log off;
      expires 365d;
      add_header 'Cache-Control' public;
      add_header 'Vary' Accept-Encoding;
      tcp_nodelay off;
      open_file_cache max=3000 inactive=120s;
      open_file_cache_valid 45s;
      open_file_cache_min_uses 2;
      open_file_cache_errors off;
      gzip on;
      gzip_disable "msie6";
      gzip_vary on;
      gzip_proxied any;
      gzip_comp_level 6;
      gzip_buffers 16 8k;
      gzip_http_version 1.1;
      gzip_types text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript application/javascript;
    '';
  };
  /**/
}
+24
View File
@@ -0,0 +1,24 @@
{ config, pkgs, lib, mkDomain, ... }:
let
  cfg = config.services.owncast;
in
{
  # OwnCast
  # self-hosted video live streaming solution
  services.owncast = {
    # the default /admin account is admin:abc123, don't enable if you don't intend to change it!
    enable = true;
    port = 3456; # default is 8080
    rtmp-port = 1935; # remember to punch a TCP hole in the NAT
    #listen = "0.0.0.0"; # default is "127.0.0.1"
    openFirewall = true; # the rtmp port, and the http port if listen != "127.0.0.1"
  };

  services.nginx.virtualHosts.${mkDomain "owncast"} = lib.mkIf cfg.enable {
    forceSSL = true; # addSSL = true;
    enableACME = true; #useACMEHost = acmeDomain;
    locations."/" = {
      proxyPass = "http://127.0.0.1:${toString cfg.port}";
      proxyWebsockets = true;
    };
  };
}
+26
View File
@@ -0,0 +1,26 @@
{ config, pkgs, lib, mkDomain, ... }:
let
  cfg = config.services.paperless;
in
{
  # paperless-ngx
  # A supercharged version of paperless: scan, index, and archive all of your physical documents
  services.paperless = {
    enable = true;
    #package = pkgs.paperless-ngx;
    #port = 28981;
    #address = "localhost";
    #passwordfile = null; # file contining the superuser 'admin' password, optionally set with `${datadir}/paperless-manage createsuperuser`
    #datadir = "/var/lib/paperless";
    #mediadir = "${datadir}/media";
    #consumptiondir = "${datadir}/consume"; # Directory from which new documents are imported. (TODO: zotero)
    #extraconfig = {};
    #consumptiondirispublic = false; # Whether all users can write to the consumption dir
  };

  services.nginx.virtualHosts.${mkDomain "paperless"} = lib.mkIf cfg.enable {
    forceSSL = true; # addSSL = true;
    enableACME = true; #useACMEHost = acmeDomain;
    locations."/" = {
      proxyPass = "http://127.0.0.1:${toString cfg.port}";
      proxyWebsockets = true;
    };
  };
}
@@ -1,10 +1,6 @@
{ config, pkgs, ... }:
{ config, pkgs, lib, mkDomain, ... }:
let
lib = pkgs.lib;
domain = "${config.networking.hostName}.${config.networking.domain}";
mkDomain = subname: "${subname}.${domain}";
# pdoc data
pdoc-builtins = [
"builtins"
+53
View File
@@ -0,0 +1,53 @@
{ config, pkgs, lib, mkDomain, ... }:
{
  # Polaris
  # Self-host your music collection, and access it from any computer and mobile device
  /**/
  # NOTE(review): absolute paths into /home/pbsds/repos make this module
  # host-specific; fine as a local dev override, but it won't evaluate elsewhere.
  disabledModules = [
    "services/misc/polaris.nix"
  ];
  imports = [
    /home/pbsds/repos/nixpkgs/polaris-14/nixos/modules/services/misc/polaris.nix
  ];
  nixpkgs.overlays = [
    (final: prev: {
      polaris = prev.callPackage /home/pbsds/repos/nixpkgs/polaris-14/pkgs/servers/polaris { };
      polaris-web = prev.callPackage /home/pbsds/repos/nixpkgs/polaris-14/pkgs/servers/polaris/web.nix { };
    })
  ];
  /**/

  services.polaris = {
    enable = true;
    #user = "pbsds";
    #group = "users";
    port = 7890;
    #package = pkgs.unstable.polaris; # instead of my overlay, TODO: move that overlay here
    settings = {
      settings.reindex_every_n_seconds = 7*24*60*60; # weekly, default is 1800, i.e. hourly
      # Fix: in a double-quoted Nix string "\." collapses to ".", so the regex
      # dots were unescaped (matching any character); "\\." produces the
      # intended literal "\." in the pattern.
      settings.album_art_pattern =
        "([Cc]over|COVER|[Ff]older|FOLDER|[Ff]ront|FRONT)\\.(jpeg|JPEG|jpg|JPG|png|PNG|bmp|BMP|gif|GIF)";
        #"(?i)(cover|folder|front)\\.(jpeg|jpg|png|bmp|gif)";
      mount_dirs = [
        { name = "Albums"; source = "/mnt/reidun/Music/Albums"; }
        { name = "dojin.co"; source = "/mnt/reidun/Music/dojin.co"; }
        { name = "Vocaloid"; source = "/mnt/reidun/Music/Vocaloid"; }
        { name = "Touhou"; source = "/mnt/reidun/Music/Touhou"; }
        { name = "OST"; source = "/mnt/reidun/Music/OST"; }
        { name = "Kancolle"; source = "/mnt/reidun/Music/Kancolle"; }
        { name = "Downloads"; source = "/mnt/reidun/Downloads/music"; }
      ];
    };
  };

  services.nginx.virtualHosts.${mkDomain "polaris"} = lib.mkIf config.services.polaris.enable {
    forceSSL = true; # addSSL = true;
    enableACME = true; #useACMEHost = acmeDomain;
    locations."/" = {
      proxyPass = "http://127.0.0.1:${toString config.services.polaris.port}";
      proxyWebsockets = true;
    };
  };
}
+20
View File
@@ -0,0 +1,20 @@
{ config, pkgs, lib, mkDomain, ... }:
{
  # refleksjon.no
  # Static site served from a pinned checkout of the repo.
  services.nginx.virtualHosts.${mkDomain "refleksjon"} =
    let
      src = pkgs.fetchFromGitea {
        name = "refleksjon.net";
        domain = "gitea.noximilien.pbsds.net";
        owner = "pbsds";
        repo = "refleksjon.net";
        rev = "c1b91e369bf411e44534334595d4481cb59bd129";
        sha256 = "O+lNqD2LuESKM+S+AljF2SzIxzK05xdZqiLhylTQ2ls=";
      };
    in
    {
      forceSSL = true; # addSSL = true;
      enableACME = true; #useACMEHost = acmeDomain;
      root = "${src}/www.refleksjon.net";
    };
}
+31
View File
@@ -0,0 +1,31 @@
{ config, pkgs, lib, mkDomain, ... }:
{
  # Resilio Sync
  # Automatically sync files via secure, distributed technology
  services.resilio = {
    #enable = true;
    #downloadLimit = 0;
    #uploadLimit = 0;
    #directoryRoot = "/media" # Default directory to add folders in the web UI.
    #storagePath = "/var/lib/resilio-sync/"; # Where BitTorrent Sync will store it's database files
    httpLogin = "";
    httpPass = "";
    deviceName = config.networking.hostName;
    #apiKey = ; # API key, which enables the developer API.
    #httpListenPort = 9000;
    #httpListenAddr = "[::1]";
    enableWebUI = false; # default is false
  };

  services.nginx.virtualHosts.${mkDomain "resilio"} = let
    cfg = config.services.resilio;
  in lib.mkIf (cfg.enable && cfg.enableWebUI) {
    forceSSL = true; # addSSL = true;
    enableACME = true; #useACMEHost = acmeDomain;
    locations."/" = {
      # Fix: httpListenPort is an integer option; interpolating it directly
      # is a Nix evaluation error — it must go through toString.
      proxyPass = "http://127.0.0.1:${toString cfg.httpListenPort}";
      proxyWebsockets = true;
    };
  };
}
+21
View File
@@ -0,0 +1,21 @@
{ config, pkgs, lib, mkDomain, ... }:
{
  # roroslyd.no
  # Static site served from a pinned checkout of the repo.
  services.nginx.virtualHosts.${mkDomain "roroslyd"} =
    let
      src = pkgs.fetchFromGitea {
        name = "roroslyd.no";
        domain = "gitea.noximilien.pbsds.net";
        owner = "pbsds";
        repo = "roroslyd.no";
        #rev = "v${version}";
        rev = "fb7b0a7e70754cf368de7d7c469dabe71b2f1c78";
        sha256 = "Rud5bBUuPgIC5UAGtyuYhUtXhN174UCWDoLUWWc/n6U=";
      };
    in
    {
      forceSSL = true; # addSSL = true;
      enableACME = true; #useACMEHost = acmeDomain;
      root = "${src}/www.roroslyd.no";
    };
}
+47
View File
@@ -0,0 +1,47 @@
{ config, pkgs, lib, mkDomain, ... }:
{
  # Roundcube
  # Open Source Webmail Software
  services.roundcube = {
    enable = true;
    hostName = mkDomain "roundcube";
    plugins = [
      "archive"
      "zipdownload"
      "managesieve"
    ];
    # Raw PHP appended to Roundcube's config.inc.php (branding, the IMAP/SMTP
    # host lists shown in the login dropdown, and the managesieve host).
    extraConfig = ''
      $config['product_name'] = 'Spis meg';
      $config['skin_logo'] = [
        #'elastic:login' => 'https://links.pbsds.net/img/piuy_render.png',
        #'elastic:*[small]' => 'https://links.pbsds.net/img/piuy_render.png',
        'elastic:*' => 'https://links.pbsds.net/img/piuy_render.png',
        #'elastic:*' => 'https://links.pbsds.net/img/nox.png',
      ];
      #$config['blankpage_url'] = '/something.html' # TODO <-
      $config['default_host'] = [
        'tls://imap.fyrkat.no' => 'Fyrkat',
        'tls://imap.pvv.ntnu.no' => 'PVV',
        'tls://imap.nvg.ntnu.no' => 'NVG',
      ];
      $config['smtp_server'] = [
        'imap.fyrkat.no' => 'tls://smtp.fyrkat.no',
        'imap.pvv.ntnu.no' => 'tls://smtp.pvv.ntnu.no',
        'imap.nvg.ntnu.no' => 'tls://smtp.nvg.ntnu.no',
      ];
      # plugins/managesieve/config.inc.php.dist
      $config['managesieve_host'] = 'tls://%h';
    '';
  };
  services.nginx.virtualHosts.${mkDomain "roundcube"} = lib.mkIf config.services.roundcube.enable {
    forceSSL = true; # addSSL = true;
    enableACME = true; #useACMEHost = acmeDomain;
    # Override the skin's default logo by redirecting the asset request.
    locations."/skins/elastic/images/logo.svg" = {
      #alias = "/path/to/file";
      #return = "302 https://links.pbsds.net/img/piuy_render.png";
      return = "302 https://links.pbsds.net/img/nox.png";
    };
  };
}
+51
View File
@@ -0,0 +1,51 @@
{ config, pkgs, lib, mkDomain, ... }:
{
  # Shlink
  # URL shortener with REST API and command line interface
  # manage with https://app.shlink.io/
  # TODO: self-host shlink web client? https://shlink.io/documentation/shlink-web-client/
  # NOTE: everything below is disabled via the `/** /` block-comment toggle
  # (delete the inner space to re-enable the container + vhost + alias).
  /** /
  # data can be destryoed with `nixos-container destroy shlink`
  virtualisation.oci-containers.containers."shlink" = {
    autoStart = true;
    image = "shlinkio/shlink:stable";
    # https://shlink.io/documentation/install-docker-image/
    environment = {
      "DEFAULT_DOMAIN" = mkDomain "shlink";
      "IS_HTTPS_ENABLED" = "true";
      "TIMEZONE" = "Europe/Oslo";
      #"GEOLITE_LICENSE_KEY" = ; # https://shlink.io/documentation/geolite-license-key/
      # TODO: use postgres? default is sqlite3?
    };
    ports = [
      "127.0.0.1:5757:8080/tcp" # webui
    ];
    volumes = [
      "/var/lib/shlink/database.sqlite:/etc/shlink/data/database.sqlite"
      # TODO: where is the sqlite file?
    ];
  };
  systemd.services."create-shlink-volume-dirs" = {
    wantedBy = [ "${config.virtualisation.oci-containers.backend}-shlink.service" ];
    serviceConfig.Type = "oneshot";
    script = ''
      mkdir -p /var/lib/shlink
      touch /var/lib/shlink/database.sqlite
    '';
  };
  services.nginx.virtualHosts.${mkDomain "shlink"} = lib.mkIf config.virtualisation.oci-containers.containers."shlink".autoStart {
    forceSSL = true; # addSSL = true;
    enableACME = true; #useACMEHost = acmeDomain;
    locations."/" = {
      proxyPass = "http://127.0.0.1:5757";
      proxyWebsockets = true;
    };
  };
  programs.bash.shellAliases = {
    shlink = "docker exec -it shlink shlink";
  };
  /**/
}
+47
View File
@@ -0,0 +1,47 @@
{ config, pkgs, lib, mkDomain, ... }:
{
  # Sourcegraph
  # Understand, fix, and automate across your codebase with this code intelligence platform
  # NOTE: everything below is disabled via the `/** /` block-comment toggle
  # (delete the inner space to re-enable the container + vhost).
  /** /
  # First user regitration becomes admin
  # data can be destryoed with `nixos-container destroy sourcegraph`
  virtualisation.oci-containers.containers."sourcegraph" = {
    autoStart = true;
    #image = "sourcegraph/server:3.41.0";
    #image = "sourcegraph/server:latest";
    image = "sourcegraph/server:insiders";
    environment = {};
    ports = [
      "127.0.0.1:7080:7080/tcp" # webui?
      "127.0.0.1:3370:3370/tcp" # admin? (graphana and stuff)
    ];
    volumes = [
      "/var/lib/sourcegraph/config:/etc/sourcegraph"
      "/var/lib/sourcegraph/data:/var/opt/sourcegraph"
    ];
  };
  systemd.services."create-sourcegraph-volume-dirs" = {
    wantedBy = [ "${config.virtualisation.oci-containers.backend}-sourcegraph.service" ];
    serviceConfig.Type = "oneshot";
    script = ''
      mkdir -p /var/lib/sourcegraph/config
      mkdir -p /var/lib/sourcegraph/data
    '';
  };
  services.nginx.virtualHosts.${mkDomain "sourcegraph"}
  = lib.mkIf config.virtualisation.oci-containers.containers."sourcegraph".autoStart {
    forceSSL = true; # addSSL = true;
    enableACME = true; #useACMEHost = acmeDomain;
    locations."/" = {
      proxyPass = "http://127.0.0.1:7080";
      proxyWebsockets = true;
    };
    #locations."/graphana/" = {
    #  proxyPass = "http://127.0.0.1:3370";
    #  proxyWebsockets = true;
    #};
  };
  /**/
}
+32
View File
@@ -0,0 +1,32 @@
{ config, pkgs, lib, mkDomain, ... }:
{
# thelounge
# The self-hosted Web IRC client
services.thelounge = {
# configure user accounts by using the 'thelounge' command, or by adding entries to /var/lib/thelounge/users
enable = true;
public = false;
port = 5876;
# theLoungePlugins.themes is view of nodePackages_latest.thelounge-theme-*
# theLoungePlugins.plugins is view of nodePackages_latest.thelounge-plugin-*
# Install every packaged theme (filterAttrs drops the
# "recurseForDerivations" marker attribute, which is not a theme)
# plus a hand-picked set of plugins.
plugins = with pkgs.theLoungePlugins;
(with lib; attrValues (filterAttrs (name: _: name != "recurseForDerivations") themes))
++ [
#plugins.giphy
#plugins.shortcuts
plugins.closepms
];
extraConfig.theme = "One Dark";
extraConfig.fileUpload.enable = true;
# NOTE(review): baseUrl carries no scheme prefix — The Lounge presumably
# expects a full URL (e.g. "https://..."); confirm against its fileUpload docs.
extraConfig.fileUpload.baseUrl = "${mkDomain "thelounge"}";
};
# TLS reverse proxy in front of the The Lounge HTTP/websocket port;
# only defined while the service itself is enabled.
services.nginx.virtualHosts.${mkDomain "thelounge"} = lib.mkIf config.services.thelounge.enable {
forceSSL = true; # addSSL = true;
enableACME = true; #useACMEHost = acmeDomain;
locations."/" = {
proxyPass = "http://127.0.0.1:${toString config.services.thelounge.port}";
proxyWebsockets = true;
};
};
}
+26
View File
@@ -0,0 +1,26 @@
* [ ] cryptpad
* [ ] upterm / tmate
* [ ] shlink ?
* [ ] mailcatcher
* configure other services to deliver their outgoing mail here
# TODO: kukkee or rallly
# https://noted.lol/2-self-hosted-alternatives-to-doodle-meeting-scheduling/
#https://rallly.co/
# upterm
# Secure terminal-session sharing
services.uptermd = {
enable = false;
openFirewall = true;
#listenAddress # default is "[::]";
#port = 2222; # default is 2222, uses ssh
#extraFlags
#hostKey = null;
};
# Rocketchat
# A self-hosted discord/slack alternative
# TODO, docker exists, but no nixos module
+46
View File
@@ -0,0 +1,46 @@
{ config, pkgs, lib, mkDomain, ... }:
{
# trivial gradios
# Small gradio-based web tools behind one nginx vhost.
# NOTE: disabled via the "/** /" ... "/**/" comment toggle below;
# remove the space in "/** /" to re-enable it.
/** /
# Run the heritage-graph tool as a sandboxed (DynamicUser) systemd service.
systemd.services.trivial-gradios-heritage-graph = {
description = pkgs.python3Packages.trivial-gradios.meta.description;
after = [ "network.target" ];
wantedBy = [ "multi-user.target" ];
serviceConfig = rec {
User = "trivial-gradios";
Group = "trivial-gradios";
DynamicUser = true;
StateDirectory = "trivial-gradios-heritage-graph";
# rec lets WorkingDirectory reuse StateDirectory above
WorkingDirectory = "/var/lib/${StateDirectory}";
ExecStart = "${pkgs.python3Packages.trivial-gradios}/bin/trivial-gradios-heritage-graph --port 37001";
Restart = "on-failure";
};
};
services.nginx.virtualHosts.${mkDomain "gradio"} = {
forceSSL = true; # addSSL = true;
enableACME = true; #useACMEHost = acmeDomain;
# Landing page: a static index listing the available tools.
locations."/" = {
root = pkgs.writeTextDir "index.html" ''
<table>
<tr>
<th>name
<th>description
<tr>
<td><a href="heritage-graph/">heritage-graph</a>
<td>A simple tool to create a directed ancestry graph.
</table>
'';
};
# Proxy the tool itself, stripping the /heritage-graph prefix.
locations."/heritage-graph/" = {
proxyPass = "http://127.0.0.1:37001";
proxyWebsockets = true;
extraConfig = ''
rewrite ^/heritage-graph(/.*)$ $1 break;
'';
};
};
/**/
}
+30
View File
@@ -0,0 +1,30 @@
{ config, pkgs, lib, mkDomain, ... }:
{
  # Vaultwarden
  # Unofficial Bitwarden compatible server written in Rust
  services.vaultwarden.enable = true;
  services.vaultwarden.config = {
    # Option reference: https://github.com/dani-garcia/vaultwarden/blob/1.24.0/.env.template
    # camelCase is converted to UPPER_SNAKE_CASE
    domain = "https://${mkDomain "vaultwarden"}"; # port is supported
    signupsAllowed = false;
    # "rocket*" options configure the embedded Rocket HTTP server
    rocketAddress = "127.0.0.1";
    rocketPort = 8222;
    #rocketWorkers = 10;
    rocketLog = "critical";
  };
  #services.vaultwarden.dbBackend = "sqlite";
  # services.vaultwarden.backupDir = ""; # TODO

  # TLS reverse proxy in front of Rocket, defined only while the service is enabled.
  services.nginx.virtualHosts.${mkDomain "vaultwarden"} =
    lib.mkIf config.services.vaultwarden.enable {
      forceSSL = true; # addSSL = true;
      enableACME = true; #useACMEHost = acmeDomain;
      locations."/" = {
        proxyPass = "http://127.0.0.1:${toString config.services.vaultwarden.config.rocketPort}";
        proxyWebsockets = true;
      };
    };
}
+54
View File
@@ -0,0 +1,54 @@
{ config, pkgs, lib, mkDomain, ... }:
{
# webdav
# Simple WebDAV server
# TODO: parametrize which webdav shares i have?
services.webdav = {
enable = true;
# the webdav user uid:gid is fixed
settings = {
address = "127.0.0.1";
port = 9568;
prefix = "/";
# default share: read-only, authenticated access to the public tree
scope = "/mnt/reidun/pub";
modify = false;
auth = true;
users = [
{
username = "zotero";
# bcrypt hash (the "{bcrypt}" prefix selects the hash scheme)
password = "{bcrypt}$2y$10$9zzZuwd2AvNZXb8WCG/bM..ibOroNnX0sN94UTAV.Jco9LnZ8Whs2";
#prefix = "/zotero/";
# this account gets read-write access to its own scope only
scope = "/mnt/reidun/Various/Zotero";
modify = true;
}
# disabled example account (left inside the "/** /" comment toggle)
/** /
{
username = "guest";
password = "hunter2";
}
/**/
];
};
};
# TLS reverse proxy; only defined while the webdav service is enabled.
services.nginx.virtualHosts.${mkDomain "webdav"} = lib.mkIf config.services.webdav.enable {
forceSSL = true; # addSSL = true;
enableACME = true; #useACMEHost = acmeDomain;
locations."/" = {
proxyPass = "http://127.0.0.1:${toString config.services.webdav.settings.port}";
#proxyWebsockets = true;
# forward client identity headers and raise the upload limit for WebDAV PUTs
extraConfig = ''
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header REMOTE-HOST $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Host $host;
proxy_redirect off;
client_max_body_size 2G;
'';
};
};
}
-255
View File
@@ -1,255 +0,0 @@
{ config, pkgs, ... }:
let
lib = pkgs.lib;
# FQDN of this host: "<hostName>.<domain>"
domain = "${config.networking.hostName}.${config.networking.domain}";
# mkDomain "foo" -> "foo.<hostName>.<domain>"; helper for naming vhosts
mkDomain = subname: "${subname}.${domain}";
in {
#services.nginx.enable = true;
imports = [
./services/pdoc.nix
#../services/tt-rss.nix
];
# links.pbsds.net
# Static link page, served directly from a pinned Gitea checkout.
services.nginx.virtualHosts."links.pbsds.net" = let
links-pbsds-net = pkgs.fetchFromGitea rec {
name = repo;
domain = "gitea.noximilien.pbsds.net";
owner = "pbsds";
repo = "links.pbsds.net";
rev = "61cd605f198a22db87af087fda34c378b03d4306";
hash = "sha256-dx19aTy8K9xkL+cO4r4huYKrlVBZMUkDcbjqxLKe8W4=";
};
in {
#serverAliases = map mkDomain [ "links" ];
forceSSL = true; # addSSL = true;
enableACME = true; #useACMEHost = acmeDomain;
root = "${links-pbsds-net}";
};
# refleksjon.no
# Static site served from a subdirectory of a pinned Gitea checkout.
services.nginx.virtualHosts.${mkDomain "refleksjon"} = {
  forceSSL = true; # addSSL = true;
  enableACME = true; #useACMEHost = acmeDomain;
  root = let
    src = pkgs.fetchFromGitea rec {
      name = repo;
      domain = "gitea.noximilien.pbsds.net";
      owner = "pbsds";
      repo = "refleksjon.net";
      rev = "c1b91e369bf411e44534334595d4481cb59bd129";
      sha256 = "O+lNqD2LuESKM+S+AljF2SzIxzK05xdZqiLhylTQ2ls=";
    };
  in "${src}/www.refleksjon.net";
};
# roroslyd.no
# Static site served from a subdirectory of a pinned Gitea checkout.
services.nginx.virtualHosts.${mkDomain "roroslyd"} = let
roroslyd-no = pkgs.fetchFromGitea rec {
name = repo;
domain = "gitea.noximilien.pbsds.net";
owner = "pbsds";
repo = "roroslyd.no";
#rev = "v${version}";
rev = "fb7b0a7e70754cf368de7d7c469dabe71b2f1c78";
sha256 = "Rud5bBUuPgIC5UAGtyuYhUtXhN174UCWDoLUWWc/n6U=";
};
in {
forceSSL = true; # addSSL = true;
enableACME = true; #useACMEHost = acmeDomain;
# serve only the www subdirectory of the repository
root = "${roroslyd-no}/www.roroslyd.no";
};
# trivial gradios
# Small gradio-based web tools behind one nginx vhost.
# NOTE: disabled via the "/** /" ... "/**/" comment toggle below.
/** /
# heritage-graph runs as a sandboxed (DynamicUser) systemd service.
systemd.services.trivial-gradios-heritage-graph = {
description = pkgs.python3Packages.trivial-gradios.meta.description;
after = [ "network.target" ];
wantedBy = [ "multi-user.target" ];
serviceConfig = rec {
User = "trivial-gradios";
Group = "trivial-gradios";
DynamicUser = true;
StateDirectory = "trivial-gradios-heritage-graph";
# rec lets WorkingDirectory reuse StateDirectory above
WorkingDirectory = "/var/lib/${StateDirectory}";
ExecStart = "${pkgs.python3Packages.trivial-gradios}/bin/trivial-gradios-heritage-graph --port 37001";
Restart = "on-failure";
};
};
services.nginx.virtualHosts.${mkDomain "gradio"} = {
forceSSL = true; # addSSL = true;
enableACME = true; #useACMEHost = acmeDomain;
# Landing page: static index listing the available tools.
locations."/" = {
root = pkgs.writeTextDir "index.html" ''
<table>
<tr>
<th>name
<th>description
<tr>
<td><a href="heritage-graph/">heritage-graph</a>
<td>A simple tool to create a directed ancestry graph.
</table>
'';
};
# Proxy the tool itself, stripping the /heritage-graph prefix.
locations."/heritage-graph/" = {
proxyPass = "http://127.0.0.1:37001";
proxyWebsockets = true;
extraConfig = ''
rewrite ^/heritage-graph(/.*)$ $1 break;
'';
};
};
/**/
# CensorDodge
# A lightweight and customisable web proxy
# NOTE: disabled via the "/** /" ... "/**/" comment toggle below.
/** /
# Dedicated PHP-FPM pool, socket owned by the nginx user so nginx can connect.
services.phpfpm.pools.censordodge = {
user = "censordodge";
group = "censordodge";
settings = {
"listen.owner" = config.services.nginx.user;
"listen.group" = config.services.nginx.group;
"pm" = "dynamic";
"pm.max_children" = "32";
"pm.start_servers" = "2";
"pm.min_spare_servers" = "2";
"pm.max_spare_servers" = "4";
"pm.max_requests" = "500";
};
};
services.nginx.virtualHosts.${mkDomain "censordodge"} = {
forceSSL = true; # addSSL = true;
enableACME = true; #useACMEHost = acmeDomain;
# serve the app straight out of a pinned GitHub checkout
root = pkgs.fetchFromGitHub {
owner = "ryanmab";
repo = "CensorDodge";
rev = "2480e8269190ca8618e41dc581f9d55f4ce9f333";
sha256 = "8R3lyxF22HXui4pJytMcqwwa5TDXIJb6fWII934IhEA=";
};
extraConfig = ''
index index.php;
'';
locations."/".extraConfig = ''
try_files $uri $uri/ /index.php?$args;
'';
# NOTE(review): "\." is an unrecognized escape in a Nix double-quoted string
# and presumably evaluates to just "." (any-char in the nginx regex);
# use "~ \\.php$" for a literal dot when re-enabling — verify.
locations."~ \.php$".extraConfig = ''
include ${config.services.nginx.package}/conf/fastcgi.conf;
fastcgi_pass unix:${config.services.phpfpm.pools.censordodge.socket};
fastcgi_buffers 16 16k;
fastcgi_buffer_size 32k;
'';
};
# unprivileged system account for the PHP-FPM pool
users.users.censordodge = {
isSystemUser = true;
group = "censordodge";
};
users.groups.censordodge = {};
/**/
# OpenSpeedTest
# Pure HTML5 Network Performance Estimation Tool
# NOTE: disabled via the "/** /" ... "/**/" comment toggle below.
/** /
services.nginx.virtualHosts.${mkDomain "openspeedtest"} = let
cfg = config.services.nginx.virtualHosts.${mkDomain "openspeedtest"};
# pinned checkout of the static speed-test app, stripped of non-servable files
openspeedtest = pkgs.fetchFromGitHub rec {
name = "${owner}-unstable-2022-07-02";
owner = "openspeedtest";
repo = "Speed-Test";
#rev = "v${version}";
rev = "59eb7367ede5555f7516ebb8eeeb65245bc5a6e5";
sha256 = "yzvulzgBUri+sU9WxZrLKH/T+mlZu9G2zucv8t/fZdY=";
postFetch = ''
rm $out/README.md
rm $out/License.md
rm $out/.gitignore
rm $out/hosted.html
'';
};
in {
forceSSL = true; # addSSL = true;
enableACME = true; #useACMEHost = acmeDomain;
http2 = false;
root = "${openspeedtest}";
# tuning recommended upstream for accurate measurements
extraConfig = ''
#access_log off;
#error_log /dev/null; #Disable this for Windows Nginx.
#log_not_found off;
gzip off;
fastcgi_read_timeout 999;
server_tokens off;
tcp_nodelay on;
tcp_nopush on;
sendfile on;
open_file_cache max=200000 inactive=20s;
open_file_cache_valid 30s;
open_file_cache_min_uses 2;
open_file_cache_errors off;
'';
# CORS / cache-busting headers; currently hard-disabled via lib.mkIf false
locations."/".extraConfig = lib.mkIf false ''
if_modified_since off;
expires off;
etag off;
if ($request_method != OPTIONS ) {
add_header 'Access-Control-Allow-Origin' "*" always;
add_header 'Access-Control-Allow-Headers' 'Accept,Authorization,Cache-Control,Content-Type,DNT,If-Modified-Since,Keep-Alive,Origin,User-Agent,X-Mx-ReqToken,X-Requested-With' always;
add_header 'Access-Control-Allow-Methods' 'GET, POST, OPTIONS' always;
#Very Very Important! You SHOULD send no-store from server for Google Chrome.
add_header 'Cache-Control' 'no-store, no-cache, max-age=0, no-transform';
add_header 'Last-Modified' $date_gmt;
}
if ($request_method = OPTIONS ) {
add_header 'Access-Control-Allow-Origin' "$http_origin" always;
add_header 'Access-Control-Allow-Headers' 'Accept,Authorization,Cache-Control,Content-Type,DNT,If-Modified-Since,Keep-Alive,Origin,User-Agent,X-Mx-ReqToken,X-Requested-With' always;
add_header 'Access-Control-Allow-Methods' "GET, POST, OPTIONS" always;
add_header 'Access-Control-Allow-Credentials' "true";
return 204;
}
'';
# IF and Only if you Enabled HTTP2 otherwise never enable the following
# HTTP2 will return 200 without waiting for upload to complete. it's smart but we don't need that to happen here when testing upload speed on HTTP2.
locations."/upload.bin".extraConfig = ''
#proxy_set_header Host $host;
proxy_pass http://127.0.0.1:80/upload.bin;
'';
# Static-asset caching; currently hard-disabled via lib.mkIf false.
# NOTE(review): "\." in this Nix double-quoted string is an unrecognized
# escape and presumably evaluates to "." — use "\\." for a literal dot
# in the regex when re-enabling; verify.
locations."~* ^.+\.(?:css|cur|js|jpe?g|gif|htc|ico|png|html|xml|otf|ttf|eot|woff|woff2|svg)$".extraConfig = lib.mkIf false ''
#access_log off;
expires 365d;
add_header 'Cache-Control' public;
add_header 'Vary' Accept-Encoding;
tcp_nodelay off;
open_file_cache max=3000 inactive=120s;
open_file_cache_valid 45s;
open_file_cache_min_uses 2;
open_file_cache_errors off;
gzip on;
gzip_disable "msie6";
gzip_vary on;
gzip_proxied any;
gzip_comp_level 6;
gzip_buffers 16 8k;
gzip_http_version 1.1;
gzip_types text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript application/javascript;
'';
};
/**/
}