mirror of https://github.com/dali99/nixos-matrix-modules.git synced 2026-01-19 05:58:22 +01:00

33 Commits

Author SHA1 Message Date
099db715d1 synapse: Remove removed extra feature 2025-07-22 22:35:55 +02:00
da9dc0479f sliding-sync: remove 2025-01-02 23:34:05 +01:00
ff787d410c Add documentation for new sliding-sync setup and upgrade info 2024-09-27 06:21:37 +02:00
f8843835e2 sliding-sync: deprecate 2024-09-27 06:09:23 +02:00
f4e20d0360 Update README.MD 2024-08-29 10:32:38 +02:00
d7dc42c9bb sliding-sync: make enableAcme lib.mkDefault to match synapse 2024-06-01 11:39:05 +02:00
61b366f5f6 migrate to ensureDBOwnership 2024-06-01 11:11:14 +02:00
6c9b67974b fix generating multiple upstreams of same type 2024-03-13 07:39:59 +01:00
19b85a2562 Merge pull request #7 from dali99/refactor-nginx-upstream-generation
refactor nginx upstream generation, add support for unix sockets
2024-03-13 06:23:07 +01:00
d48997cfb4 generate only one upstream per worker 2024-03-13 06:22:24 +01:00
b8d7c76a7e treewide: add support for unix sockets 2024-01-27 07:52:26 +01:00
19d50fae63 nginx-pipeline: add basic test 2024-01-27 07:52:26 +01:00
18d3b34406 nginx: refactor upstream generation 2024-01-27 07:52:25 +01:00
85804fce8d lib: fix bug where lib.throw does not exist 2024-01-27 07:40:40 +01:00
046194cdad v0.5.0
This is mostly a maintenance release to be compatible with nixos-23.11 but comes with some small improvements as well
2023-12-02 09:58:52 +01:00
3f92b5f197 use nixpkgs sliding sync package 2023-12-02 09:49:03 +01:00
a24a5e5da4 update to 23.11 2023-12-02 09:44:45 +01:00
e098146571 Update README.MD 2023-10-22 03:02:59 +02:00
1e370b9622 matrix-sliding-sync: 0.99.10 -> 0.99.11 2023-10-16 03:49:32 +02:00
161d1ed360 document some breaking changes 2023-09-24 04:39:20 +02:00
50ae1b6e57 Implement easy sliding sync setup
Co-authored-by: h7x4 <h7x4@nani.wtf>
2023-09-24 04:39:20 +02:00
bedede1e6d Import sliding sync from nixpkgs unstable
Co-authored-by: Sandro Jäckel <sandro.jaeckel@gmail.com>
Co-authored-by: Emily <55066419+emilylange@users.noreply.github.com>
2023-09-24 03:32:54 +02:00
66ff528912 Update README.MD 2023-09-11 00:04:19 +02:00
8199f88a5a Update README.MD 2023-09-11 00:03:44 +02:00
bf997073d9 fix: don't force enableACME to allow useACMEHost 2023-07-27 22:10:46 +02:00
c158a35ea2 emergency handling of deprecations 2023-07-13 04:16:00 +02:00
362496f4aa move matrix-lib to let block
Else it has to be called with { } which modules cant do
2023-02-17 23:59:59 +01:00
cf89fa8eb9 load matrix-lib directly inside module
To enable use with non-flakes
2023-02-17 23:44:47 +01:00
59e39d551d Add a license
Co-authored-by: h7x4 <h7x4@nani.wtf>
2023-02-17 01:16:02 +01:00
07e95170e8 introduce matrix-lib 2023-01-20 08:11:33 +01:00
5ef8873997 simplify mainReplicationListener stuff 2023-01-20 08:11:33 +01:00
fbee6a0c0d Merge pull request #2 from h7x4/master 2023-01-19 21:48:06 +01:00
2fd07f83b5 Cleaned up matrix synapse module 2023-01-19 20:53:14 +01:00
13 changed files with 1199 additions and 806 deletions

.gitignore (vendored, new file, +1 line)

@@ -0,0 +1 @@
result

COPYING (new file, +21 lines)

@@ -0,0 +1,21 @@
MIT License
Copyright (c) 2020, 2022-2023 Daniel Løvbrøtte Olsen and contributors
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

MIGRATIONS.MD (new file, +15 lines)

@@ -0,0 +1,15 @@
# Migrations
This is a best-effort document describing necessary changes you might have to make when updating
## 0.6.1
Setting `enableSlidingSync` or `matrix-synapse.sliding-sync.environmentFile` (or any other sliding-sync setting)
is no longer needed for a sliding-sync setup. Upgrading will force re-logins for all users.
## 0.5.0
* The module has been renamed from `synapse` to `default`
* The synapse module now expects a wrapper-style package. This means the module is now incompatible with nixpkgs < 23.11.
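As an illustration of the 0.6.1 change, a hedged before/after sketch in Nix follows; the exact option paths and the secrets path are assumptions for illustration, not taken from this repository.

```
# Sketch only: the option paths and file paths below are assumed, not from the repo.
{
  # Before 0.6.1, a sliding-sync proxy had to be configured explicitly,
  # for example (assumed option names):
  #
  #   services.matrix-synapse-next.enableSlidingSync = true;
  #   services.matrix-synapse.sliding-sync.environmentFile = "/run/secrets/sliding-sync.env";

  # From 0.6.1 on, synapse serves sliding sync itself, so the lines above can
  # simply be deleted; the rest of the configuration stays as it was.
  services.matrix-synapse-next.enable = true;
}
```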

README.MD

@@ -1,3 +1,5 @@
For support and requests feel free to join [#nixos-matrix-modules:dodsorf.as](https://matrix.to/#/#nixos-matrix-modules:dodsorf.as), [uri](matrix:r/nixos-matrix-modules:dodsorf.as)
With matrix.YOURDOMAIN pointing at the server:
```
@@ -9,6 +11,10 @@ With matrix.YOURDOMAIN pointing at the server:
workers.federationSenders = 1;
workers.federationReceivers = 1;
workers.initialSyncers = 1;
workers.normalSyncers = 1;
workers.eventPersisters = 2;
workers.useUserDirectoryWorker = true;
enableNginx = true;
@@ -31,4 +37,4 @@ With matrix.YOURDOMAIN pointing at the server:
}
```
is ~enough to get a functional matrix-server running one federation sender and one federation receiver
is ~enough to get a functional matrix-server running with some workers
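Pieced together from the hunks above and the test configuration later in this diff, a minimal configuration using the module might look roughly like the sketch below; `YOURDOMAIN` is a placeholder and the exact set of options you need may differ.

```
# Rough sketch of a minimal setup using this module; YOURDOMAIN is a placeholder.
{
  services.matrix-synapse-next = {
    enable = true;
    workers.federationSenders = 1;
    workers.federationReceivers = 1;
    workers.initialSyncers = 1;
    workers.normalSyncers = 1;
    workers.eventPersisters = 2;
    workers.useUserDirectoryWorker = true;
    enableNginx = true;
    settings.server_name = "YOURDOMAIN";
  };
}
```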

flake.lock (generated, new file, +26 lines)

@@ -0,0 +1,26 @@
{
"nodes": {
"nixpkgs": {
"locked": {
"lastModified": 1706098335,
"narHash": "sha256-r3dWjT8P9/Ah5m5ul4WqIWD8muj5F+/gbCdjiNVBKmU=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "a77ab169a83a4175169d78684ddd2e54486ac651",
"type": "github"
},
"original": {
"id": "nixpkgs",
"ref": "nixos-23.11",
"type": "indirect"
}
},
"root": {
"inputs": {
"nixpkgs": "nixpkgs"
}
}
},
"root": "root",
"version": 7
}

flake.nix

@@ -1,9 +1,30 @@
{
description = "NixOS modules for matrix related services";
outputs = { self }: {
inputs = {
nixpkgs.url = "nixpkgs/nixos-23.11";
};
outputs = { self, nixpkgs }: {
nixosModules = {
synapse = import ./synapse-module;
default = import ./module.nix;
};
lib = import ./lib.nix { lib = nixpkgs.lib; };
packages = let
forAllSystems = f:
nixpkgs.lib.genAttrs [
"x86_64-linux"
"aarch64-linux"
"x86_64-darwin"
"aarch64-darwin"
] (system: f nixpkgs.legacyPackages.${system});
in forAllSystems (pkgs: {
tests = import ./tests {
inherit nixpkgs pkgs;
matrix-lib = self.lib;
};
});
};
}
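To show how the new flake outputs are meant to be consumed, a hypothetical downstream flake could pull in the module roughly like this; the flake reference, `myhost`, and `./configuration.nix` are assumptions for illustration.

```
# Hypothetical consumer flake; "myhost" and ./configuration.nix are made up.
{
  inputs = {
    nixpkgs.url = "nixpkgs/nixos-23.11";
    nixos-matrix-modules.url = "github:dali99/nixos-matrix-modules";
  };

  outputs = { self, nixpkgs, nixos-matrix-modules }: {
    nixosConfigurations.myhost = nixpkgs.lib.nixosSystem {
      system = "x86_64-linux";
      modules = [
        # `default` is the new combined entry point; `synapse` still points
        # directly at ./synapse-module.
        nixos-matrix-modules.nixosModules.default
        ./configuration.nix
      ];
    };
  };
}
```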

lib.nix (new file, +80 lines)

@@ -0,0 +1,80 @@
{ lib }:
rec {
# checks if given listener configuration has type as a resource
isListenerType = type: l: lib.any (r: lib.any (n: n == type) r.names) l.resources;
# Get the first listener that includes the given resource from worker
firstListenerOfType = type: ls: lib.lists.findFirst (isListenerType type)
(throw "No listener with resource: ${type} configured")
ls;
# Get an attrset of the host and port from a listener
connectionInfo = l: {
host = lib.head l.bind_addresses;
port = l.port;
};
# Get an attrset of the host and port from a worker given a type
workerConnectionResource = r: w: let
l = firstListenerOfType r w.settings.worker_listeners;
in connectionInfo l;
mapWorkersToUpstreamsByType = workerInstances:
lib.pipe workerInstances [
lib.attrValues
# Index by worker type
(lib.foldl (acc: worker: acc // {
${worker.type} = (acc.${worker.type} or [ ]) ++ [ worker ];
}) { })
# Subindex by resource names, listener types, and convert to upstreams
(lib.mapAttrs (_: workers: lib.pipe workers [
(lib.concatMap (worker: [ (lib.lists.head worker.settings.worker_listeners) ]))
lib.flatten
mapListenersToUpstreamsByType
]))
];
mapListenersToUpstreamsByType = listenerInstances:
lib.pipe listenerInstances [
# Index by resource names
(lib.concatMap (listener: lib.pipe listener [
(listener: let
allResourceNames = lib.pipe listener.resources [
(map (resource: resource.names))
lib.flatten
lib.unique
];
in if allResourceNames == [ ]
then { "empty" = listener; }
else lib.genAttrs allResourceNames (_: listener))
lib.attrsToList
]))
(lib.foldl (acc: listener: acc // {
${listener.name} = (acc.${listener.name} or [ ]) ++ [ listener.value ];
}) { })
# Index by listener type
(lib.mapAttrs (_:
(lib.foldl (acc: listener: acc // {
${listener.type} = (acc.${listener.type} or [ ]) ++ [ listener ];
}) { })
))
# Convert listeners to upstream URIs
(lib.mapAttrs (_:
(lib.mapAttrs (_: listeners:
lib.pipe listeners [
(lib.concatMap (listener:
if listener.path != null
then [ "unix:${listener.path}" ]
else (map (addr: "${addr}:${toString listener.port}") listener.bind_addresses)
))
# NOTE: Adding ` = { }` to every upstream might seem unnecessary in isolation,
# but it makes it easier to set upstreams in the nginx module.
(uris: lib.genAttrs uris (_: { }))
]
))
))
];
}
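For intuition, here is a hedged sketch of what `mapListenersToUpstreamsByType` is expected to return for a couple of hand-written listeners; the addresses, port, and socket path are made up, and the expected result is derived by reading the code above rather than produced by running it.

```
# Assumed usage sketch; evaluate e.g. with `nix-instantiate --eval --strict example.nix`.
let
  lib = (import <nixpkgs> { }).lib;
  matrix-lib = import ./lib.nix { inherit lib; };
  listeners = [
    {
      type = "http"; port = 8008; path = null;
      bind_addresses = [ "127.0.0.1" ];
      resources = [ { names = [ "client" "federation" ]; } ];
    }
    {
      type = "http"; port = null; path = "/run/matrix-synapse/replication.sock";
      bind_addresses = [ ];
      resources = [ { names = [ "replication" ]; } ];
    }
  ];
in
matrix-lib.mapListenersToUpstreamsByType listeners
# expected to evaluate to roughly:
# {
#   client.http      = { "127.0.0.1:8008" = { }; };
#   federation.http  = { "127.0.0.1:8008" = { }; };
#   replication.http = { "unix:/run/matrix-synapse/replication.sock" = { }; };
# }
```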

module.nix (new file, +14 lines)

@@ -0,0 +1,14 @@
{ lib, ... }:
{
imports = [
./synapse-module
# TODO: Remove after 25.05
(lib.mkRemovedOptionModule [ "services" "matrix-synapse" "sliding-sync" ] ''
`services.matrix-synapse.sliding-sync` is no longer necessary to use sliding-sync with synapse.
Synapse now includes this functionality itself; if you have a manually managed `.well-known/matrix/client` file,
remove the sliding-sync proxy URL from it.
'')
];
}

File diff suppressed because it is too large.


@@ -1,235 +1,213 @@
{ lib, pkgs, config, ...}:
{ pkgs, lib, config, ... }:
let
cfg = config.services.matrix-synapse-next;
getWorkersOfType = type: lib.filterAttrs (_: w: w.type == type) cfg.workers.instances;
isListenerType = type: listener: lib.lists.any (r: lib.lists.any (n: n == type) r.names) listener.resources;
firstListenerOfType = type: worker: lib.lists.findFirst (isListenerType type) (throw "No federation endpoint on receiver") worker.settings.worker_listeners;
wAddressOfType = type: w: lib.lists.findFirst (_: true) (throw "No address in receiver") (firstListenerOfType type w).bind_addresses;
wPortOfType = type: w: (firstListenerOfType type w).port;
wSocketAddressOfType = type: w: "${wAddressOfType type w}:${builtins.toString (wPortOfType type w)}";
generateSocketAddresses = type: workers: lib.mapAttrsToList (_: w: "${wSocketAddressOfType type w}") workers;
matrix-lib = (import ../lib.nix { inherit lib; });
workerUpstreams = matrix-lib.mapWorkersToUpstreamsByType cfg.workers.instances;
listenerUpstreams = matrix-lib.mapListenersToUpstreamsByType cfg.settings.listeners;
in
{
config = lib.mkIf cfg.enableNginx {
services.nginx.commonHttpConfig = ''
# No since argument means its initialSync
map $arg_since $synapse_unknown_sync {
default synapse_normal_sync;
''' synapse_initial_sync;
}
services.nginx.commonHttpConfig = ''
# No since argument means its initialSync
map $arg_since $synapse_unknown_sync {
default synapse_normal_sync;
''' synapse_initial_sync;
}
map $uri $synapse_uri_group {
# Sync requests
~^/_matrix/client/(r0|v3)/sync$ $synapse_unknown_sync;
~^/_matrix/client/(api/v1|r0|v3)/event$ synapse_normal_sync;
~^/_matrix/client/(api/v1|r0|v3)/initialSync$ synapse_initial_sync;
~^/_matrix/client/(api/v1|r0|v3)/rooms/[^/]+/initialSync$ synapse_initial_sync;
map $uri $synapse_uri_group {
# Sync requests
~^/_matrix/client/(r0|v3)/sync$ $synapse_unknown_sync;
~^/_matrix/client/(api/v1|r0|v3)/event$ synapse_normal_sync;
~^/_matrix/client/(api/v1|r0|v3)/initialSync$ synapse_initial_sync;
~^/_matrix/client/(api/v1|r0|v3)/rooms/[^/]+/initialSync$ synapse_initial_sync;
# Federation requests
~^/_matrix/federation/v1/event/ synapse_federation;
~^/_matrix/federation/v1/state/ synapse_federation;
~^/_matrix/federation/v1/state_ids/ synapse_federation;
~^/_matrix/federation/v1/backfill/ synapse_federation;
~^/_matrix/federation/v1/get_missing_events/ synapse_federation;
~^/_matrix/federation/v1/publicRooms synapse_federation;
~^/_matrix/federation/v1/query/ synapse_federation;
~^/_matrix/federation/v1/make_join/ synapse_federation;
~^/_matrix/federation/v1/make_leave/ synapse_federation;
~^/_matrix/federation/(v1|v2)/send_join/ synapse_federation;
~^/_matrix/federation/(v1|v2)/send_leave/ synapse_federation;
~^/_matrix/federation/(v1|v2)/invite/ synapse_federation;
~^/_matrix/federation/v1/event_auth/ synapse_federation;
~^/_matrix/federation/v1/timestamp_to_event/ synapse_federation;
~^/_matrix/federation/v1/exchange_third_party_invite/ synapse_federation;
~^/_matrix/federation/v1/user/devices/ synapse_federation;
~^/_matrix/key/v2/query synapse_federation;
~^/_matrix/federation/v1/hierarchy/ synapse_federation;
# Federation requests
~^/_matrix/federation/v1/event/ synapse_federation;
~^/_matrix/federation/v1/state/ synapse_federation;
~^/_matrix/federation/v1/state_ids/ synapse_federation;
~^/_matrix/federation/v1/backfill/ synapse_federation;
~^/_matrix/federation/v1/get_missing_events/ synapse_federation;
~^/_matrix/federation/v1/publicRooms synapse_federation;
~^/_matrix/federation/v1/query/ synapse_federation;
~^/_matrix/federation/v1/make_join/ synapse_federation;
~^/_matrix/federation/v1/make_leave/ synapse_federation;
~^/_matrix/federation/(v1|v2)/send_join/ synapse_federation;
~^/_matrix/federation/(v1|v2)/send_leave/ synapse_federation;
~^/_matrix/federation/(v1|v2)/invite/ synapse_federation;
~^/_matrix/federation/v1/event_auth/ synapse_federation;
~^/_matrix/federation/v1/timestamp_to_event/ synapse_federation;
~^/_matrix/federation/v1/exchange_third_party_invite/ synapse_federation;
~^/_matrix/federation/v1/user/devices/ synapse_federation;
~^/_matrix/key/v2/query synapse_federation;
~^/_matrix/federation/v1/hierarchy/ synapse_federation;
# Inbound federation transaction request
~^/_matrix/federation/v1/send/ synapse_federation_transaction;
# Inbound federation transaction request
~^/_matrix/federation/v1/send/ synapse_federation_transaction;
# Client API requests
~^/_matrix/client/(api/v1|r0|v3|unstable)/createRoom$ synapse_client_interaction;
~^/_matrix/client/(api/v1|r0|v3|unstable)/publicRooms$ synapse_client_interaction;
~^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/joined_members$ synapse_client_interaction;
~^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/context/.*$ synapse_client_interaction;
~^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/members$ synapse_client_interaction;
~^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/state$ synapse_client_interaction;
~^/_matrix/client/v1/rooms/.*/hierarchy$ synapse_client_interaction;
~^/_matrix/client/(v1|unstable)/rooms/.*/relations/ synapse_client_interaction;
~^/_matrix/client/v1/rooms/.*/threads$ synapse_client_interaction;
~^/_matrix/client/unstable/org.matrix.msc2716/rooms/.*/batch_send$ synapse_client_interaction;
~^/_matrix/client/unstable/im.nheko.summary/rooms/.*/summary$ synapse_client_interaction;
~^/_matrix/client/(r0|v3|unstable)/account/3pid$ synapse_client_interaction;
~^/_matrix/client/(r0|v3|unstable)/account/whoami$ synapse_client_interaction;
~^/_matrix/client/(r0|v3|unstable)/devices$ synapse_client_interaction;
~^/_matrix/client/versions$ synapse_client_interaction;
~^/_matrix/client/(api/v1|r0|v3|unstable)/voip/turnServer$ synapse_client_interaction;
~^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/event/ synapse_client_interaction;
~^/_matrix/client/(api/v1|r0|v3|unstable)/joined_rooms$ synapse_client_interaction;
~^/_matrix/client/v1/rooms/.*/timestamp_to_event$ synapse_client_interaction;
~^/_matrix/client/(api/v1|r0|v3|unstable)/search$ synapse_client_interaction;
# Client API requests
~^/_matrix/client/(api/v1|r0|v3|unstable)/createRoom$ synapse_client_interaction;
~^/_matrix/client/(api/v1|r0|v3|unstable)/publicRooms$ synapse_client_interaction;
~^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/joined_members$ synapse_client_interaction;
~^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/context/.*$ synapse_client_interaction;
~^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/members$ synapse_client_interaction;
~^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/state$ synapse_client_interaction;
~^/_matrix/client/v1/rooms/.*/hierarchy$ synapse_client_interaction;
~^/_matrix/client/(v1|unstable)/rooms/.*/relations/ synapse_client_interaction;
~^/_matrix/client/v1/rooms/.*/threads$ synapse_client_interaction;
~^/_matrix/client/unstable/org.matrix.msc2716/rooms/.*/batch_send$ synapse_client_interaction;
~^/_matrix/client/unstable/im.nheko.summary/rooms/.*/summary$ synapse_client_interaction;
~^/_matrix/client/(r0|v3|unstable)/account/3pid$ synapse_client_interaction;
~^/_matrix/client/(r0|v3|unstable)/account/whoami$ synapse_client_interaction;
~^/_matrix/client/(r0|v3|unstable)/devices$ synapse_client_interaction;
~^/_matrix/client/versions$ synapse_client_interaction;
~^/_matrix/client/(api/v1|r0|v3|unstable)/voip/turnServer$ synapse_client_interaction;
~^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/event/ synapse_client_interaction;
~^/_matrix/client/(api/v1|r0|v3|unstable)/joined_rooms$ synapse_client_interaction;
~^/_matrix/client/v1/rooms/.*/timestamp_to_event$ synapse_client_interaction;
~^/_matrix/client/(api/v1|r0|v3|unstable)/search$ synapse_client_interaction;
# Encryption requests
~^/_matrix/client/(r0|v3|unstable)/keys/query$ synapse_client_encryption;
~^/_matrix/client/(r0|v3|unstable)/keys/changes$ synapse_client_encryption;
~^/_matrix/client/(r0|v3|unstable)/keys/claim$ synapse_client_encryption;
~^/_matrix/client/(r0|v3|unstable)/room_keys/ synapse_client_encryption;
~^/_matrix/client/(r0|v3|unstable)/keys/upload/ synapse_client_encryption;
# Encryption requests
~^/_matrix/client/(r0|v3|unstable)/keys/query$ synapse_client_encryption;
~^/_matrix/client/(r0|v3|unstable)/keys/changes$ synapse_client_encryption;
~^/_matrix/client/(r0|v3|unstable)/keys/claim$ synapse_client_encryption;
~^/_matrix/client/(r0|v3|unstable)/room_keys/ synapse_client_encryption;
~^/_matrix/client/(r0|v3|unstable)/keys/upload/ synapse_client_encryption;
# Registration/login requests
~^/_matrix/client/(api/v1|r0|v3|unstable)/login$ synapse_client_login;
~^/_matrix/client/(r0|v3|unstable)/register$ synapse_client_login;
~^/_matrix/client/v1/register/m.login.registration_token/validity$ synapse_client_login;
# Registration/login requests
~^/_matrix/client/(api/v1|r0|v3|unstable)/login$ synapse_client_login;
~^/_matrix/client/(r0|v3|unstable)/register$ synapse_client_login;
~^/_matrix/client/v1/register/m.login.registration_token/validity$ synapse_client_login;
# Event sending requests
~^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/redact synapse_client_transaction;
~^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/send synapse_client_transaction;
~^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/state/ synapse_client_transaction;
~^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/(join|invite|leave|ban|unban|kick)$ synapse_client_transaction;
~^/_matrix/client/(api/v1|r0|v3|unstable)/join/ synapse_client_transaction;
~^/_matrix/client/(api/v1|r0|v3|unstable)/profile/ synapse_client_transaction;
# Event sending requests
~^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/redact synapse_client_transaction;
~^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/send synapse_client_transaction;
~^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/state/ synapse_client_transaction;
~^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/(join|invite|leave|ban|unban|kick)$ synapse_client_transaction;
~^/_matrix/client/(api/v1|r0|v3|unstable)/join/ synapse_client_transaction;
~^/_matrix/client/(api/v1|r0|v3|unstable)/profile/ synapse_client_transaction;
# Account data requests
~^/_matrix/client/(r0|v3|unstable)/.*/tags synapse_client_data;
~^/_matrix/client/(r0|v3|unstable)/.*/account_data synapse_client_data;
# Account data requests
~^/_matrix/client/(r0|v3|unstable)/.*/tags synapse_client_data;
~^/_matrix/client/(r0|v3|unstable)/.*/account_data synapse_client_data;
# Receipts requests
~^/_matrix/client/(r0|v3|unstable)/rooms/.*/receipt synapse_client_interaction;
~^/_matrix/client/(r0|v3|unstable)/rooms/.*/read_markers synapse_client_interaction;
# Receipts requests
~^/_matrix/client/(r0|v3|unstable)/rooms/.*/receipt synapse_client_interaction;
~^/_matrix/client/(r0|v3|unstable)/rooms/.*/read_markers synapse_client_interaction;
# Presence requests
~^/_matrix/client/(api/v1|r0|v3|unstable)/presence/ synapse_client_presence;
# Presence requests
~^/_matrix/client/(api/v1|r0|v3|unstable)/presence/ synapse_client_presence;
# User directory search requests;
~^/_matrix/client/(r0|v3|unstable)/user_directory/search$ synapse_client_user-dir;
}
# User directory search requests;
~^/_matrix/client/(r0|v3|unstable)/user_directory/search$ synapse_client_user-dir;
}
#Plugboard for url -> workers
map $synapse_uri_group $synapse_backend {
default synapse_master;
#Plugboard for url -> workers
map $synapse_uri_group $synapse_backend {
default synapse_master;
synapse_initial_sync synapse_worker_initial_sync;
synapse_normal_sync synapse_worker_normal_sync;
synapse_initial_sync synapse_worker_initial_sync;
synapse_normal_sync synapse_worker_normal_sync;
synapse_federation synapse_worker_federation;
synapse_federation_transaction synapse_worker_federation;
synapse_federation synapse_worker_federation;
synapse_federation_transaction synapse_worker_federation;
synapse_client_user-dir synapse_worker_user-dir;
}
synapse_client_user-dir synapse_worker_user-dir;
}
# from https://github.com/tswfi/synapse/commit/b3704b936663cc692241e978dce4ac623276b1a6
map $arg_access_token $accesstoken_from_urlparam {
# Defaults to just passing back the whole accesstoken
default $arg_access_token;
# Try to extract username part from accesstoken URL parameter
"~syt_(?<username>.*?)_.*" $username;
}
# from https://github.com/tswfi/synapse/commit/b3704b936663cc692241e978dce4ac623276b1a6
map $arg_access_token $accesstoken_from_urlparam {
# Defaults to just passing back the whole accesstoken
default $arg_access_token;
# Try to extract username part from accesstoken URL parameter
"~syt_(?<username>.*?)_.*" $username;
}
map $http_authorization $mxid_localpart {
# Defaults to just passing back the whole accesstoken
default $http_authorization;
# Try to extract username part from accesstoken header
"~Bearer syt_(?<username>.*?)_.*" $username;
# if no authorization-header exist, try mapper for URL parameter "access_token"
"" $accesstoken_from_urlparam;
}
'';
services.nginx.upstreams.synapse_master.servers = let
isMainListener = l: isListenerType "client" l && isListenerType "federation" l;
firstMainListener = lib.findFirst isMainListener
(throw "No cartch-all listener configured") cfg.settings.listeners;
address = lib.findFirst (_: true) (throw "No address in main listener") firstMainListener.bind_addresses;
port = firstMainListener.port;
socketAddress = "${address}:${builtins.toString port}";
in {
"${socketAddress}" = { };
};
services.nginx.upstreams.synapse_worker_federation = {
servers = let
fedReceivers = getWorkersOfType "fed-receiver";
socketAddresses = generateSocketAddresses "federation" fedReceivers;
in if fedReceivers != { } then
lib.genAttrs socketAddresses (_: { })
else config.services.nginx.upstreams.synapse_master.servers;
extraConfig = ''
ip_hash;
map $http_authorization $mxid_localpart {
# Defaults to just passing back the whole accesstoken
default $http_authorization;
# Try to extract username part from accesstoken header
"~Bearer syt_(?<username>.*?)_.*" $username;
# if no authorization-header exist, try mapper for URL parameter "access_token"
"" $accesstoken_from_urlparam;
}
'';
};
services.nginx.upstreams.synapse_master.servers = let
mainListeners = builtins.intersectAttrs
(listenerUpstreams.client.http or { })
(listenerUpstreams.federation.http or { });
in
assert lib.assertMsg (mainListeners != { })
"No catch-all listener configured, or listener is not bound to an address";
mainListeners;
services.nginx.upstreams.synapse_worker_initial_sync = {
servers = let
initialSyncers = getWorkersOfType "initial-sync";
socketAddresses = generateSocketAddresses "client" initialSyncers;
in if initialSyncers != { } then
lib.genAttrs socketAddresses (_: { })
else config.services.nginx.upstreams.synapse_master.servers;
extraConfig = ''
hash $mxid_localpart consistent;
'';
};
services.nginx.upstreams.synapse_worker_normal_sync = {
servers = let
normalSyncers = getWorkersOfType "normal-sync";
socketAddresses = generateSocketAddresses "client" normalSyncers;
in if normalSyncers != { } then
lib.genAttrs socketAddresses (_: { })
else config.services.nginx.upstreams.synapse_master.servers;
extraConfig = ''
hash $mxid_localpart consistent;
'';
};
services.nginx.upstreams.synapse_worker_user-dir = {
servers = let
workers = getWorkersOfType "user-dir";
socketAddresses = generateSocketAddresses "client" workers;
in if workers != { } then
lib.genAttrs socketAddresses (_: { })
else config.services.nginx.upstreams.synapse_master.servers;
};
services.nginx.virtualHosts."${cfg.public_baseurl}" = {
enableACME = true;
forceSSL = true;
locations."/_matrix" = {
proxyPass = "http://$synapse_backend";
services.nginx.upstreams.synapse_worker_federation = {
servers = workerUpstreams.fed-receiver.federation.http or config.services.nginx.upstreams.synapse_master.servers;
extraConfig = ''
add_header X-debug-backend $synapse_backend;
add_header X-debug-group $synapse_uri_group;
client_max_body_size ${cfg.settings.max_upload_size};
proxy_read_timeout 10m;
ip_hash;
'';
};
locations."~ ^/_matrix/client/(r0|v3)/sync$" = {
proxyPass = "http://$synapse_backend";
services.nginx.upstreams.synapse_worker_initial_sync = {
servers = workerUpstreams.initial-sync.client.http or config.services.nginx.upstreams.synapse_master.servers;
extraConfig = ''
proxy_read_timeout 1h;
hash $mxid_localpart consistent;
'';
};
locations."~ ^/_matrix/client/(api/v1|r0|v3)/initialSync$" = {
proxyPass = "http://synapse_worker_initial_sync";
services.nginx.upstreams.synapse_worker_normal_sync = {
servers = workerUpstreams.normal-sync.client.http or config.services.nginx.upstreams.synapse_master.servers;
extraConfig = ''
proxy_read_timeout 1h;
hash $mxid_localpart consistent;
'';
};
locations."~ ^/_matrix/client/(api/v1|r0|v3)/rooms/[^/]+/initialSync$" = {
proxyPass = "http://synapse_worker_initial_sync";
extraConfig = ''
proxy_read_timeout 1h;
'';
services.nginx.upstreams.synapse_worker_user-dir = {
servers = workerUpstreams.user-dir.client.http or config.services.nginx.upstreams.synapse_master.servers;
};
locations."/_synapse/client" = {
proxyPass = "http://$synapse_backend";
services.nginx.virtualHosts."${cfg.public_baseurl}" = {
enableACME = lib.mkDefault true;
forceSSL = true;
locations."/_matrix" = {
proxyPass = "http://$synapse_backend";
extraConfig = ''
add_header X-debug-backend $synapse_backend;
add_header X-debug-group $synapse_uri_group;
client_max_body_size ${cfg.settings.max_upload_size};
proxy_read_timeout 10m;
'';
};
locations."~ ^/_matrix/client/(r0|v3)/sync$" = {
proxyPass = "http://$synapse_backend";
extraConfig = ''
proxy_read_timeout 1h;
'';
};
locations."~ ^/_matrix/client/(api/v1|r0|v3)/initialSync$" = {
proxyPass = "http://synapse_worker_initial_sync";
extraConfig = ''
proxy_read_timeout 1h;
'';
};
locations."~ ^/_matrix/client/(api/v1|r0|v3)/rooms/[^/]+/initialSync$" = {
proxyPass = "http://synapse_worker_initial_sync";
extraConfig = ''
proxy_read_timeout 1h;
'';
};
locations."/_synapse/client" = {
proxyPass = "http://$synapse_backend";
};
locations."/.well-known/matrix" = {
proxyPass = "http://$synapse_backend";
};
};
};
};
}
}

synapse-module/workers.nix (new file, +401 lines)

@@ -0,0 +1,401 @@
{ matrix-synapse-common-config,
matrix-lib,
wrapped,
throw',
format
}:
{ pkgs, lib, config, ... }: let
cfg = config.services.matrix-synapse-next;
wcfg = config.services.matrix-synapse-next.workers;
# Used to generate proper defaultTexts.
cfgText = "config.services.matrix-synapse-next";
wcfgText = "config.services.matrix-synapse-next.workers";
inherit (lib) types mkOption mkEnableOption mkIf mkMerge literalExpression;
mkWorkerCountOption = workerType: mkOption {
type = types.ints.unsigned;
description = "How many automatically configured ${workerType} workers to set up";
default = 0;
};
genAttrs' = items: f: g: builtins.listToAttrs (map (i: lib.nameValuePair (f i) (g i)) items);
mainReplicationListener = matrix-lib.firstListenerOfType "replication" cfg.settings.listeners;
in {
# See https://github.com/matrix-org/synapse/blob/develop/docs/workers.md for more info
options.services.matrix-synapse-next.workers = let
workerInstanceType = types.submodule ({ config, ... }: {
options = {
isAuto = mkOption {
type = types.bool;
internal = true;
default = false;
};
index = mkOption {
internal = true;
type = types.ints.positive;
};
# The custom string type here is mainly for the name to use
# for the metrics of custom worker types
type = mkOption {
type = types.str;
# TODO: add description and possibly default value?
};
settings = mkOption {
type = workerSettingsType config;
default = { };
};
};
});
workerSettingsType = instanceCfg: types.submodule {
freeformType = format.type;
options = {
worker_app = mkOption {
type = types.enum [
"synapse.app.generic_worker"
"synapse.app.appservice"
"synapse.app.media_repository"
"synapse.app.user_dir"
];
description = "The type of worker application";
default = "synapse.app.generic_worker";
};
worker_listeners = mkOption {
type = types.listOf (workerListenerType instanceCfg);
description = "Listener configuration for the worker, similar to the main synapse listener";
default = [ ];
};
};
};
workerListenerType = instanceCfg: types.submodule {
options = {
type = mkOption {
type = types.enum [ "http" "metrics" ];
description = "The type of the listener";
default = "http";
};
port = mkOption {
type = with types; nullOr port;
default = null;
description = "The TCP port to bind to";
};
path = mkOption {
type = with types; nullOr path;
default = null;
description = "The UNIX socket to bind to";
};
bind_addresses = mkOption {
type = with types; listOf str;
description = "A list of local addresses to listen on";
default = [ wcfg.defaultListenerAddress ];
defaultText = literalExpression "[ ${wcfgText}.defaultListenerAddress ]";
};
tls = mkOption {
type = types.bool;
description = ''
Whether to enable TLS for this listener.
Will use the TLS key/cert specified in tls_private_key_path / tls_certificate_path.
'';
default = false;
example = true;
};
x_forwarded = mkOption {
type = types.bool;
description = ''
Whether to use the X-Forwarded-For HTTP header as the client IP.
This option is only valid for an 'http' listener.
It is useful when Synapse is running behind a reverse-proxy.
'';
default = true;
example = false;
};
resources = let
typeToResources = t: {
"fed-receiver" = [ "federation" ];
"fed-sender" = [ ];
"initial-sync" = [ "client" ];
"normal-sync" = [ "client" ];
"event-persist" = [ "replication" ];
"user-dir" = [ "client" ];
}.${t};
in mkOption {
type = types.listOf (types.submodule {
options = {
names = mkOption {
type = with types; listOf (enum [
"client"
"consent"
"federation"
"keys"
"media"
"metrics"
"openid"
"replication"
"static"
"webclient"
]);
description = "A list of resources to host on this port";
default = lib.optionals instanceCfg.isAuto (typeToResources instanceCfg.type);
defaultText = ''
If the worker is generated from other config, the resource type will
be determined automatically.
'';
};
compress = mkEnableOption "HTTP compression for this resource";
};
});
default = [{ }];
};
};
};
in {
mainReplicationHost = mkOption {
type = with types; nullOr str;
default = let
host = (matrix-lib.connectionInfo mainReplicationListener).host;
in
# To avoid connecting to 0.0.0.0 and so on
if builtins.elem host [ "0.0.0.0" "::" ]
then "127.0.0.1"
else host;
# TODO: add defaultText
description = "Host of the main synapse instance's replication listener";
};
mainReplicationPort = mkOption {
type = with types; nullOr port;
default = mainReplicationListener.port;
# TODO: add defaultText
description = "Port for the main synapse instance's replication listener";
};
mainReplicationPath = mkOption {
type = with types; nullOr path;
default = mainReplicationListener.path;
# TODO: add defaultText
description = "Path to the UNIX socket of the main synapse instance's replication listener";
};
defaultListenerAddress = mkOption {
type = types.str;
default = "127.0.0.1";
description = "The default listener address for the worker";
};
workersUsePath = mkOption {
type = types.bool;
description = "Whether to enable UNIX sockets for all automatically generated workers";
default = true;
example = false;
};
workerStartingPort = mkOption {
type = types.port;
description = "What port should the automatically configured workers start enumerating from";
default = 8083;
};
enableMetrics = mkOption {
type = types.bool;
default = cfg.settings.enable_metrics;
defaultText = literalExpression "${cfgText}.settings.enable_metrics";
# TODO: add description
};
metricsStartingPort = mkOption {
type = types.port;
default = 18083;
# TODO: add description
};
federationSenders = mkWorkerCountOption "federation-sender";
federationReceivers = mkWorkerCountOption "federation-receiver";
initialSyncers = mkWorkerCountOption "initial-syncer";
normalSyncers = mkWorkerCountOption "sync";
eventPersisters = mkWorkerCountOption "event-persister";
useUserDirectoryWorker = mkEnableOption "user directory worker";
instances = mkOption {
type = types.attrsOf workerInstanceType;
default = { };
description = "Worker configuration";
example = {
"federation_sender1" = {
settings = {
worker_name = "federation_sender1";
worker_app = "synapse.app.generic_worker";
worker_replication_host = "127.0.0.1";
worker_replication_http_port = 9093;
worker_listeners = [ ];
};
};
};
};
};
config = {
assertions = [ ]
++ (lib.concatMap (worker:
(map (l: {
assertion = l.path == null -> (l.bind_addresses != [ ] && l.port != null);
message = "At least one worker listener is missing either a socket path or a bind_address + port to listen on";
}) worker.settings.worker_listeners)
) (lib.attrValues wcfg.instances));
services.matrix-synapse-next.settings = {
federation_sender_instances =
lib.genList (i: "auto-fed-sender${toString (i + 1)}") wcfg.federationSenders;
instance_map = (lib.mkIf (cfg.workers.instances != { }) ({
main = if wcfg.mainReplicationPath != null then {
path = wcfg.mainReplicationPath;
} else {
host = wcfg.mainReplicationHost;
port = wcfg.mainReplicationPort;
};
} // genAttrs' (lib.lists.range 1 wcfg.eventPersisters)
(i: "auto-event-persist${toString i}")
(i: let
wRL = matrix-lib.firstListenerOfType "replication" wcfg.instances."auto-event-persist${toString i}".settings.worker_listeners;
in if wRL.path != null then {
inherit (wRL) path;
} else matrix-lib.connectionInfo wRL)));
stream_writers.events =
mkIf (wcfg.eventPersisters > 0)
(lib.genList (i: "auto-event-persist${toString (i + 1)}") wcfg.eventPersisters);
update_user_directory_from_worker =
mkIf wcfg.useUserDirectoryWorker "auto-user-dir";
};
services.matrix-synapse-next.workers.instances = let
sum = lib.foldl lib.add 0;
workerListenersWithMetrics = portOffset: name:
[(if wcfg.workersUsePath
then {
path = "${cfg.socketDir}/matrix-synapse-worker-${name}.sock";
}
else {
port = wcfg.workerStartingPort + portOffset - 1;
}
)]
++ lib.optional wcfg.enableMetrics {
port = wcfg.metricsStartingPort + portOffset;
resources = [ { names = [ "metrics" ]; } ];
};
makeWorkerInstances = {
type,
numberOfWorkers,
portOffset ? 0,
nameFn ? i: "auto-${type}${toString i}",
workerListenerFn ? i: name: workerListenersWithMetrics (portOffset + i) name
}: genAttrs'
(lib.lists.range 1 numberOfWorkers)
nameFn
(i: {
isAuto = true;
inherit type;
index = i;
settings.worker_listeners = workerListenerFn i (nameFn i);
});
workerInstances = {
"fed-sender" = wcfg.federationSenders;
"fed-receiver" = wcfg.federationReceivers;
"initial-sync" = wcfg.initialSyncers;
"normal-sync" = wcfg.normalSyncers;
"event-persist" = wcfg.eventPersisters;
} // (lib.optionalAttrs wcfg.useUserDirectoryWorker {
"user-dir" = {
numberOfWorkers = 1;
nameFn = _: "auto-user-dir";
};
});
coerceWorker = { name, value }: if builtins.isInt value then {
type = name;
numberOfWorkers = value;
} else { type = name; } // value;
# Like foldl, but keeps all intermediate values
#
# (b -> a -> b) -> b -> [a] -> [b]
scanl = f: x1: list: let
x2 = lib.head list;
x1' = f x1 x2;
in if list == [] then [] else [x1'] ++ (scanl f x1' (lib.tail list));
f = { portOffset, numberOfWorkers, ... }: x: x // { portOffset = portOffset + numberOfWorkers; };
init = { portOffset = 0; numberOfWorkers = 0; };
in lib.pipe workerInstances [
(lib.mapAttrsToList lib.nameValuePair)
(map coerceWorker)
(scanl f init)
(map makeWorkerInstances)
mkMerge
];
systemd.services = let
workerList = lib.mapAttrsToList lib.nameValuePair wcfg.instances;
workerConfig = worker:
format.generate "matrix-synapse-worker-${worker.name}-config.yaml"
(worker.value.settings // {
worker_name = worker.name;
worker_listeners =
map (lib.filterAttrsRecursive (_: v: v != null)) worker.value.settings.worker_listeners;
});
in builtins.listToAttrs (lib.flip map workerList (worker: {
name = "matrix-synapse-worker-${worker.name}";
value = {
description = "Synapse Matrix Worker";
partOf = [ "matrix-synapse.target" ];
wantedBy = [ "matrix-synapse.target" ];
after = [ "matrix-synapse.service" ];
requires = [ "matrix-synapse.service" ];
serviceConfig = {
Type = "notify";
User = "matrix-synapse";
Group = "matrix-synapse";
Slice = "system-matrix-synapse.slice";
WorkingDirectory = cfg.dataDir;
RuntimeDirectory = "matrix-synapse";
StateDirectory = "matrix-synapse";
ExecStartPre = pkgs.writers.writeBash "wait-for-synapse" ''
# From https://md.darmstadt.ccc.de/synapse-at-work
while ! systemctl is-active -q matrix-synapse.service; do
sleep 1
done
'';
ExecStart = let
flags = lib.cli.toGNUCommandLineShell {} {
config-path = [ matrix-synapse-common-config (workerConfig worker) ] ++ cfg.extraConfigFiles;
keys-directory = cfg.dataDir;
};
in "${wrapped}/bin/synapse_worker ${flags}";
};
};
}));
};
}
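As a worked sketch of the `scanl`/port-offset logic above (the worker counts below are made up), the accumulated `portOffset` values come out as shown in the trailing comments; with `workersUsePath = false`, `auto-fed-sender1` would then land on `workerStartingPort` (8083 by default), `auto-fed-sender2` on 8084, `auto-fed-receiver1` on 8085, and so on.

```
# Assumed walk-through of the scanl helper defined above; worker counts are made up.
let
  lib = (import <nixpkgs> { }).lib;
  scanl = f: x1: list:
    let
      x2 = lib.head list;
      x1' = f x1 x2;
    in if list == [ ] then [ ] else [ x1' ] ++ (scanl f x1' (lib.tail list));
  f = { portOffset, numberOfWorkers, ... }: x:
    x // { portOffset = portOffset + numberOfWorkers; };
  init = { portOffset = 0; numberOfWorkers = 0; };
in
scanl f init [
  { type = "fed-sender";   numberOfWorkers = 2; }
  { type = "fed-receiver"; numberOfWorkers = 1; }
  { type = "initial-sync"; numberOfWorkers = 1; }
]
# => [
#   { type = "fed-sender";   numberOfWorkers = 2; portOffset = 0; }
#   { type = "fed-receiver"; numberOfWorkers = 1; portOffset = 2; }
#   { type = "initial-sync"; numberOfWorkers = 1; portOffset = 3; }
# ]
```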

tests/default.nix (new file, +4 lines)

@@ -0,0 +1,4 @@
{ nixpkgs, pkgs, matrix-lib, ... }:
{
nginx-pipeline = pkgs.callPackage ./nginx-pipeline { inherit nixpkgs matrix-lib; };
}

tests/nginx-pipeline/default.nix

@@ -0,0 +1,53 @@
{ nixpkgs, lib, matrix-lib, writeText, ... }:
let
nixosConfig = nixpkgs.lib.nixosSystem {
system = "x86_64-linux";
modules = [
../../module.nix
{
system.stateVersion = "23.11";
boot.isContainer = true;
services.matrix-synapse-next = {
enable = true;
enableNginx = true;
workers = {
enableMetrics = true;
federationSenders = 3;
federationReceivers = 3;
initialSyncers = 1;
normalSyncers = 1;
eventPersisters = 1;
useUserDirectoryWorker = true;
instances.auto-fed-receiver1.settings.worker_listeners = [
{
bind_addresses = [
"127.0.0.2"
];
port = 1337;
resources = [
{ compress = false;
names = [ "federation" ];
}
];
}
];
};
settings.server_name = "example.com";
};
}
];
};
inherit (nixosConfig.config.services.matrix-synapse-next.workers) instances;
in
writeText "matrix-synapse-next-nginx-pipeline-test.txt" ''
${(lib.generators.toPretty {}) instances}
====================================================
${(lib.generators.toPretty {}) (matrix-lib.mapWorkersToUpstreamsByType instances)}
''
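Given the `packages.<system>.tests` output added in flake.nix above, this derivation is presumably reachable as `tests.nginx-pipeline` from the flake, so something like `nix build .#tests.nginx-pipeline` from a checkout should render the comparison text file; that invocation is an assumption, not documented in the repository.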