Compare commits

..

37 Commits

Author SHA1 Message Date
74f5316121
default.nix: fix mpvipc-async hash 2025-01-10 22:42:59 +01:00
c4e2ade27d
websocket-api: use IdPool to manage connection ids 2025-01-06 19:11:40 +01:00
945c2abc15
add websocket API 2025-01-06 19:11:40 +01:00
29ae3d7482
Move project from Projects to Grzegorz 2025-01-06 16:32:29 +01:00
04726d1ce0
main: continuously report play status to systemd 2024-12-23 01:16:13 +01:00
c8ee55ec92
cargo fmt 2024-12-14 13:46:42 +01:00
b0b77b4981
Cargo.toml: update url to mpvipc-async 2024-12-14 12:23:06 +01:00
4a02bd089b
mpv_setup: add ytdl hook args 2024-12-11 12:12:53 +01:00
80e0447bcb
default.nix: filter flake.lock from source 2024-10-30 01:53:59 +01:00
14703dc733
flake.nix: opinionated app 2024-10-30 01:53:36 +01:00
012cdb4658
Add openapi docs 2024-10-30 01:53:08 +01:00
355d2ad13d
module.nix: relax hardening 2024-10-22 19:49:52 +02:00
9934b11766
nix: use unwrapped program by default in module 2024-10-20 23:06:32 +02:00
cfb6bbd7a7
default.nix: better source filtering 2024-10-20 23:06:32 +02:00
b1f8cf9ba2
systemd integration
- Add watchdog timeout support
- Add native journald logging support
- Add application state notifications
- Add verbosity flag
2024-10-20 23:06:32 +02:00
64bcef4307
default.nix: filter source 2024-10-20 19:48:23 +02:00
0def9bc340
flake.lock: bump 2024-10-20 19:48:04 +02:00
ae975f946d
flake.nix: fix rust-analyzer 2024-10-20 19:47:55 +02:00
1413af8111
Cargo.lock: bump 2024-10-20 19:47:36 +02:00
c15feac958
Don't fail on not being able to show image 2024-10-20 19:45:53 +02:00
91ab1ad771
module.nix: move greg user under sway condition 2024-10-20 19:45:53 +02:00
a476f877d2
systemd hardening 2024-10-20 19:45:53 +02:00
c5f606f910
module.nix: add enableDebug option 2024-10-20 02:08:39 +02:00
6bb8d28eff
Add picture of grzeg 2024-10-20 01:24:09 +02:00
5e3df86a2a
Split mpv setup to separate module 2024-10-20 00:04:59 +02:00
a52f2a2e49
module.nix: use sway as graphical backend 2024-10-19 23:26:16 +02:00
c1438f2480
Add nixos module and overlay 2024-08-04 20:33:22 +02:00
0ff6440457
Add --mpv-config-file option, with default content
This fixes some issues with seeking in output from yt-dlp
2024-08-04 19:00:05 +02:00
a0202215aa
README.md: add startup commands 2024-08-04 04:19:00 +02:00
e9190f7879
default.nix: init, flake.nix: add app 2024-08-04 04:10:59 +02:00
a89cb24c86
Cargo.toml: add some metadata 2024-08-04 03:19:55 +02:00
a5bedf4d87
Cargo.toml: optimize release profile 2024-08-04 03:19:42 +02:00
894cc4e146
Use mpvipc-async version 0.1.0 2024-08-04 03:13:11 +02:00
24b0307f0f
.envrc: init 2024-08-04 03:13:10 +02:00
f0b0e9f248
flake.nix: replace fenix with rust-overlay 2024-08-04 03:13:10 +02:00
0d4841a9ec
README: move TODO list to gitea issue tracker 2024-07-28 16:29:17 +02:00
18d186b997
nix support 2024-04-19 00:20:16 +02:00
19 changed files with 2944 additions and 778 deletions

1
.envrc Normal file
View File

@@ -0,0 +1 @@
use flake

1
.gitignore vendored
View File

@@ -1 +1,2 @@
/target
result

1611
Cargo.lock generated

File diff suppressed because it is too large

Cargo.toml
View File

@@ -2,30 +2,34 @@
name = "greg-ng"
version = "0.1.0"
edition = "2021"
license = "MIT"
authors = ["oysteikt@pvv.ntnu.no"]
readme = "README.md"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
aide = { version = "0.13.3", features = [
"axum",
"axum-extra",
"axum-extra-query",
"axum-ws",
"macros",
"scalar",
] }
anyhow = "1.0.82"
axum = { version = "0.6.20", features = ["macros", "ws"] }
axum-jsonschema = { version = "0.8.0", features = ["aide"] }
axum-macros = "0.4.1"
axum = { version = "0.7.7", features = ["macros", "ws"] }
clap = { version = "4.4.1", features = ["derive"] }
clap-verbosity-flag = "2.2.2"
env_logger = "0.10.0"
futures = "0.3.31"
log = "0.4.20"
mpvipc = "1.3.0"
schemars = "0.8.16"
mpvipc-async = { git = "https://git.pvv.ntnu.no/Grzegorz/mpvipc-async.git", branch = "main" }
sd-notify = "0.4.3"
serde = { version = "1.0.188", features = ["derive"] }
serde_json = "1.0.105"
serde_urlencoded = "0.7.1"
systemd-journal-logger = "2.2.0"
tempfile = "3.11.0"
tokio = { version = "1.32.0", features = ["full"] }
tower = { version = "0.4.13", features = ["full"] }
tower-http = { version = "0.4.3", features = ["full"] }
utoipa = { version = "5.1.3", features = ["axum_extras"] }
utoipa-axum = "0.1.2"
utoipa-swagger-ui = { version = "8.0.3", features = ["axum", "vendored"] }
[profile.release]
strip = true
lto = true
codegen-units = 1

README.md
View File

@@ -2,23 +2,20 @@
New implementation of https://github.com/Programvareverkstedet/grzegorz
## Feature wishlist
## Test it out
- [ ] Feature parity with old grzegorz
- [X] Rest API
- [ ] Rest API docs
- [ ] Metadata fetcher
- [ ] Init mpv with image of grzegorz
- [ ] Save playlists to machine
- [ ] Cache playlist contents to disk
- [ ] Expose service through mpd protocol
- [ ] Users with playlists and songs (and auth?)
- [ ] Some kind of fair scheduling for each user
- [ ] Max time to avoid playlist songs
- [ ] Expose video/media stream so others can listen at home
- [ ] Syncope support >:)
- [ ] Jitsi support >:)))
- [ ] Show other media while playing music, like grafana or bustimes
- [ ] Soft shuffle
- [ ] Libre.fm integration
- [ ] Karaoke mode lmao
```sh
# NixOS
nix run "git+https://git.pvv.ntnu.no/Grzegorz/greg-ng#" -- --mpv-socket-path /tmp/mpv.sock
# Other (after git clone and rust toolchain has been set up)
cargo run -- --mpv-socket-path /tmp/mpv.sock
```
See also https://git.pvv.ntnu.no/Grzegorz/grzegorz-clients for frontend alternatives
## Debugging
```sh
RUST_LOG=greg_ng=trace,mpvipc=trace cargo run -- --mpv-socket-path /tmp/mpv.sock
```
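
Once the server is up, the REST API can be exercised directly. A minimal sketch, assuming the default `localhost:8008` used elsewhere in this changeset (the interactive Swagger UI added in the OpenAPI commit is served under `/docs`):

```sh
# Is the player currently playing?
curl http://localhost:8008/api/play

# Interactive API docs (Swagger UI):
# http://localhost:8008/docs
```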

3
assets/default-mpv.conf Normal file
View File

@@ -0,0 +1,3 @@
[youtube]
profile-cond=path:find('youtu%.?be')
--ytdl-format="bestvideo[height<=?720][vcodec!~='vp0?9']+bestaudio/best"

BIN
assets/the_man.png Normal file

Binary file not shown.

After: 157 KiB

52
default.nix Normal file
View File

@@ -0,0 +1,52 @@
{
lib
, fetchFromGitHub
, rustPlatform
, makeWrapper
, mpv
, wrapped ? false
}:
rustPlatform.buildRustPackage rec {
pname = "greg-ng";
version = "0.1.0";
src = builtins.filterSource (path: type: let
baseName = baseNameOf (toString path);
in !(lib.any (b: b) [
(!(lib.cleanSourceFilter path type))
(type == "directory" && lib.elem baseName [
".direnv"
".git"
"target"
"result"
])
(type == "regular" && lib.elem baseName [
"flake.nix"
"flake.lock"
"default.nix"
"module.nix"
".envrc"
])
])) ./.;
nativeBuildInputs = [ makeWrapper ];
cargoLock = {
lockFile = ./Cargo.lock;
outputHashes = {
"mpvipc-async-0.1.0" = "sha256-V22wdnVVCBzayqkwb2d0msG7YypVss0cGBihtXrHtuM=";
};
};
postInstall = lib.optionalString wrapped ''
wrapProgram $out/bin/greg-ng \
--prefix PATH : '${lib.makeBinPath [ mpv ]}'
'';
meta = with lib; {
license = licenses.mit;
maintainers = with maintainers; [ h7x4 ];
platforms = platforms.linux ++ platforms.darwin;
mainProgram = "greg-ng";
};
}
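
The `outputHashes` entry pins the git dependency on mpvipc-async; when that branch moves (see the "default.nix: fix mpvipc-async hash" commit at the top of this compare), the hash has to be refreshed. One common workflow, sketched here as an assumption rather than anything this repo prescribes, is to swap in `lib.fakeHash`, rebuild, and copy the correct hash out of the resulting mismatch error:

```nix
# Hypothetical intermediate state while refreshing the pin:
cargoLock = {
  lockFile = ./Cargo.lock;
  outputHashes = {
    # rebuild, then replace with the sha256 reported in the hash-mismatch error
    "mpvipc-async-0.1.0" = lib.fakeHash;
  };
};
```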

48
flake.lock generated Normal file
View File

@@ -0,0 +1,48 @@
{
"nodes": {
"nixpkgs": {
"locked": {
"lastModified": 1729256560,
"narHash": "sha256-/uilDXvCIEs3C9l73JTACm4quuHUsIHcns1c+cHUJwA=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "4c2fcb090b1f3e5b47eaa7bd33913b574a11e0a0",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixos-unstable",
"repo": "nixpkgs",
"type": "github"
}
},
"root": {
"inputs": {
"nixpkgs": "nixpkgs",
"rust-overlay": "rust-overlay"
}
},
"rust-overlay": {
"inputs": {
"nixpkgs": [
"nixpkgs"
]
},
"locked": {
"lastModified": 1729391507,
"narHash": "sha256-as0I9xieJUHf7kiK2a9znDsVZQTFWhM1pLivII43Gi0=",
"owner": "oxalica",
"repo": "rust-overlay",
"rev": "784981a9feeba406de38c1c9a3decf966d853cca",
"type": "github"
},
"original": {
"owner": "oxalica",
"repo": "rust-overlay",
"type": "github"
}
}
},
"root": "root",
"version": 7
}

75
flake.nix Normal file
View File

@@ -0,0 +1,75 @@
{
inputs = {
nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable";
rust-overlay.url = "github:oxalica/rust-overlay";
rust-overlay.inputs.nixpkgs.follows = "nixpkgs";
};
outputs = { self, nixpkgs, rust-overlay }:
let
inherit (nixpkgs) lib;
systems = [
"x86_64-linux"
"aarch64-linux"
"x86_64-darwin"
"aarch64-darwin"
"armv7l-linux"
];
forAllSystems = f: lib.genAttrs systems (system: let
pkgs = import nixpkgs {
inherit system;
overlays = [
(import rust-overlay)
];
};
rust-bin = rust-overlay.lib.mkRustBin { } pkgs.buildPackages;
toolchain = rust-bin.stable.latest.default.override {
extensions = [ "rust-src" "rust-analyzer" "rust-std" ];
};
in f system pkgs toolchain);
in {
apps = forAllSystems (system: pkgs: _: {
default = self.apps.${system}.greg-ng;
greg-ng = let
package = self.packages.${system}.greg-ng-wrapped;
in {
type = "app";
program = toString (pkgs.writeShellScript "greg-ng" ''
${lib.getExe package} --mpv-socket-path /tmp/greg-ng-mpv.sock -vvvv
'');
};
});
devShells = forAllSystems (system: pkgs: toolchain: {
default = pkgs.mkShell {
nativeBuildInputs = [
toolchain
pkgs.mpv
];
RUST_SRC_PATH = "${toolchain}/lib/rustlib/src/rust/library";
};
});
overlays = {
default = self.overlays.greg-ng;
greg-ng = final: prev: {
inherit (self.packages.${prev.system}) greg-ng;
};
};
packages = forAllSystems (system: pkgs: _: {
default = self.packages.${system}.greg-ng;
greg-ng = pkgs.callPackage ./default.nix { };
greg-ng-wrapped = pkgs.callPackage ./default.nix {
wrapped = true;
};
});
} // {
nixosModules.default = ./module.nix;
};
}
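
The dev shell above provides the pinned Rust toolchain (with rust-analyzer and rust-src) plus mpv, so local development is plain flake usage; nothing here is repo-specific beyond the `--mpv-socket-path` flag already shown in the README:

```sh
nix develop                                   # enter the dev shell
cargo run -- --mpv-socket-path /tmp/mpv.sock  # run against a local socket
```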

197
module.nix Normal file
View File

@@ -0,0 +1,197 @@
{ config, pkgs, lib, ... }:
let
cfg = config.services.greg-ng;
in
{
options.services.greg-ng = {
enable = lib.mkEnableOption "greg-ng, an mpv based media player";
package = lib.mkPackageOption pkgs "greg-ng" { };
mpvPackage = lib.mkPackageOption pkgs "mpv" { };
enableSway = lib.mkEnableOption "sway as the main window manager";
enablePipewire = lib.mkEnableOption "pipewire" // { default = true; };
logLevel = lib.mkOption {
type = lib.types.enum [ "quiet" "error" "warn" "info" "debug" "trace" ];
default = "debug";
description = "Log level.";
apply = level: {
"quiet" = "-q";
"error" = "";
"warn" = "-v";
"info" = "-vv";
"debug" = "-vvv";
"trace" = "-vvvv";
}.${level};
};
# TODO: create some better descriptions
settings = {
host = lib.mkOption {
type = lib.types.str;
default = "localhost";
example = "0.0.0.0";
description = ''
Which host to bind to.
'';
};
port = lib.mkOption {
type = lib.types.port;
default = 8008;
example = 10008;
description = ''
Which port to bind to.
'';
};
mpv-socket-path = lib.mkOption {
type = lib.types.str;
default = "%t/greg-ng-mpv.sock";
description = ''
Path to the mpv socket.
'';
};
mpv-executable-path = lib.mkOption {
type = lib.types.str;
default = lib.getExe cfg.mpvPackage;
defaultText = lib.literalExpression ''
lib.getExe config.services.greg-ng.mpvPackage
'';
description = ''
Path to the mpv executable.
'';
};
mpv-config-file = lib.mkOption {
type = with lib.types; nullOr str;
default = null;
description = ''
Path to the mpv config file.
'';
};
auto-start-mpv = lib.mkOption {
type = lib.types.bool;
default = true;
description = ''
Whether to automatically start mpv.
'';
};
force-auto-start = lib.mkOption {
type = lib.types.bool;
default = true;
description = ''
Whether to force auto starting mpv.
'';
};
};
};
config = lib.mkMerge [
(lib.mkIf cfg.enable {
systemd.user.services.greg-ng = {
description = "greg-ng, an mpv based media player";
wantedBy = [ "graphical-session.target" ];
partOf = [ "graphical-session.target" ];
serviceConfig = {
Type = "notify";
ExecStart = let
args = lib.cli.toGNUCommandLineShell { } (cfg.settings // {
systemd = true;
});
in "${lib.getExe cfg.package} ${cfg.logLevel} ${args}";
Restart = "always";
RestartSec = 3;
WatchdogSec = lib.mkDefault 15;
TimeoutStartSec = lib.mkDefault 30;
RestrictAddressFamilies = [ "AF_UNIX" "AF_INET" "AF_INET6" ];
AmbientCapabilities = [ "" ];
CapabilityBoundingSet = [ "" ];
DeviceAllow = [ "" ];
LockPersonality = true;
# Might work, but wouldn't bet on it with embedded lua in mpv
MemoryDenyWriteExecute = false;
NoNewPrivileges = true;
# MPV and mesa tries to talk directly to the GPU.
PrivateDevices = false;
PrivateMounts = true;
PrivateTmp = true;
PrivateUsers = true;
ProcSubset = "pid";
ProtectClock = true;
ProtectControlGroups = true;
# MPV wants ~/.cache
ProtectHome = false;
ProtectHostname = true;
ProtectKernelLogs = true;
ProtectKernelModules = true;
ProtectKernelTunables = true;
ProtectProc = "invisible";
# I'll figure it out sometime
# ProtectSystem = "full";
RemoveIPC = true;
UMask = "0077";
RestrictNamespaces = true;
RestrictRealtime = true;
RestrictSUIDSGID = true;
SystemCallArchitectures = "native";
# Something brokey
# SystemCallFilter = [
# "@system-service"
# "~@privileged"
# "~@resources"
# ];
};
};
})
(lib.mkIf (cfg.enable && cfg.enablePipewire) {
services.pipewire = {
enable = true;
alsa.enable = true;
alsa.support32Bit = true;
pulse.enable = true;
};
})
(lib.mkIf (cfg.enable && cfg.enableSway) {
programs.sway = {
enable = true;
wrapperFeatures.gtk = true;
};
xdg.portal = {
enable = true;
wlr.enable = true;
extraPortals = [ pkgs.xdg-desktop-portal-gtk ];
};
users = {
users.greg = {
isNormalUser = true;
group = "greg";
uid = 2000;
description = "loud gym bro";
};
groups.greg.gid = 2000;
};
services.greetd = {
enable = true;
settings = rec {
initial_session = {
command = "${pkgs.sway}/bin/sway";
user = "greg";
};
default_session = initial_session;
};
};
})
];
}
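
A minimal sketch of wiring the module into a NixOS configuration, assuming the flake's `nixosModules.default` has been imported; option names follow the definitions above:

```nix
{
  services.greg-ng = {
    enable = true;
    enableSway = true;       # run sway via greetd as the graphical session
    # enablePipewire defaults to true
    settings = {
      host = "0.0.0.0";
      port = 8008;
    };
  };
}
```

Note that the unit is a systemd user service tied to `graphical-session.target`, so it starts once the `greg` user's sway session is up.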

src/api/mod.rs
View File

@@ -1,4 +1,6 @@
mod base;
mod rest_wrapper_v1;
mod websocket_v1;
pub use rest_wrapper_v1::rest_api_routes;
pub use rest_wrapper_v1::{rest_api_docs, rest_api_routes};
pub use websocket_v1::websocket_api;

src/api/base.rs
View File

@@ -1,62 +1,61 @@
use std::sync::Arc;
use log::trace;
use mpvipc::{
Mpv, NumberChangeOptions, PlaylistAddOptions, PlaylistAddTypeOptions, SeekOptions, Switch,
use mpvipc_async::{
LoopProperty, Mpv, MpvExt, NumberChangeOptions, PlaylistAddOptions, PlaylistAddTypeOptions,
SeekOptions, Switch,
};
use serde_json::{json, Value};
use tokio::sync::Mutex;
/// Add item to playlist
pub async fn loadfile(mpv: Arc<Mutex<Mpv>>, path: &str) -> anyhow::Result<()> {
trace!("api::loadfile({:?})", path);
mpv.lock().await.playlist_add(
pub async fn loadfile(mpv: Mpv, path: &str) -> anyhow::Result<()> {
log::trace!("api::loadfile({:?})", path);
mpv.playlist_add(
path,
PlaylistAddTypeOptions::File,
PlaylistAddOptions::Append,
)?;
)
.await?;
Ok(())
}
/// Check whether the player is paused or playing
pub async fn play_get(mpv: Arc<Mutex<Mpv>>) -> anyhow::Result<Value> {
trace!("api::play_get()");
let paused: bool = mpv.lock().await.get_property("pause")?;
pub async fn play_get(mpv: Mpv) -> anyhow::Result<Value> {
log::trace!("api::play_get()");
let paused: bool = !mpv.is_playing().await?;
Ok(json!(!paused))
}
/// Set whether the player is paused or playing
pub async fn play_set(mpv: Arc<Mutex<Mpv>>, should_play: bool) -> anyhow::Result<()> {
trace!("api::play_set({:?})", should_play);
mpv.lock()
pub async fn play_set(mpv: Mpv, should_play: bool) -> anyhow::Result<()> {
log::trace!("api::play_set({:?})", should_play);
mpv.set_playback(if should_play { Switch::On } else { Switch::Off })
.await
.set_property("pause", !should_play)
.map_err(|e| e.into())
}
/// Get the current player volume
pub async fn volume_get(mpv: Arc<Mutex<Mpv>>) -> anyhow::Result<Value> {
trace!("api::volume_get()");
let volume: f64 = mpv.lock().await.get_property("volume")?;
pub async fn volume_get(mpv: Mpv) -> anyhow::Result<Value> {
log::trace!("api::volume_get()");
let volume: f64 = mpv.get_volume().await?;
Ok(json!(volume))
}
/// Set the player volume
pub async fn volume_set(mpv: Arc<Mutex<Mpv>>, value: f64) -> anyhow::Result<()> {
trace!("api::volume_set({:?})", value);
mpv.lock()
pub async fn volume_set(mpv: Mpv, value: f64) -> anyhow::Result<()> {
log::trace!("api::volume_set({:?})", value);
mpv.set_volume(value, NumberChangeOptions::Absolute)
.await
.set_volume(value, NumberChangeOptions::Absolute)
.map_err(|e| e.into())
}
/// Get current playback position
pub async fn time_get(mpv: Arc<Mutex<Mpv>>) -> anyhow::Result<Value> {
trace!("api::time_get()");
let current: f64 = mpv.lock().await.get_property("time-pos")?;
let remaining: f64 = mpv.lock().await.get_property("time-remaining")?;
let total = current + remaining;
pub async fn time_get(mpv: Mpv) -> anyhow::Result<Value> {
log::trace!("api::time_get()");
let current: Option<f64> = mpv.get_time_pos().await?;
let remaining: Option<f64> = mpv.get_time_remaining().await?;
let total = match (current, remaining) {
(Some(c), Some(r)) => Some(c + r),
(_, _) => None,
};
Ok(json!({
"current": current,
@@ -66,22 +65,16 @@ pub async fn time_get(mpv: Arc<Mutex<Mpv>>) -> anyhow::Result<Value> {
}
/// Set playback position
pub async fn time_set(
mpv: Arc<Mutex<Mpv>>,
pos: Option<f64>,
percent: Option<f64>,
) -> anyhow::Result<()> {
trace!("api::time_set({:?}, {:?})", pos, percent);
pub async fn time_set(mpv: Mpv, pos: Option<f64>, percent: Option<f64>) -> anyhow::Result<()> {
log::trace!("api::time_set({:?}, {:?})", pos, percent);
if pos.is_some() && percent.is_some() {
anyhow::bail!("pos and percent cannot be provided at the same time");
}
if let Some(pos) = pos {
mpv.lock().await.seek(pos, SeekOptions::Absolute)?;
mpv.seek(pos, SeekOptions::Absolute).await?;
} else if let Some(percent) = percent {
mpv.lock()
.await
.seek(percent, SeekOptions::AbsolutePercent)?;
mpv.seek(percent, SeekOptions::AbsolutePercent).await?;
} else {
anyhow::bail!("Either pos or percent must be provided");
};
@@ -90,10 +83,10 @@ pub async fn time_set(
}
/// Get the current playlist
pub async fn playlist_get(mpv: Arc<Mutex<Mpv>>) -> anyhow::Result<Value> {
trace!("api::playlist_get()");
let playlist: mpvipc::Playlist = mpv.lock().await.get_playlist()?;
let is_playing: bool = mpv.lock().await.get_property("pause")?;
pub async fn playlist_get(mpv: Mpv) -> anyhow::Result<Value> {
log::trace!("api::playlist_get()");
let playlist: mpvipc_async::Playlist = mpv.get_playlist().await?;
let is_playing: bool = mpv.is_playing().await?;
let items: Vec<Value> = playlist
.0
@@ -104,7 +97,7 @@ pub async fn playlist_get(mpv: Arc<Mutex<Mpv>>) -> anyhow::Result<Value> {
"index": i,
"current": item.current,
"playing": is_playing,
"filename": item.filename,
"filename": item.title.as_ref().unwrap_or(&item.filename),
"data": {
"fetching": true,
}
@@ -116,74 +109,64 @@ pub async fn playlist_get(mpv: Arc<Mutex<Mpv>>) -> anyhow::Result<Value> {
}
/// Skip to the next item in the playlist
pub async fn playlist_next(mpv: Arc<Mutex<Mpv>>) -> anyhow::Result<()> {
trace!("api::playlist_next()");
mpv.lock().await.next().map_err(|e| e.into())
pub async fn playlist_next(mpv: Mpv) -> anyhow::Result<()> {
log::trace!("api::playlist_next()");
mpv.next().await.map_err(|e| e.into())
}
/// Go back to the previous item in the playlist
pub async fn playlist_previous(mpv: Arc<Mutex<Mpv>>) -> anyhow::Result<()> {
trace!("api::playlist_previous()");
mpv.lock().await.prev().map_err(|e| e.into())
pub async fn playlist_previous(mpv: Mpv) -> anyhow::Result<()> {
log::trace!("api::playlist_previous()");
mpv.prev().await.map_err(|e| e.into())
}
/// Go chosen item in the playlist
pub async fn playlist_goto(mpv: Arc<Mutex<Mpv>>, index: usize) -> anyhow::Result<()> {
trace!("api::playlist_goto({:?})", index);
mpv.lock()
.await
.playlist_play_id(index)
.map_err(|e| e.into())
pub async fn playlist_goto(mpv: Mpv, index: usize) -> anyhow::Result<()> {
log::trace!("api::playlist_goto({:?})", index);
mpv.playlist_play_id(index).await.map_err(|e| e.into())
}
/// Clears the playlist
pub async fn playlist_clear(mpv: Arc<Mutex<Mpv>>) -> anyhow::Result<()> {
trace!("api::playlist_clear()");
mpv.lock().await.playlist_clear().map_err(|e| e.into())
pub async fn playlist_clear(mpv: Mpv) -> anyhow::Result<()> {
log::trace!("api::playlist_clear()");
mpv.playlist_clear().await.map_err(|e| e.into())
}
/// Remove an item from the playlist by index
pub async fn playlist_remove(mpv: Arc<Mutex<Mpv>>, index: usize) -> anyhow::Result<()> {
trace!("api::playlist_remove({:?})", index);
mpv.lock()
.await
.playlist_remove_id(index)
.map_err(|e| e.into())
pub async fn playlist_remove(mpv: Mpv, index: usize) -> anyhow::Result<()> {
log::trace!("api::playlist_remove({:?})", index);
mpv.playlist_remove_id(index).await.map_err(|e| e.into())
}
/// Move an item in the playlist from one index to another
pub async fn playlist_move(mpv: Arc<Mutex<Mpv>>, from: usize, to: usize) -> anyhow::Result<()> {
trace!("api::playlist_move({:?}, {:?})", from, to);
mpv.lock()
.await
.playlist_move_id(from, to)
.map_err(|e| e.into())
pub async fn playlist_move(mpv: Mpv, from: usize, to: usize) -> anyhow::Result<()> {
log::trace!("api::playlist_move({:?}, {:?})", from, to);
mpv.playlist_move_id(from, to).await.map_err(|e| e.into())
}
/// Shuffle the playlist
pub async fn shuffle(mpv: Arc<Mutex<Mpv>>) -> anyhow::Result<()> {
trace!("api::shuffle()");
mpv.lock().await.playlist_shuffle().map_err(|e| e.into())
pub async fn shuffle(mpv: Mpv) -> anyhow::Result<()> {
log::trace!("api::shuffle()");
mpv.playlist_shuffle().await.map_err(|e| e.into())
}
/// See whether it loops the playlist or not
pub async fn playlist_get_looping(mpv: Arc<Mutex<Mpv>>) -> anyhow::Result<Value> {
trace!("api::playlist_get_looping()");
let loop_playlist = mpv.lock().await.get_property_string("loop-playlist")? == "inf";
Ok(json!(loop_playlist))
pub async fn playlist_get_looping(mpv: Mpv) -> anyhow::Result<Value> {
log::trace!("api::playlist_get_looping()");
let loop_status = match mpv.playlist_is_looping().await? {
LoopProperty::No => false,
LoopProperty::Inf => true,
LoopProperty::N(_) => true,
};
Ok(json!(loop_status))
}
pub async fn playlist_set_looping(mpv: Arc<Mutex<Mpv>>, r#loop: bool) -> anyhow::Result<()> {
trace!("api::playlist_set_looping({:?})", r#loop);
if r#loop {
mpv.lock()
.await
.set_loop_playlist(Switch::On)
.map_err(|e| e.into())
} else {
mpv.lock()
.await
.set_loop_playlist(Switch::Off)
.map_err(|e| e.into())
}
pub async fn playlist_set_looping(mpv: Mpv, r#loop: bool) -> anyhow::Result<()> {
log::trace!("api::playlist_set_looping({:?})", r#loop);
mpv.set_loop_playlist(if r#loop { Switch::On } else { Switch::Off })
.await
.map_err(|e| e.into())
}

src/api/rest_wrapper_v1.rs
View File

@@ -1,123 +1,20 @@
use std::{ops::Deref, sync::Arc};
use aide::{axum::IntoApiResponse, operation::OperationIo, OperationOutput};
use axum_jsonschema::JsonSchemaRejection;
use axum::{
async_trait, extract::{rejection::{FailedToDeserializeQueryString, QueryRejection}, FromRequest, FromRequestParts, State}, http::{request::Parts, StatusCode}, response::{IntoResponse, Response}, routing::{delete, get, post}, Json, Router
extract::{Query, State},
http::StatusCode,
response::{IntoResponse, Response},
routing::{delete, get, post},
Json, Router,
};
use mpvipc::Mpv;
use schemars::JsonSchema;
use serde::{de::DeserializeOwned, Serialize};
use mpvipc_async::Mpv;
use serde_json::{json, Value};
use tokio::sync::Mutex;
use utoipa::OpenApi;
use utoipa_axum::{router::OpenApiRouter, routes};
use utoipa_swagger_ui::SwaggerUi;
use super::base;
// #[derive(FromRequest, OperationIo)]
// #[from_request(via(axum_jsonschema::Json), rejection(RestResponse))]
// #[aide(
// input_with = "axum_jsonschema::Json<T>",
// output_with = "axum_jsonschema::Json<T>",
// json_schema
// )]
pub struct RestResponse(anyhow::Result<Value>);
impl From<anyhow::Result<Value>> for RestResponse {
fn from(result: anyhow::Result<Value>) -> Self {
Self(result.map(|value| json!({ "success": true, "error": false, "value": value })))
}
}
impl From<anyhow::Result<()>> for RestResponse {
fn from(result: anyhow::Result<()>) -> Self {
Self(result.map(|_| json!({ "success": true, "error": false })))
}
}
impl IntoResponse for RestResponse {
fn into_response(self) -> Response {
match self.0 {
Ok(value) => (StatusCode::OK, Json(value)).into_response(),
Err(err) => (
StatusCode::INTERNAL_SERVER_ERROR,
Json(json!({ "error": err.to_string(), "success": false })),
)
.into_response(),
}
}
}
impl aide::OperationOutput for RestResponse {
type Inner = anyhow::Result<Value>;
}
/// -------
// impl<T> aide::OperationInput for Query<T> {}
// #[derive(FromRequest, OperationIo)]
// #[from_request(via(axum_jsonschema::Json), rejection(RestResponse))]
// #[aide(
// input_with = "axum_jsonschema::Json<T>",
// output_with = "axum_jsonschema::Json<T>",
// json_schema
// )]
// pub struct Json<T>(pub T);
// impl<T> IntoResponse for Json<T>
// where
// T: Serialize,
// {
// fn into_response(self) -> axum::response::Response {
// axum::Json(self.0).into_response()
// }
// }
#[derive(OperationIo)]
#[aide(json_schema)]
pub struct Query<T>(pub T);
#[async_trait]
impl <T, S> FromRequestParts<S> for Query<T>
where
T: JsonSchema + DeserializeOwned,
S: Send + Sync,
{
type Rejection = QueryRejection;
async fn from_request_parts(parts: &mut Parts, _state: &S) -> Result<Self, Self::Rejection> {
let axum::extract::Query(query) = axum::extract::Query::try_from_uri(&parts.uri)?;
Ok(Query(query))
}
}
impl<T> Deref for Query<T> {
type Target = T;
fn deref(&self) -> &Self::Target {
&self.0
}
}
pub fn rest_api_route_docs(mpv: Arc<Mutex<Mpv>>) -> Router {
use aide::axum::ApiRouter;
use aide::axum::routing::{delete, get, post};
let mut api = aide::openapi::OpenApi::default();
let x = ApiRouter::new()
// .api_route("/load", get(loadfile))
.api_route("/play", get(play_get))
.finish_api(&mut api);
// .with_state(mpv);
todo!()
}
// ----------
pub fn rest_api_routes(mpv: Arc<Mutex<Mpv>>) -> Router {
pub fn rest_api_routes(mpv: Mpv) -> Router {
Router::new()
.route("/load", post(loadfile))
.route("/play", get(play_get))
@@ -138,106 +35,292 @@ pub fn rest_api_routes(mpv: Arc<Mutex<Mpv>>) -> Router {
.with_state(mpv)
}
pub fn rest_api_docs(mpv: Mpv) -> Router {
let (router, api) = OpenApiRouter::with_openapi(ApiDoc::openapi())
.routes(routes!(loadfile))
.routes(routes!(play_get, play_set))
.routes(routes!(volume_get, volume_set))
.routes(routes!(time_get, time_set))
.routes(routes!(playlist_get, playlist_remove_or_clear))
.routes(routes!(playlist_next))
.routes(routes!(playlist_previous))
.routes(routes!(playlist_goto))
.routes(routes!(playlist_move))
.routes(routes!(playlist_get_looping, playlist_set_looping))
.routes(routes!(shuffle))
.with_state(mpv)
.split_for_parts();
router.merge(SwaggerUi::new("/docs").url("/docs/openapi.json", api))
}
// NOTE: the openapi stuff is very heavily duplicated and introduces
// a lot of maintenance overhead and boilerplate. It should theoretically
// be possible to infer a lot of this from axum, but I haven't found a
// good library that does this and works properly yet (I have tried some
// but they all had issues). Feel free to replace this with a better solution.
#[derive(OpenApi)]
#[openapi(info(
description = "The legacy Grzegorz Brzeczyszczykiewicz API, used to control a running mpv instance",
version = "1.0.0",
))]
struct ApiDoc;
#[derive(serde::Serialize, utoipa::ToSchema)]
struct EmptySuccessResponse {
success: bool,
error: bool,
}
#[derive(serde::Serialize, utoipa::ToSchema)]
struct SuccessResponse {
#[schema(example = true)]
success: bool,
#[schema(example = false)]
error: bool,
#[schema(example = json!({ some: "arbitrary json value" }))]
value: Value,
}
#[derive(serde::Serialize, utoipa::ToSchema)]
struct ErrorResponse {
#[schema(example = "error....")]
error: String,
#[schema(example = "error....")]
errortext: String,
#[schema(example = false)]
success: bool,
}
pub struct RestResponse(anyhow::Result<Value>);
impl From<anyhow::Result<Value>> for RestResponse {
fn from(result: anyhow::Result<Value>) -> Self {
Self(result.map(|value| json!({ "success": true, "error": false, "value": value })))
}
}
impl From<anyhow::Result<()>> for RestResponse {
fn from(result: anyhow::Result<()>) -> Self {
Self(result.map(|_| json!({ "success": true, "error": false })))
}
}
impl IntoResponse for RestResponse {
fn into_response(self) -> Response {
match self.0 {
Ok(value) => (StatusCode::OK, Json(value)).into_response(),
Err(err) => (
StatusCode::INTERNAL_SERVER_ERROR,
Json(json!({ "error": err.to_string(), "errortext": err.to_string(), "success": false })),
)
.into_response(),
}
}
}
// -------------------//
// Boilerplate galore //
// -------------------//
// TODO: These could possibly be generated with a proc macro
#[derive(serde::Deserialize, JsonSchema)]
#[derive(serde::Deserialize, utoipa::IntoParams)]
struct LoadFileArgs {
path: String,
}
#[axum::debug_handler]
async fn loadfile(
State(mpv): State<Arc<Mutex<Mpv>>>,
Query(query): Query<LoadFileArgs>,
) -> RestResponse {
/// Add item to playlist
#[utoipa::path(
post,
path = "/load",
params(LoadFileArgs),
responses(
(status = 200, description = "Success", body = EmptySuccessResponse),
(status = 500, description = "Internal server error", body = ErrorResponse),
)
)]
async fn loadfile(State(mpv): State<Mpv>, Query(query): Query<LoadFileArgs>) -> RestResponse {
base::loadfile(mpv, &query.path).await.into()
}
async fn play_get(State(mpv): State<Arc<Mutex<Mpv>>>) -> impl IntoApiResponse {
RestResponse::from(base::play_get(mpv).await)
/// Check whether the player is paused or playing
#[utoipa::path(
get,
path = "/play",
responses(
(status = 200, description = "Success", body = SuccessResponse),
(status = 500, description = "Internal server error", body = ErrorResponse),
)
)]
async fn play_get(State(mpv): State<Mpv>) -> RestResponse {
base::play_get(mpv).await.into()
}
#[derive(serde::Deserialize, JsonSchema)]
#[derive(serde::Deserialize, utoipa::IntoParams)]
struct PlaySetArgs {
play: String,
}
async fn play_set(
State(mpv): State<Arc<Mutex<Mpv>>>,
Query(query): Query<PlaySetArgs>,
) -> RestResponse {
/// Set whether the player is paused or playing
#[utoipa::path(
post,
path = "/play",
params(PlaySetArgs),
responses(
(status = 200, description = "Success", body = EmptySuccessResponse),
(status = 500, description = "Internal server error", body = ErrorResponse),
)
)]
async fn play_set(State(mpv): State<Mpv>, Query(query): Query<PlaySetArgs>) -> RestResponse {
let play = query.play.to_lowercase() == "true";
base::play_set(mpv, play).await.into()
}
async fn volume_get(State(mpv): State<Arc<Mutex<Mpv>>>) -> RestResponse {
/// Get the current player volume
#[utoipa::path(
get,
path = "/volume",
responses(
(status = 200, description = "Success", body = SuccessResponse),
(status = 500, description = "Internal server error", body = ErrorResponse),
)
)]
async fn volume_get(State(mpv): State<Mpv>) -> RestResponse {
base::volume_get(mpv).await.into()
}
#[derive(serde::Deserialize, JsonSchema)]
#[derive(serde::Deserialize, utoipa::IntoParams)]
struct VolumeSetArgs {
volume: f64,
}
async fn volume_set(
State(mpv): State<Arc<Mutex<Mpv>>>,
Query(query): Query<VolumeSetArgs>,
) -> RestResponse {
/// Set the player volume
#[utoipa::path(
post,
path = "/volume",
params(VolumeSetArgs),
responses(
(status = 200, description = "Success", body = EmptySuccessResponse),
(status = 500, description = "Internal server error", body = ErrorResponse),
)
)]
async fn volume_set(State(mpv): State<Mpv>, Query(query): Query<VolumeSetArgs>) -> RestResponse {
base::volume_set(mpv, query.volume).await.into()
}
async fn time_get(State(mpv): State<Arc<Mutex<Mpv>>>) -> RestResponse {
/// Get current playback position
#[utoipa::path(
get,
path = "/time",
responses(
(status = 200, description = "Success", body = SuccessResponse),
(status = 500, description = "Internal server error", body = ErrorResponse),
)
)]
async fn time_get(State(mpv): State<Mpv>) -> RestResponse {
base::time_get(mpv).await.into()
}
#[derive(serde::Deserialize, JsonSchema)]
#[derive(serde::Deserialize, utoipa::IntoParams)]
struct TimeSetArgs {
pos: Option<f64>,
percent: Option<f64>,
}
async fn time_set(
State(mpv): State<Arc<Mutex<Mpv>>>,
Query(query): Query<TimeSetArgs>,
) -> RestResponse {
/// Set playback position
#[utoipa::path(
post,
path = "/time",
params(TimeSetArgs),
responses(
(status = 200, description = "Success", body = EmptySuccessResponse),
(status = 500, description = "Internal server error", body = ErrorResponse),
)
)]
async fn time_set(State(mpv): State<Mpv>, Query(query): Query<TimeSetArgs>) -> RestResponse {
base::time_set(mpv, query.pos, query.percent).await.into()
}
async fn playlist_get(State(mpv): State<Arc<Mutex<Mpv>>>) -> RestResponse {
/// Get the current playlist
#[utoipa::path(
get,
path = "/playlist",
responses(
(status = 200, description = "Success", body = SuccessResponse),
(status = 500, description = "Internal server error", body = ErrorResponse),
)
)]
async fn playlist_get(State(mpv): State<Mpv>) -> RestResponse {
base::playlist_get(mpv).await.into()
}
async fn playlist_next(State(mpv): State<Arc<Mutex<Mpv>>>) -> RestResponse {
/// Go to the next item in the playlist
#[utoipa::path(
post,
path = "/playlist/next",
responses(
(status = 200, description = "Success", body = EmptySuccessResponse),
(status = 500, description = "Internal server error", body = ErrorResponse),
)
)]
async fn playlist_next(State(mpv): State<Mpv>) -> RestResponse {
base::playlist_next(mpv).await.into()
}
async fn playlist_previous(State(mpv): State<Arc<Mutex<Mpv>>>) -> RestResponse {
/// Go back to the previous item in the playlist
#[utoipa::path(
post,
path = "/playlist/previous",
responses(
(status = 200, description = "Success", body = EmptySuccessResponse),
(status = 500, description = "Internal server error", body = ErrorResponse),
)
)]
async fn playlist_previous(State(mpv): State<Mpv>) -> RestResponse {
base::playlist_previous(mpv).await.into()
}
#[derive(serde::Deserialize, JsonSchema)]
#[derive(serde::Deserialize, utoipa::IntoParams)]
struct PlaylistGotoArgs {
index: usize,
}
/// Go to a specific item in the playlist
#[utoipa::path(
post,
path = "/playlist/goto",
params(PlaylistGotoArgs),
responses(
(status = 200, description = "Success", body = EmptySuccessResponse),
(status = 500, description = "Internal server error", body = ErrorResponse),
)
)]
async fn playlist_goto(
State(mpv): State<Arc<Mutex<Mpv>>>,
State(mpv): State<Mpv>,
Query(query): Query<PlaylistGotoArgs>,
) -> RestResponse {
base::playlist_goto(mpv, query.index).await.into()
}
#[derive(serde::Deserialize, JsonSchema)]
#[derive(serde::Deserialize, utoipa::IntoParams)]
struct PlaylistRemoveOrClearArgs {
index: Option<usize>,
}
/// Clears a single item or the entire playlist
#[utoipa::path(
delete,
path = "/playlist",
params(PlaylistRemoveOrClearArgs),
responses(
(status = 200, description = "Success", body = EmptySuccessResponse),
(status = 500, description = "Internal server error", body = ErrorResponse),
)
)]
async fn playlist_remove_or_clear(
State(mpv): State<Arc<Mutex<Mpv>>>,
State(mpv): State<Mpv>,
Query(query): Query<PlaylistRemoveOrClearArgs>,
) -> RestResponse {
match query.index {
@@ -246,14 +329,24 @@ async fn playlist_remove_or_clear(
}
}
#[derive(serde::Deserialize, JsonSchema)]
#[derive(serde::Deserialize, utoipa::IntoParams)]
struct PlaylistMoveArgs {
index1: usize,
index2: usize,
}
/// Move a playlist item to a different position
#[utoipa::path(
post,
path = "/playlist/move",
params(PlaylistMoveArgs),
responses(
(status = 200, description = "Success", body = EmptySuccessResponse),
(status = 500, description = "Internal server error", body = ErrorResponse),
)
)]
async fn playlist_move(
State(mpv): State<Arc<Mutex<Mpv>>>,
State(mpv): State<Mpv>,
Query(query): Query<PlaylistMoveArgs>,
) -> RestResponse {
base::playlist_move(mpv, query.index1, query.index2)
@@ -261,21 +354,49 @@ async fn playlist_move(
.into()
}
async fn shuffle(State(mpv): State<Arc<Mutex<Mpv>>>) -> RestResponse {
/// Shuffle the playlist
#[utoipa::path(
post,
path = "/playlist/shuffle",
responses(
(status = 200, description = "Success", body = EmptySuccessResponse),
(status = 500, description = "Internal server error", body = ErrorResponse),
)
)]
async fn shuffle(State(mpv): State<Mpv>) -> RestResponse {
base::shuffle(mpv).await.into()
}
async fn playlist_get_looping(State(mpv): State<Arc<Mutex<Mpv>>>) -> RestResponse {
/// Check whether the playlist is looping
#[utoipa::path(
get,
path = "/playlist/loop",
responses(
(status = 200, description = "Success", body = SuccessResponse),
(status = 500, description = "Internal server error", body = ErrorResponse),
)
)]
async fn playlist_get_looping(State(mpv): State<Mpv>) -> RestResponse {
base::playlist_get_looping(mpv).await.into()
}
#[derive(serde::Deserialize, JsonSchema)]
#[derive(serde::Deserialize, utoipa::IntoParams)]
struct PlaylistSetLoopingArgs {
r#loop: bool,
}
/// Set whether the playlist should loop
#[utoipa::path(
post,
path = "/playlist/loop",
params(PlaylistSetLoopingArgs),
responses(
(status = 200, description = "Success", body = EmptySuccessResponse),
(status = 500, description = "Internal server error", body = ErrorResponse),
)
)]
async fn playlist_set_looping(
State(mpv): State<Arc<Mutex<Mpv>>>,
State(mpv): State<Mpv>,
Query(query): Query<PlaylistSetLoopingArgs>,
) -> RestResponse {
base::playlist_set_looping(mpv, query.r#loop).await.into()

449
src/api/websocket_v1.rs Normal file
View File

@@ -0,0 +1,449 @@
use std::{
net::SocketAddr,
sync::{Arc, Mutex},
};
use anyhow::Context;
use futures::{stream::FuturesUnordered, StreamExt};
use serde::{Deserialize, Serialize};
use axum::{
extract::{
ws::{Message, WebSocket},
ConnectInfo, State, WebSocketUpgrade,
},
response::IntoResponse,
routing::any,
Router,
};
use mpvipc_async::{
LoopProperty, Mpv, MpvExt, NumberChangeOptions, Playlist, PlaylistAddTypeOptions, SeekOptions,
Switch,
};
use serde_json::{json, Value};
use tokio::{select, sync::watch};
use crate::util::IdPool;
#[derive(Debug, Clone)]
struct WebsocketState {
mpv: Mpv,
id_pool: Arc<Mutex<IdPool>>,
}
pub fn websocket_api(mpv: Mpv, id_pool: Arc<Mutex<IdPool>>) -> Router {
let state = WebsocketState { mpv, id_pool };
Router::new()
.route("/", any(websocket_handler))
.with_state(state)
}
async fn websocket_handler(
ws: WebSocketUpgrade,
ConnectInfo(addr): ConnectInfo<SocketAddr>,
State(WebsocketState { mpv, id_pool }): State<WebsocketState>,
) -> impl IntoResponse {
let mpv = mpv.clone();
let id = match id_pool.lock().unwrap().request_id() {
Ok(id) => id,
Err(e) => {
log::error!("Failed to get id from id pool: {:?}", e);
return axum::http::StatusCode::INTERNAL_SERVER_ERROR.into_response();
}
};
ws.on_upgrade(move |socket| handle_connection(socket, addr, mpv, id, id_pool))
}
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct InitialState {
pub cached_timestamp: Option<f64>,
pub chapters: Vec<Value>,
pub connections: u64,
pub current_percent_pos: Option<f64>,
pub current_track: String,
pub duration: f64,
pub is_looping: bool,
pub is_muted: bool,
pub is_playing: bool,
pub is_paused_for_cache: bool,
pub playlist: Playlist,
pub tracks: Vec<Value>,
pub volume: f64,
}
async fn get_initial_state(mpv: &Mpv, id_pool: Arc<Mutex<IdPool>>) -> InitialState {
let cached_timestamp = mpv
.get_property_value("demuxer-cache-state")
.await
.unwrap_or(None)
.and_then(|v| {
v.as_object()
.and_then(|o| o.get("data"))
.and_then(|v| v.as_object())
.and_then(|o| o.get("cache-end"))
.and_then(|v| v.as_f64())
});
let chapters = match mpv.get_property_value("chapter-list").await {
Ok(Some(Value::Array(chapters))) => chapters,
_ => vec![],
};
let connections = id_pool.lock().unwrap().id_count();
let current_percent_pos = mpv.get_property("percent-pos").await.unwrap_or(None);
let current_track = mpv.get_file_path().await.unwrap_or("".to_string());
let duration = mpv.get_duration().await.unwrap_or(0.0);
let is_looping =
mpv.playlist_is_looping().await.unwrap_or(LoopProperty::No) != LoopProperty::No;
let is_muted = mpv
.get_property("mute")
.await
.unwrap_or(Some(false))
.unwrap_or(false);
let is_playing = mpv.is_playing().await.unwrap_or(false);
let is_paused_for_cache = mpv
.get_property("paused-for-cache")
.await
.unwrap_or(Some(false))
.unwrap_or(false);
let playlist = mpv.get_playlist().await.unwrap_or(Playlist(vec![]));
let tracks = match mpv.get_property_value("track-list").await {
Ok(Some(Value::Array(tracks))) => tracks
.into_iter()
.filter(|t| {
t.as_object()
.and_then(|o| o.get("type"))
.and_then(|t| t.as_str())
.unwrap_or("")
== "sub"
})
.collect(),
_ => vec![],
};
let volume = mpv.get_volume().await.unwrap_or(0.0);
// TODO: use default when new version is released
InitialState {
cached_timestamp,
chapters,
connections,
current_percent_pos,
current_track,
duration,
is_looping,
is_muted,
is_playing,
is_paused_for_cache,
playlist,
tracks,
volume,
}
}
const DEFAULT_PROPERTY_SUBSCRIPTIONS: [&str; 11] = [
"chapter-list",
"demuxer-cache-state",
"duration",
"loop-playlist",
"mute",
"pause",
"paused-for-cache",
"percent-pos",
"playlist",
"track-list",
"volume",
];
async fn setup_default_subscribes(mpv: &Mpv) -> anyhow::Result<()> {
let mut futures = FuturesUnordered::new();
futures.extend(
DEFAULT_PROPERTY_SUBSCRIPTIONS
.iter()
.map(|property| mpv.observe_property(0, property)),
);
while let Some(result) = futures.next().await {
result?;
}
Ok(())
}
async fn handle_connection(
mut socket: WebSocket,
addr: SocketAddr,
mpv: Mpv,
channel_id: u64,
id_pool: Arc<Mutex<IdPool>>,
) {
// TODO: There is an asynchronous gap between gathering the initial state and subscribing to the properties
// This could lead to missing events if they happen in that gap. Send initial state, but also ensure
// that there is an additional "initial state" sent upon subscription to all properties to ensure that
// the state is correct.
let initial_state = get_initial_state(&mpv, id_pool.clone()).await;
let message = Message::Text(
json!({
"type": "initial_state",
"value": initial_state,
})
.to_string(),
);
socket.send(message).await.unwrap();
setup_default_subscribes(&mpv).await.unwrap();
let id_count_watch_receiver = id_pool.lock().unwrap().get_id_count_watch_receiver();
let connection_loop_result = tokio::spawn(connection_loop(
socket,
addr,
mpv.clone(),
channel_id,
id_count_watch_receiver,
));
match connection_loop_result.await {
Ok(Ok(())) => {
log::trace!("Connection loop ended for {:?}", addr);
}
Ok(Err(e)) => {
log::error!("Error in connection loop for {:?}: {:?}", addr, e);
}
Err(e) => {
log::error!("Error in connection loop for {:?}: {:?}", addr, e);
}
}
match mpv.unobserve_property(channel_id).await {
Ok(()) => {
log::trace!("Unsubscribed from properties for {:?}", addr);
}
Err(e) => {
log::error!(
"Error unsubscribing from properties for {:?}: {:?}",
addr,
e
);
}
}
match id_pool.lock().unwrap().release_id(channel_id) {
Ok(()) => {
log::trace!("Released id {} for {:?}", channel_id, addr);
}
Err(e) => {
log::error!("Error releasing id {} for {:?}: {:?}", channel_id, addr, e);
}
}
}
async fn connection_loop(
mut socket: WebSocket,
addr: SocketAddr,
mpv: Mpv,
channel_id: u64,
mut id_count_watch_receiver: watch::Receiver<u64>,
) -> Result<(), anyhow::Error> {
let mut event_stream = mpv.get_event_stream().await;
loop {
select! {
id_count = id_count_watch_receiver.changed() => {
if let Err(e) = id_count {
anyhow::bail!("Error reading id count watch receiver for {:?}: {:?}", addr, e);
}
let message = Message::Text(json!({
"type": "connection_count",
"value": id_count_watch_receiver.borrow().clone(),
}).to_string());
socket.send(message).await?;
}
message = socket.recv() => {
log::trace!("Received command from {:?}: {:?}", addr, message);
let ws_message_content = message
.ok_or(anyhow::anyhow!("Event stream ended for {:?}", addr))
.and_then(|message| {
match message {
Ok(message) => Ok(message),
err => Err(anyhow::anyhow!("Error reading message for {:?}: {:?}", addr, err)),
}
})?;
if let Message::Close(_) = ws_message_content {
log::trace!("Closing connection for {:?}", addr);
return Ok(());
}
if let Message::Ping(xs) = ws_message_content {
log::trace!("Ponging {:?} with {:?}", addr, xs);
socket.send(Message::Pong(xs)).await?;
continue;
}
let message_content = match ws_message_content {
Message::Text(text) => text,
m => anyhow::bail!("Unexpected message type: {:?}", m),
};
let message_json = match serde_json::from_str::<Value>(&message_content) {
Ok(json) => json,
Err(e) => anyhow::bail!("Error parsing message from {:?}: {:?}", addr, e),
};
log::trace!("Handling command from {:?}: {:?}", addr, message_json);
// TODO: handle errors
match handle_message(message_json, mpv.clone(), channel_id).await {
Ok(Some(response)) => {
log::trace!("Handled command from {:?} successfully, sending response", addr);
let message = Message::Text(json!({
"type": "response",
"value": response,
}).to_string());
socket.send(message).await?;
}
Ok(None) => {
log::trace!("Handled command from {:?} successfully", addr);
}
Err(e) => {
log::error!("Error handling message from {:?}: {:?}", addr, e);
}
}
}
event = event_stream.next() => {
match event {
Some(Ok(event)) => {
log::trace!("Sending event to {:?}: {:?}", addr, event);
let message = Message::Text(json!({
"type": "event",
"value": event,
}).to_string());
socket.send(message).await?;
}
Some(Err(e)) => {
log::error!("Error reading event stream for {:?}: {:?}", addr, e);
anyhow::bail!("Error reading event stream for {:?}: {:?}", addr, e);
}
None => {
log::trace!("Event stream ended for {:?}", addr);
return Ok(());
}
}
}
}
}
}
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum WSCommand {
// Subscribe { property: String },
// UnsubscribeAll,
Load { urls: Vec<String> },
TogglePlayback,
Volume { volume: f64 },
Time { time: f64 },
PlaylistNext,
PlaylistPrevious,
PlaylistGoto { position: usize },
PlaylistClear,
PlaylistRemove { positions: Vec<usize> },
PlaylistMove { from: usize, to: usize },
Shuffle,
SetSubtitleTrack { track: Option<usize> },
SetLooping { value: bool },
}
async fn handle_message(
message: Value,
mpv: Mpv,
_channel_id: u64,
) -> anyhow::Result<Option<Value>> {
let command =
serde_json::from_value::<WSCommand>(message).context("Failed to parse message")?;
log::trace!("Successfully parsed message: {:?}", command);
match command {
// WSCommand::Subscribe { property } => {
// mpv.observe_property(channel_id, &property).await?;
// Ok(None)
// }
// WSCommand::UnsubscribeAll => {
// mpv.unobserve_property(channel_id).await?;
// Ok(None)
// }
WSCommand::Load { urls } => {
for url in urls {
mpv.playlist_add(
&url,
PlaylistAddTypeOptions::File,
mpvipc_async::PlaylistAddOptions::Append,
)
.await?;
}
Ok(None)
}
WSCommand::TogglePlayback => {
mpv.set_playback(mpvipc_async::Switch::Toggle).await?;
Ok(None)
}
WSCommand::Volume { volume } => {
mpv.set_volume(volume, NumberChangeOptions::Absolute)
.await?;
Ok(None)
}
WSCommand::Time { time } => {
mpv.seek(time, SeekOptions::AbsolutePercent).await?;
Ok(None)
}
WSCommand::PlaylistNext => {
mpv.next().await?;
Ok(None)
}
WSCommand::PlaylistPrevious => {
mpv.prev().await?;
Ok(None)
}
WSCommand::PlaylistGoto { position } => {
mpv.playlist_play_id(position).await?;
Ok(None)
}
WSCommand::PlaylistClear => {
mpv.playlist_clear().await?;
Ok(None)
}
// FIXME: this could lead to a race condition between `playlist_remove_id` commands
WSCommand::PlaylistRemove { mut positions } => {
positions.sort();
for position in positions.iter().rev() {
mpv.playlist_remove_id(*position).await?;
}
Ok(None)
}
WSCommand::PlaylistMove { from, to } => {
mpv.playlist_move_id(from, to).await?;
Ok(None)
}
WSCommand::Shuffle => {
mpv.playlist_shuffle().await?;
Ok(None)
}
WSCommand::SetSubtitleTrack { track } => {
mpv.set_property("sid", track).await?;
Ok(None)
}
WSCommand::SetLooping { value } => {
mpv.set_loop_playlist(if value { Switch::On } else { Switch::Off })
.await?;
Ok(None)
}
}
}
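
Client messages are plain JSON text frames that deserialize into the `WSCommand` enum above (internally tagged with `type`, variant names in snake_case); the server in turn sends frames tagged `initial_state`, `connection_count`, `event` and `response`. A few hypothetical client frames, one frame per line:

```json
{ "type": "load", "urls": ["https://example.com/video.webm"] }
{ "type": "toggle_playback" }
{ "type": "playlist_goto", "position": 2 }
{ "type": "set_looping", "value": true }
```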

src/main.rs
View File

@@ -1,20 +1,22 @@
use anyhow::Context;
use axum::{Router, Server};
use axum::Router;
use clap::Parser;
use mpvipc::Mpv;
use clap_verbosity_flag::Verbosity;
use futures::StreamExt;
use mpv_setup::{connect_to_mpv, create_mpv_config_file, show_grzegorz_image};
use mpvipc_async::{Event, Mpv, MpvDataType, MpvExt};
use std::{
fs::create_dir_all,
net::{IpAddr, SocketAddr},
path::Path,
sync::Arc,
};
use tokio::{
process::{Child, Command},
sync::Mutex,
sync::{Arc, Mutex},
};
use systemd_journal_logger::JournalLog;
use tempfile::NamedTempFile;
use tokio::task::JoinHandle;
use util::IdPool;
mod api;
mod mpv_broker;
mod mpv_setup;
mod util;
#[derive(Parser)]
struct Args {
@@ -24,12 +26,21 @@ struct Args {
#[clap(short, long, default_value = "8008")]
port: u16,
#[command(flatten)]
verbose: Verbosity,
#[clap(long)]
systemd: bool,
#[clap(long, value_name = "PATH", default_value = "/run/mpv/mpv.sock")]
mpv_socket_path: String,
#[clap(long, value_name = "PATH")]
mpv_executable_path: Option<String>,
#[clap(long, value_name = "PATH")]
mpv_config_file: Option<String>,
#[clap(long, default_value = "true")]
auto_start_mpv: bool,
@@ -37,90 +48,16 @@ struct Args {
force_auto_start: bool,
}
struct MpvConnectionArgs {
struct MpvConnectionArgs<'a> {
socket_path: String,
executable_path: Option<String>,
config_file: &'a NamedTempFile,
auto_start: bool,
force_auto_start: bool,
}
async fn connect_to_mpv(args: &MpvConnectionArgs) -> anyhow::Result<(Mpv, Option<Child>)> {
log::debug!("Connecting to mpv");
debug_assert!(
!args.force_auto_start || args.auto_start,
"force_auto_start requires auto_start"
);
let socket_path = Path::new(&args.socket_path);
if !socket_path.exists() {
log::debug!("Mpv socket not found at {}", &args.socket_path);
if !args.auto_start {
panic!("Mpv socket not found at {}", &args.socket_path);
}
log::debug!("Ensuring parent dir of mpv socket exists");
let parent_dir = Path::new(&args.socket_path)
.parent()
.context("Failed to get parent dir of mpv socket")?;
if !parent_dir.is_dir() {
create_dir_all(parent_dir).context("Failed to create parent dir of mpv socket")?;
}
} else {
log::debug!("Existing mpv socket found at {}", &args.socket_path);
if args.force_auto_start {
log::debug!("Removing mpv socket");
std::fs::remove_file(&args.socket_path)?;
}
}
let process_handle = if args.auto_start {
log::info!("Starting mpv with socket at {}", &args.socket_path);
// TODO: try to fetch mpv from PATH
Some(
Command::new(args.executable_path.as_deref().unwrap_or("mpv"))
.arg(format!("--input-ipc-server={}", &args.socket_path))
.arg("--idle")
.arg("--force-window")
// .arg("--fullscreen")
// .arg("--no-terminal")
// .arg("--load-unsafe-playlists")
.arg("--keep-open") // Keep last frame of video on end of video
.spawn()
.context("Failed to start mpv")?,
)
} else {
None
};
// Wait for mpv to create the socket
if tokio::time::timeout(tokio::time::Duration::from_millis(500), async {
while !&socket_path.exists() {
log::debug!("Waiting for mpv socket at {}", &args.socket_path);
tokio::time::sleep(tokio::time::Duration::from_millis(10)).await;
}
})
.await
.is_err()
{
return Err(anyhow::anyhow!(
"Failed to connect to mpv socket: {}",
&args.socket_path
));
}
Ok((
Mpv::connect(&args.socket_path).context(format!(
"Failed to connect to mpv socket: {}",
&args.socket_path
))?,
process_handle,
))
}
/// Helper function to resolve a hostname to an IP address.
/// Why is this not in the standard library? >:(
async fn resolve(host: &str) -> anyhow::Result<IpAddr> {
let addr = format!("{}:0", host);
let addresses = tokio::net::lookup_host(addr).await?;
@@ -131,42 +68,212 @@ async fn resolve(host: &str) -> anyhow::Result<IpAddr> {
.ok_or_else(|| anyhow::anyhow!("Failed to resolve address"))
}
/// Helper function that spawns a tokio thread that
/// continuously sends a ping to systemd watchdog, if enabled.
async fn setup_systemd_watchdog_thread() -> anyhow::Result<()> {
let mut watchdog_microsecs: u64 = 0;
if sd_notify::watchdog_enabled(true, &mut watchdog_microsecs) {
watchdog_microsecs = watchdog_microsecs.div_ceil(2);
tokio::spawn(async move {
log::debug!(
"Starting systemd watchdog thread with {} millisecond interval",
watchdog_microsecs.div_ceil(1000)
);
loop {
tokio::time::sleep(tokio::time::Duration::from_micros(watchdog_microsecs)).await;
if let Err(err) = sd_notify::notify(false, &[sd_notify::NotifyState::Watchdog]) {
log::warn!("Failed to notify systemd watchdog: {}", err);
} else {
log::trace!("Ping sent to systemd watchdog");
}
}
});
} else {
log::info!("Watchdog not enabled, skipping");
}
Ok(())
}
fn systemd_update_play_status(playing: bool, current_song: &Option<String>) {
sd_notify::notify(
false,
&[sd_notify::NotifyState::Status(&format!(
"{} {:?}",
if playing { "[PLAY]" } else { "[STOP]" },
if let Some(song) = current_song {
song
} else {
""
}
))],
)
.unwrap_or_else(|e| log::warn!("Failed to update systemd status with current song: {}", e));
}
async fn setup_systemd_notifier(mpv: Mpv) -> anyhow::Result<JoinHandle<()>> {
let handle = tokio::spawn(async move {
log::debug!("Starting systemd notifier thread");
let mut event_stream = mpv.get_event_stream().await;
mpv.observe_property(100, "media-title").await.unwrap();
mpv.observe_property(100, "pause").await.unwrap();
let mut current_song: Option<String> = mpv.get_property("media-title").await.unwrap();
let mut playing = !mpv.get_property("pause").await.unwrap().unwrap_or(false);
systemd_update_play_status(playing, &current_song);
loop {
if let Some(Ok(Event::PropertyChange { name, data, .. })) = event_stream.next().await {
match (name.as_str(), data) {
("media-title", Some(MpvDataType::String(s))) => {
current_song = Some(s);
}
("media-title", None) => {
current_song = None;
}
("pause", Some(MpvDataType::Bool(b))) => {
playing = !b;
}
(event_name, _) => {
log::trace!(
"Received unexpected property change on systemd notifier thread: {}",
event_name
);
}
}
systemd_update_play_status(playing, &current_song)
}
}
});
Ok(handle)
}
async fn shutdown(mpv: Mpv, proc: Option<tokio::process::Child>) {
log::info!("Shutting down");
sd_notify::notify(false, &[sd_notify::NotifyState::Stopping]).unwrap_or_else(|e| {
log::warn!(
"Failed to notify systemd that the service is stopping: {}",
e
)
});
mpv.disconnect()
.await
.unwrap_or_else(|e| log::warn!("Failed to disconnect from mpv: {}", e));
if let Some(mut proc) = proc {
proc.kill()
.await
.unwrap_or_else(|e| log::warn!("Failed to kill mpv process: {}", e));
}
}
#[tokio::main]
async fn main() -> anyhow::Result<()> {
env_logger::init();
let args = Args::parse();
let systemd_mode = args.systemd && sd_notify::booted().unwrap_or(false);
if systemd_mode {
JournalLog::new()
.context("Failed to initialize journald logging")?
.install()
.context("Failed to install journald logger")?;
log::set_max_level(args.verbose.log_level_filter());
log::debug!("Running with systemd integration");
setup_systemd_watchdog_thread().await?;
} else {
env_logger::Builder::new()
.filter_level(args.verbose.log_level_filter())
.init();
log::info!("Running without systemd integration");
}
let mpv_config_file = create_mpv_config_file(args.mpv_config_file)?;
let (mpv, proc) = connect_to_mpv(&MpvConnectionArgs {
socket_path: args.mpv_socket_path,
executable_path: args.mpv_executable_path,
config_file: &mpv_config_file,
auto_start: args.auto_start_mpv,
force_auto_start: args.force_auto_start,
})
.await?;
.await
.context("Failed to connect to mpv")?;
let addr = SocketAddr::new(resolve(&args.host).await?, args.port);
log::info!("Starting API on {}", addr);
if systemd_mode {
setup_systemd_notifier(mpv.clone()).await?;
}
let mpv = Arc::new(Mutex::new(mpv));
let app = Router::new().nest("/api", api::rest_api_routes(mpv));
if let Err(e) = show_grzegorz_image(mpv.clone()).await {
log::warn!("Could not show Grzegorz image: {}", e);
}
let addr = match resolve(&args.host)
.await
.context(format!("Failed to resolve address: {}", &args.host))
{
Ok(addr) => addr,
Err(e) => {
log::error!("{}", e);
shutdown(mpv, proc).await;
return Err(e);
}
};
let socket_addr = SocketAddr::new(addr, args.port);
log::info!("Starting API on {}", socket_addr);
let id_pool = Arc::new(Mutex::new(IdPool::new_with_max_limit(1024)));
let app = Router::new()
.nest("/api", api::rest_api_routes(mpv.clone()))
.nest("/ws", api::websocket_api(mpv.clone(), id_pool.clone()))
.merge(api::rest_api_docs(mpv.clone()))
.into_make_service_with_connect_info::<SocketAddr>();
let listener = match tokio::net::TcpListener::bind(&socket_addr)
.await
.context(format!("Failed to bind API server to '{}'", &socket_addr))
{
Ok(listener) => listener,
Err(e) => {
log::error!("{}", e);
shutdown(mpv, proc).await;
return Err(e);
}
};
if systemd_mode {
match sd_notify::notify(false, &[sd_notify::NotifyState::Ready])
.context("Failed to notify systemd that the service is ready")
{
Ok(_) => log::trace!("Notified systemd that the service is ready"),
Err(e) => {
log::error!("{}", e);
shutdown(mpv, proc).await;
return Err(e);
}
}
}
if let Some(mut proc) = proc {
tokio::select! {
exit_status = proc.wait() => {
log::warn!("mpv process exited with status: {}", exit_status?);
shutdown(mpv, Some(proc)).await;
}
_ = tokio::signal::ctrl_c() => {
log::info!("Received Ctrl-C, exiting");
shutdown(mpv, Some(proc)).await;
}
result = axum::serve(listener, app) => {
log::info!("API server exited");
shutdown(mpv, Some(proc)).await;
result?;
}
}
@ -174,12 +281,17 @@ async fn main() -> anyhow::Result<()> {
tokio::select! {
_ = tokio::signal::ctrl_c() => {
log::info!("Received Ctrl-C, exiting");
shutdown(mpv.clone(), None).await;
}
result = axum::serve(listener, app) => {
log::info!("API server exited");
shutdown(mpv.clone(), None).await;
result?;
}
}
}
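// The temporary config file is passed to mpv via --include and must outlive the
// mpv process; dropping the NamedTempFile here is what deletes it from disk.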
std::mem::drop(mpv_config_file);
Ok(())
}

144
src/mpv_setup.rs Normal file
View File

@ -0,0 +1,144 @@
use std::{fs::create_dir_all, io::Write, path::Path};
use anyhow::Context;
use mpvipc_async::{Mpv, MpvExt};
use tempfile::NamedTempFile;
use tokio::process::{Child, Command};
use crate::MpvConnectionArgs;
const DEFAULT_MPV_CONFIG_CONTENT: &str = include_str!("../assets/default-mpv.conf");
const THE_MAN_PNG: &[u8] = include_bytes!("../assets/the_man.png");
// https://mpv.io/manual/master/#options-ytdl
const YTDL_HOOK_ARGS: [&str; 2] = ["try_ytdl_first=yes", "thumbnails=none"];
pub fn create_mpv_config_file(args_config_file: Option<String>) -> anyhow::Result<NamedTempFile> {
let file_content = if let Some(path) = args_config_file {
if !Path::new(&path).exists() {
anyhow::bail!("Mpv config file not found at {}", &path);
}
std::fs::read_to_string(&path).context("Failed to read mpv config file")?
} else {
DEFAULT_MPV_CONFIG_CONTENT.to_string()
};
let tmpfile = tempfile::Builder::new()
.prefix("mpv-")
.rand_bytes(8)
.suffix(".conf")
.tempfile()?;
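// reopen() yields a separate File handle to the same temp file for writing;
// the NamedTempFile itself keeps owning the path and removes it on drop.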
tmpfile.reopen()?.write_all(file_content.as_bytes())?;
Ok(tmpfile)
}
pub async fn connect_to_mpv<'a>(
args: &MpvConnectionArgs<'a>,
) -> anyhow::Result<(Mpv, Option<Child>)> {
log::debug!("Connecting to mpv");
debug_assert!(
!args.force_auto_start || args.auto_start,
"force_auto_start requires auto_start"
);
let socket_path = Path::new(&args.socket_path);
if !socket_path.exists() {
log::debug!("Mpv socket not found at {}", &args.socket_path);
if !args.auto_start {
panic!("Mpv socket not found at {}", &args.socket_path);
}
log::debug!("Ensuring parent dir of mpv socket exists");
let parent_dir = Path::new(&args.socket_path)
.parent()
.context("Failed to get parent dir of mpv socket")?;
if !parent_dir.is_dir() {
create_dir_all(parent_dir).context("Failed to create parent dir of mpv socket")?;
}
} else {
log::debug!("Existing mpv socket found at {}", &args.socket_path);
if args.force_auto_start {
log::debug!("Removing mpv socket");
std::fs::remove_file(&args.socket_path)?;
}
}
let process_handle = if args.auto_start {
log::info!("Starting mpv with socket at {}", &args.socket_path);
// TODO: try to fetch mpv from PATH
Some(
Command::new(args.executable_path.as_deref().unwrap_or("mpv"))
.arg(format!("--input-ipc-server={}", &args.socket_path))
.arg("--idle")
.arg("--force-window")
.arg("--fullscreen")
.arg("--no-config")
.arg("--ytdl=yes")
.args(
YTDL_HOOK_ARGS
.into_iter()
.map(|x| format!("--script-opts=ytdl_hook-{}", x))
.collect::<Vec<_>>(),
)
.arg(format!(
"--include={}",
&args.config_file.path().to_string_lossy()
))
// .arg("--no-terminal")
.arg("--load-unsafe-playlists")
.arg("--keep-open") // Keep last frame of video on end of video
.spawn()
.context("Failed to start mpv")?,
)
} else {
None
};
// Wait for mpv to create the socket
if tokio::time::timeout(tokio::time::Duration::from_millis(500), async {
while !&socket_path.exists() {
log::debug!("Waiting for mpv socket at {}", &args.socket_path);
tokio::time::sleep(tokio::time::Duration::from_millis(10)).await;
}
})
.await
.is_err()
{
return Err(anyhow::anyhow!(
"Failed to connect to mpv socket: {}",
&args.socket_path
));
}
Ok((
Mpv::connect(&args.socket_path).await.context(format!(
"Failed to connect to mpv socket: {}",
&args.socket_path
))?,
process_handle,
))
}
pub async fn show_grzegorz_image(mpv: Mpv) -> anyhow::Result<()> {
let path = std::env::temp_dir().join("the_man.png");
std::fs::write(path.as_path(), THE_MAN_PNG)?;
mpv.playlist_clear().await?;
mpv.playlist_add(
path.to_string_lossy().as_ref(),
mpvipc_async::PlaylistAddTypeOptions::File,
mpvipc_async::PlaylistAddOptions::Append,
)
.await?;
mpv.next().await?;
Ok(())
}

3
src/util.rs Normal file
View File

@ -0,0 +1,3 @@
mod id_pool;
pub use id_pool::IdPool;

145
src/util/id_pool.rs Normal file
View File

@ -0,0 +1,145 @@
use std::{collections::BTreeSet, fmt::Debug};
use tokio::sync::watch;
/// A relatively naive ID pool implementation.
pub struct IdPool {
max_id: u64,
free_ids: BTreeSet<u64>,
id_count: u64,
id_count_watch_sender: watch::Sender<u64>,
id_count_watch_receiver: watch::Receiver<u64>,
}
impl Debug for IdPool {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("IdPool")
.field("max_id", &self.max_id)
.field("free_ids", &self.free_ids)
.field("id_count", &self.id_count)
.finish()
}
}
impl Default for IdPool {
fn default() -> Self {
let (id_count_watch_sender, id_count_watch_receiver) = watch::channel(0);
Self {
max_id: u64::MAX,
free_ids: BTreeSet::new(),
id_count: 0,
id_count_watch_sender,
id_count_watch_receiver,
}
}
}
// TODO: thiserror
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum IdPoolError {
NoFreeIds,
IdNotInUse(u64),
IdOutOfBound(u64),
}
impl IdPool {
pub fn new_with_max_limit(max_id: u64) -> Self {
let (id_count_watch_sender, id_count_watch_receiver) = watch::channel(0);
Self {
max_id,
free_ids: BTreeSet::new(),
id_count: 0,
id_count_watch_sender,
id_count_watch_receiver,
}
}
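/// Number of ids currently in use: the allocation high-water mark minus the
/// ids that have been released back into the pool.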
pub fn id_count(&self) -> u64 {
self.id_count - self.free_ids.len() as u64
}
pub fn id_is_used(&self, id: u64) -> Result<bool, IdPoolError> {
if id > self.max_id {
Err(IdPoolError::IdOutOfBound(id))
} else if self.free_ids.contains(&id) {
return Ok(false);
} else {
return Ok(id <= self.id_count);
}
}
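/// Reuses the smallest previously released id if one is available, otherwise
/// allocates the next fresh id (starting from 1); fails with NoFreeIds once
/// max_id ids are outstanding.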
pub fn request_id(&mut self) -> Result<u64, IdPoolError> {
if !self.free_ids.is_empty() {
let id = self.free_ids.pop_first().unwrap();
self.update_watch();
Ok(id)
} else if self.id_count < self.max_id {
self.id_count += 1;
self.update_watch();
Ok(self.id_count)
} else {
Err(IdPoolError::NoFreeIds)
}
}
pub fn release_id(&mut self, id: u64) -> Result<(), IdPoolError> {
if !self.id_is_used(id)? {
Err(IdPoolError::IdNotInUse(id))
} else {
self.free_ids.insert(id);
self.update_watch();
Ok(())
}
}
fn update_watch(&self) {
self.id_count_watch_sender.send(self.id_count()).unwrap();
}
pub fn get_id_count_watch_receiver(&self) -> watch::Receiver<u64> {
self.id_count_watch_receiver.clone()
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_id_pool() {
let mut pool = IdPool::new_with_max_limit(10);
assert_eq!(pool.request_id(), Ok(1));
assert_eq!(pool.request_id(), Ok(2));
assert_eq!(pool.request_id(), Ok(3));
assert_eq!(pool.request_id(), Ok(4));
assert_eq!(pool.id_count(), 4);
assert_eq!(pool.request_id(), Ok(5));
assert_eq!(pool.request_id(), Ok(6));
assert_eq!(pool.request_id(), Ok(7));
assert_eq!(pool.request_id(), Ok(8));
assert_eq!(pool.request_id(), Ok(9));
assert_eq!(pool.request_id(), Ok(10));
assert_eq!(pool.id_count(), 10);
assert_eq!(pool.request_id(), Err(IdPoolError::NoFreeIds));
assert_eq!(pool.release_id(5), Ok(()));
assert_eq!(pool.release_id(5), Err(IdPoolError::IdNotInUse(5)));
assert_eq!(pool.id_count(), 9);
assert_eq!(pool.request_id(), Ok(5));
assert_eq!(pool.release_id(11), Err(IdPoolError::IdOutOfBound(11)));
}
#[test]
fn test_id_pool_watch() {
let mut pool = IdPool::new_with_max_limit(10);
let receiver = pool.get_id_count_watch_receiver();
assert_eq!(receiver.borrow().clone(), 0);
pool.request_id().unwrap();
assert_eq!(receiver.borrow().clone(), 1);
pool.request_id().unwrap();
assert_eq!(receiver.borrow().clone(), 2);
pool.release_id(1).unwrap();
assert_eq!(receiver.borrow().clone(), 1);
}
}
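
For context, here is a minimal, hypothetical sketch of how the websocket layer might lease connection ids from this pool. Only IdPool's methods shown above come from the source; the with_connection_id helper, its locking, and the run callback are illustrative assumptions.

use std::sync::Arc;

use tokio::sync::Mutex;

use crate::util::IdPool;

// Hypothetical helper: lease an id from the shared pool for the lifetime of a
// single websocket connection, and always hand it back afterwards.
async fn with_connection_id<F, Fut>(pool: Arc<Mutex<IdPool>>, run: F) -> anyhow::Result<()>
where
    F: FnOnce(u64) -> Fut,
    Fut: std::future::Future<Output = anyhow::Result<()>>,
{
    // Hold the lock only long enough to grab an id.
    let id = pool
        .lock()
        .await
        .request_id()
        .map_err(|e| anyhow::anyhow!("no free connection id: {:?}", e))?;

    // Drive the connection with its leased id.
    let result = run(id).await;

    // Return the id to the pool even if the handler failed.
    if let Err(e) = pool.lock().await.release_id(id) {
        log::warn!("failed to release connection id {}: {:?}", id, e);
    }

    result
}

A caller would wrap each accepted socket roughly as with_connection_id(id_pool.clone(), |id| async move { /* serve the socket under this id */ Ok(()) }).await, again purely illustrative.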