1 Commits

Author SHA1 Message Date
d08eafdc33 main: continuously report play status to systemd 2024-12-23 01:12:01 +01:00
13 changed files with 523 additions and 1661 deletions

1327
Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@@ -9,26 +9,25 @@ readme = "README.md"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies] [dependencies]
anyhow = "1.0.98" anyhow = "1.0.82"
axum = { version = "0.8.4", features = ["macros", "ws"] } axum = { version = "0.7.7", features = ["macros"] }
clap = { version = "4.5.41", features = ["derive"] } clap = { version = "4.4.1", features = ["derive"] }
clap-verbosity-flag = "3.0.3" clap-verbosity-flag = "2.2.2"
env_logger = "0.11.8" env_logger = "0.10.0"
futures = "0.3.31" futures = "0.3.31"
log = "0.4.27" log = "0.4.20"
mpvipc-async = { git = "https://git.pvv.ntnu.no/Grzegorz/mpvipc-async.git", branch = "main" } mpvipc-async = { git = "https://git.pvv.ntnu.no/Projects/mpvipc-async.git", rev = "v0.1.0" }
sd-notify = "0.4.5" sd-notify = "0.4.3"
serde = { version = "1.0.219", features = ["derive"] } serde = { version = "1.0.188", features = ["derive"] }
serde_json = "1.0.140" serde_json = "1.0.105"
systemd-journal-logger = "2.2.2" systemd-journal-logger = "2.2.0"
tempfile = "3.20.0" tempfile = "3.11.0"
tokio = { version = "1.46.1", features = ["full"] } tokio = { version = "1.32.0", features = ["full"] }
tower = { version = "0.5.2", features = ["full"] } tower = { version = "0.4.13", features = ["full"] }
tower-http = { version = "0.6.6", features = ["full"] } tower-http = { version = "0.4.3", features = ["full"] }
tungstenite = "0.27.0" utoipa = { version = "5.1.3", features = ["axum_extras"] }
utoipa = { version = "5.4.0", features = ["axum_extras"] } utoipa-axum = "0.1.2"
utoipa-axum = "0.2.0" utoipa-swagger-ui = { version = "8.0.3", features = ["axum", "vendored"] }
utoipa-swagger-ui = { version = "9.0.2", features = ["axum", "vendored"] }
[profile.release] [profile.release]
strip = true strip = true

View File

@@ -6,13 +6,13 @@ New implementation of https://github.com/Programvareverkstedet/grzegorz
```sh ```sh
# NixOS # NixOS
nix run "git+https://git.pvv.ntnu.no/Grzegorz/greg-ng#" -- --mpv-socket-path /tmp/mpv.sock nix run "git+https://git.pvv.ntnu.no/Projects/greg-ng#" -- --mpv-socket-path /tmp/mpv.sock
# Other (after git clone and rust toolchain has been set up) # Other (after git clone and rust toolchain has been set up)
cargo run -- --mpv-socket-path /tmp/mpv.sock cargo run -- --mpv-socket-path /tmp/mpv.sock
``` ```
See also https://git.pvv.ntnu.no/Grzegorz/grzegorz-clients for frontend alternatives See also https://git.pvv.ntnu.no/Projects/grzegorz-clients for frontend alternatives
## Debugging ## Debugging

View File

@@ -34,7 +34,7 @@ rustPlatform.buildRustPackage rec {
cargoLock = { cargoLock = {
lockFile = ./Cargo.lock; lockFile = ./Cargo.lock;
outputHashes = { outputHashes = {
"mpvipc-async-0.1.0" = "sha256-V22wdnVVCBzayqkwb2d0msG7YypVss0cGBihtXrHtuM="; "mpvipc-async-0.1.0" = "sha256-2TQ2d4q9/DTxTZe9kOAoKBhsmegRZw32x3G2hbluS6U=";
}; };
}; };

12
flake.lock generated
View File

@@ -2,11 +2,11 @@
"nodes": { "nodes": {
"nixpkgs": { "nixpkgs": {
"locked": { "locked": {
"lastModified": 1751984180, "lastModified": 1729256560,
"narHash": "sha256-LwWRsENAZJKUdD3SpLluwDmdXY9F45ZEgCb0X+xgOL0=", "narHash": "sha256-/uilDXvCIEs3C9l73JTACm4quuHUsIHcns1c+cHUJwA=",
"owner": "NixOS", "owner": "NixOS",
"repo": "nixpkgs", "repo": "nixpkgs",
"rev": "9807714d6944a957c2e036f84b0ff8caf9930bc0", "rev": "4c2fcb090b1f3e5b47eaa7bd33913b574a11e0a0",
"type": "github" "type": "github"
}, },
"original": { "original": {
@@ -29,11 +29,11 @@
] ]
}, },
"locked": { "locked": {
"lastModified": 1752201818, "lastModified": 1729391507,
"narHash": "sha256-d8KczaVT8WFEZdWg//tMAbv8EDyn2YTWcJvSY8gqKBU=", "narHash": "sha256-as0I9xieJUHf7kiK2a9znDsVZQTFWhM1pLivII43Gi0=",
"owner": "oxalica", "owner": "oxalica",
"repo": "rust-overlay", "repo": "rust-overlay",
"rev": "bd8f8329780b348fedcd37b53dbbee48c08c496d", "rev": "784981a9feeba406de38c1c9a3decf966d853cca",
"type": "github" "type": "github"
}, },
"original": { "original": {

View File

@@ -49,7 +49,6 @@
nativeBuildInputs = [ nativeBuildInputs = [
toolchain toolchain
pkgs.mpv pkgs.mpv
pkgs.cargo-edit
]; ];
RUST_SRC_PATH = "${toolchain}/lib/rustlib/src/rust/library"; RUST_SRC_PATH = "${toolchain}/lib/rustlib/src/rust/library";

View File

@@ -1,6 +1,4 @@
mod base; mod base;
mod rest_wrapper_v1; mod rest_wrapper_v1;
mod websocket_v1;
pub use rest_wrapper_v1::{rest_api_docs, rest_api_routes}; pub use rest_wrapper_v1::{rest_api_docs, rest_api_routes};
pub use websocket_v1::websocket_api;

View File

@@ -1,507 +0,0 @@
use std::{
net::SocketAddr,
sync::{Arc, Mutex},
};
use anyhow::Context;
use futures::{stream::FuturesUnordered, StreamExt};
use serde::{Deserialize, Serialize};
use axum::{
extract::{
ws::{Message, WebSocket},
ConnectInfo, State, WebSocketUpgrade,
},
response::IntoResponse,
routing::any,
Router,
};
use mpvipc_async::{
LoopProperty, Mpv, MpvExt, NumberChangeOptions, Playlist, PlaylistAddTypeOptions, SeekOptions,
Switch,
};
use serde_json::{json, Value};
use tokio::{
select,
sync::{mpsc, watch},
};
use crate::util::{ConnectionEvent, IdPool};
/// Shared state handed to every websocket handler via axum's `State` extractor.
#[derive(Debug, Clone)]
struct WebsocketState {
    /// Handle to the mpv IPC connection, shared by all clients.
    mpv: Mpv,
    /// Pool handing out per-connection channel ids (used when unobserving properties).
    id_pool: Arc<Mutex<IdPool>>,
    /// Channel for reporting this connection's connect/disconnect events.
    connection_counter_tx: mpsc::Sender<ConnectionEvent>,
}
/// Build the websocket API router.
///
/// Every connection shares the same mpv handle; each one draws its own
/// channel id from `id_pool` and reports connect/disconnect events through
/// `connection_counter_tx`.
pub fn websocket_api(
    mpv: Mpv,
    id_pool: Arc<Mutex<IdPool>>,
    connection_counter_tx: mpsc::Sender<ConnectionEvent>,
) -> Router {
    Router::new()
        .route("/", any(websocket_handler))
        .with_state(WebsocketState {
            mpv,
            id_pool,
            connection_counter_tx,
        })
}
/// Upgrade an incoming HTTP request to a websocket connection.
///
/// A channel id is reserved from the pool *before* upgrading; if the pool
/// is exhausted the handler responds with 500 instead of upgrading.
async fn websocket_handler(
    ws: WebSocketUpgrade,
    ConnectInfo(addr): ConnectInfo<SocketAddr>,
    State(WebsocketState {
        mpv,
        id_pool,
        connection_counter_tx,
    }): State<WebsocketState>,
) -> impl IntoResponse {
    // `mpv` is already owned here (moved out of the extracted state), so the
    // previous `mpv.clone()` was redundant and has been removed.
    let id = match id_pool.lock().unwrap().request_id() {
        Ok(id) => id,
        Err(e) => {
            log::error!("Failed to get id from id pool: {:?}", e);
            return axum::http::StatusCode::INTERNAL_SERVER_ERROR.into_response();
        }
    };
    ws.on_upgrade(move |socket| {
        handle_connection(socket, addr, mpv, id, id_pool, connection_counter_tx)
    })
}
/// Snapshot of the full player state, sent to a client right after connect
/// as an `initial_state` message.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct InitialState {
    /// `cache-end` from mpv's `demuxer-cache-state`, if available.
    pub cached_timestamp: Option<f64>,
    /// Raw `chapter-list` entries.
    pub chapters: Vec<Value>,
    /// Number of ids currently handed out by the id pool (active connections).
    pub connections: u64,
    /// Playback position as a percentage (`percent-pos`), if available.
    pub current_percent_pos: Option<f64>,
    /// Path/URL of the current track; empty string when none could be read.
    pub current_track: String,
    /// Duration of the current track (0.0 when unavailable).
    pub duration: f64,
    /// Whether playlist looping is enabled (any `LoopProperty` other than `No`).
    pub is_looping: bool,
    /// Whether audio is muted.
    pub is_muted: bool,
    /// Whether playback is currently running.
    pub is_playing: bool,
    /// Whether mpv paused itself waiting for the cache.
    pub is_paused_for_cache: bool,
    /// The current playlist.
    pub playlist: Playlist,
    /// Subtitle tracks from `track-list` (other track types are filtered out).
    pub tracks: Vec<Value>,
    /// Current volume (0.0 when unavailable).
    pub volume: f64,
}
/// Gather a full snapshot of the player state for a newly connected client.
///
/// Every property read is best-effort: failures fall back to a neutral
/// default (empty list, 0.0, false, ...) rather than failing the snapshot.
async fn get_initial_state(mpv: &Mpv, id_pool: Arc<Mutex<IdPool>>) -> InitialState {
    // `demuxer-cache-state` is a nested object; extract `data.cache-end`.
    let cached_timestamp = mpv
        .get_property_value("demuxer-cache-state")
        .await
        .unwrap_or(None)
        .and_then(|v| {
            v.as_object()
                .and_then(|o| o.get("data"))
                .and_then(|v| v.as_object())
                .and_then(|o| o.get("cache-end"))
                .and_then(|v| v.as_f64())
        });
    let chapters = match mpv.get_property_value("chapter-list").await {
        Ok(Some(Value::Array(chapters))) => chapters,
        _ => vec![],
    };
    // Number of websocket clients currently holding an id.
    let connections = id_pool.lock().unwrap().id_count();
    let current_percent_pos = mpv.get_property("percent-pos").await.unwrap_or(None);
    let current_track = mpv.get_file_path().await.unwrap_or("".to_string());
    let duration = mpv.get_duration().await.unwrap_or(0.0);
    // Any LoopProperty other than `No` counts as looping.
    let is_looping =
        mpv.playlist_is_looping().await.unwrap_or(LoopProperty::No) != LoopProperty::No;
    let is_muted = mpv
        .get_property("mute")
        .await
        .unwrap_or(Some(false))
        .unwrap_or(false);
    let is_playing = mpv.is_playing().await.unwrap_or(false);
    let is_paused_for_cache = mpv
        .get_property("paused-for-cache")
        .await
        .unwrap_or(Some(false))
        .unwrap_or(false);
    let playlist = mpv.get_playlist().await.unwrap_or(Playlist(vec![]));
    // Only entries with type == "sub" are exposed; other track kinds
    // (video/audio) are filtered out.
    let tracks = match mpv.get_property_value("track-list").await {
        Ok(Some(Value::Array(tracks))) => tracks
            .into_iter()
            .filter(|t| {
                t.as_object()
                    .and_then(|o| o.get("type"))
                    .and_then(|t| t.as_str())
                    .unwrap_or("")
                    == "sub"
            })
            .collect(),
        _ => vec![],
    };
    let volume = mpv.get_volume().await.unwrap_or(0.0);
    // TODO: use default when new version is released
    InitialState {
        cached_timestamp,
        chapters,
        connections,
        current_percent_pos,
        current_track,
        duration,
        is_looping,
        is_muted,
        is_playing,
        is_paused_for_cache,
        playlist,
        tracks,
        volume,
    }
}
/// mpv properties every websocket client is automatically subscribed to on
/// connect; changes arrive on the event stream and are forwarded as `event`
/// messages.
const DEFAULT_PROPERTY_SUBSCRIPTIONS: [&str; 11] = [
    "chapter-list",
    "demuxer-cache-state",
    "duration",
    "loop-playlist",
    "mute",
    "pause",
    "paused-for-cache",
    "percent-pos",
    "playlist",
    "track-list",
    "volume",
];
async fn setup_default_subscribes(mpv: &Mpv) -> anyhow::Result<()> {
let mut futures = FuturesUnordered::new();
futures.extend(
DEFAULT_PROPERTY_SUBSCRIPTIONS
.iter()
.map(|property| mpv.observe_property(0, property)),
);
while let Some(result) = futures.next().await {
result?;
}
Ok(())
}
/// Full lifecycle of one websocket client.
///
/// Order matters: announce the connection, snapshot and send the initial
/// state, subscribe to the default properties, run the message loop, then
/// tear down (unobserve properties, release the pool id, announce the
/// disconnect).
async fn handle_connection(
    mut socket: WebSocket,
    addr: SocketAddr,
    mpv: Mpv,
    channel_id: u64,
    id_pool: Arc<Mutex<IdPool>>,
    connection_counter_tx: mpsc::Sender<ConnectionEvent>,
) {
    // Best-effort: a failed counter update is logged, never fatal.
    match connection_counter_tx.send(ConnectionEvent::Connected).await {
        Ok(()) => {
            log::trace!("Connection count updated for {:?}", addr);
        }
        Err(e) => {
            log::error!("Error updating connection count for {:?}: {:?}", addr, e);
        }
    }
    // TODO: There is an asynchronous gap between gathering the initial state and subscribing to the properties
    // This could lead to missing events if they happen in that gap. Send initial state, but also ensure
    // that there is an additional "initial state" sent upon subscription to all properties to ensure that
    // the state is correct.
    let initial_state = get_initial_state(&mpv, id_pool.clone()).await;
    let message = Message::Text(
        json!({
            "type": "initial_state",
            "value": initial_state,
        })
        .to_string()
        .into(),
    );
    // NOTE(review): these two unwraps panic this task on failure, skipping
    // all cleanup below (unobserve, id release, disconnect event) — consider
    // logging and falling through to the cleanup instead.
    socket.send(message).await.unwrap();
    setup_default_subscribes(&mpv).await.unwrap();
    let id_count_watch_receiver = id_pool.lock().unwrap().get_id_count_watch_receiver();
    // Run the message loop in its own task so this function still performs
    // cleanup even if the loop errors out or panics.
    let connection_loop_result = tokio::spawn(connection_loop(
        socket,
        addr,
        mpv.clone(),
        channel_id,
        id_count_watch_receiver,
    ));
    match connection_loop_result.await {
        Ok(Ok(())) => {
            log::trace!("Connection loop ended for {:?}", addr);
        }
        Ok(Err(e)) => {
            log::error!("Error in connection loop for {:?}: {:?}", addr, e);
        }
        Err(e) => {
            // Join error: the spawned loop task panicked or was cancelled.
            log::error!("Error in connection loop for {:?}: {:?}", addr, e);
        }
    }
    // Stop property-change events addressed to this connection's channel id.
    match mpv.unobserve_property(channel_id).await {
        Ok(()) => {
            log::trace!("Unsubscribed from properties for {:?}", addr);
        }
        Err(e) => {
            log::error!(
                "Error unsubscribing from properties for {:?}: {:?}",
                addr,
                e
            );
        }
    }
    // Return the channel id to the shared pool for reuse.
    match id_pool.lock().unwrap().release_id(channel_id) {
        Ok(()) => {
            log::trace!("Released id {} for {:?}", channel_id, addr);
        }
        Err(e) => {
            log::error!("Error releasing id {} for {:?}: {:?}", channel_id, addr, e);
        }
    }
    match connection_counter_tx
        .send(ConnectionEvent::Disconnected)
        .await
    {
        Ok(()) => {
            log::trace!("Connection count updated for {:?}", addr);
        }
        Err(e) => {
            log::error!("Error updating connection count for {:?}: {:?}", addr, e);
        }
    }
}
/// Drive a single websocket connection: forward mpv events and connection
/// count changes to the client, and execute commands received from it.
///
/// Returns `Ok(())` on a clean close (client disconnect, close frame, reset
/// without handshake, or the mpv event stream ending) and an error on
/// transport/protocol failures.
async fn connection_loop(
    mut socket: WebSocket,
    addr: SocketAddr,
    mpv: Mpv,
    channel_id: u64,
    mut id_count_watch_receiver: watch::Receiver<u64>,
) -> Result<(), anyhow::Error> {
    let mut event_stream = mpv.get_event_stream().await;
    loop {
        select! {
            watch_result = id_count_watch_receiver.changed() => {
                if let Err(e) = watch_result {
                    anyhow::bail!("Error reading id count watch receiver for {:?}: {:?}", addr, e);
                }
                // u64 is Copy: deref the borrow guard instead of cloning.
                let message = Message::Text(json!({
                    "type": "connection_count",
                    "value": *id_count_watch_receiver.borrow(),
                }).to_string().into());
                socket.send(message).await?;
            }
            message = socket.recv() => {
                log::trace!("Received command from {:?}: {:?}", addr, message);
                let ws_message_content = match message {
                    Some(Ok(message)) => message,
                    None => {
                        log::debug!("Connection closed for {:?}", addr);
                        return Ok(());
                    },
                    Some(Err(e)) => {
                        let inner_error = e.into_inner();
                        // A client vanishing without a close handshake is common
                        // (e.g. a killed browser tab); treat it as a normal close.
                        if inner_error
                            .downcast_ref::<tungstenite::error::Error>()
                            .is_some_and(|e| matches!(
                                *e,
                                tungstenite::error::Error::Protocol(
                                    tungstenite::error::ProtocolError::ResetWithoutClosingHandshake
                                )
                            ))
                        {
                            log::warn!("Connection reset without closing handshake for {:?}", addr);
                            return Ok(());
                        } else {
                            log::error!("Error reading message for {:?}: {:?}", addr, inner_error);
                            anyhow::bail!("Error reading message for {:?}: {:?}", addr, inner_error);
                        }
                    },
                };
                if let Message::Close(_) = ws_message_content {
                    log::trace!("Closing connection for {:?}", addr);
                    return Ok(());
                }
                if let Message::Ping(xs) = ws_message_content {
                    log::trace!("Ponging {:?} with {:?}", addr, xs);
                    socket.send(Message::Pong(xs)).await?;
                    continue;
                }
                let message_content = match ws_message_content {
                    Message::Text(text) => text,
                    m => anyhow::bail!("Unexpected message type: {:?}", m),
                };
                let message_json = match serde_json::from_str::<Value>(&message_content) {
                    Ok(json) => json,
                    Err(e) => anyhow::bail!("Error parsing message from {:?}: {:?}", addr, e),
                };
                log::trace!("Handling command from {:?}: {:?}", addr, message_json);
                // TODO: handle errors
                match handle_message(message_json, mpv.clone(), channel_id).await {
                    Ok(Some(response)) => {
                        log::trace!("Handled command from {:?} successfully, sending response", addr);
                        let message = Message::Text(json!({
                            "type": "response",
                            "value": response,
                        }).to_string().into());
                        socket.send(message).await?;
                    }
                    Ok(None) => {
                        log::trace!("Handled command from {:?} successfully", addr);
                    }
                    Err(e) => {
                        // Command failures are logged but do not kill the
                        // connection; the client gets no error reply (see TODO).
                        log::error!("Error handling message from {:?}: {:?}", addr, e);
                    }
                }
            }
            event = event_stream.next() => {
                match event {
                    Some(Ok(event)) => {
                        log::trace!("Sending event to {:?}: {:?}", addr, event);
                        let message = Message::Text(json!({
                            "type": "event",
                            "value": event,
                        }).to_string().into());
                        socket.send(message).await?;
                    }
                    Some(Err(e)) => {
                        log::error!("Error reading event stream for {:?}: {:?}", addr, e);
                        anyhow::bail!("Error reading event stream for {:?}: {:?}", addr, e);
                    }
                    None => {
                        log::trace!("Event stream ended for {:?}", addr);
                        return Ok(());
                    }
                }
            }
        }
    }
}
/// Commands accepted over the websocket, tagged by a snake_case `type`
/// field (e.g. `{"type": "toggle_playback"}`).
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum WSCommand {
    // Subscribe { property: String },
    // UnsubscribeAll,
    /// Append the given URLs to the playlist.
    Load { urls: Vec<String> },
    /// Toggle between play and pause.
    TogglePlayback,
    /// Set the absolute volume.
    Volume { volume: f64 },
    /// Seek; handled as an absolute percentage (see `handle_message`).
    Time { time: f64 },
    /// Skip to the next playlist entry.
    PlaylistNext,
    /// Go back to the previous playlist entry.
    PlaylistPrevious,
    /// Jump to a specific playlist entry.
    PlaylistGoto { position: usize },
    /// Remove every entry from the playlist.
    PlaylistClear,
    /// Remove the given entries from the playlist.
    PlaylistRemove { positions: Vec<usize> },
    /// Move a playlist entry to a new position.
    PlaylistMove { from: usize, to: usize },
    /// Shuffle the playlist.
    Shuffle,
    /// Select a subtitle track, or `None` to unset.
    SetSubtitleTrack { track: Option<usize> },
    /// Enable or disable playlist looping.
    SetLooping { value: bool },
}
/// Parse and execute a single client command.
///
/// Returns `Ok(Some(value))` when a command produces a response payload and
/// `Ok(None)` for fire-and-forget commands (currently all implemented
/// commands return `None`); errors on parse failure or a failed mpv call.
async fn handle_message(
    message: Value,
    mpv: Mpv,
    _channel_id: u64,
) -> anyhow::Result<Option<Value>> {
    let command =
        serde_json::from_value::<WSCommand>(message).context("Failed to parse message")?;
    log::trace!("Successfully parsed message: {:?}", command);
    match command {
        // WSCommand::Subscribe { property } => {
        //     mpv.observe_property(channel_id, &property).await?;
        //     Ok(None)
        // }
        // WSCommand::UnsubscribeAll => {
        //     mpv.unobserve_property(channel_id).await?;
        //     Ok(None)
        // }
        WSCommand::Load { urls } => {
            // URLs are appended in order; current playback is not interrupted.
            for url in urls {
                mpv.playlist_add(
                    &url,
                    PlaylistAddTypeOptions::File,
                    mpvipc_async::PlaylistAddOptions::Append,
                )
                .await?;
            }
            Ok(None)
        }
        WSCommand::TogglePlayback => {
            mpv.set_playback(mpvipc_async::Switch::Toggle).await?;
            Ok(None)
        }
        WSCommand::Volume { volume } => {
            mpv.set_volume(volume, NumberChangeOptions::Absolute)
                .await?;
            Ok(None)
        }
        WSCommand::Time { time } => {
            // NOTE(review): seeks by absolute *percent* although the field is
            // named "time" — confirm this matches the frontend's expectation.
            mpv.seek(time, SeekOptions::AbsolutePercent).await?;
            Ok(None)
        }
        WSCommand::PlaylistNext => {
            mpv.next().await?;
            Ok(None)
        }
        WSCommand::PlaylistPrevious => {
            mpv.prev().await?;
            Ok(None)
        }
        WSCommand::PlaylistGoto { position } => {
            // NOTE(review): `position` is forwarded as a playlist entry id —
            // verify clients send ids rather than visual indices.
            mpv.playlist_play_id(position).await?;
            Ok(None)
        }
        WSCommand::PlaylistClear => {
            mpv.playlist_clear().await?;
            Ok(None)
        }
        // FIXME: this could lead to a race condition between `playlist_remove_id` commands
        WSCommand::PlaylistRemove { mut positions } => {
            // Remove from highest to lowest so earlier removals cannot shift
            // the entries still pending removal.
            positions.sort();
            for position in positions.iter().rev() {
                mpv.playlist_remove_id(*position).await?;
            }
            Ok(None)
        }
        WSCommand::PlaylistMove { from, to } => {
            mpv.playlist_move_id(from, to).await?;
            Ok(None)
        }
        WSCommand::Shuffle => {
            mpv.playlist_shuffle().await?;
            Ok(None)
        }
        WSCommand::SetSubtitleTrack { track } => {
            // `track = None` presumably disables subtitles (sid = null) —
            // TODO confirm against mpv's property semantics.
            mpv.set_property("sid", track).await?;
            Ok(None)
        }
        WSCommand::SetLooping { value } => {
            mpv.set_loop_playlist(if value { Switch::On } else { Switch::Off })
                .await?;
            Ok(None)
        }
    }
}

View File

@@ -5,18 +5,13 @@ use clap_verbosity_flag::Verbosity;
use futures::StreamExt; use futures::StreamExt;
use mpv_setup::{connect_to_mpv, create_mpv_config_file, show_grzegorz_image}; use mpv_setup::{connect_to_mpv, create_mpv_config_file, show_grzegorz_image};
use mpvipc_async::{Event, Mpv, MpvDataType, MpvExt}; use mpvipc_async::{Event, Mpv, MpvDataType, MpvExt};
use std::{ use std::net::{IpAddr, SocketAddr};
net::{IpAddr, SocketAddr},
sync::{Arc, Mutex},
};
use systemd_journal_logger::JournalLog; use systemd_journal_logger::JournalLog;
use tempfile::NamedTempFile; use tempfile::NamedTempFile;
use tokio::{sync::mpsc, task::JoinHandle}; use tokio::task::JoinHandle;
use util::{ConnectionEvent, IdPool};
mod api; mod api;
mod mpv_setup; mod mpv_setup;
mod util;
#[derive(Parser)] #[derive(Parser)]
struct Args { struct Args {
@@ -94,37 +89,23 @@ async fn setup_systemd_watchdog_thread() -> anyhow::Result<()> {
Ok(()) Ok(())
} }
fn send_play_status( fn systemd_update_play_status(playing: bool, current_song: &Option<String>) {
systemd: bool, sd_notify::notify(
playing: bool, false,
current_song: &Option<String>, &[sd_notify::NotifyState::Status(&format!(
connection_count: u64, "{} {:?}",
) { if playing { "[PLAY]" } else { "[STOP]" },
let status = &format!( if let Some(song) = current_song {
"[CONN: {}] {} {:?}", song
connection_count, } else {
if playing { "[PLAY]" } else { "[STOP]" }, ""
if let Some(song) = current_song { }
song ))],
} else { )
"" .unwrap_or_else(|e| log::warn!("Failed to update systemd status with current song: {}", e));
}
);
if systemd {
sd_notify::notify(false, &[sd_notify::NotifyState::Status(status)]).unwrap_or_else(|e| {
log::warn!("Failed to update systemd status with current song: {}", e)
});
} else {
log::info!("{}", status);
}
} }
async fn start_status_notifier_thread( async fn setup_systemd_notifier(mpv: Mpv) -> anyhow::Result<JoinHandle<()>> {
systemd: bool,
mpv: Mpv,
mut connection_counter_rx: mpsc::Receiver<ConnectionEvent>,
) -> anyhow::Result<JoinHandle<()>> {
let handle = tokio::spawn(async move { let handle = tokio::spawn(async move {
log::debug!("Starting systemd notifier thread"); log::debug!("Starting systemd notifier thread");
let mut event_stream = mpv.get_event_stream().await; let mut event_stream = mpv.get_event_stream().await;
@@ -134,13 +115,12 @@ async fn start_status_notifier_thread(
let mut current_song: Option<String> = mpv.get_property("media-title").await.unwrap(); let mut current_song: Option<String> = mpv.get_property("media-title").await.unwrap();
let mut playing = !mpv.get_property("pause").await.unwrap().unwrap_or(false); let mut playing = !mpv.get_property("pause").await.unwrap().unwrap_or(false);
let mut connection_count = 0;
send_play_status(systemd, playing, &current_song, connection_count); systemd_update_play_status(playing, &current_song);
loop { loop {
tokio::select! { match event_stream.next().await {
Some(Ok(Event::PropertyChange { name, data, .. })) = event_stream.next() => { Some(Ok(Event::PropertyChange { name, data, .. })) => {
match (name.as_str(), data) { match (name.as_str(), data) {
("media-title", Some(MpvDataType::String(s))) => { ("media-title", Some(MpvDataType::String(s))) => {
current_song = Some(s); current_song = Some(s);
@@ -159,28 +139,9 @@ async fn start_status_notifier_thread(
} }
} }
send_play_status(systemd, playing, &current_song, connection_count) systemd_update_play_status(playing, &current_song)
}
Some(connection_counter_update) = connection_counter_rx.recv() => {
log::trace!("Received connection counter update: {}", connection_counter_update);
match connection_count.checked_add_signed(connection_counter_update.to_i8().into()) {
Some(new_count) => connection_count = new_count,
None => {
log::warn!("Invalid connection count: trying to add {} to {}", connection_counter_update.to_i8(), connection_count);
log::warn!("Resetting connection count to 0");
connection_count = 0;
}
}
match connection_count {
0 => log::debug!("No connections"),
_ => log::debug!("Connection count: {}", connection_count),
}
send_play_status(systemd, playing, &current_song, connection_count);
} }
_ => {}
} }
} }
}); });
@@ -243,10 +204,9 @@ async fn main() -> anyhow::Result<()> {
.await .await
.context("Failed to connect to mpv")?; .context("Failed to connect to mpv")?;
let (connection_counter_tx, connection_counter_rx) = mpsc::channel(10); if systemd_mode {
setup_systemd_notifier(mpv.clone()).await?;
let status_notifier_thread_handle = }
start_status_notifier_thread(systemd_mode, mpv.clone(), connection_counter_rx).await?;
if let Err(e) = show_grzegorz_image(mpv.clone()).await { if let Err(e) = show_grzegorz_image(mpv.clone()).await {
log::warn!("Could not show Grzegorz image: {}", e); log::warn!("Could not show Grzegorz image: {}", e);
@@ -266,16 +226,9 @@ async fn main() -> anyhow::Result<()> {
let socket_addr = SocketAddr::new(addr, args.port); let socket_addr = SocketAddr::new(addr, args.port);
log::info!("Starting API on {}", socket_addr); log::info!("Starting API on {}", socket_addr);
let id_pool = Arc::new(Mutex::new(IdPool::new_with_max_limit(1024)));
let app = Router::new() let app = Router::new()
.nest("/api", api::rest_api_routes(mpv.clone())) .nest("/api", api::rest_api_routes(mpv.clone()))
.nest( .merge(api::rest_api_docs(mpv.clone()));
"/ws",
api::websocket_api(mpv.clone(), id_pool.clone(), connection_counter_tx.clone()),
)
.merge(api::rest_api_docs(mpv.clone()))
.into_make_service_with_connect_info::<SocketAddr>();
let listener = match tokio::net::TcpListener::bind(&socket_addr) let listener = match tokio::net::TcpListener::bind(&socket_addr)
.await .await
@@ -312,16 +265,11 @@ async fn main() -> anyhow::Result<()> {
log::info!("Received Ctrl-C, exiting"); log::info!("Received Ctrl-C, exiting");
shutdown(mpv, Some(proc)).await; shutdown(mpv, Some(proc)).await;
} }
result = axum::serve(listener, app) => { result = axum::serve(listener, app.into_make_service()) => {
log::info!("API server exited"); log::info!("API server exited");
shutdown(mpv, Some(proc)).await; shutdown(mpv, Some(proc)).await;
result?; result?;
} }
result = status_notifier_thread_handle => {
log::info!("Status notifier thread exited unexpectedly, shutting dow");
shutdown(mpv, Some(proc)).await;
result?;
}
} }
} else { } else {
tokio::select! { tokio::select! {
@@ -329,16 +277,11 @@ async fn main() -> anyhow::Result<()> {
log::info!("Received Ctrl-C, exiting"); log::info!("Received Ctrl-C, exiting");
shutdown(mpv.clone(), None).await; shutdown(mpv.clone(), None).await;
} }
result = axum::serve(listener, app) => { result = axum::serve(listener, app.into_make_service()) => {
log::info!("API server exited"); log::info!("API server exited");
shutdown(mpv.clone(), None).await; shutdown(mpv.clone(), None).await;
result?; result?;
} }
result = status_notifier_thread_handle => {
log::info!("Status notifier thread exited unexpectedly, shutting down");
shutdown(mpv.clone(), None).await;
result?;
}
} }
} }

View File

@@ -36,7 +36,9 @@ pub fn create_mpv_config_file(args_config_file: Option<String>) -> anyhow::Resul
Ok(tmpfile) Ok(tmpfile)
} }
pub async fn connect_to_mpv(args: &MpvConnectionArgs<'_>) -> anyhow::Result<(Mpv, Option<Child>)> { pub async fn connect_to_mpv<'a>(
args: &MpvConnectionArgs<'a>,
) -> anyhow::Result<(Mpv, Option<Child>)> {
log::debug!("Connecting to mpv"); log::debug!("Connecting to mpv");
debug_assert!( debug_assert!(

View File

@@ -1,5 +0,0 @@
mod connection_counter;
mod id_pool;
pub use connection_counter::ConnectionEvent;
pub use id_pool::IdPool;

View File

@@ -1,25 +0,0 @@
use std::fmt;
/// Connection lifecycle event reported by connection handlers so a listener
/// can maintain a running connection count.
#[derive(Debug, Clone, Copy)]
pub enum ConnectionEvent {
    Connected,
    Disconnected,
}

impl ConnectionEvent {
    /// Signed delta to apply to a connection counter for this event:
    /// +1 for a connect, -1 for a disconnect.
    pub fn to_i8(self) -> i8 {
        if matches!(self, ConnectionEvent::Connected) {
            1
        } else {
            -1
        }
    }
}

impl fmt::Display for ConnectionEvent {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let label = match self {
            ConnectionEvent::Connected => "Connected",
            ConnectionEvent::Disconnected => "Disconnected",
        };
        write!(f, "{}", label)
    }
}

View File

@@ -1,145 +0,0 @@
use std::{collections::BTreeSet, fmt::Debug};
use tokio::sync::watch;
/// A relatively naive ID pool implementation.
///
/// Hands out ids starting from 1 up to `max_id`, reusing released ids
/// (smallest first) before allocating new ones, and broadcasts the in-use
/// count through a `tokio::sync::watch` channel on every change.
pub struct IdPool {
    /// Largest id this pool will ever hand out (inclusive).
    max_id: u64,
    /// Ids that were allocated and later released, available for reuse.
    free_ids: BTreeSet<u64>,
    /// High-water mark: the largest id allocated so far (NOT the in-use count).
    id_count: u64,
    /// Sender half used to broadcast the in-use count.
    id_count_watch_sender: watch::Sender<u64>,
    /// Receiver kept so the channel stays open; cloned out on demand.
    id_count_watch_receiver: watch::Receiver<u64>,
}
// Manual Debug impl: the watch channel halves are deliberately left out of
// the output.
impl Debug for IdPool {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("IdPool")
            .field("max_id", &self.max_id)
            .field("free_ids", &self.free_ids)
            .field("id_count", &self.id_count)
            .finish()
    }
}
impl Default for IdPool {
fn default() -> Self {
let (id_count_watch_sender, id_count_watch_receiver) = watch::channel(0);
Self {
max_id: u64::MAX,
free_ids: BTreeSet::new(),
id_count: 0,
id_count_watch_sender,
id_count_watch_receiver,
}
}
}
// TODO: migrate to thiserror
/// Errors returned by [`IdPool`] operations.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum IdPoolError {
    /// The pool reached `max_id` and no released ids are available for reuse.
    NoFreeIds,
    /// Attempted to release an id that is not currently allocated.
    IdNotInUse(u64),
    /// The id exceeds the pool's configured maximum.
    IdOutOfBound(u64),
}
impl IdPool {
    /// Create a pool that will never hand out ids greater than `max_id`.
    pub fn new_with_max_limit(max_id: u64) -> Self {
        let (id_count_watch_sender, id_count_watch_receiver) = watch::channel(0);
        Self {
            max_id,
            free_ids: BTreeSet::new(),
            id_count: 0,
            id_count_watch_sender,
            id_count_watch_receiver,
        }
    }

    /// Number of ids currently in use (allocated and not yet released).
    pub fn id_count(&self) -> u64 {
        self.id_count - self.free_ids.len() as u64
    }

    /// Whether `id` is currently allocated.
    ///
    /// Errors with [`IdPoolError::IdOutOfBound`] if `id` exceeds `max_id`.
    pub fn id_is_used(&self, id: u64) -> Result<bool, IdPoolError> {
        if id > self.max_id {
            Err(IdPoolError::IdOutOfBound(id))
        } else if self.free_ids.contains(&id) {
            Ok(false)
        } else {
            // BUGFIX: ids are handed out starting at 1, so 0 is never in use.
            // Previously `0 <= id_count` always held, which let `release_id(0)`
            // insert 0 into the free set, corrupting the in-use count and
            // allowing 0 to be handed out later.
            Ok(id != 0 && id <= self.id_count)
        }
    }

    /// Allocate an id, preferring the smallest released id before growing
    /// the high-water mark. Errors with `NoFreeIds` when exhausted.
    pub fn request_id(&mut self) -> Result<u64, IdPoolError> {
        if !self.free_ids.is_empty() {
            let id = self.free_ids.pop_first().unwrap();
            self.update_watch();
            Ok(id)
        } else if self.id_count < self.max_id {
            self.id_count += 1;
            self.update_watch();
            Ok(self.id_count)
        } else {
            Err(IdPoolError::NoFreeIds)
        }
    }

    /// Return an allocated id to the pool.
    ///
    /// Errors with `IdNotInUse` if the id is not currently allocated, or
    /// `IdOutOfBound` if it exceeds `max_id`.
    pub fn release_id(&mut self, id: u64) -> Result<(), IdPoolError> {
        if !self.id_is_used(id)? {
            Err(IdPoolError::IdNotInUse(id))
        } else {
            self.free_ids.insert(id);
            self.update_watch();
            Ok(())
        }
    }

    /// Broadcast the current in-use count to all watch subscribers.
    fn update_watch(&self) {
        // Cannot fail: the pool itself holds a receiver, so the channel is
        // never closed while the pool is alive.
        self.id_count_watch_sender.send(self.id_count()).unwrap();
    }

    /// A watch receiver that observes changes to the in-use count.
    pub fn get_id_count_watch_receiver(&self) -> watch::Receiver<u64> {
        self.id_count_watch_receiver.clone()
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    // Covers sequential allocation, exhaustion, release/reuse of the
    // smallest freed id, double-release, and the out-of-bound check.
    #[test]
    fn test_id_pool() {
        let mut pool = IdPool::new_with_max_limit(10);
        assert_eq!(pool.request_id(), Ok(1));
        assert_eq!(pool.request_id(), Ok(2));
        assert_eq!(pool.request_id(), Ok(3));
        assert_eq!(pool.request_id(), Ok(4));
        assert_eq!(pool.id_count(), 4);
        assert_eq!(pool.request_id(), Ok(5));
        assert_eq!(pool.request_id(), Ok(6));
        assert_eq!(pool.request_id(), Ok(7));
        assert_eq!(pool.request_id(), Ok(8));
        assert_eq!(pool.request_id(), Ok(9));
        assert_eq!(pool.request_id(), Ok(10));
        assert_eq!(pool.id_count(), 10);
        assert_eq!(pool.request_id(), Err(IdPoolError::NoFreeIds));
        assert_eq!(pool.release_id(5), Ok(()));
        assert_eq!(pool.release_id(5), Err(IdPoolError::IdNotInUse(5)));
        assert_eq!(pool.id_count(), 9);
        // A released id is reused before any new id would be minted.
        assert_eq!(pool.request_id(), Ok(5));
        assert_eq!(pool.release_id(11), Err(IdPoolError::IdOutOfBound(11)));
    }

    // The watch channel must track the in-use count on every change.
    #[test]
    fn test_id_pool_watch() {
        let mut pool = IdPool::new_with_max_limit(10);
        let receiver = pool.get_id_count_watch_receiver();
        assert_eq!(receiver.borrow().clone(), 0);
        pool.request_id().unwrap();
        assert_eq!(receiver.borrow().clone(), 1);
        pool.request_id().unwrap();
        assert_eq!(receiver.borrow().clone(), 2);
        pool.release_id(1).unwrap();
        assert_eq!(receiver.borrow().clone(), 1);
    }
}
}