Compare commits

...

12 Commits

13 changed files with 1789 additions and 185 deletions

999
Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@ -10,12 +10,13 @@ readme = "README.md"
[dependencies]
anyhow = "1.0.82"
axum = { version = "0.6.20", features = ["macros"] }
axum = { version = "0.7.7", features = ["macros", "ws"] }
clap = { version = "4.4.1", features = ["derive"] }
clap-verbosity-flag = "2.2.2"
env_logger = "0.10.0"
futures = "0.3.31"
log = "0.4.20"
mpvipc-async = { git = "https://git.pvv.ntnu.no/oysteikt/mpvipc-async.git", rev = "v0.1.0" }
mpvipc-async = { git = "https://git.pvv.ntnu.no/Grzegorz/mpvipc-async.git", branch = "main" }
sd-notify = "0.4.3"
serde = { version = "1.0.188", features = ["derive"] }
serde_json = "1.0.105"
@ -24,6 +25,9 @@ tempfile = "3.11.0"
tokio = { version = "1.32.0", features = ["full"] }
tower = { version = "0.4.13", features = ["full"] }
tower-http = { version = "0.4.3", features = ["full"] }
utoipa = { version = "5.1.3", features = ["axum_extras"] }
utoipa-axum = "0.1.2"
utoipa-swagger-ui = { version = "8.0.3", features = ["axum", "vendored"] }
[profile.release]
strip = true

View File

@ -6,13 +6,13 @@ New implementation of https://github.com/Programvareverkstedet/grzegorz
```sh
# NixOS
nix run "git+https://git.pvv.ntnu.no/Projects/greg-ng#" -- --mpv-socket-path /tmp/mpv.sock
nix run "git+https://git.pvv.ntnu.no/Grzegorz/greg-ng#" -- --mpv-socket-path /tmp/mpv.sock
# Other (after git clone and rust toolchain has been set up)
cargo run -- --mpv-socket-path /tmp/mpv.sock
```
See also https://git.pvv.ntnu.no/Projects/grzegorz-clients for frontend alternatives
See also https://git.pvv.ntnu.no/Grzegorz/grzegorz-clients for frontend alternatives
## Debugging

View File

@ -22,6 +22,7 @@ rustPlatform.buildRustPackage rec {
])
(type == "regular" && lib.elem baseName [
"flake.nix"
"flake.lock"
"default.nix"
"module.nix"
".envrc"
@ -33,7 +34,7 @@ rustPlatform.buildRustPackage rec {
cargoLock = {
lockFile = ./Cargo.lock;
outputHashes = {
"mpvipc-async-0.1.0" = "sha256-2TQ2d4q9/DTxTZe9kOAoKBhsmegRZw32x3G2hbluS6U=";
"mpvipc-async-0.1.0" = "sha256-V22wdnVVCBzayqkwb2d0msG7YypVss0cGBihtXrHtuM=";
};
};

View File

@ -38,7 +38,9 @@
package = self.packages.${system}.greg-ng-wrapped;
in {
type = "app";
program = lib.getExe package;
program = toString (pkgs.writeShellScript "greg-ng" ''
${lib.getExe package} --mpv-socket-path /tmp/greg-ng-mpv.sock -vvvv
'');
};
});

View File

@ -135,18 +135,20 @@ in
ProtectKernelModules = true;
ProtectKernelTunables = true;
ProtectProc = "invisible";
ProtectSystem = "full";
# FIXME: ProtectSystem = "full" broke the service; re-enable once the cause is found
# ProtectSystem = "full";
RemoveIPC = true;
UMask = "0077";
RestrictNamespaces = true;
RestrictRealtime = true;
RestrictSUIDSGID = true;
SystemCallArchitectures = "native";
SystemCallFilter = [
"@system-service"
"~@privileged"
"~@resources"
];
# FIXME: this SystemCallFilter set broke the service; re-enable once the offending syscall is identified
# SystemCallFilter = [
# "@system-service"
# "~@privileged"
# "~@resources"
# ];
};
};
})

View File

@ -1,4 +1,6 @@
mod base;
mod rest_wrapper_v1;
mod websocket_v1;
pub use rest_wrapper_v1::rest_api_routes;
pub use rest_wrapper_v1::{rest_api_docs, rest_api_routes};
pub use websocket_v1::websocket_api;

View File

@ -8,6 +8,10 @@ use axum::{
use mpvipc_async::Mpv;
use serde_json::{json, Value};
use utoipa::OpenApi;
use utoipa_axum::{router::OpenApiRouter, routes};
use utoipa_swagger_ui::SwaggerUi;
use super::base;
pub fn rest_api_routes(mpv: Mpv) -> Router {
@ -31,6 +35,64 @@ pub fn rest_api_routes(mpv: Mpv) -> Router {
.with_state(mpv)
}
pub fn rest_api_docs(mpv: Mpv) -> Router {
let (router, api) = OpenApiRouter::with_openapi(ApiDoc::openapi())
.routes(routes!(loadfile))
.routes(routes!(play_get, play_set))
.routes(routes!(volume_get, volume_set))
.routes(routes!(time_get, time_set))
.routes(routes!(playlist_get, playlist_remove_or_clear))
.routes(routes!(playlist_next))
.routes(routes!(playlist_previous))
.routes(routes!(playlist_goto))
.routes(routes!(playlist_move))
.routes(routes!(playlist_get_looping, playlist_set_looping))
.routes(routes!(shuffle))
.with_state(mpv)
.split_for_parts();
router.merge(SwaggerUi::new("/docs").url("/docs/openapi.json", api))
}
// NOTE: the openapi stuff is very heavily duplicated and introduces
// a lot of maintenance overhead and boilerplate. It should theoretically
// be possible to infer a lot of this from axum, but I haven't found a
// good library that does this and works properly yet (I have tried some
// but they all had issues). Feel free to replace this with a better solution.
#[derive(OpenApi)]
#[openapi(info(
description = "The legacy Grzegorz Brzeczyszczykiewicz API, used to control a running mpv instance",
version = "1.0.0",
))]
struct ApiDoc;
#[derive(serde::Serialize, utoipa::ToSchema)]
struct EmptySuccessResponse {
success: bool,
error: bool,
}
#[derive(serde::Serialize, utoipa::ToSchema)]
struct SuccessResponse {
#[schema(example = true)]
success: bool,
#[schema(example = false)]
error: bool,
#[schema(example = json!({ some: "arbitrary json value" }))]
value: Value,
}
#[derive(serde::Serialize, utoipa::ToSchema)]
struct ErrorResponse {
#[schema(example = "error....")]
error: String,
#[schema(example = "error....")]
errortext: String,
#[schema(example = false)]
success: bool,
}
pub struct RestResponse(anyhow::Result<Value>);
impl From<anyhow::Result<Value>> for RestResponse {
@ -64,73 +126,177 @@ impl IntoResponse for RestResponse {
// TODO: These could possibly be generated with a proc macro
#[derive(serde::Deserialize)]
#[derive(serde::Deserialize, utoipa::IntoParams)]
struct LoadFileArgs {
path: String,
}
/// Add item to playlist
#[utoipa::path(
post,
path = "/load",
params(LoadFileArgs),
responses(
(status = 200, description = "Success", body = EmptySuccessResponse),
(status = 500, description = "Internal server error", body = ErrorResponse),
)
)]
async fn loadfile(State(mpv): State<Mpv>, Query(query): Query<LoadFileArgs>) -> RestResponse {
base::loadfile(mpv, &query.path).await.into()
}
/// Check whether the player is paused or playing
#[utoipa::path(
get,
path = "/play",
responses(
(status = 200, description = "Success", body = SuccessResponse),
(status = 500, description = "Internal server error", body = ErrorResponse),
)
)]
async fn play_get(State(mpv): State<Mpv>) -> RestResponse {
base::play_get(mpv).await.into()
}
#[derive(serde::Deserialize)]
#[derive(serde::Deserialize, utoipa::IntoParams)]
struct PlaySetArgs {
play: String,
}
/// Set whether the player is paused or playing
#[utoipa::path(
post,
path = "/play",
params(PlaySetArgs),
responses(
(status = 200, description = "Success", body = EmptySuccessResponse),
(status = 500, description = "Internal server error", body = ErrorResponse),
)
)]
async fn play_set(State(mpv): State<Mpv>, Query(query): Query<PlaySetArgs>) -> RestResponse {
let play = query.play.to_lowercase() == "true";
base::play_set(mpv, play).await.into()
}
/// Get the current player volume
#[utoipa::path(
get,
path = "/volume",
responses(
(status = 200, description = "Success", body = SuccessResponse),
(status = 500, description = "Internal server error", body = ErrorResponse),
)
)]
async fn volume_get(State(mpv): State<Mpv>) -> RestResponse {
base::volume_get(mpv).await.into()
}
#[derive(serde::Deserialize)]
#[derive(serde::Deserialize, utoipa::IntoParams)]
struct VolumeSetArgs {
volume: f64,
}
/// Set the player volume
#[utoipa::path(
post,
path = "/volume",
params(VolumeSetArgs),
responses(
(status = 200, description = "Success", body = EmptySuccessResponse),
(status = 500, description = "Internal server error", body = ErrorResponse),
)
)]
async fn volume_set(State(mpv): State<Mpv>, Query(query): Query<VolumeSetArgs>) -> RestResponse {
base::volume_set(mpv, query.volume).await.into()
}
/// Get current playback position
#[utoipa::path(
get,
path = "/time",
responses(
(status = 200, description = "Success", body = SuccessResponse),
(status = 500, description = "Internal server error", body = ErrorResponse),
)
)]
async fn time_get(State(mpv): State<Mpv>) -> RestResponse {
base::time_get(mpv).await.into()
}
#[derive(serde::Deserialize)]
#[derive(serde::Deserialize, utoipa::IntoParams)]
struct TimeSetArgs {
pos: Option<f64>,
percent: Option<f64>,
}
/// Set playback position
#[utoipa::path(
post,
path = "/time",
params(TimeSetArgs),
responses(
(status = 200, description = "Success", body = EmptySuccessResponse),
(status = 500, description = "Internal server error", body = ErrorResponse),
)
)]
async fn time_set(State(mpv): State<Mpv>, Query(query): Query<TimeSetArgs>) -> RestResponse {
base::time_set(mpv, query.pos, query.percent).await.into()
}
/// Get the current playlist
#[utoipa::path(
get,
path = "/playlist",
responses(
(status = 200, description = "Success", body = SuccessResponse),
(status = 500, description = "Internal server error", body = ErrorResponse),
)
)]
async fn playlist_get(State(mpv): State<Mpv>) -> RestResponse {
base::playlist_get(mpv).await.into()
}
/// Go to the next item in the playlist
#[utoipa::path(
post,
path = "/playlist/next",
responses(
(status = 200, description = "Success", body = EmptySuccessResponse),
(status = 500, description = "Internal server error", body = ErrorResponse),
)
)]
async fn playlist_next(State(mpv): State<Mpv>) -> RestResponse {
base::playlist_next(mpv).await.into()
}
/// Go back to the previous item in the playlist
#[utoipa::path(
post,
path = "/playlist/previous",
responses(
(status = 200, description = "Success", body = EmptySuccessResponse),
(status = 500, description = "Internal server error", body = ErrorResponse),
)
)]
async fn playlist_previous(State(mpv): State<Mpv>) -> RestResponse {
base::playlist_previous(mpv).await.into()
}
#[derive(serde::Deserialize)]
#[derive(serde::Deserialize, utoipa::IntoParams)]
struct PlaylistGotoArgs {
index: usize,
}
/// Go to a specific item in the playlist
#[utoipa::path(
post,
path = "/playlist/goto",
params(PlaylistGotoArgs),
responses(
(status = 200, description = "Success", body = EmptySuccessResponse),
(status = 500, description = "Internal server error", body = ErrorResponse),
)
)]
async fn playlist_goto(
State(mpv): State<Mpv>,
Query(query): Query<PlaylistGotoArgs>,
@ -138,11 +304,21 @@ async fn playlist_goto(
base::playlist_goto(mpv, query.index).await.into()
}
#[derive(serde::Deserialize)]
#[derive(serde::Deserialize, utoipa::IntoParams)]
struct PlaylistRemoveOrClearArgs {
index: Option<usize>,
}
/// Clears a single item or the entire playlist
#[utoipa::path(
delete,
path = "/playlist",
params(PlaylistRemoveOrClearArgs),
responses(
(status = 200, description = "Success", body = EmptySuccessResponse),
(status = 500, description = "Internal server error", body = ErrorResponse),
)
)]
async fn playlist_remove_or_clear(
State(mpv): State<Mpv>,
Query(query): Query<PlaylistRemoveOrClearArgs>,
@ -153,12 +329,22 @@ async fn playlist_remove_or_clear(
}
}
#[derive(serde::Deserialize)]
#[derive(serde::Deserialize, utoipa::IntoParams)]
struct PlaylistMoveArgs {
index1: usize,
index2: usize,
}
/// Move a playlist item to a different position
#[utoipa::path(
post,
path = "/playlist/move",
params(PlaylistMoveArgs),
responses(
(status = 200, description = "Success", body = EmptySuccessResponse),
(status = 500, description = "Internal server error", body = ErrorResponse),
)
)]
async fn playlist_move(
State(mpv): State<Mpv>,
Query(query): Query<PlaylistMoveArgs>,
@ -168,19 +354,47 @@ async fn playlist_move(
.into()
}
/// Shuffle the playlist
#[utoipa::path(
post,
path = "/playlist/shuffle",
responses(
(status = 200, description = "Success", body = EmptySuccessResponse),
(status = 500, description = "Internal server error", body = ErrorResponse),
)
)]
async fn shuffle(State(mpv): State<Mpv>) -> RestResponse {
base::shuffle(mpv).await.into()
}
/// Check whether the playlist is looping
#[utoipa::path(
get,
path = "/playlist/loop",
responses(
(status = 200, description = "Success", body = SuccessResponse),
(status = 500, description = "Internal server error", body = ErrorResponse),
)
)]
async fn playlist_get_looping(State(mpv): State<Mpv>) -> RestResponse {
base::playlist_get_looping(mpv).await.into()
}
#[derive(serde::Deserialize)]
#[derive(serde::Deserialize, utoipa::IntoParams)]
struct PlaylistSetLoopingArgs {
r#loop: bool,
}
/// Set whether the playlist should loop
#[utoipa::path(
post,
path = "/playlist/loop",
params(PlaylistSetLoopingArgs),
responses(
(status = 200, description = "Success", body = EmptySuccessResponse),
(status = 500, description = "Internal server error", body = ErrorResponse),
)
)]
async fn playlist_set_looping(
State(mpv): State<Mpv>,
Query(query): Query<PlaylistSetLoopingArgs>,

449
src/api/websocket_v1.rs Normal file
View File

@ -0,0 +1,449 @@
use std::{
net::SocketAddr,
sync::{Arc, Mutex},
};
use anyhow::Context;
use futures::{stream::FuturesUnordered, StreamExt};
use serde::{Deserialize, Serialize};
use axum::{
extract::{
ws::{Message, WebSocket},
ConnectInfo, State, WebSocketUpgrade,
},
response::IntoResponse,
routing::any,
Router,
};
use mpvipc_async::{
LoopProperty, Mpv, MpvExt, NumberChangeOptions, Playlist, PlaylistAddTypeOptions, SeekOptions,
Switch,
};
use serde_json::{json, Value};
use tokio::{select, sync::watch};
use crate::util::IdPool;
/// Shared state handed to every websocket route: the mpv handle plus the
/// pool that hands out unique per-connection channel ids.
#[derive(Debug, Clone)]
struct WebsocketState {
    // Handle to the running mpv instance (cloneable connection handle).
    mpv: Mpv,
    // Connection-id pool, shared across all websocket connections.
    id_pool: Arc<Mutex<IdPool>>,
}
/// Build the router exposing the websocket endpoint at `/`.
///
/// Both the mpv handle and the shared id pool are stored as router state
/// and extracted again in `websocket_handler`.
pub fn websocket_api(mpv: Mpv, id_pool: Arc<Mutex<IdPool>>) -> Router {
    let shared = WebsocketState { mpv, id_pool };
    Router::new().route("/", any(websocket_handler)).with_state(shared)
}
/// Upgrade an incoming HTTP request to a websocket connection.
///
/// A unique channel id is reserved from the shared pool *before* the
/// upgrade; if the pool is exhausted (or its mutex is poisoned) we answer
/// 500 instead of upgrading.
async fn websocket_handler(
    ws: WebSocketUpgrade,
    ConnectInfo(addr): ConnectInfo<SocketAddr>,
    State(WebsocketState { mpv, id_pool }): State<WebsocketState>,
) -> impl IntoResponse {
    // `mpv` is already an owned value destructured out of the state,
    // so the former `let mpv = mpv.clone();` was a redundant clone.
    let id = match id_pool.lock().unwrap().request_id() {
        Ok(id) => id,
        Err(e) => {
            log::error!("Failed to get id from id pool: {:?}", e);
            return axum::http::StatusCode::INTERNAL_SERVER_ERROR.into_response();
        }
    };

    ws.on_upgrade(move |socket| handle_connection(socket, addr, mpv, id, id_pool))
}
/// Snapshot of the player state sent to a client right after it connects
/// (the payload of the `initial_state` websocket message).
///
/// All fields are gathered best-effort in `get_initial_state`; see that
/// function for the mpv property each one comes from.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct InitialState {
    // End of the demuxer cache in seconds, if mpv reports one.
    pub cached_timestamp: Option<f64>,
    // Raw `chapter-list` entries.
    pub chapters: Vec<Value>,
    // Number of ids currently handed out by the id pool.
    pub connections: u64,
    // `percent-pos` property, if available.
    pub current_percent_pos: Option<f64>,
    // Path/URL of the currently loaded file ("" when unavailable).
    pub current_track: String,
    pub duration: f64,
    pub is_looping: bool,
    pub is_muted: bool,
    pub is_playing: bool,
    pub is_paused_for_cache: bool,
    pub playlist: Playlist,
    // Subtitle entries filtered out of `track-list`.
    pub tracks: Vec<Value>,
    pub volume: f64,
}
/// Gather a full snapshot of the player state for a newly connected client.
///
/// Every property read is best-effort: a failed read falls back to a
/// neutral default (empty list, 0.0, false, "") instead of propagating an
/// error, so a flaky mpv connection still yields a usable snapshot.
/// Compared to the previous version, eager defaults (`unwrap_or("".to_string())`,
/// `unwrap_or(Some(false)).unwrap_or(false)`) were replaced with the
/// equivalent `unwrap_or_default` / `.ok().flatten()` forms (clippy
/// `or_fun_call`); the resulting values are identical.
async fn get_initial_state(mpv: &Mpv, id_pool: Arc<Mutex<IdPool>>) -> InitialState {
    // `demuxer-cache-state` is a nested object; dig out `data.cache-end`.
    let cached_timestamp = mpv
        .get_property_value("demuxer-cache-state")
        .await
        .ok()
        .flatten()
        .and_then(|v| {
            v.as_object()
                .and_then(|o| o.get("data"))
                .and_then(|v| v.as_object())
                .and_then(|o| o.get("cache-end"))
                .and_then(|v| v.as_f64())
        });
    let chapters = match mpv.get_property_value("chapter-list").await {
        Ok(Some(Value::Array(chapters))) => chapters,
        _ => vec![],
    };
    let connections = id_pool.lock().unwrap().id_count();
    let current_percent_pos = mpv.get_property("percent-pos").await.ok().flatten();
    let current_track = mpv.get_file_path().await.unwrap_or_default();
    let duration = mpv.get_duration().await.unwrap_or(0.0);
    let is_looping =
        mpv.playlist_is_looping().await.unwrap_or(LoopProperty::No) != LoopProperty::No;
    // Result<Option<bool>> -> bool, treating both Err and None as false.
    let is_muted = mpv.get_property("mute").await.ok().flatten().unwrap_or(false);
    let is_playing = mpv.is_playing().await.unwrap_or(false);
    let is_paused_for_cache = mpv
        .get_property("paused-for-cache")
        .await
        .ok()
        .flatten()
        .unwrap_or(false);
    let playlist = mpv
        .get_playlist()
        .await
        .unwrap_or_else(|_| Playlist(vec![]));
    let tracks = match mpv.get_property_value("track-list").await {
        Ok(Some(Value::Array(tracks))) => tracks
            .into_iter()
            // Keep only entries whose "type" is "sub" (subtitle tracks).
            .filter(|t| {
                t.as_object()
                    .and_then(|o| o.get("type"))
                    .and_then(|t| t.as_str())
                    .unwrap_or("")
                    == "sub"
            })
            .collect(),
        _ => vec![],
    };
    let volume = mpv.get_volume().await.unwrap_or(0.0);

    // TODO: use default when new version is released
    InitialState {
        cached_timestamp,
        chapters,
        connections,
        current_percent_pos,
        current_track,
        duration,
        is_looping,
        is_muted,
        is_playing,
        is_paused_for_cache,
        playlist,
        tracks,
        volume,
    }
}
/// mpv properties every websocket client is subscribed to on connect
/// (see `setup_default_subscribes`); changes arrive on the event stream
/// and are forwarded to the client by `connection_loop`.
const DEFAULT_PROPERTY_SUBSCRIPTIONS: [&str; 11] = [
    "chapter-list",
    "demuxer-cache-state",
    "duration",
    "loop-playlist",
    "mute",
    "pause",
    "paused-for-cache",
    "percent-pos",
    "playlist",
    "track-list",
    "volume",
];
async fn setup_default_subscribes(mpv: &Mpv) -> anyhow::Result<()> {
let mut futures = FuturesUnordered::new();
futures.extend(
DEFAULT_PROPERTY_SUBSCRIPTIONS
.iter()
.map(|property| mpv.observe_property(0, property)),
);
while let Some(result) = futures.next().await {
result?;
}
Ok(())
}
async fn handle_connection(
mut socket: WebSocket,
addr: SocketAddr,
mpv: Mpv,
channel_id: u64,
id_pool: Arc<Mutex<IdPool>>,
) {
// TODO: There is an asynchronous gap between gathering the initial state and subscribing to the properties
// This could lead to missing events if they happen in that gap. Send initial state, but also ensure
// that there is an additional "initial state" sent upon subscription to all properties to ensure that
// the state is correct.
let initial_state = get_initial_state(&mpv, id_pool.clone()).await;
let message = Message::Text(
json!({
"type": "initial_state",
"value": initial_state,
})
.to_string(),
);
socket.send(message).await.unwrap();
setup_default_subscribes(&mpv).await.unwrap();
let id_count_watch_receiver = id_pool.lock().unwrap().get_id_count_watch_receiver();
let connection_loop_result = tokio::spawn(connection_loop(
socket,
addr,
mpv.clone(),
channel_id,
id_count_watch_receiver,
));
match connection_loop_result.await {
Ok(Ok(())) => {
log::trace!("Connection loop ended for {:?}", addr);
}
Ok(Err(e)) => {
log::error!("Error in connection loop for {:?}: {:?}", addr, e);
}
Err(e) => {
log::error!("Error in connection loop for {:?}: {:?}", addr, e);
}
}
match mpv.unobserve_property(channel_id).await {
Ok(()) => {
log::trace!("Unsubscribed from properties for {:?}", addr);
}
Err(e) => {
log::error!(
"Error unsubscribing from properties for {:?}: {:?}",
addr,
e
);
}
}
match id_pool.lock().unwrap().release_id(channel_id) {
Ok(()) => {
log::trace!("Released id {} for {:?}", channel_id, addr);
}
Err(e) => {
log::error!("Error releasing id {} for {:?}: {:?}", channel_id, addr, e);
}
}
}
/// Main per-connection event loop.
///
/// Races three event sources with `select!` until the client disconnects
/// or an error occurs:
///  1. the id-pool watch channel — forwarded as `connection_count` messages,
///  2. incoming websocket frames — parsed as JSON commands and dispatched
///     to `handle_message` (any payload goes back as a `response` message),
///  3. the mpv event stream — forwarded as `event` messages.
///
/// Returns `Ok(())` on a clean close (client `Close` frame or end of the
/// mpv event stream) and `Err` on transport, parse, or stream failures.
async fn connection_loop(
    mut socket: WebSocket,
    addr: SocketAddr,
    mpv: Mpv,
    channel_id: u64,
    mut id_count_watch_receiver: watch::Receiver<u64>,
) -> Result<(), anyhow::Error> {
    let mut event_stream = mpv.get_event_stream().await;
    loop {
        select! {
            // Connection count changed: push the new value to this client.
            id_count = id_count_watch_receiver.changed() => {
                if let Err(e) = id_count {
                    anyhow::bail!("Error reading id count watch receiver for {:?}: {:?}", addr, e);
                }
                let message = Message::Text(json!({
                    "type": "connection_count",
                    "value": id_count_watch_receiver.borrow().clone(),
                }).to_string());
                socket.send(message).await?;
            }
            // Incoming frame from the client.
            message = socket.recv() => {
                log::trace!("Received command from {:?}: {:?}", addr, message);

                // `None` means the stream ended; `Some(Err)` is a transport error.
                let ws_message_content = message
                    .ok_or(anyhow::anyhow!("Event stream ended for {:?}", addr))
                    .and_then(|message| {
                        match message {
                            Ok(message) => Ok(message),
                            err => Err(anyhow::anyhow!("Error reading message for {:?}: {:?}", addr, err)),
                        }
                    })?;

                // Clean shutdown requested by the client.
                if let Message::Close(_) = ws_message_content {
                    log::trace!("Closing connection for {:?}", addr);
                    return Ok(());
                }

                // Answer pings with matching pongs and keep looping.
                if let Message::Ping(xs) = ws_message_content {
                    log::trace!("Ponging {:?} with {:?}", addr, xs);
                    socket.send(Message::Pong(xs)).await?;
                    continue;
                }

                // Only text frames carry commands.
                let message_content = match ws_message_content {
                    Message::Text(text) => text,
                    m => anyhow::bail!("Unexpected message type: {:?}", m),
                };

                let message_json = match serde_json::from_str::<Value>(&message_content) {
                    Ok(json) => json,
                    Err(e) => anyhow::bail!("Error parsing message from {:?}: {:?}", addr, e),
                };

                log::trace!("Handling command from {:?}: {:?}", addr, message_json);

                // TODO: handle errors
                // NOTE(review): command errors are only logged, never reported
                // back to the client — confirm that is intended.
                match handle_message(message_json, mpv.clone(), channel_id).await {
                    Ok(Some(response)) => {
                        log::trace!("Handled command from {:?} successfully, sending response", addr);
                        let message = Message::Text(json!({
                            "type": "response",
                            "value": response,
                        }).to_string());
                        socket.send(message).await?;
                    }
                    Ok(None) => {
                        log::trace!("Handled command from {:?} successfully", addr);
                    }
                    Err(e) => {
                        log::error!("Error handling message from {:?}: {:?}", addr, e);
                    }
                }
            }
            // Event from mpv: forward it to the client.
            event = event_stream.next() => {
                match event {
                    Some(Ok(event)) => {
                        log::trace!("Sending event to {:?}: {:?}", addr, event);
                        let message = Message::Text(json!({
                            "type": "event",
                            "value": event,
                        }).to_string());
                        socket.send(message).await?;
                    }
                    Some(Err(e)) => {
                        log::error!("Error reading event stream for {:?}: {:?}", addr, e);
                        anyhow::bail!("Error reading event stream for {:?}: {:?}", addr, e);
                    }
                    None => {
                        log::trace!("Event stream ended for {:?}", addr);
                        return Ok(());
                    }
                }
            }
        }
    }
}
/// Commands a websocket client may send, as tagged JSON:
/// `{"type": "<snake_case variant>", ...fields}`. Dispatched in
/// `handle_message`.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum WSCommand {
    // Subscribe { property: String },
    // UnsubscribeAll,
    /// Append each url to the playlist.
    Load { urls: Vec<String> },
    /// Toggle pause/play.
    TogglePlayback,
    /// Set the absolute volume.
    Volume { volume: f64 },
    /// Seek to an absolute percentage of the track.
    Time { time: f64 },
    PlaylistNext,
    PlaylistPrevious,
    /// Jump to the playlist entry at `position`.
    PlaylistGoto { position: usize },
    PlaylistClear,
    /// Remove the playlist entries at the given positions.
    PlaylistRemove { positions: Vec<usize> },
    /// Move the entry at `from` to position `to`.
    PlaylistMove { from: usize, to: usize },
    Shuffle,
    /// Select a subtitle track (sets mpv's "sid" property).
    SetSubtitleTrack { track: Option<usize> },
    /// Enable/disable playlist looping.
    SetLooping { value: bool },
}
/// Dispatch a parsed websocket command to the matching mpv operation.
///
/// Returns `Ok(Some(json))` when the command produces a response payload,
/// `Ok(None)` for fire-and-forget commands, and `Err` when parsing or the
/// mpv call fails. `_channel_id` is unused while the subscribe commands
/// are commented out.
async fn handle_message(
    message: Value,
    mpv: Mpv,
    _channel_id: u64,
) -> anyhow::Result<Option<Value>> {
    let command =
        serde_json::from_value::<WSCommand>(message).context("Failed to parse message")?;

    log::trace!("Successfully parsed message: {:?}", command);

    match command {
        // WSCommand::Subscribe { property } => {
        //     mpv.observe_property(channel_id, &property).await?;
        //     Ok(None)
        // }
        // WSCommand::UnsubscribeAll => {
        //     mpv.unobserve_property(channel_id).await?;
        //     Ok(None)
        // }
        WSCommand::Load { urls } => {
            for url in urls {
                mpv.playlist_add(
                    &url,
                    PlaylistAddTypeOptions::File,
                    mpvipc_async::PlaylistAddOptions::Append,
                )
                .await?;
            }
            Ok(None)
        }
        WSCommand::TogglePlayback => {
            // Use the imported `Switch` for consistency with `SetLooping`
            // below (was the fully-qualified `mpvipc_async::Switch`).
            mpv.set_playback(Switch::Toggle).await?;
            Ok(None)
        }
        WSCommand::Volume { volume } => {
            mpv.set_volume(volume, NumberChangeOptions::Absolute)
                .await?;
            Ok(None)
        }
        WSCommand::Time { time } => {
            mpv.seek(time, SeekOptions::AbsolutePercent).await?;
            Ok(None)
        }
        WSCommand::PlaylistNext => {
            mpv.next().await?;
            Ok(None)
        }
        WSCommand::PlaylistPrevious => {
            mpv.prev().await?;
            Ok(None)
        }
        WSCommand::PlaylistGoto { position } => {
            mpv.playlist_play_id(position).await?;
            Ok(None)
        }
        WSCommand::PlaylistClear => {
            mpv.playlist_clear().await?;
            Ok(None)
        }
        // FIXME: this could lead to a race condition between `playlist_remove_id` commands
        WSCommand::PlaylistRemove { mut positions } => {
            // Remove from the highest index down so earlier removals do not
            // shift the positions still pending. `sort_unstable` is
            // equivalent to `sort` for plain integers and avoids allocation.
            positions.sort_unstable();
            for position in positions.iter().rev() {
                mpv.playlist_remove_id(*position).await?;
            }
            Ok(None)
        }
        WSCommand::PlaylistMove { from, to } => {
            mpv.playlist_move_id(from, to).await?;
            Ok(None)
        }
        WSCommand::Shuffle => {
            mpv.playlist_shuffle().await?;
            Ok(None)
        }
        WSCommand::SetSubtitleTrack { track } => {
            // NOTE(review): assumes mpvipc maps `None` to unsetting "sid" —
            // confirm against the mpvipc-async serialization.
            mpv.set_property("sid", track).await?;
            Ok(None)
        }
        WSCommand::SetLooping { value } => {
            mpv.set_loop_playlist(if value { Switch::On } else { Switch::Off })
                .await?;
            Ok(None)
        }
    }
}

View File

@ -1,15 +1,22 @@
use anyhow::Context;
use axum::{Router, Server};
use axum::Router;
use clap::Parser;
use clap_verbosity_flag::Verbosity;
use futures::StreamExt;
use mpv_setup::{connect_to_mpv, create_mpv_config_file, show_grzegorz_image};
use mpvipc_async::Mpv;
use std::net::{IpAddr, SocketAddr};
use mpvipc_async::{Event, Mpv, MpvDataType, MpvExt};
use std::{
net::{IpAddr, SocketAddr},
sync::{Arc, Mutex},
};
use systemd_journal_logger::JournalLog;
use tempfile::NamedTempFile;
use tokio::task::JoinHandle;
use util::IdPool;
mod api;
mod mpv_setup;
mod util;
#[derive(Parser)]
struct Args {
@ -87,10 +94,71 @@ async fn setup_systemd_watchdog_thread() -> anyhow::Result<()> {
Ok(())
}
/// Push the current play/stop state and media title to systemd as the
/// unit's status line (shown by `systemctl status`). Failures are logged
/// as warnings, never fatal.
fn systemd_update_play_status(playing: bool, current_song: &Option<String>) {
    // `as_deref().unwrap_or("")` replaces the verbose `if let` inside the
    // format args; the Debug (`{:?}`) output is identical for both forms.
    let status = format!(
        "{} {:?}",
        if playing { "[PLAY]" } else { "[STOP]" },
        current_song.as_deref().unwrap_or("")
    );
    sd_notify::notify(false, &[sd_notify::NotifyState::Status(&status)])
        .unwrap_or_else(|e| log::warn!("Failed to update systemd status with current song: {}", e));
}
/// Spawn a background task that mirrors mpv's pause state and current
/// media title into the systemd unit status line.
///
/// Returns the task's `JoinHandle`; the caller may drop it, the task keeps
/// running detached. NOTE(review): the `unwrap()`s below abort only this
/// spawned task on failure, not the whole process — confirm that losing
/// status updates silently is acceptable.
async fn setup_systemd_notifier(mpv: Mpv) -> anyhow::Result<JoinHandle<()>> {
    let handle = tokio::spawn(async move {
        log::debug!("Starting systemd notifier thread");
        let mut event_stream = mpv.get_event_stream().await;

        // Both observers use the fixed channel id 100.
        mpv.observe_property(100, "media-title").await.unwrap();
        mpv.observe_property(100, "pause").await.unwrap();

        // Seed the status with the current values before listening.
        let mut current_song: Option<String> = mpv.get_property("media-title").await.unwrap();
        let mut playing = !mpv.get_property("pause").await.unwrap().unwrap_or(false);
        systemd_update_play_status(playing, &current_song);

        loop {
            if let Some(Ok(Event::PropertyChange { name, data, .. })) = event_stream.next().await {
                match (name.as_str(), data) {
                    ("media-title", Some(MpvDataType::String(s))) => {
                        current_song = Some(s);
                    }
                    // Title cleared (e.g. nothing loaded).
                    ("media-title", None) => {
                        current_song = None;
                    }
                    ("pause", Some(MpvDataType::Bool(b))) => {
                        playing = !b;
                    }
                    // Other property changes are not ours; just trace them.
                    (event_name, _) => {
                        log::trace!(
                            "Received unexpected property change on systemd notifier thread: {}",
                            event_name
                        );
                    }
                }
                systemd_update_play_status(playing, &current_song)
            }
        }
    });

    Ok(handle)
}
async fn shutdown(mpv: Mpv, proc: Option<tokio::process::Child>) {
log::info!("Shutting down");
sd_notify::notify(false, &[sd_notify::NotifyState::Stopping])
.unwrap_or_else(|e| log::warn!("Failed to notify systemd that the service is stopping: {}", e));
sd_notify::notify(false, &[sd_notify::NotifyState::Stopping]).unwrap_or_else(|e| {
log::warn!(
"Failed to notify systemd that the service is stopping: {}",
e
)
});
mpv.disconnect()
.await
@ -138,6 +206,10 @@ async fn main() -> anyhow::Result<()> {
.await
.context("Failed to connect to mpv")?;
if systemd_mode {
setup_systemd_notifier(mpv.clone()).await?;
}
if let Err(e) = show_grzegorz_image(mpv.clone()).await {
log::warn!("Could not show Grzegorz image: {}", e);
}
@ -156,11 +228,19 @@ async fn main() -> anyhow::Result<()> {
let socket_addr = SocketAddr::new(addr, args.port);
log::info!("Starting API on {}", socket_addr);
let app = Router::new().nest("/api", api::rest_api_routes(mpv.clone()));
let server = match Server::try_bind(&socket_addr.clone())
let id_pool = Arc::new(Mutex::new(IdPool::new_with_max_limit(1024)));
let app = Router::new()
.nest("/api", api::rest_api_routes(mpv.clone()))
.nest("/ws", api::websocket_api(mpv.clone(), id_pool.clone()))
.merge(api::rest_api_docs(mpv.clone()))
.into_make_service_with_connect_info::<SocketAddr>();
let listener = match tokio::net::TcpListener::bind(&socket_addr)
.await
.context(format!("Failed to bind API server to '{}'", &socket_addr))
{
Ok(server) => server,
Ok(listener) => listener,
Err(e) => {
log::error!("{}", e);
shutdown(mpv, proc).await;
@ -191,7 +271,7 @@ async fn main() -> anyhow::Result<()> {
log::info!("Received Ctrl-C, exiting");
shutdown(mpv, Some(proc)).await;
}
result = server.serve(app.into_make_service()) => {
result = axum::serve(listener, app) => {
log::info!("API server exited");
shutdown(mpv, Some(proc)).await;
result?;
@ -203,7 +283,7 @@ async fn main() -> anyhow::Result<()> {
log::info!("Received Ctrl-C, exiting");
shutdown(mpv.clone(), None).await;
}
result = server.serve(app.into_make_service()) => {
result = axum::serve(listener, app) => {
log::info!("API server exited");
shutdown(mpv.clone(), None).await;
result?;

View File

@ -11,6 +11,9 @@ const DEFAULT_MPV_CONFIG_CONTENT: &str = include_str!("../assets/default-mpv.con
const THE_MAN_PNG: &[u8] = include_bytes!("../assets/the_man.png");
// https://mpv.io/manual/master/#options-ytdl
const YTDL_HOOK_ARGS: [&str; 2] = ["try_ytdl_first=yes", "thumbnails=none"];
pub fn create_mpv_config_file(args_config_file: Option<String>) -> anyhow::Result<NamedTempFile> {
let file_content = if let Some(path) = args_config_file {
if !Path::new(&path).exists() {
@ -78,6 +81,13 @@ pub async fn connect_to_mpv<'a>(
.arg("--force-window")
.arg("--fullscreen")
.arg("--no-config")
.arg("--ytdl=yes")
.args(
YTDL_HOOK_ARGS
.into_iter()
.map(|x| format!("--script-opts=ytdl_hook-{}", x))
.collect::<Vec<_>>(),
)
.arg(format!(
"--include={}",
&args.config_file.path().to_string_lossy()
@ -132,4 +142,3 @@ pub async fn show_grzegorz_image(mpv: Mpv) -> anyhow::Result<()> {
Ok(())
}

3
src/util.rs Normal file
View File

@ -0,0 +1,3 @@
// Small self-contained utilities; only `IdPool` is re-exported.
mod id_pool;

pub use id_pool::IdPool;

145
src/util/id_pool.rs Normal file
View File

@ -0,0 +1,145 @@
use std::{collections::BTreeSet, fmt::Debug};
use tokio::sync::watch;
/// A relatively naive ID pool implementation.
///
/// Ids are handed out sequentially starting at 1, released ids are reused
/// (smallest first), and every change to the live id count is broadcast
/// over a tokio watch channel.
pub struct IdPool {
    // Highest id this pool may ever hand out.
    max_id: u64,
    // Previously released ids, available for reuse.
    free_ids: BTreeSet<u64>,
    // High-water mark: the largest id handed out so far.
    id_count: u64,
    // Broadcasts the live id count on every change.
    id_count_watch_sender: watch::Sender<u64>,
    // Kept so the channel always has one subscriber; cloned out via
    // `get_id_count_watch_receiver`.
    id_count_watch_receiver: watch::Receiver<u64>,
}
impl Debug for IdPool {
    /// Manual impl: the watch sender/receiver halves are deliberately
    /// omitted from the output.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let Self { max_id, free_ids, id_count, .. } = self;
        f.debug_struct("IdPool")
            .field("max_id", max_id)
            .field("free_ids", free_ids)
            .field("id_count", id_count)
            .finish()
    }
}
impl Default for IdPool {
fn default() -> Self {
let (id_count_watch_sender, id_count_watch_receiver) = watch::channel(0);
Self {
max_id: u64::MAX,
free_ids: BTreeSet::new(),
id_count: 0,
id_count_watch_sender,
id_count_watch_receiver,
}
}
}
//TODO: thiserror
/// Errors returned by [`IdPool`] operations.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum IdPoolError {
    /// The pool reached `max_id` and no released ids are available.
    NoFreeIds,
    /// Attempted to release an id that is not currently handed out.
    IdNotInUse(u64),
    /// The id exceeds the pool's `max_id` limit.
    IdOutOfBound(u64),
}
impl IdPool {
    /// Create a pool that will never hand out ids greater than `max_id`.
    pub fn new_with_max_limit(max_id: u64) -> Self {
        let (id_count_watch_sender, id_count_watch_receiver) = watch::channel(0);
        Self {
            max_id,
            free_ids: BTreeSet::new(),
            id_count: 0,
            id_count_watch_sender,
            id_count_watch_receiver,
        }
    }

    /// Number of ids currently handed out (high-water mark minus freed ids).
    pub fn id_count(&self) -> u64 {
        self.id_count - self.free_ids.len() as u64
    }

    /// Whether `id` is currently handed out.
    ///
    /// Errors with `IdOutOfBound` when `id` exceeds `max_id`. Ids are
    /// handed out starting at 1, so 0 is never in use — the previous
    /// version reported 0 as used, which let `release_id(0)` insert 0
    /// into the free list and `request_id` later hand it out.
    pub fn id_is_used(&self, id: u64) -> Result<bool, IdPoolError> {
        if id > self.max_id {
            Err(IdPoolError::IdOutOfBound(id))
        } else if self.free_ids.contains(&id) {
            Ok(false)
        } else {
            // Ids 1..=id_count have been handed out at some point; 0 never.
            Ok(id != 0 && id <= self.id_count)
        }
    }

    /// Allocate the smallest available id, preferring released ids over
    /// bumping the high-water mark.
    pub fn request_id(&mut self) -> Result<u64, IdPoolError> {
        if let Some(id) = self.free_ids.pop_first() {
            self.update_watch();
            Ok(id)
        } else if self.id_count < self.max_id {
            self.id_count += 1;
            self.update_watch();
            Ok(self.id_count)
        } else {
            Err(IdPoolError::NoFreeIds)
        }
    }

    /// Return `id` to the pool. Errors if it is out of bounds or not
    /// currently in use.
    pub fn release_id(&mut self, id: u64) -> Result<(), IdPoolError> {
        if !self.id_is_used(id)? {
            Err(IdPoolError::IdNotInUse(id))
        } else {
            self.free_ids.insert(id);
            self.update_watch();
            Ok(())
        }
    }

    /// Broadcast the current live-id count to all watch subscribers.
    fn update_watch(&self) {
        // Cannot fail: the pool itself holds a receiver, so the channel
        // always has at least one subscriber.
        self.id_count_watch_sender.send(self.id_count()).unwrap();
    }

    /// A receiver that observes every change of [`IdPool::id_count`].
    pub fn get_id_count_watch_receiver(&self) -> watch::Receiver<u64> {
        self.id_count_watch_receiver.clone()
    }
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_id_pool() {
let mut pool = IdPool::new_with_max_limit(10);
assert_eq!(pool.request_id(), Ok(1));
assert_eq!(pool.request_id(), Ok(2));
assert_eq!(pool.request_id(), Ok(3));
assert_eq!(pool.request_id(), Ok(4));
assert_eq!(pool.id_count(), 4);
assert_eq!(pool.request_id(), Ok(5));
assert_eq!(pool.request_id(), Ok(6));
assert_eq!(pool.request_id(), Ok(7));
assert_eq!(pool.request_id(), Ok(8));
assert_eq!(pool.request_id(), Ok(9));
assert_eq!(pool.request_id(), Ok(10));
assert_eq!(pool.id_count(), 10);
assert_eq!(pool.request_id(), Err(IdPoolError::NoFreeIds));
assert_eq!(pool.release_id(5), Ok(()));
assert_eq!(pool.release_id(5), Err(IdPoolError::IdNotInUse(5)));
assert_eq!(pool.id_count(), 9);
assert_eq!(pool.request_id(), Ok(5));
assert_eq!(pool.release_id(11), Err(IdPoolError::IdOutOfBound(11)));
}
#[test]
fn test_id_pool_watch() {
let mut pool = IdPool::new_with_max_limit(10);
let receiver = pool.get_id_count_watch_receiver();
assert_eq!(receiver.borrow().clone(), 0);
pool.request_id().unwrap();
assert_eq!(receiver.borrow().clone(), 1);
pool.request_id().unwrap();
assert_eq!(receiver.borrow().clone(), 2);
pool.release_id(1).unwrap();
assert_eq!(receiver.borrow().clone(), 1);
}
}