From 31f813f444d67bc202e1bf80819a3282c538010e Mon Sep 17 00:00:00 2001 From: Ryan Daum Date: Tue, 12 Nov 2024 20:28:03 -0500 Subject: [PATCH] Initial support of listen() --- Cargo.lock | 137 +--- Cargo.toml | 2 - crates/console-host/Cargo.toml | 38 -- crates/console-host/src/main.rs | 408 ----------- crates/daemon/src/connections.rs | 4 +- crates/daemon/src/connections_rb.rs | 4 +- crates/daemon/src/connections_wt.rs | 6 +- crates/daemon/src/main.rs | 40 +- crates/daemon/src/rpc_server.rs | 644 +++++++++++++----- crates/daemon/src/rpc_session.rs | 10 +- crates/kernel/src/builtins/bf_server.rs | 132 +++- crates/kernel/src/tasks/mod.rs | 20 +- crates/kernel/src/tasks/scheduler.rs | 64 +- crates/kernel/src/tasks/scheduler_client.rs | 6 + crates/kernel/src/tasks/sessions.rs | 90 ++- crates/kernel/src/tasks/task.rs | 27 +- .../kernel/src/tasks/task_scheduler_client.rs | 85 ++- crates/kernel/src/vm/moo_execute.rs | 12 +- crates/kernel/testsuite/moot_suite.rs | 9 +- crates/rpc-async-client/Cargo.toml | 4 + crates/rpc-async-client/src/lib.rs | 201 ++++++ crates/rpc-async-client/src/listeners.rs | 59 ++ crates/rpc-async-client/src/pubsub_client.rs | 51 +- crates/rpc-async-client/src/rpc_client.rs | 66 +- crates/rpc-common/Cargo.toml | 5 + crates/rpc-common/src/lib.rs | 170 ++++- crates/rpc-sync-client/src/pubsub_client.rs | 10 +- crates/rpc-sync-client/src/rpc_client.rs | 6 +- crates/telnet-host/Cargo.toml | 5 + .../src/{telnet.rs => connection.rs} | 206 ++---- crates/telnet-host/src/listen.rs | 272 ++++++++ crates/telnet-host/src/main.rs | 104 ++- crates/web-host/src/host/auth.rs | 25 +- crates/web-host/src/host/props.rs | 20 +- crates/web-host/src/host/verbs.rs | 67 +- crates/web-host/src/host/web_host.rs | 105 ++- crates/web-host/src/host/ws_connection.rs | 54 +- crates/web-host/src/main.rs | 232 ++++++- 38 files changed, 2291 insertions(+), 1109 deletions(-) delete mode 100644 crates/console-host/Cargo.toml delete mode 100644 crates/console-host/src/main.rs create mode 
100644 crates/rpc-async-client/src/listeners.rs rename crates/telnet-host/src/{telnet.rs => connection.rs} (70%) create mode 100644 crates/telnet-host/src/listen.rs diff --git a/Cargo.lock b/Cargo.lock index d69b44de..7091221e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -403,12 +403,6 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" -[[package]] -name = "cfg_aliases" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd16c4719339c4530435d38e511904438d07cce7950afa3718a84ac36c10e89e" - [[package]] name = "chacha20" version = "0.9.1" @@ -551,15 +545,6 @@ version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1462739cb27611015575c0c11df5df7601141071f07518d56fcc1be504cbec97" -[[package]] -name = "clipboard-win" -version = "5.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15efe7a882b08f34e38556b14f2fb3daa98769d06c7f0c1b076dfd0d983bc892" -dependencies = [ - "error-code", -] - [[package]] name = "cmake" version = "0.1.51" @@ -983,12 +968,6 @@ dependencies = [ "cfg-if", ] -[[package]] -name = "endian-type" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c34f04666d835ff5d62e058c3995147c06f42fe86ff053337632bca83e42702d" - [[package]] name = "enum-primitive-derive" version = "0.3.0" @@ -1026,12 +1005,6 @@ dependencies = [ "windows-sys 0.52.0", ] -[[package]] -name = "error-code" -version = "3.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5d9305ccc6942a704f4335694ecd3de2ea531b114ac2d51f5f843750787a92f" - [[package]] name = "escargot" version = "0.5.13" @@ -1060,17 +1033,6 @@ version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "486f806e73c5707928240ddc295403b1b93c96a02038563881c4a2fd84b81ac4" -[[package]] -name = 
"fd-lock" -version = "4.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e5768da2206272c81ef0b5e951a41862938a6070da63bcea197899942d3b947" -dependencies = [ - "cfg-if", - "rustix", - "windows-sys 0.52.0", -] - [[package]] name = "fiat-crypto" version = "0.2.9" @@ -1302,15 +1264,6 @@ dependencies = [ "digest 0.9.0", ] -[[package]] -name = "home" -version = "0.5.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3d1354bf6b7235cb4a0576c2619fd4ed18183f689b12b006a0ee7329eeff9a5" -dependencies = [ - "windows-sys 0.52.0", -] - [[package]] name = "http" version = "1.1.0" @@ -1737,26 +1690,6 @@ dependencies = [ "unindent", ] -[[package]] -name = "moor-console-host" -version = "0.1.0" -dependencies = [ - "clap", - "clap_derive", - "color-eyre", - "eyre", - "moor-compiler", - "moor-values", - "rpc-common", - "rpc-sync-client", - "rustyline", - "signal-hook", - "tracing", - "tracing-subscriber", - "uuid", - "zmq", -] - [[package]] name = "moor-daemon" version = "0.1.0" @@ -1887,6 +1820,7 @@ dependencies = [ name = "moor-moot" version = "0.1.0" dependencies = [ + "eyre", "moor-values", "pretty_assertions", "tracing", @@ -1900,14 +1834,17 @@ dependencies = [ "clap", "clap_derive", "color-eyre", + "ed25519-dalek", "escargot", "eyre", "futures-util", "moor-compiler", "moor-moot", "moor-values", + "pem", "rpc-async-client", "rpc-common", + "rusty_paseto", "serial_test", "tempfile", "termimad", @@ -1974,27 +1911,6 @@ dependencies = [ "getrandom", ] -[[package]] -name = "nibble_vec" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77a5d83df9f36fe23f0c3648c6bbb8b0298bb5f1939c8f2704431371f4b84d43" -dependencies = [ - "smallvec", -] - -[[package]] -name = "nix" -version = "0.28.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab2156c4fce2f8df6c499cc1c763e4394b7482525bf2a9701c9d79d215f519e4" -dependencies = [ - "bitflags 2.6.0", - "cfg-if", 
- "cfg_aliases", - "libc", -] - [[package]] name = "nom" version = "7.1.3" @@ -2369,16 +2285,6 @@ dependencies = [ "proc-macro2", ] -[[package]] -name = "radix_trie" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c069c179fcdc6a2fe24d8d18305cf085fdbd4f922c041943e203685d6a1c58fd" -dependencies = [ - "endian-type", - "nibble_vec", -] - [[package]] name = "rand" version = "0.8.5" @@ -2534,8 +2440,12 @@ version = "0.1.0" dependencies = [ "bincode", "futures-util", + "moor-values", "rpc-common", + "rusty_paseto", + "thiserror 2.0.3", "tmq", + "tokio", "tracing", "uuid", ] @@ -2545,7 +2455,10 @@ name = "rpc-common" version = "0.1.0" dependencies = [ "bincode", + "ed25519-dalek", "moor-values", + "pem", + "rusty_paseto", "thiserror 2.0.3", ] @@ -2623,28 +2536,6 @@ dependencies = [ "zeroize", ] -[[package]] -name = "rustyline" -version = "14.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7803e8936da37efd9b6d4478277f4b2b9bb5cdb37a113e8d63222e58da647e63" -dependencies = [ - "bitflags 2.6.0", - "cfg-if", - "clipboard-win", - "fd-lock", - "home", - "libc", - "log", - "memchr", - "nix", - "radix_trie", - "unicode-segmentation", - "unicode-width", - "utf8parse", - "windows-sys 0.52.0", -] - [[package]] name = "ryu" version = "1.0.18" @@ -3515,12 +3406,6 @@ version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e91b56cd4cadaeb79bbf1a5645f6b4f8dc5bde8834ad5894a8db35fda9efa1fe" -[[package]] -name = "unicode-segmentation" -version = "1.12.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6ccf251212114b54433ec949fd6a7841275f9ada20dddd2f29e9ceea4501493" - [[package]] name = "unicode-width" version = "0.1.14" diff --git a/Cargo.toml b/Cargo.toml index e876542e..c01a7535 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -3,7 +3,6 @@ resolver = "2" members = [ "crates/compiler", - "crates/console-host", "crates/daemon", "crates/db", 
"crates/db-relbox", @@ -29,7 +28,6 @@ default-members = [ "crates/daemon", "crates/telnet-host", "crates/web-host", - "crates/console-host", "crates/moot", ] diff --git a/crates/console-host/Cargo.toml b/crates/console-host/Cargo.toml deleted file mode 100644 index f753e4f4..00000000 --- a/crates/console-host/Cargo.toml +++ /dev/null @@ -1,38 +0,0 @@ -[package] -name = "moor-console-host" -version = "0.1.0" -authors.workspace = true -categories.workspace = true -edition.workspace = true -keywords.workspace = true -license.workspace = true -readme.workspace = true -repository.workspace = true -rust-version.workspace = true -description = "A tool to connect to a local or remote moor daemon and interact with it via the TTY." - -[dependencies] -moor-compiler = { path = "../compiler" } -moor-values = { path = "../values" } -rpc-common = { path = "../rpc-common" } -rpc-sync-client = { path = "../rpc-sync-client" } - -## Command line arguments parsing. -clap.workspace = true -clap_derive.workspace = true - -## Error handling -color-eyre.workspace = true -eyre.workspace = true - -## Logging & tracing -tracing.workspace = true -tracing-subscriber.workspace = true - -## ZMQ / RPC -uuid.workspace = true -zmq.workspace = true - -## For console -rustyline.workspace = true -signal-hook.workspace = true diff --git a/crates/console-host/src/main.rs b/crates/console-host/src/main.rs deleted file mode 100644 index 2127544b..00000000 --- a/crates/console-host/src/main.rs +++ /dev/null @@ -1,408 +0,0 @@ -// Copyright (C) 2024 Ryan Daum -// -// This program is free software: you can redistribute it and/or modify it under -// the terms of the GNU General Public License as published by the Free Software -// Foundation, version 3. -// -// This program is distributed in the hope that it will be useful, but WITHOUT -// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -// FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. 
-// -// You should have received a copy of the GNU General Public License along with -// this program. If not, see . -// - -use eyre::Error; -use std::sync::atomic::AtomicBool; -use std::sync::{Arc, Mutex}; -use std::time::SystemTime; - -use clap::Parser; -use clap_derive::Parser; -use color_eyre::owo_colors::OwoColorize; -use moor_compiler::to_literal; -use moor_values::Objid; -use rpc_common::{ - AuthToken, BroadcastEvent, ClientToken, ConnectionEvent, RpcRequest, RpcResponse, RpcResult, - BROADCAST_TOPIC, -}; -use rpc_sync_client::RpcSendClient; -use rpc_sync_client::{broadcast_recv, events_recv}; -use rustyline::config::Configurer; -use rustyline::error::ReadlineError; -use rustyline::{ColorMode, DefaultEditor, ExternalPrinter}; -use tracing::{debug, error, info, trace, warn}; -use uuid::Uuid; - -#[derive(Parser, Debug)] -struct Args { - #[arg( - long, - value_name = "rpc-address", - help = "RPC server address", - default_value = "ipc:///tmp/moor_rpc.sock" - )] - rpc_address: String, - - #[arg( - long, - value_name = "events-address", - help = "Narrative server address", - default_value = "ipc:///tmp/moor_events.sock" - )] - events_address: String, - - #[arg( - long, - value_name = "username", - help = "Username to use for authentication", - default_value = "Wizard" - )] - username: String, - - #[arg( - long, - value_name = "password", - help = "Password to use for authentication", - default_value = "" - )] - password: String, -} - -fn establish_connection( - client_id: Uuid, - rpc_client: &mut RpcSendClient, -) -> Result<(ClientToken, Objid), Error> { - match rpc_client.make_rpc_call( - client_id, - RpcRequest::ConnectionEstablish("console".to_string()), - ) { - Ok(RpcResult::Success(RpcResponse::NewConnection(token, conn_id))) => Ok((token, conn_id)), - Ok(RpcResult::Success(response)) => { - error!(?response, "Unexpected response"); - Err(Error::msg("Unexpected response")) - } - Ok(RpcResult::Failure(error)) => { - error!(?error, "Failure connecting"); - 
Err(Error::msg("Failure connecting")) - } - Err(error) => { - error!(?error, "Error connecting"); - Err(Error::msg("Error connecting")) - } - } -} - -fn perform_auth( - token: ClientToken, - client_id: Uuid, - rpc_client: &mut RpcSendClient, - username: &str, - password: &str, -) -> Result<(AuthToken, Objid), Error> { - // Need to first authenticate with the server. - match rpc_client.make_rpc_call( - client_id, - RpcRequest::LoginCommand( - token, - vec![ - "connect".to_string(), - username.to_string(), - password.to_string(), - ], - true, - ), - ) { - Ok(RpcResult::Success(RpcResponse::LoginResult(Some(( - auth_token, - connect_type, - player, - ))))) => { - info!(?connect_type, ?player, "Authenticated"); - Ok((auth_token, player)) - } - Ok(RpcResult::Success(RpcResponse::LoginResult(None))) => { - error!("Authentication failed"); - Err(Error::msg("Authentication failed")) - } - Ok(RpcResult::Success(response)) => { - error!(?response, "Unexpected response"); - Err(Error::msg("Unexpected response")) - } - Ok(RpcResult::Failure(failure)) => { - error!(?failure, "Failure authenticating"); - Err(Error::msg("Failure authenticating")) - } - Err(error) => { - error!(?error, "Error authenticating"); - Err(Error::msg("Error authenticating")) - } - } -} - -fn handle_console_line( - client_token: ClientToken, - auth_token: AuthToken, - client_id: Uuid, - line: &str, - rpc_client: &mut RpcSendClient, - input_request_id: Option, -) { - let line = line.trim(); - if let Some(input_request_id) = input_request_id { - match rpc_client.make_rpc_call( - client_id, - RpcRequest::RequestedInput( - client_token.clone(), - auth_token.clone(), - input_request_id.as_u128(), - line.to_string(), - ), - ) { - Ok(RpcResult::Success(RpcResponse::InputThanks)) => { - trace!("Input complete"); - } - Ok(RpcResult::Success(response)) => { - warn!(?response, "Unexpected input response"); - } - Ok(RpcResult::Failure(error)) => { - error!(?error, "Failure executing input"); - } - Err(error) => { - 
error!(?error, "Error executing input"); - } - } - return; - } - - match rpc_client.make_rpc_call( - client_id, - RpcRequest::Command(client_token.clone(), auth_token.clone(), line.to_string()), - ) { - Ok(RpcResult::Success(RpcResponse::CommandSubmitted(_))) => { - trace!("Command complete"); - } - Ok(RpcResult::Success(response)) => { - warn!(?response, "Unexpected command response"); - } - Ok(RpcResult::Failure(error)) => { - error!(?error, "Failure executing command"); - } - Err(error) => { - error!(?error, "Error executing command"); - } - } -} - -fn console_loop( - rpc_server: &str, - narrative_server: &str, - username: &str, - password: &str, - kill_switch: Arc, -) -> Result<(), Error> { - let zmq_ctx = zmq::Context::new(); - - let rpc_socket = zmq_ctx.socket(zmq::REQ)?; - rpc_socket.connect(rpc_server)?; - - // Establish a connection to the RPC server - let client_id = Uuid::new_v4(); - - let mut rpc_client = RpcSendClient::new(rpc_socket); - - let (client_token, conn_obj_id) = establish_connection(client_id, &mut rpc_client)?; - debug!("Transitional connection ID before auth: {:?}", conn_obj_id); - - // Now authenticate with the server. - let (auth_token, player) = perform_auth( - client_token.clone(), - client_id, - &mut rpc_client, - username, - password, - )?; - - println!( - "Authenticated as {:?} ({})", - username.yellow(), - player.yellow() - ); - - // Spawn a thread to listen for events on the narrative pubsub channel, and send them to the - // console. 
- let narr_sub_socket = zmq_ctx.socket(zmq::SUB)?; - narr_sub_socket.connect(narrative_server)?; - narr_sub_socket.set_subscribe(client_id.as_bytes())?; - let input_request_id = Arc::new(Mutex::new(None)); - let output_input_request_id = input_request_id.clone(); - - let mut rl = DefaultEditor::new().unwrap(); - let mut printer = rl.create_external_printer().unwrap(); - - let output_kill_switch = kill_switch.clone(); - std::thread::Builder::new() - .name("output-loop".to_string()) - .spawn(move || loop { - if output_kill_switch.load(std::sync::atomic::Ordering::Relaxed) { - return; - } - match events_recv(client_id, &narr_sub_socket) { - Ok(ConnectionEvent::Narrative(_, msg)) => { - let var = match msg.event() { - moor_values::tasks::Event::Notify(s, _content_type) => s, - }; - match var.variant() { - moor_values::Variant::Str(s) => { - printer.print(s.as_string().to_string()).unwrap(); - } - _ => { - let literal = to_literal(&var); - printer.print(format!("{}", literal.yellow())).unwrap(); - } - } - } - Ok(ConnectionEvent::SystemMessage(o, msg)) => { - printer - .print(format!("System message from {}: {}", o.yellow(), msg.red())) - .unwrap(); - } - Ok(ConnectionEvent::Disconnect()) => { - printer - .print("Received disconnect event; Session ending.".to_string()) - .unwrap(); - return; - } - Ok(ConnectionEvent::TaskError(e)) => { - printer.print(format!("Error: {:?}", e)).unwrap(); - } - Ok(ConnectionEvent::TaskSuccess(result)) => { - printer.print(format!("=> {:?}", result)).unwrap(); - } - Err(error) => { - printer - .print(format!( - "Error receiving narrative event {:?}; Session ending.", - error - )) - .unwrap(); - return; - } - Ok(ConnectionEvent::RequestInput(requested_input_id)) => { - *output_input_request_id.lock().unwrap() = - Some(Uuid::from_u128(requested_input_id)); - } - } - })?; - - let mut broadcast_subscriber = zmq_ctx.socket(zmq::SUB)?; - broadcast_subscriber.connect(narrative_server)?; - broadcast_subscriber.set_subscribe(BROADCAST_TOPIC)?; - - 
let broadcast_rpc_socket = zmq_ctx.socket(zmq::REQ)?; - broadcast_rpc_socket.connect(rpc_server)?; - let mut broadcast_rpc_client = RpcSendClient::new(broadcast_rpc_socket); - - let broadcast_client_token = client_token.clone(); - let broadcast_kill_switch = kill_switch.clone(); - std::thread::spawn(move || loop { - if broadcast_kill_switch.load(std::sync::atomic::Ordering::Relaxed) { - return; - } - match broadcast_recv(&mut broadcast_subscriber) { - Ok(BroadcastEvent::PingPong(_)) => { - if let Err(e) = broadcast_rpc_client.make_rpc_call( - client_id, - RpcRequest::Pong(broadcast_client_token.clone(), SystemTime::now()), - ) { - error!("Error sending pong: {:?}", e); - return; - } - } - Err(e) => { - error!("Error receiving broadcast event: {:?}; Session ending.", e); - return; - } - } - }); - - let edit_client_token = client_token.clone(); - let edit_auth_token = auth_token.clone(); - - rl.set_color_mode(ColorMode::Enabled); - - loop { - if kill_switch.load(std::sync::atomic::Ordering::Relaxed) { - break; - } - // TODO: unprovoked output from the narrative stream screws up the prompt midstream, - // but we have no real way to signal to this loop that it should newline for - // cleanliness. Need to figure out something for this. 
- let input_request_id = input_request_id.lock().unwrap().take(); - let prompt = if let Some(input_request_id) = input_request_id { - format!("{} > ", input_request_id) - } else { - "> ".to_string() - }; - let output = rl.readline(prompt.as_str()); - match output { - Ok(line) => { - rl.add_history_entry(line.clone()) - .expect("Could not add history"); - handle_console_line( - edit_client_token.clone(), - edit_auth_token.clone(), - client_id, - &line, - &mut rpc_client, - input_request_id, - ); - } - Err(ReadlineError::Eof) => { - eprintln!("{}", "".red()); - break; - } - Err(ReadlineError::Interrupted) => { - eprintln!("{}", "^C".red()); - continue; - } - Err(e) => { - eprintln!("{}: {}", "Error".red(), e.red()); - break; - } - } - } - - Ok(()) -} - -fn main() -> Result<(), Error> { - color_eyre::install()?; - - let args: Args = Args::parse(); - - let main_subscriber = tracing_subscriber::fmt() - .compact() - .with_ansi(true) - .with_file(false) - .with_line_number(false) - .with_thread_names(false) - .without_time() - .with_target(false) - .with_max_level(tracing::Level::INFO) - .finish(); - tracing::subscriber::set_global_default(main_subscriber) - .expect("Unable to set configure logging"); - - let kill_switch = Arc::new(AtomicBool::new(false)); - signal_hook::flag::register(signal_hook::consts::SIGTERM, kill_switch.clone())?; - signal_hook::flag::register(signal_hook::consts::SIGINT, kill_switch.clone())?; - - console_loop( - &args.rpc_address, - args.events_address.as_str(), - &args.username, - &args.password, - kill_switch.clone(), - ) -} diff --git a/crates/daemon/src/connections.rs b/crates/daemon/src/connections.rs index 07cc23f1..99853b8a 100644 --- a/crates/daemon/src/connections.rs +++ b/crates/daemon/src/connections.rs @@ -18,7 +18,7 @@ use uuid::Uuid; use moor_kernel::tasks::sessions::SessionError; use moor_values::Objid; -use rpc_common::RpcRequestError; +use rpc_common::RpcMessageError; pub const CONNECTION_TIMEOUT_DURATION: Duration = 
Duration::from_secs(30); @@ -37,7 +37,7 @@ pub trait ConnectionsDB { client_id: Uuid, hostname: String, player: Option, - ) -> Result; + ) -> Result; /// Record activity for the given client. fn record_client_activity(&self, client_id: Uuid, connobj: Objid) -> Result<(), eyre::Error>; diff --git a/crates/daemon/src/connections_rb.rs b/crates/daemon/src/connections_rb.rs index bcf1367c..54ef2993 100644 --- a/crates/daemon/src/connections_rb.rs +++ b/crates/daemon/src/connections_rb.rs @@ -30,7 +30,7 @@ use moor_kernel::tasks::sessions::SessionError; use moor_values::AsByteBuffer; use moor_values::Objid; use relbox::{relation_info_for, RelBox, RelationId, RelationInfo, Transaction}; -use rpc_common::RpcRequestError; +use rpc_common::RpcMessageError; use crate::connections::{ConnectionsDB, CONNECTION_TIMEOUT_DURATION}; @@ -191,7 +191,7 @@ impl ConnectionsDB for ConnectionsRb { client_id: Uuid, hostname: String, player: Option, - ) -> Result { + ) -> Result { let connection_oid = match player { None => { // The connection object is pulled from the sequence, then we invert it and subtract from diff --git a/crates/daemon/src/connections_wt.rs b/crates/daemon/src/connections_wt.rs index f697191e..0051e059 100644 --- a/crates/daemon/src/connections_wt.rs +++ b/crates/daemon/src/connections_wt.rs @@ -33,7 +33,7 @@ use moor_kernel::tasks::sessions::SessionError; use moor_values::model::{CommitResult, ValSet}; use moor_values::Objid; use moor_values::{AsByteBuffer, DecodingError, EncodingError}; -use rpc_common::RpcRequestError; +use rpc_common::RpcMessageError; use crate::connections::{ConnectionsDB, CONNECTION_TIMEOUT_DURATION}; use crate::connections_wt::ConnectionRelation::{ @@ -280,7 +280,7 @@ impl ConnectionsDB for ConnectionsWT { client_id: Uuid, hostname: String, player: Option, - ) -> Result { + ) -> Result { retry_tx_action(&self.db, |tx| { let connection_oid = match player { None => { @@ -304,7 +304,7 @@ impl ConnectionsDB for ConnectionsWT { Ok(connection_oid) }) 
- .map_err(|e| RpcRequestError::InternalError(e.to_string())) + .map_err(|e| RpcMessageError::InternalError(e.to_string())) } fn record_client_activity(&self, client_id: Uuid, _connobj: Objid) -> Result<(), Error> { diff --git a/crates/daemon/src/main.rs b/crates/daemon/src/main.rs index 258bef42..4fe244cd 100644 --- a/crates/daemon/src/main.rs +++ b/crates/daemon/src/main.rs @@ -39,6 +39,7 @@ use moor_db_relbox::RelBoxDatabaseBuilder; #[cfg(feature = "relbox")] use moor_kernel::tasks::NoopTasksDb; use moor_kernel::tasks::TasksDb; +use rpc_common::load_keypair; mod connections; @@ -128,7 +129,7 @@ struct Args { #[arg( long, value_name = "public_key", - help = "file containing a pkcs8 ed25519 public key, used for authenticating client connections", + help = "file containing a pkcs8 ed25519 public key, used for authenticating client & host connections", default_value = "public_key.pem" )] public_key: PathBuf, @@ -136,7 +137,7 @@ struct Args { #[arg( long, value_name = "private_key", - help = "file containing a pkcs8 ed25519 private key, used for authenticating client connections", + help = "file containing a pkcs8 ed25519 private key, used for authenticating client & host connections", default_value = "private_key.pem" )] private_key: PathBuf, @@ -251,21 +252,11 @@ fn main() -> Result<(), Report> { // Check the public/private keypair file to see if it exists. If it does, parse it and establish // the keypair from it... 
let keypair = if args.public_key.exists() && args.private_key.exists() { - let privkey_pem = std::fs::read(args.private_key).expect("Unable to read private key"); - let pubkey_pem = std::fs::read(args.public_key).expect("Unable to read public key"); - - let privkey_pem = pem::parse(privkey_pem).expect("Unable to parse private key"); - let pubkey_pem = pem::parse(pubkey_pem).expect("Unable to parse public key"); - - let mut key_bytes = privkey_pem.contents().to_vec(); - key_bytes.extend_from_slice(pubkey_pem.contents()); - - let key: Key<64> = Key::from(&key_bytes[0..64]); - key + load_keypair(&args.public_key, &args.private_key) + .expect("Unable to load keypair from public and private key files") } else { // Otherwise, check to see if --generate-keypair was passed. If it was, generate a new // keypair and save it to the file; otherwise, error out. - if args.generate_keypair { let mut csprng = OsRng; let signing_key: SigningKey = SigningKey::generate(&mut csprng); @@ -355,12 +346,6 @@ fn main() -> Result<(), Report> { } }; - // The pieces from core we're going to use: - // Our DB. - // Our scheduler. - let scheduler = Scheduler::new(database, tasks_db, config.clone()); - let scheduler_client = scheduler.client().expect("Failed to get scheduler client"); - // We have to create the RpcServer before starting the scheduler because we need to pass it in // as a parameter to the scheduler for background session construction. @@ -374,8 +359,15 @@ fn main() -> Result<(), Report> { zmq_ctx.clone(), args.events_listen.as_str(), args.db_flavour, - config, + config.clone(), )); + let kill_switch = rpc_server.kill_switch(); + + // The pieces from core we're going to use: + // Our DB. + // Our scheduler. 
+ let scheduler = Scheduler::new(database, tasks_db, config, rpc_server.clone()); + let scheduler_client = scheduler.client().expect("Failed to get scheduler client"); // The scheduler thread: let scheduler_rpc_server = rpc_server.clone(); @@ -383,8 +375,6 @@ fn main() -> Result<(), Report> { .name("moor-scheduler".to_string()) .spawn(move || scheduler.run(scheduler_rpc_server))?; - let kill_switch = Arc::new(std::sync::atomic::AtomicBool::new(false)); - // Background DB checkpoint thread. let checkpoint_kill_switch = kill_switch.clone(); let checkpoint_scheduler_client = scheduler_client.clone(); @@ -402,15 +392,13 @@ fn main() -> Result<(), Report> { .expect("Failed to submit checkpoint"); })?; - let rpc_kill_switch = kill_switch.clone(); - let rpc_loop_scheduler_client = scheduler_client.clone(); let rpc_listen = args.rpc_listen.clone(); let rpc_loop_thread = std::thread::Builder::new() .name("moor-rpc".to_string()) .spawn(move || { rpc_server - .zmq_loop(rpc_listen, rpc_loop_scheduler_client, rpc_kill_switch) + .request_loop(rpc_listen, rpc_loop_scheduler_client) .expect("RPC thread failed"); })?; diff --git a/crates/daemon/src/rpc_server.rs b/crates/daemon/src/rpc_server.rs index 980c17f3..1a91b625 100644 --- a/crates/daemon/src/rpc_server.rs +++ b/crates/daemon/src/rpc_server.rs @@ -15,10 +15,11 @@ //! 
The core of the server logic for the RPC daemon use std::collections::HashMap; +use std::net::SocketAddr; use std::path::PathBuf; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::{Arc, Mutex}; -use std::time::SystemTime; +use std::time::{Duration, SystemTime}; use eyre::{Context, Error}; @@ -31,7 +32,7 @@ use moor_db::DatabaseFlavour; use moor_kernel::config::Config; use moor_kernel::tasks::command_parse::preposition_to_string; use moor_kernel::tasks::sessions::SessionError::DeliveryError; -use moor_kernel::tasks::sessions::{Session, SessionError, SessionFactory}; +use moor_kernel::tasks::sessions::{Session, SessionError, SessionFactory, SystemControl}; use moor_kernel::tasks::TaskHandle; use moor_kernel::SchedulerClient; use moor_values::model::{Named, ObjectRef, PropFlag, ValSet, VerbFlag}; @@ -42,11 +43,13 @@ use moor_values::Variant; use moor_values::SYSTEM_OBJECT; use moor_values::{v_objid, v_str, Symbol}; use moor_values::{Objid, Var}; -use rpc_common::RpcResponse::{LoginResult, NewConnection}; +use rpc_common::DaemonToClientReply::{LoginResult, NewConnection}; use rpc_common::{ - AuthToken, BroadcastEvent, ClientToken, ConnectType, ConnectionEvent, EntityType, PropInfo, - RpcRequest, RpcRequestError, RpcResponse, RpcResult, VerbInfo, VerbProgramResponse, - BROADCAST_TOPIC, MOOR_AUTH_TOKEN_FOOTER, MOOR_SESSION_TOKEN_FOOTER, + AuthToken, ClientEvent, ClientToken, ClientsBroadcastEvent, ConnectType, DaemonToClientReply, + DaemonToHostReply, EntityType, HostBroadcastEvent, HostClientToDaemonMessage, + HostToDaemonMessage, HostToken, HostType, MessageType, PropInfo, ReplyResult, RpcMessageError, + VerbInfo, VerbProgramResponse, CLIENT_BROADCAST_TOPIC, HOST_BROADCAST_TOPIC, + MOOR_AUTH_TOKEN_FOOTER, MOOR_HOST_TOKEN_FOOTER, MOOR_SESSION_TOKEN_FOOTER, }; use rusty_paseto::core::{ Footer, Paseto, PasetoAsymmetricPrivateKey, PasetoAsymmetricPublicKey, Payload, Public, V4, @@ -64,12 +67,77 @@ pub struct RpcServer { connections: Arc, task_handles: Mutex>, 
config: Arc, + kill_switch: Arc, + hosts: Arc>, } -pub(crate) fn pack_response(result: Result) -> Vec { +/// If we don't hear from a host in this time, we consider it dead and its listeners gone. +pub const HOST_TIMEOUT: Duration = Duration::from_secs(10); + +#[derive(Default)] +struct Hosts(HashMap)>); + +impl Hosts { + fn receive_ping( + &mut self, + host_token: HostToken, + host_type: HostType, + listeners: Vec<(Objid, SocketAddr)>, + ) -> bool { + let now = SystemTime::now(); + self.0 + .insert(host_token, (now, host_type, listeners)) + .is_none() + } + + fn ping_check(&mut self, timeout: std::time::Duration) { + let now = SystemTime::now(); + let mut expired = vec![]; + for (host_token, (last_seen, _, _)) in self.0.iter() { + if now.duration_since(*last_seen).unwrap() > timeout { + warn!( + "Host {} has not responded in time: {:?}, removing its listeners from the list", + host_token.0, + now.duration_since(*last_seen).unwrap() + ); + expired.push(host_token.clone()); + } + } + for host_token in expired { + self.unregister_host(&host_token); + } + } + + fn listeners(&self) -> Vec<(Objid, HostType, SocketAddr)> { + self.0 + .values() + .flat_map(|(_, host_type, listeners)| { + listeners + .iter() + .map(move |(oid, addr)| (*oid, *host_type, *addr)) + }) + .collect() + } + + fn unregister_host(&mut self, host_token: &HostToken) { + self.0.remove(host_token); + } +} + +pub(crate) fn pack_client_response( + result: Result, +) -> Vec { let rpc_result = match result { - Ok(r) => RpcResult::Success(r), - Err(e) => RpcResult::Failure(e), + Ok(r) => ReplyResult::ClientSuccess(r), + Err(e) => ReplyResult::Failure(e), + }; + bincode::encode_to_vec(&rpc_result, bincode::config::standard()).unwrap() +} + +pub(crate) fn pack_host_response(result: Result) -> Vec { + let rpc_result = match result { + Ok(r) => ReplyResult::HostSuccess(r), + Err(e) => ReplyResult::Failure(e), }; bincode::encode_to_vec(&rpc_result, bincode::config::standard()).unwrap() } @@ -105,6 +173,7 @@ impl 
RpcServer { "Created connections list, with {} initial known connections", connections.connections().len() ); + let kill_switch = Arc::new(AtomicBool::new(false)); Self { keypair, connections, @@ -112,14 +181,19 @@ impl RpcServer { zmq_context, task_handles: Default::default(), config, + kill_switch, + hosts: Default::default(), } } - pub(crate) fn zmq_loop( + pub(crate) fn kill_switch(&self) -> Arc { + self.kill_switch.clone() + } + + pub(crate) fn request_loop( self: Arc, rpc_endpoint: String, scheduler_client: SchedulerClient, - kill_switch: Arc, ) -> eyre::Result<()> { // Start up the ping-ponger timer in a background thread... let t_rpc_server = self.clone(); @@ -143,7 +217,7 @@ impl RpcServer { let this = self.clone(); loop { - if kill_switch.load(Ordering::Relaxed) { + if self.kill_switch.load(Ordering::Relaxed) { info!("Kill switch activated, exiting"); return Ok(()); } @@ -164,85 +238,181 @@ impl RpcServer { Ok(request) => { trace!(num_parts = request.len(), "ZQM Request received"); - // Components are: - if request.len() != 2 { - error!("Invalid request received, ignoring"); - - rpc_socket.send_multipart( - vec![pack_response(Err(RpcRequestError::InvalidRequest))], - 0, - )?; - continue; - } + // Components are: [msg_type, request_body] if request.len() != 2 { - rpc_socket.send_multipart( - vec![pack_response(Err(RpcRequestError::InvalidRequest))], - 0, - )?; + Self::reply_invalid_request(&rpc_socket, "Incorrect message length")?; continue; } - let (client_id, request_body) = (&request[0], &request[1]); - - let Ok(client_id) = Uuid::from_slice(client_id) else { - rpc_socket.send_multipart( - vec![pack_response(Err(RpcRequestError::InvalidRequest))], - 0, - )?; - continue; - }; + let (msg_type, request_body) = (&request[0], &request[1]); - // Decode 'request_body' as a bincode'd ClientEvent. 
- let request = - match bincode::decode_from_slice(request_body, bincode::config::standard()) - { - Ok((request, _)) => request, + // Decode the msg_type + let msg_type: MessageType = + match bincode::decode_from_slice(msg_type, bincode::config::standard()) { + Ok((msg_type, _)) => msg_type, Err(_) => { - rpc_socket.send_multipart( - vec![pack_response(Err(RpcRequestError::InvalidRequest))], - 0, + Self::reply_invalid_request( + &rpc_socket, + "Could not decode message type", )?; - continue; } }; - // The remainder of the payload are all the request arguments, which vary depending - // on the type. - let response = - this.clone() - .process_request(scheduler_client.clone(), client_id, request); - let response = pack_response(response); - rpc_socket.send_multipart(vec![response], 0)?; + match msg_type { + MessageType::HostToDaemon(host_token) => { + // Validate host token, and process host message... + // The host token is a Paseto Token signed with our same keypair. + if let Err(e) = self.validate_host_token(&host_token) { + Self::reply_invalid_request( + &rpc_socket, + &format!("Invalid host token received: {}", e), + )?; + continue; + } + + // Decode. + let host_message: HostToDaemonMessage = match bincode::decode_from_slice( + request_body, + bincode::config::standard(), + ) { + Ok((host_message, _)) => host_message, + Err(_) => { + Self::reply_invalid_request( + &rpc_socket, + "Could not decode host message", + )?; + continue; + } + }; + + // Process + let response = + this.clone().process_host_request(host_token, host_message); + + // Reply with Ack. + rpc_socket.send_multipart(vec![response], 0)?; + } + MessageType::HostClientToDaemon(client_id) => { + // Parse the client_id as a uuid + let client_id = match Uuid::from_slice(&client_id) { + Ok(client_id) => client_id, + Err(_) => { + Self::reply_invalid_request(&rpc_socket, "Bad client id")?; + continue; + } + }; + + // Decode 'request_body' as a bincode'd ClientEvent. 
+ let request = match bincode::decode_from_slice( + request_body, + bincode::config::standard(), + ) { + Ok((request, _)) => request, + Err(_) => { + Self::reply_invalid_request( + &rpc_socket, + "Could not decode request body", + )?; + continue; + } + }; + + // The remainder of the payload are all the request arguments, which vary depending + // on the type. + let response = this.clone().process_request( + scheduler_client.clone(), + client_id, + request, + ); + let response = pack_client_response(response); + rpc_socket.send_multipart(vec![response], 0)?; + } + } } } } } - fn client_auth(&self, token: ClientToken, client_id: Uuid) -> Result { + fn reply_invalid_request(socket: &zmq::Socket, reason: &str) -> eyre::Result<()> { + warn!("Invalid request received, replying with error: {reason}"); + socket.send_multipart( + vec![pack_client_response(Err(RpcMessageError::InvalidRequest( + reason.to_string(), + )))], + 0, + )?; + Ok(()) + } + + fn client_auth(&self, token: ClientToken, client_id: Uuid) -> Result { let Some(connection) = self.connections.connection_object_for_client(client_id) else { - return Err(RpcRequestError::NoConnection); + return Err(RpcMessageError::NoConnection); }; self.validate_client_token(token, client_id)?; Ok(connection) } + pub fn process_host_request( + self: Arc, + host_token: HostToken, + host_message: HostToDaemonMessage, + ) -> Vec { + let mut hosts = self.hosts.lock().unwrap(); + match host_message { + HostToDaemonMessage::RegisterHost(_, host_type, listeners) => { + info!( + "Host {} registered with {} listeners", + host_token.0, + listeners.len() + ); + // Record this as a ping. If it's a new host, log that. + hosts.receive_ping(host_token, host_type, listeners); + + // Reply with an ack. 
+ pack_host_response(Ok(DaemonToHostReply::Ack)) + } + HostToDaemonMessage::HostPong(_, host_type, listeners) => { + // Record this as a ping + let num_listeners = listeners.len(); + if hosts.receive_ping(host_token.clone(), host_type, listeners) { + info!( + "Host {} registered with {} listeners", + host_token.0, num_listeners + ); + } + + // Reply with an ack. + pack_host_response(Ok(DaemonToHostReply::Ack)) + } + HostToDaemonMessage::DetachHost() => { + hosts.unregister_host(&host_token); + pack_host_response(Ok(DaemonToHostReply::Ack)) + } + } + } + /// Process a request (originally ZMQ REQ) and produce a reply (becomes ZMQ REP) pub fn process_request( self: Arc, scheduler_client: SchedulerClient, client_id: Uuid, - request: RpcRequest, - ) -> Result { + request: HostClientToDaemonMessage, + ) -> Result { match request { - RpcRequest::ConnectionEstablish(hostname) => { + HostClientToDaemonMessage::ConnectionEstablish(hostname) => { let oid = self.connections.new_connection(client_id, hostname, None)?; let token = self.make_client_token(client_id); Ok(NewConnection(token, oid)) } - RpcRequest::Attach(auth_token, connect_type, hostname) => { + HostClientToDaemonMessage::Attach( + auth_token, + connect_type, + handler_object, + hostname, + ) => { // Validate the auth token, and get the player. let player = self.validate_auth_token(auth_token, None)?; @@ -253,6 +423,7 @@ impl RpcServer { if let Some(connect_type) = connect_type { trace!(?player, "Submitting user_connected task"); if let Err(e) = self.clone().submit_connected_task( + handler_object, scheduler_client, client_id, player, @@ -264,14 +435,17 @@ impl RpcServer { // but we do log the error. 
} } - Ok(RpcResponse::AttachResult(Some((client_token, player)))) + Ok(DaemonToClientReply::AttachResult(Some(( + client_token, + player, + )))) } // Bodacious Totally Awesome Hey Dudes Have Mr Pong's Chinese Food - RpcRequest::Pong(token, _client_sys_time) => { + HostClientToDaemonMessage::ClientPong(token, _client_sys_time, _, _, _) => { // Always respond with a ThanksPong, even if it's somebody we don't know. // Can easily be a connection that was in the middle of negotiation at the time the // ping was sent out, or dangling in some other way. - let response = Ok(RpcResponse::ThanksPong(SystemTime::now())); + let response = Ok(DaemonToClientReply::ThanksPong(SystemTime::now())); let connection = self.client_auth(token, client_id)?; // Let 'connections' know that the connection is still alive. @@ -281,26 +455,37 @@ impl RpcServer { }; response } - RpcRequest::RequestSysProp(token, object, property) => { + HostClientToDaemonMessage::RequestSysProp(token, object, property) => { let connection = self.client_auth(token, client_id)?; self.clone() .request_sys_prop(scheduler_client, connection, object, property) } - RpcRequest::LoginCommand(token, args, attach) => { + HostClientToDaemonMessage::LoginCommand(token, handler_object, args, attach) => { let connection = self.client_auth(token, client_id)?; - self.clone() - .perform_login(scheduler_client, client_id, connection, args, attach) + self.clone().perform_login( + handler_object, + scheduler_client, + client_id, + connection, + args, + attach, + ) } - RpcRequest::Command(token, auth_token, command) => { + HostClientToDaemonMessage::Command(token, auth_token, handler_object, command) => { let connection = self.client_auth(token, client_id)?; self.validate_auth_token(auth_token, Some(connection))?; - self.clone() - .perform_command(scheduler_client, client_id, connection, command) + self.clone().perform_command( + scheduler_client, + client_id, + handler_object, + connection, + command, + ) } - 
RpcRequest::RequestedInput(token, auth_token, request_id, input) => { + HostClientToDaemonMessage::RequestedInput(token, auth_token, request_id, input) => { let connection = self.client_auth(token, client_id)?; self.validate_auth_token(auth_token, Some(connection))?; @@ -313,22 +498,27 @@ impl RpcServer { input, ) } - RpcRequest::OutOfBand(token, auth_token, command) => { + HostClientToDaemonMessage::OutOfBand(token, auth_token, handler_object, command) => { let connection = self.client_auth(token, client_id)?; self.validate_auth_token(auth_token, Some(connection))?; - self.clone() - .perform_out_of_band(scheduler_client, client_id, connection, command) + self.clone().perform_out_of_band( + scheduler_client, + handler_object, + client_id, + connection, + command, + ) } - RpcRequest::Eval(token, auth_token, evalstr) => { + HostClientToDaemonMessage::Eval(token, auth_token, evalstr) => { let connection = self.client_auth(token, client_id)?; self.validate_auth_token(auth_token, Some(connection))?; self.clone() .eval(scheduler_client, client_id, connection, evalstr) } - RpcRequest::InvokeVerb(token, auth_token, object, verb, args) => { + HostClientToDaemonMessage::InvokeVerb(token, auth_token, object, verb, args) => { let connection = self.client_auth(token, client_id)?; self.validate_auth_token(auth_token, Some(connection))?; @@ -342,7 +532,7 @@ impl RpcServer { ) } - RpcRequest::Retrieve(token, auth_token, who, retr_type, what) => { + HostClientToDaemonMessage::Retrieve(token, auth_token, who, retr_type, what) => { let connection = self.client_auth(token, client_id)?; self.validate_auth_token(auth_token, Some(connection))?; @@ -352,11 +542,11 @@ impl RpcServer { .request_property(connection, connection, who, what) .map_err(|e| { error!(error = ?e, "Error requesting property"); - RpcRequestError::EntityRetrievalError( + RpcMessageError::EntityRetrievalError( "error requesting property".to_string(), ) })?; - Ok(RpcResponse::PropertyValue( + 
Ok(DaemonToClientReply::PropertyValue( PropInfo { definer: propdef.definer(), location: propdef.location(), @@ -374,7 +564,7 @@ impl RpcServer { .request_verb(connection, connection, who, what) .map_err(|e| { error!(error = ?e, "Error requesting verb"); - RpcRequestError::EntityRetrievalError( + RpcMessageError::EntityRetrievalError( "error requesting verb".to_string(), ) })?; @@ -384,7 +574,7 @@ impl RpcServer { Symbol::mk(preposition_to_string(&argspec.prep)), Symbol::mk(argspec.iobj.to_string()), ]; - Ok(RpcResponse::VerbValue( + Ok(DaemonToClientReply::VerbValue( VerbInfo { location: verbdef.location(), owner: verbdef.owner(), @@ -400,7 +590,7 @@ impl RpcServer { } } } - RpcRequest::Resolve(token, auth_token, objref) => { + HostClientToDaemonMessage::Resolve(token, auth_token, objref) => { let connection = self.client_auth(token, client_id)?; self.validate_auth_token(auth_token, Some(connection))?; @@ -408,12 +598,12 @@ impl RpcServer { .resolve_object(connection, objref) .map_err(|e| { error!(error = ?e, "Error resolving object"); - RpcRequestError::EntityRetrievalError("error resolving object".to_string()) + RpcMessageError::EntityRetrievalError("error resolving object".to_string()) })?; - Ok(RpcResponse::ResolveResult(resolved)) + Ok(DaemonToClientReply::ResolveResult(resolved)) } - RpcRequest::Properties(token, auth_token, obj) => { + HostClientToDaemonMessage::Properties(token, auth_token, obj) => { let connection = self.client_auth(token, client_id)?; self.validate_auth_token(auth_token, Some(connection))?; @@ -421,7 +611,7 @@ impl RpcServer { .request_properties(connection, connection, obj) .map_err(|e| { error!(error = ?e, "Error requesting properties"); - RpcRequestError::EntityRetrievalError( + RpcMessageError::EntityRetrievalError( "error requesting properties".to_string(), ) })?; @@ -439,9 +629,9 @@ impl RpcServer { }) .collect(); - Ok(RpcResponse::Properties(props)) + Ok(DaemonToClientReply::Properties(props)) } - RpcRequest::Verbs(token, 
auth_token, obj) => { + HostClientToDaemonMessage::Verbs(token, auth_token, obj) => { let connection = self.client_auth(token, client_id)?; self.validate_auth_token(auth_token, Some(connection))?; @@ -449,7 +639,7 @@ impl RpcServer { .request_verbs(connection, connection, obj) .map_err(|e| { error!(error = ?e, "Error requesting verbs"); - RpcRequestError::EntityRetrievalError("error requesting verbs".to_string()) + RpcMessageError::EntityRetrievalError("error requesting verbs".to_string()) })?; let verbs = verbs @@ -470,23 +660,23 @@ impl RpcServer { }) .collect(); - Ok(RpcResponse::Verbs(verbs)) + Ok(DaemonToClientReply::Verbs(verbs)) } - RpcRequest::Detach(token) => { + HostClientToDaemonMessage::Detach(token) => { self.validate_client_token(token, client_id)?; debug!(?client_id, "Detaching client"); // Detach this client id from the player/connection object. let Ok(_) = self.connections.remove_client_connection(client_id) else { - return Err(RpcRequestError::InternalError( + return Err(RpcMessageError::InternalError( "Unable to remove client connection".to_string(), )); }; - Ok(RpcResponse::Disconnected) + Ok(DaemonToClientReply::Disconnected) } - RpcRequest::Program(token, auth_token, object, verb, code) => { + HostClientToDaemonMessage::Program(token, auth_token, object, verb, code) => { let connection = self.client_auth(token, client_id)?; self.validate_auth_token(auth_token, Some(connection))?; @@ -544,7 +734,7 @@ impl RpcServer { let all_client_ids = self.connections.client_ids_for(player)?; let publish = self.events_publish.lock().unwrap(); - let event = ConnectionEvent::Disconnect(); + let event = ClientEvent::Disconnect(); let event_bytes = bincode::encode_to_vec(event, bincode::config::standard()) .expect("Unable to serialize disconnection event"); for client_id in all_client_ids { @@ -572,31 +762,32 @@ impl RpcServer { player: Objid, object: ObjectRef, property: Symbol, - ) -> Result { + ) -> Result { let pv = match 
scheduler_client.request_system_property(player, object, property) { Ok(pv) => pv, Err(CommandExecutionError(CommandError::NoObjectMatch)) => { - return Ok(RpcResponse::SysPropValue(None)); + return Ok(DaemonToClientReply::SysPropValue(None)); } Err(e) => { error!(error = ?e, "Error requesting system property"); - return Err(RpcRequestError::ErrorCouldNotRetrieveSysProp( + return Err(RpcMessageError::ErrorCouldNotRetrieveSysProp( "error requesting system property".to_string(), )); } }; - Ok(RpcResponse::SysPropValue(Some(pv))) + Ok(DaemonToClientReply::SysPropValue(Some(pv))) } fn perform_login( self: Arc, + handler_object: Objid, scheduler_client: SchedulerClient, client_id: Uuid, connection: Objid, args: Vec, attach: bool, - ) -> Result { + ) -> Result { // TODO: change result of login to return this information, rather than just Objid, so // we're not dependent on this. let connect_type = if args.first() == Some(&"create".to_string()) { @@ -610,11 +801,11 @@ impl RpcServer { connect_type, client_id ); let Ok(session) = self.clone().new_session(client_id, connection) else { - return Err(RpcRequestError::CreateSessionFailed); + return Err(RpcMessageError::CreateSessionFailed); }; let task_handle = match scheduler_client.submit_verb_task( connection, - ObjectRef::Id(SYSTEM_OBJECT), + ObjectRef::Id(handler_object), Symbol::mk("do_login_command"), args.iter().map(|s| v_str(s)).collect(), args.join(" "), @@ -625,7 +816,7 @@ impl RpcServer { Err(e) => { error!(error = ?e, "Error submitting login task"); - return Err(RpcRequestError::InternalError(e.to_string())); + return Err(RpcMessageError::InternalError(e.to_string())); } }; let receiver = task_handle.into_receiver(); @@ -645,12 +836,12 @@ impl RpcServer { Ok(Err(e)) => { error!(error = ?e, "Error waiting for login results"); - return Err(RpcRequestError::LoginTaskFailed); + return Err(RpcMessageError::LoginTaskFailed); } Err(e) => { error!(error = ?e, "Error waiting for login results"); - return 
Err(RpcRequestError::InternalError(e.to_string())); + return Err(RpcMessageError::InternalError(e.to_string())); } }; @@ -664,7 +855,7 @@ impl RpcServer { .connections .update_client_connection(connection, player) else { - return Err(RpcRequestError::InternalError( + return Err(RpcMessageError::InternalError( "Unable to update client connection".to_string(), )); }; @@ -672,6 +863,7 @@ impl RpcServer { if attach { trace!(?player, "Submitting user_connected task"); if let Err(e) = self.clone().submit_connected_task( + handler_object, scheduler_client, client_id, player, @@ -691,6 +883,7 @@ impl RpcServer { fn submit_connected_task( self: Arc, + handler_object: Objid, scheduler_client: SchedulerClient, client_id: Uuid, player: Objid, @@ -709,7 +902,7 @@ impl RpcServer { scheduler_client .submit_verb_task( player, - ObjectRef::Id(SYSTEM_OBJECT), + ObjectRef::Id(handler_object), connected_verb, vec![v_objid(player)], "".to_string(), @@ -724,11 +917,12 @@ impl RpcServer { self: Arc, scheduler_client: SchedulerClient, client_id: Uuid, + handler_object: Objid, connection: Objid, command: String, - ) -> Result { + ) -> Result { let Ok(session) = self.clone().new_session(client_id, connection) else { - return Err(RpcRequestError::CreateSessionFailed); + return Err(RpcMessageError::CreateSessionFailed); }; if let Err(e) = self @@ -744,16 +938,20 @@ impl RpcServer { ?connection, "Invoking submit_command_task" ); - let parse_command_task_handle = - match scheduler_client.submit_command_task(connection, command.as_str(), session) { - Ok(t) => t, - Err(e) => return Err(RpcRequestError::TaskError(e)), - }; + let parse_command_task_handle = match scheduler_client.submit_command_task( + handler_object, + connection, + command.as_str(), + session, + ) { + Ok(t) => t, + Err(e) => return Err(RpcMessageError::TaskError(e)), + }; let task_id = parse_command_task_handle.task_id(); let mut th_q = self.task_handles.lock().unwrap(); th_q.insert(task_id, (client_id, 
parse_command_task_handle)); - Ok(RpcResponse::CommandSubmitted(task_id)) + Ok(DaemonToClientReply::CommandSubmitted(task_id)) } fn respond_input( @@ -763,7 +961,7 @@ impl RpcServer { connection: Objid, input_request_id: Uuid, input: String, - ) -> Result { + ) -> Result { if let Err(e) = self .connections .record_client_activity(client_id, connection) @@ -775,27 +973,29 @@ impl RpcServer { if let Err(e) = scheduler_client.submit_requested_input(connection, input_request_id, input) { error!(error = ?e, "Error submitting requested input"); - return Err(RpcRequestError::InternalError(e.to_string())); + return Err(RpcMessageError::InternalError(e.to_string())); } // TODO: do we need a new response for this? Maybe just a "Thanks"? - Ok(RpcResponse::InputThanks) + Ok(DaemonToClientReply::InputThanks) } /// Call $do_out_of_band(command) fn perform_out_of_band( self: Arc, scheduler_client: SchedulerClient, + handler_object: Objid, client_id: Uuid, connection: Objid, command: String, - ) -> Result { + ) -> Result { let Ok(session) = self.clone().new_session(client_id, connection) else { - return Err(RpcRequestError::CreateSessionFailed); + return Err(RpcMessageError::CreateSessionFailed); }; let command_components = parse_into_words(command.as_str()); let task_handle = match scheduler_client.submit_out_of_band_task( + handler_object, connection, command_components, command, @@ -804,7 +1004,7 @@ impl RpcServer { Ok(t) => t, Err(e) => { error!(error = ?e, "Error submitting command task"); - return Err(RpcRequestError::InternalError(e.to_string())); + return Err(RpcMessageError::InternalError(e.to_string())); } }; @@ -812,7 +1012,7 @@ impl RpcServer { // let the session run to completion on its own and output back to the client. // Maybe we should be returning a value from this for the future, but the way clients are // written right now, there's little point. 
- Ok(RpcResponse::CommandSubmitted(task_handle.task_id())) + Ok(DaemonToClientReply::CommandSubmitted(task_handle.task_id())) } fn eval( @@ -821,9 +1021,9 @@ impl RpcServer { client_id: Uuid, connection: Objid, expression: String, - ) -> Result { + ) -> Result { let Ok(session) = self.clone().new_session(client_id, connection) else { - return Err(RpcRequestError::CreateSessionFailed); + return Err(RpcMessageError::CreateSessionFailed); }; let task_handle = match scheduler_client.submit_eval_task( @@ -836,16 +1036,16 @@ impl RpcServer { Ok(t) => t, Err(e) => { error!(error = ?e, "Error submitting eval task"); - return Err(RpcRequestError::InternalError(e.to_string())); + return Err(RpcMessageError::InternalError(e.to_string())); } }; match task_handle.into_receiver().recv() { - Ok(Ok(v)) => Ok(RpcResponse::EvalResult(v)), - Ok(Err(e)) => Err(RpcRequestError::TaskError(e)), + Ok(Ok(v)) => Ok(DaemonToClientReply::EvalResult(v)), + Ok(Err(e)) => Err(RpcMessageError::TaskError(e)), Err(e) => { error!(error = ?e, "Error processing eval"); - Err(RpcRequestError::InternalError(e.to_string())) + Err(RpcMessageError::InternalError(e.to_string())) } } } @@ -858,9 +1058,9 @@ impl RpcServer { object: ObjectRef, verb: Symbol, args: Vec, - ) -> Result { + ) -> Result { let Ok(session) = self.clone().new_session(client_id, connection) else { - return Err(RpcRequestError::CreateSessionFailed); + return Err(RpcMessageError::CreateSessionFailed); }; let task_handle = match scheduler_client.submit_verb_task( @@ -875,14 +1075,14 @@ impl RpcServer { Ok(t) => t, Err(e) => { error!(error = ?e, "Error submitting verb task"); - return Err(RpcRequestError::InternalError(e.to_string())); + return Err(RpcMessageError::InternalError(e.to_string())); } }; let task_id = task_handle.task_id(); let mut th_q = self.task_handles.lock().unwrap(); th_q.insert(task_id, (client_id, task_handle)); - Ok(RpcResponse::CommandSubmitted(task_id)) + Ok(DaemonToClientReply::CommandSubmitted(task_id)) } fn 
program_verb( @@ -893,21 +1093,20 @@ impl RpcServer { object: ObjectRef, verb: Symbol, code: Vec, - ) -> Result { + ) -> Result { if self.clone().new_session(client_id, connection).is_err() { - return Err(RpcRequestError::CreateSessionFailed); + return Err(RpcMessageError::CreateSessionFailed); }; let verb = Symbol::mk_case_insensitive(verb.as_str()); match scheduler_client.submit_verb_program(connection, connection, object, verb, code) { - Ok((obj, verb)) => Ok(RpcResponse::ProgramResponse(VerbProgramResponse::Success( - obj, - verb.to_string(), - ))), - Err(SchedulerError::VerbProgramFailed(f)) => Ok(RpcResponse::ProgramResponse( + Ok((obj, verb)) => Ok(DaemonToClientReply::ProgramResponse( + VerbProgramResponse::Success(obj, verb.to_string()), + )), + Err(SchedulerError::VerbProgramFailed(f)) => Ok(DaemonToClientReply::ProgramResponse( VerbProgramResponse::Failure(f), )), - Err(e) => Err(RpcRequestError::TaskError(e)), + Err(e) => Err(RpcMessageError::TaskError(e)), } } @@ -933,8 +1132,8 @@ impl RpcServer { let publish = self.events_publish.lock().unwrap(); for (task_id, client_id, result) in completed { let result = match result { - Ok(v) => ConnectionEvent::TaskSuccess(v), - Err(e) => ConnectionEvent::TaskError(e), + Ok(v) => ClientEvent::TaskSuccess(v), + Err(e) => ClientEvent::TaskError(e), }; debug!(?client_id, ?task_id, ?result, "Task completed"); let payload = bincode::encode_to_vec(&result, bincode::config::standard()) @@ -955,7 +1154,7 @@ impl RpcServer { let publish = self.events_publish.lock().unwrap(); for (player, event) in events { let client_ids = self.connections.client_ids_for(*player)?; - let event = ConnectionEvent::Narrative(*player, event.clone()); + let event = ClientEvent::Narrative(*player, event.clone()); let event_bytes = bincode::encode_to_vec(&event, bincode::config::standard())?; for client_id in &client_ids { let payload = vec![client_id.as_bytes().to_vec(), event_bytes.clone()]; @@ -974,7 +1173,7 @@ impl RpcServer { player: Objid, 
message: String, ) -> Result<(), SessionError> { - let event = ConnectionEvent::SystemMessage(player, message); + let event = ClientEvent::SystemMessage(player, message); let event_bytes = bincode::encode_to_vec(event, bincode::config::standard()) .expect("Unable to serialize system message"); let payload = vec![client_id.as_bytes().to_vec(), event_bytes]; @@ -1008,7 +1207,7 @@ impl RpcServer { return Err(SessionError::NoConnectionForPlayer(player)); } - let event = ConnectionEvent::RequestInput(input_request_id.as_u128()); + let event = ClientEvent::RequestInput(input_request_id.as_u128()); let event_bytes = bincode::encode_to_vec(event, bincode::config::standard()) .expect("Unable to serialize input request"); let payload = vec![client_id.as_bytes().to_vec(), event_bytes]; @@ -1023,11 +1222,11 @@ impl RpcServer { } fn ping_pong(&self) -> Result<(), SessionError> { - let event = BroadcastEvent::PingPong(SystemTime::now()); + let event = ClientsBroadcastEvent::PingPong(SystemTime::now()); let event_bytes = bincode::encode_to_vec(event, bincode::config::standard()).unwrap(); // We want responses from all clients, so send on this broadcast "topic" - let payload = vec![BROADCAST_TOPIC.to_vec(), event_bytes]; + let payload = vec![CLIENT_BROADCAST_TOPIC.to_vec(), event_bytes]; { let publish = self.events_publish.lock().unwrap(); publish.send_multipart(payload, 0).map_err(|e| { @@ -1036,6 +1235,22 @@ impl RpcServer { })?; } self.connections.ping_check(); + + // while we're here we're also sending HostPings, requesting their list of listeners, + // and their liveness. 
+ let event = HostBroadcastEvent::PingPong(SystemTime::now()); + let event_bytes = bincode::encode_to_vec(event, bincode::config::standard()).unwrap(); + let payload = vec![HOST_BROADCAST_TOPIC.to_vec(), event_bytes]; + { + let publish = self.events_publish.lock().unwrap(); + publish.send_multipart(payload, 0).map_err(|e| { + error!(error = ?e, "Unable to send PingPong to host"); + DeliveryError + })?; + } + + let mut hosts = self.hosts.lock().unwrap(); + hosts.ping_check(HOST_TIMEOUT); Ok(()) } @@ -1079,13 +1294,37 @@ impl RpcServer { AuthToken(token) } - /// Validate the provided PASETO token against the provided client id + /// Validate a provided PASTEO host token. Just verifying that it is a valid token signed + /// with our same keypair. + fn validate_host_token(&self, token: &HostToken) -> Result { + let key: Key<32> = Key::from(&self.keypair[32..]); + let pk: PasetoAsymmetricPublicKey = PasetoAsymmetricPublicKey::from(&key); + let host_type = Paseto::::try_verify( + token.0.as_str(), + &pk, + Footer::from(MOOR_HOST_TOKEN_FOOTER), + None, + ) + .map_err(|e| { + warn!(error = ?e, "Unable to parse/validate token"); + RpcMessageError::PermissionDenied + })?; + + let Some(host_type) = HostType::parse_id_str(host_type.as_str()) else { + warn!("Unable to parse/validate host type in token"); + return Err(RpcMessageError::PermissionDenied); + }; + + Ok(host_type) + } + + /// Validate the provided PASETO client token against the provided client id /// If they do not match, the request is rejected, permissions denied. 
fn validate_client_token( &self, token: ClientToken, client_id: Uuid, - ) -> Result<(), RpcRequestError> { + ) -> Result<(), RpcMessageError> { let key: Key<32> = Key::from(&self.keypair[32..]); let pk: PasetoAsymmetricPublicKey = PasetoAsymmetricPublicKey::from(&key); let verified_token = Paseto::::try_verify( @@ -1096,27 +1335,27 @@ impl RpcServer { ) .map_err(|e| { warn!(error = ?e, "Unable to parse/validate token"); - RpcRequestError::PermissionDenied + RpcMessageError::PermissionDenied })?; let verified_token = serde_json::from_str::(verified_token.as_str()) .map_err(|e| { warn!(error = ?e, "Unable to parse/validate token"); - RpcRequestError::PermissionDenied + RpcMessageError::PermissionDenied })?; // Does the token match the client it came from? If not, reject it. let Some(token_client_id) = verified_token.get("client_id") else { debug!("Token does not contain client_id"); - return Err(RpcRequestError::PermissionDenied); + return Err(RpcMessageError::PermissionDenied); }; let Some(token_client_id) = token_client_id.as_str() else { debug!("Token client_id is null"); - return Err(RpcRequestError::PermissionDenied); + return Err(RpcMessageError::PermissionDenied); }; let Ok(token_client_id) = Uuid::parse_str(token_client_id) else { debug!("Token client_id is not a valid UUID"); - return Err(RpcRequestError::PermissionDenied); + return Err(RpcMessageError::PermissionDenied); }; if client_id != token_client_id { debug!( @@ -1124,7 +1363,7 @@ impl RpcServer { ?token_client_id, "Token client_id does not match client_id" ); - return Err(RpcRequestError::PermissionDenied); + return Err(RpcMessageError::PermissionDenied); } Ok(()) @@ -1140,7 +1379,7 @@ impl RpcServer { &self, token: AuthToken, objid: Option, - ) -> Result { + ) -> Result { let key: Key<32> = Key::from(&self.keypair[32..]); let pk: PasetoAsymmetricPublicKey = PasetoAsymmetricPublicKey::from(&key); let verified_token = Paseto::::try_verify( @@ -1151,30 +1390,30 @@ impl RpcServer { ) .map_err(|e| { 
warn!(error = ?e, "Unable to parse/validate token"); - RpcRequestError::PermissionDenied + RpcMessageError::PermissionDenied })?; let verified_token = serde_json::from_str::(verified_token.as_str()) .map_err(|e| { warn!(error = ?e, "Unable to parse/validate token"); - RpcRequestError::PermissionDenied + RpcMessageError::PermissionDenied }) .unwrap(); let Some(token_player) = verified_token.get("player") else { debug!("Token does not contain player"); - return Err(RpcRequestError::PermissionDenied); + return Err(RpcMessageError::PermissionDenied); }; let Some(token_player) = token_player.as_i64() else { debug!("Token player is not valid"); - return Err(RpcRequestError::PermissionDenied); + return Err(RpcMessageError::PermissionDenied); }; let token_player = Objid(token_player); if let Some(objid) = objid { // Does the 'player' match objid? If not, reject it. if objid != token_player { debug!(?objid, ?token_player, "Token player does not match objid"); - return Err(RpcRequestError::PermissionDenied); + return Err(RpcMessageError::PermissionDenied); } } @@ -1188,6 +1427,95 @@ impl RpcServer { } } +impl SystemControl for RpcServer { + fn shutdown(&self, msg: Option) -> Result<(), moor_values::Error> { + warn!("Shutting down server: {}", msg.unwrap_or_default()); + self.kill_switch.store(true, Ordering::SeqCst); + Ok(()) + } + + fn listen( + &self, + handler_object: Objid, + host_type: &str, + port: u16, + print_messages: bool, + ) -> Result<(), moor_values::Error> { + let host_type = match host_type { + "tcp" => HostType::TCP, + _ => return Err(moor_values::Error::E_INVARG), + }; + + let event = HostBroadcastEvent::Listen { + handler_object, + host_type, + port, + print_messages, + }; + + let event_bytes = bincode::encode_to_vec(event, bincode::config::standard()).unwrap(); + + // We want responses from all clients, so send on this broadcast "topic" + let payload = vec![HOST_BROADCAST_TOPIC.to_vec(), event_bytes]; + { + let publish = 
self.events_publish.lock().unwrap(); + publish + .send_multipart(payload, 0) + .map_err(|e| { + error!(error = ?e, "Unable to send Listen to client"); + DeliveryError + }) + .map_err(|e| { + error!("Could not send Listen event: {}", e); + moor_values::Error::E_INVARG + })?; + } + + // TODO: we should probably wait for a response from the host to make sure it was successful + // this is a bit tricky because the response comes on a different socket (REQ/REP) + // and we can't really block here waiting for it. So we'd need to do something fancy + // with a channel or semaphore, etc. + Ok(()) + } + + fn unlisten(&self, port: u16, host_type: &str) -> Result<(), moor_values::Error> { + let host_type = match host_type { + "tcp" => HostType::TCP, + _ => return Err(moor_values::Error::E_INVARG), + }; + + let event = HostBroadcastEvent::Unlisten { host_type, port }; + + let event_bytes = bincode::encode_to_vec(event, bincode::config::standard()).unwrap(); + + // We want responses from all clients, so send on this broadcast "topic" + let payload = vec![HOST_BROADCAST_TOPIC.to_vec(), event_bytes]; + { + let publish = self.events_publish.lock().unwrap(); + publish + .send_multipart(payload, 0) + .map_err(|e| { + error!(error = ?e, "Unable to send Unlisten to client"); + DeliveryError + }) + .map_err(|e| { + error!("Could not send Unlisten event: {}", e); + moor_values::Error::E_INVARG + })?; + } + Ok(()) + } + + fn listeners(&self) -> Result, moor_values::Error> { + let hosts = self.hosts.lock().unwrap(); + let listeners = hosts + .listeners() + .iter() + .map(|(o, t, h)| (*o, t.id_str().to_string(), h.port(), true)) + .collect(); + Ok(listeners) + } +} impl SessionFactory for RpcServer { fn mk_background_session( self: Arc, diff --git a/crates/daemon/src/rpc_session.rs b/crates/daemon/src/rpc_session.rs index 4acd5eab..0686863f 100644 --- a/crates/daemon/src/rpc_session.rs +++ b/crates/daemon/src/rpc_session.rs @@ -97,8 +97,14 @@ impl Session for RpcSession { Ok(()) } - fn 
shutdown(&self, _msg: Option) -> Result<(), SessionError> { - todo!() + fn notify_shutdown(&self, msg: Option) -> Result<(), SessionError> { + let shutdown_msg = match msg { + Some(msg) => format!("** Server is shutting down: {} **", msg), + None => "** Server is shutting down ** ".to_string(), + }; + self.rpc_server + .send_system_message(self.client_id, self.player, shutdown_msg.clone())?; + Ok(()) + } fn connection_name(&self, player: Objid) -> Result { diff --git a/crates/kernel/src/builtins/bf_server.rs b/crates/kernel/src/builtins/bf_server.rs index 4970aeaf..bfe00441 100644 --- a/crates/kernel/src/builtins/bf_server.rs +++ b/crates/kernel/src/builtins/bf_server.rs @@ -724,21 +724,141 @@ fn bf_function_info(bf_args: &mut BfCallState<'_>) -> Result { } bf_declare!(function_info, bf_function_info); +/// Function: value listen (obj object, point [, print-messages], [host-type]) +/// Start listening for connections on the given port. +/// `object` is the object to call when a connection is established, in lieu of #0 (the system object) +/// if `print-messages` is true, then the server will print messages like ** Connected ** etc to the connection when it establishes +/// if `host-type` is provided, it should be a string, and it will be used to determine the type of host that will be expected to listen. +/// this defaults to "tcp", but other values can include "websocket" +fn bf_listen(bf_args: &mut BfCallState<'_>) -> Result { + // Requires wizard permissions. + bf_args + .task_perms() + .map_err(world_state_bf_err)? 
+ .check_wizard() + .map_err(world_state_bf_err)?; + + if bf_args.args.len() < 2 || bf_args.args.len() > 4 { + return Err(BfErr::Code(E_ARGS)); + } + + let Variant::Obj(object) = bf_args.args[0].variant().clone() else { + return Err(BfErr::Code(E_TYPE)); + }; + + // point is a protocol specific value, but for now we'll just assume it's an integer for port + let Variant::Int(point) = bf_args.args[1].variant().clone() else { + return Err(BfErr::Code(E_TYPE)); + }; + + if point < 0 || point > (u16::MAX as i64) { + return Err(BfErr::Code(E_INVARG)); + } + + let port = point as u16; + + let print_messages = if bf_args.args.len() >= 3 { + let Variant::Int(print_messages) = bf_args.args[2].variant().clone() else { + return Err(BfErr::Code(E_TYPE)); + }; + print_messages == 1 + } else { + false + }; + + let host_type = if bf_args.args.len() == 4 { + let Variant::Str(host_type) = bf_args.args[3].variant().clone() else { + return Err(BfErr::Code(E_TYPE)); + }; + host_type.as_string().clone() + } else { + "tcp".to_string() + }; + + // Ask the scheduler to broadcast a listen request out to all the hosts. + if let Some(error) = + bf_args + .task_scheduler_client + .listen(object, host_type, port, print_messages) + { + return Err(BfErr::Code(error)); + } + + // "Listen() returns canon, a `canonicalized' version of point, with any configuration-specific defaulting or aliasing accounted for. " + // Uh, ok for now we'll just return the port. + Ok(Ret(v_int(port as i64))) +} + +bf_declare!(listen, bf_listen); + fn bf_listeners(bf_args: &mut BfCallState<'_>) -> Result { + // Requires wizard permissions. + bf_args + .task_perms() + .map_err(world_state_bf_err)? 
+ .check_wizard() + .map_err(world_state_bf_err)?; + if !bf_args.args.is_empty() { return Err(BfErr::Code(E_ARGS)); } - // TODO: Return something better from bf_listeners, rather than hardcoded value - // this function is hardcoded to just return {{#0, 7777, 1}} - // this is on account that existing cores expect this to be the case - // but we have no intend of supporting other network listener magic at this point - let listeners = v_list(&[v_list(&[v_int(0), v_int(7777), v_int(1)])]); + let listeners = bf_args.task_scheduler_client.listeners(); + let listeners = listeners.iter().map(|listener| { + let print_messages = if listener.3 { v_int(1) } else { v_int(0) }; + v_list(&[ + v_objid(listener.0), + v_int(listener.2 as i64), + print_messages, + ]) + }); + + let listeners = v_list_iter(listeners); Ok(Ret(listeners)) } bf_declare!(listeners, bf_listeners); +// unlisten(port, [host-type]) +fn bf_unlisten(bf_args: &mut BfCallState<'_>) -> Result { + // Requires wizard permissions. + bf_args + .task_perms() + .map_err(world_state_bf_err)? 
+ .check_wizard() + .map_err(world_state_bf_err)?; + + if bf_args.args.is_empty() || bf_args.args.len() > 2 { + return Err(BfErr::Code(E_ARGS)); + } + + // point is a protocol specific value, but for now we'll just assume it's an integer for port + let Variant::Int(point) = bf_args.args[0].variant().clone() else { + return Err(BfErr::Code(E_TYPE)); + }; + + if point < 0 || point > (u16::MAX as i64) { + return Err(BfErr::Code(E_INVARG)); + } + + let port = point as u16; + let host_type = if bf_args.args.len() == 2 { + let Variant::Str(host_type) = bf_args.args[1].variant().clone() else { + return Err(BfErr::Code(E_TYPE)); + }; + host_type.as_string().clone() + } else { + "tcp".to_string() + }; + + if let Some(err) = bf_args.task_scheduler_client.unlisten(host_type, port) { + return Err(BfErr::Code(err)); + } + + Ok(Ret(v_none())) +} +bf_declare!(unlisten, bf_unlisten); + pub const BF_SERVER_EVAL_TRAMPOLINE_START_INITIALIZE: usize = 0; pub const BF_SERVER_EVAL_TRAMPOLINE_RESUME: usize = 1; @@ -928,6 +1048,8 @@ pub(crate) fn register_bf_server(builtins: &mut [Box]) { builtins[offset_for_builtin("server_log")] = Box::new(BfServerLog {}); builtins[offset_for_builtin("function_info")] = Box::new(BfFunctionInfo {}); builtins[offset_for_builtin("listeners")] = Box::new(BfListeners {}); + builtins[offset_for_builtin("listen")] = Box::new(BfListen {}); + builtins[offset_for_builtin("unlisten")] = Box::new(BfUnlisten {}); builtins[offset_for_builtin("eval")] = Box::new(BfEval {}); builtins[offset_for_builtin("read")] = Box::new(BfRead {}); builtins[offset_for_builtin("dump_database")] = Box::new(BfDumpDatabase {}); diff --git a/crates/kernel/src/tasks/mod.rs b/crates/kernel/src/tasks/mod.rs index 3b46de6c..91702b6b 100644 --- a/crates/kernel/src/tasks/mod.rs +++ b/crates/kernel/src/tasks/mod.rs @@ -244,7 +244,7 @@ pub mod scheduler_test_utils { use std::time::Duration; use moor_values::tasks::{CommandError, SchedulerError}; - 
use moor_values::{Error::E_VERBNF, Objid, Var, SYSTEM_OBJECT}; use super::TaskHandle; use crate::config::Config; @@ -286,7 +286,7 @@ pub mod scheduler_test_utils { player: Objid, command: &str, ) -> Result { - execute(|| scheduler.submit_command_task(player, command, session)) + execute(|| scheduler.submit_command_task(SYSTEM_OBJECT, player, command, session)) } pub fn call_eval( @@ -305,12 +305,24 @@ pub mod scheduler_test_utils { pub enum TaskStart { /// The scheduler is telling the task to parse a command and execute whatever verbs are /// associated with it. - StartCommandVerb { player: Objid, command: String }, + StartCommandVerb { + /// The object that will handle the command, usually #0 (the system object), but can + /// be a connection handler passed from `listen()`. + handler_object: Objid, + player: Objid, + command: String, + }, /// The task start has been turned into an invocation to $do_command, which is a verb on the /// system object that is called when a player types a command. If it returns true, all is /// well and we just return. If it returns false, we intercept and turn it back into a /// StartCommandVerb and dispatch it as an old school parsed command. - StartDoCommand { player: Objid, command: String }, + StartDoCommand { + /// The object that will handle the command, usually #0 (the system object), but can + /// be a connection handler passed from `listen()`. + handler_object: Objid, + player: Objid, + command: String, + }, /// The scheduler is telling the task to run a (method) verb. 
StartVerb { player: Objid, diff --git a/crates/kernel/src/tasks/scheduler.rs b/crates/kernel/src/tasks/scheduler.rs index 4ace6dd7..74449d07 100644 --- a/crates/kernel/src/tasks/scheduler.rs +++ b/crates/kernel/src/tasks/scheduler.rs @@ -45,7 +45,7 @@ use crate::matching::match_env::MatchEnvironmentParseMatcher; use crate::matching::ws_match_env::WsMatchEnv; use crate::tasks::command_parse::ParseMatcher; use crate::tasks::scheduler_client::{SchedulerClient, SchedulerClientMsg}; -use crate::tasks::sessions::{Session, SessionFactory}; +use crate::tasks::sessions::{Session, SessionFactory, SystemControl}; use crate::tasks::suspension::{SuspensionQ, WakeCondition}; use crate::tasks::task::Task; use crate::tasks::task_scheduler_client::{TaskControlMsg, TaskSchedulerClient}; @@ -94,6 +94,8 @@ pub struct Scheduler { builtin_registry: Arc, + system_control: Arc, + /// The internal task queue which holds our suspended tasks, and control records for actively /// running tasks. /// This is in a lock to allow interior mutability for the scheduler loop, but is only ever @@ -147,6 +149,7 @@ impl Scheduler { database: Box, tasks_database: Box, config: Arc, + system_control: Arc, ) -> Self { let (task_control_sender, task_control_receiver) = crossbeam_channel::unbounded(); let (scheduler_sender, scheduler_receiver) = crossbeam_channel::unbounded(); @@ -175,6 +178,7 @@ impl Scheduler { scheduler_receiver, builtin_registry, server_options: default_server_options, + system_control, } } @@ -341,12 +345,14 @@ impl Scheduler { let task_q = &mut self.task_q; match msg { SchedulerClientMsg::SubmitCommandTask { + handler_object, player, command, session, reply, } => { let task_start = Arc::new(TaskStart::StartCommandVerb { + handler_object, player, command: command.to_string(), }); @@ -463,6 +469,7 @@ impl Scheduler { reply.send(response).expect("Could not send input reply"); } SchedulerClientMsg::SubmitOobTask { + handler_object, player, command, argstr, @@ -472,7 +479,7 @@ impl Scheduler 
{ let args = command.into_iter().map(v_string).collect::>(); let task_start = Arc::new(TaskStart::StartVerb { player, - vloc: SYSTEM_OBJECT, + vloc: handler_object, verb: *DO_OUT_OF_BAND_COMMAND, args, argstr, @@ -1108,6 +1115,52 @@ impl Scheduler { return task_q.send_task_result(task_id, Err(TaskAbortedError)); }; } + TaskControlMsg::GetListeners(reply) => { + let listeners = self + .system_control + .listeners() + .expect("Could not get listeners"); + if let Err(e) = reply.send(listeners) { + error!(?e, "Could not send listeners to requester"); + } + } + TaskControlMsg::Listen { + handler_object, + host_type, + port, + print_messages, + reply, + } => { + let Some(_task) = task_q.tasks.get_mut(&task_id) else { + warn!(task_id, "Task not found for listen request"); + return; + }; + let result = match self.system_control.listen( + handler_object, + &host_type, + port, + print_messages, + ) { + Ok(()) => None, + Err(e) => Some(e), + }; + reply.send(result).expect("Could not send listen reply"); + } + TaskControlMsg::Unlisten { + host_type, + port, + reply, + } => { + let Some(_task) = task_q.tasks.get_mut(&task_id) else { + warn!(task_id, "Task not found for unlisten request"); + return; + }; + let result = match self.system_control.unlisten(port, &host_type) { + Ok(_) => None, + Err(_) => Some(E_PERM), + }; + reply.send(result).expect("Could not send unlisten reply"); + } TaskControlMsg::Shutdown(msg) => { info!("Shutting down scheduler. Reason: {msg:?}"); self.stop(msg) @@ -1225,7 +1278,7 @@ impl Scheduler { fn stop(&mut self, msg: Option) -> Result<(), SchedulerError> { // Send shutdown notification to all live tasks. 
for (_, task) in self.task_q.tasks.iter() { - let _ = task.session.shutdown(msg.clone()); + let _ = task.session.notify_shutdown(msg.clone()); } warn!("Issuing clean shutdown..."); { @@ -1246,6 +1299,11 @@ impl Scheduler { yield_now(); } + // Now ask the rpc server and hosts to shutdown + self.system_control + .shutdown(msg) + .expect("Could not cleanly shutdown system"); + warn!("All tasks finished. Stopping scheduler."); self.running = false; diff --git a/crates/kernel/src/tasks/scheduler_client.rs b/crates/kernel/src/tasks/scheduler_client.rs index 6acd2e75..0e82db0b 100644 --- a/crates/kernel/src/tasks/scheduler_client.rs +++ b/crates/kernel/src/tasks/scheduler_client.rs @@ -45,6 +45,7 @@ impl SchedulerClient { #[instrument(skip(self, session))] pub fn submit_command_task( &self, + handler_object: Objid, player: Objid, command: &str, session: Arc, @@ -53,6 +54,7 @@ impl SchedulerClient { let (reply, receive) = oneshot::channel(); self.scheduler_sender .send(SchedulerClientMsg::SubmitCommandTask { + handler_object, player, command: command.to_string(), session, @@ -129,6 +131,7 @@ impl SchedulerClient { #[instrument(skip(self, session))] pub fn submit_out_of_band_task( &self, + handler_object: Objid, player: Objid, command: Vec, argstr: String, @@ -138,6 +141,7 @@ impl SchedulerClient { let (reply, receive) = oneshot::channel(); self.scheduler_sender .send(SchedulerClientMsg::SubmitOobTask { + handler_object, player, command, argstr, @@ -355,6 +359,7 @@ impl SchedulerClient { pub enum SchedulerClientMsg { /// Submit a command to be executed by the player. 
SubmitCommandTask { + handler_object: Objid, player: Objid, command: String, session: Arc, @@ -380,6 +385,7 @@ pub enum SchedulerClientMsg { }, /// Submit an out-of-band task to be executed SubmitOobTask { + handler_object: Objid, player: Objid, command: Vec, argstr: String, diff --git a/crates/kernel/src/tasks/sessions.rs b/crates/kernel/src/tasks/sessions.rs index 841b47f6..4f311bc5 100644 --- a/crates/kernel/src/tasks/sessions.rs +++ b/crates/kernel/src/tasks/sessions.rs @@ -18,7 +18,7 @@ use thiserror::Error; use uuid::Uuid; use moor_values::tasks::NarrativeEvent; -use moor_values::Objid; +use moor_values::{Error, Objid}; /// The interface for managing the user I/O connection side of state, exposed by the scheduler to /// the VM during execution and by the host server to the scheduler. @@ -82,8 +82,8 @@ pub trait Session: Send + Sync { /// across multiple connections, etc. fn send_system_msg(&self, player: Objid, msg: &str) -> Result<(), SessionError>; - /// Process a (wizard) request for system shutdown, with an optional shutdown message. - fn shutdown(&self, msg: Option) -> Result<(), SessionError>; + /// Let the player know that the server is shutting down, with an optional message. + fn notify_shutdown(&self, msg: Option) -> Result<(), SessionError>; /// The 'name' of the *most recent* connection associated with the player. /// In a networked environment this is the hostname. @@ -103,6 +103,29 @@ pub trait Session: Send + Sync { fn idle_seconds(&self, player: Objid) -> Result; } +/// A handle back to the controlling process (e.g. RpcServer) for handling system level events, +/// such as shutdown, listen(), etc. +/// +pub trait SystemControl: Send + Sync { + /// Process a (wizard) request for system shutdown, with an optional shutdown message. + fn shutdown(&self, msg: Option) -> Result<(), Error>; + + /// Ask hosts of `host_type` to listen on the given port, with the given handler object. 
+ fn listen( + &self, + handler_object: Objid, + host_type: &str, + port: u16, + print_messages: bool, + ) -> Result<(), Error>; + + /// Ask hosts of `host_type` to stop listening on the given port. + fn unlisten(&self, port: u16, host_type: &str) -> Result<(), Error>; + + /// Return the set of listeners, their type, and the port they are listening on. + fn listeners(&self) -> Result, Error>; +} + /// A factory for creating background sessions, usually on task resumption on server restart. pub trait SessionFactory { fn mk_background_session( @@ -165,9 +188,10 @@ impl Session for NoopClientSession { Ok(()) } - fn shutdown(&self, _msg: Option) -> Result<(), SessionError> { + fn notify_shutdown(&self, _msg: Option) -> Result<(), SessionError> { Ok(()) } + fn connection_name(&self, player: Objid) -> Result { Ok(format!("player-{}", player.0)) } @@ -187,6 +211,32 @@ impl Session for NoopClientSession { } } +#[derive(Default)] +pub struct NoopSystemControl {} + +impl SystemControl for NoopSystemControl { + fn shutdown(&self, _msg: Option) -> Result<(), Error> { + Ok(()) + } + + fn listen( + &self, + _handler_object: Objid, + _host_type: &str, + _port: u16, + _print_messages: bool, + ) -> Result<(), Error> { + Ok(()) + } + + fn unlisten(&self, _port: u16, _host_type: &str) -> Result<(), Error> { + Ok(()) + } + + fn listeners(&self) -> Result, Error> { + Ok(vec![]) + } +} /// A 'mock' client connection which collects output in a vector of strings that tests can use to /// verify output. 
/// For now that's all it does, but facilities for pretending players are connected, mocking @@ -270,7 +320,7 @@ impl Session for MockClientSession { Ok(()) } - fn shutdown(&self, msg: Option) -> Result<(), SessionError> { + fn notify_shutdown(&self, msg: Option) -> Result<(), SessionError> { let mut system = self.system.write().unwrap(); if let Some(msg) = msg { system.push(format!("shutdown: {}", msg)); @@ -302,3 +352,33 @@ impl Session for MockClientSession { Ok(0.0) } } + +impl SystemControl for MockClientSession { + fn shutdown(&self, _msg: Option) -> Result<(), Error> { + let mut system = self.system.write().unwrap(); + system.push(String::from("shutdown")); + Ok(()) + } + + fn listen( + &self, + _handler_object: Objid, + _host_type: &str, + _port: u16, + _print_messages: bool, + ) -> Result<(), Error> { + let mut system = self.system.write().unwrap(); + system.push(String::from("listen")); + Ok(()) + } + + fn unlisten(&self, port: u16, host_type: &str) -> Result<(), Error> { + let mut system = self.system.write().unwrap(); + system.push(format!("unlisten: {} {}", host_type, port)); + Ok(()) + } + + fn listeners(&self) -> Result, Error> { + Ok(vec![(Objid(0), String::from("tcp"), 8888, true)]) + } +} diff --git a/crates/kernel/src/tasks/task.rs b/crates/kernel/src/tasks/task.rs index fc14a2fa..e8cbc679 100644 --- a/crates/kernel/src/tasks/task.rs +++ b/crates/kernel/src/tasks/task.rs @@ -236,11 +236,17 @@ impl Task { // Special case: in case of return from $do_command @ top-level, we need to look at the results: // non-true value? => parse_command and restart. // true value? => commit and return success. - if let TaskStart::StartDoCommand { player, command } = &self.task_start.as_ref() { + if let TaskStart::StartDoCommand { + handler_object, + player, + command, + } = &self.task_start.as_ref() + { let (player, command) = (*player, command.clone()); if !result.is_true() { // Intercept and rewrite us back to StartVerbCommand and do old school parse. 
self.task_start = Arc::new(TaskStart::StartCommandVerb { + handler_object: *handler_object, player, command: command.clone(), }); @@ -326,8 +332,14 @@ impl Task { match self.task_start.clone().as_ref() { // We've been asked to start a command. // We need to set up the VM and then execute it. - TaskStart::StartCommandVerb { player, command } => { - if let Err(e) = self.start_command(*player, command.as_str(), world_state) { + TaskStart::StartCommandVerb { + handler_object, + player, + command, + } => { + if let Err(e) = + self.start_command(*handler_object, *player, command.as_str(), world_state) + { control_sender .send((self.task_id, TaskControlMsg::TaskCommandError(e))) .expect("Could not send start response"); @@ -409,6 +421,7 @@ impl Task { fn start_command( &mut self, + handler_object: Objid, player: Objid, command: &str, world_state: &mut dyn WorldState, @@ -440,16 +453,17 @@ impl Task { let args = arguments.iter().map(|s| v_str(s)).collect::>(); let verb_call = VerbCall { verb_name: Symbol::mk("do_command"), - location: SYSTEM_OBJECT, - this: SYSTEM_OBJECT, + location: handler_object, + this: handler_object, player, args, argstr: command.to_string(), - caller: SYSTEM_OBJECT, + caller: handler_object, }; self.vm_host .start_call_method_verb(self.task_id, self.perms, verb_info, verb_call); self.task_start = Arc::new(TaskStart::StartDoCommand { + handler_object, player, command: command.to_string(), }); @@ -780,6 +794,7 @@ mod tests { Receiver<(TaskId, TaskControlMsg)>, ) { let task_start = Arc::new(TaskStart::StartCommandVerb { + handler_object: SYSTEM_OBJECT, player: SYSTEM_OBJECT, command: command.to_string(), }); diff --git a/crates/kernel/src/tasks/task_scheduler_client.rs b/crates/kernel/src/tasks/task_scheduler_client.rs index 26968da0..af1ec8e2 100644 --- a/crates/kernel/src/tasks/task_scheduler_client.rs +++ b/crates/kernel/src/tasks/task_scheduler_client.rs @@ -12,19 +12,18 @@ // this program. If not, see . 
// -use std::time::Instant; +use std::time::{Duration, Instant}; use crossbeam_channel::Sender; +use crate::tasks::task::Task; +use crate::tasks::TaskDescription; +use crate::vm::Fork; use moor_values::model::Perms; use moor_values::tasks::{AbortLimitReason, CommandError, Exception, NarrativeEvent, TaskId}; -use moor_values::Objid; use moor_values::Symbol; use moor_values::Var; - -use crate::tasks::task::Task; -use crate::tasks::TaskDescription; -use crate::vm::Fork; +use moor_values::{Error, Objid}; /// A handle for talking to the scheduler from within a task. #[derive(Clone)] @@ -192,6 +191,59 @@ impl TaskSchedulerClient { .expect("Could not deliver client message -- scheduler shut down?"); } + pub fn listen( + &self, + handler_object: Objid, + host_type: String, + port: u16, + print_messages: bool, + ) -> Option { + let (reply, receive) = oneshot::channel(); + self.scheduler_sender + .send(( + self.task_id, + TaskControlMsg::Listen { + reply, + handler_object, + host_type, + port, + print_messages, + }, + )) + .expect("Unable to send listen message to scheduler"); + + receive + .recv_timeout(Duration::from_secs(5)) + .expect("Listen message timed out") + } + + pub fn listeners(&self) -> Vec<(Objid, String, u16, bool)> { + let (reply, receive) = oneshot::channel(); + self.scheduler_sender + .send((self.task_id, TaskControlMsg::GetListeners(reply))) + .expect("Could not deliver client message -- scheduler shut down?"); + receive + .recv() + .expect("Could not receive listeners -- scheduler shut down?") + } + + pub fn unlisten(&self, host_type: String, port: u16) -> Option { + let (reply, receive) = oneshot::channel(); + self.scheduler_sender + .send(( + self.task_id, + TaskControlMsg::Unlisten { + host_type, + port, + reply, + }, + )) + .expect("Could not deliver client message -- scheduler shut down?"); + receive + .recv() + .expect("Could not receive unlisten reply -- scheduler shut down?") + } + /// Request that the server refresh its set of information off 
$server_options pub fn refresh_server_options(&self) { self.scheduler_sender @@ -247,13 +299,32 @@ pub enum TaskControlMsg { result_sender: oneshot::Sender, }, /// Task is requesting that the scheduler boot a player. - BootPlayer { player: Objid }, + BootPlayer { + player: Objid, + }, /// Task is requesting that a textdump checkpoint happen, to the configured file. Checkpoint, Notify { player: Objid, event: NarrativeEvent, }, + GetListeners(oneshot::Sender>), + /// Ask hosts to listen for connections on `port` and send them to `handler_object` + /// `print_messages` is a flag to enable or disable printing of connected etc strings + /// `host_type` is a string identifying the type of host + Listen { + handler_object: Objid, + host_type: String, + port: u16, + print_messages: bool, + reply: oneshot::Sender>, + }, + /// Ask hosts of type `host_type` to stop listening on `port` + Unlisten { + host_type: String, + port: u16, + reply: oneshot::Sender>, + }, /// Request that the server refresh its set of information off $server_options RefreshServerOptions, /// Task requesting shutdown diff --git a/crates/kernel/src/vm/moo_execute.rs b/crates/kernel/src/vm/moo_execute.rs index 0c5bd3a9..3fa02424 100644 --- a/crates/kernel/src/vm/moo_execute.rs +++ b/crates/kernel/src/vm/moo_execute.rs @@ -642,12 +642,12 @@ pub fn moo_frame_execute( // first argument. // e.g. 
"blah":reverse() becomes $string:reverse("blah") let sysprop_sym = match non_obj { - Variant::Int(_) => INTEGER_SYM.clone(), - Variant::Float(_) => FLOAT_SYM.clone(), - Variant::Str(_) => STRING_SYM.clone(), - Variant::List(_) => LIST_SYM.clone(), - Variant::Map(_) => MAP_SYM.clone(), - Variant::Err(_) => ERROR_SYM.clone(), + Variant::Int(_) => *INTEGER_SYM, + Variant::Float(_) => *FLOAT_SYM, + Variant::Str(_) => *STRING_SYM, + Variant::List(_) => *LIST_SYM, + Variant::Map(_) => *MAP_SYM, + Variant::Err(_) => *ERROR_SYM, _ => { return state.push_error(E_TYPE); } diff --git a/crates/kernel/testsuite/moot_suite.rs b/crates/kernel/testsuite/moot_suite.rs index 74261546..7eadba40 100644 --- a/crates/kernel/testsuite/moot_suite.rs +++ b/crates/kernel/testsuite/moot_suite.rs @@ -25,7 +25,7 @@ use common::create_relbox_db; use common::{create_wiredtiger_db, testsuite_dir}; use moor_compiler::to_literal; use moor_db::Database; -use moor_kernel::tasks::sessions::{SessionError, SessionFactory}; +use moor_kernel::tasks::sessions::{NoopSystemControl, SessionError, SessionFactory}; use moor_kernel::tasks::NoopTasksDb; use moor_kernel::{ config::Config, @@ -138,7 +138,12 @@ fn test(db: Box, path: &Path) { return; } let tasks_db = Box::new(NoopTasksDb {}); - let scheduler = Scheduler::new(db, tasks_db, Arc::new(Config::default())); + let scheduler = Scheduler::new( + db, + tasks_db, + Arc::new(Config::default()), + Arc::new(NoopSystemControl::default()), + ); let scheduler_client = scheduler.client().unwrap(); let session_factory = Arc::new(NoopSessionFactory {}); let scheduler_loop_jh = std::thread::Builder::new() diff --git a/crates/rpc-async-client/Cargo.toml b/crates/rpc-async-client/Cargo.toml index 4af408d4..863abdcf 100644 --- a/crates/rpc-async-client/Cargo.toml +++ b/crates/rpc-async-client/Cargo.toml @@ -13,10 +13,14 @@ description = "Utilities for connection to the 0MQ RPC server via tokio/async ca [dependencies] # Own +moor-values = { path = "../values" } rpc-common = 
{ path = "../rpc-common" } bincode.workspace = true futures-util.workspace = true +rusty_paseto.workspace = true +thiserror.workspace = true tmq.workspace = true +tokio.workspace = true tracing.workspace = true uuid.workspace = true diff --git a/crates/rpc-async-client/src/lib.rs b/crates/rpc-async-client/src/lib.rs index 57dec311..a710e7ea 100644 --- a/crates/rpc-async-client/src/lib.rs +++ b/crates/rpc-async-client/src/lib.rs @@ -11,6 +11,207 @@ // You should have received a copy of the GNU General Public License along with // this program. If not, see . // +#![allow(clippy::too_many_arguments)] +use crate::rpc_client::RpcSendClient; +use rpc_common::{ + DaemonToHostReply, HostBroadcastEvent, HostToDaemonMessage, HostToken, HostType, ReplyResult, + RpcError, HOST_BROADCAST_TOPIC, MOOR_HOST_TOKEN_FOOTER, +}; +use rusty_paseto::prelude::{Footer, Key, Paseto, PasetoAsymmetricPrivateKey, Payload, Public, V4}; +use std::net::SocketAddr; +use std::sync::atomic::AtomicBool; +use std::sync::Arc; +use std::time::SystemTime; +use tmq::request; +use tracing::{error, info, warn}; + +use crate::pubsub_client::hosts_events_recv; +pub use listeners::{ListenersClient, ListenersError, ListenersMessage}; + +mod listeners; pub mod pubsub_client; pub mod rpc_client; + +/// Construct a PASETO token for this host, to authenticate the host itself to the daemon. 
+pub fn make_host_token(keypair: &Key<64>, host_type: HostType) -> HostToken { + let privkey: PasetoAsymmetricPrivateKey = PasetoAsymmetricPrivateKey::from(keypair); + let token = Paseto::::default() + .set_footer(Footer::from(MOOR_HOST_TOKEN_FOOTER)) + .set_payload(Payload::from(host_type.id_str())) + .try_sign(&privkey) + .expect("Unable to build Paseto host token"); + + HostToken(token) +} + +pub async fn send_host_to_daemon_msg( + rpc_client: &mut RpcSendClient, + host_token: &HostToken, + msg: HostToDaemonMessage, +) -> Result { + match rpc_client.make_host_rpc_call(host_token, msg).await { + Ok(ReplyResult::HostSuccess(msg)) => Ok(msg), + Ok(ReplyResult::Failure(f)) => Err(RpcError::CouldNotSend(f.to_string())), + Ok(m) => Err(RpcError::UnexpectedReply(format!( + "Unexpected reply from daemon: {:?}", + m + ))), + Err(e) => { + error!("Error communicating with daemon: {}", e); + Err(RpcError::CouldNotSend(e.to_string())) + } + } +} + +/// Start the host session with the daemon, and return the RPC client to use for further +/// communication. +pub async fn start_host_session( + host_token: HostToken, + zmq_ctx: tmq::Context, + rpc_address: String, + kill_switch: Arc, + listeners: ListenersClient, +) -> Result { + // Establish the initial connection to the daemon, and send the host token and our initial + // listener list. + let rpc_client = loop { + let rpc_request_sock = request(&zmq_ctx) + .set_rcvtimeo(100) + .set_sndtimeo(100) + .connect(rpc_address.as_str()) + .expect("Unable to bind RPC server for connection"); + + // And let the RPC server know we're here, and it should start sending events on the + // narrative subscription. 
+ let mut rpc_client = RpcSendClient::new(rpc_request_sock); + + info!("Registering host with daemon..."); + let host_hello = HostToDaemonMessage::RegisterHost( + SystemTime::now(), + HostType::TCP, + listeners + .get_listeners() + .await + .map_err(|e| RpcError::CouldNotSend(e.to_string()))?, + ); + match send_host_to_daemon_msg(&mut rpc_client, &host_token, host_hello).await { + Ok(DaemonToHostReply::Ack) => { + info!("Host token accepted by daemon."); + break rpc_client; + } + Ok(DaemonToHostReply::Reject(reason)) => { + error!("Daemon has rejected this host: {}. Shutting down.", reason); + kill_switch.store(true, std::sync::atomic::Ordering::SeqCst); + return Err(RpcError::AuthenticationError(format!( + "Daemon rejected host token: {}", + reason + ))); + } + Err(e) => { + warn!("Error communicating with daemon: {} to send host token", e); + tokio::time::sleep(tokio::time::Duration::from_secs(5)).await; + continue; + } + } + }; + Ok(rpc_client) +} + +pub async fn proces_hosts_events( + mut rpc_client: RpcSendClient, + host_token: HostToken, + zmq_ctx: tmq::Context, + events_zmq_address: String, + listen_address: String, + kill_switch: Arc, + listeners: ListenersClient, + our_host_type: HostType, +) -> Result<(), RpcError> { + // Handle inbound events from the daemon specifically to the host + let events_sub = tmq::subscribe(&zmq_ctx) + .connect(&events_zmq_address) + .expect("Unable to connect host events subscriber "); + let mut events_sub = events_sub.subscribe(HOST_BROADCAST_TOPIC).unwrap(); + + loop { + if kill_switch.load(std::sync::atomic::Ordering::Relaxed) { + info!("Kill switch activated, stopping..."); + return Ok(()); + } + let msg = hosts_events_recv(&mut events_sub).await?; + + match msg { + HostBroadcastEvent::PingPong(_) => { + // Respond with a HostPong + let host_pong = HostToDaemonMessage::HostPong( + SystemTime::now(), + our_host_type, + listeners + .get_listeners() + .await + .map_err(|e| RpcError::CouldNotSend(e.to_string()))?, + ); + match 
send_host_to_daemon_msg(&mut rpc_client, &host_token, host_pong).await { + Ok(DaemonToHostReply::Ack) => { + // All good + } + Ok(DaemonToHostReply::Reject(reason)) => { + error!("Daemon has rejected this host: {}. Shutting down.", reason); + kill_switch.store(true, std::sync::atomic::Ordering::SeqCst); + } + Err(e) => { + warn!( + "Error communicating with daemon: {} to respond to ping: {:?}", + e, msg + ); + } + } + } + HostBroadcastEvent::Listen { + handler_object, + host_type, + port, + print_messages: _, + } => { + if host_type == our_host_type { + let listen_addr = format!("{}:{}", listen_address, port); + let sockaddr = listen_addr.parse::().unwrap(); + info!( + "Starting listener for {} on {}", + host_type.id_str(), + sockaddr + ); + let listeners = listeners.clone(); + tokio::spawn(async move { + let sockaddr_sockaddr = listen_addr + .parse::() + .unwrap_or_else(|_| panic!("Unable to parse address: {}", listen_addr)); + if let Err(e) = listeners + .add_listener(handler_object, sockaddr_sockaddr) + .await + { + error!("Error starting listener: {}", e); + } + }); + } + } + HostBroadcastEvent::Unlisten { host_type, port } => { + if host_type == our_host_type { + // Stop listening on the given port, on `listen_address`. 
+ let listen_addr = format!("{}:{}", listen_address, port); + let sockaddr = listen_addr.parse::().unwrap(); + info!( + "Stopping listener for {} on {}", + host_type.id_str(), + sockaddr + ); + listeners + .remove_listener(sockaddr) + .await + .expect("Unable to stop listener"); + } + } + } + } +} diff --git a/crates/rpc-async-client/src/listeners.rs b/crates/rpc-async-client/src/listeners.rs new file mode 100644 index 00000000..6df684de --- /dev/null +++ b/crates/rpc-async-client/src/listeners.rs @@ -0,0 +1,59 @@ +use moor_values::Objid; +use std::net::SocketAddr; + +#[derive(Debug, thiserror::Error)] +pub enum ListenersError { + #[error("Failed to add listener {0:?} at {1}")] + AddListenerFailed(Objid, SocketAddr), + #[error("Failed to remove listener at {0}")] + RemoveListenerFailed(SocketAddr), + #[error("Failed to get listeners")] + GetListenersFailed, +} + +/// A client for talking to a host-specific backend for managing the set of listeners. +#[derive(Clone)] +pub struct ListenersClient { + listeners_channel: tokio::sync::mpsc::Sender, +} + +pub enum ListenersMessage { + AddListener(Objid, SocketAddr), + RemoveListener(SocketAddr), + GetListeners(tokio::sync::oneshot::Sender>), +} + +impl ListenersClient { + pub fn new(listeners_channel: tokio::sync::mpsc::Sender) -> Self { + Self { listeners_channel } + } + + pub async fn add_listener( + &self, + handler: Objid, + addr: SocketAddr, + ) -> Result<(), ListenersError> { + self.listeners_channel + .send(ListenersMessage::AddListener(handler, addr)) + .await + .map_err(|_| ListenersError::AddListenerFailed(handler, addr))?; + Ok(()) + } + + pub async fn remove_listener(&self, addr: SocketAddr) -> Result<(), ListenersError> { + self.listeners_channel + .send(ListenersMessage::RemoveListener(addr)) + .await + .map_err(|_| ListenersError::RemoveListenerFailed(addr))?; + Ok(()) + } + + pub async fn get_listeners(&self) -> Result, ListenersError> { + let (tx, rx) = tokio::sync::oneshot::channel(); + 
self.listeners_channel + .send(ListenersMessage::GetListeners(tx)) + .await + .map_err(|_| ListenersError::GetListenersFailed)?; + rx.await.map_err(|_| ListenersError::GetListenersFailed) + } +} diff --git a/crates/rpc-async-client/src/pubsub_client.rs b/crates/rpc-async-client/src/pubsub_client.rs index e42ccf94..c650b920 100644 --- a/crates/rpc-async-client/src/pubsub_client.rs +++ b/crates/rpc-async-client/src/pubsub_client.rs @@ -18,12 +18,12 @@ use tmq::subscribe::Subscribe; use tracing::trace; use uuid::Uuid; -use rpc_common::{BroadcastEvent, ConnectionEvent, RpcError}; +use rpc_common::{ClientEvent, ClientsBroadcastEvent, HostBroadcastEvent, RpcError}; pub async fn events_recv( client_id: Uuid, subscribe: &mut Subscribe, -) -> Result { +) -> Result { let Some(Ok(mut inbound)) = subscribe.next().await else { return Err(RpcError::CouldNotReceive( "Unable to receive published event".to_string(), @@ -54,14 +54,14 @@ pub async fn events_recv( } let decode_result = bincode::decode_from_slice(event.as_ref(), bincode::config::standard()); - let (msg, _msg_size): (ConnectionEvent, usize) = decode_result.map_err(|e| { + let (msg, _msg_size): (ClientEvent, usize) = decode_result.map_err(|e| { RpcError::CouldNotDecode(format!("Unable to decode published event: {}", e)) })?; Ok(msg) } -pub async fn broadcast_recv(subscribe: &mut Subscribe) -> Result { +pub async fn broadcast_recv(subscribe: &mut Subscribe) -> Result { let Some(Ok(mut inbound)) = subscribe.next().await else { return Err(RpcError::CouldNotReceive( "Unable to receive broadcast message".to_string(), @@ -95,9 +95,50 @@ pub async fn broadcast_recv(subscribe: &mut Subscribe) -> Result Result { + let Some(Ok(mut inbound)) = subscribe.next().await else { + return Err(RpcError::CouldNotReceive( + "Unable to receive host broadcast message".to_string(), + )); + }; + + trace!(message = ?inbound, "host_broadcast_message"); + if inbound.len() != 2 { + return Err(RpcError::CouldNotDecode(format!( + "Unexpected message 
length: {}", + inbound.len() + ))); + } + + let Some(topic) = inbound.pop_front() else { + return Err(RpcError::CouldNotDecode( + "Unexpected message format".to_string(), + )); + }; + + if &topic[..] != b"hosts" { + return Err(RpcError::CouldNotDecode(format!( + "Unexpected topic: {:?}", + topic + ))); + } + + let Some(event) = inbound.pop_front() else { + return Err(RpcError::CouldNotDecode( + "Unexpected message format".to_string(), + )); + }; + + let (msg, _msg_size): (HostBroadcastEvent, usize) = + bincode::decode_from_slice(event.as_ref(), bincode::config::standard()).map_err(|e| { + RpcError::CouldNotDecode(format!("Unable to decode host broadcast message: {}", e)) + })?; + Ok(msg) +} diff --git a/crates/rpc-async-client/src/rpc_client.rs b/crates/rpc-async-client/src/rpc_client.rs index e17a6320..73216853 100644 --- a/crates/rpc-async-client/src/rpc_client.rs +++ b/crates/rpc-async-client/src/rpc_client.rs @@ -12,7 +12,9 @@ // this program. If not, see . // -use rpc_common::{RpcError, RpcRequest, RpcResult}; +use rpc_common::{ + HostClientToDaemonMessage, HostToDaemonMessage, HostToken, MessageType, ReplyResult, RpcError, +}; use tmq::request_reply::RequestSender; use tmq::Multipart; use tracing::error; @@ -34,14 +36,68 @@ impl RpcSendClient { } /// Call the ZMQ RPC (REQ/REPLY) endpoint with a `ClientRequest`, and receive a `ServerResponse`. 
- pub async fn make_rpc_call( + pub async fn make_client_rpc_call( &mut self, client_id: Uuid, - rpc_msg: RpcRequest, - ) -> Result { + rpc_msg: HostClientToDaemonMessage, + ) -> Result { let rpc_msg_payload = bincode::encode_to_vec(&rpc_msg, bincode::config::standard()) .map_err(|e| RpcError::CouldNotSend(e.to_string()))?; - let message = Multipart::from(vec![client_id.as_bytes().to_vec(), rpc_msg_payload]); + let client_message_type = MessageType::HostClientToDaemon(client_id.into_bytes().to_vec()); + let message_type_bytes = + bincode::encode_to_vec(&client_message_type, bincode::config::standard()) + .map_err(|e| RpcError::CouldNotSend(e.to_string()))?; + let message = Multipart::from(vec![message_type_bytes, rpc_msg_payload]); + let rpc_request_sock = self.rcp_request_sock.take().ok_or(RpcError::CouldNotSend( + "RPC request socket not initialized".to_string(), + ))?; + let rpc_reply_sock = match rpc_request_sock.send(message).await { + Ok(rpc_reply_sock) => rpc_reply_sock, + Err(e) => { + error!( + "Unable to send connection establish request to RPC server: {}", + e + ); + return Err(RpcError::CouldNotSend(e.to_string())); + } + }; + + let (msg, recv_sock) = match rpc_reply_sock.recv().await { + Ok((msg, recv_sock)) => (msg, recv_sock), + Err(e) => { + error!( + "Unable to receive connection establish reply from RPC server: {}", + e + ); + return Err(RpcError::CouldNotReceive(e.to_string())); + } + }; + + match bincode::decode_from_slice(&msg[0], bincode::config::standard()) { + Ok((msg, _)) => { + self.rcp_request_sock = Some(recv_sock); + Ok(msg) + } + Err(e) => { + error!("Unable to decode RPC response: {}", e); + Err(RpcError::CouldNotDecode(e.to_string())) + } + } + } + + pub async fn make_host_rpc_call( + &mut self, + host_token: &HostToken, + rpc_message: HostToDaemonMessage, + ) -> Result { + let host_message_type = MessageType::HostToDaemon(host_token.clone()); + let message_type_bytes = + bincode::encode_to_vec(&host_message_type, 
bincode::config::standard()) + .map_err(|e| RpcError::CouldNotSend(e.to_string()))?; + + let rpc_msg_payload = bincode::encode_to_vec(&rpc_message, bincode::config::standard()) + .map_err(|e| RpcError::CouldNotSend(e.to_string()))?; + let message = Multipart::from(vec![message_type_bytes, rpc_msg_payload]); let rpc_request_sock = self.rcp_request_sock.take().ok_or(RpcError::CouldNotSend( "RPC request socket not initialized".to_string(), ))?; diff --git a/crates/rpc-common/Cargo.toml b/crates/rpc-common/Cargo.toml index 07bad6ea..9eed9129 100644 --- a/crates/rpc-common/Cargo.toml +++ b/crates/rpc-common/Cargo.toml @@ -17,3 +17,8 @@ moor-values = { path = "../values" } bincode.workspace = true thiserror.workspace = true + +# Auth/Auth +ed25519-dalek.workspace = true +pem.workspace = true +rusty_paseto.workspace = true diff --git a/crates/rpc-common/src/lib.rs b/crates/rpc-common/src/lib.rs index 4fbd213f..85a000ad 100644 --- a/crates/rpc-common/src/lib.rs +++ b/crates/rpc-common/src/lib.rs @@ -16,25 +16,44 @@ use bincode::{Decode, Encode}; use moor_values::model::ObjectRef; use moor_values::tasks::{NarrativeEvent, SchedulerError, VerbProgramError}; use moor_values::{Objid, Symbol, Var}; +use pem::PemError; +use rusty_paseto::prelude::Key; +use std::net::SocketAddr; +use std::path::Path; use std::time::SystemTime; use thiserror::Error; -pub const BROADCAST_TOPIC: &[u8; 9] = b"broadcast"; +/// A ZMQ topic for broadcasting to all clients of all hosts. +pub const CLIENT_BROADCAST_TOPIC: &[u8; 9] = b"broadcast"; -pub const MOOR_SESSION_TOKEN_FOOTER: &str = "key-id:moor_rpc"; +/// A ZMQ topic for broadcasting to just the hosts. +pub const HOST_BROADCAST_TOPIC: &[u8; 5] = b"hosts"; + +pub const MOOR_HOST_TOKEN_FOOTER: &str = "key-id:moor_host"; +pub const MOOR_SESSION_TOKEN_FOOTER: &str = "key-id:moor_client"; pub const MOOR_AUTH_TOKEN_FOOTER: &str = "key-id:moor_player"; /// Errors at the RPC transport / encoding layer. 
#[derive(Debug, Error)] pub enum RpcError { + #[error("could not initiate session: {0}")] + CouldNotInitiateSession(String), + #[error("could not authenticate: {0}")] + AuthenticationError(String), #[error("could not send RPC request: {0}")] CouldNotSend(String), #[error("could not receive RPC response: {0}")] CouldNotReceive(String), #[error("could not decode RPC response: {0}")] CouldNotDecode(String), + #[error("unexpected reply: {0}")] + UnexpectedReply(String), } +/// PASETO public token representing the host's identity. +#[derive(Debug, Clone, Eq, PartialEq, Encode, Decode, Hash)] +pub struct HostToken(pub String); + /// PASETO public token for a connection, used for the validation of RPC requests after the initial /// connection is established. #[derive(Debug, Clone, Eq, PartialEq, Encode, Decode)] @@ -44,21 +63,64 @@ pub struct ClientToken(pub String); #[derive(Debug, Clone, Eq, PartialEq, Encode, Decode)] pub struct AuthToken(pub String); +#[derive(Debug, Eq, PartialEq, Clone, Decode, Encode)] +pub enum MessageType { + HostToDaemon(HostToken), + /// A message from a host to the daemon on behalf of a client (client id is included) + HostClientToDaemon(Vec), +} + +#[derive(Copy, Debug, Eq, PartialEq, Clone, Decode, Encode)] +pub enum HostType { + TCP, + WebSocket, +} + +impl HostType { + pub fn id_str(&self) -> &str { + match self { + HostType::TCP => "tcp", + HostType::WebSocket => "websocket", + } + } + + pub fn parse_id_str(id_str: &str) -> Option { + match id_str { + "tcp" => Some(HostType::TCP), + "websocket" => Some(HostType::WebSocket), + _ => None, + } + } +} +/// An RPC message sent from a host itself to the daemon, on behalf of the host +#[derive(Debug, Clone, Eq, PartialEq, Encode, Decode)] +pub enum HostToDaemonMessage { + /// Register the presence of this host's listeners with the daemon. 
+ /// Lets the daemon know about the listeners, and then respond to the host with any additional + /// listeners that the daemon expects the host to start listening on. + RegisterHost(SystemTime, HostType, Vec<(Objid, SocketAddr)>), + /// Unregister the presence of this host's listeners with the daemon. + DetachHost(), + /// Respond to a host ping request. + HostPong(SystemTime, HostType, Vec<(Objid, SocketAddr)>), +} + +/// An RPC message sent from a host to the daemon on behalf of a client. #[derive(Debug, Clone, Eq, PartialEq, Encode, Decode)] -pub enum RpcRequest { +pub enum HostClientToDaemonMessage { /// Establish a new connection, requesting a client token and a connection object ConnectionEstablish(String), /// Anonymously request a sysprop (e.g. $login.welcome_message) RequestSysProp(ClientToken, ObjectRef, Symbol), /// Login using the words (e.g. "create player bob" or "connect player bob") and return an /// auth token and the object id of the player. None if the login failed. - LoginCommand(ClientToken, Vec, bool /* attach? */), + LoginCommand(ClientToken, Objid, Vec, bool /* attach? */), /// Attach to a previously-authenticated user, returning the object id of the player, /// and a client token -- or None if the auth token is not valid. /// If a ConnectType is specified, the user_connected verb will be called. - Attach(AuthToken, Option, String), + Attach(AuthToken, Option, Objid, String), /// Send a command to be executed. - Command(ClientToken, AuthToken, String), + Command(ClientToken, AuthToken, Objid, String), /// Return the (visible) verbs on the given object. Verbs(ClientToken, AuthToken, ObjectRef), /// Invoke the given verb on the given object. @@ -72,13 +134,13 @@ pub enum RpcRequest { /// Respond to a request for input. RequestedInput(ClientToken, AuthToken, u128, String), /// Send an "out of band" command to be executed. 
- OutOfBand(ClientToken, AuthToken, String), + OutOfBand(ClientToken, AuthToken, Objid, String), /// Evaluate a MOO expression. Eval(ClientToken, AuthToken, String), /// Resolve an object reference into a Var Resolve(ClientToken, AuthToken, ObjectRef), - /// Respond to a ping request. - Pong(ClientToken, SystemTime), + /// Respond to a client ping request. + ClientPong(ClientToken, SystemTime, Objid, HostType, SocketAddr), /// We're done with this connection, buh-bye. Detach(ClientToken), } @@ -98,9 +160,10 @@ pub enum ConnectType { } #[derive(Debug, Clone, PartialEq, Encode, Decode)] -pub enum RpcResult { - Success(RpcResponse), - Failure(RpcRequestError), +pub enum ReplyResult { + HostSuccess(DaemonToHostReply), + ClientSuccess(DaemonToClientReply), + Failure(RpcMessageError), } #[derive(Debug, Clone, Eq, PartialEq, Encode, Decode)] @@ -132,8 +195,19 @@ pub struct PropInfo { pub chown: bool, } +/// An RPC message sent from the daemon to a host in response to a HostToDaemonMessage. +#[derive(Debug, Clone, PartialEq, Encode, Decode)] +pub enum DaemonToHostReply { + /// The daemon is happy with this host and its messages. + Ack, + /// The daemon does not like this host for some reason. The host should die. + Reject(String), +} + +/// An RPC message sent from the daemon to a client on a specific host, in response to a +/// HostClientToDaemonMessage. #[derive(Debug, Clone, PartialEq, Encode, Decode)] -pub enum RpcResponse { +pub enum DaemonToClientReply { NewConnection(ClientToken, Objid), SysPropValue(Option), LoginResult(Option<(AuthToken, ConnectType, Objid)>), @@ -152,13 +226,13 @@ pub enum RpcResponse { InvokeResult(Result), } -/// Errors at the call/request level. +/// Errors at the message passing level. 
#[derive(Debug, PartialEq, Error, Clone, Decode, Encode)] -pub enum RpcRequestError { +pub enum RpcMessageError { #[error("Already connected")] AlreadyConnected, #[error("Invalid request")] - InvalidRequest, + InvalidRequest(String), #[error("No connection for client")] NoConnection, #[error("Could not retrieve system property")] @@ -177,9 +251,9 @@ pub enum RpcRequestError { InternalError(String), } -/// Events which occur over the pubsub channel, per client. +/// Events which occur over the pubsub channel, but destined for specific clients. #[derive(Debug, PartialEq, Clone, Decode, Encode)] -pub enum ConnectionEvent { +pub enum ClientEvent { /// An event has occurred in the narrative that the connections for the given object are /// expected to see. Narrative(Objid, NarrativeEvent), @@ -197,15 +271,67 @@ pub enum ConnectionEvent { TaskSuccess(Var), } -/// Events which occur over the pubsub channel, but are for all hosts. +/// Events which occur over the pubsub endpoint, but are for all the hosts. #[derive(Debug, Eq, PartialEq, Clone, Decode, Encode)] -pub enum BroadcastEvent { +pub enum HostBroadcastEvent { + /// The system is requesting that all hosts are of the given HostType begin listening on + /// the given port. + /// Triggered from the `listen` builtin. + Listen { + handler_object: Objid, + host_type: HostType, + port: u16, + print_messages: bool, + }, + /// The system is requesting that all hosts of the given HostType stop listening on the given port. + Unlisten { host_type: HostType, port: u16 }, + /// The system wants to know which hosts are still alive. They should respond by sending + /// a `HostPong` message RPC to the server. + /// If a host does not respond, the server will assume it is dead and remove its listeners + /// from the list of active listeners. + PingPong(SystemTime), +} + +/// Events which occur over the pubsub endpoint, but are for all clients on all hosts. 
+#[derive(Debug, Eq, PartialEq, Clone, Decode, Encode)] +pub enum ClientsBroadcastEvent { /// The system wants to know which clients are still alive. The host should respond by sending /// a `Pong` message RPC to the server (and it will then respond with ThanksPong) for each - /// active client it still has. + /// active client it still has, along with the host type and IP address of the client. + /// This is used to keep track of which clients are still connected to the server, and + /// also to fill in output from `listeners`. + /// /// (The time parameter is the server's current time. The client will respond with its own /// current time. This could be used in the future to synchronize event times, but isn't currently /// used.) PingPong(SystemTime), - // TODO: Shutdown, Broadcast BroadcastEvent messages in RPC layer +} + +#[derive(Error, Debug)] +pub enum KeyError { + #[error("Could not read key from file: {0}")] + ParseError(PemError), + #[error("Could not read key from file: {0}")] + ReadError(std::io::Error), +} + +/// Load a keypair from the given public and private key (PEM) files. 
+pub fn load_keypair(public_key: &Path, private_key: &Path) -> Result, KeyError> { + let (Some(pubkey_pem), Some(privkey_pem)) = ( + std::fs::read(public_key).ok(), + std::fs::read(private_key).ok(), + ) else { + return Err(KeyError::ReadError(std::io::Error::new( + std::io::ErrorKind::NotFound, + "Could not read key from file", + ))); + }; + + let privkey_pem = pem::parse(privkey_pem).map_err(KeyError::ParseError)?; + let pubkey_pem = pem::parse(pubkey_pem).map_err(KeyError::ParseError)?; + + let mut key_bytes = privkey_pem.contents().to_vec(); + key_bytes.extend_from_slice(pubkey_pem.contents()); + + Ok(Key::from(&key_bytes[0..64])) } diff --git a/crates/rpc-sync-client/src/pubsub_client.rs b/crates/rpc-sync-client/src/pubsub_client.rs index 5b326f4d..e1d199dd 100644 --- a/crates/rpc-sync-client/src/pubsub_client.rs +++ b/crates/rpc-sync-client/src/pubsub_client.rs @@ -17,10 +17,10 @@ use tracing::trace; use uuid::Uuid; use zmq::Socket; -use rpc_common::{BroadcastEvent, ConnectionEvent, RpcError}; +use rpc_common::{ClientEvent, ClientsBroadcastEvent, RpcError}; /// Blocking receive on the narrative channel, returning a `ConnectionEvent`. -pub fn events_recv(client_id: Uuid, subscribe: &Socket) -> Result { +pub fn events_recv(client_id: Uuid, subscribe: &Socket) -> Result { let Ok(inbound) = subscribe.recv_multipart(0) else { return Err(RpcError::CouldNotReceive( "Unable to receive narrative message".to_string(), @@ -48,7 +48,7 @@ pub fn events_recv(client_id: Uuid, subscribe: &Socket) -> Result Result Result { +pub fn broadcast_recv(subscribe: &mut Socket) -> Result { let Ok(inbound) = subscribe.recv_multipart(0) else { return Err(RpcError::CouldNotReceive( "Unable to receive broadcast message".to_string(), @@ -80,7 +80,7 @@ pub fn broadcast_recv(subscribe: &mut Socket) -> Result. 
// -use rpc_common::{RpcError, RpcRequest, RpcResult}; +use rpc_common::{HostClientToDaemonMessage, ReplyResult, RpcError}; use tracing::error; use uuid::Uuid; use zmq::Socket; @@ -36,8 +36,8 @@ impl RpcSendClient { pub fn make_rpc_call( &mut self, client_id: Uuid, - rpc_msg: RpcRequest, - ) -> Result { + rpc_msg: HostClientToDaemonMessage, + ) -> Result { let rpc_msg_payload = bincode::encode_to_vec(rpc_msg, bincode::config::standard()) .map_err(|e| RpcError::CouldNotSend(e.to_string()))?; diff --git a/crates/telnet-host/Cargo.toml b/crates/telnet-host/Cargo.toml index 27dc032a..1e047aa0 100644 --- a/crates/telnet-host/Cargo.toml +++ b/crates/telnet-host/Cargo.toml @@ -42,6 +42,11 @@ uuid.workspace = true ## Rich content termimad.workspace = true +# Auth/Auth +ed25519-dalek.workspace = true +pem.workspace = true +rusty_paseto.workspace = true + # Testing [dev-dependencies] escargot.workspace = true diff --git a/crates/telnet-host/src/telnet.rs b/crates/telnet-host/src/connection.rs similarity index 70% rename from crates/telnet-host/src/telnet.rs rename to crates/telnet-host/src/connection.rs index 44279728..166bec16 100644 --- a/crates/telnet-host/src/telnet.rs +++ b/crates/telnet-host/src/connection.rs @@ -29,16 +29,14 @@ use moor_values::util::parse_into_words; use moor_values::{Objid, Symbol, Variant}; use rpc_async_client::pubsub_client::{broadcast_recv, events_recv}; use rpc_async_client::rpc_client::RpcSendClient; -use rpc_common::RpcRequest::ConnectionEstablish; use rpc_common::{ - AuthToken, BroadcastEvent, ClientToken, ConnectType, ConnectionEvent, RpcRequestError, - RpcResult, VerbProgramResponse, BROADCAST_TOPIC, + AuthToken, ClientEvent, ClientToken, ClientsBroadcastEvent, ConnectType, HostType, ReplyResult, + RpcMessageError, VerbProgramResponse, }; -use rpc_common::{RpcRequest, RpcResponse}; +use rpc_common::{DaemonToClientReply, HostClientToDaemonMessage}; use termimad::MadSkin; use tmq::subscribe::Subscribe; -use tmq::{request, subscribe}; -use 
tokio::net::{TcpListener, TcpStream}; +use tokio::net::TcpStream; use tokio::select; use tokio_util::codec::{Framed, LinesCodec}; use tracing::{debug, error, info, trace, warn}; @@ -51,12 +49,18 @@ const OUT_OF_BAND_PREFIX: &str = "#$#"; const CONTENT_TYPE_MARKDOWN: &str = "text/markdown"; pub(crate) struct TelnetConnection { - client_id: Uuid, + pub(crate) peer_addr: SocketAddr, + /// The "handler" object, who is responsible for this connection, defaults to SYSTEM_OBJECT, + /// but custom listeners can be set up to handle connections differently. + pub(crate) handler_object: Objid, + /// The MOO connection object ID. + pub(crate) connection_oid: Objid, + pub(crate) client_id: Uuid, /// Current PASETO token. - client_token: ClientToken, - write: SplitSink, String>, - read: SplitStream>, - kill_switch: Arc, + pub(crate) client_token: ClientToken, + pub(crate) write: SplitSink, String>, + pub(crate) read: SplitStream>, + pub(crate) kill_switch: Arc, } /// The input modes the telnet session can be in. @@ -71,7 +75,7 @@ enum LineMode { } impl TelnetConnection { - async fn run( + pub(crate) async fn run( &mut self, events_sub: &mut Subscribe, broadcast_sub: &mut Subscribe, @@ -80,9 +84,14 @@ impl TelnetConnection { // Provoke welcome message, which is a login command with no arguments, and we // don't care about the reply at this point. rpc_client - .make_rpc_call( + .make_client_rpc_call( self.client_id, - RpcRequest::LoginCommand(self.client_token.clone(), vec![], false), + HostClientToDaemonMessage::LoginCommand( + self.client_token.clone(), + self.handler_object, + vec![], + false, + ), ) .await .expect("Unable to send login request to RPC server"); @@ -112,9 +121,9 @@ impl TelnetConnection { // Let the server know this client is gone. 
rpc_client - .make_rpc_call( + .make_client_rpc_call( self.client_id, - RpcRequest::Detach(self.client_token.clone()), + HostClientToDaemonMessage::Detach(self.client_token.clone()), ) .await?; @@ -167,32 +176,32 @@ impl TelnetConnection { Ok(event) = broadcast_recv(broadcast_sub) => { trace!(?event, "broadcast_event"); match event { - BroadcastEvent::PingPong(_server_time) => { - let _ = rpc_client.make_rpc_call(self.client_id, - RpcRequest::Pong(self.client_token.clone(), SystemTime::now())).await?; + ClientsBroadcastEvent::PingPong(_server_time) => { + let _ = rpc_client.make_client_rpc_call(self.client_id, + HostClientToDaemonMessage::ClientPong(self.client_token.clone(), SystemTime::now(), self.connection_oid, HostType::TCP, self.peer_addr)).await?; } } } Ok(event) = events_recv(self.client_id, narrative_sub) => { trace!(?event, "narrative_event"); match event { - ConnectionEvent::SystemMessage(_author, msg) => { + ClientEvent::SystemMessage(_author, msg) => { self.write.send(msg).await.with_context(|| "Unable to send message to client")?; } - ConnectionEvent::Narrative(_author, event) => { + ClientEvent::Narrative(_author, event) => { self.output(event.event()).await?; } - ConnectionEvent::RequestInput(_request_id) => { + ClientEvent::RequestInput(_request_id) => { bail!("RequestInput before login"); } - ConnectionEvent::Disconnect() => { + ClientEvent::Disconnect() => { self.write.close().await?; bail!("Disconnect before login"); } - ConnectionEvent::TaskError(te) => { + ClientEvent::TaskError(te) => { self.handle_task_error(te).await?; } - ConnectionEvent::TaskSuccess(result) => { + ClientEvent::TaskSuccess(result) => { trace!(?result, "TaskSuccess") // We don't need to do anything with successes. 
} @@ -205,10 +214,11 @@ impl TelnetConnection { }; let line = line.unwrap(); let words = parse_into_words(&line); - let response = rpc_client.make_rpc_call(self.client_id, - RpcRequest::LoginCommand(self.client_token.clone(), words, true)).await.expect("Unable to send login request to RPC server"); - if let RpcResult::Success(RpcResponse::LoginResult(Some((auth_token, connect_type, player)))) = response { + let response = rpc_client.make_client_rpc_call(self.client_id, + HostClientToDaemonMessage::LoginCommand(self.client_token.clone(), self.handler_object, words, true)).await.expect("Unable to send login request to RPC server"); + if let ReplyResult::ClientSuccess(DaemonToClientReply::LoginResult(Some((auth_token, connect_type, player)))) = response { info!(?player, client_id = ?self.client_id, "Login successful"); + self.connection_oid = player; return Ok((auth_token, player, connect_type)) } } @@ -279,15 +289,15 @@ impl TelnetConnection { // If the line begins with the out of band prefix, then send it that way, // instead. And really just fire and forget. if line.starts_with(OUT_OF_BAND_PREFIX) { - rpc_client.make_rpc_call(self.client_id, RpcRequest::OutOfBand(self.client_token.clone(), auth_token.clone(), line)).await? + rpc_client.make_client_rpc_call(self.client_id, HostClientToDaemonMessage::OutOfBand(self.client_token.clone(), auth_token.clone(), self.handler_object, line)).await? } else { - rpc_client.make_rpc_call(self.client_id, RpcRequest::Command(self.client_token.clone(), auth_token.clone(), line)).await? + rpc_client.make_client_rpc_call(self.client_id, HostClientToDaemonMessage::Command(self.client_token.clone(), auth_token.clone(), self.handler_object, line)).await? } }, // Are we expecting to respond to prompt input? 
If so, send this through to that, and switch the mode back to input LineMode::WaitingReply(ref input_reply_id) => { line_mode = LineMode::Input; - rpc_client.make_rpc_call(self.client_id, RpcRequest::RequestedInput(self.client_token.clone(), auth_token.clone(), *input_reply_id, line)).await? + rpc_client.make_client_rpc_call(self.client_id, HostClientToDaemonMessage::RequestedInput(self.client_token.clone(), auth_token.clone(), *input_reply_id, line)).await? } LineMode::SpoolingProgram(target, verb) => { @@ -299,7 +309,7 @@ impl TelnetConnection { let code = std::mem::take(&mut program_input); let target = ObjectRef::Match(target); let verb = Symbol::mk(&verb); - rpc_client.make_rpc_call(self.client_id, RpcRequest::Program(self.client_token.clone(), auth_token.clone(), target, verb, code)).await? + rpc_client.make_client_rpc_call(self.client_id, HostClientToDaemonMessage::Program(self.client_token.clone(), auth_token.clone(), target, verb, code)).await? } else { // Otherwise, we're still spooling up the program, so just keep spooling. 
program_input.push(line); @@ -309,18 +319,21 @@ impl TelnetConnection { }; match response { - RpcResult::Success(RpcResponse::CommandSubmitted(_)) | - RpcResult::Success(RpcResponse::InputThanks) => { + ReplyResult::ClientSuccess(DaemonToClientReply::CommandSubmitted(_)) | + ReplyResult::ClientSuccess(DaemonToClientReply::InputThanks) => { // Nothing to do } - RpcResult::Failure(RpcRequestError::TaskError(te)) => { + ReplyResult::HostSuccess(_) => { + error!("Unexpected host response to client message!"); + } + ReplyResult::Failure(RpcMessageError::TaskError(te)) => { self.handle_task_error(te).await?; } - RpcResult::Failure(e) => { + ReplyResult::Failure(e) => { error!("Unhandled RPC error: {:?}", e); continue; } - RpcResult::Success(RpcResponse::ProgramResponse(resp)) => { + ReplyResult::ClientSuccess(DaemonToClientReply::ProgramResponse(resp)) => { match resp { VerbProgramResponse::Success(o,verb) => { self.write.send(format!("0 error(s).\nVerb {} programmed on object {}", verb, o)).await?; @@ -337,7 +350,7 @@ impl TelnetConnection { } continue; } - RpcResult::Success(s) => { + ReplyResult::ClientSuccess(s) => { error!("Unexpected RPC success: {:?}", s); continue; } @@ -346,33 +359,33 @@ impl TelnetConnection { Ok(event) = broadcast_recv(broadcast_sub) => { trace!(?event, "broadcast_event"); match event { - BroadcastEvent::PingPong(_server_time) => { - let _ = rpc_client.make_rpc_call(self.client_id, - RpcRequest::Pong(self.client_token.clone(), SystemTime::now())).await?; + ClientsBroadcastEvent::PingPong(_server_time) => { + let _ = rpc_client.make_client_rpc_call(self.client_id, + HostClientToDaemonMessage::ClientPong(self.client_token.clone(), SystemTime::now(), self.connection_oid, HostType::TCP, self.peer_addr)).await?; } } } Ok(event) = events_recv(self.client_id, events_sub) => { match event { - ConnectionEvent::SystemMessage(_author, msg) => { + ClientEvent::SystemMessage(_author, msg) => { self.write.send(msg).await.with_context(|| "Unable to send 
message to client")?; } - ConnectionEvent::Narrative(_author, event) => { + ClientEvent::Narrative(_author, event) => { self.output(event.event()).await?; } - ConnectionEvent::RequestInput(request_id) => { + ClientEvent::RequestInput(request_id) => { // Server is requesting that the next line of input get sent through as a response to this request. line_mode = LineMode::WaitingReply(request_id); } - ConnectionEvent::Disconnect() => { + ClientEvent::Disconnect() => { self.write.send("** Disconnected **".to_string()).await.expect("Unable to send disconnect message to client"); self.write.close().await.expect("Unable to close connection"); return Ok(()) } - ConnectionEvent::TaskError(te) => { + ClientEvent::TaskError(te) => { self.handle_task_error(te).await?; } - ConnectionEvent::TaskSuccess(result) => { + ClientEvent::TaskSuccess(result) => { trace!(?result, "TaskSuccess") // We don't need to do anything with successes. @@ -440,105 +453,6 @@ impl TelnetConnection { } } -pub async fn telnet_listen_loop( - telnet_sockaddr: SocketAddr, - rpc_address: &str, - events_address: &str, - kill_switch: Arc, -) -> Result<(), eyre::Error> { - let listener = TcpListener::bind(telnet_sockaddr).await?; - let zmq_ctx = tmq::Context::new(); - zmq_ctx - .set_io_threads(8) - .expect("Unable to set ZMQ IO threads"); - - loop { - if kill_switch.load(std::sync::atomic::Ordering::Relaxed) { - info!("Kill switch activated, stopping..."); - return Ok(()); - } - let (stream, peer_addr) = listener.accept().await?; - let zmq_ctx = zmq_ctx.clone(); - let pubsub_address = events_address.to_string(); - let rpc_address = rpc_address.to_string(); - let connection_kill_switch = kill_switch.clone(); - tokio::spawn(async move { - let client_id = Uuid::new_v4(); - info!(peer_addr = ?peer_addr, client_id = ?client_id, - "Accepted connection" - ); - - let rpc_request_sock = request(&zmq_ctx) - .set_rcvtimeo(100) - .set_sndtimeo(100) - .connect(rpc_address.as_str()) - .expect("Unable to bind RPC server for 
connection"); - - // And let the RPC server know we're here, and it should start sending events on the - // narrative subscription. - debug!(rpc_address, "Contacting RPC server to establish connection"); - let mut rpc_client = RpcSendClient::new(rpc_request_sock); - - let (token, connection_oid) = match rpc_client - .make_rpc_call(client_id, ConnectionEstablish(peer_addr.to_string())) - .await - { - Ok(RpcResult::Success(RpcResponse::NewConnection(token, objid))) => { - info!("Connection established, connection ID: {}", objid); - (token, objid) - } - Ok(RpcResult::Failure(f)) => { - bail!("RPC failure in connection establishment: {}", f); - } - Ok(_) => { - bail!("Unexpected response from RPC server"); - } - Err(e) => { - bail!("Unable to establish connection: {}", e); - } - }; - debug!(client_id = ?client_id, connection = ?connection_oid, "Connection established"); - - // Before attempting login, we subscribe to the events socket, using our client - // id. The daemon should be sending events here. - let events_sub = subscribe(&zmq_ctx) - .connect(pubsub_address.as_str()) - .expect("Unable to connect narrative subscriber "); - let mut events_sub = events_sub - .subscribe(&client_id.as_bytes()[..]) - .expect("Unable to subscribe to narrative messages for client connection"); - - let broadcast_sub = subscribe(&zmq_ctx) - .connect(pubsub_address.as_str()) - .expect("Unable to connect broadcast subscriber "); - let mut broadcast_sub = broadcast_sub - .subscribe(BROADCAST_TOPIC) - .expect("Unable to subscribe to broadcast messages for client connection"); - - info!( - "Subscribed on pubsub socket for {:?}, socket addr {}", - client_id, pubsub_address - ); - - // Re-ify the connection. 
- let framed_stream = Framed::new(stream, LinesCodec::new()); - let (write, read): (SplitSink, String>, _) = - framed_stream.split(); - let mut tcp_connection = TelnetConnection { - client_token: token, - client_id, - write, - read, - kill_switch: connection_kill_switch, - }; - - tcp_connection - .run(&mut events_sub, &mut broadcast_sub, &mut rpc_client) - .await?; - Ok(()) - }); - } -} fn markdown_to_ansi(markdown: &str) -> String { let skin = MadSkin::default_dark(); // TODO: permit different text stylings here. e.g. user themes for colours, styling, etc. diff --git a/crates/telnet-host/src/listen.rs b/crates/telnet-host/src/listen.rs new file mode 100644 index 00000000..d3ad6863 --- /dev/null +++ b/crates/telnet-host/src/listen.rs @@ -0,0 +1,272 @@ +// Copyright (C) 2024 Ryan Daum +// +// This program is free software: you can redistribute it and/or modify it under +// the terms of the GNU General Public License as published by the Free Software +// Foundation, version 3. +// +// This program is distributed in the hope that it will be useful, but WITHOUT +// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License along with +// this program. If not, see . 
+// + +use crate::connection::TelnetConnection; +use eyre::bail; +use futures_util::stream::SplitSink; +use futures_util::StreamExt; +use moor_values::Objid; +use rpc_async_client::rpc_client::RpcSendClient; +use rpc_async_client::{ListenersClient, ListenersMessage}; +use rpc_common::HostClientToDaemonMessage::ConnectionEstablish; +use rpc_common::{DaemonToClientReply, ReplyResult, CLIENT_BROADCAST_TOPIC}; +use std::collections::HashMap; +use std::net::SocketAddr; +use std::sync::atomic::AtomicBool; +use std::sync::Arc; +use tmq::{request, subscribe}; +use tokio::net::{TcpListener, TcpStream}; +use tokio::select; +use tokio_util::codec::{Framed, LinesCodec}; +use tracing::{debug, info, warn}; +use uuid::Uuid; + +pub struct Listeners { + listeners: HashMap, + zmq_ctx: tmq::Context, + rpc_address: String, + events_address: String, + kill_switch: Arc, +} + +impl Listeners { + pub fn new( + zmq_ctx: tmq::Context, + rpc_address: String, + events_address: String, + kill_switch: Arc, + ) -> ( + Self, + tokio::sync::mpsc::Receiver, + ListenersClient, + ) { + let (tx, rx) = tokio::sync::mpsc::channel(100); + let listeners = Self { + listeners: HashMap::new(), + zmq_ctx, + rpc_address, + events_address, + kill_switch, + }; + let listeners_client = ListenersClient::new(tx); + (listeners, rx, listeners_client) + } + + pub async fn run( + &mut self, + mut listeners_channel: tokio::sync::mpsc::Receiver, + ) { + self.zmq_ctx + .set_io_threads(8) + .expect("Unable to set ZMQ IO threads"); + + loop { + if self.kill_switch.load(std::sync::atomic::Ordering::Relaxed) { + info!("Host kill switch activated, stopping..."); + return; + } + + match listeners_channel.recv().await { + Some(ListenersMessage::AddListener(handler, addr)) => { + let listener = TcpListener::bind(addr) + .await + .expect("Unable to bind listener"); + let (terminate_send, terminate_receive) = + tokio::sync::watch::channel(false); + self.listeners + .insert(addr, Listener::new(terminate_send, handler)); + + let 
zmq_ctx = self.zmq_ctx.clone(); + let rpc_address = self.rpc_address.clone(); + let events_address = self.events_address.clone(); + let kill_switch = self.kill_switch.clone(); + + // One task per listener. + tokio::spawn(async move { + loop { + let mut term_receive = terminate_receive.clone(); + select! { + _ = term_receive.changed() => { + info!("Listener terminated, stopping..."); + break; + } + result = listener.accept() => { + match result { + Ok((stream, addr)) => { + info!(?addr, "Accepted connection for listener"); + let listener_port = addr.port(); + let zmq_ctx = zmq_ctx.clone(); + let rpc_address = rpc_address.clone(); + let events_address = events_address.clone(); + let kill_switch = kill_switch.clone(); + + // Spawn a task to handle the accepted connection. + tokio::spawn(Listener::handle_accepted_connection( + zmq_ctx, + rpc_address, + events_address, + handler, + kill_switch, + listener_port, + stream, + addr, + )); + } + Err(e) => { + warn!(?e, "Accept failed, can't handle connection"); + break; + } + } + } + } + } + }); + } + Some(ListenersMessage::RemoveListener(addr)) => { + let listener = self.listeners.remove(&addr); + info!(?addr, "Removing listener"); + if let Some(listener) = listener { + listener + .terminate + .send(true) + .expect("Unable to send terminate message"); + } + } + Some(ListenersMessage::GetListeners(tx)) => { + let listeners = self + .listeners + .iter() + .map(|(addr, listener)| (listener.handler_object, *addr)) + .collect(); + tx.send(listeners).expect("Unable to send listeners list"); + } + None => { + warn!("Listeners channel closed, stopping..."); + return; + } + } + } + } +} + +pub struct Listener { + pub(crate) handler_object: Objid, + pub(crate) terminate: tokio::sync::watch::Sender, +} + +impl Listener { + pub fn new(terminate: tokio::sync::watch::Sender, handler_object: Objid) -> Self { + Self { + handler_object, + terminate, + } + } + + async fn handle_accepted_connection( + zmq_ctx: tmq::Context, + rpc_address: 
String, + events_address: String, + handler_object: Objid, + kill_switch: Arc, + listener_port: u16, + stream: TcpStream, + peer_addr: SocketAddr, + ) -> Result<(), eyre::Report> { + let connection_kill_switch = kill_switch.clone(); + let rpc_address = rpc_address.clone(); + let events_address = events_address.clone(); + let zmq_ctx = zmq_ctx.clone(); + tokio::spawn(async move { + let client_id = Uuid::new_v4(); + info!(peer_addr = ?peer_addr, client_id = ?client_id, port = listener_port, + "Accepted connection for listener" + ); + + let rpc_request_sock = request(&zmq_ctx) + .set_rcvtimeo(100) + .set_sndtimeo(100) + .connect(rpc_address.as_str()) + .expect("Unable to bind RPC server for connection"); + + // And let the RPC server know we're here, and it should start sending events on the + // narrative subscription. + debug!(rpc_address, "Contacting RPC server to establish connection"); + let mut rpc_client = RpcSendClient::new(rpc_request_sock); + + let (client_token, connection_oid) = match rpc_client + .make_client_rpc_call(client_id, ConnectionEstablish(peer_addr.to_string())) + .await + { + Ok(ReplyResult::ClientSuccess(DaemonToClientReply::NewConnection( + token, + objid, + ))) => { + info!("Connection established, connection ID: {}", objid); + (token, objid) + } + Ok(ReplyResult::Failure(f)) => { + bail!("RPC failure in connection establishment: {}", f); + } + Ok(_) => { + bail!("Unexpected response from RPC server"); + } + Err(e) => { + bail!("Unable to establish connection: {}", e); + } + }; + debug!(client_id = ?client_id, connection = ?connection_oid, "Connection established"); + + // Before attempting login, we subscribe to the events socket, using our client + // id. The daemon should be sending events here. 
+ let events_sub = subscribe(&zmq_ctx) + .connect(events_address.as_str()) + .expect("Unable to connect narrative subscriber "); + let mut events_sub = events_sub + .subscribe(&client_id.as_bytes()[..]) + .expect("Unable to subscribe to narrative messages for client connection"); + let broadcast_sub = subscribe(&zmq_ctx) + .connect(events_address.as_str()) + .expect("Unable to connect broadcast subscriber "); + let mut broadcast_sub = broadcast_sub + .subscribe(CLIENT_BROADCAST_TOPIC) + .expect("Unable to subscribe to broadcast messages for client connection"); + + info!( + "Subscribed on pubsub events socket for {:?}, socket addr {}", + client_id, events_address + ); + + // Re-ify the connection. + let framed_stream = Framed::new(stream, LinesCodec::new()); + let (write, read): (SplitSink, String>, _) = + framed_stream.split(); + let mut tcp_connection = TelnetConnection { + handler_object, + peer_addr, + connection_oid, + client_token, + client_id, + write, + read, + kill_switch: connection_kill_switch, + }; + + tcp_connection + .run(&mut events_sub, &mut broadcast_sub, &mut rpc_client) + .await?; + Ok(()) + }); + Ok(()) + } +} diff --git a/crates/telnet-host/src/main.rs b/crates/telnet-host/src/main.rs index a4e7d2f3..031fc1d5 100644 --- a/crates/telnet-host/src/main.rs +++ b/crates/telnet-host/src/main.rs @@ -12,32 +12,47 @@ // this program. If not, see . 
// -use std::net::SocketAddr; -use std::sync::atomic::AtomicBool; -use std::sync::Arc; +#![allow(clippy::too_many_arguments)] +use crate::listen::Listeners; use clap::Parser; use clap_derive::Parser; +use moor_values::SYSTEM_OBJECT; +use rpc_async_client::{make_host_token, proces_hosts_events, start_host_session}; +use rpc_common::{load_keypair, HostType}; +use std::net::SocketAddr; +use std::path::PathBuf; +use std::sync::atomic::AtomicBool; +use std::sync::Arc; use tokio::select; use tokio::signal::unix::{signal, SignalKind}; use tracing::info; -mod telnet; +mod connection; +mod listen; #[derive(Parser, Debug)] struct Args { #[arg( long, value_name = "telnet-address", - help = "Telnet server listen address", - default_value = "0.0.0.0:8888" + help = "Listen address for the default telnet connections listener", + default_value = "0.0.0.0" )] telnet_address: String, + #[arg( + long, + value_name = "telnet-port", + help = "Listen port for the default telnet connections listener", + default_value = "8888" + )] + telnet_port: u16, + #[arg( long, value_name = "rpc-address", - help = "RPC socket address", + help = "RPC ZMQ req-reply socket address", default_value = "ipc:///tmp/moor_rpc.sock" )] rpc_address: String, @@ -45,11 +60,27 @@ struct Args { #[arg( long, value_name = "events-address", - help = "Events socket address", + help = "Events ZMQ pub-sub address", default_value = "ipc:///tmp/moor_events.sock" )] events_address: String, + #[arg( + long, + value_name = "public_key", + help = "file containing the pkcs8 ed25519 public key (shared with the daemon), used for authenticating client & host connections", + default_value = "public_key.pem" + )] + public_key: PathBuf, + + #[arg( + long, + value_name = "private_key", + help = "file containing a pkcs8 ed25519 private key (shared with the daemon), used for authenticating client & host connections", + default_value = "private_key.pem" + )] + private_key: PathBuf, + #[arg(long, help = "Enable debug logging", 
default_value = "false")] debug: bool, } @@ -80,19 +111,58 @@ async fn main() -> Result<(), eyre::Error> { signal(SignalKind::interrupt()).expect("Unable to register STOP signal handler"); let kill_switch = Arc::new(AtomicBool::new(false)); - let telnet_sockaddr = args.telnet_address.parse::().unwrap(); - let listen_loop = telnet::telnet_listen_loop( - telnet_sockaddr, - args.rpc_address.as_str(), - args.events_address.as_str(), + + // Parse the telnet address and port. + let listen_addr = format!("{}:{}", args.telnet_address, args.telnet_port); + let telnet_sockaddr = listen_addr.parse::().unwrap(); + + let zmq_ctx = tmq::Context::new(); + + let (mut listeners_server, listeners_channel, listeners) = Listeners::new( + zmq_ctx.clone(), + args.rpc_address.clone(), + args.events_address.clone(), kill_switch.clone(), ); + let listeners_thread = tokio::spawn(async move { + listeners_server.run(listeners_channel).await; + }); - info!("Host started, listening @ {}...", args.telnet_address); + listeners + .add_listener(SYSTEM_OBJECT, telnet_sockaddr) + .await + .expect("Unable to start default listener"); + + let keypair = load_keypair(&args.public_key, &args.private_key) + .expect("Unable to load keypair from public and private key files"); + let host_token = make_host_token(&keypair, HostType::TCP); + + let rpc_client = start_host_session( + host_token.clone(), + zmq_ctx.clone(), + args.rpc_address.clone(), + kill_switch.clone(), + listeners.clone(), + ) + .await + .expect("Unable to establish initial host session"); + + let host_listen_loop = proces_hosts_events( + rpc_client, + host_token, + zmq_ctx.clone(), + args.events_address.clone(), + args.telnet_address.clone(), + kill_switch.clone(), + listeners.clone(), + HostType::TCP, + ); select! 
{ - msg = listen_loop => { - msg?; - info!("ZMQ client loop exited, stopping..."); + _ = host_listen_loop => { + info!("Host events loop exited."); + }, + _ = listeners_thread => { + info!("Listener set exited."); } _ = hup_signal.recv() => { info!("HUP received, stopping..."); diff --git a/crates/web-host/src/host/auth.rs b/crates/web-host/src/host/auth.rs index ae37e37b..31961051 100644 --- a/crates/web-host/src/host/auth.rs +++ b/crates/web-host/src/host/auth.rs @@ -19,7 +19,9 @@ use axum::http::{HeaderMap, HeaderValue, StatusCode}; use axum::response::{IntoResponse, Response}; use axum::Form; use rpc_async_client::rpc_client::RpcSendClient; -use rpc_common::{AuthToken, ClientToken, RpcRequest, RpcResponse, RpcResult}; +use rpc_common::{ + AuthToken, ClientToken, DaemonToClientReply, HostClientToDaemonMessage, ReplyResult, +}; use serde_derive::Deserialize; use std::net::SocketAddr; use tracing::{debug, error, warn}; @@ -83,14 +85,22 @@ async fn auth_handler( let words = vec![auth_verb.to_string(), player, password]; let response = rpc_client - .make_rpc_call( + .make_client_rpc_call( client_id, - RpcRequest::LoginCommand(client_token.clone(), words, false), + HostClientToDaemonMessage::LoginCommand( + client_token.clone(), + host.handler_object, + words, + false, + ), ) .await .expect("Unable to send login request to RPC server"); - let RpcResult::Success(RpcResponse::LoginResult(Some((auth_token, _connect_type, player)))) = - response + let ReplyResult::ClientSuccess(DaemonToClientReply::LoginResult(Some(( + auth_token, + _connect_type, + player, + )))) = response else { error!(?response, "Login failed"); @@ -111,7 +121,10 @@ async fn auth_handler( // We're done with this RPC connection, so we detach it. 
let _ = rpc_client - .make_rpc_call(client_id, RpcRequest::Detach(client_token.clone())) + .make_client_rpc_call( + client_id, + HostClientToDaemonMessage::Detach(client_token.clone()), + ) .await .expect("Unable to send detach to RPC server"); diff --git a/crates/web-host/src/host/props.rs b/crates/web-host/src/host/props.rs index b3459c97..f778be76 100644 --- a/crates/web-host/src/host/props.rs +++ b/crates/web-host/src/host/props.rs @@ -19,7 +19,7 @@ use axum::response::{IntoResponse, Response}; use axum::Json; use moor_values::model::ObjectRef; use moor_values::Symbol; -use rpc_common::{EntityType, PropInfo, RpcRequest, RpcResponse}; +use rpc_common::{DaemonToClientReply, EntityType, HostClientToDaemonMessage, PropInfo}; use serde_json::json; use std::net::SocketAddr; use tracing::{debug, error}; @@ -43,11 +43,11 @@ pub async fn properties_handler( let response = match web_host::rpc_call( client_id, &mut rpc_client, - RpcRequest::Properties(client_token.clone(), auth_token.clone(), object), + HostClientToDaemonMessage::Properties(client_token.clone(), auth_token.clone(), object), ) .await { - Ok(RpcResponse::Properties(properties)) => Json( + Ok(DaemonToClientReply::Properties(properties)) => Json( properties .iter() .map(|prop| { @@ -73,7 +73,10 @@ pub async fn properties_handler( // We're done with this RPC connection, so we detach it. 
let _ = rpc_client - .make_rpc_call(client_id, RpcRequest::Detach(client_token.clone())) + .make_client_rpc_call( + client_id, + HostClientToDaemonMessage::Detach(client_token.clone()), + ) .await .expect("Unable to send detach to RPC server"); @@ -101,7 +104,7 @@ pub async fn property_retrieval_handler( let response = match web_host::rpc_call( client_id, &mut rpc_client, - RpcRequest::Retrieve( + HostClientToDaemonMessage::Retrieve( client_token.clone(), auth_token.clone(), object, @@ -111,7 +114,7 @@ pub async fn property_retrieval_handler( ) .await { - Ok(RpcResponse::PropertyValue( + Ok(DaemonToClientReply::PropertyValue( PropInfo { definer, location, @@ -145,7 +148,10 @@ pub async fn property_retrieval_handler( // We're done with this RPC connection, so we detach it. let _ = rpc_client - .make_rpc_call(client_id, RpcRequest::Detach(client_token.clone())) + .make_client_rpc_call( + client_id, + HostClientToDaemonMessage::Detach(client_token.clone()), + ) .await .expect("Unable to send detach to RPC server"); diff --git a/crates/web-host/src/host/verbs.rs b/crates/web-host/src/host/verbs.rs index 1173891f..abdcbb33 100644 --- a/crates/web-host/src/host/verbs.rs +++ b/crates/web-host/src/host/verbs.rs @@ -21,7 +21,9 @@ use axum::Json; use moor_values::model::ObjectRef; use moor_values::tasks::VerbProgramError; use moor_values::Symbol; -use rpc_common::{EntityType, RpcRequest, RpcResponse, VerbInfo, VerbProgramResponse}; +use rpc_common::{ + DaemonToClientReply, EntityType, HostClientToDaemonMessage, VerbInfo, VerbProgramResponse, +}; use serde_json::json; use std::net::SocketAddr; use tracing::{debug, error}; @@ -59,7 +61,7 @@ pub async fn verb_invoke_handler( let response = match web_host::rpc_call( client_id, &mut rpc_client, - RpcRequest::InvokeVerb( + HostClientToDaemonMessage::InvokeVerb( client_token.clone(), auth_token.clone(), object, @@ -69,7 +71,7 @@ pub async fn verb_invoke_handler( ) .await { - Ok(RpcResponse::InvokeResult(Ok(value))) => { + 
Ok(DaemonToClientReply::InvokeResult(Ok(value))) => { debug!("Invoke verb result: {:?}", value); let result_json = var_as_json(&value); Json(json!({ @@ -77,7 +79,7 @@ pub async fn verb_invoke_handler( })) .into_response() } - Ok(RpcResponse::InvokeResult(Err(e))) => { + Ok(DaemonToClientReply::InvokeResult(Err(e))) => { error!("Invoke verb error: {:?}", e); Json(json!({ "error": e.to_string() @@ -93,7 +95,10 @@ pub async fn verb_invoke_handler( // We're done with this RPC connection, so we detach it. let _ = rpc_client - .make_rpc_call(client_id, RpcRequest::Detach(client_token.clone())) + .make_client_rpc_call( + client_id, + HostClientToDaemonMessage::Detach(client_token.clone()), + ) .await .expect("Unable to send detach to RPC server"); @@ -128,27 +133,34 @@ pub async fn verb_program_handler( let response = match web_host::rpc_call( client_id, &mut rpc_client, - RpcRequest::Program(client_token.clone(), auth_token.clone(), object, name, code), + HostClientToDaemonMessage::Program( + client_token.clone(), + auth_token.clone(), + object, + name, + code, + ), ) .await { - Ok(RpcResponse::ProgramResponse(VerbProgramResponse::Success(objid, verb_name))) => { - Json(json!({ - "location": objid.0, - "name": verb_name, - })) - .into_response() - } - Ok(RpcResponse::ProgramResponse(VerbProgramResponse::Failure( + Ok(DaemonToClientReply::ProgramResponse(VerbProgramResponse::Success( + objid, + verb_name, + ))) => Json(json!({ + "location": objid.0, + "name": verb_name, + })) + .into_response(), + Ok(DaemonToClientReply::ProgramResponse(VerbProgramResponse::Failure( VerbProgramError::NoVerbToProgram, ))) => { // 404 StatusCode::NOT_FOUND.into_response() } - Ok(RpcResponse::ProgramResponse(VerbProgramResponse::Failure( + Ok(DaemonToClientReply::ProgramResponse(VerbProgramResponse::Failure( VerbProgramError::DatabaseError, ))) => StatusCode::INTERNAL_SERVER_ERROR.into_response(), - Ok(RpcResponse::ProgramResponse(VerbProgramResponse::Failure( + 
Ok(DaemonToClientReply::ProgramResponse(VerbProgramResponse::Failure( VerbProgramError::CompilationError(errors), ))) => Json(json!({ "errors": errors.iter().map(|e| e.to_string()).collect::>() @@ -163,7 +175,10 @@ pub async fn verb_program_handler( // We're done with this RPC connection, so we detach it. let _ = rpc_client - .make_rpc_call(client_id, RpcRequest::Detach(client_token.clone())) + .make_client_rpc_call( + client_id, + HostClientToDaemonMessage::Detach(client_token.clone()), + ) .await .expect("Unable to send detach to RPC server"); @@ -191,7 +206,7 @@ pub async fn verb_retrieval_handler( let response = match web_host::rpc_call( client_id, &mut rpc_client, - RpcRequest::Retrieve( + HostClientToDaemonMessage::Retrieve( client_token.clone(), auth_token.clone(), object, @@ -201,7 +216,7 @@ pub async fn verb_retrieval_handler( ) .await { - Ok(RpcResponse::VerbValue( + Ok(DaemonToClientReply::VerbValue( VerbInfo { location, owner, @@ -234,7 +249,10 @@ pub async fn verb_retrieval_handler( // We're done with this RPC connection, so we detach it. let _ = rpc_client - .make_rpc_call(client_id, RpcRequest::Detach(client_token.clone())) + .make_client_rpc_call( + client_id, + HostClientToDaemonMessage::Detach(client_token.clone()), + ) .await .expect("Unable to send detach to RPC server"); @@ -260,11 +278,11 @@ pub async fn verbs_handler( let response = match web_host::rpc_call( client_id, &mut rpc_client, - RpcRequest::Verbs(client_token.clone(), auth_token.clone(), object), + HostClientToDaemonMessage::Verbs(client_token.clone(), auth_token.clone(), object), ) .await { - Ok(RpcResponse::Verbs(verbs)) => Json( + Ok(DaemonToClientReply::Verbs(verbs)) => Json( verbs .iter() .map(|verb| { @@ -291,7 +309,10 @@ pub async fn verbs_handler( // We're done with this RPC connection, so we detach it. 
let _ = rpc_client - .make_rpc_call(client_id, RpcRequest::Detach(client_token.clone())) + .make_client_rpc_call( + client_id, + HostClientToDaemonMessage::Detach(client_token.clone()), + ) .await .expect("Unable to send detach to RPC server"); diff --git a/crates/web-host/src/host/web_host.rs b/crates/web-host/src/host/web_host.rs index d4abb316..01343aa5 100644 --- a/crates/web-host/src/host/web_host.rs +++ b/crates/web-host/src/host/web_host.rs @@ -12,6 +12,8 @@ // this program. If not, see . // +#![allow(clippy::too_many_arguments)] + use crate::host::ws_connection::WebSocketConnection; use crate::host::{auth, var_as_json}; use axum::body::{Body, Bytes}; @@ -25,10 +27,13 @@ use moor_values::model::ObjectRef; use moor_values::Error::E_INVIND; use moor_values::{v_err, Objid, Symbol}; use rpc_async_client::rpc_client::RpcSendClient; -use rpc_common::RpcRequest::{Attach, ConnectionEstablish}; use rpc_common::AuthToken; -use rpc_common::{ClientToken, RpcRequestError}; -use rpc_common::{ConnectType, RpcRequest, RpcResponse, RpcResult, BROADCAST_TOPIC}; +use rpc_common::HostClientToDaemonMessage::{Attach, ConnectionEstablish}; +use rpc_common::{ClientToken, RpcMessageError}; +use rpc_common::{ + ConnectType, DaemonToClientReply, HostClientToDaemonMessage, ReplyResult, + CLIENT_BROADCAST_TOPIC, +}; use std::net::SocketAddr; use tmq::{request, subscribe}; use tracing::warn; @@ -46,12 +51,13 @@ pub struct WebHost { zmq_context: tmq::Context, rpc_addr: String, pubsub_addr: String, + pub(crate) handler_object: Objid, } #[derive(Debug, thiserror::Error)] pub enum WsHostError { #[error("RPC request error: {0}")] - RpcFailure(RpcRequestError), + RpcFailure(RpcMessageError), #[error("RPC system error: {0}")] RpcError(eyre::Error), #[error("Authentication failed")] @@ -59,12 +65,13 @@ pub enum WsHostError { } impl WebHost { - pub fn new(rpc_addr: String, narrative_addr: String) -> Self { + pub fn new(rpc_addr: String, narrative_addr: String, handler_object: Objid) -> Self { let 
tmq_context = tmq::Context::new(); Self { zmq_context: tmq_context, rpc_addr, pubsub_addr: narrative_addr, + handler_object, } } } @@ -95,21 +102,29 @@ impl WebHost { let mut rpc_client = RpcSendClient::new(rcp_request_sock); let (client_token, player) = match rpc_client - .make_rpc_call( + .make_client_rpc_call( client_id, - Attach(auth_token, connect_type, peer_addr.to_string()), + Attach( + auth_token, + connect_type, + self.handler_object, + peer_addr.to_string(), + ), ) .await { - Ok(RpcResult::Success(RpcResponse::AttachResult(Some((client_token, player))))) => { + Ok(ReplyResult::ClientSuccess(DaemonToClientReply::AttachResult(Some(( + client_token, + player, + ))))) => { info!("Connection authenticated, player: {}", player); (client_token, player) } - Ok(RpcResult::Success(RpcResponse::AttachResult(None))) => { + Ok(ReplyResult::ClientSuccess(DaemonToClientReply::AttachResult(None))) => { warn!("Connection authentication failed from {}", peer_addr); return Err(WsHostError::AuthenticationFailed); } - Ok(RpcResult::Failure(f)) => { + Ok(ReplyResult::Failure(f)) => { error!("RPC failure in connection establishment: {}", f); return Err(WsHostError::RpcFailure(f)); } @@ -130,6 +145,7 @@ impl WebHost { /// Actually instantiate the connection now that we've validated the auth token. 
pub async fn start_ws_connection( &self, + handler_object: Objid, player: Objid, client_id: Uuid, client_token: ClientToken, @@ -151,7 +167,7 @@ impl WebHost { .connect(self.pubsub_addr.as_str()) .expect("Unable to connect broadcast subscriber "); let broadcast_sub = broadcast_sub - .subscribe(BROADCAST_TOPIC) + .subscribe(CLIENT_BROADCAST_TOPIC) .expect("Unable to subscribe to broadcast messages for client connection"); info!( @@ -160,6 +176,7 @@ impl WebHost { ); Ok(WebSocketConnection { + handler_object, player, peer_addr, broadcast_sub, @@ -186,18 +203,21 @@ impl WebHost { let mut rpc_client = RpcSendClient::new(rcp_request_sock); let client_token = match rpc_client - .make_rpc_call(client_id, ConnectionEstablish(addr.to_string())) + .make_client_rpc_call(client_id, ConnectionEstablish(addr.to_string())) .await { - Ok(RpcResult::Success(RpcResponse::NewConnection(client_token, objid))) => { + Ok(ReplyResult::ClientSuccess(DaemonToClientReply::NewConnection( + client_token, + objid, + ))) => { info!("Connection established, connection ID: {}", objid); client_token } - Ok(RpcResult::Failure(f)) => { + Ok(ReplyResult::Failure(f)) => { error!("RPC failure in connection establishment: {}", f); return Err(WsHostError::RpcFailure(f)); } - Ok(RpcResult::Success(r)) => { + Ok(ReplyResult::ClientSuccess(r)) => { error!("Unexpected response from RPC server"); return Err(WsHostError::RpcError(eyre!( "Unexpected response from RPC server: {:?}", @@ -208,6 +228,13 @@ impl WebHost { error!("Unable to establish connection: {}", e); return Err(WsHostError::RpcError(eyre!(e))); } + Ok(ReplyResult::HostSuccess(hs)) => { + error!("Unexpected response from RPC server: {:?}", hs); + return Err(WsHostError::RpcError(eyre!( + "Unexpected response from RPC server: {:?}", + hs + ))); + } }; Ok((client_id, rpc_client, client_token)) @@ -217,19 +244,23 @@ impl WebHost { pub(crate) async fn rpc_call( client_id: Uuid, rpc_client: &mut RpcSendClient, - request: RpcRequest, -) -> Result { - 
match rpc_client.make_rpc_call(client_id, request).await { + request: HostClientToDaemonMessage, +) -> Result { + match rpc_client.make_client_rpc_call(client_id, request).await { Ok(rpc_response) => match rpc_response { - RpcResult::Success(r) => Ok(r), + ReplyResult::ClientSuccess(r) => Ok(r), - RpcResult::Failure(RpcRequestError::PermissionDenied) => { + ReplyResult::Failure(RpcMessageError::PermissionDenied) => { Err(StatusCode::INTERNAL_SERVER_ERROR) } - RpcResult::Failure(f) => { + ReplyResult::Failure(f) => { error!("RPC failure in welcome message retrieval: {:?}", f); Err(StatusCode::INTERNAL_SERVER_ERROR) } + ReplyResult::HostSuccess(hs) => { + error!("Unexpected response from RPC server: {:?}", hs); + Err(StatusCode::INTERNAL_SERVER_ERROR) + } }, Err(_) => Err(StatusCode::INTERNAL_SERVER_ERROR), } @@ -253,7 +284,7 @@ pub async fn welcome_message_handler( let response = match rpc_call( client_id, &mut rpc_client, - RpcRequest::RequestSysProp( + HostClientToDaemonMessage::RequestSysProp( client_token.clone(), ObjectRef::SysObj(vec![Symbol::mk("login")]), Symbol::mk("welcome_message"), @@ -261,8 +292,10 @@ pub async fn welcome_message_handler( ) .await { - Ok(RpcResponse::SysPropValue(Some(value))) => Json(var_as_json(&value)).into_response(), - Ok(RpcResponse::SysPropValue(None)) => StatusCode::NOT_FOUND.into_response(), + Ok(DaemonToClientReply::SysPropValue(Some(value))) => { + Json(var_as_json(&value)).into_response() + } + Ok(DaemonToClientReply::SysPropValue(None)) => StatusCode::NOT_FOUND.into_response(), Ok(r) => { error!("Unexpected response from RPC server: {:?}", r); StatusCode::INTERNAL_SERVER_ERROR.into_response() @@ -272,7 +305,10 @@ pub async fn welcome_message_handler( // We're done with this RPC connection, so we detach it. 
let _ = rpc_client - .make_rpc_call(client_id, RpcRequest::Detach(client_token.clone())) + .make_client_rpc_call( + client_id, + HostClientToDaemonMessage::Detach(client_token.clone()), + ) .await .expect("Unable to send detach to RPC server"); @@ -296,11 +332,11 @@ pub async fn eval_handler( let response = match rpc_call( client_id, &mut rpc_client, - RpcRequest::Eval(client_token.clone(), auth_token.clone(), expression), + HostClientToDaemonMessage::Eval(client_token.clone(), auth_token.clone(), expression), ) .await { - Ok(RpcResponse::EvalResult(value)) => { + Ok(DaemonToClientReply::EvalResult(value)) => { debug!("Eval result: {:?}", value); Json(var_as_json(&value)).into_response() } @@ -313,7 +349,10 @@ pub async fn eval_handler( // We're done with this RPC connection, so we detach it. let _ = rpc_client - .make_rpc_call(client_id, RpcRequest::Detach(client_token.clone())) + .make_client_rpc_call( + client_id, + HostClientToDaemonMessage::Detach(client_token.clone()), + ) .await .expect("Unable to send detach to RPC server"); @@ -342,11 +381,11 @@ pub async fn resolve_objref_handler( let response = match rpc_call( client_id, &mut rpc_client, - RpcRequest::Resolve(client_token.clone(), auth_token.clone(), objref), + HostClientToDaemonMessage::Resolve(client_token.clone(), auth_token.clone(), objref), ) .await { - Ok(RpcResponse::ResolveResult(obj)) => { + Ok(DaemonToClientReply::ResolveResult(obj)) => { if obj == v_err(E_INVIND) { StatusCode::NOT_FOUND.into_response() } else { @@ -362,7 +401,10 @@ pub async fn resolve_objref_handler( // We're done with this RPC connection, so we detach it. 
let _ = rpc_client - .make_rpc_call(client_id, RpcRequest::Detach(client_token.clone())) + .make_client_rpc_call( + client_id, + HostClientToDaemonMessage::Detach(client_token.clone()), + ) .await .expect("Unable to send detach to RPC server"); @@ -403,6 +445,7 @@ async fn attach( let Ok(mut connection) = host .start_ws_connection( + host.handler_object, player, client_id, client_token, diff --git a/crates/web-host/src/host/ws_connection.rs b/crates/web-host/src/host/ws_connection.rs index 74d8cb78..258d9ee2 100644 --- a/crates/web-host/src/host/ws_connection.rs +++ b/crates/web-host/src/host/ws_connection.rs @@ -21,11 +21,12 @@ use moor_values::{Objid, Var}; use rpc_async_client::pubsub_client::broadcast_recv; use rpc_async_client::pubsub_client::events_recv; use rpc_async_client::rpc_client::RpcSendClient; -use rpc_common::BroadcastEvent; -use rpc_common::ConnectionEvent; +use rpc_common::ClientsBroadcastEvent; use rpc_common::{ - AuthToken, ClientToken, ConnectType, RpcRequest, RpcRequestError, RpcResponse, RpcResult, + AuthToken, ClientToken, ConnectType, DaemonToClientReply, HostClientToDaemonMessage, + ReplyResult, RpcMessageError, }; +use rpc_common::{ClientEvent, HostType}; use serde_json::Value; use std::net::SocketAddr; use std::time::SystemTime; @@ -43,6 +44,7 @@ pub struct WebSocketConnection { pub(crate) client_token: ClientToken, pub(crate) auth_token: AuthToken, pub(crate) rpc_client: RpcSendClient, + pub(crate) handler_object: Objid, } /// The JSON output of a narrative event. 
@@ -105,16 +107,18 @@ impl WebSocketConnection { Ok(event) = broadcast_recv(&mut self.broadcast_sub) => { trace!(?event, "broadcast_event"); match event { - BroadcastEvent::PingPong(_server_time) => { - let _ = self.rpc_client.make_rpc_call(self.client_id, - RpcRequest::Pong(self.client_token.clone(), SystemTime::now())).await.expect("Unable to send pong to RPC server"); + ClientsBroadcastEvent::PingPong(_server_time) => { + let _ = self.rpc_client.make_client_rpc_call(self.client_id, + HostClientToDaemonMessage::ClientPong(self.client_token.clone(), SystemTime::now(), + self.handler_object, HostType::WebSocket, self.peer_addr)).await.expect("Unable to send pong to RPC server"); + } } } Ok(event) = events_recv(self.client_id, &mut self.narrative_sub) => { trace!(?event, "narrative_event"); match event { - ConnectionEvent::SystemMessage(author, msg) => { + ClientEvent::SystemMessage(author, msg) => { Self::emit_narrative(&mut ws_sender, NarrativeOutput { author: author.0, system_message: Some(msg), @@ -123,7 +127,7 @@ impl WebSocketConnection { server_time: SystemTime::now(), }).await; } - ConnectionEvent::Narrative(_author, event) => { + ClientEvent::Narrative(_author, event) => { let msg = event.event(); let Event::Notify(msg, content_type) = msg; let content_type = content_type.map(|s| s.to_string()); @@ -135,10 +139,10 @@ impl WebSocketConnection { server_time: event.timestamp(), }).await; } - ConnectionEvent::RequestInput(request_id) => { + ClientEvent::RequestInput(request_id) => { expecting_input = Some(request_id); } - ConnectionEvent::Disconnect() => { + ClientEvent::Disconnect() => { Self::emit_narrative(&mut ws_sender, NarrativeOutput { author: self.player.0, system_message: Some("** Disconnected **".to_string()), @@ -149,10 +153,10 @@ impl WebSocketConnection { ws_sender.close().await.expect("Unable to close connection"); return ; } - ConnectionEvent::TaskError(te) => { + ClientEvent::TaskError(te) => { self.handle_task_error(&mut ws_sender, 
te).await.expect("Unable to handle task error"); } - ConnectionEvent::TaskSuccess(s) => { + ClientEvent::TaskSuccess(s) => { Self::emit_value(&mut ws_sender, ValueResult(s)).await; } } @@ -173,9 +177,9 @@ impl WebSocketConnection { let response = match expecting_input.take() { Some(input_request_id) => self .rpc_client - .make_rpc_call( + .make_client_rpc_call( self.client_id, - RpcRequest::RequestedInput( + HostClientToDaemonMessage::RequestedInput( self.client_token.clone(), self.auth_token.clone(), input_request_id, @@ -186,30 +190,38 @@ impl WebSocketConnection { .expect("Unable to send input to RPC server"), None => self .rpc_client - .make_rpc_call( + .make_client_rpc_call( self.client_id, - RpcRequest::Command(self.client_token.clone(), self.auth_token.clone(), cmd), + HostClientToDaemonMessage::Command( + self.client_token.clone(), + self.auth_token.clone(), + self.handler_object, + cmd, + ), ) .await .expect("Unable to send command to RPC server"), }; match response { - RpcResult::Success(RpcResponse::CommandSubmitted(_)) - | RpcResult::Success(RpcResponse::InputThanks) => { + ReplyResult::ClientSuccess(DaemonToClientReply::CommandSubmitted(_)) + | ReplyResult::ClientSuccess(DaemonToClientReply::InputThanks) => { // Nothing to do } - RpcResult::Failure(RpcRequestError::TaskError(e)) => { + ReplyResult::Failure(RpcMessageError::TaskError(e)) => { self.handle_task_error(ws_sender, e) .await .expect("Unable to handle task error"); } - RpcResult::Failure(e) => { + ReplyResult::Failure(e) => { error!("Unhandled RPC error: {:?}", e); } - RpcResult::Success(s) => { + ReplyResult::ClientSuccess(s) => { error!("Unexpected RPC success: {:?}", s); } + ReplyResult::HostSuccess(hs) => { + error!("Unexpected host success: {:?}", hs); + } } } diff --git a/crates/web-host/src/main.rs b/crates/web-host/src/main.rs index ac11ac1c..a82a4dc0 100644 --- a/crates/web-host/src/main.rs +++ b/crates/web-host/src/main.rs @@ -17,15 +17,26 @@ mod host; use 
crate::client::{editor_handler, js_handler, root_handler}; use crate::host::WebHost; +use std::collections::HashMap; use axum::routing::{get, post}; use axum::Router; use clap::Parser; use clap_derive::Parser; +use moor_values::{Objid, SYSTEM_OBJECT}; +use rpc_async_client::{ + make_host_token, proces_hosts_events, start_host_session, ListenersClient, ListenersMessage, +}; +use rpc_common::{load_keypair, HostType}; use std::net::SocketAddr; +use std::path::PathBuf; +use std::sync::atomic::AtomicBool; +use std::sync::Arc; use tokio::net::TcpListener; -use tracing::info; +use tokio::select; +use tokio::signal::unix::{signal, SignalKind}; +use tracing::{info, warn}; #[derive(Parser, Debug)] struct Args { @@ -33,7 +44,7 @@ struct Args { long, value_name = "listen-address", help = "HTTP listen address", - default_value = "0.0.0.0:8888" + default_value = "0.0.0.0:8080" )] listen_address: String, @@ -52,8 +63,155 @@ struct Args { default_value = "ipc:///tmp/moor_events.sock" )] events_address: String, + + #[arg( + long, + value_name = "public_key", + help = "file containing the pkcs8 ed25519 public key (shared with the daemon), used for authenticating client & host connections", + default_value = "public_key.pem" + )] + public_key: PathBuf, + + #[arg( + long, + value_name = "private_key", + help = "file containing a pkcs8 ed25519 private key (shared with the daemon), used for authenticating client & host connections", + default_value = "private_key.pem" + )] + private_key: PathBuf, +} + +struct Listeners { + listeners: HashMap, + zmq_ctx: tmq::Context, + rpc_address: String, + events_address: String, + kill_switch: Arc, +} + +impl Listeners { + pub fn new( + zmq_ctx: tmq::Context, + rpc_address: String, + events_address: String, + kill_switch: Arc, + ) -> ( + Self, + tokio::sync::mpsc::Receiver, + ListenersClient, + ) { + let (tx, rx) = tokio::sync::mpsc::channel(100); + let listeners = Self { + listeners: HashMap::new(), + zmq_ctx, + rpc_address, + events_address, + 
kill_switch, + }; + let listeners_client = ListenersClient::new(tx); + (listeners, rx, listeners_client) + } + + pub async fn run( + &mut self, + mut listeners_channel: tokio::sync::mpsc::Receiver, + ) { + self.zmq_ctx + .set_io_threads(8) + .expect("Unable to set ZMQ IO threads"); + + loop { + if self.kill_switch.load(std::sync::atomic::Ordering::Relaxed) { + info!("Host kill switch activated, stopping..."); + return; + } + + match listeners_channel.recv().await { + Some(ListenersMessage::AddListener(handler, addr)) => { + let ws_host = WebHost::new( + self.rpc_address.clone(), + self.events_address.clone(), + handler, + ); + let main_router = match mk_routes(ws_host) { + Ok(mr) => mr, + Err(e) => { + warn!(?e, "Unable to create main router"); + return; + } + }; + + let listener = TcpListener::bind(addr) + .await + .expect("Unable to bind listener"); + let (terminate_send, terminate_receive) = tokio::sync::watch::channel(false); + self.listeners + .insert(addr, Listener::new(terminate_send, handler)); + + // One task per listener. + tokio::spawn(async move { + let mut term_receive = terminate_receive.clone(); + select! 
{ + _ = term_receive.changed() => { + info!("Listener terminated, stopping..."); + } + _ = Listener::serve(listener, main_router) => { + info!("Listener exited, restarting..."); + } + } + }); + } + Some(ListenersMessage::RemoveListener(addr)) => { + let listener = self.listeners.remove(&addr); + info!(?addr, "Removing listener"); + if let Some(listener) = listener { + listener + .terminate + .send(true) + .expect("Unable to send terminate message"); + } + } + Some(ListenersMessage::GetListeners(tx)) => { + let listeners = self + .listeners + .iter() + .map(|(addr, listener)| (listener.handler_object, *addr)) + .collect(); + tx.send(listeners).expect("Unable to send listeners list"); + } + None => { + warn!("Listeners channel closed, stopping..."); + return; + } + } + } + } +} +pub struct Listener { + pub(crate) handler_object: Objid, + pub(crate) terminate: tokio::sync::watch::Sender, } +impl Listener { + pub fn new(terminate: tokio::sync::watch::Sender, handler_object: Objid) -> Self { + Self { + handler_object, + terminate, + } + } + + pub async fn serve(listener: TcpListener, main_router: Router) -> eyre::Result<()> { + let addr = listener.local_addr()?; + info!("Listening on {:?}", addr); + axum::serve( + listener, + main_router.into_make_service_with_connect_info::(), + ) + .await?; + info!("Done listening on {:?}", addr); + Ok(()) + } +} fn mk_routes(web_host: WebHost) -> eyre::Result { let webhost_router = Router::new() .route( @@ -105,23 +263,71 @@ async fn main() -> Result<(), eyre::Error> { tracing::subscriber::set_global_default(main_subscriber) .expect("Unable to set configure logging"); - let ws_host = WebHost::new(args.rpc_address, args.events_address); + let mut hup_signal = + signal(SignalKind::hangup()).expect("Unable to register HUP signal handler"); + let mut stop_signal = + signal(SignalKind::interrupt()).expect("Unable to register STOP signal handler"); - let main_router = mk_routes(ws_host).expect("Unable to create main router"); + let 
kill_switch = Arc::new(AtomicBool::new(false)); - let address = &args.listen_address.parse::().unwrap(); - info!(address=?address, "Listening"); + let keypair = load_keypair(&args.public_key, &args.private_key) + .expect("Unable to load keypair from public and private key files"); + let host_token = make_host_token(&keypair, HostType::TCP); - let listener = TcpListener::bind(address) - .await - .expect("Unable to bind HTTP listener"); + let zmq_ctx = tmq::Context::new(); + + let (mut listeners_server, listeners_channel, listeners) = Listeners::new( + zmq_ctx.clone(), + args.rpc_address.clone(), + args.events_address.clone(), + kill_switch.clone(), + ); + let listeners_thread = tokio::spawn(async move { + listeners_server.run(listeners_channel).await; + }); - axum::serve( - listener, - main_router.into_make_service_with_connect_info::(), + let rpc_client = start_host_session( + host_token.clone(), + zmq_ctx.clone(), + args.rpc_address.clone(), + kill_switch.clone(), + listeners.clone(), ) .await - .unwrap(); + .expect("Unable to establish initial host session"); + + listeners + .add_listener(SYSTEM_OBJECT, args.listen_address.parse().unwrap()) + .await + .expect("Unable to start default listener"); + + let host_listen_loop = proces_hosts_events( + rpc_client, + host_token, + zmq_ctx.clone(), + args.events_address.clone(), + args.listen_address.clone(), + kill_switch.clone(), + listeners.clone(), + HostType::TCP, + ); + select! { + _ = host_listen_loop => { + info!("Host events loop exited."); + }, + _ = listeners_thread => { + info!("Listener set exited."); + } + _ = hup_signal.recv() => { + info!("HUP received, stopping..."); + kill_switch.store(true, std::sync::atomic::Ordering::SeqCst); + }, + _ = stop_signal.recv() => { + info!("STOP received, stopping..."); + kill_switch.store(true, std::sync::atomic::Ordering::SeqCst); + } + } + info!("Done."); Ok(()) }