summaryrefslogtreecommitdiff
path: root/src/events
diff options
context:
space:
mode:
author    Owen Jacobson <owen@grimoire.ca>  2024-09-20 23:01:18 -0400
committer Owen Jacobson <owen@grimoire.ca>  2024-09-20 23:05:44 -0400
commit 0a05491930fb34ce7c93c33ea0b7599360483fc7 (patch)
tree   552906477fd81c6687c0ca9c6bdc25e22461b52a /src/events
parent 22348bfa35f009e62abe2f30863e0434079a1fe2 (diff)
Push events into a module structure consistent with the rest of the project.
Diffstat (limited to 'src/events')
-rw-r--r--src/events/app.rs111
-rw-r--r--src/events/mod.rs5
-rw-r--r--src/events/repo/broadcast.rs121
-rw-r--r--src/events/repo/mod.rs1
-rw-r--r--src/events/routes.rs133
5 files changed, 371 insertions, 0 deletions
diff --git a/src/events/app.rs b/src/events/app.rs
new file mode 100644
index 0000000..dfb23d7
--- /dev/null
+++ b/src/events/app.rs
@@ -0,0 +1,111 @@
+use std::collections::{hash_map::Entry, HashMap};
+use std::sync::{Arc, Mutex, MutexGuard};
+
+use futures::{future, stream::StreamExt as _, Stream};
+use sqlx::sqlite::SqlitePool;
+use tokio::sync::broadcast::{channel, Sender};
+use tokio_stream::wrappers::{errors::BroadcastStreamRecvError, BroadcastStream};
+
+use super::repo::broadcast;
+use crate::repo::channel::{self, Provider as _};
+
// Fan-out hub for live chat messages: one tokio broadcast channel per chat
// channel. Clones will share the same senders collection.
#[derive(Clone)]
pub struct Broadcaster {
    // The use of std::sync::Mutex, and not tokio::sync::Mutex, follows Tokio's
    // own advice: <https://tokio.rs/tokio/tutorial/shared-state>. Methods that
    // lock it must be sync.
    //
    // Each value is the sender half of a tokio broadcast channel; listeners
    // obtain receivers by subscribing to it.
    senders: Arc<Mutex<HashMap<channel::Id, Sender<broadcast::Message>>>>,
}
+
+impl Broadcaster {
+ pub async fn from_database(db: &SqlitePool) -> Result<Self, sqlx::Error> {
+ let mut tx = db.begin().await?;
+ let channels = tx.channels().all().await?;
+ tx.commit().await?;
+
+ let channels = channels.iter().map(|c| &c.id);
+ let broadcaster = Self::new(channels);
+ Ok(broadcaster)
+ }
+
+ fn new<'i>(channels: impl IntoIterator<Item = &'i channel::Id>) -> Self {
+ let senders: HashMap<_, _> = channels
+ .into_iter()
+ .cloned()
+ .map(|id| (id, Self::make_sender()))
+ .collect();
+
+ Self {
+ senders: Arc::new(Mutex::new(senders)),
+ }
+ }
+
+ // panic: if ``channel`` is already registered.
+ pub fn register_channel(&self, channel: &channel::Id) {
+ match self.senders().entry(channel.clone()) {
+ // This ever happening indicates a serious logic error.
+ Entry::Occupied(_) => panic!("duplicate channel registration for channel {channel}"),
+ Entry::Vacant(entry) => {
+ entry.insert(Self::make_sender());
+ }
+ }
+ }
+
+ // panic: if ``channel`` has not been previously registered, and was not
+ // part of the initial set of channels.
+ pub fn broadcast(&self, channel: &channel::Id, message: broadcast::Message) {
+ let tx = self.sender(channel);
+
+ // Per the Tokio docs, the returned error is only used to indicate that
+ // there are no receivers. In this use case, that's fine; a lack of
+ // listening consumers (chat clients) when a message is sent isn't an
+ // error.
+ //
+ // The successful return value, which includes the number of active
+ // receivers, also isn't that interesting to us.
+ let _ = tx.send(message);
+ }
+
+ // panic: if ``channel`` has not been previously registered, and was not
+ // part of the initial set of channels.
+ pub fn listen(&self, channel: &channel::Id) -> impl Stream<Item = broadcast::Message> {
+ let rx = self.sender(channel).subscribe();
+
+ BroadcastStream::from(rx)
+ .take_while(|r| {
+ future::ready(match r {
+ Ok(_) => true,
+ // Stop the stream here. This will disconnect SSE clients
+ // (see `routes.rs`), who will then resume from
+ // `Last-Event-ID`, allowing them to catch up by reading
+ // the skipped messages from the database.
+ Err(BroadcastStreamRecvError::Lagged(_)) => false,
+ })
+ })
+ .map(|r| {
+ // Since the previous transform stops at the first error, this
+ // should always hold.
+ //
+ // See also <https://users.rust-lang.org/t/taking-from-stream-while-ok/48854>.
+ r.expect("after filtering, only `Ok` messages should remain")
+ })
+ }
+
+ // panic: if ``channel`` has not been previously registered, and was not
+ // part of the initial set of channels.
+ fn sender(&self, channel: &channel::Id) -> Sender<broadcast::Message> {
+ self.senders()[channel].clone()
+ }
+
+ fn senders(&self) -> MutexGuard<HashMap<channel::Id, Sender<broadcast::Message>>> {
+ self.senders.lock().unwrap() // propagate panics when mutex is poisoned
+ }
+
+ fn make_sender() -> Sender<broadcast::Message> {
+ // Queue depth of 16 chosen entirely arbitrarily. Don't read too much
+ // into it.
+ let (tx, _) = channel(16);
+ tx
+ }
+}
diff --git a/src/events/mod.rs b/src/events/mod.rs
new file mode 100644
index 0000000..f67ea04
--- /dev/null
+++ b/src/events/mod.rs
@@ -0,0 +1,5 @@
pub mod app;
pub mod repo;
mod routes;

// Re-export the router so callers can mount the events endpoints without
// reaching into the private `routes` module.
pub use self::routes::router;
diff --git a/src/events/repo/broadcast.rs b/src/events/repo/broadcast.rs
new file mode 100644
index 0000000..182203a
--- /dev/null
+++ b/src/events/repo/broadcast.rs
@@ -0,0 +1,121 @@
+use sqlx::{sqlite::Sqlite, SqliteConnection, Transaction};
+
+use crate::{
+ clock::DateTime,
+ repo::{
+ channel::Channel,
+ login::{self, Login},
+ message,
+ },
+};
+
/// Access to the broadcast-message repository from a database handle.
pub trait Provider {
    fn broadcast(&mut self) -> Broadcast;
}
+
impl<'c> Provider for Transaction<'c, Sqlite> {
    // Borrows the transaction's underlying connection for the lifetime of the
    // returned repository handle.
    fn broadcast(&mut self) -> Broadcast {
        Broadcast(self)
    }
}
+
/// Repository for stored chat messages, holding a borrowed connection.
pub struct Broadcast<'t>(&'t mut SqliteConnection);
+
/// A chat message as delivered to listeners, with the sender denormalized
/// into a full [`Login`].
#[derive(Clone, Debug, serde::Serialize)]
pub struct Message {
    pub id: message::Id,
    pub sender: Login,
    pub body: String,
    pub sent_at: DateTime,
}
+
impl<'c> Broadcast<'c> {
    /// Persists a new chat message and returns it in broadcast form.
    ///
    /// The id is generated application-side so the full message can be
    /// returned from the single `insert ... returning` round trip.
    ///
    /// # Errors
    ///
    /// Returns any database error from the insert.
    pub async fn create(
        &mut self,
        sender: &Login,
        channel: &Channel,
        body: &str,
        sent_at: &DateTime,
    ) -> Result<Message, sqlx::Error> {
        let id = message::Id::generate();

        let message = sqlx::query!(
            r#"
            insert into message
                (id, sender, channel, body, sent_at)
            values ($1, $2, $3, $4, $5)
            returning
                id as "id: message::Id",
                sender as "sender: login::Id",
                body,
                sent_at as "sent_at: DateTime"
            "#,
            id,
            sender.id,
            channel.id,
            body,
            sent_at,
        )
        // The caller already holds the full `Login`, so reuse it rather than
        // re-fetching the sender row; the returned `sender` id column is
        // intentionally unused.
        .map(|row| Message {
            id: row.id,
            sender: sender.clone(),
            body: row.body,
            sent_at: row.sent_at,
        })
        .fetch_one(&mut *self.0)
        .await?;

        Ok(message)
    }

    /// Deletes all messages sent strictly before `expire_at`, across every
    /// channel.
    ///
    /// # Errors
    ///
    /// Returns any database error from the delete.
    pub async fn expire(&mut self, expire_at: &DateTime) -> Result<(), sqlx::Error> {
        sqlx::query!(
            r#"
            delete from message
            where sent_at < $1
            "#,
            expire_at,
        )
        .execute(&mut *self.0)
        .await?;

        Ok(())
    }

    /// Loads the stored messages for `channel` in send order.
    ///
    /// With `resume_at` set, only messages sent strictly after that instant
    /// are returned (the `coalesce(..., true)` makes a null `$2` match
    /// everything, i.e. replay from the beginning).
    ///
    /// # Errors
    ///
    /// Returns any database error from the select.
    pub async fn replay(
        &mut self,
        channel: &Channel,
        resume_at: Option<&DateTime>,
    ) -> Result<Vec<Message>, sqlx::Error> {
        let messages = sqlx::query!(
            r#"
            select
                message.id as "id: message::Id",
                login.id as "sender_id: login::Id",
                login.name as sender_name,
                message.body,
                message.sent_at as "sent_at: DateTime"
            from message
            join login on message.sender = login.id
            where channel = $1
            and coalesce(sent_at > $2, true)
            order by sent_at asc
            "#,
            channel.id,
            resume_at,
        )
        .map(|row| Message {
            id: row.id,
            sender: Login {
                id: row.sender_id,
                name: row.sender_name,
            },
            body: row.body,
            sent_at: row.sent_at,
        })
        .fetch_all(&mut *self.0)
        .await?;

        Ok(messages)
    }
}
diff --git a/src/events/repo/mod.rs b/src/events/repo/mod.rs
new file mode 100644
index 0000000..2ed3062
--- /dev/null
+++ b/src/events/repo/mod.rs
@@ -0,0 +1 @@
// Persistence layer for broadcast (chat) messages.
pub mod broadcast;
diff --git a/src/events/routes.rs b/src/events/routes.rs
new file mode 100644
index 0000000..f880c70
--- /dev/null
+++ b/src/events/routes.rs
@@ -0,0 +1,133 @@
+use axum::{
+ extract::State,
+ http::StatusCode,
+ response::{
+ sse::{self, Sse},
+ IntoResponse, Response,
+ },
+ routing::get,
+ Router,
+};
+use axum_extra::extract::Query;
+use chrono::{self, format::SecondsFormat, DateTime};
+use futures::stream::{self, Stream, StreamExt as _, TryStreamExt as _};
+
+use super::repo::broadcast;
+use crate::{
+ app::App,
+ channel::app::EventsError,
+ clock::RequestedAt,
+ error::InternalError,
+ header::LastEventId,
+ repo::{channel, login::Login},
+};
+
+pub fn router() -> Router<App> {
+ Router::new().route("/api/events", get(on_events))
+}
+
/// Query string for `/api/events`.
#[derive(serde::Deserialize)]
struct EventsQuery {
    // Repeated `?channel=...` parameters collect into one Vec (presumably why
    // axum-extra's `Query` extractor is used here rather than axum's —
    // confirm against the extractor docs).
    #[serde(default, rename = "channel")]
    channels: Vec<channel::Id>,
}
+
/// SSE endpoint: streams events from every requested channel as one merged
/// stream.
///
/// A `Last-Event-ID` header, when present, is parsed as an RFC 3339 timestamp
/// and used as the resume point so reconnecting clients catch up on messages
/// they missed.
///
/// Errors: `400` for an unparseable `Last-Event-ID`; `404`/`500` for channel
/// lookup failures (see `ErrorResponse`).
async fn on_events(
    State(app): State<App>,
    RequestedAt(now): RequestedAt,
    _: Login, // requires auth, but doesn't actually care who you are
    last_event_id: Option<LastEventId>,
    Query(query): Query<EventsQuery>,
) -> Result<Events<impl Stream<Item = ChannelEvent<broadcast::Message>>>, ErrorResponse> {
    let resume_at = last_event_id
        .map(|LastEventId(header)| header)
        .map(|header| DateTime::parse_from_rfc3339(&header))
        .transpose()
        // impl From would take more code; this is used once.
        .map_err(ErrorResponse::LastEventIdError)?
        .map(|ts| ts.to_utc());

    // Build one per-channel event stream, sequentially; any failure aborts
    // the whole request via try_collect's short-circuit.
    let streams = stream::iter(query.channels)
        .then(|channel| {
            let app = app.clone();
            async move {
                let events = app
                    .channels()
                    .events(&channel, &now, resume_at.as_ref())
                    .await?
                    .map(ChannelEvent::wrap(channel));

                Ok::<_, EventsError>(events)
            }
        })
        .try_collect::<Vec<_>>()
        .await
        // impl From would take more code; this is used once.
        .map_err(ErrorResponse::EventsError)?;

    // Merge all per-channel streams into a single interleaved stream.
    let stream = stream::select_all(streams);

    Ok(Events(stream))
}
+
// Newtype wrapper giving an event stream an SSE `IntoResponse` impl.
struct Events<S>(S);
+
+impl<S> IntoResponse for Events<S>
+where
+ S: Stream<Item = ChannelEvent<broadcast::Message>> + Send + 'static,
+{
+ fn into_response(self) -> Response {
+ let Self(stream) = self;
+ let stream = stream.map(to_sse_event);
+ Sse::new(stream)
+ .keep_alive(sse::KeepAlive::default())
+ .into_response()
+ }
+}
+
/// Failure modes for `on_events`, each mapped to an HTTP status in the
/// `IntoResponse` impl.
enum ErrorResponse {
    EventsError(EventsError),
    LastEventIdError(chrono::ParseError),
}
+
+impl IntoResponse for ErrorResponse {
+ fn into_response(self) -> Response {
+ match self {
+ Self::EventsError(not_found @ EventsError::ChannelNotFound(_)) => {
+ (StatusCode::NOT_FOUND, not_found.to_string()).into_response()
+ }
+ Self::EventsError(other) => InternalError::from(other).into_response(),
+ Self::LastEventIdError(other) => {
+ (StatusCode::BAD_REQUEST, other.to_string()).into_response()
+ }
+ }
+ }
+}
+
+fn to_sse_event(event: ChannelEvent<broadcast::Message>) -> Result<sse::Event, serde_json::Error> {
+ let data = serde_json::to_string_pretty(&event)?;
+ let event = sse::Event::default()
+ .id(event
+ .message
+ .sent_at
+ .to_rfc3339_opts(SecondsFormat::AutoSi, /* use_z */ true))
+ .data(&data);
+
+ Ok(event)
+}
+
/// A message tagged with the channel it was sent on; the message's own fields
/// are flattened into the serialized object alongside `channel`.
#[derive(serde::Serialize)]
struct ChannelEvent<M> {
    channel: channel::Id,
    #[serde(flatten)]
    message: M,
}
+
+impl<M> ChannelEvent<M> {
+ fn wrap(channel: channel::Id) -> impl Fn(M) -> Self {
+ move |message| Self {
+ channel: channel.clone(),
+ message,
+ }
+ }
+}