summaryrefslogtreecommitdiff
path: root/src/events/app
diff options
context:
space:
mode:
Diffstat (limited to 'src/events/app')
-rw-r--r--src/events/app/broadcaster.rs116
-rw-r--r--src/events/app/events.rs138
-rw-r--r--src/events/app/mod.rs5
3 files changed, 259 insertions, 0 deletions
diff --git a/src/events/app/broadcaster.rs b/src/events/app/broadcaster.rs
new file mode 100644
index 0000000..6a1219a
--- /dev/null
+++ b/src/events/app/broadcaster.rs
@@ -0,0 +1,116 @@
+use std::collections::{hash_map::Entry, HashMap};
+use std::sync::{Arc, Mutex, MutexGuard};
+
+use futures::{future, stream::StreamExt as _, Stream};
+use sqlx::sqlite::SqlitePool;
+use tokio::sync::broadcast::{channel, Sender};
+use tokio_stream::wrappers::{errors::BroadcastStreamRecvError, BroadcastStream};
+
+use crate::{
+ events::repo::broadcast,
+ repo::channel::{self, Provider as _},
+};
+
// Fan-out hub holding one broadcast sender per chat channel.
//
// Clones will share the same senders collection.
#[derive(Clone)]
pub struct Broadcaster {
    // The use of std::sync::Mutex, and not tokio::sync::Mutex, follows Tokio's
    // own advice: <https://tokio.rs/tokio/tutorial/shared-state>. Methods that
    // lock it must be sync.
    senders: Arc<Mutex<HashMap<channel::Id, Sender<broadcast::Message>>>>,
}
+
+impl Broadcaster {
+ pub async fn from_database(db: &SqlitePool) -> Result<Self, sqlx::Error> {
+ let mut tx = db.begin().await?;
+ let channels = tx.channels().all().await?;
+ tx.commit().await?;
+
+ let channels = channels.iter().map(|c| &c.id);
+ let broadcaster = Self::new(channels);
+ Ok(broadcaster)
+ }
+
+ fn new<'i>(channels: impl IntoIterator<Item = &'i channel::Id>) -> Self {
+ let senders: HashMap<_, _> = channels
+ .into_iter()
+ .cloned()
+ .map(|id| (id, Self::make_sender()))
+ .collect();
+
+ Self {
+ senders: Arc::new(Mutex::new(senders)),
+ }
+ }
+
+ // panic: if ``channel`` is already registered.
+ pub fn register_channel(&self, channel: &channel::Id) {
+ match self.senders().entry(channel.clone()) {
+ // This ever happening indicates a serious logic error.
+ Entry::Occupied(_) => panic!("duplicate channel registration for channel {channel}"),
+ Entry::Vacant(entry) => {
+ entry.insert(Self::make_sender());
+ }
+ }
+ }
+
+ // panic: if ``channel`` has not been previously registered, and was not
+ // part of the initial set of channels.
+ pub fn broadcast(&self, channel: &channel::Id, message: &broadcast::Message) {
+ let tx = self.sender(channel);
+
+ // Per the Tokio docs, the returned error is only used to indicate that
+ // there are no receivers. In this use case, that's fine; a lack of
+ // listening consumers (chat clients) when a message is sent isn't an
+ // error.
+ //
+ // The successful return value, which includes the number of active
+ // receivers, also isn't that interesting to us.
+ let _ = tx.send(message.clone());
+ }
+
+ // panic: if ``channel`` has not been previously registered, and was not
+ // part of the initial set of channels.
+ pub fn subscribe(
+ &self,
+ channel: &channel::Id,
+ ) -> impl Stream<Item = broadcast::Message> + std::fmt::Debug {
+ let rx = self.sender(channel).subscribe();
+
+ BroadcastStream::from(rx)
+ .take_while(|r| {
+ future::ready(match r {
+ Ok(_) => true,
+ // Stop the stream here. This will disconnect SSE clients
+ // (see `routes.rs`), who will then resume from
+ // `Last-Event-ID`, allowing them to catch up by reading
+ // the skipped messages from the database.
+ Err(BroadcastStreamRecvError::Lagged(_)) => false,
+ })
+ })
+ .map(|r| {
+ // Since the previous transform stops at the first error, this
+ // should always hold.
+ //
+ // See also <https://users.rust-lang.org/t/taking-from-stream-while-ok/48854>.
+ r.expect("after filtering, only `Ok` messages should remain")
+ })
+ }
+
+ // panic: if ``channel`` has not been previously registered, and was not
+ // part of the initial set of channels.
+ fn sender(&self, channel: &channel::Id) -> Sender<broadcast::Message> {
+ self.senders()[channel].clone()
+ }
+
+ fn senders(&self) -> MutexGuard<HashMap<channel::Id, Sender<broadcast::Message>>> {
+ self.senders.lock().unwrap() // propagate panics when mutex is poisoned
+ }
+
+ fn make_sender() -> Sender<broadcast::Message> {
+ // Queue depth of 16 chosen entirely arbitrarily. Don't read too much
+ // into it.
+ let (tx, _) = channel(16);
+ tx
+ }
+}
diff --git a/src/events/app/events.rs b/src/events/app/events.rs
new file mode 100644
index 0000000..a8814c9
--- /dev/null
+++ b/src/events/app/events.rs
@@ -0,0 +1,138 @@
+use chrono::TimeDelta;
+use futures::{
+ future,
+ stream::{self, StreamExt as _},
+ Stream,
+};
+use sqlx::sqlite::SqlitePool;
+
+use super::Broadcaster;
+use crate::{
+ clock::DateTime,
+ events::repo::broadcast::{self, Provider as _},
+ repo::{
+ channel::{self, Provider as _},
+ error::NotFound as _,
+ login::Login,
+ },
+};
+
// Application-level event service: persists channel messages and fans them
// out to live subscribers via the shared `Broadcaster`.
pub struct Events<'a> {
    // Pool used to open a short-lived transaction per operation.
    db: &'a SqlitePool,
    // Shared fan-out of committed messages to connected subscribers.
    broadcaster: &'a Broadcaster,
}
+
+impl<'a> Events<'a> {
+ pub const fn new(db: &'a SqlitePool, broadcaster: &'a Broadcaster) -> Self {
+ Self { db, broadcaster }
+ }
+
+ pub async fn send(
+ &self,
+ login: &Login,
+ channel: &channel::Id,
+ body: &str,
+ sent_at: &DateTime,
+ ) -> Result<broadcast::Message, EventsError> {
+ let mut tx = self.db.begin().await?;
+ let channel = tx
+ .channels()
+ .by_id(channel)
+ .await
+ .not_found(|| EventsError::ChannelNotFound(channel.clone()))?;
+ let message = tx
+ .broadcast()
+ .create(login, &channel, body, sent_at)
+ .await?;
+ tx.commit().await?;
+
+ self.broadcaster.broadcast(&channel.id, &message);
+ Ok(message)
+ }
+
+ pub async fn subscribe(
+ &self,
+ channel: &channel::Id,
+ subscribed_at: &DateTime,
+ resume_at: Option<broadcast::Sequence>,
+ ) -> Result<impl Stream<Item = broadcast::Message> + std::fmt::Debug, EventsError> {
+ // Somewhat arbitrarily, expire after 90 days.
+ let expire_at = subscribed_at.to_owned() - TimeDelta::days(90);
+
+ let mut tx = self.db.begin().await?;
+ let channel = tx
+ .channels()
+ .by_id(channel)
+ .await
+ .not_found(|| EventsError::ChannelNotFound(channel.clone()))?;
+
+ // Subscribe before retrieving, to catch messages broadcast while we're
+ // querying the DB. We'll prune out duplicates later.
+ let live_messages = self.broadcaster.subscribe(&channel.id);
+
+ tx.broadcast().expire(&expire_at).await?;
+ let stored_messages = tx.broadcast().replay(&channel, resume_at).await?;
+ tx.commit().await?;
+
+ let resume_broadcast_at = stored_messages
+ .last()
+ .map(|message| message.sequence)
+ .or(resume_at);
+
+ // This should always be the case, up to integer rollover, primarily
+ // because every message in stored_messages has a sequence not less
+ // than `resume_at`, or `resume_at` is None. We use the last message
+ // (if any) to decide when to resume the `live_messages` stream.
+ //
+ // It probably simplifies to assert!(resume_at <= resume_broadcast_at), but
+ // this form captures more of the reasoning.
+ assert!(
+ (resume_at.is_none() && resume_broadcast_at.is_none())
+ || (stored_messages.is_empty() && resume_at == resume_broadcast_at)
+ || resume_at < resume_broadcast_at
+ );
+
+ // no skip_expired or resume transforms for stored_messages, as it's
+ // constructed not to contain messages meeting either criterion.
+ //
+ // * skip_expired is redundant with the `tx.broadcasts().expire(…)` call;
+ // * resume is redundant with the resume_at argument to
+ // `tx.broadcasts().replay(…)`.
+ let stored_messages = stream::iter(stored_messages);
+ let live_messages = live_messages
+ // Sure, it's temporally improbable that we'll ever skip a message
+ // that's 90 days old, but there's no reason not to be thorough.
+ .filter(Self::skip_expired(&expire_at))
+ // Filtering on the broadcast resume point filters out messages
+ // before resume_at, and filters out messages duplicated from
+ // stored_messages.
+ .filter(Self::resume(resume_broadcast_at));
+
+ Ok(stored_messages.chain(live_messages))
+ }
+
+ fn resume(
+ resume_at: Option<broadcast::Sequence>,
+ ) -> impl for<'m> FnMut(&'m broadcast::Message) -> future::Ready<bool> {
+ move |msg| {
+ future::ready(match resume_at {
+ None => true,
+ Some(resume_at) => msg.sequence > resume_at,
+ })
+ }
+ }
+ fn skip_expired(
+ expire_at: &DateTime,
+ ) -> impl for<'m> FnMut(&'m broadcast::Message) -> future::Ready<bool> {
+ let expire_at = expire_at.to_owned();
+ move |msg| future::ready(msg.sent_at > expire_at)
+ }
+}
+
// Errors returned by `Events` operations.
#[derive(Debug, thiserror::Error)]
pub enum EventsError {
    // The requested channel does not exist in the database.
    #[error("channel {0} not found")]
    ChannelNotFound(channel::Id),
    // Any underlying database failure, passed through unchanged.
    #[error(transparent)]
    DatabaseError(#[from] sqlx::Error),
}
diff --git a/src/events/app/mod.rs b/src/events/app/mod.rs
new file mode 100644
index 0000000..03a7da2
--- /dev/null
+++ b/src/events/app/mod.rs
@@ -0,0 +1,5 @@
// Application layer for events: the fan-out broadcaster plus the service
// facade that persists, replays, and streams channel messages.
mod broadcaster;
mod events;

pub use self::broadcaster::Broadcaster;
pub use self::events::{Events, EventsError};