author    Owen Jacobson <owen@grimoire.ca>    2024-09-25 01:05:38 -0400
committer Owen Jacobson <owen@grimoire.ca>    2024-09-25 01:05:38 -0400
commit    af7ece7dd5433051d67526ae15ad64f0f5b5e568 (patch)
tree      636b0e804ea1f0fbf5675ec6d60aa7972822e821 /src/events
parent    0b89800d55c21aaa11d69c1dba89277cc4a46809 (diff)
Code organization changes considered during implementation of vector-of-sequence-numbers stream resume.
Diffstat (limited to 'src/events')
 src/events/app/broadcaster.rs (renamed from src/events/app.rs) |   8
 src/events/app/events.rs                                       | 138
 src/events/app/mod.rs                                          |   5
 src/events/extract.rs                                          |  86
 src/events/mod.rs                                              |   1
 src/events/routes.rs                                           |  12
 src/events/routes/test.rs                                      |   3
7 files changed, 240 insertions, 13 deletions
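
The "vector-of-sequence-numbers stream resume" in the commit message refers to clients resuming a subscription by reporting, for each channel, the sequence number of the last event they saw; in routes.rs below, that arrives as a map consulted per channel. A rough illustration with stand-in types (nothing here is part of the commit; the real types are `channel::Id` and `broadcast::Sequence`):

    use std::collections::HashMap;

    type ChannelId = String; // stand-in for channel::Id
    type Sequence = u64; // stand-in for broadcast::Sequence

    // Look up one channel's resume point, mirroring
    // `resume_at.get(&channel).copied()` in routes.rs below.
    fn resume_point(
        resume_at: &HashMap<ChannelId, Sequence>,
        channel: &ChannelId,
    ) -> Option<Sequence> {
        resume_at.get(channel).copied()
    }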
diff --git a/src/events/app.rs b/src/events/app/broadcaster.rs
index 99e849e..6a1219a 100644
--- a/src/events/app.rs
+++ b/src/events/app/broadcaster.rs
@@ -6,8 +6,10 @@ use sqlx::sqlite::SqlitePool;
 use tokio::sync::broadcast::{channel, Sender};
 use tokio_stream::wrappers::{errors::BroadcastStreamRecvError, BroadcastStream};
 
-use super::repo::broadcast;
-use crate::repo::channel::{self, Provider as _};
+use crate::{
+    events::repo::broadcast,
+    repo::channel::{self, Provider as _},
+};
 
 // Clones will share the same senders collection.
 #[derive(Clone)]
@@ -69,7 +71,7 @@ impl Broadcaster {
     // panic: if ``channel`` has not been previously registered, and was not
     // part of the initial set of channels.
-    pub fn listen(
+    pub fn subscribe(
         &self,
         channel: &channel::Id,
     ) -> impl Stream<Item = broadcast::Message> + std::fmt::Debug {
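
For callers, the `listen` → `subscribe` rename is mechanical. A hypothetical call site (assuming a `Broadcaster` value and a `channel::Id` in scope) goes from `broadcaster.listen(&channel_id)` to:

    // Yields an impl Stream of broadcast::Message for the channel;
    // panics if the channel was never registered, as documented above.
    let messages = broadcaster.subscribe(&channel_id);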
diff --git a/src/events/app/events.rs b/src/events/app/events.rs
new file mode 100644
index 0000000..a8814c9
--- /dev/null
+++ b/src/events/app/events.rs
@@ -0,0 +1,138 @@
+use chrono::TimeDelta;
+use futures::{
+    future,
+    stream::{self, StreamExt as _},
+    Stream,
+};
+use sqlx::sqlite::SqlitePool;
+
+use super::Broadcaster;
+use crate::{
+    clock::DateTime,
+    events::repo::broadcast::{self, Provider as _},
+    repo::{
+        channel::{self, Provider as _},
+        error::NotFound as _,
+        login::Login,
+    },
+};
+
+pub struct Events<'a> {
+    db: &'a SqlitePool,
+    broadcaster: &'a Broadcaster,
+}
+
+impl<'a> Events<'a> {
+    pub const fn new(db: &'a SqlitePool, broadcaster: &'a Broadcaster) -> Self {
+        Self { db, broadcaster }
+    }
+
+    pub async fn send(
+        &self,
+        login: &Login,
+        channel: &channel::Id,
+        body: &str,
+        sent_at: &DateTime,
+    ) -> Result<broadcast::Message, EventsError> {
+        let mut tx = self.db.begin().await?;
+        let channel = tx
+            .channels()
+            .by_id(channel)
+            .await
+            .not_found(|| EventsError::ChannelNotFound(channel.clone()))?;
+        let message = tx
+            .broadcast()
+            .create(login, &channel, body, sent_at)
+            .await?;
+        tx.commit().await?;
+
+        self.broadcaster.broadcast(&channel.id, &message);
+        Ok(message)
+    }
+
+    pub async fn subscribe(
+        &self,
+        channel: &channel::Id,
+        subscribed_at: &DateTime,
+        resume_at: Option<broadcast::Sequence>,
+    ) -> Result<impl Stream<Item = broadcast::Message> + std::fmt::Debug, EventsError> {
+        // Somewhat arbitrarily, expire after 90 days.
+        let expire_at = subscribed_at.to_owned() - TimeDelta::days(90);
+
+        let mut tx = self.db.begin().await?;
+        let channel = tx
+            .channels()
+            .by_id(channel)
+            .await
+            .not_found(|| EventsError::ChannelNotFound(channel.clone()))?;
+
+        // Subscribe before retrieving, to catch messages broadcast while we're
+        // querying the DB. We'll prune out duplicates later.
+        let live_messages = self.broadcaster.subscribe(&channel.id);
+
+        tx.broadcast().expire(&expire_at).await?;
+        let stored_messages = tx.broadcast().replay(&channel, resume_at).await?;
+        tx.commit().await?;
+
+        let resume_broadcast_at = stored_messages
+            .last()
+            .map(|message| message.sequence)
+            .or(resume_at);
+
+        // This should always be the case, up to integer rollover, primarily
+        // because every message in stored_messages has a sequence not less
+        // than `resume_at`, or `resume_at` is None. We use the last message
+        // (if any) to decide when to resume the `live_messages` stream.
+        //
+        // It probably simplifies to assert!(resume_at <= resume_broadcast_at), but
+        // this form captures more of the reasoning.
+        assert!(
+            (resume_at.is_none() && resume_broadcast_at.is_none())
+                || (stored_messages.is_empty() && resume_at == resume_broadcast_at)
+                || resume_at < resume_broadcast_at
+        );
+
+        // no skip_expired or resume transforms for stored_messages, as it's
+        // constructed not to contain messages meeting either criterion.
+        //
+        // * skip_expired is redundant with the `tx.broadcast().expire(…)` call;
+        // * resume is redundant with the resume_at argument to
+        //   `tx.broadcast().replay(…)`.
+        let stored_messages = stream::iter(stored_messages);
+        let live_messages = live_messages
+            // Sure, it's temporally improbable that we'll ever skip a message
+            // that's 90 days old, but there's no reason not to be thorough.
+            .filter(Self::skip_expired(&expire_at))
+            // Filtering on the broadcast resume point filters out messages
+            // before resume_at, and filters out messages duplicated from
+            // stored_messages.
+            .filter(Self::resume(resume_broadcast_at));
+
+        Ok(stored_messages.chain(live_messages))
+    }
+
+    fn resume(
+        resume_at: Option<broadcast::Sequence>,
+    ) -> impl for<'m> FnMut(&'m broadcast::Message) -> future::Ready<bool> {
+        move |msg| {
+            future::ready(match resume_at {
+                None => true,
+                Some(resume_at) => msg.sequence > resume_at,
+            })
+        }
+    }
+    fn skip_expired(
+        expire_at: &DateTime,
+    ) -> impl for<'m> FnMut(&'m broadcast::Message) -> future::Ready<bool> {
+        let expire_at = expire_at.to_owned();
+        move |msg| future::ready(msg.sent_at > expire_at)
+    }
+}
+
+#[derive(Debug, thiserror::Error)]
+pub enum EventsError {
+    #[error("channel {0} not found")]
+    ChannelNotFound(channel::Id),
+    #[error(transparent)]
+    DatabaseError(#[from] sqlx::Error),
+}
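
The ordering inside `subscribe` is the load-bearing detail: subscribe to the live feed first, then read the stored backlog, then drop live messages the backlog already covers. A stripped-down sketch of the same pattern with simplified types (an illustration, not this module's actual API):

    use futures::{future, stream, Stream, StreamExt as _};

    #[derive(Clone, Debug)]
    struct Message {
        sequence: u64,
    }

    /// Chain a stored backlog ahead of a live feed. Any duplicates are
    /// messages broadcast while the backlog query ran, and all of those
    /// carry sequences at or below the backlog's last sequence, so
    /// filtering on that sequence removes them.
    fn backlog_then_live(
        stored: Vec<Message>,
        live: impl Stream<Item = Message>,
    ) -> impl Stream<Item = Message> {
        let resume_at = stored.last().map(|m| m.sequence);
        stream::iter(stored).chain(live.filter(move |m| {
            // Pass everything when there was no backlog; otherwise skip
            // anything the backlog already delivered.
            future::ready(resume_at.map_or(true, |seq| m.sequence > seq))
        }))
    }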
diff --git a/src/events/app/mod.rs b/src/events/app/mod.rs
new file mode 100644
index 0000000..03a7da2
--- /dev/null
+++ b/src/events/app/mod.rs
@@ -0,0 +1,5 @@
+mod broadcaster;
+mod events;
+
+pub use self::broadcaster::Broadcaster;
+pub use self::events::{Events, EventsError};
diff --git a/src/events/extract.rs b/src/events/extract.rs
new file mode 100644
index 0000000..683c1f9
--- /dev/null
+++ b/src/events/extract.rs
@@ -0,0 +1,86 @@
+use std::ops::Deref;
+
+use axum::{
+    extract::FromRequestParts,
+    http::{request::Parts, HeaderName, HeaderValue},
+};
+use axum_extra::typed_header::TypedHeader;
+use serde::{de::DeserializeOwned, Serialize};
+
+/// A typed header. When used as a bare extractor, reads from the
+/// `Last-Event-Id` HTTP header.
+pub struct LastEventId<T>(pub T);
+
+static LAST_EVENT_ID: HeaderName = HeaderName::from_static("last-event-id");
+
+impl<T> headers::Header for LastEventId<T>
+where
+    T: Serialize + DeserializeOwned,
+{
+    fn name() -> &'static HeaderName {
+        &LAST_EVENT_ID
+    }
+
+    fn decode<'i, I>(values: &mut I) -> Result<Self, headers::Error>
+    where
+        I: Iterator<Item = &'i HeaderValue>,
+    {
+        let value = values.next().ok_or_else(headers::Error::invalid)?;
+        let value = value.to_str().map_err(|_| headers::Error::invalid())?;
+        let value = serde_json::from_str(value).map_err(|_| headers::Error::invalid())?;
+        Ok(Self(value))
+    }
+
+    fn encode<E>(&self, values: &mut E)
+    where
+        E: Extend<HeaderValue>,
+    {
+        let Self(value) = self;
+        // Must panic or suppress; the trait provides no other options.
+        let value = serde_json::to_string(value).expect("value can be encoded as JSON");
+        let value = HeaderValue::from_str(&value).expect("LastEventId is a valid header value");
+
+        values.extend(std::iter::once(value));
+    }
+}
+
+#[async_trait::async_trait]
+impl<S, T> FromRequestParts<S> for LastEventId<T>
+where
+    S: Send + Sync,
+    T: Serialize + DeserializeOwned,
+{
+    type Rejection = <TypedHeader<Self> as FromRequestParts<S>>::Rejection;
+
+    async fn from_request_parts(parts: &mut Parts, state: &S) -> Result<Self, Self::Rejection> {
+        // This is purely for ergonomics: it allows `LastEventId` to be extracted
+        // without having to wrap it in `TypedHeader<>`. Callers _can_ still do
+        // that, but they aren't forced to.
+        let TypedHeader(last_event_id) =
+            TypedHeader::<Self>::from_request_parts(parts, state).await?;
+
+        Ok(last_event_id)
+    }
+}
+
+impl<T> Deref for LastEventId<T> {
+    type Target = T;
+
+    fn deref(&self) -> &Self::Target {
+        let Self(header) = self;
+        header
+    }
+}
+
+impl<T> From<T> for LastEventId<T> {
+    fn from(value: T) -> Self {
+        Self(value)
+    }
+}
+
+impl<T> LastEventId<T> {
+    pub fn into_inner(self) -> T {
+        let Self(value) = self;
+        value
+    }
+}
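
Because `LastEventId` implements `FromRequestParts`, it slots into a handler's argument list like any other axum extractor. A hypothetical handler resuming from a JSON-encoded map of channel names to sequence numbers (the handler and its payload type are illustrative, not part of this commit):

    use std::collections::HashMap;

    use axum::response::IntoResponse;

    // Assumes `use super::extract::LastEventId;` within this crate. The
    // header value is JSON, so any Serialize + DeserializeOwned payload
    // works, including a per-channel map of sequence numbers.
    async fn stream_events(
        LastEventId(resume_at): LastEventId<HashMap<String, u64>>,
    ) -> impl IntoResponse {
        format!("resuming {} channel(s)", resume_at.len())
    }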
diff --git a/src/events/mod.rs b/src/events/mod.rs
index f67ea04..e76d67c 100644
--- a/src/events/mod.rs
+++ b/src/events/mod.rs
@@ -1,4 +1,5 @@
 pub mod app;
+mod extract;
 pub mod repo;
 mod routes;
diff --git a/src/events/routes.rs b/src/events/routes.rs
index 7731680..5181370 100644
--- a/src/events/routes.rs
+++ b/src/events/routes.rs
@@ -16,13 +16,12 @@ use futures::{
     stream::{self, Stream, StreamExt as _, TryStreamExt as _},
 };
 
-use super::repo::broadcast;
+use super::{extract::LastEventId, repo::broadcast};
 use crate::{
     app::App,
-    channel::app::EventsError,
     clock::RequestedAt,
     error::InternalError,
-    header::LastEventId,
+    events::app::EventsError,
     repo::{channel, login::Login},
 };
 
@@ -67,8 +66,8 @@ async fn events(
 
     let resume_at = resume_at.get(&channel).copied();
     let events = app
-        .channels()
-        .events(&channel, &now, resume_at)
+        .events()
+        .subscribe(&channel, &now, resume_at)
         .await?
         .map(ChannelEvent::wrap(channel));
 
@@ -122,9 +121,6 @@ impl IntoResponse for ErrorResponse {
             not_found @ EventsError::ChannelNotFound(_) => {
                 (StatusCode::NOT_FOUND, not_found.to_string()).into_response()
             }
-            resume_at @ EventsError::ResumeAtError(_) => {
-                (StatusCode::BAD_REQUEST, resume_at.to_string()).into_response()
-            }
             other => InternalError::from(other).into_response(),
         }
     }
diff --git a/src/events/routes/test.rs b/src/events/routes/test.rs
index 131c751..d3f3fd6 100644
--- a/src/events/routes/test.rs
+++ b/src/events/routes/test.rs
@@ -6,8 +6,7 @@ use futures::{
 };
 
 use crate::{
-    channel::app,
-    events::routes,
+    events::{app, routes},
     repo::channel::{self},
     test::fixtures::{self, future::Immediately as _},
 };
};