From af7ece7dd5433051d67526ae15ad64f0f5b5e568 Mon Sep 17 00:00:00 2001
From: Owen Jacobson
Date: Wed, 25 Sep 2024 01:05:38 -0400
Subject: Code organization changes considered during implementation of vector-of-sequence-numbers stream resume.

---
 src/app.rs                         |  10 ++-
 src/channel/app.rs                 | 129 +---------------------------
 src/channel/routes.rs              |   5 +-
 src/channel/routes/test/on_send.rs |  11 +--
 src/events/app.rs                  | 114 ------------------------------
 src/events/app/broadcaster.rs      | 116 +++++++++++++++++++++++++++++++
 src/events/app/events.rs           | 138 +++++++++++++++++++++++++++++++++++++
 src/events/app/mod.rs              |   5 ++
 src/events/extract.rs              |  86 +++++++++++++++++++++++
 src/events/mod.rs                  |   1 +
 src/events/routes.rs               |  12 ++--
 src/events/routes/test.rs          |   3 +-
 src/header.rs                      |  86 -----------------------
 src/lib.rs                         |   1 -
 src/test/fixtures/message.rs       |   2 +-
 15 files changed, 372 insertions(+), 347 deletions(-)
 delete mode 100644 src/events/app.rs
 create mode 100644 src/events/app/broadcaster.rs
 create mode 100644 src/events/app/events.rs
 create mode 100644 src/events/app/mod.rs
 create mode 100644 src/events/extract.rs
 delete mode 100644 src/header.rs

diff --git a/src/app.rs b/src/app.rs
index e448436..1cf56c9 100644
--- a/src/app.rs
+++ b/src/app.rs
@@ -1,6 +1,10 @@
 use sqlx::sqlite::SqlitePool;
 
-use crate::{channel::app::Channels, events::app::Broadcaster, login::app::Logins};
+use crate::{
+    channel::app::Channels,
+    events::app::{Broadcaster, Events},
+    login::app::Logins,
+};
 
 #[derive(Clone)]
 pub struct App {
@@ -20,6 +24,10 @@ impl App {
         Logins::new(&self.db)
     }
 
+    pub const fn events(&self) -> Events {
+        Events::new(&self.db, &self.broadcaster)
+    }
+
     pub const fn channels(&self) -> Channels {
         Channels::new(&self.db, &self.broadcaster)
     }
diff --git a/src/channel/app.rs b/src/channel/app.rs
index 2da25d2..bb87734 100644
--- a/src/channel/app.rs
+++ b/src/channel/app.rs
@@ -1,22 +1,8 @@
-use chrono::TimeDelta;
-use futures::{
-    future,
-    stream::{self, StreamExt as _},
-    Stream,
-};
 use sqlx::sqlite::SqlitePool;
 
 use crate::{
-    clock::DateTime,
-    events::{
-        app::Broadcaster,
-        repo::broadcast::{self, Provider as _},
-    },
-    repo::{
-        channel::{self, Channel, Provider as _},
-        error::NotFound as _,
-        login::Login,
-    },
+    events::app::Broadcaster,
+    repo::channel::{Channel, Provider as _},
 };
 
 pub struct Channels<'a> {
@@ -49,107 +35,6 @@ impl<'a> Channels<'a> {
 
         Ok(channels)
     }
-
-    pub async fn send(
-        &self,
-        login: &Login,
-        channel: &channel::Id,
-        body: &str,
-        sent_at: &DateTime,
-    ) -> Result<broadcast::Message, EventsError> {
-        let mut tx = self.db.begin().await?;
-        let channel = tx
-            .channels()
-            .by_id(channel)
-            .await
-            .not_found(|| EventsError::ChannelNotFound(channel.clone()))?;
-        let message = tx
-            .broadcast()
-            .create(login, &channel, body, sent_at)
-            .await?;
-        tx.commit().await?;
-
-        self.broadcaster.broadcast(&channel.id, &message);
-        Ok(message)
-    }
-
-    pub async fn events(
-        &self,
-        channel: &channel::Id,
-        subscribed_at: &DateTime,
-        resume_at: Option<broadcast::Sequence>,
-    ) -> Result<impl Stream<Item = broadcast::Message> + std::fmt::Debug, EventsError> {
-        // Somewhat arbitrarily, expire after 90 days.
-        let expire_at = subscribed_at.to_owned() - TimeDelta::days(90);
-
-        let mut tx = self.db.begin().await?;
-        let channel = tx
-            .channels()
-            .by_id(channel)
-            .await
-            .not_found(|| EventsError::ChannelNotFound(channel.clone()))?;
-
-        // Subscribe before retrieving, to catch messages broadcast while we're
-        // querying the DB. We'll prune out duplicates later.
-        let live_messages = self.broadcaster.listen(&channel.id);
-
-        tx.broadcast().expire(&expire_at).await?;
-        let stored_messages = tx.broadcast().replay(&channel, resume_at).await?;
-        tx.commit().await?;
-
-        let resume_broadcast_at = stored_messages
-            .last()
-            .map(|message| message.sequence)
-            .or(resume_at);
-
-        // This should always be the case, up to integer rollover, primarily
-        // because every message in stored_messages has a sequence not less
-        // than `resume_at`, or `resume_at` is None. We use the last message
-        // (if any) to decide when to resume the `live_messages` stream.
-        //
-        // It probably simplifies to assert!(resume_at <= resume_broadcast_at), but
-        // this form captures more of the reasoning.
-        assert!(
-            (resume_at.is_none() && resume_broadcast_at.is_none())
-                || (stored_messages.is_empty() && resume_at == resume_broadcast_at)
-                || resume_at < resume_broadcast_at
-        );
-
-        // no skip_expired or resume transforms for stored_messages, as it's
-        // constructed not to contain messages meeting either criterion.
-        //
-        // * skip_expired is redundant with the `tx.broadcasts().expire(…)` call;
-        // * resume is redundant with the resume_at argument to
-        //   `tx.broadcasts().replay(…)`.
-        let stored_messages = stream::iter(stored_messages);
-        let live_messages = live_messages
-            // Sure, it's temporally improbable that we'll ever skip a message
-            // that's 90 days old, but there's no reason not to be thorough.
-            .filter(Self::skip_expired(&expire_at))
-            // Filtering on the broadcast resume point filters out messages
-            // before resume_at, and filters out messages duplicated from
-            // stored_messages.
-            .filter(Self::resume(resume_broadcast_at));
-
-        Ok(stored_messages.chain(live_messages))
-    }
-
-    fn resume(
-        resume_at: Option<broadcast::Sequence>,
-    ) -> impl for<'m> FnMut(&'m broadcast::Message) -> future::Ready<bool> {
-        move |msg| {
-            future::ready(match resume_at {
-                None => true,
-                Some(resume_at) => msg.sequence > resume_at,
-            })
-        }
-    }
-    fn skip_expired(
-        expire_at: &DateTime,
-    ) -> impl for<'m> FnMut(&'m broadcast::Message) -> future::Ready<bool> {
-        let expire_at = expire_at.to_owned();
-        move |msg| future::ready(msg.sent_at > expire_at)
-    }
 }
 
 #[derive(Debug, thiserror::Error)]
@@ -177,13 +62,3 @@ pub enum InternalError {
     #[error(transparent)]
     DatabaseError(#[from] sqlx::Error),
 }
-
-#[derive(Debug, thiserror::Error)]
-pub enum EventsError {
-    #[error("channel {0} not found")]
-    ChannelNotFound(channel::Id),
-    #[error(transparent)]
-    ResumeAtError(#[from] chrono::ParseError),
-    #[error(transparent)]
-    DatabaseError(#[from] sqlx::Error),
-}
diff --git a/src/channel/routes.rs b/src/channel/routes.rs
index 674c876..bb6cde6 100644
--- a/src/channel/routes.rs
+++ b/src/channel/routes.rs
@@ -6,11 +6,12 @@ use axum::{
     Router,
 };
 
-use super::app::{self, EventsError};
+use super::app;
 use crate::{
     app::App,
     clock::RequestedAt,
     error::InternalError,
+    events::app::EventsError,
     repo::{
         channel::{self, Channel},
         login::Login,
@@ -89,7 +90,7 @@ async fn on_send(
     login: Login,
     Json(request): Json,
 ) -> Result {
-    app.channels()
+    app.events()
         .send(&login, &channel, &request.message, &sent_at)
         .await
         // Could impl `From` here, but it's more code and this is used once.
diff --git a/src/channel/routes/test/on_send.rs b/src/channel/routes/test/on_send.rs
index eab7c32..6690374 100644
--- a/src/channel/routes/test/on_send.rs
+++ b/src/channel/routes/test/on_send.rs
@@ -5,7 +5,8 @@ use axum::{
 };
 use futures::stream::StreamExt;
 use crate::{
-    channel::{app, routes},
+    channel::routes,
+    events::app,
     repo::channel,
     test::fixtures::{self, future::Immediately as _},
 };
@@ -42,8 +43,8 @@ async fn channel_exists() {
     let subscribed_at = fixtures::now();
 
     let mut events = app
-        .channels()
-        .events(&channel.id, &subscribed_at, None)
+        .events()
+        .subscribe(&channel.id, &subscribed_at, None)
         .await
         .expect("subscribing to a valid channel");
 
@@ -99,8 +100,8 @@ async fn messages_in_order() {
     let subscribed_at = fixtures::now();
 
     let events = app
-        .channels()
-        .events(&channel.id, &subscribed_at, None)
+        .events()
+        .subscribe(&channel.id, &subscribed_at, None)
         .await
         .expect("subscribing to a valid channel")
         .take(requests.len());
diff --git a/src/events/app.rs b/src/events/app.rs
deleted file mode 100644
index 99e849e..0000000
--- a/src/events/app.rs
+++ /dev/null
@@ -1,114 +0,0 @@
-use std::collections::{hash_map::Entry, HashMap};
-use std::sync::{Arc, Mutex, MutexGuard};
-
-use futures::{future, stream::StreamExt as _, Stream};
-use sqlx::sqlite::SqlitePool;
-use tokio::sync::broadcast::{channel, Sender};
-use tokio_stream::wrappers::{errors::BroadcastStreamRecvError, BroadcastStream};
-
-use super::repo::broadcast;
-use crate::repo::channel::{self, Provider as _};
-
-// Clones will share the same senders collection.
-#[derive(Clone)]
-pub struct Broadcaster {
-    // The use of std::sync::Mutex, and not tokio::sync::Mutex, follows Tokio's
-    // own advice: <https://tokio.rs/tokio/tutorial/shared-state>. Methods that
-    // lock it must be sync.
-    senders: Arc<Mutex<HashMap<channel::Id, Sender<broadcast::Message>>>>,
-}
-
-impl Broadcaster {
-    pub async fn from_database(db: &SqlitePool) -> Result<Self, sqlx::Error> {
-        let mut tx = db.begin().await?;
-        let channels = tx.channels().all().await?;
-        tx.commit().await?;
-
-        let channels = channels.iter().map(|c| &c.id);
-        let broadcaster = Self::new(channels);
-        Ok(broadcaster)
-    }
-
-    fn new<'i>(channels: impl IntoIterator<Item = &'i channel::Id>) -> Self {
-        let senders: HashMap<_, _> = channels
-            .into_iter()
-            .cloned()
-            .map(|id| (id, Self::make_sender()))
-            .collect();
-
-        Self {
-            senders: Arc::new(Mutex::new(senders)),
-        }
-    }
-
-    // panic: if ``channel`` is already registered.
-    pub fn register_channel(&self, channel: &channel::Id) {
-        match self.senders().entry(channel.clone()) {
-            // This ever happening indicates a serious logic error.
-            Entry::Occupied(_) => panic!("duplicate channel registration for channel {channel}"),
-            Entry::Vacant(entry) => {
-                entry.insert(Self::make_sender());
-            }
-        }
-    }
-
-    // panic: if ``channel`` has not been previously registered, and was not
-    // part of the initial set of channels.
-    pub fn broadcast(&self, channel: &channel::Id, message: &broadcast::Message) {
-        let tx = self.sender(channel);
-
-        // Per the Tokio docs, the returned error is only used to indicate that
-        // there are no receivers. In this use case, that's fine; a lack of
-        // listening consumers (chat clients) when a message is sent isn't an
-        // error.
-        //
-        // The successful return value, which includes the number of active
-        // receivers, also isn't that interesting to us.
-        let _ = tx.send(message.clone());
-    }
-
-    // panic: if ``channel`` has not been previously registered, and was not
-    // part of the initial set of channels.
-    pub fn listen(
-        &self,
-        channel: &channel::Id,
-    ) -> impl Stream<Item = broadcast::Message> + std::fmt::Debug {
-        let rx = self.sender(channel).subscribe();
-
-        BroadcastStream::from(rx)
-            .take_while(|r| {
-                future::ready(match r {
-                    Ok(_) => true,
-                    // Stop the stream here. This will disconnect SSE clients
-                    // (see `routes.rs`), who will then resume from
-                    // `Last-Event-ID`, allowing them to catch up by reading
-                    // the skipped messages from the database.
-                    Err(BroadcastStreamRecvError::Lagged(_)) => false,
-                })
-            })
-            .map(|r| {
-                // Since the previous transform stops at the first error, this
-                // should always hold.
-                //
-                // See also .
-                r.expect("after filtering, only `Ok` messages should remain")
-            })
-    }
-
-    // panic: if ``channel`` has not been previously registered, and was not
-    // part of the initial set of channels.
-    fn sender(&self, channel: &channel::Id) -> Sender<broadcast::Message> {
-        self.senders()[channel].clone()
-    }
-
-    fn senders(&self) -> MutexGuard<HashMap<channel::Id, Sender<broadcast::Message>>> {
-        self.senders.lock().unwrap() // propagate panics when mutex is poisoned
-    }
-
-    fn make_sender() -> Sender<broadcast::Message> {
-        // Queue depth of 16 chosen entirely arbitrarily. Don't read too much
-        // into it.
-        let (tx, _) = channel(16);
-        tx
-    }
-}
diff --git a/src/events/app/broadcaster.rs b/src/events/app/broadcaster.rs
new file mode 100644
index 0000000..6a1219a
--- /dev/null
+++ b/src/events/app/broadcaster.rs
@@ -0,0 +1,116 @@
+use std::collections::{hash_map::Entry, HashMap};
+use std::sync::{Arc, Mutex, MutexGuard};
+
+use futures::{future, stream::StreamExt as _, Stream};
+use sqlx::sqlite::SqlitePool;
+use tokio::sync::broadcast::{channel, Sender};
+use tokio_stream::wrappers::{errors::BroadcastStreamRecvError, BroadcastStream};
+
+use crate::{
+    events::repo::broadcast,
+    repo::channel::{self, Provider as _},
+};
+
+// Clones will share the same senders collection.
+#[derive(Clone)]
+pub struct Broadcaster {
+    // The use of std::sync::Mutex, and not tokio::sync::Mutex, follows Tokio's
+    // own advice: <https://tokio.rs/tokio/tutorial/shared-state>. Methods that
+    // lock it must be sync.
+    senders: Arc<Mutex<HashMap<channel::Id, Sender<broadcast::Message>>>>,
+}
+
+impl Broadcaster {
+    pub async fn from_database(db: &SqlitePool) -> Result<Self, sqlx::Error> {
+        let mut tx = db.begin().await?;
+        let channels = tx.channels().all().await?;
+        tx.commit().await?;
+
+        let channels = channels.iter().map(|c| &c.id);
+        let broadcaster = Self::new(channels);
+        Ok(broadcaster)
+    }
+
+    fn new<'i>(channels: impl IntoIterator<Item = &'i channel::Id>) -> Self {
+        let senders: HashMap<_, _> = channels
+            .into_iter()
+            .cloned()
+            .map(|id| (id, Self::make_sender()))
+            .collect();
+
+        Self {
+            senders: Arc::new(Mutex::new(senders)),
+        }
+    }
+
+    // panic: if ``channel`` is already registered.
+    pub fn register_channel(&self, channel: &channel::Id) {
+        match self.senders().entry(channel.clone()) {
+            // This ever happening indicates a serious logic error.
+            Entry::Occupied(_) => panic!("duplicate channel registration for channel {channel}"),
+            Entry::Vacant(entry) => {
+                entry.insert(Self::make_sender());
+            }
+        }
+    }
+
+    // panic: if ``channel`` has not been previously registered, and was not
+    // part of the initial set of channels.
+    pub fn broadcast(&self, channel: &channel::Id, message: &broadcast::Message) {
+        let tx = self.sender(channel);
+
+        // Per the Tokio docs, the returned error is only used to indicate that
+        // there are no receivers. In this use case, that's fine; a lack of
+        // listening consumers (chat clients) when a message is sent isn't an
+        // error.
+        //
+        // The successful return value, which includes the number of active
+        // receivers, also isn't that interesting to us.
+        let _ = tx.send(message.clone());
+    }
+
+    // panic: if ``channel`` has not been previously registered, and was not
+    // part of the initial set of channels.
+    pub fn subscribe(
+        &self,
+        channel: &channel::Id,
+    ) -> impl Stream<Item = broadcast::Message> + std::fmt::Debug {
+        let rx = self.sender(channel).subscribe();
+
+        BroadcastStream::from(rx)
+            .take_while(|r| {
+                future::ready(match r {
+                    Ok(_) => true,
+                    // Stop the stream here. This will disconnect SSE clients
+                    // (see `routes.rs`), who will then resume from
+                    // `Last-Event-ID`, allowing them to catch up by reading
+                    // the skipped messages from the database.
+                    Err(BroadcastStreamRecvError::Lagged(_)) => false,
+                })
+            })
+            .map(|r| {
+                // Since the previous transform stops at the first error, this
+                // should always hold.
+                //
+                // See also .
+                r.expect("after filtering, only `Ok` messages should remain")
+            })
+    }
+
+    // panic: if ``channel`` has not been previously registered, and was not
+    // part of the initial set of channels.
+    fn sender(&self, channel: &channel::Id) -> Sender<broadcast::Message> {
+        self.senders()[channel].clone()
+    }
+
+    fn senders(&self) -> MutexGuard<HashMap<channel::Id, Sender<broadcast::Message>>> {
+        self.senders.lock().unwrap() // propagate panics when mutex is poisoned
+    }
+
+    fn make_sender() -> Sender<broadcast::Message> {
+        // Queue depth of 16 chosen entirely arbitrarily. Don't read too much
+        // into it.
+        let (tx, _) = channel(16);
+        tx
+    }
+}
diff --git a/src/events/app/events.rs b/src/events/app/events.rs
new file mode 100644
index 0000000..a8814c9
--- /dev/null
+++ b/src/events/app/events.rs
@@ -0,0 +1,138 @@
+use chrono::TimeDelta;
+use futures::{
+    future,
+    stream::{self, StreamExt as _},
+    Stream,
+};
+use sqlx::sqlite::SqlitePool;
+
+use super::Broadcaster;
+use crate::{
+    clock::DateTime,
+    events::repo::broadcast::{self, Provider as _},
+    repo::{
+        channel::{self, Provider as _},
+        error::NotFound as _,
+        login::Login,
+    },
+};
+
+pub struct Events<'a> {
+    db: &'a SqlitePool,
+    broadcaster: &'a Broadcaster,
+}
+
+impl<'a> Events<'a> {
+    pub const fn new(db: &'a SqlitePool, broadcaster: &'a Broadcaster) -> Self {
+        Self { db, broadcaster }
+    }
+
+    pub async fn send(
+        &self,
+        login: &Login,
+        channel: &channel::Id,
+        body: &str,
+        sent_at: &DateTime,
+    ) -> Result<broadcast::Message, EventsError> {
+        let mut tx = self.db.begin().await?;
+        let channel = tx
+            .channels()
+            .by_id(channel)
+            .await
+            .not_found(|| EventsError::ChannelNotFound(channel.clone()))?;
+        let message = tx
+            .broadcast()
+            .create(login, &channel, body, sent_at)
+            .await?;
+        tx.commit().await?;
+
+        self.broadcaster.broadcast(&channel.id, &message);
+        Ok(message)
+    }
+
+    pub async fn subscribe(
+        &self,
+        channel: &channel::Id,
+        subscribed_at: &DateTime,
+        resume_at: Option<broadcast::Sequence>,
+    ) -> Result<impl Stream<Item = broadcast::Message> + std::fmt::Debug, EventsError> {
+        // Somewhat arbitrarily, expire after 90 days.
+        let expire_at = subscribed_at.to_owned() - TimeDelta::days(90);
+
+        let mut tx = self.db.begin().await?;
+        let channel = tx
+            .channels()
+            .by_id(channel)
+            .await
+            .not_found(|| EventsError::ChannelNotFound(channel.clone()))?;
+
+        // Subscribe before retrieving, to catch messages broadcast while we're
+        // querying the DB. We'll prune out duplicates later.
+        let live_messages = self.broadcaster.subscribe(&channel.id);
+
+        tx.broadcast().expire(&expire_at).await?;
+        let stored_messages = tx.broadcast().replay(&channel, resume_at).await?;
+        tx.commit().await?;
+
+        let resume_broadcast_at = stored_messages
+            .last()
+            .map(|message| message.sequence)
+            .or(resume_at);
+
+        // This should always be the case, up to integer rollover, primarily
+        // because every message in stored_messages has a sequence not less
+        // than `resume_at`, or `resume_at` is None. We use the last message
+        // (if any) to decide when to resume the `live_messages` stream.
+        //
+        // It probably simplifies to assert!(resume_at <= resume_broadcast_at), but
+        // this form captures more of the reasoning.
+        assert!(
+            (resume_at.is_none() && resume_broadcast_at.is_none())
+                || (stored_messages.is_empty() && resume_at == resume_broadcast_at)
+                || resume_at < resume_broadcast_at
+        );
+
+        // no skip_expired or resume transforms for stored_messages, as it's
+        // constructed not to contain messages meeting either criterion.
+        //
+        // * skip_expired is redundant with the `tx.broadcasts().expire(…)` call;
+        // * resume is redundant with the resume_at argument to
+        //   `tx.broadcasts().replay(…)`.
+        let stored_messages = stream::iter(stored_messages);
+        let live_messages = live_messages
+            // Sure, it's temporally improbable that we'll ever skip a message
+            // that's 90 days old, but there's no reason not to be thorough.
+            .filter(Self::skip_expired(&expire_at))
+            // Filtering on the broadcast resume point filters out messages
+            // before resume_at, and filters out messages duplicated from
+            // stored_messages.
+            .filter(Self::resume(resume_broadcast_at));
+
+        Ok(stored_messages.chain(live_messages))
+    }
+
+    fn resume(
+        resume_at: Option<broadcast::Sequence>,
+    ) -> impl for<'m> FnMut(&'m broadcast::Message) -> future::Ready<bool> {
+        move |msg| {
+            future::ready(match resume_at {
+                None => true,
+                Some(resume_at) => msg.sequence > resume_at,
+            })
+        }
+    }
+    fn skip_expired(
+        expire_at: &DateTime,
+    ) -> impl for<'m> FnMut(&'m broadcast::Message) -> future::Ready<bool> {
+        let expire_at = expire_at.to_owned();
+        move |msg| future::ready(msg.sent_at > expire_at)
+    }
+}
+
+#[derive(Debug, thiserror::Error)]
+pub enum EventsError {
+    #[error("channel {0} not found")]
+    ChannelNotFound(channel::Id),
+    #[error(transparent)]
+    DatabaseError(#[from] sqlx::Error),
+}
diff --git a/src/events/app/mod.rs b/src/events/app/mod.rs
new file mode 100644
index 0000000..03a7da2
--- /dev/null
+++ b/src/events/app/mod.rs
@@ -0,0 +1,5 @@
+mod broadcaster;
+mod events;
+
+pub use self::broadcaster::Broadcaster;
+pub use self::events::{Events, EventsError};
diff --git a/src/events/extract.rs b/src/events/extract.rs
new file mode 100644
index 0000000..683c1f9
--- /dev/null
+++ b/src/events/extract.rs
@@ -0,0 +1,86 @@
+use std::ops::Deref;
+
+use axum::{
+    extract::FromRequestParts,
+    http::{request::Parts, HeaderName, HeaderValue},
+};
+use axum_extra::typed_header::TypedHeader;
+use serde::{de::DeserializeOwned, Serialize};
+
+/// A typed header. When used as a bare extractor, reads from the
+/// `Last-Event-Id` HTTP header.
+pub struct LastEventId<T>(pub T);
+
+static LAST_EVENT_ID: HeaderName = HeaderName::from_static("last-event-id");
+
+impl<T> headers::Header for LastEventId<T>
+where
+    T: Serialize + DeserializeOwned,
+{
+    fn name() -> &'static HeaderName {
+        &LAST_EVENT_ID
+    }
+
+    fn decode<'i, I>(values: &mut I) -> Result<Self, headers::Error>
+    where
+        I: Iterator<Item = &'i HeaderValue>,
+    {
+        let value = values.next().ok_or_else(headers::Error::invalid)?;
+        let value = value.to_str().map_err(|_| headers::Error::invalid())?;
+        let value = serde_json::from_str(value).map_err(|_| headers::Error::invalid())?;
+        Ok(Self(value))
+    }
+
+    fn encode<E>(&self, values: &mut E)
+    where
+        E: Extend<HeaderValue>,
+    {
+        let Self(value) = self;
+        // Must panic or suppress; the trait provides no other options.
+        let value = serde_json::to_string(value).expect("value can be encoded as JSON");
+        let value = HeaderValue::from_str(&value).expect("LastEventId is a valid header value");
+
+        values.extend(std::iter::once(value));
+    }
+}
+
+#[async_trait::async_trait]
+impl<S, T> FromRequestParts<S> for LastEventId<T>
+where
+    S: Send + Sync,
+    T: Serialize + DeserializeOwned,
+{
+    type Rejection = <TypedHeader<Self> as FromRequestParts<S>>::Rejection;
+
+    async fn from_request_parts(parts: &mut Parts, state: &S) -> Result<Self, Self::Rejection> {
+        // This is purely for ergonomics: it allows `RequestedAt` to be extracted
+        // without having to wrap it in `Extension<>`. Callers _can_ still do that,
+        // but they aren't forced to.
+        let TypedHeader(requested_at) =
+            TypedHeader::<Self>::from_request_parts(parts, state).await?;
+
+        Ok(requested_at)
+    }
+}
+
+impl<T> Deref for LastEventId<T> {
+    type Target = T;
+
+    fn deref(&self) -> &Self::Target {
+        let Self(header) = self;
+        header
+    }
+}
+
+impl<T> From<T> for LastEventId<T> {
+    fn from(value: T) -> Self {
+        Self(value)
+    }
+}
+
+impl<T> LastEventId<T> {
+    pub fn into_inner(self) -> T {
+        let Self(value) = self;
+        value
+    }
+}
diff --git a/src/events/mod.rs b/src/events/mod.rs
index f67ea04..e76d67c 100644
--- a/src/events/mod.rs
+++ b/src/events/mod.rs
@@ -1,4 +1,5 @@
 pub mod app;
+mod extract;
 pub mod repo;
 mod routes;
 
diff --git a/src/events/routes.rs b/src/events/routes.rs
index 7731680..5181370 100644
--- a/src/events/routes.rs
+++ b/src/events/routes.rs
@@ -16,13 +16,12 @@ use futures::{
     stream::{self, Stream, StreamExt as _, TryStreamExt as _},
 };
 
-use super::repo::broadcast;
+use super::{extract::LastEventId, repo::broadcast};
 use crate::{
     app::App,
-    channel::app::EventsError,
     clock::RequestedAt,
     error::InternalError,
-    header::LastEventId,
+    events::app::EventsError,
     repo::{channel, login::Login},
 };
 
@@ -67,8 +66,8 @@ async fn events(
     let resume_at = resume_at.get(&channel).copied();
 
     let events = app
-        .channels()
-        .events(&channel, &now, resume_at)
+        .events()
+        .subscribe(&channel, &now, resume_at)
         .await?
         .map(ChannelEvent::wrap(channel));
 
@@ -122,9 +121,6 @@ impl IntoResponse for ErrorResponse {
             not_found @ EventsError::ChannelNotFound(_) => {
                 (StatusCode::NOT_FOUND, not_found.to_string()).into_response()
             }
-            resume_at @ EventsError::ResumeAtError(_) => {
-                (StatusCode::BAD_REQUEST, resume_at.to_string()).into_response()
-            }
             other => InternalError::from(other).into_response(),
         }
     }
diff --git a/src/events/routes/test.rs b/src/events/routes/test.rs
index 131c751..d3f3fd6 100644
--- a/src/events/routes/test.rs
+++ b/src/events/routes/test.rs
@@ -6,8 +6,7 @@ use futures::{
 };
 
 use crate::{
-    channel::app,
-    events::routes,
+    events::{app, routes},
     repo::channel::{self},
     test::fixtures::{self, future::Immediately as _},
 };
diff --git a/src/header.rs b/src/header.rs
deleted file mode 100644
index 683c1f9..0000000
--- a/src/header.rs
+++ /dev/null
@@ -1,86 +0,0 @@
-use std::ops::Deref;
-
-use axum::{
-    extract::FromRequestParts,
-    http::{request::Parts, HeaderName, HeaderValue},
-};
-use axum_extra::typed_header::TypedHeader;
-use serde::{de::DeserializeOwned, Serialize};
-
-/// A typed header. When used as a bare extractor, reads from the
-/// `Last-Event-Id` HTTP header.
-pub struct LastEventId<T>(pub T);
-
-static LAST_EVENT_ID: HeaderName = HeaderName::from_static("last-event-id");
-
-impl<T> headers::Header for LastEventId<T>
-where
-    T: Serialize + DeserializeOwned,
-{
-    fn name() -> &'static HeaderName {
-        &LAST_EVENT_ID
-    }
-
-    fn decode<'i, I>(values: &mut I) -> Result<Self, headers::Error>
-    where
-        I: Iterator<Item = &'i HeaderValue>,
-    {
-        let value = values.next().ok_or_else(headers::Error::invalid)?;
-        let value = value.to_str().map_err(|_| headers::Error::invalid())?;
-        let value = serde_json::from_str(value).map_err(|_| headers::Error::invalid())?;
-        Ok(Self(value))
-    }
-
-    fn encode<E>(&self, values: &mut E)
-    where
-        E: Extend<HeaderValue>,
-    {
-        let Self(value) = self;
-        // Must panic or suppress; the trait provides no other options.
-        let value = serde_json::to_string(value).expect("value can be encoded as JSON");
-        let value = HeaderValue::from_str(&value).expect("LastEventId is a valid header value");
-
-        values.extend(std::iter::once(value));
-    }
-}
-
-#[async_trait::async_trait]
-impl<S, T> FromRequestParts<S> for LastEventId<T>
-where
-    S: Send + Sync,
-    T: Serialize + DeserializeOwned,
-{
-    type Rejection = <TypedHeader<Self> as FromRequestParts<S>>::Rejection;
-
-    async fn from_request_parts(parts: &mut Parts, state: &S) -> Result<Self, Self::Rejection> {
-        // This is purely for ergonomics: it allows `RequestedAt` to be extracted
-        // without having to wrap it in `Extension<>`. Callers _can_ still do that,
-        // but they aren't forced to.
-        let TypedHeader(requested_at) =
-            TypedHeader::<Self>::from_request_parts(parts, state).await?;
-
-        Ok(requested_at)
-    }
-}
-
-impl<T> Deref for LastEventId<T> {
-    type Target = T;
-
-    fn deref(&self) -> &Self::Target {
-        let Self(header) = self;
-        header
-    }
-}
-
-impl<T> From<T> for LastEventId<T> {
-    fn from(value: T) -> Self {
-        Self(value)
-    }
-}
-
-impl<T> LastEventId<T> {
-    pub fn into_inner(self) -> T {
-        let Self(value) = self;
-        value
-    }
-}
diff --git a/src/lib.rs b/src/lib.rs
index 09bfac4..a7ca18b 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -4,7 +4,6 @@ pub mod cli;
 mod clock;
 mod error;
 mod events;
-mod header;
 mod id;
 mod login;
 mod password;
diff --git a/src/test/fixtures/message.rs b/src/test/fixtures/message.rs
index 7fe3cb9..33feeae 100644
--- a/src/test/fixtures/message.rs
+++ b/src/test/fixtures/message.rs
@@ -15,7 +15,7 @@ pub async fn send(
 ) -> broadcast::Message {
     let body = propose();
 
-    app.channels()
+    app.events()
         .send(login, &channel.id, &body, sent_at)
         .await
         .expect("should succeed if the channel exists")
-- 
cgit v1.2.3