author	Owen Jacobson <owen@grimoire.ca>	2024-09-20 23:01:18 -0400
committer	Owen Jacobson <owen@grimoire.ca>	2024-09-20 23:05:44 -0400
commit	0a05491930fb34ce7c93c33ea0b7599360483fc7 (patch)
tree	552906477fd81c6687c0ca9c6bdc25e22461b52a
parent	22348bfa35f009e62abe2f30863e0434079a1fe2 (diff)
Push events into a module structure consistent with the rest of the project.
-rw-r--r--	src/app.rs	5
-rw-r--r--	src/channel/app.rs	111
-rw-r--r--	src/channel/mod.rs	1
-rw-r--r--	src/events/app.rs	111
-rw-r--r--	src/events/mod.rs	5
-rw-r--r--	src/events/repo/broadcast.rs (renamed from src/channel/repo/broadcast.rs)	0
-rw-r--r--	src/events/repo/mod.rs (renamed from src/channel/repo/mod.rs)	0
-rw-r--r--	src/events/routes.rs (renamed from src/events.rs)	3
8 files changed, 123 insertions, 113 deletions
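For orientation (not part of the commit): before this change, Broadcaster and the broadcast repo lived under src/channel, and the SSE routes lived in src/events.rs; after it, all three live under src/events. A rough sketch of the import change for downstream code, with paths taken from the diff and the surrounding items purely illustrative:

// Before this commit (illustrative):
use crate::channel::app::{Broadcaster, Channels};
use crate::channel::repo::broadcast;

// After this commit (illustrative):
use crate::channel::app::Channels;
use crate::events::app::Broadcaster;
use crate::events::repo::broadcast;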
diff --git a/src/app.rs b/src/app.rs
index 0823a0c..e448436 100644
--- a/src/app.rs
+++ b/src/app.rs
@@ -1,9 +1,6 @@
use sqlx::sqlite::SqlitePool;
-use crate::{
- channel::app::{Broadcaster, Channels},
- login::app::Logins,
-};
+use crate::{channel::app::Channels, events::app::Broadcaster, login::app::Logins};
#[derive(Clone)]
pub struct App {
diff --git a/src/channel/app.rs b/src/channel/app.rs
index 8ae0c3c..f9a75d7 100644
--- a/src/channel/app.rs
+++ b/src/channel/app.rs
@@ -1,6 +1,3 @@
-use std::collections::{hash_map::Entry, HashMap};
-use std::sync::{Arc, Mutex, MutexGuard};
-
use chrono::TimeDelta;
use futures::{
future,
@@ -8,12 +5,13 @@ use futures::{
Stream,
};
use sqlx::sqlite::SqlitePool;
-use tokio::sync::broadcast::{channel, Sender};
-use tokio_stream::wrappers::{errors::BroadcastStreamRecvError, BroadcastStream};
-use super::repo::broadcast::{self, Provider as _};
use crate::{
clock::DateTime,
+ events::{
+ app::Broadcaster,
+ repo::broadcast::{self, Provider as _},
+ },
repo::{
channel::{self, Channel, Provider as _},
error::NotFound as _,
@@ -158,104 +156,3 @@ pub enum EventsError {
#[error(transparent)]
DatabaseError(#[from] sqlx::Error),
}
-
-// Clones will share the same senders collection.
-#[derive(Clone)]
-pub struct Broadcaster {
- // The use of std::sync::Mutex, and not tokio::sync::Mutex, follows Tokio's
- // own advice: <https://tokio.rs/tokio/tutorial/shared-state>. Methods that
- // lock it must be sync.
- senders: Arc<Mutex<HashMap<channel::Id, Sender<broadcast::Message>>>>,
-}
-
-impl Broadcaster {
- pub async fn from_database(db: &SqlitePool) -> Result<Self, sqlx::Error> {
- let mut tx = db.begin().await?;
- let channels = tx.channels().all().await?;
- tx.commit().await?;
-
- let channels = channels.iter().map(|c| &c.id);
- let broadcaster = Self::new(channels);
- Ok(broadcaster)
- }
-
- fn new<'i>(channels: impl IntoIterator<Item = &'i channel::Id>) -> Self {
- let senders: HashMap<_, _> = channels
- .into_iter()
- .cloned()
- .map(|id| (id, Self::make_sender()))
- .collect();
-
- Self {
- senders: Arc::new(Mutex::new(senders)),
- }
- }
-
- // panic: if ``channel`` is already registered.
- pub fn register_channel(&self, channel: &channel::Id) {
- match self.senders().entry(channel.clone()) {
- // This ever happening indicates a serious logic error.
- Entry::Occupied(_) => panic!("duplicate channel registration for channel {channel}"),
- Entry::Vacant(entry) => {
- entry.insert(Self::make_sender());
- }
- }
- }
-
- // panic: if ``channel`` has not been previously registered, and was not
- // part of the initial set of channels.
- pub fn broadcast(&self, channel: &channel::Id, message: broadcast::Message) {
- let tx = self.sender(channel);
-
- // Per the Tokio docs, the returned error is only used to indicate that
- // there are no receivers. In this use case, that's fine; a lack of
- // listening consumers (chat clients) when a message is sent isn't an
- // error.
- //
- // The successful return value, which includes the number of active
- // receivers, also isn't that interesting to us.
- let _ = tx.send(message);
- }
-
- // panic: if ``channel`` has not been previously registered, and was not
- // part of the initial set of channels.
- pub fn listen(&self, channel: &channel::Id) -> impl Stream<Item = broadcast::Message> {
- let rx = self.sender(channel).subscribe();
-
- BroadcastStream::from(rx)
- .take_while(|r| {
- future::ready(match r {
- Ok(_) => true,
- // Stop the stream here. This will disconnect SSE clients
- // (see `routes.rs`), who will then resume from
- // `Last-Event-ID`, allowing them to catch up by reading
- // the skipped messages from the database.
- Err(BroadcastStreamRecvError::Lagged(_)) => false,
- })
- })
- .map(|r| {
- // Since the previous transform stops at the first error, this
- // should always hold.
- //
- // See also <https://users.rust-lang.org/t/taking-from-stream-while-ok/48854>.
- r.expect("after filtering, only `Ok` messages should remain")
- })
- }
-
- // panic: if ``channel`` has not been previously registered, and was not
- // part of the initial set of channels.
- fn sender(&self, channel: &channel::Id) -> Sender<broadcast::Message> {
- self.senders()[channel].clone()
- }
-
- fn senders(&self) -> MutexGuard<HashMap<channel::Id, Sender<broadcast::Message>>> {
- self.senders.lock().unwrap() // propagate panics when mutex is poisoned
- }
-
- fn make_sender() -> Sender<broadcast::Message> {
- // Queue depth of 16 chosen entirely arbitrarily. Don't read too much
- // into it.
- let (tx, _) = channel(16);
- tx
- }
-}
diff --git a/src/channel/mod.rs b/src/channel/mod.rs
index f67ea04..9f79dbb 100644
--- a/src/channel/mod.rs
+++ b/src/channel/mod.rs
@@ -1,5 +1,4 @@
pub mod app;
-pub mod repo;
mod routes;
pub use self::routes::router;
diff --git a/src/events/app.rs b/src/events/app.rs
new file mode 100644
index 0000000..dfb23d7
--- /dev/null
+++ b/src/events/app.rs
@@ -0,0 +1,111 @@
+use std::collections::{hash_map::Entry, HashMap};
+use std::sync::{Arc, Mutex, MutexGuard};
+
+use futures::{future, stream::StreamExt as _, Stream};
+use sqlx::sqlite::SqlitePool;
+use tokio::sync::broadcast::{channel, Sender};
+use tokio_stream::wrappers::{errors::BroadcastStreamRecvError, BroadcastStream};
+
+use super::repo::broadcast;
+use crate::repo::channel::{self, Provider as _};
+
+// Clones will share the same senders collection.
+#[derive(Clone)]
+pub struct Broadcaster {
+ // The use of std::sync::Mutex, and not tokio::sync::Mutex, follows Tokio's
+ // own advice: <https://tokio.rs/tokio/tutorial/shared-state>. Methods that
+ // lock it must be sync.
+ senders: Arc<Mutex<HashMap<channel::Id, Sender<broadcast::Message>>>>,
+}
+
+impl Broadcaster {
+ pub async fn from_database(db: &SqlitePool) -> Result<Self, sqlx::Error> {
+ let mut tx = db.begin().await?;
+ let channels = tx.channels().all().await?;
+ tx.commit().await?;
+
+ let channels = channels.iter().map(|c| &c.id);
+ let broadcaster = Self::new(channels);
+ Ok(broadcaster)
+ }
+
+ fn new<'i>(channels: impl IntoIterator<Item = &'i channel::Id>) -> Self {
+ let senders: HashMap<_, _> = channels
+ .into_iter()
+ .cloned()
+ .map(|id| (id, Self::make_sender()))
+ .collect();
+
+ Self {
+ senders: Arc::new(Mutex::new(senders)),
+ }
+ }
+
+ // panic: if ``channel`` is already registered.
+ pub fn register_channel(&self, channel: &channel::Id) {
+ match self.senders().entry(channel.clone()) {
+ // This ever happening indicates a serious logic error.
+ Entry::Occupied(_) => panic!("duplicate channel registration for channel {channel}"),
+ Entry::Vacant(entry) => {
+ entry.insert(Self::make_sender());
+ }
+ }
+ }
+
+ // panic: if ``channel`` has not been previously registered, and was not
+ // part of the initial set of channels.
+ pub fn broadcast(&self, channel: &channel::Id, message: broadcast::Message) {
+ let tx = self.sender(channel);
+
+ // Per the Tokio docs, the returned error is only used to indicate that
+ // there are no receivers. In this use case, that's fine; a lack of
+ // listening consumers (chat clients) when a message is sent isn't an
+ // error.
+ //
+ // The successful return value, which includes the number of active
+ // receivers, also isn't that interesting to us.
+ let _ = tx.send(message);
+ }
+
+ // panic: if ``channel`` has not been previously registered, and was not
+ // part of the initial set of channels.
+ pub fn listen(&self, channel: &channel::Id) -> impl Stream<Item = broadcast::Message> {
+ let rx = self.sender(channel).subscribe();
+
+ BroadcastStream::from(rx)
+ .take_while(|r| {
+ future::ready(match r {
+ Ok(_) => true,
+ // Stop the stream here. This will disconnect SSE clients
+ // (see `routes.rs`), who will then resume from
+ // `Last-Event-ID`, allowing them to catch up by reading
+ // the skipped messages from the database.
+ Err(BroadcastStreamRecvError::Lagged(_)) => false,
+ })
+ })
+ .map(|r| {
+ // Since the previous transform stops at the first error, this
+ // should always hold.
+ //
+ // See also <https://users.rust-lang.org/t/taking-from-stream-while-ok/48854>.
+ r.expect("after filtering, only `Ok` messages should remain")
+ })
+ }
+
+ // panic: if ``channel`` has not been previously registered, and was not
+ // part of the initial set of channels.
+ fn sender(&self, channel: &channel::Id) -> Sender<broadcast::Message> {
+ self.senders()[channel].clone()
+ }
+
+ fn senders(&self) -> MutexGuard<HashMap<channel::Id, Sender<broadcast::Message>>> {
+ self.senders.lock().unwrap() // propagate panics when mutex is poisoned
+ }
+
+ fn make_sender() -> Sender<broadcast::Message> {
+ // Queue depth of 16 chosen entirely arbitrarily. Don't read too much
+ // into it.
+ let (tx, _) = channel(16);
+ tx
+ }
+}
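A minimal usage sketch of the relocated Broadcaster, again not part of the commit: it assumes a Broadcaster built at startup via from_database, and that a channel::Id and a broadcast::Message value are obtained elsewhere, since their constructors are not shown in this diff.

use futures::stream::StreamExt as _;

// Hypothetical caller, for illustration only.
async fn fan_out_sketch(
    broadcaster: &Broadcaster,
    id: &channel::Id,
    msg: broadcast::Message,
) {
    // Subscribe before sending so this listener does not miss the message.
    let mut events = Box::pin(broadcaster.listen(id));

    // Fire-and-forget; having no listening clients is not treated as an error.
    broadcaster.broadcast(id, msg);

    // Each subscriber receives its own copy of every message sent after it subscribed.
    if let Some(received) = events.next().await {
        let _ = received;
    }
}

Per the take_while in listen, a subscriber that lags far enough to lose messages has its stream ended rather than resumed, on the assumption that the SSE client will reconnect and replay from Last-Event-ID.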
diff --git a/src/events/mod.rs b/src/events/mod.rs
new file mode 100644
index 0000000..f67ea04
--- /dev/null
+++ b/src/events/mod.rs
@@ -0,0 +1,5 @@
+pub mod app;
+pub mod repo;
+mod routes;
+
+pub use self::routes::router;
diff --git a/src/channel/repo/broadcast.rs b/src/events/repo/broadcast.rs
index 182203a..182203a 100644
--- a/src/channel/repo/broadcast.rs
+++ b/src/events/repo/broadcast.rs
diff --git a/src/channel/repo/mod.rs b/src/events/repo/mod.rs
index 2ed3062..2ed3062 100644
--- a/src/channel/repo/mod.rs
+++ b/src/events/repo/mod.rs
diff --git a/src/events.rs b/src/events/routes.rs
index 9cbb0a3..f880c70 100644
--- a/src/events.rs
+++ b/src/events/routes.rs
@@ -12,9 +12,10 @@ use axum_extra::extract::Query;
use chrono::{self, format::SecondsFormat, DateTime};
use futures::stream::{self, Stream, StreamExt as _, TryStreamExt as _};
+use super::repo::broadcast;
use crate::{
app::App,
- channel::{app::EventsError, repo::broadcast},
+ channel::app::EventsError,
clock::RequestedAt,
error::InternalError,
header::LastEventId,