use std::sync::{Arc, Mutex};

use futures::{Stream, future, stream::StreamExt as _};
use tokio::sync::broadcast::{Sender, channel};
use tokio_stream::wrappers::{BroadcastStream, errors::BroadcastStreamRecvError};

// Clones share the same underlying sender, so a message broadcast through any
// clone reaches every subscriber.
#[derive(Clone)]
pub struct Broadcaster<M> {
    // The use of std::sync::Mutex, and not tokio::sync::Mutex, follows Tokio's
    // own advice: <https://tokio.rs/tokio/tutorial/shared-state>. Methods that
    // lock it must stay synchronous and never hold the guard across an `.await`.
    sender: Arc<Mutex<Sender<M>>>,
}

impl<M> Default for Broadcaster<M>
where
    M: Clone + Send + std::fmt::Debug + 'static,
{
    fn default() -> Self {
        let sender = Self::make_sender();

        Self {
            sender: Arc::new(Mutex::new(sender)),
        }
    }
}

impl<M> Broadcaster<M>
where
    M: Clone + Send + std::fmt::Debug + 'static,
{
    pub fn broadcast(&self, message: M) {
        let tx = self.sender();

        // Per the Tokio docs, the returned error is only used to indicate that
        // there are no receivers. In this use case, that's fine; a lack of
        // listening consumers (chat clients) when a message is sent isn't an
        // error.
        //
        // The successful return value, which includes the number of active
        // receivers, also isn't that interesting to us.
        let _ = tx.send(message);
    }

    // Builds a single `M` from an iterator: each item of `messages` is converted into `E`, and the
    // resulting `E`s are collected into one `M` (such as a `Vec<E>`), which is then broadcast as a
    // single message.
    //
    // This is mostly meant for handling synchronized entity events, which tend to be generated as
    // iterables of domain-specific event types, like `user::Event`, but broadcast as `Vec<event::Event>`
    // for consumption by outside clients. There's a small usage sketch in the test module at the
    // bottom of this file.
    pub fn broadcast_from<I, E>(&self, messages: I)
    where
        I: IntoIterator,
        M: FromIterator<E>,
        E: From<I::Item>,
    {
        let message = messages.into_iter().map(Into::into).collect();
        self.broadcast(message);
    }

    pub fn subscribe(&self) -> impl Stream<Item = M> + std::fmt::Debug + use<M> {
        let rx = self.sender().subscribe();

        BroadcastStream::from(rx).scan((), |(), r| {
            // The following could technically be `r.ok()`, and is exactly
            // equivalent to it, but spelling out the match arms means we'll
            // find out at compile time if new errors get added to
            // `BroadcastStreamRecvError`.
            #[allow(clippy::manual_ok_err)]
            future::ready(match r {
                Ok(event) => Some(event),
                // Stop the stream here. This will disconnect SSE clients (see the `/api/events`
                // endpoint), who will then resume from `Last-Event-ID`, allowing them to catch up
                // by reading the skipped messages from the database.
                //
                // See also:
                // <https://users.rust-lang.org/t/taking-from-stream-while-ok/48854>
                Err(BroadcastStreamRecvError::Lagged(_)) => None,
            })
        })
    }

    fn sender(&self) -> Sender<M> {
        self.sender.lock().unwrap().clone()
    }

    fn make_sender() -> Sender<M> {
        // Queue depth of 16 chosen entirely arbitrarily. Don't read too much
        // into it.
        let (tx, _) = channel(16);
        tx
    }
}
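
// A minimal usage sketch of `Broadcaster`, written as tests. Running these assumes `tokio` is
// compiled with the `macros` and `rt` features (for `#[tokio::test]`); `UserEvent` and `ApiEvent`
// are hypothetical stand-ins for the crate's real event types, not names defined elsewhere.
#[cfg(test)]
mod tests {
    use super::*;

    use futures::StreamExt as _;

    // Stand-ins for the domain-specific and outward-facing event types that the comment on
    // `broadcast_from` alludes to.
    #[derive(Clone, Debug, PartialEq)]
    struct UserEvent(u32);

    #[derive(Clone, Debug, PartialEq)]
    struct ApiEvent(u32);

    impl From<UserEvent> for ApiEvent {
        fn from(event: UserEvent) -> Self {
            Self(event.0)
        }
    }

    #[tokio::test]
    async fn broadcast_reaches_subscribers_of_clones() {
        let broadcaster = Broadcaster::<String>::default();

        // Subscribe through a clone; both handles share the same sender, so a message sent
        // through the original shows up here.
        let mut stream = Box::pin(broadcaster.clone().subscribe());

        broadcaster.broadcast("hello".to_owned());

        assert_eq!(stream.next().await, Some("hello".to_owned()));
    }

    #[tokio::test]
    async fn broadcast_from_emits_one_collected_message() {
        let broadcaster = Broadcaster::<Vec<ApiEvent>>::default();
        let mut stream = Box::pin(broadcaster.subscribe());

        // Two domain events are converted element by element and collected into a single
        // `Vec<ApiEvent>` message.
        broadcaster.broadcast_from(vec![UserEvent(1), UserEvent(2)]);

        assert_eq!(stream.next().await, Some(vec![ApiEvent(1), ApiEvent(2)]));
    }

    #[tokio::test]
    async fn lagged_subscriber_stream_ends() {
        let broadcaster = Broadcaster::<u32>::default();
        let mut stream = Box::pin(broadcaster.subscribe());

        // Overflow the 16-slot channel before the subscriber polls, so its first receive reports
        // `Lagged` and the stream terminates instead of yielding the retained messages.
        for n in 0..32 {
            broadcaster.broadcast(n);
        }

        assert_eq!(stream.next().await, None);
    }
}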