use chrono::TimeDelta;
use sqlx::SqlitePool;

use super::{History, repo, repo::Provider as _};
use crate::{
    clock::DateTime,
    db::NotFound as _,
    event::{Broadcaster, Sequence, repo::Provider},
    push::repo::Provider as _,
};
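
/// Manages the service's VAPID signing key for Web Push: it generates, stores, and rotates the
/// key, and broadcasts key-change events so connected clients can resubscribe.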
pub struct Vapid {
    db: SqlitePool,
    events: Broadcaster,
}

impl Vapid {
    pub const fn new(db: SqlitePool, events: Broadcaster) -> Self {
        Self { db, events }
    }
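
    /// Removes the current VAPID signing key and its outstanding push subscriptions, so the
    /// running server generates (and announces) a fresh key on its next `refresh_key` pass.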
    pub async fn rotate_key(&self) -> Result<(), sqlx::Error> {
        let mut tx = self.db.begin().await?;
        // This is called from a separate CLI utility (see `cli.rs`), and we _can't_ deliver
        // events to active clients from another process, so don't do anything that would require
        // us to send events, like generating a new key.
        //
        // Instead, the server's next `refresh_key` call will generate a key and notify clients
        // of the change. All we have to do is remove the existing key, so the server knows it
        // needs to.
        tx.vapid().clear().await?;
        // Delete outstanding subscriptions for the existing VAPID key as well. They're
        // unserviceable once we lose the key. Clients can resubscribe when they process the next
        // key rotation event, which will arrive quite quickly once the running server notices
        // that the VAPID key has been removed.
        tx.push().clear().await?;
        tx.commit().await?;
        Ok(())
    }
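
    /// Ensures a usable VAPID signing key exists as of `ensure_at`: generates one if none is
    /// stored, and rotates the stored key once it is more than 30 days old. Key-change events
    /// are recorded in the same transaction and broadcast to connected clients after it commits.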
    pub async fn refresh_key(&self, ensure_at: &DateTime) -> Result<(), Error> {
        let mut tx = self.db.begin().await?;
        let key = tx.vapid().current().await.optional()?;
        if key.is_none() {
            let changed_at = tx.sequence().next(ensure_at).await?;
            let (key, secret) = History::begin(&changed_at);
            tx.vapid().clear().await?;
            tx.vapid().store_signing_key(&secret).await?;
            let events = key.events().filter(Sequence::start_from(changed_at));
            tx.vapid().record_events(events.clone()).await?;
            tx.commit().await?;
            self.events.broadcast_from(events);
        } else if let Some(key) = key
            // Somewhat arbitrarily, rotate keys every 30 days.
            && key.older_than(ensure_at.to_owned() - TimeDelta::days(30))
        {
            // If you can think of a way to factor out this duplication, be my guest. I tried.
            // The only approach I could think of mirrors `crate::user::create::Create`, encoding
            // the process in a state machine made of types, and that's a very complex solution
            // to a problem that doesn't seem to merit it. -o
            let changed_at = tx.sequence().next(ensure_at).await?;
            let (key, secret) = key.rotate(&changed_at);
            // This will delete _all_ stored subscriptions. That's fine; they're all for the
            // current VAPID key, and we won't be able to use them anyway once the key is
            // rotated. We have no way to inform the push broker services of that, unfortunately.
            tx.push().clear().await?;
            tx.vapid().clear().await?;
            tx.vapid().store_signing_key(&secret).await?;
            // Refactoring constraint: this `events` iterator borrows `key`. Anything that moves
            // `key` has to give it back, but it can't give back both `key` and an event iterator
            // borrowing from `key`, because Rust doesn't support types that borrow from other
            // parts of themselves.
            let events = key.events().filter(Sequence::start_from(changed_at));
            tx.vapid().record_events(events.clone()).await?;
            // Refactoring constraint: we _really_ want to commit the transaction before we send
            // out events, so that anything acting on those events is guaranteed to see the state
            // of the service at some point at or after these side effects. I'd also prefer to
            // keep the commit in the same method the transaction was begun in, for clarity.
            tx.commit().await?;
            self.events.broadcast_from(events);
        }
        // Otherwise the key exists and is not stale: don't bother allocating a sequence number,
        // and in fact throw away the whole transaction.
        Ok(())
    }
}
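
/// Errors that `refresh_key` can return: database failures, plus ECDSA and PKCS#8 errors from
/// `p256` key handling.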
#[derive(Debug, thiserror::Error)]
#[error(transparent)]
pub enum Error {
    Database(#[from] sqlx::Error),
    Ecdsa(#[from] p256::ecdsa::Error),
    Pkcs8(#[from] p256::pkcs8::Error),
}
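
// `repo::Error` carries the same classes of error, so map each of its variants onto the
// matching `#[from]` conversion here rather than nesting it as a fourth variant.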
impl From<repo::Error> for Error {
    fn from(error: repo::Error) -> Self {
        use repo::Error;
        match error {
            Error::Database(error) => error.into(),
            Error::Ecdsa(error) => error.into(),
            Error::Pkcs8(error) => error.into(),
        }
    }
}