Diffstat (limited to 'src/vapid')
-rw-r--r--  src/vapid/app.rs          89
-rw-r--r--  src/vapid/event.rs        48
-rw-r--r--  src/vapid/history.rs      55
-rw-r--r--  src/vapid/middleware.rs   17
-rw-r--r--  src/vapid/mod.rs           9
-rw-r--r--  src/vapid/repo.rs        139
6 files changed, 357 insertions, 0 deletions
diff --git a/src/vapid/app.rs b/src/vapid/app.rs
new file mode 100644
index 0000000..8886c9f
--- /dev/null
+++ b/src/vapid/app.rs
@@ -0,0 +1,89 @@
+use chrono::TimeDelta;
+use sqlx::SqlitePool;
+
+use super::{History, repo, repo::Provider as _};
+use crate::{
+ clock::DateTime,
+ db::NotFound as _,
+ event::{Broadcaster, Sequence, repo::Provider},
+};
+
+pub struct Vapid<'a> {
+ db: &'a SqlitePool,
+ events: &'a Broadcaster,
+}
+
+impl<'a> Vapid<'a> {
+ pub const fn new(db: &'a SqlitePool, events: &'a Broadcaster) -> Self {
+ Self { db, events }
+ }
+
+ pub async fn refresh_key(&self, ensure_at: &DateTime) -> Result<(), Error> {
+ let mut tx = self.db.begin().await?;
+ let key = tx.vapid().current().await.optional()?;
+ if key.is_none() {
+ let changed_at = tx.sequence().next(ensure_at).await?;
+ let (key, secret) = History::begin(&changed_at);
+
+ tx.vapid().clear().await?;
+ tx.vapid().store_signing_key(&secret).await?;
+
+ let events = key.events().filter(Sequence::start_from(changed_at));
+ tx.vapid().record_events(events.clone()).await?;
+
+ tx.commit().await?;
+
+ self.events.broadcast_from(events);
+ } else if let Some(key) = key
+ // Somewhat arbitrarily, rotate keys every 30 days.
+ && key.older_than(ensure_at.to_owned() - TimeDelta::days(30))
+ {
+ // If you can think of a way to factor out this duplication, be my guest. I tried.
+ // The only approach I could think of mirrors `crate::user::create::Create`, encoding
+ // the process in a state machine made of types, and that's a very complex solution
+ // to a problem that doesn't seem to merit it. -o
+ let changed_at = tx.sequence().next(ensure_at).await?;
+ let (key, secret) = key.rotate(&changed_at);
+
+ tx.vapid().clear().await?;
+ tx.vapid().store_signing_key(&secret).await?;
+
+ // Refactoring constraint: this `events` iterator borrows `key`. Anything that moves
+ // `key` has to give it back, but it can't give both `key` back and an event iterator
+ // borrowing from `key` because Rust doesn't support types that borrow from other
+ // parts of themselves.
+ let events = key.events().filter(Sequence::start_from(changed_at));
+ tx.vapid().record_events(events.clone()).await?;
+
+ // Refactoring constraint: we _really_ want to commit the transaction before we send
+ // out events, so that anything acting on those events is guaranteed to see the state
+ // of the service at some point at or after the side effects of this. I'd also prefer
+ // to keep the commit in the same method that the transaction is begun in, for clarity.
+ tx.commit().await?;
+
+ self.events.broadcast_from(events);
+ }
+ // else, the key exists and is not stale. Don't bother allocating a sequence number, and
+ // in fact throw away the whole transaction.
+
+ Ok(())
+ }
+}
+
+#[derive(Debug, thiserror::Error)]
+pub enum Error {
+ #[error(transparent)]
+ Database(#[from] sqlx::Error),
+ #[error(transparent)]
+ Ecdsa(#[from] p256::ecdsa::Error),
+}
+
+impl From<repo::Error> for Error {
+ fn from(error: repo::Error) -> Self {
+ use repo::Error;
+ match error {
+ Error::Database(error) => error.into(),
+ Error::Ecdsa(error) => error.into(),
+ }
+ }
+}
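A minimal sketch (not part of this commit) of driving the refresh directly, e.g. from a startup task rather than the per-request middleware below; `pool`, `broadcaster`, and `now` are assumed to be a `SqlitePool`, a `Broadcaster`, and a `clock::DateTime` already in hand:

    let vapid = Vapid::new(&pool, &broadcaster);
    // No-op (and no sequence number allocated) when a fresh, non-stale key already exists.
    vapid.refresh_key(&now).await?;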
diff --git a/src/vapid/event.rs b/src/vapid/event.rs
new file mode 100644
index 0000000..af70ac2
--- /dev/null
+++ b/src/vapid/event.rs
@@ -0,0 +1,48 @@
+use base64::{Engine, engine::general_purpose::URL_SAFE};
+use p256::ecdsa::VerifyingKey;
+use serde::Serialize;
+
+use crate::event::{Instant, Sequenced};
+
+#[derive(Clone, Debug, Eq, PartialEq, serde::Serialize)]
+#[serde(tag = "event", rename_all = "snake_case")]
+pub enum Event {
+ Changed(Changed),
+}
+
+impl Sequenced for Event {
+ fn instant(&self) -> Instant {
+ match self {
+ Self::Changed(event) => event.instant(),
+ }
+ }
+}
+
+#[derive(Clone, Debug, Eq, PartialEq, serde::Serialize)]
+pub struct Changed {
+ #[serde(flatten)]
+ pub instant: Instant,
+ #[serde(serialize_with = "as_vapid_key")]
+ pub key: VerifyingKey,
+}
+
+impl From<Changed> for Event {
+ fn from(event: Changed) -> Self {
+ Self::Changed(event)
+ }
+}
+
+impl Sequenced for Changed {
+ fn instant(&self) -> Instant {
+ self.instant
+ }
+}
+
+fn as_vapid_key<S>(key: &VerifyingKey, serializer: S) -> Result<S::Ok, S::Error>
+where
+ S: serde::Serializer,
+{
+ let key = key.to_sec1_bytes();
+ let key = URL_SAFE.encode(key);
+ key.serialize(serializer)
+}
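A sketch (not part of this commit) of the JSON the `as_vapid_key` serializer produces, assuming `serde_json` is available and `instant` is some already-sequenced `Instant`; the key comes out as URL-safe base64 over the SEC1-encoded public point:

    let secret = p256::ecdsa::SigningKey::random(&mut rand::thread_rng());
    let event = Event::from(Changed {
        instant,
        key: p256::ecdsa::VerifyingKey::from(&secret),
    });
    // Roughly {"event":"changed",...,"key":"BC6v..."}; the elided fields come from
    // flattening `instant`.
    let json = serde_json::to_string(&event)?;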
diff --git a/src/vapid/history.rs b/src/vapid/history.rs
new file mode 100644
index 0000000..42f062b
--- /dev/null
+++ b/src/vapid/history.rs
@@ -0,0 +1,55 @@
+use p256::ecdsa::{SigningKey, VerifyingKey};
+use rand::thread_rng;
+
+use super::event::{Changed, Event};
+use crate::{clock::DateTime, event::Instant};
+
+#[derive(Debug)]
+pub struct History {
+ pub key: VerifyingKey,
+ pub changed: Instant,
+}
+
+// Lifecycle interface
+impl History {
+ pub fn begin(changed: &Instant) -> (Self, SigningKey) {
+ let key = SigningKey::random(&mut thread_rng());
+ (
+ Self {
+ key: VerifyingKey::from(&key),
+ changed: *changed,
+ },
+ key,
+ )
+ }
+
+ // `self` _is_ unused here, clippy is right about that. This choice is deliberate, however - it
+ // makes it harder to inadvertently reuse a rotated key via its history, and it makes the
+ // lifecycle interface more obviously consistent between this and other History types.
+ #[allow(clippy::unused_self)]
+ pub fn rotate(self, changed: &Instant) -> (Self, SigningKey) {
+ Self::begin(changed)
+ }
+}
+
+// State interface
+impl History {
+ pub fn older_than(&self, when: DateTime) -> bool {
+ self.changed.at < when
+ }
+}
+
+// Events interface
+impl History {
+ pub fn events(&self) -> impl Iterator<Item = Event> + Clone {
+ [self.changed()].into_iter()
+ }
+
+ fn changed(&self) -> Event {
+ Changed {
+ key: self.key,
+ instant: self.changed,
+ }
+ .into()
+ }
+}
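A sketch (not part of this commit) of the lifecycle this interface is shaped around, with `changed_at` and `rotated_at` standing in for sequenced `Instant`s; `rotate` consumes the old `History`, so the retired key can no longer be reached through it:

    let (history, secret) = History::begin(&changed_at);
    // ... persist `secret`, record and broadcast `history.events()` ...
    let (history, secret) = history.rotate(&rotated_at);
    // ... and the same again for the replacement key.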
diff --git a/src/vapid/middleware.rs b/src/vapid/middleware.rs
new file mode 100644
index 0000000..02951ba
--- /dev/null
+++ b/src/vapid/middleware.rs
@@ -0,0 +1,17 @@
+use axum::{
+ extract::{Request, State},
+ middleware::Next,
+ response::Response,
+};
+
+use crate::{app::App, clock::RequestedAt, error::Internal};
+
+pub async fn middleware(
+ State(app): State<App>,
+ RequestedAt(now): RequestedAt,
+ request: Request,
+ next: Next,
+) -> Result<Response, Internal> {
+ app.vapid().refresh_key(&now).await?;
+ Ok(next.run(request).await)
+}
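One plausible way to attach this (not part of this commit; the route path and `subscribe` handler are made up), using axum's `from_fn_with_state` with the same shared `App` state:

    let router = axum::Router::new()
        .route("/notifications", axum::routing::post(subscribe))
        .layer(axum::middleware::from_fn_with_state(app.clone(), crate::vapid::middleware))
        .with_state(app);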
diff --git a/src/vapid/mod.rs b/src/vapid/mod.rs
new file mode 100644
index 0000000..9798654
--- /dev/null
+++ b/src/vapid/mod.rs
@@ -0,0 +1,9 @@
+pub mod app;
+pub mod event;
+mod history;
+mod middleware;
+pub mod repo;
+
+pub use event::Event;
+pub use history::History;
+pub use middleware::middleware;
diff --git a/src/vapid/repo.rs b/src/vapid/repo.rs
new file mode 100644
index 0000000..4ac5286
--- /dev/null
+++ b/src/vapid/repo.rs
@@ -0,0 +1,139 @@
+use p256::{NistP256, ecdsa::SigningKey, elliptic_curve::FieldBytes};
+use sqlx::{Sqlite, SqliteConnection, Transaction};
+
+use super::{
+ History,
+ event::{Changed, Event},
+};
+use crate::{
+ clock::DateTime,
+ db::NotFound,
+ event::{Instant, Sequence},
+};
+
+pub trait Provider {
+ fn vapid(&mut self) -> Vapid<'_>;
+}
+
+impl Provider for Transaction<'_, Sqlite> {
+ fn vapid(&mut self) -> Vapid<'_> {
+ Vapid(self)
+ }
+}
+
+pub struct Vapid<'a>(&'a mut SqliteConnection);
+
+impl Vapid<'_> {
+ pub async fn record_events(
+ &mut self,
+ events: impl IntoIterator<Item = Event>,
+ ) -> Result<(), sqlx::Error> {
+ for event in events {
+ self.record_event(&event).await?;
+ }
+ Ok(())
+ }
+
+ pub async fn record_event(&mut self, event: &Event) -> Result<(), sqlx::Error> {
+ match event {
+ Event::Changed(changed) => self.record_changed(changed).await,
+ }
+ }
+
+ async fn record_changed(&mut self, changed: &Changed) -> Result<(), sqlx::Error> {
+ sqlx::query!(
+ r#"
+ insert into vapid_key (changed_at, changed_sequence)
+ values ($1, $2)
+ "#,
+ changed.instant.at,
+ changed.instant.sequence,
+ )
+ .execute(&mut *self.0)
+ .await?;
+
+ Ok(())
+ }
+
+ pub async fn clear(&mut self) -> Result<(), sqlx::Error> {
+ sqlx::query!(
+ r#"
+ delete from vapid_key
+ "#
+ )
+ .execute(&mut *self.0)
+ .await?;
+
+ sqlx::query!(
+ r#"
+ delete from vapid_signing_key
+ "#
+ )
+ .execute(&mut *self.0)
+ .await?;
+
+ Ok(())
+ }
+
+ pub async fn store_signing_key(&mut self, key: &SigningKey) -> Result<(), Error> {
+ let key = key.to_bytes();
+ let key = key.as_slice();
+ sqlx::query!(
+ r#"
+ insert into vapid_signing_key (key)
+ values ($1)
+ "#,
+ key,
+ )
+ .execute(&mut *self.0)
+ .await?;
+
+ Ok(())
+ }
+
+ pub async fn current(&mut self) -> Result<History, Error> {
+ let key = sqlx::query!(
+ r#"
+ select
+ key.changed_at as "changed_at: DateTime",
+ key.changed_sequence as "changed_sequence: Sequence",
+ signing.key as "key: Vec<u8>"
+ from vapid_key as key
+ join vapid_signing_key as signing
+ "#
+ )
+ .map(|row| {
+ let key = FieldBytes::<NistP256>::from_slice(&row.key);
+ let key = SigningKey::from_bytes(key)?;
+ let key = key.verifying_key().to_owned();
+
+ let changed = Instant::new(row.changed_at, row.changed_sequence);
+
+ Ok::<_, Error>(History { key, changed })
+ })
+ .fetch_one(&mut *self.0)
+ .await??;
+
+ Ok(key)
+ }
+}
+
+#[derive(Debug, thiserror::Error)]
+#[error(transparent)]
+pub enum Error {
+ Ecdsa(#[from] p256::ecdsa::Error),
+ Database(#[from] sqlx::Error),
+}
+
+impl<T> NotFound for Result<T, Error> {
+ type Ok = T;
+ type Error = Error;
+
+ fn optional(self) -> Result<Option<T>, Error> {
+ match self {
+ Ok(value) => Ok(Some(value)),
+ Err(Error::Database(sqlx::Error::RowNotFound)) => Ok(None),
+ Err(other) => Err(other),
+ }
+ }
+}
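A sketch (not part of this commit) of the round trip `store_signing_key` and `current` rely on: the secret scalar is persisted via `to_bytes()` and reconstructed with `SigningKey::from_bytes`, after which only the verifying key is exposed through `History`:

    let secret = p256::ecdsa::SigningKey::random(&mut rand::thread_rng());
    let stored = secret.to_bytes(); // the 32-byte scalar written to vapid_signing_key.key
    let restored = p256::ecdsa::SigningKey::from_bytes(&stored)?;
    assert_eq!(restored.verifying_key(), secret.verifying_key());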