From 3bd63e20777126216777b392615dc8144f21bb9a Mon Sep 17 00:00:00 2001 From: Owen Jacobson Date: Sat, 30 Aug 2025 00:50:24 -0400 Subject: Generate, store, and deliver a VAPID key. VAPID is used to authenticate applications to push brokers, as part of the [Web Push] specification. It's notionally optional, but we believe [Apple requires it][apple], and in any case making it impossible to use subscription URLs without the corresponding private key available, and thus harder to impersonate the server, seems like a good security practice regardless. [Web Push]: https://developer.mozilla.org/en-US/docs/Web/API/Push_API [apple]: https://developer.apple.com/documentation/usernotifications/sending-web-push-notifications-in-web-apps-and-browsers There are several implementations of VAPID for Rust: * [web_push](https://docs.rs/web-push/latest/web_push/) includes an implementation of VAPID but requires callers to provision their own keys. We will likely use this crate for Web Push fulfilment, but we cannot use it for key generation. * [vapid](https://docs.rs/vapid/latest/vapid/) includes an implementation of VAPID key generation. It delegates to `openssl` to handle cryptographic operations. * [p256](https://docs.rs/p256/latest/p256/) implements NIST P-256 in Rust. It's maintained by the RustCrypto team, though as of this writing it is largely written by a single contributor. It isn't specifically designed for use with VAPID. I opted to use p256 for this, as I believe the RustCrypto team are the most likely to produce a correct and secure implementation, and because openssl has consistently left a bad taste in my mouth for years. Because it's a general implementation of the algorithm, I expect that it will require more work for us to adapt it for use with VAPID specifically; I'm willing to chance it and we can swap it out for the vapid crate if it sucks. This has left me with one area of uncertainty: I'm not actually sure I'm using the right parts of p256. 
The choice of `ecdsa::SigningKey` over `p256::SecretKey` is based on [the MDN docs] using phrases like "This value is part of a signing key pair generated by your application server, and usable with elliptic curve digital signature (ECDSA), over the P-256 curve." and on [RFC 8292]'s "The 'k' parameter includes an ECDSA public key in uncompressed form that is encoded using base64url encoding." However, we won't be able to test my implementation until we implement some other key parts of Web Push, which are out of scope of this commit. [the MDN docs]: https://developer.mozilla.org/en-US/docs/Web/API/PushSubscription/options [RFC 8292]: https://datatracker.ietf.org/doc/html/rfc8292#section-3.2 Following the design used for storing logins and users, VAPID keys are split into a non-synchronized part (consisting of the private key), whose exposure would allow others to impersonate the Pilcrow server, and a synchronized part (consisting of event coordinates and, notionally, the public key), which is non-sensitive and can be safely shared with any user. However, the public key is derived from the stored private key, rather than being stored directly, to minimize redundancy in the stored data. Following the design used for expiring stale entities, the app checks for and creates, or rotates, its VAPID key using middleware that runs before most API requests. If, at that point, the key is either absent, or more than 30 days old, it is replaced. This imposes a small tax on API request latency, which is used to fund prompt and automatic key rotation without the need for an operator-facing key management interface. VAPID keys are delivered to clients via the event stream, as laid out in `docs/api/events.md`. There are a few reasons for this, but the big one is that changing the VAPID key would immediately invalidate push subscriptions: we throw away the private key, so we wouldn't be able to publish to them any longer. 
Clients must replace their push subscriptions in order to resume delivery, and doing so promptly when notified that the key has changed will minimize the gap. This design is intended to allow for manual key rotation. The key can be rotated "immediately" by emptying the `vapid_key` and `vapid_signing_key` tables (which destroys the rotated key); the server will generate a new one before it is needed, and will notify clients that the key has been invalidated. This change includes client support for tracking the current VAPID key. The client doesn't _use_ this information anywhere, yet, but it has it. --- src/event/app.rs | 22 +++++++ src/event/handlers/stream/test/mod.rs | 1 + src/event/handlers/stream/test/vapid.rs | 111 ++++++++++++++++++++++++++++++++ src/event/mod.rs | 10 ++- 4 files changed, 143 insertions(+), 1 deletion(-) create mode 100644 src/event/handlers/stream/test/vapid.rs (limited to 'src/event') diff --git a/src/event/app.rs b/src/event/app.rs index 7359bfb..fe90465 100644 --- a/src/event/app.rs +++ b/src/event/app.rs @@ -8,9 +8,12 @@ use sqlx::sqlite::SqlitePool; use super::{Event, Sequence, Sequenced, broadcaster::Broadcaster}; use crate::{ conversation::{self, repo::Provider as _}, + db::NotFound, message::{self, repo::Provider as _}, name, user::{self, repo::Provider as _}, + vapid, + vapid::repo::Provider as _, }; pub struct Events<'a> { @@ -57,9 +60,17 @@ impl<'a> Events<'a> { .filter(Sequence::after(resume_at)) .map(Event::from); + let vapid = tx.vapid().current().await.optional()?; + let vapid_events = vapid + .iter() + .flat_map(vapid::History::events) + .filter(Sequence::after(resume_at)) + .map(Event::from); + let replay_events = user_events .merge_by(conversation_events, Sequence::merge) .merge_by(message_events, Sequence::merge) + .merge_by(vapid_events, Sequence::merge) .collect::>(); let resume_live_at = replay_events.last().map_or(resume_at, Sequenced::sequence); @@ -86,6 +97,7 @@ impl<'a> Events<'a> { pub enum Error { Database(#[from] 
sqlx::Error), Name(#[from] name::Error), + Ecdsa(#[from] p256::ecdsa::Error), } impl From for Error { @@ -107,3 +119,13 @@ impl From for Error { } } } + +impl From for Error { + fn from(error: vapid::repo::Error) -> Self { + use vapid::repo::Error; + match error { + Error::Database(error) => error.into(), + Error::Ecdsa(error) => error.into(), + } + } +} diff --git a/src/event/handlers/stream/test/mod.rs b/src/event/handlers/stream/test/mod.rs index 3bc634f..c3a6ce6 100644 --- a/src/event/handlers/stream/test/mod.rs +++ b/src/event/handlers/stream/test/mod.rs @@ -4,5 +4,6 @@ mod message; mod resume; mod setup; mod token; +mod vapid; use super::{QueryParams, Response, handler}; diff --git a/src/event/handlers/stream/test/vapid.rs b/src/event/handlers/stream/test/vapid.rs new file mode 100644 index 0000000..dbc3929 --- /dev/null +++ b/src/event/handlers/stream/test/vapid.rs @@ -0,0 +1,111 @@ +use axum::extract::State; +use axum_extra::extract::Query; +use futures::StreamExt as _; + +use crate::test::{fixtures, fixtures::future::Expect as _}; + +#[tokio::test] +async fn live_vapid_key_changes() { + // Set up the context + let app = fixtures::scratch_app().await; + let resume_point = fixtures::boot::resume_point(&app).await; + + // Subscribe to events + + let subscriber = fixtures::identity::create(&app, &fixtures::now()).await; + let super::Response(events) = super::handler( + State(app.clone()), + subscriber, + None, + Query(super::QueryParams { resume_point }), + ) + .await + .expect("subscribe never fails"); + + // Rotate the VAPID key + + app.vapid() + .refresh_key(&fixtures::now()) + .await + .expect("refreshing the vapid key always succeeds"); + + // Verify that there's a key rotation event + + events + .filter_map(fixtures::event::stream::vapid) + .filter_map(fixtures::event::stream::vapid::changed) + .next() + .expect_some("a vapid key change event is sent") + .await; +} + +#[tokio::test] +async fn stored_vapid_key_changes() { + // Set up the context + let app 
= fixtures::scratch_app().await; + let resume_point = fixtures::boot::resume_point(&app).await; + + // Rotate the VAPID key + + app.vapid() + .refresh_key(&fixtures::now()) + .await + .expect("refreshing the vapid key always succeeds"); + + // Subscribe to events + + let subscriber = fixtures::identity::create(&app, &fixtures::now()).await; + let super::Response(events) = super::handler( + State(app.clone()), + subscriber, + None, + Query(super::QueryParams { resume_point }), + ) + .await + .expect("subscribe never fails"); + + // Verify that there's a key rotation event + + events + .filter_map(fixtures::event::stream::vapid) + .filter_map(fixtures::event::stream::vapid::changed) + .next() + .expect_some("a vapid key change event is sent") + .await; +} + +#[tokio::test] +async fn no_past_vapid_key_changes() { + // Set up the context + let app = fixtures::scratch_app().await; + + // Rotate the VAPID key + + app.vapid() + .refresh_key(&fixtures::now()) + .await + .expect("refreshing the vapid key always succeeds"); + + // Subscribe to events + + let resume_point = fixtures::boot::resume_point(&app).await; + + let subscriber = fixtures::identity::create(&app, &fixtures::now()).await; + let super::Response(events) = super::handler( + State(app.clone()), + subscriber, + None, + Query(super::QueryParams { resume_point }), + ) + .await + .expect("subscribe never fails"); + + // Verify that there's a key rotation event + + events + .filter_map(fixtures::event::stream::vapid) + .filter_map(fixtures::event::stream::vapid::changed) + .next() + .expect_wait("a vapid key change event is not sent") + .await; +} diff --git a/src/event/mod.rs b/src/event/mod.rs index f41dc9c..83b0ce7 100644 --- a/src/event/mod.rs +++ b/src/event/mod.rs @@ -2,7 +2,7 @@ use std::time::Duration; use axum::response::sse::{self, KeepAlive}; -use crate::{conversation, message, user}; +use crate::{conversation, message, user, vapid}; pub mod app; mod broadcaster; @@ -22,6 +22,7 @@ pub enum Event { 
User(user::Event), Conversation(conversation::Event), Message(message::Event), + Vapid(vapid::Event), } // Serialized representation is intended to look like the serialized representation of `Event`, @@ -40,6 +41,7 @@ impl Sequenced for Event { Self::User(event) => event.instant(), Self::Conversation(event) => event.instant(), Self::Message(event) => event.instant(), + Self::Vapid(event) => event.instant(), } } } @@ -62,6 +64,12 @@ impl From for Event { } } +impl From for Event { + fn from(event: vapid::Event) -> Self { + Self::Vapid(event) + } +} + impl Heartbeat { // The following values are a first-rough-guess attempt to balance noticing connection problems // quickly with managing the (modest) costs of delivering and processing heartbeats. Feel -- cgit v1.2.3 From 11f4f36a689b6447c9898a2840418e581cb3eb11 Mon Sep 17 00:00:00 2001 From: Owen Jacobson Date: Tue, 28 Oct 2025 14:10:48 -0400 Subject: Use PKCS8 PEM, not raw SEC1 bytes, to store VAPID keys. The `web-push` crate's VAPID signing support requires a private key. The `p256` crate is more than capable of generating one, but the easiest way to get a key from a `p256::ecdsa::SigningKey` to a `web_push::PartialVapidSignature` is via PKCS #8 PEM, not via the bytes. Since we'll need it in that form anyways, store it that way, so that we don't have to decode it using `p256`, re-encode to PEM, then decode to `PartialVapidSignature`. The migration in this commit invalidates existing VAPID keys. We could include support for re-encoding them on read, but there's little point: this code is still in flux anyways, and only development deployments exist. By the time this is final, the schema will have settled. 
--- ...0f6acfdfbf9898a1a407e8a562a181542834d05eb0.json | 32 ++++++++++++++++++++++ ...c7c204a4b9a518af418cc7e7fce9a6f0a106a6d66e.json | 32 ---------------------- migrations/20251028173914_pem_vapid_keys.sql | 15 ++++++++++ src/boot/app.rs | 2 ++ src/event/app.rs | 2 ++ src/vapid/app.rs | 5 ++-- src/vapid/repo.rs | 15 ++++++---- 7 files changed, 63 insertions(+), 40 deletions(-) create mode 100644 .sqlx/query-be295f56960d083d1f4c760f6acfdfbf9898a1a407e8a562a181542834d05eb0.json delete mode 100644 .sqlx/query-edd16f1507f3b40270d652c7c204a4b9a518af418cc7e7fce9a6f0a106a6d66e.json create mode 100644 migrations/20251028173914_pem_vapid_keys.sql (limited to 'src/event') diff --git a/.sqlx/query-be295f56960d083d1f4c760f6acfdfbf9898a1a407e8a562a181542834d05eb0.json b/.sqlx/query-be295f56960d083d1f4c760f6acfdfbf9898a1a407e8a562a181542834d05eb0.json new file mode 100644 index 0000000..ccec274 --- /dev/null +++ b/.sqlx/query-be295f56960d083d1f4c760f6acfdfbf9898a1a407e8a562a181542834d05eb0.json @@ -0,0 +1,32 @@ +{ + "db_name": "SQLite", + "query": "\n select\n key.changed_at as \"changed_at: DateTime\",\n key.changed_sequence as \"changed_sequence: Sequence\",\n signing.key\n from vapid_key as key\n join vapid_signing_key as signing\n ", + "describe": { + "columns": [ + { + "name": "changed_at: DateTime", + "ordinal": 0, + "type_info": "Text" + }, + { + "name": "changed_sequence: Sequence", + "ordinal": 1, + "type_info": "Integer" + }, + { + "name": "key", + "ordinal": 2, + "type_info": "Text" + } + ], + "parameters": { + "Right": 0 + }, + "nullable": [ + false, + false, + false + ] + }, + "hash": "be295f56960d083d1f4c760f6acfdfbf9898a1a407e8a562a181542834d05eb0" +} diff --git a/.sqlx/query-edd16f1507f3b40270d652c7c204a4b9a518af418cc7e7fce9a6f0a106a6d66e.json b/.sqlx/query-edd16f1507f3b40270d652c7c204a4b9a518af418cc7e7fce9a6f0a106a6d66e.json deleted file mode 100644 index 2481fa9..0000000 --- a/.sqlx/query-edd16f1507f3b40270d652c7c204a4b9a518af418cc7e7fce9a6f0a106a6d66e.json 
+++ /dev/null @@ -1,32 +0,0 @@ -{ - "db_name": "SQLite", - "query": "\n select\n key.changed_at as \"changed_at: DateTime\",\n key.changed_sequence as \"changed_sequence: Sequence\",\n signing.key as \"key: Vec\"\n from vapid_key as key\n join vapid_signing_key as signing\n ", - "describe": { - "columns": [ - { - "name": "changed_at: DateTime", - "ordinal": 0, - "type_info": "Text" - }, - { - "name": "changed_sequence: Sequence", - "ordinal": 1, - "type_info": "Integer" - }, - { - "name": "key: Vec", - "ordinal": 2, - "type_info": "Blob" - } - ], - "parameters": { - "Right": 0 - }, - "nullable": [ - false, - false, - false - ] - }, - "hash": "edd16f1507f3b40270d652c7c204a4b9a518af418cc7e7fce9a6f0a106a6d66e" -} diff --git a/migrations/20251028173914_pem_vapid_keys.sql b/migrations/20251028173914_pem_vapid_keys.sql new file mode 100644 index 0000000..6302504 --- /dev/null +++ b/migrations/20251028173914_pem_vapid_keys.sql @@ -0,0 +1,15 @@ +drop table vapid_signing_key; + +create table vapid_signing_key ( + key text + not null +); + +create unique index vapid_signing_key_singleton + on vapid_signing_key (0); + +-- Whatever key we had, if any, was just destroyed by dropping the table. Delete the metadata +-- as well so that the server will issue a new one. 
+delete +from + vapid_key; diff --git a/src/boot/app.rs b/src/boot/app.rs index 8da3e90..88255b0 100644 --- a/src/boot/app.rs +++ b/src/boot/app.rs @@ -78,6 +78,7 @@ pub enum Error { Database(#[from] sqlx::Error), Name(#[from] name::Error), Ecdsa(#[from] p256::ecdsa::Error), + Pkcs8(#[from] p256::pkcs8::Error), } impl From for Error { @@ -106,6 +107,7 @@ impl From for Error { match error { Error::Database(error) => error.into(), Error::Ecdsa(error) => error.into(), + Error::Pkcs8(error) => error.into(), } } } diff --git a/src/event/app.rs b/src/event/app.rs index 6c657c7..1e471f1 100644 --- a/src/event/app.rs +++ b/src/event/app.rs @@ -98,6 +98,7 @@ pub enum Error { Database(#[from] sqlx::Error), Name(#[from] name::Error), Ecdsa(#[from] p256::ecdsa::Error), + Pkcs8(#[from] p256::pkcs8::Error), } impl From for Error { @@ -126,6 +127,7 @@ impl From for Error { match error { Error::Database(error) => error.into(), Error::Ecdsa(error) => error.into(), + Error::Pkcs8(error) => error.into(), } } } diff --git a/src/vapid/app.rs b/src/vapid/app.rs index 5814ba0..b6e1bc5 100644 --- a/src/vapid/app.rs +++ b/src/vapid/app.rs @@ -86,11 +86,11 @@ impl<'a> Vapid<'a> { } #[derive(Debug, thiserror::Error)] +#[error(transparent)] pub enum Error { - #[error(transparent)] Database(#[from] sqlx::Error), - #[error(transparent)] Ecdsa(#[from] p256::ecdsa::Error), + Pkcs8(#[from] p256::pkcs8::Error), } impl From for Error { @@ -99,6 +99,7 @@ impl From for Error { match error { Error::Database(error) => error.into(), Error::Ecdsa(error) => error.into(), + Error::Pkcs8(error) => error.into(), } } } diff --git a/src/vapid/repo.rs b/src/vapid/repo.rs index 4ac5286..98b3bae 100644 --- a/src/vapid/repo.rs +++ b/src/vapid/repo.rs @@ -1,4 +1,7 @@ -use p256::{NistP256, ecdsa::SigningKey, elliptic_curve::FieldBytes}; +use p256::{ + ecdsa::SigningKey, + pkcs8::{DecodePrivateKey as _, EncodePrivateKey as _, LineEnding}, +}; use sqlx::{Sqlite, SqliteConnection, Transaction}; use super::{ @@ -76,8 
+79,8 @@ impl Vapid<'_> { } pub async fn store_signing_key(&mut self, key: &SigningKey) -> Result<(), Error> { - let key = key.to_bytes(); - let key = key.as_slice(); + let key = key.to_pkcs8_pem(LineEnding::CRLF)?; + let key = key.as_str(); sqlx::query!( r#" insert into vapid_signing_key (key) @@ -97,14 +100,13 @@ impl Vapid<'_> { select key.changed_at as "changed_at: DateTime", key.changed_sequence as "changed_sequence: Sequence", - signing.key as "key: Vec" + signing.key from vapid_key as key join vapid_signing_key as signing "# ) .map(|row| { - let key = FieldBytes::::from_slice(&row.key); - let key = SigningKey::from_bytes(key)?; + let key = SigningKey::from_pkcs8_pem(&row.key)?; let key = key.verifying_key().to_owned(); let changed = Instant::new(row.changed_at, row.changed_sequence); @@ -122,6 +124,7 @@ impl Vapid<'_> { #[error(transparent)] pub enum Error { Ecdsa(#[from] p256::ecdsa::Error), + Pkcs8(#[from] p256::pkcs8::Error), Database(#[from] sqlx::Error), } -- cgit v1.2.3 From 6bab5b4405c9adafb2ce76540595a62eea80acc0 Mon Sep 17 00:00:00 2001 From: Owen Jacobson Date: Fri, 7 Nov 2025 21:39:39 -0500 Subject: De minimis "send me a notification" implementation. When a user clicks "send a test notification," Pilcrow delivers a push message (with a fixed payload) to all active subscriptions. The included client then displays this as a notification, using browser APIs to do so. This lets us verify that push notification works, end to end - and it appears to. The API endpoint for sending a test notification is not documented. I didn't feel it prudent to extensively document an endpoint that is intended to be temporary and whose side effects are very much subject to change. However, for posterity, the endpoint is POST /api/push/ping {} and the push message payload is ping Subscriptions with permanent delivery failures are nuked when we encounter them. 
Subscriptions with temporary failures cause the `ping` endpoint to return an internal server error, and are not retried. We'll likely want retry logic - including retry logic to handle server restarts - for any more serious use, but for a smoke test, giving up immediately is fine. To make the push implementation testable, `App` is now generic over it. Tests use a dummy implementation that stores sent messages in memory. This has some significant limitations, documented in the test suite, but it beats sending real notifications to nowhere in tests. --- ...968d2884e5091ced5a739f025656fcd66e05200af3.json | 32 ++++++ ...9e8531fd810dcc0c86aba831c384f5e31b3d5c0b79.json | 12 +++ ...0afb36060fd7450bfb4675069ede7aff15d575127e.json | 20 ++++ Cargo.lock | 1 + Cargo.toml | 1 + src/app.rs | 61 ++++++----- src/boot/app.rs | 2 + src/cli.rs | 10 +- src/event/app.rs | 2 + src/event/handlers/stream/mod.rs | 4 +- src/expire.rs | 4 +- src/push/app.rs | 114 +++++++++++++++++++-- src/push/handlers/mod.rs | 2 + src/push/handlers/ping/mod.rs | 23 +++++ src/push/handlers/ping/test.rs | 40 ++++++++ src/push/handlers/subscribe/mod.rs | 7 +- src/push/repo.rs | 35 +++++++ src/routes.rs | 7 +- src/test/fixtures/identity.rs | 2 +- src/test/fixtures/login.rs | 2 +- src/test/fixtures/mod.rs | 6 +- src/test/fixtures/user.rs | 4 +- src/test/mod.rs | 1 + src/test/webpush.rs | 37 +++++++ src/vapid/app.rs | 2 + src/vapid/repo.rs | 19 ++++ ui/lib/apiServer.js | 8 +- ui/lib/components/PushSubscription.svelte | 9 +- ui/routes/(app)/me/+page.svelte | 6 +- ui/routes/(swatch)/.swatch/+page.svelte | 1 + .../(swatch)/.swatch/PushSubscription/+page.svelte | 79 ++++++++++++++ ui/service-worker.js | 9 ++ 32 files changed, 508 insertions(+), 54 deletions(-) create mode 100644 .sqlx/query-19abe80d3fffd112a8f7c1968d2884e5091ced5a739f025656fcd66e05200af3.json create mode 100644 .sqlx/query-82d682f5579c3abb09fd9c9e8531fd810dcc0c86aba831c384f5e31b3d5c0b79.json create mode 100644 
.sqlx/query-8e0d1fb04dcbaaf13a56550afb36060fd7450bfb4675069ede7aff15d575127e.json create mode 100644 src/push/handlers/ping/mod.rs create mode 100644 src/push/handlers/ping/test.rs create mode 100644 src/test/webpush.rs create mode 100644 ui/routes/(swatch)/.swatch/PushSubscription/+page.svelte (limited to 'src/event') diff --git a/.sqlx/query-19abe80d3fffd112a8f7c1968d2884e5091ced5a739f025656fcd66e05200af3.json b/.sqlx/query-19abe80d3fffd112a8f7c1968d2884e5091ced5a739f025656fcd66e05200af3.json new file mode 100644 index 0000000..7382bfc --- /dev/null +++ b/.sqlx/query-19abe80d3fffd112a8f7c1968d2884e5091ced5a739f025656fcd66e05200af3.json @@ -0,0 +1,32 @@ +{ + "db_name": "SQLite", + "query": "\n select\n subscription.endpoint,\n subscription.p256dh,\n subscription.auth\n from push_subscription as subscription\n join token on subscription.token = token.id\n where token.login = $1\n ", + "describe": { + "columns": [ + { + "name": "endpoint", + "ordinal": 0, + "type_info": "Text" + }, + { + "name": "p256dh", + "ordinal": 1, + "type_info": "Text" + }, + { + "name": "auth", + "ordinal": 2, + "type_info": "Text" + } + ], + "parameters": { + "Right": 1 + }, + "nullable": [ + false, + false, + false + ] + }, + "hash": "19abe80d3fffd112a8f7c1968d2884e5091ced5a739f025656fcd66e05200af3" +} diff --git a/.sqlx/query-82d682f5579c3abb09fd9c9e8531fd810dcc0c86aba831c384f5e31b3d5c0b79.json b/.sqlx/query-82d682f5579c3abb09fd9c9e8531fd810dcc0c86aba831c384f5e31b3d5c0b79.json new file mode 100644 index 0000000..8d23fa0 --- /dev/null +++ b/.sqlx/query-82d682f5579c3abb09fd9c9e8531fd810dcc0c86aba831c384f5e31b3d5c0b79.json @@ -0,0 +1,12 @@ +{ + "db_name": "SQLite", + "query": "\n delete from push_subscription\n where endpoint = $1\n ", + "describe": { + "columns": [], + "parameters": { + "Right": 1 + }, + "nullable": [] + }, + "hash": "82d682f5579c3abb09fd9c9e8531fd810dcc0c86aba831c384f5e31b3d5c0b79" +} diff --git 
a/.sqlx/query-8e0d1fb04dcbaaf13a56550afb36060fd7450bfb4675069ede7aff15d575127e.json b/.sqlx/query-8e0d1fb04dcbaaf13a56550afb36060fd7450bfb4675069ede7aff15d575127e.json new file mode 100644 index 0000000..172c4df --- /dev/null +++ b/.sqlx/query-8e0d1fb04dcbaaf13a56550afb36060fd7450bfb4675069ede7aff15d575127e.json @@ -0,0 +1,20 @@ +{ + "db_name": "SQLite", + "query": "\n select key\n from vapid_signing_key\n ", + "describe": { + "columns": [ + { + "name": "key", + "ordinal": 0, + "type_info": "Text" + } + ], + "parameters": { + "Right": 0 + }, + "nullable": [ + false + ] + }, + "hash": "8e0d1fb04dcbaaf13a56550afb36060fd7450bfb4675069ede7aff15d575127e" +} diff --git a/Cargo.lock b/Cargo.lock index 63a36ec..f7d3337 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2052,6 +2052,7 @@ name = "pilcrow" version = "0.1.0" dependencies = [ "argon2", + "async-trait", "axum", "axum-extra", "base64 0.22.1", diff --git a/Cargo.toml b/Cargo.toml index 821ff6a..1f5fa3d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -59,6 +59,7 @@ uuid = { version = "1.13.2", features = ["v4"] } web-push = "0.11.0" [dev-dependencies] +async-trait = "0.1.89" faker_rand = "0.1.1" pin-project = "1.1.9" rand = "0.8.5" diff --git a/src/app.rs b/src/app.rs index e24331b..098ae9f 100644 --- a/src/app.rs +++ b/src/app.rs @@ -17,25 +17,27 @@ use crate::{ }; #[derive(Clone)] -pub struct App { +pub struct App

{ db: SqlitePool, + webpush: P, events: event::Broadcaster, token_events: token::Broadcaster, } -impl App { - pub fn from(db: SqlitePool) -> Self { +impl

App

{ + pub fn from(db: SqlitePool, webpush: P) -> Self { let events = event::Broadcaster::default(); let token_events = token::Broadcaster::default(); Self { db, + webpush, events, token_events, } } } -impl App { +impl

App

{ pub fn boot(&self) -> Boot { Boot::new(self.db.clone()) } @@ -60,8 +62,11 @@ impl App { Messages::new(self.db.clone(), self.events.clone()) } - pub fn push(&self) -> Push { - Push::new(self.db.clone()) + pub fn push(&self) -> Push

+ where + P: Clone, + { + Push::new(self.db.clone(), self.webpush.clone()) } pub fn setup(&self) -> Setup { @@ -80,58 +85,66 @@ impl App { pub fn vapid(&self) -> Vapid { Vapid::new(self.db.clone(), self.events.clone()) } + + #[cfg(test)] + pub fn webpush(&self) -> &P { + &self.webpush + } } -impl FromRef for Boot { - fn from_ref(app: &App) -> Self { +impl

FromRef> for Boot { + fn from_ref(app: &App

) -> Self { app.boot() } } -impl FromRef for Conversations { - fn from_ref(app: &App) -> Self { +impl

FromRef> for Conversations { + fn from_ref(app: &App

) -> Self { app.conversations() } } -impl FromRef for Invites { - fn from_ref(app: &App) -> Self { +impl

FromRef> for Invites { + fn from_ref(app: &App

) -> Self { app.invites() } } -impl FromRef for Logins { - fn from_ref(app: &App) -> Self { +impl

FromRef> for Logins { + fn from_ref(app: &App

) -> Self { app.logins() } } -impl FromRef for Messages { - fn from_ref(app: &App) -> Self { +impl

FromRef> for Messages { + fn from_ref(app: &App

) -> Self { app.messages() } } -impl FromRef for Push { - fn from_ref(app: &App) -> Self { +impl

FromRef> for Push

+where + P: Clone, +{ + fn from_ref(app: &App

) -> Self { app.push() } } -impl FromRef for Setup { - fn from_ref(app: &App) -> Self { +impl

FromRef> for Setup { + fn from_ref(app: &App

) -> Self { app.setup() } } -impl FromRef for Tokens { - fn from_ref(app: &App) -> Self { +impl

FromRef> for Tokens { + fn from_ref(app: &App

) -> Self { app.tokens() } } -impl FromRef for Vapid { - fn from_ref(app: &App) -> Self { +impl

FromRef> for Vapid { + fn from_ref(app: &App

) -> Self { app.vapid() } } diff --git a/src/boot/app.rs b/src/boot/app.rs index 88255b0..1ca8adb 100644 --- a/src/boot/app.rs +++ b/src/boot/app.rs @@ -79,6 +79,7 @@ pub enum Error { Name(#[from] name::Error), Ecdsa(#[from] p256::ecdsa::Error), Pkcs8(#[from] p256::pkcs8::Error), + WebPush(#[from] web_push::WebPushError), } impl From for Error { @@ -108,6 +109,7 @@ impl From for Error { Error::Database(error) => error.into(), Error::Ecdsa(error) => error.into(), Error::Pkcs8(error) => error.into(), + Error::WebPush(error) => error.into(), } } } diff --git a/src/cli.rs b/src/cli.rs index 8d73109..154771b 100644 --- a/src/cli.rs +++ b/src/cli.rs @@ -13,6 +13,7 @@ use axum::{ use clap::{CommandFactory, Parser, Subcommand}; use sqlx::sqlite::SqlitePool; use tokio::net; +use web_push::{IsahcWebPushClient, WebPushClient}; use crate::{ app::App, @@ -97,7 +98,8 @@ impl Args { self.umask.set(); let pool = self.pool().await?; - let app = App::from(pool); + let webpush = IsahcWebPushClient::new()?; + let app = App::from(pool, webpush); match self.command { None => self.serve(app).await?, @@ -107,7 +109,10 @@ impl Args { Result::<_, Error>::Ok(()) } - async fn serve(self, app: App) -> Result<(), Error> { + async fn serve

(self, app: App

) -> Result<(), Error> + where + P: WebPushClient + Clone + Send + Sync + 'static, + { let app = routes::routes(&app) .route_layer(middleware::from_fn(clock::middleware)) .route_layer(middleware::map_response(Self::server_info())) @@ -161,4 +166,5 @@ enum Error { Database(#[from] db::Error), Sqlx(#[from] sqlx::Error), Umask(#[from] umask::Error), + Webpush(#[from] web_push::WebPushError), } diff --git a/src/event/app.rs b/src/event/app.rs index 1e471f1..e422de9 100644 --- a/src/event/app.rs +++ b/src/event/app.rs @@ -99,6 +99,7 @@ pub enum Error { Name(#[from] name::Error), Ecdsa(#[from] p256::ecdsa::Error), Pkcs8(#[from] p256::pkcs8::Error), + WebPush(#[from] web_push::WebPushError), } impl From for Error { @@ -128,6 +129,7 @@ impl From for Error { Error::Database(error) => error.into(), Error::Ecdsa(error) => error.into(), Error::Pkcs8(error) => error.into(), + Error::WebPush(error) => error.into(), } } } diff --git a/src/event/handlers/stream/mod.rs b/src/event/handlers/stream/mod.rs index 63bfff3..8b89c31 100644 --- a/src/event/handlers/stream/mod.rs +++ b/src/event/handlers/stream/mod.rs @@ -18,8 +18,8 @@ use crate::{ #[cfg(test)] mod test; -pub async fn handler( - State(app): State, +pub async fn handler

( + State(app): State>, identity: Identity, last_event_id: Option>, Query(query): Query, diff --git a/src/expire.rs b/src/expire.rs index 4177a53..c3b0117 100644 --- a/src/expire.rs +++ b/src/expire.rs @@ -7,8 +7,8 @@ use axum::{ use crate::{app::App, clock::RequestedAt, error::Internal}; // Expires messages and conversations before each request. -pub async fn middleware( - State(app): State, +pub async fn middleware

( + State(app): State>, RequestedAt(expired_at): RequestedAt, req: Request, next: Next, diff --git a/src/push/app.rs b/src/push/app.rs index 358a8cc..56b9a02 100644 --- a/src/push/app.rs +++ b/src/push/app.rs @@ -1,17 +1,23 @@ +use futures::future::join_all; +use itertools::Itertools as _; use p256::ecdsa::VerifyingKey; use sqlx::SqlitePool; -use web_push::SubscriptionInfo; +use web_push::{ + ContentEncoding, PartialVapidSignatureBuilder, SubscriptionInfo, WebPushClient, WebPushError, + WebPushMessage, WebPushMessageBuilder, +}; use super::repo::Provider as _; -use crate::{token::extract::Identity, vapid, vapid::repo::Provider as _}; +use crate::{login::Login, token::extract::Identity, vapid, vapid::repo::Provider as _}; -pub struct Push { +pub struct Push

{ db: SqlitePool, + webpush: P, } -impl Push { - pub const fn new(db: SqlitePool) -> Self { - Self { db } +impl

Push

{ + pub const fn new(db: SqlitePool, webpush: P) -> Self { + Self { db, webpush } } pub async fn subscribe( @@ -60,6 +66,76 @@ impl Push { } } +impl

Push

+where + P: WebPushClient, +{ + fn prepare_ping( + signer: &PartialVapidSignatureBuilder, + subscription: &SubscriptionInfo, + ) -> Result { + let signature = signer.clone().add_sub_info(subscription).build()?; + + let payload = "ping".as_bytes(); + + let mut message = WebPushMessageBuilder::new(subscription); + message.set_payload(ContentEncoding::Aes128Gcm, payload); + message.set_vapid_signature(signature); + let message = message.build()?; + + Ok(message) + } + + pub async fn ping(&self, recipient: &Login) -> Result<(), PushError> { + let mut tx = self.db.begin().await?; + + let signer = tx.vapid().signer().await?; + let subscriptions = tx.push().by_login(recipient).await?; + + let pings: Vec<_> = subscriptions + .into_iter() + .map(|sub| Self::prepare_ping(&signer, &sub).map(|message| (sub, message))) + .try_collect()?; + + let deliveries = pings + .into_iter() + .map(async |(sub, message)| (sub, self.webpush.send(message).await)); + + let failures: Vec<_> = join_all(deliveries) + .await + .into_iter() + .filter_map(|(sub, result)| result.err().map(|err| (sub, err))) + .collect(); + + if !failures.is_empty() { + for (sub, err) in &failures { + match err { + // I _think_ this is the complete set of permanent failures. See + // for a complete + // list. + WebPushError::Unauthorized(_) + | WebPushError::InvalidUri + | WebPushError::EndpointNotValid(_) + | WebPushError::EndpointNotFound(_) + | WebPushError::InvalidCryptoKeys + | WebPushError::MissingCryptoKeys => { + tx.push().unsubscribe(sub).await?; + } + _ => (), + } + } + + return Err(PushError::Delivery( + failures.into_iter().map(|(_, err)| err).collect(), + )); + } + + tx.commit().await?; + + Ok(()) + } +} + #[derive(Debug, thiserror::Error)] pub enum SubscribeError { #[error(transparent)] @@ -74,3 +150,29 @@ pub enum SubscribeError { // client, which already knows the endpoint anyways and doesn't need us to tell them. 
Duplicate, } + +#[derive(Debug, thiserror::Error)] +pub enum PushError { + #[error(transparent)] + Database(#[from] sqlx::Error), + #[error(transparent)] + Ecdsa(#[from] p256::ecdsa::Error), + #[error(transparent)] + Pkcs8(#[from] p256::pkcs8::Error), + #[error(transparent)] + WebPush(#[from] WebPushError), + #[error("push message delivery failures: {0:?}")] + Delivery(Vec), +} + +impl From for PushError { + fn from(error: vapid::repo::Error) -> Self { + use vapid::repo::Error; + match error { + Error::Database(error) => error.into(), + Error::Ecdsa(error) => error.into(), + Error::Pkcs8(error) => error.into(), + Error::WebPush(error) => error.into(), + } + } +} diff --git a/src/push/handlers/mod.rs b/src/push/handlers/mod.rs index 86eeea0..bb58774 100644 --- a/src/push/handlers/mod.rs +++ b/src/push/handlers/mod.rs @@ -1,3 +1,5 @@ +mod ping; mod subscribe; +pub use ping::handler as ping; pub use subscribe::handler as subscribe; diff --git a/src/push/handlers/ping/mod.rs b/src/push/handlers/ping/mod.rs new file mode 100644 index 0000000..db828fa --- /dev/null +++ b/src/push/handlers/ping/mod.rs @@ -0,0 +1,23 @@ +use axum::{Json, extract::State, http::StatusCode}; +use web_push::WebPushClient; + +use crate::{error::Internal, push::app::Push, token::extract::Identity}; + +#[cfg(test)] +mod test; + +#[derive(serde::Deserialize)] +pub struct Request {} + +pub async fn handler

( + State(push): State>, + identity: Identity, + Json(_): Json, +) -> Result +where + P: WebPushClient, +{ + push.ping(&identity.login).await?; + + Ok(StatusCode::ACCEPTED) +} diff --git a/src/push/handlers/ping/test.rs b/src/push/handlers/ping/test.rs new file mode 100644 index 0000000..5725131 --- /dev/null +++ b/src/push/handlers/ping/test.rs @@ -0,0 +1,40 @@ +use axum::{ + extract::{Json, State}, + http::StatusCode, +}; + +use crate::test::fixtures; + +#[tokio::test] +async fn ping_without_subscriptions() { + let app = fixtures::scratch_app().await; + + let recipient = fixtures::identity::create(&app, &fixtures::now()).await; + + app.vapid() + .refresh_key(&fixtures::now()) + .await + .expect("refreshing the VAPID key always succeeds"); + + let response = super::handler(State(app.push()), recipient, Json(super::Request {})) + .await + .expect("sending a ping with no subscriptions always succeeds"); + + assert_eq!(StatusCode::ACCEPTED, response); + + assert!(app.webpush().sent().is_empty()); +} + +// More complete testing requires that we figure out how to generate working p256 ECDH keys for +// testing _with_, as `web_push` will actually parse and use those keys even if push messages are +// ultimately never serialized or sent over HTTP. +// +// Tests that are missing: +// +// * Verify that subscribing and sending a ping causes a ping to be delivered to that subscription. +// * Verify that two subscriptions both get pings. +// * Verify that other users' subscriptions are not pinged. +// * Verify that a ping that causes a permanent error causes the subscription to be deleted. +// * Verify that a ping that causes a non-permanent error does not cause the subscription to be +// deleted. +// * Verify that a failure on one subscription doesn't affect delivery on other subscriptions. 
diff --git a/src/push/handlers/subscribe/mod.rs b/src/push/handlers/subscribe/mod.rs index d142df6..a1a5899 100644 --- a/src/push/handlers/subscribe/mod.rs +++ b/src/push/handlers/subscribe/mod.rs @@ -36,8 +36,8 @@ pub struct Keys { auth: String, } -pub async fn handler( - State(push): State, +pub async fn handler

( + State(push): State>, identity: Identity, Json(request): Json, ) -> Result { @@ -58,8 +58,7 @@ impl From for SubscriptionInfo { endpoint, keys: Keys { p256dh, auth }, } = request; - let info = SubscriptionInfo::new(endpoint, p256dh, auth); - info + SubscriptionInfo::new(endpoint, p256dh, auth) } } diff --git a/src/push/repo.rs b/src/push/repo.rs index 6c18c6e..4183489 100644 --- a/src/push/repo.rs +++ b/src/push/repo.rs @@ -37,6 +37,24 @@ impl Push<'_> { Ok(()) } + pub async fn by_login(&mut self, login: &Login) -> Result, sqlx::Error> { + sqlx::query!( + r#" + select + subscription.endpoint, + subscription.p256dh, + subscription.auth + from push_subscription as subscription + join token on subscription.token = token.id + where token.login = $1 + "#, + login.id, + ) + .map(|row| SubscriptionInfo::new(row.endpoint, row.p256dh, row.auth)) + .fetch_all(&mut *self.0) + .await + } + pub async fn by_endpoint( &mut self, subscriber: &Login, @@ -65,6 +83,23 @@ impl Push<'_> { Ok(info) } + pub async fn unsubscribe( + &mut self, + subscription: &SubscriptionInfo, + ) -> Result<(), sqlx::Error> { + sqlx::query!( + r#" + delete from push_subscription + where endpoint = $1 + "#, + subscription.endpoint, + ) + .execute(&mut *self.0) + .await?; + + Ok(()) + } + pub async fn unsubscribe_token(&mut self, token: &Token) -> Result<(), sqlx::Error> { sqlx::query!( r#" diff --git a/src/routes.rs b/src/routes.rs index 00d9d3e..1c07e78 100644 --- a/src/routes.rs +++ b/src/routes.rs @@ -3,12 +3,16 @@ use axum::{ response::Redirect, routing::{delete, get, post}, }; +use web_push::WebPushClient; use crate::{ app::App, boot, conversation, event, expire, invite, login, message, push, setup, ui, vapid, }; -pub fn routes(app: &App) -> Router { +pub fn routes

(app: &App

) -> Router> +where + P: WebPushClient + Clone + Send + Sync + 'static, +{ // UI routes that can be accessed before the administrator completes setup. let ui_bootstrap = Router::new() .route("/{*path}", get(ui::handlers::asset)) @@ -46,6 +50,7 @@ pub fn routes(app: &App) -> Router { .route("/api/invite/{invite}", get(invite::handlers::get)) .route("/api/invite/{invite}", post(invite::handlers::accept)) .route("/api/messages/{message}", delete(message::handlers::delete)) + .route("/api/push/ping", post(push::handlers::ping)) .route("/api/push/subscribe", post(push::handlers::subscribe)) .route("/api/password", post(login::handlers::change_password)) // Run expiry whenever someone accesses the API. This was previously a blanket middleware diff --git a/src/test/fixtures/identity.rs b/src/test/fixtures/identity.rs index 20929f9..adc3e73 100644 --- a/src/test/fixtures/identity.rs +++ b/src/test/fixtures/identity.rs @@ -14,7 +14,7 @@ use crate::{ }, }; -pub async fn create(app: &App, created_at: &RequestedAt) -> Identity { +pub async fn create

(app: &App

, created_at: &RequestedAt) -> Identity { let credentials = fixtures::user::create_with_password(app, created_at).await; logged_in(app, &credentials, created_at).await } diff --git a/src/test/fixtures/login.rs b/src/test/fixtures/login.rs index d9aca81..839a412 100644 --- a/src/test/fixtures/login.rs +++ b/src/test/fixtures/login.rs @@ -5,7 +5,7 @@ use crate::{ test::fixtures::user::{propose, propose_name}, }; -pub async fn create(app: &App, created_at: &DateTime) -> Login { +pub async fn create

(app: &App

, created_at: &DateTime) -> Login { let (name, password) = propose(); app.users() .create(&name, &password, created_at) diff --git a/src/test/fixtures/mod.rs b/src/test/fixtures/mod.rs index 3d69cfa..53bf31b 100644 --- a/src/test/fixtures/mod.rs +++ b/src/test/fixtures/mod.rs @@ -1,6 +1,6 @@ use chrono::{TimeDelta, Utc}; -use crate::{app::App, clock::RequestedAt, db}; +use crate::{app::App, clock::RequestedAt, db, test::webpush::Client}; pub mod boot; pub mod conversation; @@ -13,11 +13,11 @@ pub mod login; pub mod message; pub mod user; -pub async fn scratch_app() -> App { +pub async fn scratch_app() -> App { let pool = db::prepare("sqlite::memory:", "sqlite::memory:") .await .expect("setting up in-memory sqlite database"); - App::from(pool) + App::from(pool, Client::new()) } pub fn now() -> RequestedAt { diff --git a/src/test/fixtures/user.rs b/src/test/fixtures/user.rs index d4d8db4..3ad4436 100644 --- a/src/test/fixtures/user.rs +++ b/src/test/fixtures/user.rs @@ -3,7 +3,7 @@ use uuid::Uuid; use crate::{app::App, clock::RequestedAt, login::Login, name::Name, password::Password}; -pub async fn create_with_password(app: &App, created_at: &RequestedAt) -> (Name, Password) { +pub async fn create_with_password

(app: &App

, created_at: &RequestedAt) -> (Name, Password) { let (name, password) = propose(); let user = app .users() @@ -14,7 +14,7 @@ pub async fn create_with_password(app: &App, created_at: &RequestedAt) -> (Name, (user.name, password) } -pub async fn create(app: &App, created_at: &RequestedAt) -> Login { +pub async fn create

(app: &App

, created_at: &RequestedAt) -> Login { super::login::create(app, created_at).await } diff --git a/src/test/mod.rs b/src/test/mod.rs index ebbbfef..f798b9c 100644 --- a/src/test/mod.rs +++ b/src/test/mod.rs @@ -1,2 +1,3 @@ pub mod fixtures; pub mod verify; +pub mod webpush; diff --git a/src/test/webpush.rs b/src/test/webpush.rs new file mode 100644 index 0000000..c86d03f --- /dev/null +++ b/src/test/webpush.rs @@ -0,0 +1,37 @@ +use std::{ + mem, + sync::{Arc, Mutex}, +}; + +use web_push::{WebPushClient, WebPushError, WebPushMessage}; + +#[derive(Clone)] +pub struct Client { + sent: Arc>>, +} + +impl Client { + pub fn new() -> Self { + Self { + sent: Arc::default(), + } + } + + // Clears the list of sent messages (for all clones of this Client) when called, because we + // can't clone `WebPushMessage`s so we either need to move them or try to reconstruct them, + // either of which sucks but moving them sucks less. + pub fn sent(&self) -> Vec { + let mut sent = self.sent.lock().unwrap(); + mem::replace(&mut *sent, Vec::new()) + } +} + +#[async_trait::async_trait] +impl WebPushClient for Client { + async fn send(&self, message: WebPushMessage) -> Result<(), WebPushError> { + let mut sent = self.sent.lock().unwrap(); + sent.push(message); + + Ok(()) + } +} diff --git a/src/vapid/app.rs b/src/vapid/app.rs index ebd2446..9949aa5 100644 --- a/src/vapid/app.rs +++ b/src/vapid/app.rs @@ -101,6 +101,7 @@ pub enum Error { Database(#[from] sqlx::Error), Ecdsa(#[from] p256::ecdsa::Error), Pkcs8(#[from] p256::pkcs8::Error), + WebPush(#[from] web_push::WebPushError), } impl From for Error { @@ -110,6 +111,7 @@ impl From for Error { Error::Database(error) => error.into(), Error::Ecdsa(error) => error.into(), Error::Pkcs8(error) => error.into(), + Error::WebPush(error) => error.into(), } } } diff --git a/src/vapid/repo.rs b/src/vapid/repo.rs index 98b3bae..9db61e1 100644 --- a/src/vapid/repo.rs +++ b/src/vapid/repo.rs @@ -1,8 +1,11 @@ +use std::io::Cursor; + use p256::{ 
ecdsa::SigningKey, pkcs8::{DecodePrivateKey as _, EncodePrivateKey as _, LineEnding}, }; use sqlx::{Sqlite, SqliteConnection, Transaction}; +use web_push::{PartialVapidSignatureBuilder, VapidSignatureBuilder}; use super::{ History, @@ -118,6 +121,21 @@ impl Vapid<'_> { Ok(key) } + + pub async fn signer(&mut self) -> Result { + let key = sqlx::query_scalar!( + r#" + select key + from vapid_signing_key + "# + ) + .fetch_one(&mut *self.0) + .await?; + let key = Cursor::new(&key); + let signer = VapidSignatureBuilder::from_pem_no_sub(key)?; + + Ok(signer) + } } #[derive(Debug, thiserror::Error)] @@ -125,6 +143,7 @@ impl Vapid<'_> { pub enum Error { Ecdsa(#[from] p256::ecdsa::Error), Pkcs8(#[from] p256::pkcs8::Error), + WebPush(#[from] web_push::WebPushError), Database(#[from] sqlx::Error), } diff --git a/ui/lib/apiServer.js b/ui/lib/apiServer.js index f55f271..9eeb128 100644 --- a/ui/lib/apiServer.js +++ b/ui/lib/apiServer.js @@ -47,7 +47,7 @@ export async function getInvite(inviteId) { } export async function acceptInvite(inviteId, name, password) { - return apiServer + return await apiServer .post(`/invite/${inviteId}`, { name, password, @@ -56,7 +56,11 @@ export async function acceptInvite(inviteId, name, password) { } export async function createPushSubscription(subscription, vapid) { - return apiServer.post('/push/subscribe', { subscription, vapid }).catch(responseError); + return await apiServer.post('/push/subscribe', { subscription, vapid }).catch(responseError); +} + +export async function sendPing() { + return await apiServer.post('/push/ping', {}).catch(responseError); } export function subscribeToEvents(resumePoint) { diff --git a/ui/lib/components/PushSubscription.svelte b/ui/lib/components/PushSubscription.svelte index a85cbb3..aab4929 100644 --- a/ui/lib/components/PushSubscription.svelte +++ b/ui/lib/components/PushSubscription.svelte @@ -1,5 +1,5 @@ -{#if vapid !== null} - {#if subscription === null} +{#if !!vapid} + {#if !subscription}

{/if} +
+ +
{:else} Waiting for VAPID key… {/if} diff --git a/ui/routes/(app)/me/+page.svelte b/ui/routes/(app)/me/+page.svelte index ddb1245..a21d160 100644 --- a/ui/routes/(app)/me/+page.svelte +++ b/ui/routes/(app)/me/+page.svelte @@ -35,11 +35,15 @@ async function subscribe() { await session.push.subscribe(); } + + async function ping() { + await api.sendPing(); + }
- +

diff --git a/ui/routes/(swatch)/.swatch/+page.svelte b/ui/routes/(swatch)/.swatch/+page.svelte index 5334438..c1969e5 100644 --- a/ui/routes/(swatch)/.swatch/+page.svelte +++ b/ui/routes/(swatch)/.swatch/+page.svelte @@ -19,5 +19,6 @@
  • MessageRun
  • MessageInput
  • Message
  • +
  • PushSubscription
  • swatch/EventLog
  • diff --git a/ui/routes/(swatch)/.swatch/PushSubscription/+page.svelte b/ui/routes/(swatch)/.swatch/PushSubscription/+page.svelte new file mode 100644 index 0000000..3d564a3 --- /dev/null +++ b/ui/routes/(swatch)/.swatch/PushSubscription/+page.svelte @@ -0,0 +1,79 @@ + + +

    PushSubscription

    + + + +

    properties

    + +
    + +
    + interesting values: + + +
    + + +
    + +

    rendered

    + +
    + +
    + +

    events

    + + diff --git a/ui/service-worker.js b/ui/service-worker.js index d9b2a7c..cb32d0d 100644 --- a/ui/service-worker.js +++ b/ui/service-worker.js @@ -52,3 +52,12 @@ async function cacheFirst(request) { self.addEventListener('fetch', (event) => { event.respondWith(cacheFirst(event.request)); }); + +self.addEventListener('push', (event) => { + event.waitUntil( + self.registration.showNotification('Test notification', { + actions: [], + body: event.data.text(), + }), + ); +}); -- cgit v1.2.3