summaryrefslogtreecommitdiff
path: root/migrations/20241009031441_login_created_at.sql
blob: 001c48e717ae97628c3073cb343a8490b87ca173 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
-- figure out the new event sequence.
--
-- The main problem here is ensuring that the resulting stream is _consistent_.
-- For any message, the login it refers to must be created in the event stream
-- before the message itself, and the channel it refers to must likewise be
-- created first.
--
-- Messages come after channels by time (clock slew allowing), so we can reuse
-- those timestamps to recover a global ordering between channels and messages.
--
-- We synthesize a login's created_at from the earliest of:
--
-- * the earliest message they sent that we know of, or
-- * right now (no messages in recorded history).
--
-- This produces a consistent sequence, up to clock slew, at the expense of
-- renumbering every event.

-- Staging table: one row per event-to-be (a login creation, a channel
-- creation, or a message), keyed only by its timestamp. Sequence numbers are
-- assigned later, once all three sources have been merged.
create table unsequenced (
	at text
		not null,
	login text
		references login (id),
	channel text
		references channel (id),
	message text
		references message (id),
	-- Exactly one of login/channel/message is set per row; each row stands
	-- for exactly one kind of event.
	check (
		(login is not null and channel is null and message is null)
		or (login is null and channel is not null and message is null)
		or (login is null and channel is null and message is not null)
	)
);

-- Synthesize a created_at per login: the earliest message they sent, or "now"
-- for logins with no recorded messages.
--
-- The `group by login.id` is required: without it this is a plain aggregate
-- query, which collapses to a single row in SQLite, so only one arbitrary
-- login would be staged (and every other login would later be silently
-- dropped by the inner join against `event` during the copy).
--
-- NOTE(review): `strftime(..., 'now', 'utc')` relies on the 'utc' modifier
-- being a no-op for the already-UTC 'now' value; older SQLite versions may
-- double-shift here -- confirm against the deployed SQLite version.
insert into unsequenced (at, login)
select
	coalesce (
		min(message.sent_at),
		strftime('%FT%R:%f+00:00', 'now', 'utc')
	),
	login.id
from login
left join message
	on login.id = message.sender
group by login.id;

-- Channels already carry their own creation timestamps; stage them as-is.
insert into unsequenced (at, channel)
select
	channel.created_at,
	channel.id
from channel;

-- Messages likewise keep their recorded send timestamps.
insert into unsequenced (at, message)
select
	message.sent_at,
	message.id
from message;

-- The fully-ordered event stream: same shape as `unsequenced`, plus the
-- global sequence number that the rebuilt tables will copy out.
create table event (
	at text
		not null,
	-- Typeless on purpose (SQLite); populated by row_number() below.
	sequence
		primary key
		not null,
	login text
		references login (id),
	channel text
		references channel (id),
	message text
		references message (id),
	-- Same one-of-three invariant as `unsequenced`.
	check (
		(login is not null and channel is null and message is null)
		or (login is null and channel is not null and message is null)
		or (login is null and channel is null and message is not null)
	)
);

-- Assign global sequence numbers. Ties on `at` are guaranteed (a login's
-- synthesized timestamp equals its earliest message's sent_at), so the
-- tie-break must sequence referenced rows first: logins (`login is null` = 0)
-- before channels (`channel is null` = 0 among the rest) before messages.
--
-- The original tie-broke on `message is null`, which sorts messages *before*
-- channels at equal timestamps and could sequence a message ahead of the
-- channel it belongs to -- the exact inconsistency this migration exists to
-- prevent.
insert into event (at, sequence, login, channel, message)
select
	at, row_number() over (order by at, login is null, channel is null), login, channel, message
from unsequenced;

-- The staging table has served its purpose; drop it before the rebuild so it
-- does not linger in the database.
drop table unsequenced;

-- Because of how foundational `login` is, we pretty much have to recreate the
-- whole schema. Rename children before parents so each rename sees a
-- consistent set of foreign-key targets.
alter table message
rename to old_message;

alter table channel
rename to old_channel;

alter table token
rename to old_token;

alter table login
rename to old_login;

-- Rebuilt `login`: identical to the old table plus the new created_sequence
-- and created_at columns this migration introduces.
create table login (
	id text
		not null
		primary key,
	name text
		not null
		unique,
	password_hash text
		not null,
	-- Position in the global event stream; unique across logins (and, by
	-- construction, across all events).
	created_sequence bigint
		unique
		not null,
	-- Synthesized from the login's earliest message (or migration time).
	created_at text
		not null
);

-- Rebuilt `token`: unchanged shape; recreated only so its foreign key points
-- at the new `login` table rather than `old_login`.
create table token (
	id text
		not null
		primary key,
	secret text
		not null
		unique,
	login text
		not null
		references login (id),
	issued_at text
		not null,
	last_used_at text
		not null
);

-- Rebuilt `channel`: as before, plus the global created_sequence.
create table channel (
	id text
		not null
		primary key,
	name text
		unique
		not null,
	created_sequence bigint
		unique
		not null,
	created_at text
		not null
);

-- Rebuilt `message`: as before, plus the global sent_sequence.
create table message (
	id text
		not null
		primary key,
	channel text
		not null
		references channel (id),
	sender text
		not null
		references login (id),
	sent_sequence bigint
		unique
		not null,
	sent_at text
		not null,
	body text
		not null
);

-- Copy data from the original tables, assigning sequence numbers as we go.
-- `login` takes both its sequence and its (synthesized) created_at from the
-- event stream.
insert into login (id, name, password_hash, created_sequence, created_at)
select
	old_login.id,
	old_login.name,
	old_login.password_hash,
	event.sequence,
	event.at
from old_login
join event
	on event.login = old_login.id
order by event.sequence;

-- Tokens carry no sequence number; copy them across unchanged.
insert into token (id, secret, login, issued_at, last_used_at)
select
	old_token.id,
	old_token.secret,
	old_token.login,
	old_token.issued_at,
	old_token.last_used_at
from old_token;

-- Channels keep their original created_at; only the sequence number comes
-- from the event stream.
insert into channel (id, name, created_sequence, created_at)
select
	old_channel.id,
	old_channel.name,
	event.sequence,
	old_channel.created_at
from old_channel
join event
	on event.channel = old_channel.id
order by event.sequence;

-- Messages likewise keep their original sent_at alongside the new sequence.
insert into message (id, channel, sender, sent_sequence, sent_at, body)
select
	old_message.id,
	old_message.channel,
	old_message.sender,
	event.sequence,
	old_message.sent_at,
	old_message.body
from old_message
join event
	on event.message = old_message.id
order by event.sequence;

-- Restart the event sequence, using the highest sequence number in the new
-- event series. The missing WHERE clause is intentional: `event_sequence` is
-- presumably a single-row counter table defined in an earlier migration --
-- verify there.
update event_sequence
set last_value = (select coalesce(max(sequence), 0) from event);

-- Clean up the now-unused original tables, plus the resequencing temp table.
-- Children are dropped before the parents they reference.
drop table event;
drop table old_message;
drop table old_channel;
drop table old_token;
drop table old_login;

-- Reindex: the original indices were dropped along with the old tables, so
-- their names are free to reuse on the rebuilt ones.
create index token_issued_at on token (issued_at);

create index message_sent_at on message (sent_at);

create index channel_created_at on channel (created_at);