mz_storage/source/postgres/replication.rs
// Copyright Materialize, Inc. and contributors. All rights reserved.
//
// Use of this software is governed by the Business Source License
// included in the LICENSE file.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0.

//! Renders the logical replication side of the [`PostgresSourceConnection`] ingestion dataflow.
//!
//! ```text
//!              o
//!              │rewind
//!              │requests
//!          ╭───┴────╮
//!          │exchange│ (collect all requests to one worker)
//!          ╰───┬────╯
//!           ┏━━v━━━━━━━━━━┓
//!           ┃ replication ┃ (single worker)
//!           ┃   reader    ┃
//!           ┗━┯━━━━━━━━┯━━┛
//!             │raw     │
//!             │data    │
//!        ╭────┴─────╮  │
//!        │distribute│  │ (distribute to all workers)
//!        ╰────┬─────╯  │
//! ┏━━━━━━━━━━━┷━┓      │
//! ┃ replication ┃      │ (parallel decode)
//! ┃   decoder   ┃      │
//! ┗━━━━━┯━━━━━━━┛      │
//!       │ replication  │ progress
//!       │ updates      │ output
//!       v              v
//! ```
//!
//! # Progress tracking
//!
//! In order to avoid causing excessive resource usage in the upstream server it's important to
//! track the LSN that we have successfully committed to persist and communicate that back to
//! PostgreSQL. Under normal operation this gauge of progress is provided by the transactions
//! themselves. Since at a given LSN offset there can be only a single message, when a transaction
//! is received and processed we can infer that we have seen all messages up to and including its
//! `commit_lsn`, and can therefore advance our frontier to `commit_lsn + 1`.
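//!
//! For example, once we process a transaction whose `commit_lsn` is 42 we know that no further
//! messages can appear at LSN 42 or below, so the frontier can safely advance to 43.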
//!
//! Things are a bit more complicated in the absence of transactions, though, because even though
//! we don't receive any, the server might very well be generating WAL records. This can happen if
//! there is a separate logical database performing writes (which is the case for RDS databases),
//! or, on servers running PostgreSQL version 15 or greater, because the logical replication
//! process includes an optimization that omits empty transactions, which occur when you're only
//! replicating a subset of the tables and the writes go to the other ones.
//!
//! If we fail to detect this situation and don't send LSN feedback in a timely manner the server
//! will be forced to keep around WAL data, which can eventually lead to disk space exhaustion.
//!
//! In the absence of transactions the only pieces of information available in the replication
//! stream are keepalive messages. Keepalive messages are documented[1] to contain the current end
//! of WAL on the server. That is a useless number when it comes to progress tracking because
//! there might be pending messages at LSNs between the last received commit_lsn and the current
//! end of WAL.
//!
//! Fortunately for us, the documentation for PrimaryKeepalive messages is wrong and it actually
//! contains the last *sent* LSN[2]. Here "sent" doesn't necessarily mean sent over the wire, but
//! handed off to the upstream process that produces the logical stream. Therefore, if we receive
//! a keepalive with a particular LSN we can be certain that there are no other replication
//! messages at previous LSNs, because they would have already been generated and received. We
//! therefore connect the keepalive messages directly to our capability.
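//!
//! For example, if the last transaction we received committed at LSN 1000 and we then receive a
//! keepalive whose last sent LSN is 1234, we can advance our upper to 1234 even though no
//! transaction data arrived in between.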
//!
//! [1]: https://www.postgresql.org/docs/15/protocol-replication.html#PROTOCOL-REPLICATION-START-REPLICATION
//! [2]: https://www.postgresql.org/message-id/CAFPTHDZS9O9WG02EfayBd6oONzK%2BqfUxS6AbVLJ7W%2BKECza2gg%40mail.gmail.com

use std::collections::BTreeMap;
use std::convert::Infallible;
use std::pin::pin;
use std::rc::Rc;
use std::str::FromStr;
use std::sync::Arc;
use std::sync::LazyLock;
use std::time::Instant;
use std::time::{Duration, SystemTime, UNIX_EPOCH};

use differential_dataflow::AsCollection;
use futures::{FutureExt, Stream as AsyncStream, StreamExt, TryStreamExt};
use mz_dyncfg::ConfigSet;
use mz_ore::cast::CastFrom;
use mz_ore::future::InTask;
use mz_postgres_util::PostgresError;
use mz_postgres_util::{Client, simple_query_opt};
use mz_repr::{Datum, DatumVec, Diff, Row};
use mz_sql_parser::ast::Ident;
use mz_sql_parser::ast::display::{AstDisplay, escaped_string_literal};
use mz_storage_types::dyncfgs::PG_SCHEMA_VALIDATION_INTERVAL;
use mz_storage_types::dyncfgs::PG_SOURCE_VALIDATE_TIMELINE;
use mz_storage_types::errors::DataflowError;
use mz_storage_types::sources::{MzOffset, PostgresSourceConnection};
use mz_timely_util::builder_async::{
    AsyncOutputHandle, Event as AsyncEvent, OperatorBuilder as AsyncOperatorBuilder,
    PressOnDropButton,
};
use postgres_replication::LogicalReplicationStream;
use postgres_replication::protocol::{LogicalReplicationMessage, ReplicationMessage, TupleData};
use serde::{Deserialize, Serialize};
use timely::container::CapacityContainerBuilder;
use timely::dataflow::channels::pact::{Exchange, Pipeline};
use timely::dataflow::operators::Capability;
use timely::dataflow::operators::Concat;
use timely::dataflow::operators::Operator;
use timely::dataflow::operators::core::Map;
use timely::dataflow::{Scope, StreamVec};
use timely::progress::Antichain;
use tokio::sync::{mpsc, watch};
use tokio_postgres::error::SqlState;
use tokio_postgres::types::PgLsn;
use tracing::{error, trace};

use crate::metrics::source::postgres::PgSourceMetrics;
use crate::source::RawSourceCreationConfig;
use crate::source::postgres::verify_schema;
use crate::source::postgres::{DefiniteError, ReplicationError, SourceOutputInfo, TransientError};
use crate::source::probe;
use crate::source::types::{Probe, SignaledFuture, SourceMessage, StackedCollection};

/// A logical replication message from the server.
type LogicalReplMsg = ReplicationMessage<LogicalReplicationMessage>;

/// A decoded row from a transaction, in the form `(table_oid, output_index, row_result, diff)`.
type DecodedRow = (u32, usize, Result<Row, DefiniteError>, Diff);

/// Postgres epoch is 2000-01-01T00:00:00Z
static PG_EPOCH: LazyLock<SystemTime> =
    LazyLock::new(|| UNIX_EPOCH + Duration::from_secs(946_684_800));

/// A request to rewind a snapshot taken at `snapshot_lsn` to the initial LSN of the replication
/// slot. This is accomplished by emitting `(data, 0, -diff)` for all updates `(data, lsn, diff)`
/// whose `lsn <= snapshot_lsn`. By convention the snapshot is always emitted at LSN 0.
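///
/// For example, an insert committed at `lsn = 5` is already reflected in a snapshot taken at
/// `snapshot_lsn = 10`, so when the replication stream re-emits it as `(data, 5, +1)` the rewind
/// also emits `(data, 0, -1)`, cancelling out the duplicate contained in the snapshot.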
#[derive(Debug, Clone, Serialize, Deserialize)]
pub(crate) struct RewindRequest {
    /// The output index that should be rewound.
    pub(crate) output_index: usize,
    /// The LSN that the snapshot was taken at.
    pub(crate) snapshot_lsn: MzOffset,
}

/// Renders the replication dataflow. See the module documentation for more information.
pub(crate) fn render<G: Scope<Timestamp = MzOffset>>(
    scope: G,
    config: RawSourceCreationConfig,
    connection: PostgresSourceConnection,
    table_info: BTreeMap<u32, BTreeMap<usize, SourceOutputInfo>>,
    rewind_stream: StreamVec<G, RewindRequest>,
    slot_ready_stream: StreamVec<G, Infallible>,
    committed_uppers: impl futures::Stream<Item = Antichain<MzOffset>> + 'static,
    metrics: PgSourceMetrics,
) -> (
    StackedCollection<G, (usize, Result<SourceMessage, DataflowError>)>,
    StreamVec<G, Probe<MzOffset>>,
    StreamVec<G, ReplicationError>,
    PressOnDropButton,
) {
    let op_name = format!("ReplicationReader({})", config.id);
    let mut builder = AsyncOperatorBuilder::new(op_name, scope.clone());

    let slot_reader = u64::cast_from(config.responsible_worker("slot"));
    let (data_output, data_stream) = builder.new_output();
    let (definite_error_handle, definite_errors) =
        builder.new_output::<CapacityContainerBuilder<_>>();
    let (probe_output, probe_stream) = builder.new_output::<CapacityContainerBuilder<_>>();

    let mut rewind_input =
        builder.new_disconnected_input(rewind_stream, Exchange::new(move |_| slot_reader));
    let mut slot_ready_input = builder.new_disconnected_input(slot_ready_stream, Pipeline);
    let output_uppers = table_info
        .iter()
        .flat_map(|(_, outputs)| outputs.values().map(|o| o.resume_upper.clone()))
        .collect::<Vec<_>>();
    metrics.tables.set(u64::cast_from(output_uppers.len()));

    let reader_table_info = table_info.clone();
    let (button, transient_errors) = builder.build_fallible(move |caps| {
        let mut table_info = reader_table_info;
        let busy_signal = Arc::clone(&config.busy_signal);
        Box::pin(SignaledFuture::new(busy_signal, async move {
            let (id, worker_id) = (config.id, config.worker_id);
            let [data_cap_set, definite_error_cap_set, probe_cap]: &mut [_; 3] =
                caps.try_into().unwrap();

            if !config.responsible_for("slot") {
                // Emit 0, to mark this worker as having started up correctly.
                for stat in config.statistics.values() {
                    stat.set_offset_known(0);
                    stat.set_offset_committed(0);
                }
                return Ok(());
            }

            // Determine the slot lsn.
            let connection_config = connection
                .connection
                .config(
                    &config.config.connection_context.secrets_reader,
                    &config.config,
                    InTask::Yes,
                )
                .await?;

            let slot = &connection.publication_details.slot;
            let replication_client = connection_config
                .connect_replication(&config.config.connection_context.ssh_tunnel_manager)
                .await?;

            let metadata_client = connection_config
                .connect(
                    "replication metadata",
                    &config.config.connection_context.ssh_tunnel_manager,
                )
                .await?;
            let metadata_client = Arc::new(metadata_client);

            while let Some(_) = slot_ready_input.next().await {
                // Wait for the slot to be created
            }

            // The slot is always created by the snapshot operator. If the slot doesn't exist
            // when this check runs, this operator will return an error.
            let slot_metadata = super::fetch_slot_metadata(
                &*metadata_client,
                slot,
                mz_storage_types::dyncfgs::PG_FETCH_SLOT_RESUME_LSN_INTERVAL
                    .get(config.config.config_set()),
            )
            .await?;

            // We're the only application that should be using this replication
            // slot. The only way that there can be another connection using
            // this slot under normal operation is if there's a stale TCP
            // connection from a prior incarnation of the source holding on to
            // the slot. We don't want to wait for the WAL sender timeout and/or
            // TCP keepalives to time out that connection, because these values
            // are generally under the control of the DBA and may not time out
            // the connection for multiple minutes, or at all. Instead we just
            // force kill the connection that's using the slot.
            //
            // Note that there's a small risk that *we're* the zombie cluster
            // that should not be using the replication slot. Kubernetes cannot
            // 100% guarantee that only one cluster is alive at a time. However,
            // this situation should not last long, and the worst that can
            // happen is a bit of transient thrashing over ownership of the
            // replication slot.
            if let Some(active_pid) = slot_metadata.active_pid {
                tracing::warn!(
                    %id, %active_pid,
                    "replication slot already in use; will attempt to kill existing connection",
                );

                match metadata_client
                    .execute("SELECT pg_terminate_backend($1)", &[&active_pid])
                    .await
                {
                    Ok(_) => {
                        tracing::info!(
                            "successfully killed existing connection; \
                             starting replication is likely to succeed"
                        );
                        // Note that `pg_terminate_backend` does not wait for
                        // the termination of the targeted connection to
                        // complete. We may try to start replication before the
                        // targeted connection has cleaned up its state. That's
                        // okay. If that happens we'll just try again from the
                        // top via the suspend-and-restart flow.
                    }
                    Err(e) => {
                        tracing::warn!(
                            %e,
                            "failed to kill existing replication connection; \
                             replication will likely fail to start"
                        );
                        // Continue on anyway, just in case the replication slot
                        // is actually available. Maybe PostgreSQL has some
                        // staleness when it reports `active_pid`, for example.
                    }
                }
            }

            // The overall resumption point for this source is the minimum of the resumption points
            // contributed by each of the outputs.
            let resume_lsn = output_uppers
                .iter()
                .flat_map(|f| f.elements())
                .map(|&lsn| {
                    // An output is either an output that has never had data committed to it or one
                    // that has and needs to resume. We differentiate between the two by checking
                    // whether an output wishes to "resume" from the minimum timestamp. In that case
                    // its contribution to the overall resumption point is the earliest point
                    // available in the slot. This information would normally be something that the
                    // storage controller figures out in the form of an as-of frontier, but at the
                    // moment the storage controller does not have visibility into what the
                    // replication slot is doing.
                    if lsn == MzOffset::from(0) {
                        slot_metadata.confirmed_flush_lsn
                    } else {
                        lsn
                    }
                })
                .min();
            let Some(resume_lsn) = resume_lsn else {
                std::future::pending::<()>().await;
                return Ok(());
            };
            // If we don't set "offset_committed" now, it'll be stuck at 0 (the default value)
            // until we finish processing the table snapshot. If the snapshot is large, that could
            // be a long time. This confuses the ingestion lag calculation in the UI, causing it to
            // yield erroneously high values.
            for stat in config.statistics.values() {
                stat.set_offset_committed(resume_lsn.offset);
            }
            trace!(%id, "timely-{worker_id} replication reader started lsn={resume_lsn}");

            // Emitting an initial probe before we start waiting for rewinds ensures that we will
            // have a timestamp binding in the remap collection while the snapshot is processed.
            // This is important because otherwise the snapshot updates would need to be buffered
            // in the reclock operator, instead of being spilled to S3 in the persist sink.
            //
            // Note that we need to fetch the probe LSN _after_ having created the replication
            // slot, to make sure the fetched LSN will be included in the replication stream.
            let probe_ts = (config.now_fn)().into();
            let max_lsn = super::fetch_max_lsn(&*metadata_client).await?;
            let probe = Probe {
                probe_ts,
                upstream_frontier: Antichain::from_elem(max_lsn),
            };
            probe_output.give(&probe_cap[0], probe);

            let mut rewinds = BTreeMap::new();
            while let Some(event) = rewind_input.next().await {
                if let AsyncEvent::Data(_, data) = event {
                    for req in data {
                        if resume_lsn > req.snapshot_lsn + 1 {
                            let err = DefiniteError::SlotCompactedPastResumePoint(
                                req.snapshot_lsn + 1,
                                resume_lsn,
                            );
                            // If the replication stream cannot be obtained from the resume point
                            // there is nothing else to do. These errors are not retractable.
                            for (oid, outputs) in table_info.iter() {
                                for output_index in outputs.keys() {
                                    // We pick `u64::MAX` as the LSN which will (in practice)
                                    // never conflict with any previously revealed portions of
                                    // the TVC.
                                    let update = (
                                        (
                                            *oid,
                                            *output_index,
                                            Err(DataflowError::from(err.clone())),
                                        ),
                                        MzOffset::from(u64::MAX),
                                        Diff::ONE,
                                    );
                                    data_output.give_fueled(&data_cap_set[0], update).await;
                                }
                            }
                            definite_error_handle.give(
                                &definite_error_cap_set[0],
                                ReplicationError::Definite(Rc::new(err)),
                            );
                            return Ok(());
                        }
                        rewinds.insert(req.output_index, req);
                    }
                }
            }
            trace!(%id, "timely-{worker_id} pending rewinds {rewinds:?}");

            let mut committed_uppers = pin!(committed_uppers);

            let stream_result = raw_stream(
                &config,
                replication_client,
                Arc::clone(&metadata_client),
                &connection.publication_details.slot,
                &connection.publication_details.timeline_id,
                &connection.publication,
                resume_lsn,
                committed_uppers.as_mut(),
                &probe_output,
                &probe_cap[0],
            )
            .await?;

            let stream = match stream_result {
                Ok(stream) => stream,
                Err(err) => {
                    // If the replication stream cannot be obtained in a definite way there is
                    // nothing else to do. These errors are not retractable.
                    for (oid, outputs) in table_info.iter() {
                        for output_index in outputs.keys() {
                            // We pick `u64::MAX` as the LSN which will (in practice) never
                            // conflict with any previously revealed portions of the TVC.
                            let update = (
                                (*oid, *output_index, Err(DataflowError::from(err.clone()))),
                                MzOffset::from(u64::MAX),
                                Diff::ONE,
                            );
                            data_output.give_fueled(&data_cap_set[0], update).await;
                        }
                    }

                    definite_error_handle.give(
                        &definite_error_cap_set[0],
                        ReplicationError::Definite(Rc::new(err)),
                    );
                    return Ok(());
                }
            };
            let mut stream = pin!(stream.peekable());

            // Run the periodic schema validation on a separate task using a separate client,
            // to prevent it from blocking the replication reading progress.
            let ssh_tunnel_manager = &config.config.connection_context.ssh_tunnel_manager;
            let client = connection_config
                .connect("schema validation", ssh_tunnel_manager)
                .await?;
            let mut schema_errors = spawn_schema_validator(
                client,
                &config,
                connection.publication.clone(),
                table_info.clone(),
            );

            // Instead of downgrading the capability for every transaction we process we only do it
            // if we're about to yield, which is checked at the bottom of the loop. This avoids
            // creating excessive progress tracking traffic when there are multiple small
            // transactions ready to go.
            let mut data_upper = resume_lsn;
            while let Some(event) = stream.as_mut().next().await {
                use LogicalReplicationMessage::*;
                use ReplicationMessage::*;
                match event {
                    Ok(XLogData(data)) => match data.data() {
                        Begin(begin) => {
                            let commit_lsn = MzOffset::from(begin.final_lsn());

                            let mut tx = pin!(extract_transaction(
                                stream.by_ref(),
                                &*metadata_client,
                                commit_lsn,
                                &mut table_info,
                                &metrics,
                                &connection.publication,
                            ));

                            trace!(
                                %id,
                                "timely-{worker_id} extracting transaction \
                                at {commit_lsn}"
                            );
                            assert!(
                                data_upper <= commit_lsn,
                                "new_upper={data_upper} tx_lsn={commit_lsn}",
                            );
                            data_upper = commit_lsn + 1;
                            while let Some((oid, output_index, event, diff)) = tx.try_next().await?
                            {
                                let event = event.map_err(Into::into);
                                let mut data = (oid, output_index, event);
                                if let Some(req) = rewinds.get(&output_index) {
                                    if commit_lsn <= req.snapshot_lsn {
                                        let update = (data, MzOffset::from(0), -diff);
                                        data_output.give_fueled(&data_cap_set[0], &update).await;
                                        data = update.0;
                                    }
                                }
                                let update = (data, commit_lsn, diff);
                                data_output.give_fueled(&data_cap_set[0], &update).await;
                            }
                        }
                        _ => return Err(TransientError::BareTransactionEvent),
                    },
                    Ok(PrimaryKeepAlive(keepalive)) => {
                        trace!(
                            %id,
                            "timely-{worker_id} received keepalive lsn={}",
                            keepalive.wal_end()
                        );

                        // Take the opportunity to report any schema validation errors.
                        while let Ok(error) = schema_errors.try_recv() {
                            use SchemaValidationError::*;
                            match error {
                                Postgres(PostgresError::PublicationMissing(publication)) => {
                                    let err = DefiniteError::PublicationDropped(publication);
                                    for (oid, outputs) in table_info.iter() {
                                        for output_index in outputs.keys() {
                                            let update = (
                                                (
                                                    *oid,
                                                    *output_index,
                                                    Err(DataflowError::from(err.clone())),
                                                ),
                                                data_cap_set[0].time().clone(),
                                                Diff::ONE,
                                            );
                                            data_output.give_fueled(&data_cap_set[0], update).await;
                                        }
                                    }
                                    definite_error_handle.give(
                                        &definite_error_cap_set[0],
                                        ReplicationError::Definite(Rc::new(err)),
                                    );
                                    return Ok(());
                                }
                                Postgres(pg_error) => Err(TransientError::from(pg_error))?,
                                Schema {
                                    oid,
                                    output_index,
                                    error,
                                } => {
                                    let table = table_info.get_mut(&oid).unwrap();
                                    if table.remove(&output_index).is_none() {
                                        continue;
                                    }

                                    let update = (
                                        (oid, output_index, Err(error.into())),
                                        data_cap_set[0].time().clone(),
                                        Diff::ONE,
                                    );
                                    data_output.give_fueled(&data_cap_set[0], update).await;
                                }
                            }
                        }
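                        // As explained in the module docs, the keepalive's `wal_end` is really
                        // the last *sent* LSN, so it is safe to advance our upper to it even
                        // without having seen a transaction at that LSN.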
                        data_upper = std::cmp::max(data_upper, keepalive.wal_end().into());
                    }
                    Ok(_) => return Err(TransientError::UnknownReplicationMessage),
                    Err(err) => return Err(err),
                }

                let will_yield = stream.as_mut().peek().now_or_never().is_none();
                if will_yield {
                    trace!(%id, "timely-{worker_id} yielding at lsn={data_upper}");
                    rewinds.retain(|_, req| data_upper <= req.snapshot_lsn);
                    // As long as there are pending rewinds we can't downgrade our data capability
                    // since we must be able to produce data at offset 0.
                    if rewinds.is_empty() {
                        data_cap_set.downgrade([&data_upper]);
                    }
                }
            }
            // We never expect the replication stream to gracefully end
            Err(TransientError::ReplicationEOF)
        }))
    });

    // We now process the slot updates and apply the cast expressions
    let mut final_row = Row::default();
    let mut datum_vec = DatumVec::new();
    let mut next_worker = (0..u64::cast_from(scope.peers()))
        // Round-robin on a 1000-record basis to avoid creating tiny containers when there are a
        // small number of updates and a large number of workers.
        .flat_map(|w| std::iter::repeat_n(w, 1000))
        .cycle();
    let round_robin = Exchange::new(move |_| next_worker.next().unwrap());
    let replication_updates = data_stream
        .map::<Vec<_>, _, _>(Clone::clone)
        .unary(round_robin, "PgCastReplicationRows", |_, _| {
            move |input, output| {
                input.for_each_time(|time, data| {
                    let mut session = output.session(&time);
                    for ((oid, output_index, event), time, diff) in
                        data.flat_map(|data| data.drain(..))
                    {
                        let output = &table_info
                            .get(&oid)
                            .and_then(|outputs| outputs.get(&output_index))
                            .expect("table_info contains all outputs");
                        let event = event.and_then(|row| {
                            let datums = datum_vec.borrow_with(&row);
                            super::cast_row(&output.casts, &datums, &mut final_row)?;
                            Ok(SourceMessage {
                                key: Row::default(),
                                value: final_row.clone(),
                                metadata: Row::default(),
                            })
                        });

                        session.give(((output_index, event), time, diff));
                    }
                });
            }
        })
        .as_collection();

    let errors = definite_errors.concat(transient_errors.map(ReplicationError::from));

    (
        replication_updates,
        probe_stream,
        errors,
        button.press_on_drop(),
    )
}

/// Produces the logical replication stream while taking care of regularly sending standby
/// keepalive messages with the provided `uppers` stream.
///
/// The returned stream will contain all transactions whose commit LSN is at or beyond
/// `resume_lsn`.
async fn raw_stream<'a>(
    config: &'a RawSourceCreationConfig,
    replication_client: Client,
    metadata_client: Arc<Client>,
    slot: &'a str,
    timeline_id: &'a Option<u64>,
    publication: &'a str,
    resume_lsn: MzOffset,
    uppers: impl futures::Stream<Item = Antichain<MzOffset>> + 'a,
    probe_output: &'a AsyncOutputHandle<MzOffset, CapacityContainerBuilder<Vec<Probe<MzOffset>>>>,
    probe_cap: &'a Capability<MzOffset>,
) -> Result<
    Result<impl AsyncStream<Item = Result<LogicalReplMsg, TransientError>> + 'a, DefiniteError>,
    TransientError,
> {
    if let Err(err) = ensure_publication_exists(&*metadata_client, publication).await? {
        // If the publication gets deleted there is nothing else to do. These errors
        // are not retractable.
        return Ok(Err(err));
    }

    // Skip the timeline ID check for sources without a known timeline ID
    // (sources created before the timeline ID was added to the source details)
    if let Some(expected_timeline_id) = timeline_id {
        if let Err(err) = ensure_replication_timeline_id(
            &replication_client,
            expected_timeline_id,
            config.config.config_set(),
        )
        .await?
        {
            return Ok(Err(err));
        }
    }

    // How often a proactive standby status update message should be sent to the server.
    //
    // The upstream will periodically request status updates by setting the keepalive's reply
    // field to 1. However, we cannot rely on these messages arriving on time. For example, when
    // the upstream is sending a big transaction its keepalive messages are queued and can be
    // delayed arbitrarily.
    //
    // See: <https://www.postgresql.org/message-id/CAMsr+YE2dSfHVr7iEv1GSPZihitWX-PMkD9QALEGcTYa+sdsgg@mail.gmail.com>
    //
    // For this reason we query the server's timeout value and proactively send a keepalive at
    // twice the frequency to have a healthy margin from the deadline.
    //
    // Note: We must use the metadata client here, which is NOT in replication mode. Some Aurora
    // Postgres versions disallow SHOW commands from within a replication connection.
    // See: https://github.com/readysettech/readyset/discussions/28#discussioncomment-4405671
    let row = simple_query_opt(&*metadata_client, "SHOW wal_sender_timeout;")
        .await?
        .unwrap();
    let wal_sender_timeout = match row.get("wal_sender_timeout") {
        // When this parameter is zero the timeout mechanism is disabled
        Some("0") => None,
        Some(value) => Some(
            mz_repr::adt::interval::Interval::from_str(value)
                .unwrap()
                .duration()
                .unwrap(),
        ),
        None => panic!("ubiquitous parameter missing"),
    };

    // This interval controls the cadence at which we send back status updates and, crucially,
    // request PrimaryKeepAlive messages. PrimaryKeepAlive messages drive the frontier forward in
    // the absence of data updates and we don't want a large `wal_sender_timeout` value to slow us
    // down. For this reason the feedback interval is set to one second, or less if the
    // wal_sender_timeout is less than 2 seconds.
    let feedback_interval = match wal_sender_timeout {
        Some(t) => std::cmp::min(Duration::from_secs(1), t.checked_div(2).unwrap()),
        None => Duration::from_secs(1),
    };
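    // For example, the default 60s wal_sender_timeout is capped to the 1s feedback interval,
    // while a 1s timeout would yield a 500ms interval.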

    let mut feedback_timer = tokio::time::interval(feedback_interval);
    // `Delay` ensures that consecutive ticks are always at least `feedback_interval` apart.
    feedback_timer.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Delay);

    // Postgres will return all transactions that commit *at or after* the provided LSN, following
    // the timely upper semantics.
    let lsn = PgLsn::from(resume_lsn.offset);
    let query = format!(
        r#"START_REPLICATION SLOT "{}" LOGICAL {} ("proto_version" '1', "publication_names" {})"#,
        Ident::new_unchecked(slot).to_ast_string_simple(),
        lsn,
        escaped_string_literal(publication),
    );
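    // The rendered command looks like the following, where the slot and publication names are
    // illustrative:
    //
    //   START_REPLICATION SLOT "materialize_abc" LOGICAL 0/1A2B3C4
    //       ("proto_version" '1', "publication_names" 'mz_publication')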
    let copy_stream = match replication_client.copy_both_simple(&query).await {
        Ok(copy_stream) => copy_stream,
        Err(err) if err.code() == Some(&SqlState::OBJECT_NOT_IN_PREREQUISITE_STATE) => {
            return Ok(Err(DefiniteError::InvalidReplicationSlot));
        }
        Err(err) => return Err(err.into()),
    };

    // According to the documentation [1] we must check that the slot LSN matches our
    // expectations otherwise we risk getting silently fast-forwarded to a future LSN. In order
    // to avoid a TOCTOU issue we must do this check after starting the replication stream. We
    // cannot use the replication client to do that because it's already in CopyBoth mode.
    // [1] https://www.postgresql.org/docs/15/protocol-replication.html#PROTOCOL-REPLICATION-START-REPLICATION-SLOT-LOGICAL
    let slot_metadata = super::fetch_slot_metadata(
        &*metadata_client,
        slot,
        mz_storage_types::dyncfgs::PG_FETCH_SLOT_RESUME_LSN_INTERVAL
            .get(config.config.config_set()),
    )
    .await?;
    let min_resume_lsn = slot_metadata.confirmed_flush_lsn;
    tracing::info!(
        %config.id,
        "started replication using backend PID={:?}. wal_sender_timeout={:?}",
        slot_metadata.active_pid, wal_sender_timeout
    );

    let (probe_tx, mut probe_rx) = watch::channel(None);
    let timestamp_interval = config.timestamp_interval;
    let now_fn = config.now_fn.clone();
    let max_lsn_task_handle =
        mz_ore::task::spawn(|| format!("pg_current_wal_lsn:{}", config.id), async move {
            let mut probe_ticker = probe::Ticker::new(move || timestamp_interval, now_fn);

            while !probe_tx.is_closed() {
                let probe_ts = probe_ticker.tick().await;
                let probe_or_err = super::fetch_max_lsn(&*metadata_client)
                    .await
                    .map(|lsn| Probe {
                        probe_ts,
                        upstream_frontier: Antichain::from_elem(lsn),
                    });
                let _ = probe_tx.send(Some(probe_or_err));
            }
        })
        .abort_on_drop();

    let stream = async_stream::try_stream!({
        // Ensure we don't pre-drop the task
        let _max_lsn_task_handle = max_lsn_task_handle;

        // Ensure we don't drop the replication client!
        let _replication_client = replication_client;

        let mut uppers = pin!(uppers);
        let mut last_committed_upper = resume_lsn;

        let mut stream = pin!(LogicalReplicationStream::new(copy_stream));

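        // The slot can only be resumed at or after its confirmed_flush_lsn. For example, if we
        // want to resume from LSN 500 but the slot has already confirmed LSN 800, the WAL in
        // between is gone and the only safe option is to error out.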
        if !(resume_lsn == MzOffset::from(0) || min_resume_lsn <= resume_lsn) {
            let err = TransientError::OvercompactedReplicationSlot {
                available_lsn: min_resume_lsn,
                requested_lsn: resume_lsn,
            };
            error!("timely-{} ({}) {err}", config.worker_id, config.id);
            Err(err)?;
        }

        loop {
            tokio::select! {
                Some(next_message) = stream.next() => match next_message {
                    Ok(ReplicationMessage::XLogData(data)) => {
                        yield ReplicationMessage::XLogData(data);
                        Ok(())
                    }
                    Ok(ReplicationMessage::PrimaryKeepAlive(keepalive)) => {
                        yield ReplicationMessage::PrimaryKeepAlive(keepalive);
                        Ok(())
                    }
                    Err(err) => Err(err.into()),
                    _ => Err(TransientError::UnknownReplicationMessage),
                },
                _ = feedback_timer.tick() => {
                    let ts: i64 = PG_EPOCH.elapsed().unwrap().as_micros().try_into().unwrap();
                    let lsn = PgLsn::from(last_committed_upper.offset);
                    trace!("timely-{} ({}) sending keepalive {lsn:?}", config.worker_id, config.id);
                    // Postgres only sends PrimaryKeepAlive messages when *it* wants a reply, which
                    // happens when our status update is late. Since we send them proactively this
                    // may never happen. It is therefore *crucial* that we set the last parameter
                    // (the reply flag) to 1 here. This will cause the upstream server to send us a
                    // PrimaryKeepAlive message promptly which will give us frontier advancement
                    // information in the absence of data updates.
                    let res = stream.as_mut().standby_status_update(lsn, lsn, lsn, ts, 1).await;
                    res.map_err(|e| e.into())
                },
                Some(upper) = uppers.next() => match upper.into_option() {
                    Some(lsn) => {
                        if last_committed_upper < lsn {
                            last_committed_upper = lsn;
                            for stat in config.statistics.values() {
                                stat.set_offset_committed(last_committed_upper.offset);
                            }
                        }
                        Ok(())
                    }
                    None => Ok(()),
                },
                Ok(()) = probe_rx.changed() => match &*probe_rx.borrow() {
                    Some(Ok(probe)) => {
                        if let Some(offset_known) = probe.upstream_frontier.as_option() {
                            for stat in config.statistics.values() {
                                stat.set_offset_known(offset_known.offset);
                            }
                        }
                        probe_output.give(probe_cap, probe);
                        Ok(())
                    },
                    Some(Err(err)) => Err(anyhow::anyhow!("{err}").into()),
                    None => Ok(()),
                },
                else => return
            }?;
        }
    });
    Ok(Ok(stream))
}

/// Extracts a single transaction from the replication stream delimited by a BEGIN and COMMIT
/// message. The BEGIN message must have already been consumed from the stream before calling this
/// function.
fn extract_transaction<'a>(
    stream: impl AsyncStream<Item = Result<LogicalReplMsg, TransientError>> + 'a,
    metadata_client: &'a Client,
    commit_lsn: MzOffset,
    table_info: &'a mut BTreeMap<u32, BTreeMap<usize, SourceOutputInfo>>,
    metrics: &'a PgSourceMetrics,
    publication: &'a str,
) -> impl AsyncStream<Item = Result<DecodedRow, TransientError>> + 'a {
    use LogicalReplicationMessage::*;
    let mut row = Row::default();
    async_stream::try_stream!({
        let mut stream = pin!(stream);
        metrics.transactions.inc();
        metrics.lsn.set(commit_lsn.offset);
        while let Some(event) = stream.try_next().await? {
            // We can ignore keepalive messages while processing a transaction because the
            // commit_lsn will drive progress.
            let message = match event {
                ReplicationMessage::XLogData(data) => data.into_data(),
                ReplicationMessage::PrimaryKeepAlive(_) => {
                    metrics.ignored.inc();
                    continue;
                }
                _ => Err(TransientError::UnknownReplicationMessage)?,
            };
            metrics.total.inc();
            match message {
                Insert(body) if !table_info.contains_key(&body.rel_id()) => metrics.ignored.inc(),
                Update(body) if !table_info.contains_key(&body.rel_id()) => metrics.ignored.inc(),
                Delete(body) if !table_info.contains_key(&body.rel_id()) => metrics.ignored.inc(),
                Relation(body) if !table_info.contains_key(&body.rel_id()) => metrics.ignored.inc(),
                Insert(body) => {
                    metrics.inserts.inc();
                    let rel = body.rel_id();
                    for (output, info) in table_info.get(&rel).into_iter().flatten() {
                        let tuple_data = body.tuple().tuple_data();
                        let Some(ref projection) = info.projection else {
                            panic!("missing projection for {rel}");
                        };
                        let datums = projection.iter().map(|idx| &tuple_data[*idx]);
                        let row = unpack_tuple(datums, &mut row);
                        yield (rel, *output, row, Diff::ONE);
                    }
                }
                Update(body) => match body.old_tuple() {
                    Some(old_tuple) => {
                        metrics.updates.inc();
                        let new_tuple = body.new_tuple();
                        let rel = body.rel_id();
                        for (output, info) in table_info.get(&rel).into_iter().flatten() {
                            let Some(ref projection) = info.projection else {
                                panic!("missing projection for {rel}");
                            };
                            let old_tuple =
                                projection.iter().map(|idx| &old_tuple.tuple_data()[*idx]);
                            // If the new tuple contains unchanged toast values we reference the old ones
                            let new_tuple = std::iter::zip(
                                projection.iter().map(|idx| &new_tuple.tuple_data()[*idx]),
                                old_tuple.clone(),
                            )
                            .map(|(new, old)| match new {
                                TupleData::UnchangedToast => old,
                                _ => new,
                            });
                            let old_row = unpack_tuple(old_tuple, &mut row);
                            let new_row = unpack_tuple(new_tuple, &mut row);

                            yield (rel, *output, old_row, Diff::MINUS_ONE);
                            yield (rel, *output, new_row, Diff::ONE);
                        }
                    }
                    None => {
                        let rel = body.rel_id();
                        for (output, _) in table_info.get(&rel).into_iter().flatten() {
                            yield (
                                rel,
                                *output,
                                Err(DefiniteError::DefaultReplicaIdentity),
                                Diff::ONE,
                            );
                        }
                    }
                },
                Delete(body) => match body.old_tuple() {
                    Some(old_tuple) => {
                        metrics.deletes.inc();
                        let rel = body.rel_id();
                        for (output, info) in table_info.get(&rel).into_iter().flatten() {
                            let Some(ref projection) = info.projection else {
                                panic!("missing projection for {rel}");
                            };
                            let datums = projection.iter().map(|idx| &old_tuple.tuple_data()[*idx]);
                            let row = unpack_tuple(datums, &mut row);
                            yield (rel, *output, row, Diff::MINUS_ONE);
                        }
                    }
                    None => {
                        let rel = body.rel_id();
                        for (output, _) in table_info.get(&rel).into_iter().flatten() {
                            yield (
                                rel,
                                *output,
                                Err(DefiniteError::DefaultReplicaIdentity),
                                Diff::ONE,
                            );
                        }
                    }
                },
                Relation(body) => {
                    let rel_id = body.rel_id();
                    if let Some(outputs) = table_info.get_mut(&body.rel_id()) {
                        // Because the replication stream doesn't include columns' attnums, we need
                        // to check the current local schema against the current remote schema to
                        // ensure e.g. we haven't received a schema update with the same terminal
                        // column name which is actually a different column.
                        let upstream_info = mz_postgres_util::publication_info(
                            metadata_client,
                            publication,
                            Some(&[rel_id]),
                        )
                        .await?;

                        let mut schema_errors = vec![];

                        outputs.retain(|output_index, info| {
                            match verify_schema(rel_id, info, &upstream_info) {
                                Ok(()) => true,
                                Err(err) => {
                                    schema_errors.push((
                                        rel_id,
                                        *output_index,
                                        Err(err),
                                        Diff::ONE,
                                    ));
                                    false
                                }
                            }
                        });
                        // Recalculate the projection vector for the retained valid outputs. Here
                        // we must use the column names in the RelationBody message and not the
                        // upstream_info obtained above, since that one represents the current
                        // schema upstream, which may be many versions ahead of the one we're
                        // about to receive after this Relation message.
                        let column_positions: BTreeMap<_, _> = body
                            .columns()
                            .iter()
                            .enumerate()
                            .map(|(idx, col)| (col.name().unwrap(), idx))
                            .collect();
                        for info in outputs.values_mut() {
                            let mut projection = vec![];
                            for col in info.desc.columns.iter() {
                                projection.push(column_positions[&*col.name]);
                            }
                            info.projection = Some(projection);
                        }
                        for schema_error in schema_errors {
                            yield schema_error;
                        }
                    }
                }
                Truncate(body) => {
                    for &rel_id in body.rel_ids() {
                        if let Some(outputs) = table_info.get_mut(&rel_id) {
                            for (output, _) in std::mem::take(outputs) {
                                yield (
                                    rel_id,
                                    output,
                                    Err(DefiniteError::TableTruncated),
                                    Diff::ONE,
                                );
                            }
                        }
                    }
                }
                Commit(body) => {
                    if commit_lsn != body.commit_lsn().into() {
                        Err(TransientError::InvalidTransaction)?
                    }
                    return;
                }
                // TODO: We should handle origin messages and emit an error as they indicate that
                // the upstream performed a point in time restore so all bets are off about the
                // continuity of the stream.
                Origin(_) | Type(_) => metrics.ignored.inc(),
                Begin(_) => Err(TransientError::NestedTransaction)?,
                // The enum is marked as non_exhaustive. Better to be conservative
                _ => Err(TransientError::UnknownLogicalReplicationMessage)?,
            }
        }
        Err(TransientError::ReplicationEOF)?;
    })
}

/// Unpacks an iterator of TupleData into a row of datums, or returns a definite error if the
/// tuple data cannot be decoded.
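///
/// For example, `[Text("1"), Null]` unpacks into a two-column row of `1` and `NULL`, while
/// encountering an `UnchangedToast` value yields `DefiniteError::MissingToast`.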
#[inline]
fn unpack_tuple<'a, I>(tuple_data: I, row: &mut Row) -> Result<Row, DefiniteError>
where
    I: IntoIterator<Item = &'a TupleData>,
    I::IntoIter: ExactSizeIterator,
{
    let iter = tuple_data.into_iter();
    let mut packer = row.packer();
    for data in iter {
        let datum = match data {
            TupleData::Text(bytes) => super::decode_utf8_text(bytes)?,
            TupleData::Null => Datum::Null,
            TupleData::UnchangedToast => return Err(DefiniteError::MissingToast),
            TupleData::Binary(_) => return Err(DefiniteError::UnexpectedBinaryData),
        };
        packer.push(datum);
    }
    Ok(row.clone())
}

/// Ensures the publication exists on the server. It returns an outer transient error in case of
/// connection issues and an inner definite error if the publication is dropped.
async fn ensure_publication_exists(
    client: &Client,
    publication: &str,
) -> Result<Result<(), DefiniteError>, TransientError> {
    // Check that the publication still exists on the upstream server.
    let result = client
        .query_opt(
            "SELECT 1 FROM pg_publication WHERE pubname = $1;",
            &[&publication],
        )
        .await?;
    match result {
        Some(_) => Ok(Ok(())),
        None => Ok(Err(DefiniteError::PublicationDropped(
            publication.to_owned(),
        ))),
    }
}

/// Ensures the active replication timeline_id matches the one we expect such that we can safely
/// resume replication. It returns an outer transient error in case of connection issues and an
/// inner definite error if the timeline id does not match.
async fn ensure_replication_timeline_id(
    replication_client: &Client,
    expected_timeline_id: &u64,
    config_set: &ConfigSet,
) -> Result<Result<(), DefiniteError>, TransientError> {
    let timeline_id = mz_postgres_util::get_timeline_id(replication_client).await?;
    if timeline_id == *expected_timeline_id {
        Ok(Ok(()))
    } else if PG_SOURCE_VALIDATE_TIMELINE.get(config_set) {
        Ok(Err(DefiniteError::InvalidTimelineId {
            expected: *expected_timeline_id,
            actual: timeline_id,
        }))
    } else {
        tracing::warn!(
            "Timeline ID mismatch ignored: expected={expected_timeline_id} actual={timeline_id}"
        );
        Ok(Ok(()))
    }
}

enum SchemaValidationError {
    Postgres(PostgresError),
    Schema {
        oid: u32,
        output_index: usize,
        error: DefiniteError,
    },
}

fn spawn_schema_validator(
    client: Client,
    config: &RawSourceCreationConfig,
    publication: String,
    table_info: BTreeMap<u32, BTreeMap<usize, SourceOutputInfo>>,
) -> mpsc::UnboundedReceiver<SchemaValidationError> {
    let (tx, rx) = mpsc::unbounded_channel();
    let source_id = config.id;
    let config_set = Arc::clone(config.config.config_set());

    mz_ore::task::spawn(|| format!("schema-validator:{}", source_id), async move {
        while !tx.is_closed() {
            trace!(%source_id, "validating schemas");

            let validation_start = Instant::now();

            let upstream_info = match mz_postgres_util::publication_info(
                &*client,
                &publication,
                Some(&table_info.keys().copied().collect::<Vec<_>>()),
            )
            .await
            {
                Ok(info) => info,
                Err(error) => {
                    let _ = tx.send(SchemaValidationError::Postgres(error));
                    continue;
                }
            };

            for (&oid, outputs) in table_info.iter() {
                for (&output_index, info) in outputs {
                    if let Err(error) = verify_schema(oid, info, &upstream_info) {
                        trace!(
                            %source_id,
                            "schema of output index {output_index} for oid {oid} invalid",
                        );
                        let _ = tx.send(SchemaValidationError::Schema {
                            oid,
                            output_index,
                            error,
                        });
                    } else {
                        trace!(
                            %source_id,
                            "schema of output index {output_index} for oid {oid} valid",
                        );
                    }
                }
            }

            let interval = PG_SCHEMA_VALIDATION_INTERVAL.get(&config_set);
            let elapsed = validation_start.elapsed();
            let wait = interval.saturating_sub(elapsed);
            tokio::time::sleep(wait).await;
        }
    });

    rx
}