mz_storage/source/postgres.rs

// Copyright Materialize, Inc. and contributors. All rights reserved.
//
// Use of this software is governed by the Business Source License
// included in the LICENSE file.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0.

//! Code to render the ingestion dataflow of a [`PostgresSourceConnection`]. The dataflow consists
//! of multiple operators in order to take advantage of all the available workers.
//!
//! # Snapshot
//!
//! One part of the dataflow deals with snapshotting the tables involved in the ingestion. Each
//! table that needs a snapshot is assigned to a specific worker, which performs a `COPY` query
//! and distributes the raw COPY bytes to all workers to decode the text-encoded rows.
//!
//! For all tables that ended up being snapshotted, the snapshot reader also emits a rewind request
//! to the replication reader, which ensures that the requested portion of the replication stream
//! is subtracted from the snapshot.
//!
//! See the [snapshot] module for more information on the snapshot strategy.
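//!
//! Roughly, if a table's snapshot is consistent as of some LSN `B`, the replication reader honors
//! the rewind request by emitting every update with an LSN at or before `B` twice: once normally
//! at its own LSN and once with a negated diff at the minimum timestamp. The negated copies
//! cancel out the part of the replication stream that is already reflected in the snapshot, so
//! the two halves of the dataflow compose without double-counting.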
//!
//! # Replication
//!
//! The other part of the dataflow deals with reading the logical replication slot, which must
//! happen from a single worker. The minimum amount of processing is performed on that worker and
//! the data is then distributed among all workers for decoding.
//!
//! See the [replication] module for more information on the replication strategy.
//!
//! # Error handling
//!
//! Two kinds of errors can happen during ingestion, represented as two separate error types:
//!
//! [`DefiniteError`]s are errors that happen during processing of a specific collection record at
//! a specific LSN. These are the only errors that can ever end up in the error collection of a
//! subsource.
//!
//! Transient errors are any errors that can happen for reasons that are unrelated to the data
//! itself, such as authentication failures or connection failures. The only operators that can
//! emit such errors are the `TableReader` and the `ReplicationReader` operators, which are the
//! ones that talk to the external world. Both of these operators are built with the
//! `AsyncOperatorBuilder::build_fallible` method, which allows transient errors to be propagated
//! upwards with the standard `?` operator without risking downgrading the capability and
//! producing bogus frontiers.
//!
//! The error streams from both of those operators are published to the source status and also
//! trigger a restart of the dataflow.
//!
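//! A minimal sketch of that pattern (not the exact `build_fallible` signature; the helper
//! `connect_to_upstream` is a stand-in for whatever fallible work an operator does):
//!
//! ```ignore
//! let (button, transient_errors) = builder.build_fallible(move |caps| {
//!     Box::pin(async move {
//!         // Any error short-circuited with `?` here is routed to the
//!         // `transient_errors` stream while the operator's capabilities stay
//!         // intact, so downstream frontiers remain correct.
//!         let _client = connect_to_upstream().await?;
//!         Ok(())
//!     })
//! });
//! ```
//!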
//! ```text
//!    ┏━━━━━━━━━━━━━━┓
//!    ┃    table     ┃
//!    ┃    reader    ┃
//!    ┗━┯━━━━━━━━━━┯━┛
//!      │          │rewind
//!      │          │requests
//!      │          ╰────╮
//!      │             ┏━v━━━━━━━━━━━┓
//!      │             ┃ replication ┃
//!      │             ┃   reader    ┃
//!      │             ┗━┯━━━━━━━━━┯━┛
//!  COPY│           slot│         │
//!  data│           data│         │
//! ┏━━━━v━━━━━┓ ┏━━━━━━━v━━━━━┓   │
//! ┃  COPY    ┃ ┃ replication ┃   │
//! ┃ decoder  ┃ ┃   decoder   ┃   │
//! ┗━━━━┯━━━━━┛ ┗━━━━━┯━━━━━━━┛   │
//!      │snapshot     │replication│
//!      │updates      │updates    │
//!      ╰────╮    ╭───╯           │
//!          ╭┴────┴╮              │
//!          │concat│              │
//!          ╰──┬───╯              │
//!             │ data             │progress
//!             │ output           │output
//!             v                  v
//! ```

use std::collections::BTreeMap;
use std::convert::Infallible;
use std::rc::Rc;
use std::time::Duration;

use differential_dataflow::AsCollection;
use itertools::Itertools as _;
use mz_expr::{EvalError, MirScalarExpr};
use mz_ore::cast::CastFrom;
use mz_ore::error::ErrorExt;
use mz_postgres_util::desc::PostgresTableDesc;
use mz_postgres_util::{Client, PostgresError, simple_query_opt};
use mz_repr::{Datum, Diff, GlobalId, Row};
use mz_sql_parser::ast::Ident;
use mz_sql_parser::ast::display::AstDisplay;
use mz_storage_types::errors::{DataflowError, SourceError, SourceErrorDetails};
use mz_storage_types::sources::postgres::CastType;
use mz_storage_types::sources::{
    MzOffset, PostgresSourceConnection, SourceExport, SourceExportDetails, SourceTimestamp,
};
use mz_timely_util::builder_async::PressOnDropButton;
use serde::{Deserialize, Serialize};
use timely::container::CapacityContainerBuilder;
use timely::dataflow::operators::core::Partition;
use timely::dataflow::operators::{Concat, Map, ToStream};
use timely::dataflow::{Scope, Stream};
use timely::progress::Antichain;
use tokio_postgres::error::SqlState;
use tokio_postgres::types::PgLsn;

use crate::healthcheck::{HealthStatusMessage, HealthStatusUpdate, StatusNamespace};
use crate::source::types::{Probe, SourceRender, StackedCollection};
use crate::source::{RawSourceCreationConfig, SourceMessage};

mod replication;
mod snapshot;

impl SourceRender for PostgresSourceConnection {
    type Time = MzOffset;

    const STATUS_NAMESPACE: StatusNamespace = StatusNamespace::Postgres;

    /// Render the ingestion dataflow. This function only connects things together and contains no
    /// actual processing logic.
    fn render<G: Scope<Timestamp = MzOffset>>(
        self,
        scope: &mut G,
        config: &RawSourceCreationConfig,
        resume_uppers: impl futures::Stream<Item = Antichain<MzOffset>> + 'static,
        _start_signal: impl std::future::Future<Output = ()> + 'static,
    ) -> (
        BTreeMap<GlobalId, StackedCollection<G, Result<SourceMessage, DataflowError>>>,
        Stream<G, Infallible>,
        Stream<G, HealthStatusMessage>,
        Option<Stream<G, Probe<MzOffset>>>,
        Vec<PressOnDropButton>,
    ) {
        // Collect the source outputs that we will be exporting into a per-table map.
        let mut table_info = BTreeMap::new();
        for (idx, (id, export)) in config.source_exports.iter().enumerate() {
            let SourceExport {
                details,
                storage_metadata: _,
                data_config: _,
            } = export;
            let details = match details {
                SourceExportDetails::Postgres(details) => details,
                // This is an export that doesn't need any data output to it.
                SourceExportDetails::None => continue,
                _ => panic!("unexpected source export details: {:?}", details),
            };
            let desc = details.table.clone();
            let casts = details.column_casts.clone();
            let resume_upper = Antichain::from_iter(
                config
                    .source_resume_uppers
                    .get(id)
                    .expect("all source exports must be present in source resume uppers")
                    .iter()
                    .map(MzOffset::decode_row),
            );
            let output = SourceOutputInfo {
                desc,
                projection: None,
                casts,
                resume_upper,
                export_id: id.clone(),
            };
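            // Group outputs by upstream table OID: several exports may ingest the
            // same upstream table, so the inner map is keyed by the output index
            // within `source_exports`.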
            table_info
                .entry(output.desc.oid)
                .or_insert_with(BTreeMap::new)
                .insert(idx, output);
        }

        let metrics = config.metrics.get_postgres_source_metrics(config.id);

        let (snapshot_updates, rewinds, slot_ready, snapshot_err, snapshot_token) =
            snapshot::render(
                scope.clone(),
                config.clone(),
                self.clone(),
                table_info.clone(),
                metrics.snapshot_metrics.clone(),
            );

        let (repl_updates, uppers, probe_stream, repl_err, repl_token) = replication::render(
            scope.clone(),
            config.clone(),
            self,
            table_info,
            &rewinds,
            &slot_ready,
            resume_uppers,
            metrics,
        );

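        // Merge the snapshot and replication updates into a single stream and then
        // split that stream back out into one stream per source export, routing on
        // the output index attached to each update.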
        let updates = snapshot_updates.concat(&repl_updates);
        let partition_count = u64::cast_from(config.source_exports.len());
        let data_streams: Vec<_> = updates
            .inner
            .partition::<CapacityContainerBuilder<_>, _, _>(
                partition_count,
                |((output, data), time, diff): &(
                    (usize, Result<SourceMessage, DataflowError>),
                    MzOffset,
                    Diff,
                )| {
                    let output = u64::cast_from(*output);
                    (output, (data.clone(), time.clone(), diff.clone()))
                },
            );
        let mut data_collections = BTreeMap::new();
        for (id, data_stream) in config.source_exports.keys().zip_eq(data_streams) {
            data_collections.insert(*id, data_stream.as_collection());
        }

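        // Report an initial `Running` status for each export and for the source as
        // a whole (the `None` id).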
        let export_ids = config.source_exports.keys().copied();
        let health_init = export_ids
            .map(Some)
            .chain(std::iter::once(None))
            .map(|id| HealthStatusMessage {
                id,
                namespace: Self::STATUS_NAMESPACE,
                update: HealthStatusUpdate::Running,
            })
            .collect::<Vec<_>>()
            .to_stream(scope);

        // N.B. We don't check ssh tunnel statuses here. We could, but on restart we will
        // immediately set the status to the correct ssh error anyway, so we skip the extra work.
        let errs = snapshot_err.concat(&repl_err).map(move |err| {
            // This update will cause the dataflow to restart.
            let err_string = err.display_with_causes().to_string();
            let update = HealthStatusUpdate::halting(err_string.clone(), None);

            let namespace = match err {
                ReplicationError::Transient(err)
                    if matches!(
                        &*err,
                        TransientError::PostgresError(PostgresError::Ssh(_))
                            | TransientError::PostgresError(PostgresError::SshIo(_))
                    ) =>
                {
                    StatusNamespace::Ssh
                }
                _ => Self::STATUS_NAMESPACE,
            };

            HealthStatusMessage {
                id: None,
                namespace: namespace.clone(),
                update,
            }
        });

        let health = health_init.concat(&errs);

        (
            data_collections,
            uppers,
            health,
            probe_stream,
            vec![snapshot_token, repl_token],
        )
    }
}

#[derive(Clone, Debug)]
struct SourceOutputInfo {
    /// The expected upstream schema of this output.
    desc: PostgresTableDesc,
    /// A projection of the upstream columns into the columns expected by this output. This field
    /// is recalculated every time we observe an upstream schema change. On dataflow
    /// initialization this field is `None` since we haven't yet observed any schemas.
    projection: Option<Vec<usize>>,
    /// The casts to apply to the raw text columns of the upstream table.
    casts: Vec<(CastType, MirScalarExpr)>,
    /// The frontier at which this output should resume ingestion.
    resume_upper: Antichain<MzOffset>,
    /// The id of the source export this output corresponds to.
    export_id: GlobalId,
}

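/// An error that can occur during Postgres ingestion. The inner errors are wrapped in `Rc` so
/// that the type can derive `Clone`, which the dataflow machinery requires, even though the
/// underlying error types are not themselves cloneable.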
#[derive(Clone, Debug, thiserror::Error)]
pub enum ReplicationError {
    #[error(transparent)]
    Transient(#[from] Rc<TransientError>),
    #[error(transparent)]
    Definite(#[from] Rc<DefiniteError>),
}

/// A transient error that never ends up in the collection of a specific table.
#[derive(Debug, thiserror::Error)]
pub enum TransientError {
    #[error("replication slot mysteriously missing")]
    MissingReplicationSlot,
    #[error(
        "slot overcompacted. Requested LSN {requested_lsn} but only LSNs >= {available_lsn} are available"
    )]
    OvercompactedReplicationSlot {
        requested_lsn: MzOffset,
        available_lsn: MzOffset,
    },
    #[error("replication slot already exists")]
    ReplicationSlotAlreadyExists,
    #[error("stream ended prematurely")]
    ReplicationEOF,
    #[error("unexpected replication message")]
    UnknownReplicationMessage,
    #[error("unexpected logical replication message")]
    UnknownLogicalReplicationMessage,
    #[error("received replication event outside of transaction")]
    BareTransactionEvent,
    #[error("lsn mismatch between BEGIN and COMMIT")]
    InvalidTransaction,
    #[error("BEGIN within existing BEGIN stream")]
    NestedTransaction,
    #[error("recoverable errors should crash the process during snapshots")]
    SyntheticError,
    #[error("sql client error")]
    SQLClient(#[from] tokio_postgres::Error),
    #[error(transparent)]
    PostgresError(#[from] PostgresError),
    #[error(transparent)]
    Generic(#[from] anyhow::Error),
}

/// A definite error that always ends up in the collection of a specific table.
#[derive(Debug, Clone, Serialize, Deserialize, thiserror::Error)]
pub enum DefiniteError {
    #[error("slot compacted past snapshot point. snapshot consistent point={0} resume_lsn={1}")]
    SlotCompactedPastResumePoint(MzOffset, MzOffset),
    #[error("table was truncated")]
    TableTruncated,
    #[error("table was dropped")]
    TableDropped,
    #[error("publication {0:?} does not exist")]
    PublicationDropped(String),
    #[error("replication slot has been invalidated because it exceeded the maximum reserved size")]
    InvalidReplicationSlot,
    #[error("unexpected number of columns while parsing COPY output")]
    MissingColumn,
    #[error("failed to parse COPY protocol")]
    InvalidCopyInput,
    #[error(
        "unsupported action: database restored from point-in-time backup. Expected timeline ID {expected} but got {actual}"
    )]
    InvalidTimelineId { expected: u64, actual: u64 },
    #[error(
        "TOASTed value missing from old row. Did you forget to set REPLICA IDENTITY to FULL for your table?"
    )]
    MissingToast,
    #[error(
        "old row missing from replication stream. Did you forget to set REPLICA IDENTITY to FULL for your table?"
    )]
    DefaultReplicaIdentity,
    // TODO: proper error variants for all the expected schema violations
    #[error("incompatible schema change: {0}")]
    IncompatibleSchema(String),
    #[error("invalid UTF8 string: {0:?}")]
    InvalidUTF8(Vec<u8>),
    #[error("failed to cast raw column: {0}")]
    CastError(#[source] EvalError),
    #[error("unexpected binary data in replication stream")]
    UnexpectedBinaryData,
}

impl From<DefiniteError> for DataflowError {
    fn from(err: DefiniteError) -> Self {
        let m = err.to_string().into();
        DataflowError::SourceError(Box::new(SourceError {
            error: match &err {
                DefiniteError::SlotCompactedPastResumePoint(_, _) => SourceErrorDetails::Other(m),
                DefiniteError::TableTruncated => SourceErrorDetails::Other(m),
                DefiniteError::TableDropped => SourceErrorDetails::Other(m),
                DefiniteError::PublicationDropped(_) => SourceErrorDetails::Initialization(m),
                DefiniteError::InvalidReplicationSlot => SourceErrorDetails::Initialization(m),
                DefiniteError::MissingColumn => SourceErrorDetails::Other(m),
                DefiniteError::InvalidCopyInput => SourceErrorDetails::Other(m),
                DefiniteError::InvalidTimelineId { .. } => SourceErrorDetails::Initialization(m),
                DefiniteError::MissingToast => SourceErrorDetails::Other(m),
                DefiniteError::DefaultReplicaIdentity => SourceErrorDetails::Other(m),
                DefiniteError::IncompatibleSchema(_) => SourceErrorDetails::Other(m),
                DefiniteError::InvalidUTF8(_) => SourceErrorDetails::Other(m),
                DefiniteError::CastError(_) => SourceErrorDetails::Other(m),
                DefiniteError::UnexpectedBinaryData => SourceErrorDetails::Other(m),
            },
        }))
    }
}

/// Ensures the given replication slot exists, creating it if necessary.
async fn ensure_replication_slot(client: &Client, slot: &str) -> Result<(), TransientError> {
    // Note: Using unchecked here is okay because we're using it in a SQL query.
    let slot = Ident::new_unchecked(slot).to_ast_string_simple();
    let query = format!("CREATE_REPLICATION_SLOT {slot} LOGICAL \"pgoutput\" NOEXPORT_SNAPSHOT");
    match simple_query_opt(client, &query).await {
        Ok(_) => Ok(()),
        // If the slot already exists that's still ok.
        Err(PostgresError::Postgres(err)) if err.code() == Some(&SqlState::DUPLICATE_OBJECT) => {
            tracing::trace!("replication slot {slot} already existed");
            Ok(())
        }
        Err(err) => Err(TransientError::PostgresError(err)),
    }
}

/// The state of a replication slot.
struct SlotMetadata {
    /// The process ID of the session using this slot, if the slot is currently active. `None` if
    /// inactive.
    active_pid: Option<i32>,
    /// The address (LSN) up to which the logical slot's consumer has confirmed receiving data.
    /// Data corresponding to the transactions committed before this LSN is not available anymore.
    confirmed_flush_lsn: MzOffset,
}

/// Fetches the metadata of the given replication slot, including the minimum LSN at which it can
/// safely resume.
async fn fetch_slot_metadata(
    client: &Client,
    slot: &str,
    interval: Duration,
) -> Result<SlotMetadata, TransientError> {
    loop {
        let query = "SELECT active_pid, confirmed_flush_lsn
                FROM pg_replication_slots WHERE slot_name = $1";
        let Some(row) = client.query_opt(query, &[&slot]).await? else {
            return Err(TransientError::MissingReplicationSlot);
        };

        match row.get::<_, Option<PgLsn>>("confirmed_flush_lsn") {
            // For Postgres, `confirmed_flush_lsn` means that the slot is able to produce all
            // transactions that happen at tx_lsn >= confirmed_flush_lsn. Therefore this value
            // already has "upper" semantics.
            Some(lsn) => {
                return Ok(SlotMetadata {
                    confirmed_flush_lsn: MzOffset::from(lsn),
                    active_pid: row.get("active_pid"),
                });
            }
            // It can happen that `confirmed_flush_lsn` is NULL while the slot initializes. This
            // could probably be a `tokio::time::interval`, but it's only called twice, so it's
            // fine like this.
            None => tokio::time::sleep(interval).await,
        };
    }
}

/// Fetch the `pg_current_wal_lsn`, used to report metrics.
async fn fetch_max_lsn(client: &Client) -> Result<MzOffset, TransientError> {
    let query = "SELECT pg_current_wal_lsn()";
    let row = simple_query_opt(client, query).await?;

    match row.and_then(|row| {
        row.get("pg_current_wal_lsn")
            .map(|lsn| lsn.parse::<PgLsn>().unwrap())
    }) {
        // Based on the documentation, it appears that `pg_current_wal_lsn` has
        // the same "upper" semantics as `confirmed_flush_lsn`:
        // <https://www.postgresql.org/docs/current/functions-admin.html#FUNCTIONS-ADMIN-BACKUP>
        // We may need to revisit this and use `pg_current_wal_flush_lsn`.
        Some(lsn) => Ok(MzOffset::from(lsn)),
        None => Err(TransientError::Generic(anyhow::anyhow!(
            "pg_current_wal_lsn() mysteriously has no value"
        ))),
    }
}

/// Ensures that the table with oid `oid` and the expected schema recorded in `info` is still
/// compatible with the current upstream schema `upstream_info`.
fn verify_schema(
    oid: u32,
    info: &SourceOutputInfo,
    upstream_info: &BTreeMap<u32, PostgresTableDesc>,
) -> Result<(), DefiniteError> {
    let current_desc = upstream_info.get(&oid).ok_or(DefiniteError::TableDropped)?;

    let allow_oids_to_change_by_col_num = info
        .desc
        .columns
        .iter()
        .zip_eq(info.casts.iter())
        .flat_map(|(col, (cast_type, _))| match cast_type {
            CastType::Text => Some(col.col_num),
            CastType::Natural => None,
        })
        .collect();

    match info
        .desc
        .determine_compatibility(current_desc, &allow_oids_to_change_by_col_num)
    {
        Ok(()) => Ok(()),
        Err(err) => Err(DefiniteError::IncompatibleSchema(err.to_string())),
    }
}

/// Casts a text row into the target types.
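///
/// A rough usage sketch (the identity cast and the values are made up for illustration): each
/// decoded text datum is passed through its cast expression and the results are packed into
/// `row`.
///
/// ```ignore
/// // Hypothetical single-column example: `CastType::Natural` with a bare
/// // column reference simply passes the datum through unchanged.
/// let casts = vec![(CastType::Natural, MirScalarExpr::column(0))];
/// let datum = decode_utf8_text(b"hello")?;
/// let mut row = Row::default();
/// cast_row(&casts, &[datum], &mut row)?;
/// assert_eq!(row.unpack_first(), Datum::String("hello"));
/// ```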
fn cast_row(
    casts: &[(CastType, MirScalarExpr)],
    datums: &[Datum<'_>],
    row: &mut Row,
) -> Result<(), DefiniteError> {
    let arena = mz_repr::RowArena::new();
    let mut packer = row.packer();
    for (_, column_cast) in casts {
        let datum = column_cast
            .eval(datums, &arena)
            .map_err(DefiniteError::CastError)?;
        packer.push(datum);
    }
    Ok(())
}

/// Converts raw bytes that are expected to be UTF-8 encoded into a `Datum::String`.
fn decode_utf8_text(bytes: &[u8]) -> Result<Datum<'_>, DefiniteError> {
    match std::str::from_utf8(bytes) {
        Ok(text) => Ok(Datum::String(text)),
        Err(_) => Err(DefiniteError::InvalidUTF8(bytes.to_vec())),
    }
}