mz_storage_types/sources.rs

1// Copyright Materialize, Inc. and contributors. All rights reserved.
2//
3// Use of this software is governed by the Business Source License
4// included in the LICENSE file.
5//
6// As of the Change Date specified in that file, in accordance with
7// the Business Source License, use of this software will be governed
8// by the Apache License, Version 2.0.
9
10//! Types and traits related to the introduction of changing collections into `dataflow`.
11
12use std::collections::BTreeMap;
13use std::fmt::Debug;
14use std::hash::Hash;
15use std::ops::{Add, AddAssign, Deref, DerefMut};
16use std::str::FromStr;
17use std::sync::Arc;
18use std::time::Duration;
19
20use arrow::array::{Array, ArrayRef, BinaryArray, BinaryBuilder, NullArray, StructArray};
21use arrow::datatypes::{Field, Fields};
22use bytes::{BufMut, Bytes};
23use columnation::Columnation;
24use itertools::EitherOrBoth::Both;
25use itertools::Itertools;
26use kafka::KafkaSourceExportDetails;
27use load_generator::{LoadGeneratorOutput, LoadGeneratorSourceExportDetails};
28use mz_ore::assert_none;
29use mz_persist_types::Codec;
30use mz_persist_types::arrow::ArrayOrd;
31use mz_persist_types::columnar::{ColumnDecoder, ColumnEncoder, Schema};
32use mz_persist_types::stats::{
33    ColumnNullStats, ColumnStatKinds, ColumnarStats, ColumnarStatsBuilder, PrimitiveStats,
34    StructStats,
35};
36use mz_proto::{IntoRustIfSome, ProtoType, RustType, TryFromProtoError};
37use mz_repr::{
38    CatalogItemId, Datum, GlobalId, ProtoRelationDesc, ProtoRow, RelationDesc, Row,
39    RowColumnarDecoder, RowColumnarEncoder, arb_row_for_relation,
40};
41use mz_sql_parser::ast::{Ident, IdentError, UnresolvedItemName};
42use proptest::prelude::any;
43use proptest::strategy::Strategy;
44use prost::Message;
45use serde::{Deserialize, Serialize};
46use timely::order::{PartialOrder, TotalOrder};
47use timely::progress::timestamp::Refines;
48use timely::progress::{PathSummary, Timestamp};
49
50use crate::AlterCompatible;
51use crate::connections::inline::{
52    ConnectionAccess, ConnectionResolver, InlinedConnection, IntoInlineConnection,
53    ReferencedConnection,
54};
55use crate::controller::AlterError;
56use crate::errors::{DataflowError, ProtoDataflowError};
57use crate::instances::StorageInstanceId;
58use crate::sources::sql_server::SqlServerSourceExportDetails;
59
60pub mod encoding;
61pub mod envelope;
62pub mod kafka;
63pub mod load_generator;
64pub mod mysql;
65pub mod postgres;
66pub mod sql_server;
67
68pub use crate::sources::envelope::SourceEnvelope;
69pub use crate::sources::kafka::KafkaSourceConnection;
70pub use crate::sources::load_generator::LoadGeneratorSourceConnection;
71pub use crate::sources::mysql::{MySqlSourceConnection, MySqlSourceExportDetails};
72pub use crate::sources::postgres::{PostgresSourceConnection, PostgresSourceExportDetails};
73pub use crate::sources::sql_server::{SqlServerSourceConnection, SqlServerSourceExtras};
74
75include!(concat!(env!("OUT_DIR"), "/mz_storage_types.sources.rs"));
76
77/// A description of a source ingestion
78#[derive(Clone, Debug, Serialize, Deserialize, Eq, PartialEq)]
79pub struct IngestionDescription<S: 'static = (), C: ConnectionAccess = InlinedConnection> {
80    /// The source description.
81    pub desc: SourceDesc<C>,
82    /// Collections to be exported by this ingestion.
83    ///
84    /// # Notes
85    /// - For multi-output sources:
86    ///     - Add exports by adding a new [`SourceExport`].
87    ///     - Remove exports by removing the [`SourceExport`].
88    ///
89    ///   Re-rendering/executing the source after making these modifications
90    ///   adds and drops the subsource, respectively.
91    /// - This field includes the primary source's ID, which might need to be
92    ///   filtered out to understand which exports are explicit ingestion exports.
93    /// - This field does _not_ include the remap collection, which is tracked
94    ///   in its own field.
95    pub source_exports: BTreeMap<GlobalId, SourceExport<S>>,
96    /// The ID of the instance in which to install the source.
97    pub instance_id: StorageInstanceId,
98    /// The ID of this ingestion's remap/progress collection.
99    pub remap_collection_id: GlobalId,
100    /// The storage metadata for the remap/progress collection
101    pub remap_metadata: S,
102}
103
104impl IngestionDescription {
105    pub fn new(
106        desc: SourceDesc,
107        instance_id: StorageInstanceId,
108        remap_collection_id: GlobalId,
109    ) -> Self {
110        Self {
111            desc,
112            remap_metadata: (),
113            source_exports: BTreeMap::new(),
114            instance_id,
115            remap_collection_id,
116        }
117    }
118}
119
120impl<S> IngestionDescription<S> {
121    /// Return an iterator over the `GlobalId`s of `self`'s collections.
122    /// This will contain ids for the remap collection, subsources,
123    /// tables for this source, and the primary collection ID, even if
124    /// no data will be exported to the primary collection.
125    pub fn collection_ids(&self) -> impl Iterator<Item = GlobalId> + '_ {
126        // Expand self so that any new fields added generate a compiler error to
127        // increase the likelihood of developers seeing this function.
128        let IngestionDescription {
129            desc: _,
130            remap_metadata: _,
131            source_exports,
132            instance_id: _,
133            remap_collection_id,
134        } = &self;
135
136        source_exports
137            .keys()
138            .copied()
139            .chain(std::iter::once(*remap_collection_id))
140    }
141}
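// A minimal usage sketch (the bindings here are hypothetical): after registering
// exports on a new ingestion, `collection_ids` yields every export ID plus the remap
// collection ID.
//
//     let mut ingestion = IngestionDescription::new(desc, instance_id, remap_id);
//     ingestion.source_exports.insert(export_id, source_export);
//     // Yields `export_id` followed by `remap_id`.
//     let ids: Vec<GlobalId> = ingestion.collection_ids().collect();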
142
143impl<S: Debug + Eq + PartialEq + AlterCompatible> AlterCompatible for IngestionDescription<S> {
144    fn alter_compatible(
145        &self,
146        id: GlobalId,
147        other: &IngestionDescription<S>,
148    ) -> Result<(), AlterError> {
149        if self == other {
150            return Ok(());
151        }
152        let IngestionDescription {
153            desc,
154            remap_metadata,
155            source_exports,
156            instance_id,
157            remap_collection_id,
158        } = self;
159
160        let compatibility_checks = [
161            (desc.alter_compatible(id, &other.desc).is_ok(), "desc"),
162            (remap_metadata == &other.remap_metadata, "remap_metadata"),
163            (
164                source_exports
165                    .iter()
166                    .merge_join_by(&other.source_exports, |(l_key, _), (r_key, _)| {
167                        l_key.cmp(r_key)
168                    })
169                    .all(|r| match r {
170                        Both(
171                            (
172                                _,
173                                SourceExport {
174                                    storage_metadata: l_metadata,
175                                    details: l_details,
176                                    data_config: l_data_config,
177                                },
178                            ),
179                            (
180                                _,
181                                SourceExport {
182                                    storage_metadata: r_metadata,
183                                    details: r_details,
184                                    data_config: r_data_config,
185                                },
186                            ),
187                        ) => {
188                            l_metadata.alter_compatible(id, r_metadata).is_ok()
189                                && l_details.alter_compatible(id, r_details).is_ok()
190                                && l_data_config.alter_compatible(id, r_data_config).is_ok()
191                        }
192                        _ => true,
193                    }),
194                "source_exports",
195            ),
196            (instance_id == &other.instance_id, "instance_id"),
197            (
198                remap_collection_id == &other.remap_collection_id,
199                "remap_collection_id",
200            ),
201        ];
202        for (compatible, field) in compatibility_checks {
203            if !compatible {
204                tracing::warn!(
205                    "IngestionDescription incompatible at {field}:\nself:\n{:#?}\n\nother\n{:#?}",
206                    self,
207                    other
208                );
209
210                return Err(AlterError { id });
211            }
212        }
213
214        Ok(())
215    }
216}
217
218impl<R: ConnectionResolver> IntoInlineConnection<IngestionDescription, R>
219    for IngestionDescription<(), ReferencedConnection>
220{
221    fn into_inline_connection(self, r: R) -> IngestionDescription {
222        let IngestionDescription {
223            desc,
224            remap_metadata,
225            source_exports,
226            instance_id,
227            remap_collection_id,
228        } = self;
229
230        IngestionDescription {
231            desc: desc.into_inline_connection(r),
232            remap_metadata,
233            source_exports,
234            instance_id,
235            remap_collection_id,
236        }
237    }
238}
239
240#[derive(Clone, Debug, Serialize, Deserialize, Eq, PartialEq)]
241pub struct SourceExport<S = (), C: ConnectionAccess = InlinedConnection> {
242    /// The collection metadata needed to write the exported data
243    pub storage_metadata: S,
244    /// Details necessary for the source to export data to this export's collection.
245    pub details: SourceExportDetails,
246    /// Config necessary to handle (e.g. decode and envelope) the data for this export.
247    pub data_config: SourceExportDataConfig<C>,
248}
249
250pub trait SourceTimestamp:
251    Timestamp + Columnation + Refines<()> + std::fmt::Display + Sync
252{
253    fn encode_row(&self) -> Row;
254    fn decode_row(row: &Row) -> Self;
255}
256
257impl SourceTimestamp for MzOffset {
258    fn encode_row(&self) -> Row {
259        Row::pack([Datum::UInt64(self.offset)])
260    }
261
262    fn decode_row(row: &Row) -> Self {
263        let mut datums = row.iter();
264        match (datums.next(), datums.next()) {
265            (Some(Datum::UInt64(offset)), None) => MzOffset::from(offset),
266            _ => panic!("invalid row {row:?}"),
267        }
268    }
269}
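// A small sketch of the `SourceTimestamp` row encoding for `MzOffset`: a single
// `Datum::UInt64` column that round-trips through `encode_row`/`decode_row`.
#[cfg(test)]
mod mz_offset_source_timestamp_example {
    use super::*;

    #[test]
    fn mz_offset_row_roundtrip() {
        let offset = MzOffset::from(42);
        let row = offset.encode_row();
        // The encoded row has exactly one datum: the wrapped offset.
        assert_eq!(row.iter().count(), 1);
        assert_eq!(MzOffset::decode_row(&row), offset);
    }
}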
270
271/// Universal language for describing message positions in Materialize, in a source-independent
272/// way. Individual sources, like Kafka or file sources, should explicitly implement their own
273/// offset type that converts to/from `MzOffset`. A 0-MzOffset denotes an empty stream.
274#[derive(
275    Copy,
276    Clone,
277    Default,
278    Debug,
279    PartialEq,
280    PartialOrd,
281    Eq,
282    Ord,
283    Hash,
284    Serialize,
285    Deserialize
286)]
287pub struct MzOffset {
288    pub offset: u64,
289}
290
291impl differential_dataflow::difference::Semigroup for MzOffset {
292    fn plus_equals(&mut self, rhs: &Self) {
293        self.offset.plus_equals(&rhs.offset)
294    }
295}
296
297impl differential_dataflow::difference::IsZero for MzOffset {
298    fn is_zero(&self) -> bool {
299        self.offset.is_zero()
300    }
301}
302
303impl mz_persist_types::Codec64 for MzOffset {
304    fn codec_name() -> String {
305        "MzOffset".to_string()
306    }
307
308    fn encode(&self) -> [u8; 8] {
309        mz_persist_types::Codec64::encode(&self.offset)
310    }
311
312    fn decode(buf: [u8; 8]) -> Self {
313        Self {
314            offset: mz_persist_types::Codec64::decode(buf),
315        }
316    }
317}
318
319impl columnation::Columnation for MzOffset {
320    type InnerRegion = columnation::CopyRegion<MzOffset>;
321}
322
323impl MzOffset {
324    pub fn checked_sub(self, other: Self) -> Option<Self> {
325        self.offset
326            .checked_sub(other.offset)
327            .map(|offset| Self { offset })
328    }
329}
330
331/// Convert a raw `u64` offset into an `MzOffset`.
333impl From<u64> for MzOffset {
334    fn from(offset: u64) -> Self {
335        Self { offset }
336    }
337}
338
339impl std::fmt::Display for MzOffset {
340    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
341        write!(f, "{}", self.offset)
342    }
343}
344
345// Assume overflow does not occur for addition
346impl Add<u64> for MzOffset {
347    type Output = MzOffset;
348
349    fn add(self, x: u64) -> MzOffset {
350        MzOffset {
351            offset: self.offset + x,
352        }
353    }
354}
355impl Add<Self> for MzOffset {
356    type Output = Self;
357
358    fn add(self, x: Self) -> Self {
359        MzOffset {
360            offset: self.offset + x.offset,
361        }
362    }
363}
364impl AddAssign<u64> for MzOffset {
365    fn add_assign(&mut self, x: u64) {
366        self.offset += x;
367    }
368}
369impl AddAssign<Self> for MzOffset {
370    fn add_assign(&mut self, x: Self) {
371        self.offset += x.offset;
372    }
373}
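// Illustrative values for the arithmetic impls above and the `Codec64` encoding:
// addition and `checked_sub` operate on the wrapped `u64`, and `Codec64` round-trips
// through a fixed 8-byte representation.
#[cfg(test)]
mod mz_offset_arithmetic_example {
    use super::*;

    #[test]
    fn mz_offset_arithmetic() {
        let a = MzOffset::from(10);
        assert_eq!(a + 5u64, MzOffset::from(15));
        assert_eq!(a + MzOffset::from(2), MzOffset::from(12));
        assert_eq!(a.checked_sub(MzOffset::from(4)), Some(MzOffset::from(6)));
        assert_eq!(MzOffset::from(3).checked_sub(a), None);

        // Round-trip through the fixed-width persist encoding.
        let bytes = mz_persist_types::Codec64::encode(&a);
        assert_eq!(<MzOffset as mz_persist_types::Codec64>::decode(bytes), a);
    }
}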
374
375/// Convert from `PgLsn` to MzOffset
376impl From<tokio_postgres::types::PgLsn> for MzOffset {
377    fn from(lsn: tokio_postgres::types::PgLsn) -> Self {
378        MzOffset { offset: lsn.into() }
379    }
380}
381
382impl Timestamp for MzOffset {
383    type Summary = MzOffset;
384
385    fn minimum() -> Self {
386        MzOffset {
387            offset: Timestamp::minimum(),
388        }
389    }
390}
391
392impl PathSummary<MzOffset> for MzOffset {
393    fn results_in(&self, src: &MzOffset) -> Option<MzOffset> {
394        Some(MzOffset {
395            offset: self.offset.results_in(&src.offset)?,
396        })
397    }
398
399    fn followed_by(&self, other: &Self) -> Option<Self> {
400        Some(MzOffset {
401            offset: PathSummary::<u64>::followed_by(&self.offset, &other.offset)?,
402        })
403    }
404}
405
406impl Refines<()> for MzOffset {
407    fn to_inner(_: ()) -> Self {
408        MzOffset::minimum()
409    }
410    fn to_outer(self) {}
411    fn summarize(_: Self::Summary) {}
412}
413
414impl PartialOrder for MzOffset {
415    #[inline]
416    fn less_equal(&self, other: &Self) -> bool {
417        self.offset.less_equal(&other.offset)
418    }
419}
420
421impl TotalOrder for MzOffset {}
422
423/// The meaning of the timestamp number produced by data sources. This type
424/// is not concerned with the source of the timestamp (like if the data came
425/// from a Debezium consistency topic or a CDCv2 stream), instead only what the
426/// timestamp number means.
427///
428/// Some variants here have attached data used to differentiate incomparable
429/// instantiations. These attached data types should be expanded in the future
430/// if we need to tell apart more kinds of sources.
431#[derive(
432    Clone,
433    Debug,
434    Ord,
435    PartialOrd,
436    Eq,
437    PartialEq,
438    Serialize,
439    Deserialize,
440    Hash
441)]
442pub enum Timeline {
443    /// EpochMilliseconds means the timestamp is the number of milliseconds since
444    /// the Unix epoch.
445    EpochMilliseconds,
446    /// External means the timestamp comes from an external data source and we
447    /// don't know what the number means. The attached String is the source's name,
448    /// which will result in different sources being incomparable.
449    External(String),
450    /// User means the user has manually specified a timeline. The attached
451    /// String is specified by the user, allowing them to decide sources that are
452    /// joinable.
453    User(String),
454}
455
456impl Timeline {
457    const EPOCH_MILLISECOND_ID_CHAR: char = 'M';
458    const EXTERNAL_ID_CHAR: char = 'E';
459    const USER_ID_CHAR: char = 'U';
460
461    fn id_char(&self) -> char {
462        match self {
463            Self::EpochMilliseconds => Self::EPOCH_MILLISECOND_ID_CHAR,
464            Self::External(_) => Self::EXTERNAL_ID_CHAR,
465            Self::User(_) => Self::USER_ID_CHAR,
466        }
467    }
468}
469
470impl ToString for Timeline {
471    fn to_string(&self) -> String {
472        match self {
473            Self::EpochMilliseconds => format!("{}", self.id_char()),
474            Self::External(id) => format!("{}.{id}", self.id_char()),
475            Self::User(id) => format!("{}.{id}", self.id_char()),
476        }
477    }
478}
479
480impl FromStr for Timeline {
481    type Err = String;
482
483    fn from_str(s: &str) -> Result<Self, Self::Err> {
484        if s.is_empty() {
485            return Err("empty timeline".to_string());
486        }
487        let mut chars = s.chars();
488        match chars.next().expect("non-empty string") {
489            Self::EPOCH_MILLISECOND_ID_CHAR => match chars.next() {
490                None => Ok(Self::EpochMilliseconds),
491                Some(_) => Err(format!("unknown timeline: {s}")),
492            },
493            Self::EXTERNAL_ID_CHAR => match chars.next() {
494                Some('.') => Ok(Self::External(chars.as_str().to_string())),
495                _ => Err(format!("unknown timeline: {s}")),
496            },
497            Self::USER_ID_CHAR => match chars.next() {
498                Some('.') => Ok(Self::User(chars.as_str().to_string())),
499                _ => Err(format!("unknown timeline: {s}")),
500            },
501            _ => Err(format!("unknown timeline: {s}")),
502        }
503    }
504}
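// A small sketch of the `Timeline` string format: the id character alone for
// `EpochMilliseconds`, or the id character, a `.`, and the attached identifier for
// `External`/`User`. The identifiers below are hypothetical.
#[cfg(test)]
mod timeline_string_format_example {
    use super::*;

    #[test]
    fn timeline_string_roundtrip() {
        assert_eq!(Timeline::EpochMilliseconds.to_string(), "M");
        assert_eq!(Timeline::User("finance".to_string()).to_string(), "U.finance");
        assert_eq!(
            Timeline::from_str("E.kafka-cluster").unwrap(),
            Timeline::External("kafka-cluster".to_string()),
        );
        // Unknown id characters are rejected.
        assert!(Timeline::from_str("Z.unknown").is_err());
    }
}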
505
506/// A connection to an external system
507pub trait SourceConnection: Debug + Clone + PartialEq + AlterCompatible {
508    /// The name of the external system (e.g. kafka, postgres, etc.).
509    fn name(&self) -> &'static str;
510
511    /// The name of the resource in the external system (e.g. kafka topic), if any.
512    fn external_reference(&self) -> Option<&str>;
513
514    /// Defines the key schema to use by default for this source connection type.
515    /// This will be used for the primary export of the source and as the default
516    /// pre-encoding key schema for the source.
517    fn default_key_desc(&self) -> RelationDesc;
518
519    /// Defines the value schema to use by default for this source connection type.
520    /// This will be used for the primary export of the source and as the default
521    /// pre-encoding value schema for the source.
522    fn default_value_desc(&self) -> RelationDesc;
523
524    /// The schema of this connection's timestamp type. This will also be the schema of the
525    /// progress relation.
526    fn timestamp_desc(&self) -> RelationDesc;
527
528    /// The ID of the connection object (i.e. the one obtained from running `CREATE CONNECTION`) in
529    /// the catalog, if any.
530    fn connection_id(&self) -> Option<CatalogItemId>;
531
532    /// Whether the source type supports read only mode.
533    fn supports_read_only(&self) -> bool;
534
535    /// Whether the source type prefers to run on only one replica of a multi-replica cluster.
536    fn prefers_single_replica(&self) -> bool;
537}
538
539#[derive(Clone, Copy, Debug, Eq, PartialEq, Serialize, Deserialize)]
540pub enum Compression {
541    Gzip,
542    None,
543}
544
545/// Defines the configuration for how to handle data that is exported for a given
546/// Source Export.
547#[derive(Clone, Debug, Serialize, Deserialize, Eq, PartialEq)]
548pub struct SourceExportDataConfig<C: ConnectionAccess = InlinedConnection> {
549    pub encoding: Option<encoding::SourceDataEncoding<C>>,
550    pub envelope: SourceEnvelope,
551}
552
553impl<R: ConnectionResolver> IntoInlineConnection<SourceExportDataConfig, R>
554    for SourceExportDataConfig<ReferencedConnection>
555{
556    fn into_inline_connection(self, r: R) -> SourceExportDataConfig {
557        let SourceExportDataConfig { encoding, envelope } = self;
558
559        SourceExportDataConfig {
560            encoding: encoding.map(|e| e.into_inline_connection(r)),
561            envelope,
562        }
563    }
564}
565
566impl<C: ConnectionAccess> AlterCompatible for SourceExportDataConfig<C> {
567    fn alter_compatible(&self, id: GlobalId, other: &Self) -> Result<(), AlterError> {
568        if self == other {
569            return Ok(());
570        }
571        let Self { encoding, envelope } = &self;
572
573        let compatibility_checks = [
574            (
575                match (encoding, &other.encoding) {
576                    (Some(s), Some(o)) => s.alter_compatible(id, o).is_ok(),
577                    (s, o) => s == o,
578                },
579                "encoding",
580            ),
581            (envelope == &other.envelope, "envelope"),
582        ];
583
584        for (compatible, field) in compatibility_checks {
585            if !compatible {
586                tracing::warn!(
587                    "SourceDesc incompatible {field}:\nself:\n{:#?}\n\nother\n{:#?}",
588                    self,
589                    other
590                );
591
592                return Err(AlterError { id });
593            }
594        }
595        Ok(())
596    }
597}
598
599impl<C: ConnectionAccess> SourceExportDataConfig<C> {
600    /// Returns `true` if this connection yields data that is
601    /// append-only/monotonic. Append-only means the source
602    /// never produces retractions.
603    // TODO(guswynn): consider enforcing this more completely at the
604    // parsing/typechecking level, by not using an `envelope`
605    // for sources like pg
606    pub fn monotonic(&self, connection: &GenericSourceConnection<C>) -> bool {
607        match &self.envelope {
608            // Upsert and CdcV2 may produce retractions.
609            SourceEnvelope::Upsert(_) | SourceEnvelope::CdcV2 => false,
610            SourceEnvelope::None(_) => {
611                match connection {
612                    // Postgres can produce retractions (deletes).
613                    GenericSourceConnection::Postgres(_) => false,
614                    // MySQL can produce retractions (deletes).
615                    GenericSourceConnection::MySql(_) => false,
616                    // SQL Server can produce retractions (deletes).
617                    GenericSourceConnection::SqlServer(_) => false,
618                    // Whether or not a Loadgen source can produce retractions varies.
619                    GenericSourceConnection::LoadGenerator(g) => g.load_generator.is_monotonic(),
620                    // Kafka exports with `None` envelope are append-only.
621                    GenericSourceConnection::Kafka(_) => true,
622                }
623            }
624        }
625    }
626}
627
628/// An external source of updates for a relational collection.
629#[derive(Clone, Debug, Serialize, Deserialize, Eq, PartialEq)]
630pub struct SourceDesc<C: ConnectionAccess = InlinedConnection> {
631    pub connection: GenericSourceConnection<C>,
632    pub timestamp_interval: Duration,
633}
634
635impl<R: ConnectionResolver> IntoInlineConnection<SourceDesc, R>
636    for SourceDesc<ReferencedConnection>
637{
638    fn into_inline_connection(self, r: R) -> SourceDesc {
639        let SourceDesc {
640            connection,
641            timestamp_interval,
642        } = self;
643
644        SourceDesc {
645            connection: connection.into_inline_connection(&r),
646            timestamp_interval,
647        }
648    }
649}
650
651impl<C: ConnectionAccess> AlterCompatible for SourceDesc<C> {
652    /// Determines if `self` is compatible with another `SourceDesc`, in such a
653    /// way that it is possible to turn `self` into `other` through a valid
654    /// series of transformations (e.g. no transformation or `ALTER SOURCE`).
655    fn alter_compatible(&self, id: GlobalId, other: &Self) -> Result<(), AlterError> {
656        if self == other {
657            return Ok(());
658        }
659        let Self {
660            connection,
661            timestamp_interval,
662        } = &self;
663
664        let compatibility_checks = [
665            (
666                connection.alter_compatible(id, &other.connection).is_ok(),
667                "connection",
668            ),
669            (
670                timestamp_interval == &other.timestamp_interval,
671                "timestamp_interval",
672            ),
673        ];
674
675        for (compatible, field) in compatibility_checks {
676            if !compatible {
677                tracing::warn!(
678                    "SourceDesc incompatible {field}:\nself:\n{:#?}\n\nother\n{:#?}",
679                    self,
680                    other
681                );
682
683                return Err(AlterError { id });
684            }
685        }
686
687        Ok(())
688    }
689}
690
691#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)]
692pub enum GenericSourceConnection<C: ConnectionAccess = InlinedConnection> {
693    Kafka(KafkaSourceConnection<C>),
694    Postgres(PostgresSourceConnection<C>),
695    MySql(MySqlSourceConnection<C>),
696    SqlServer(SqlServerSourceConnection<C>),
697    LoadGenerator(LoadGeneratorSourceConnection),
698}
699
700impl<C: ConnectionAccess> From<KafkaSourceConnection<C>> for GenericSourceConnection<C> {
701    fn from(conn: KafkaSourceConnection<C>) -> Self {
702        Self::Kafka(conn)
703    }
704}
705
706impl<C: ConnectionAccess> From<PostgresSourceConnection<C>> for GenericSourceConnection<C> {
707    fn from(conn: PostgresSourceConnection<C>) -> Self {
708        Self::Postgres(conn)
709    }
710}
711
712impl<C: ConnectionAccess> From<MySqlSourceConnection<C>> for GenericSourceConnection<C> {
713    fn from(conn: MySqlSourceConnection<C>) -> Self {
714        Self::MySql(conn)
715    }
716}
717
718impl<C: ConnectionAccess> From<SqlServerSourceConnection<C>> for GenericSourceConnection<C> {
719    fn from(conn: SqlServerSourceConnection<C>) -> Self {
720        Self::SqlServer(conn)
721    }
722}
723
724impl<C: ConnectionAccess> From<LoadGeneratorSourceConnection> for GenericSourceConnection<C> {
725    fn from(conn: LoadGeneratorSourceConnection) -> Self {
726        Self::LoadGenerator(conn)
727    }
728}
729
730impl<R: ConnectionResolver> IntoInlineConnection<GenericSourceConnection, R>
731    for GenericSourceConnection<ReferencedConnection>
732{
733    fn into_inline_connection(self, r: R) -> GenericSourceConnection {
734        match self {
735            GenericSourceConnection::Kafka(kafka) => {
736                GenericSourceConnection::Kafka(kafka.into_inline_connection(r))
737            }
738            GenericSourceConnection::Postgres(pg) => {
739                GenericSourceConnection::Postgres(pg.into_inline_connection(r))
740            }
741            GenericSourceConnection::MySql(mysql) => {
742                GenericSourceConnection::MySql(mysql.into_inline_connection(r))
743            }
744            GenericSourceConnection::SqlServer(sql_server) => {
745                GenericSourceConnection::SqlServer(sql_server.into_inline_connection(r))
746            }
747            GenericSourceConnection::LoadGenerator(lg) => {
748                GenericSourceConnection::LoadGenerator(lg)
749            }
750        }
751    }
752}
753
754impl<C: ConnectionAccess> SourceConnection for GenericSourceConnection<C> {
755    fn name(&self) -> &'static str {
756        match self {
757            Self::Kafka(conn) => conn.name(),
758            Self::Postgres(conn) => conn.name(),
759            Self::MySql(conn) => conn.name(),
760            Self::SqlServer(conn) => conn.name(),
761            Self::LoadGenerator(conn) => conn.name(),
762        }
763    }
764
765    fn external_reference(&self) -> Option<&str> {
766        match self {
767            Self::Kafka(conn) => conn.external_reference(),
768            Self::Postgres(conn) => conn.external_reference(),
769            Self::MySql(conn) => conn.external_reference(),
770            Self::SqlServer(conn) => conn.external_reference(),
771            Self::LoadGenerator(conn) => conn.external_reference(),
772        }
773    }
774
775    fn default_key_desc(&self) -> RelationDesc {
776        match self {
777            Self::Kafka(conn) => conn.default_key_desc(),
778            Self::Postgres(conn) => conn.default_key_desc(),
779            Self::MySql(conn) => conn.default_key_desc(),
780            Self::SqlServer(conn) => conn.default_key_desc(),
781            Self::LoadGenerator(conn) => conn.default_key_desc(),
782        }
783    }
784
785    fn default_value_desc(&self) -> RelationDesc {
786        match self {
787            Self::Kafka(conn) => conn.default_value_desc(),
788            Self::Postgres(conn) => conn.default_value_desc(),
789            Self::MySql(conn) => conn.default_value_desc(),
790            Self::SqlServer(conn) => conn.default_value_desc(),
791            Self::LoadGenerator(conn) => conn.default_value_desc(),
792        }
793    }
794
795    fn timestamp_desc(&self) -> RelationDesc {
796        match self {
797            Self::Kafka(conn) => conn.timestamp_desc(),
798            Self::Postgres(conn) => conn.timestamp_desc(),
799            Self::MySql(conn) => conn.timestamp_desc(),
800            Self::SqlServer(conn) => conn.timestamp_desc(),
801            Self::LoadGenerator(conn) => conn.timestamp_desc(),
802        }
803    }
804
805    fn connection_id(&self) -> Option<CatalogItemId> {
806        match self {
807            Self::Kafka(conn) => conn.connection_id(),
808            Self::Postgres(conn) => conn.connection_id(),
809            Self::MySql(conn) => conn.connection_id(),
810            Self::SqlServer(conn) => conn.connection_id(),
811            Self::LoadGenerator(conn) => conn.connection_id(),
812        }
813    }
814
815    fn supports_read_only(&self) -> bool {
816        match self {
817            GenericSourceConnection::Kafka(conn) => conn.supports_read_only(),
818            GenericSourceConnection::Postgres(conn) => conn.supports_read_only(),
819            GenericSourceConnection::MySql(conn) => conn.supports_read_only(),
820            GenericSourceConnection::SqlServer(conn) => conn.supports_read_only(),
821            GenericSourceConnection::LoadGenerator(conn) => conn.supports_read_only(),
822        }
823    }
824
825    fn prefers_single_replica(&self) -> bool {
826        match self {
827            GenericSourceConnection::Kafka(conn) => conn.prefers_single_replica(),
828            GenericSourceConnection::Postgres(conn) => conn.prefers_single_replica(),
829            GenericSourceConnection::MySql(conn) => conn.prefers_single_replica(),
830            GenericSourceConnection::SqlServer(conn) => conn.prefers_single_replica(),
831            GenericSourceConnection::LoadGenerator(conn) => conn.prefers_single_replica(),
832        }
833    }
834}
835impl<C: ConnectionAccess> crate::AlterCompatible for GenericSourceConnection<C> {
836    fn alter_compatible(&self, id: GlobalId, other: &Self) -> Result<(), AlterError> {
837        if self == other {
838            return Ok(());
839        }
840        let r = match (self, other) {
841            (Self::Kafka(conn), Self::Kafka(other)) => conn.alter_compatible(id, other),
842            (Self::Postgres(conn), Self::Postgres(other)) => conn.alter_compatible(id, other),
843            (Self::MySql(conn), Self::MySql(other)) => conn.alter_compatible(id, other),
844            (Self::SqlServer(conn), Self::SqlServer(other)) => conn.alter_compatible(id, other),
845            (Self::LoadGenerator(conn), Self::LoadGenerator(other)) => {
846                conn.alter_compatible(id, other)
847            }
848            _ => Err(AlterError { id }),
849        };
850
851        if r.is_err() {
852            tracing::warn!(
853                "GenericSourceConnection incompatible:\nself:\n{:#?}\n\nother\n{:#?}",
854                self,
855                other
856            );
857        }
858
859        r
860    }
861}
862
863/// Details necessary for each source export to allow the source implementations
864/// to export data to the export's collection.
865#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)]
866pub enum SourceExportDetails {
867    /// Used when the primary collection of a source is not an export that
868    /// data is written to.
869    None,
870    Kafka(KafkaSourceExportDetails),
871    Postgres(PostgresSourceExportDetails),
872    MySql(MySqlSourceExportDetails),
873    SqlServer(SqlServerSourceExportDetails),
874    LoadGenerator(LoadGeneratorSourceExportDetails),
875}
876
877impl crate::AlterCompatible for SourceExportDetails {
878    fn alter_compatible(&self, id: GlobalId, other: &Self) -> Result<(), AlterError> {
879        if self == other {
880            return Ok(());
881        }
882        let r = match (self, other) {
883            (Self::None, Self::None) => Ok(()),
884            (Self::Kafka(s), Self::Kafka(o)) => s.alter_compatible(id, o),
885            (Self::Postgres(s), Self::Postgres(o)) => s.alter_compatible(id, o),
886            (Self::MySql(s), Self::MySql(o)) => s.alter_compatible(id, o),
887            (Self::LoadGenerator(s), Self::LoadGenerator(o)) => s.alter_compatible(id, o),
888            _ => Err(AlterError { id }),
889        };
890
891        if r.is_err() {
892            tracing::warn!(
893                "SourceExportDetails incompatible:\nself:\n{:#?}\n\nother\n{:#?}",
894                self,
895                other
896            );
897        }
898
899        r
900    }
901}
902
903/// Details necessary to store in the `Details` option of a source export
904/// statement (`CREATE SUBSOURCE` and `CREATE TABLE .. FROM SOURCE` statements),
905/// to generate the appropriate `SourceExportDetails` struct during planning.
906/// NOTE that this is serialized as proto to the catalog, so any changes here
907/// must be backwards compatible or will require a migration.
908pub enum SourceExportStatementDetails {
909    Postgres {
910        table: mz_postgres_util::desc::PostgresTableDesc,
911    },
912    MySql {
913        table: mz_mysql_util::MySqlTableDesc,
914        initial_gtid_set: String,
915    },
916    SqlServer {
917        table: mz_sql_server_util::desc::SqlServerTableDesc,
918        capture_instance: Arc<str>,
919        initial_lsn: mz_sql_server_util::cdc::Lsn,
920    },
921    LoadGenerator {
922        output: LoadGeneratorOutput,
923    },
924    Kafka {},
925}
926
927impl RustType<ProtoSourceExportStatementDetails> for SourceExportStatementDetails {
928    fn into_proto(&self) -> ProtoSourceExportStatementDetails {
929        match self {
930            SourceExportStatementDetails::Postgres { table } => ProtoSourceExportStatementDetails {
931                kind: Some(proto_source_export_statement_details::Kind::Postgres(
932                    postgres::ProtoPostgresSourceExportStatementDetails {
933                        table: Some(table.into_proto()),
934                    },
935                )),
936            },
937            SourceExportStatementDetails::MySql {
938                table,
939                initial_gtid_set,
940            } => ProtoSourceExportStatementDetails {
941                kind: Some(proto_source_export_statement_details::Kind::Mysql(
942                    mysql::ProtoMySqlSourceExportStatementDetails {
943                        table: Some(table.into_proto()),
944                        initial_gtid_set: initial_gtid_set.clone(),
945                    },
946                )),
947            },
948            SourceExportStatementDetails::SqlServer {
949                table,
950                capture_instance,
951                initial_lsn,
952            } => ProtoSourceExportStatementDetails {
953                kind: Some(proto_source_export_statement_details::Kind::SqlServer(
954                    sql_server::ProtoSqlServerSourceExportStatementDetails {
955                        table: Some(table.into_proto()),
956                        capture_instance: capture_instance.to_string(),
957                        initial_lsn: initial_lsn.as_bytes().to_vec(),
958                    },
959                )),
960            },
961            SourceExportStatementDetails::LoadGenerator { output } => {
962                ProtoSourceExportStatementDetails {
963                    kind: Some(proto_source_export_statement_details::Kind::Loadgen(
964                        load_generator::ProtoLoadGeneratorSourceExportStatementDetails {
965                            output: output.into_proto().into(),
966                        },
967                    )),
968                }
969            }
970            SourceExportStatementDetails::Kafka {} => ProtoSourceExportStatementDetails {
971                kind: Some(proto_source_export_statement_details::Kind::Kafka(
972                    kafka::ProtoKafkaSourceExportStatementDetails {},
973                )),
974            },
975        }
976    }
977
978    fn from_proto(proto: ProtoSourceExportStatementDetails) -> Result<Self, TryFromProtoError> {
979        use proto_source_export_statement_details::Kind;
980        Ok(match proto.kind {
981            Some(Kind::Postgres(details)) => SourceExportStatementDetails::Postgres {
982                table: details
983                    .table
984                    .into_rust_if_some("ProtoPostgresSourceExportStatementDetails::table")?,
985            },
986            Some(Kind::Mysql(details)) => SourceExportStatementDetails::MySql {
987                table: details
988                    .table
989                    .into_rust_if_some("ProtoMySqlSourceExportStatementDetails::table")?,
990
991                initial_gtid_set: details.initial_gtid_set,
992            },
993            Some(Kind::SqlServer(details)) => SourceExportStatementDetails::SqlServer {
994                table: details
995                    .table
996                    .into_rust_if_some("ProtoSqlServerSourceExportStatementDetails::table")?,
997                capture_instance: details.capture_instance.into(),
998                initial_lsn: mz_sql_server_util::cdc::Lsn::try_from(details.initial_lsn.as_slice())
999                    .map_err(|e| TryFromProtoError::InvalidFieldError(e.to_string()))?,
1000            },
1001            Some(Kind::Loadgen(details)) => SourceExportStatementDetails::LoadGenerator {
1002                output: details
1003                    .output
1004                    .into_rust_if_some("ProtoLoadGeneratorSourceExportStatementDetails::output")?,
1005            },
1006            Some(Kind::Kafka(_details)) => SourceExportStatementDetails::Kafka {},
1007            None => {
1008                return Err(TryFromProtoError::missing_field(
1009                    "ProtoSourceExportStatementDetails::kind",
1010                ));
1011            }
1012        })
1013    }
1014}
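// A tiny sketch of the proto round trip above, using the variant that carries no
// attached data; the other variants carry per-source table descriptions.
#[cfg(test)]
mod source_export_statement_details_proto_example {
    use super::*;

    #[test]
    fn kafka_details_proto_roundtrip() {
        let details = SourceExportStatementDetails::Kafka {};
        let proto = details.into_proto();
        let roundtripped =
            SourceExportStatementDetails::from_proto(proto).expect("roundtrip succeeds");
        assert!(matches!(
            roundtripped,
            SourceExportStatementDetails::Kafka {}
        ));
    }
}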
1015
1016#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)]
1017#[repr(transparent)]
1018pub struct SourceData(pub Result<Row, DataflowError>);
1019
1020impl Default for SourceData {
1021    fn default() -> Self {
1022        SourceData(Ok(Row::default()))
1023    }
1024}
1025
1026impl Deref for SourceData {
1027    type Target = Result<Row, DataflowError>;
1028
1029    fn deref(&self) -> &Self::Target {
1030        &self.0
1031    }
1032}
1033
1034impl DerefMut for SourceData {
1035    fn deref_mut(&mut self) -> &mut Self::Target {
1036        &mut self.0
1037    }
1038}
1039
1040impl RustType<ProtoSourceData> for SourceData {
1041    fn into_proto(&self) -> ProtoSourceData {
1042        use proto_source_data::Kind;
1043        ProtoSourceData {
1044            kind: Some(match &**self {
1045                Ok(row) => Kind::Ok(row.into_proto()),
1046                Err(err) => Kind::Err(err.into_proto()),
1047            }),
1048        }
1049    }
1050
1051    fn from_proto(proto: ProtoSourceData) -> Result<Self, TryFromProtoError> {
1052        use proto_source_data::Kind;
1053        match proto.kind {
1054            Some(kind) => match kind {
1055                Kind::Ok(row) => Ok(SourceData(Ok(row.into_rust()?))),
1056                Kind::Err(err) => Ok(SourceData(Err(err.into_rust()?))),
1057            },
1058            None => Result::Err(TryFromProtoError::missing_field("ProtoSourceData::kind")),
1059        }
1060    }
1061}
1062
1063impl Codec for SourceData {
1064    type Storage = ProtoRow;
1065    type Schema = RelationDesc;
1066
1067    fn codec_name() -> String {
1068        "protobuf[SourceData]".into()
1069    }
1070
1071    fn encode<B: BufMut>(&self, buf: &mut B) {
1072        self.into_proto()
1073            .encode(buf)
1074            .expect("no required fields means no initialization errors");
1075    }
1076
1077    fn decode(buf: &[u8], schema: &RelationDesc) -> Result<Self, String> {
1078        let mut val = SourceData::default();
1079        <Self as Codec>::decode_from(&mut val, buf, &mut None, schema)?;
1080        Ok(val)
1081    }
1082
1083    fn decode_from<'a>(
1084        &mut self,
1085        buf: &'a [u8],
1086        storage: &mut Option<ProtoRow>,
1087        schema: &RelationDesc,
1088    ) -> Result<(), String> {
1089        // Optimize for common case of `Ok` by leaving a (cleared) `ProtoRow` in
1090        // the `Ok` variant of `ProtoSourceData`. prost's `Message::merge` impl
1091        // is smart about reusing the `Vec<Datum>` when it can.
1092        let mut proto = storage.take().unwrap_or_default();
1093        proto.clear();
1094        let mut proto = ProtoSourceData {
1095            kind: Some(proto_source_data::Kind::Ok(proto)),
1096        };
1097        proto.merge(buf).map_err(|err| err.to_string())?;
1098        match (proto.kind, &mut self.0) {
1099            // Again, optimize for the common case...
1100            (Some(proto_source_data::Kind::Ok(proto)), Ok(row)) => {
1101                let ret = row.decode_from_proto(&proto, schema);
1102                storage.replace(proto);
1103                ret
1104            }
1105            // ...otherwise fall back to the obvious thing.
1106            (kind, _) => {
1107                let proto = ProtoSourceData { kind };
1108                *self = proto.into_rust().map_err(|err| err.to_string())?;
1109                // Nothing to put back in storage.
1110                Ok(())
1111            }
1112        }
1113    }
1114
1115    fn validate(val: &Self, desc: &Self::Schema) -> Result<(), String> {
1116        match &val.0 {
1117            Ok(row) => Row::validate(row, desc),
1118            Err(_) => Ok(()),
1119        }
1120    }
1121
1122    fn encode_schema(schema: &Self::Schema) -> Bytes {
1123        schema.into_proto().encode_to_vec().into()
1124    }
1125
1126    fn decode_schema(buf: &Bytes) -> Self::Schema {
1127        let proto = ProtoRelationDesc::decode(buf.as_ref()).expect("valid schema");
1128        proto.into_rust().expect("valid schema")
1129    }
1130}
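// A minimal round trip through the `Codec` impl above, using an empty relation so no
// column schema is needed.
#[cfg(test)]
mod source_data_codec_example {
    use super::*;

    #[test]
    fn source_data_codec_roundtrip() {
        let desc = RelationDesc::empty();
        let data = SourceData(Ok(Row::default()));

        let mut buf = Vec::new();
        Codec::encode(&data, &mut buf);
        let decoded = SourceData::decode(&buf, &desc).expect("valid encoding");
        assert_eq!(decoded, data);
    }
}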
1131
1132/// Given a [`RelationDesc`], returns a [`Strategy`] that generates arbitrary [`SourceData`].
1133pub fn arb_source_data_for_relation_desc(
1134    desc: &RelationDesc,
1135) -> impl Strategy<Value = SourceData> + use<> {
1136    let row_strat = arb_row_for_relation(desc).no_shrink();
1137
1138    proptest::strategy::Union::new_weighted(vec![
1139        (50, row_strat.prop_map(|row| SourceData(Ok(row))).boxed()),
1140        (
1141            1,
1142            any::<DataflowError>()
1143                .prop_map(|err| SourceData(Err(err)))
1144                .no_shrink()
1145                .boxed(),
1146        ),
1147    ])
1148}
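// A hedged usage sketch: drawing `SourceData` values inside a proptest, assuming some
// `RelationDesc` named `desc` is in scope. Roughly 50:1 of the generated values are
// `Ok(Row)` rather than `Err(DataflowError)`.
//
//     use proptest::prelude::*;
//     proptest!(|(data in arb_source_data_for_relation_desc(&desc))| {
//         prop_assert!(SourceData::validate(&data, &desc).is_ok());
//     });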
1149
1150/// Describes how external references should be organized in a multi-level
1151/// hierarchy.
1152///
1153/// For both PostgreSQL and MySQL sources, these levels of reference are
1154/// intrinsic to the items which we're referencing. If there are other naming
1155/// schemas for other types of sources we discover, we might need to revisit
1156/// this.
1157pub trait ExternalCatalogReference {
1158    /// The "second" level of namespacing for the reference.
1159    fn schema_name(&self) -> &str;
1160    /// The lowest level of namespacing for the reference.
1161    fn item_name(&self) -> &str;
1162}
1163
1164impl ExternalCatalogReference for &mz_mysql_util::MySqlTableDesc {
1165    fn schema_name(&self) -> &str {
1166        &self.schema_name
1167    }
1168
1169    fn item_name(&self) -> &str {
1170        &self.name
1171    }
1172}
1173
1174impl ExternalCatalogReference for mz_postgres_util::desc::PostgresTableDesc {
1175    fn schema_name(&self) -> &str {
1176        &self.namespace
1177    }
1178
1179    fn item_name(&self) -> &str {
1180        &self.name
1181    }
1182}
1183
1184impl ExternalCatalogReference for &mz_sql_server_util::desc::SqlServerTableDesc {
1185    fn schema_name(&self) -> &str {
1186        &*self.schema_name
1187    }
1188
1189    fn item_name(&self) -> &str {
1190        &*self.name
1191    }
1192}
1193
1194// This implementation provides a means of converting arbitrary objects into an
1195// `ExternalCatalogReference`, e.g. load generator view names.
1196impl<'a> ExternalCatalogReference for (&'a str, &'a str) {
1197    fn schema_name(&self) -> &str {
1198        self.0
1199    }
1200
1201    fn item_name(&self) -> &str {
1202        self.1
1203    }
1204}
1205
1206/// Stores and resolves references to a `&[T: ExternalCatalogReference]`.
1207///
1208/// This is meant to provide an API to quickly look up a source's subsources.
1209///
1210/// For sources that do not provide any subsources, use the `Default`
1211/// implementation, which is empty and will not be able to resolve any
1212/// references.
1213#[derive(Debug, Clone, Default, PartialEq, Eq, Serialize, Deserialize)]
1214pub struct SourceReferenceResolver {
1215    inner: BTreeMap<Ident, BTreeMap<Ident, BTreeMap<Ident, usize>>>,
1216}
1217
1218#[derive(Debug, Clone, thiserror::Error)]
1219pub enum ExternalReferenceResolutionError {
1220    #[error("reference to {name} not found in source")]
1221    DoesNotExist { name: String },
1222    #[error(
1223        "reference {name} is ambiguous, consider specifying an additional \
1224    layer of qualification"
1225    )]
1226    Ambiguous { name: String },
1227    #[error("invalid identifier: {0}")]
1228    Ident(#[from] IdentError),
1229}
1230
1231impl<'a> SourceReferenceResolver {
1232    /// Constructs a new `SourceReferenceResolver` from a slice of
1233    /// `T: ExternalCatalogReference`.
1234    ///
1235    /// # Errors
1236    /// - If any `&str` provided cannot be taken to an [`Ident`].
1237    pub fn new<T: ExternalCatalogReference>(
1238        database: &str,
1239        referenceable_items: &'a [T],
1240    ) -> Result<SourceReferenceResolver, ExternalReferenceResolutionError> {
1241        // An index from table name -> schema name -> database name -> index in
1242        // `referenceable_items`.
1243        let mut inner = BTreeMap::new();
1244
1245        let database = Ident::new(database)?;
1246
1247        for (reference_idx, item) in referenceable_items.iter().enumerate() {
1248            let item_name = Ident::new(item.item_name())?;
1249            let schema_name = Ident::new(item.schema_name())?;
1250
1251            inner
1252                .entry(item_name)
1253                .or_insert_with(BTreeMap::new)
1254                .entry(schema_name)
1255                .or_insert_with(BTreeMap::new)
1256                .entry(database.clone())
1257                .or_insert(reference_idx);
1258        }
1259
1260        Ok(SourceReferenceResolver { inner })
1261    }
1262
1263    /// Returns the canonical reference and index from which it originated in
1264    /// the `referenceable_items` provided to [`Self::new`].
1265    ///
1266    /// # Args
1267    /// - `name` is `&[Ident]` to let users provide the inner element of
1268    ///   [`UnresolvedItemName`].
1269    /// - `canonicalize_to_width` limits the number of elements in the returned
1270    ///   [`UnresolvedItemName`]; this is useful if the source type requires
1271    ///   contriving database and schema names that a subsource should not
1272    ///   persist as its reference.
1273    ///
1274    /// # Errors
1275    /// - If `name` does not resolve to an item in `self.inner`.
1276    ///
1277    /// # Panics
1278    /// - If `canonicalize_to_width` is not in `1..=3`.
1279    pub fn resolve(
1280        &self,
1281        name: &[Ident],
1282        canonicalize_to_width: usize,
1283    ) -> Result<(UnresolvedItemName, usize), ExternalReferenceResolutionError> {
1284        let (db, schema, idx) = self.resolve_inner(name)?;
1285
1286        let item = name.last().expect("must have provided at least 1 element");
1287
1288        let canonical_name = match canonicalize_to_width {
1289            1 => vec![item.clone()],
1290            2 => vec![schema.clone(), item.clone()],
1291            3 => vec![db.clone(), schema.clone(), item.clone()],
1292            o => panic!("canonicalize_to_width values must be 1..=3, but got {}", o),
1293        };
1294
1295        Ok((UnresolvedItemName(canonical_name), idx))
1296    }
1297
1298    /// Returns the index from which it originated in the `referenceable_items`
1299    /// provided to [`Self::new`].
1300    ///
1301    /// # Args
1302    /// `name` is `&[Ident]` to let users provide the inner element of
1303    /// [`UnresolvedItemName`].
1304    ///
1305    /// # Errors
1306    /// - If `name` does not resolve to an item in `self.inner`.
1307    pub fn resolve_idx(&self, name: &[Ident]) -> Result<usize, ExternalReferenceResolutionError> {
1308        let (_db, _schema, idx) = self.resolve_inner(name)?;
1309        Ok(idx)
1310    }
1311
1312    /// Returns the index from which it originated in the `referenceable_items`
1313    /// provided to [`Self::new`].
1314    ///
1315    /// # Args
1316    /// `name` is `&[Ident]` to let users provide the inner element of
1317    /// [`UnresolvedItemName`].
1318    ///
1319    /// # Return
1320    /// Returns a tuple whose elements are:
1321    /// 1. The "database"- or top-level namespace of the reference.
1322    /// 2. The "schema"- or second-level namespace of the reference.
1323    /// 3. The index to find the item in `referenceable_items` argument provided
1324    ///    to `SourceReferenceResolver::new`.
1325    ///
1326    /// # Errors
1327    /// - If `name` does not resolve to an item in `self.inner`.
1328    fn resolve_inner<'name: 'a>(
1329        &'a self,
1330        name: &'name [Ident],
1331    ) -> Result<(&'a Ident, &'a Ident, usize), ExternalReferenceResolutionError> {
1332        let get_provided_name = || UnresolvedItemName(name.to_vec()).to_string();
1333
1334        // Names must be composed of 1..=3 elements.
1335        if !(1..=3).contains(&name.len()) {
1336            Err(ExternalReferenceResolutionError::DoesNotExist {
1337                name: get_provided_name(),
1338            })?;
1339        }
1340
1341        // Fill in the leading elements with `None` if they aren't present.
1342        let mut names = std::iter::repeat(None)
1343            .take(3 - name.len())
1344            .chain(name.iter().map(Some));
1345
1346        let database = names.next().flatten();
1347        let schema = names.next().flatten();
1348        let item = names
1349            .next()
1350            .flatten()
1351            .expect("must have provided the item name");
1352
1353        assert_none!(names.next(), "expected a 3-element iterator");
1354
1355        let schemas =
1356            self.inner
1357                .get(item)
1358                .ok_or_else(|| ExternalReferenceResolutionError::DoesNotExist {
1359                    name: get_provided_name(),
1360                })?;
1361
1362        let schema = match schema {
1363            Some(schema) => schema,
1364            None => schemas.keys().exactly_one().map_err(|_e| {
1365                ExternalReferenceResolutionError::Ambiguous {
1366                    name: get_provided_name(),
1367                }
1368            })?,
1369        };
1370
1371        let databases =
1372            schemas
1373                .get(schema)
1374                .ok_or_else(|| ExternalReferenceResolutionError::DoesNotExist {
1375                    name: get_provided_name(),
1376                })?;
1377
1378        let database = match database {
1379            Some(database) => database,
1380            None => databases.keys().exactly_one().map_err(|_e| {
1381                ExternalReferenceResolutionError::Ambiguous {
1382                    name: get_provided_name(),
1383                }
1384            })?,
1385        };
1386
1387        let reference_idx = databases.get(database).ok_or_else(|| {
1388            ExternalReferenceResolutionError::DoesNotExist {
1389                name: get_provided_name(),
1390            }
1391        })?;
1392
1393        Ok((database, schema, *reference_idx))
1394    }
1395}
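// A minimal sketch of reference resolution using `(schema, item)` string pairs, which
// implement `ExternalCatalogReference`; the database, schema, and table names below
// are hypothetical.
#[cfg(test)]
mod source_reference_resolver_example {
    use super::*;

    #[test]
    fn resolve_partial_references() {
        let items = [("public", "orders"), ("public", "customers")];
        let resolver = SourceReferenceResolver::new("materialize", &items).unwrap();

        // A two-element reference resolves as long as it is unambiguous; the
        // canonical name is padded out to the requested width.
        let name = [Ident::new("public").unwrap(), Ident::new("orders").unwrap()];
        let (canonical, idx) = resolver.resolve(&name, 3).unwrap();
        assert_eq!(idx, 0);
        assert_eq!(canonical.0.len(), 3);

        // A bare item name that exists under exactly one schema also resolves.
        let item = [Ident::new("customers").unwrap()];
        assert_eq!(resolver.resolve_idx(&item).unwrap(), 1);
    }
}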
1396
1397/// A decoder for [`Row`]s within [`SourceData`].
1398///
1399/// This type exists as a wrapper around [`RowColumnarDecoder`] to handle the
1400/// case where the [`RelationDesc`] we're encoding with has no columns. See
1401/// [`SourceDataRowColumnarEncoder`] for more details.
1402#[derive(Debug)]
1403pub enum SourceDataRowColumnarDecoder {
1404    Row(RowColumnarDecoder),
1405    EmptyRow,
1406}
1407
1408impl SourceDataRowColumnarDecoder {
1409    pub fn decode(&self, idx: usize, row: &mut Row) {
1410        match self {
1411            SourceDataRowColumnarDecoder::Row(decoder) => decoder.decode(idx, row),
1412            SourceDataRowColumnarDecoder::EmptyRow => {
1413                // Create a packer just to clear the Row.
1414                row.packer();
1415            }
1416        }
1417    }
1418
1419    pub fn goodbytes(&self) -> usize {
1420        match self {
1421            SourceDataRowColumnarDecoder::Row(decoder) => decoder.goodbytes(),
1422            SourceDataRowColumnarDecoder::EmptyRow => 0,
1423        }
1424    }
1425}
1426
1427#[derive(Debug)]
1428pub struct SourceDataColumnarDecoder {
1429    row_decoder: SourceDataRowColumnarDecoder,
1430    err_decoder: BinaryArray,
1431}
1432
1433impl SourceDataColumnarDecoder {
1434    pub fn new(col: StructArray, desc: &RelationDesc) -> Result<Self, anyhow::Error> {
1435        // TODO(parkmcar): We should validate the fields here.
1436        let (_fields, arrays, nullability) = col.into_parts();
1437
1438        if nullability.is_some() {
1439            anyhow::bail!("SourceData is not nullable, but found {nullability:?}");
1440        }
1441        if arrays.len() != 2 {
1442            anyhow::bail!("SourceData should only have two fields, found {arrays:?}");
1443        }
1444
1445        let errs = arrays[1]
1446            .as_any()
1447            .downcast_ref::<BinaryArray>()
1448            .ok_or_else(|| anyhow::anyhow!("expected BinaryArray, found {:?}", arrays[1]))?;
1449
1450        let row_decoder = match arrays[0].data_type() {
1451            arrow::datatypes::DataType::Struct(_) => {
1452                let rows = arrays[0]
1453                    .as_any()
1454                    .downcast_ref::<StructArray>()
1455                    .ok_or_else(|| {
1456                        anyhow::anyhow!("expected StructArray, found {:?}", arrays[0])
1457                    })?;
1458                let decoder = RowColumnarDecoder::new(rows.clone(), desc)?;
1459                SourceDataRowColumnarDecoder::Row(decoder)
1460            }
1461            arrow::datatypes::DataType::Null => SourceDataRowColumnarDecoder::EmptyRow,
1462            other => anyhow::bail!("expected Struct or Null Array, found {other:?}"),
1463        };
1464
1465        Ok(SourceDataColumnarDecoder {
1466            row_decoder,
1467            err_decoder: errs.clone(),
1468        })
1469    }
1470}
1471
1472impl ColumnDecoder<SourceData> for SourceDataColumnarDecoder {
1473    fn decode(&self, idx: usize, val: &mut SourceData) {
1474        let err_null = self.err_decoder.is_null(idx);
1475        let row_null = match &self.row_decoder {
1476            SourceDataRowColumnarDecoder::Row(decoder) => decoder.is_null(idx),
1477            SourceDataRowColumnarDecoder::EmptyRow => !err_null,
1478        };
1479
1480        match (row_null, err_null) {
1481            (true, false) => {
1482                let err = self.err_decoder.value(idx);
1483                let err = ProtoDataflowError::decode(err)
1484                    .expect("proto should be valid")
1485                    .into_rust()
1486                    .expect("error should be valid");
1487                val.0 = Err(err);
1488            }
1489            (false, true) => {
1490                let row = match val.0.as_mut() {
1491                    Ok(row) => row,
1492                    Err(_) => {
1493                        val.0 = Ok(Row::default());
1494                        val.0.as_mut().unwrap()
1495                    }
1496                };
1497                self.row_decoder.decode(idx, row);
1498            }
1499            (true, true) => panic!("should have one of 'ok' or 'err'"),
1500            (false, false) => panic!("cannot have both 'ok' and 'err'"),
1501        }
1502    }
1503
1504    fn is_null(&self, idx: usize) -> bool {
1505        let err_null = self.err_decoder.is_null(idx);
1506        let row_null = match &self.row_decoder {
1507            SourceDataRowColumnarDecoder::Row(decoder) => decoder.is_null(idx),
1508            SourceDataRowColumnarDecoder::EmptyRow => !err_null,
1509        };
1510        assert!(!err_null || !row_null, "SourceData should never be null!");
1511
1512        false
1513    }
1514
1515    fn goodbytes(&self) -> usize {
1516        self.row_decoder.goodbytes() + ArrayOrd::Binary(self.err_decoder.clone()).goodbytes()
1517    }
1518
1519    fn stats(&self) -> StructStats {
1520        let len = self.err_decoder.len();
1521        let err_stats = ColumnarStats {
1522            nulls: Some(ColumnNullStats {
1523                count: self.err_decoder.null_count(),
1524            }),
1525            values: PrimitiveStats::<Vec<u8>>::from_column(&self.err_decoder).into(),
1526        };
1527        // The top level struct is non-nullable and every entry is either an
1528        // `Ok(Row)` or an `Err(DataflowError)`. As a result, the number of nulls
1529        // in the row column (i.e. the number of `Err` entries) is the total
1530        // length minus the error column's null count (the number of `Ok`s).
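        //
        // For example (illustrative numbers only): with `len == 10` and 3 `Err`
        // entries, the error column has 7 nulls (one per `Ok` row), so
        // `row_null_count == 10 - 7 == 3`.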
1531        let row_null_count = len - self.err_decoder.null_count();
1532        let row_stats = match &self.row_decoder {
1533            SourceDataRowColumnarDecoder::Row(decoder) => {
1534                // Sanity check that the number of row nulls/nones we calculated
1535                // using the error column matches what the row column thinks it
1536                // has.
1537                assert_eq!(decoder.null_count(), row_null_count);
1538                decoder.stats()
1539            }
1540            SourceDataRowColumnarDecoder::EmptyRow => StructStats {
1541                len,
1542                cols: BTreeMap::default(),
1543            },
1544        };
1545        let row_stats = ColumnarStats {
1546            nulls: Some(ColumnNullStats {
1547                count: row_null_count,
1548            }),
1549            values: ColumnStatKinds::Struct(row_stats),
1550        };
1551
1552        let stats = [
1553            (
1554                SourceDataColumnarEncoder::OK_COLUMN_NAME.to_string(),
1555                row_stats,
1556            ),
1557            (
1558                SourceDataColumnarEncoder::ERR_COLUMN_NAME.to_string(),
1559                err_stats,
1560            ),
1561        ];
1562        StructStats {
1563            len,
1564            cols: stats.into_iter().collect(),
1565        }
1566    }
1567}
1568
1569/// An encoder for [`Row`]s within [`SourceData`].
1570///
1571/// This type exists as a wrapper around [`RowColumnarEncoder`] to support
1572/// encoding empty [`Row`]s. A [`RowColumnarEncoder`] finishes as a
1573/// [`StructArray`] which is required to have at least one column, and thus
1574/// cannot support empty [`Row`]s.
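///
/// A minimal sketch (hypothetical, not a doctest) of when the `EmptyRow`
/// variant is used: for a `RelationDesc` with no columns,
/// `RowColumnarEncoder::new` returns `None`, so [`SourceDataColumnarEncoder`]
/// falls back to `EmptyRow` and later finishes the "ok" column as a
/// `NullArray`:
///
/// ```ignore
/// let desc = RelationDesc::empty();
/// assert!(RowColumnarEncoder::new(&desc).is_none());
/// let encoder = SourceDataColumnarEncoder::new(&desc); // row_encoder is `EmptyRow`
/// ```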
1575#[derive(Debug)]
1576pub enum SourceDataRowColumnarEncoder {
1577    Row(RowColumnarEncoder),
1578    EmptyRow,
1579}
1580
1581impl SourceDataRowColumnarEncoder {
1582    pub(crate) fn goodbytes(&self) -> usize {
1583        match self {
1584            SourceDataRowColumnarEncoder::Row(e) => e.goodbytes(),
1585            SourceDataRowColumnarEncoder::EmptyRow => 0,
1586        }
1587    }
1588
1589    pub fn append(&mut self, row: &Row) {
1590        match self {
1591            SourceDataRowColumnarEncoder::Row(encoder) => encoder.append(row),
1592            SourceDataRowColumnarEncoder::EmptyRow => {
1593                assert_eq!(row.iter().count(), 0)
1594            }
1595        }
1596    }
1597
1598    pub fn append_null(&mut self) {
1599        match self {
1600            SourceDataRowColumnarEncoder::Row(encoder) => encoder.append_null(),
1601            SourceDataRowColumnarEncoder::EmptyRow => (),
1602        }
1603    }
1604}
1605
1606#[derive(Debug)]
1607pub struct SourceDataColumnarEncoder {
1608    row_encoder: SourceDataRowColumnarEncoder,
1609    err_encoder: BinaryBuilder,
1610}
1611
1612impl SourceDataColumnarEncoder {
1613    const OK_COLUMN_NAME: &'static str = "ok";
1614    const ERR_COLUMN_NAME: &'static str = "err";
1615
1616    pub fn new(desc: &RelationDesc) -> Self {
1617        let row_encoder = match RowColumnarEncoder::new(desc) {
1618            Some(encoder) => SourceDataRowColumnarEncoder::Row(encoder),
1619            None => {
1620                assert!(desc.typ().columns().is_empty());
1621                SourceDataRowColumnarEncoder::EmptyRow
1622            }
1623        };
1624        let err_encoder = BinaryBuilder::new();
1625
1626        SourceDataColumnarEncoder {
1627            row_encoder,
1628            err_encoder,
1629        }
1630    }
1631}
1632
1633impl ColumnEncoder<SourceData> for SourceDataColumnarEncoder {
1634    type FinishedColumn = StructArray;
1635
1636    fn goodbytes(&self) -> usize {
1637        self.row_encoder.goodbytes() + self.err_encoder.values_slice().len()
1638    }
1639
1640    #[inline]
1641    fn append(&mut self, val: &SourceData) {
1642        match val.0.as_ref() {
1643            Ok(row) => {
1644                self.row_encoder.append(row);
1645                self.err_encoder.append_null();
1646            }
1647            Err(err) => {
1648                self.row_encoder.append_null();
1649                self.err_encoder
1650                    .append_value(err.into_proto().encode_to_vec());
1651            }
1652        }
1653    }
1654
1655    #[inline]
1656    fn append_null(&mut self) {
1657        panic!("appending a null into SourceDataColumnarEncoder is not supported");
1658    }
1659
1660    fn finish(self) -> Self::FinishedColumn {
1661        let SourceDataColumnarEncoder {
1662            row_encoder,
1663            mut err_encoder,
1664        } = self;
1665
1666        let err_column = BinaryBuilder::finish(&mut err_encoder);
1667        let row_column: ArrayRef = match row_encoder {
1668            SourceDataRowColumnarEncoder::Row(encoder) => {
1669                let column = encoder.finish();
1670                Arc::new(column)
1671            }
1672            SourceDataRowColumnarEncoder::EmptyRow => Arc::new(NullArray::new(err_column.len())),
1673        };
1674
1675        assert_eq!(row_column.len(), err_column.len());
1676
1677        let fields = vec![
1678            Field::new(Self::OK_COLUMN_NAME, row_column.data_type().clone(), true),
1679            Field::new(Self::ERR_COLUMN_NAME, err_column.data_type().clone(), true),
1680        ];
1681        let arrays: Vec<Arc<dyn Array>> = vec![row_column, Arc::new(err_column)];
1682        StructArray::new(Fields::from(fields), arrays, None)
1683    }
1684}
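
// A sketch of the Arrow layout produced by `finish` above. The child names come
// from the constants on `SourceDataColumnarEncoder`; the exact layout of "ok"
// depends on the `RelationDesc` (and is a `NullArray` for empty relations):
//
//     StructArray {
//         "ok":  StructArray { .. } or NullArray,  // null for `Err` entries
//         "err": BinaryArray,                       // proto-encoded `DataflowError`
//     }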
1685
1686impl Schema<SourceData> for RelationDesc {
1687    type ArrowColumn = StructArray;
1688    type Statistics = StructStats;
1689
1690    type Decoder = SourceDataColumnarDecoder;
1691    type Encoder = SourceDataColumnarEncoder;
1692
1693    fn decoder(&self, col: Self::ArrowColumn) -> Result<Self::Decoder, anyhow::Error> {
1694        SourceDataColumnarDecoder::new(col, self)
1695    }
1696
1697    fn encoder(&self) -> Result<Self::Encoder, anyhow::Error> {
1698        Ok(SourceDataColumnarEncoder::new(self))
1699    }
1700}
1701
1702#[cfg(test)]
1703mod tests {
1704    use arrow::array::{ArrayData, make_comparator};
1705    use base64::Engine;
1706    use bytes::Bytes;
1707    use mz_expr::EvalError;
1708    use mz_ore::assert_err;
1709    use mz_ore::metrics::MetricsRegistry;
1710    use mz_persist::indexed::columnar::arrow::{realloc_any, realloc_array};
1711    use mz_persist::metrics::ColumnarMetrics;
1712    use mz_persist_types::parquet::EncodingConfig;
1713    use mz_persist_types::schema::{Migration, backward_compatible};
1714    use mz_persist_types::stats::{PartStats, PartStatsMetrics};
1715    use mz_repr::{
1716        ColumnIndex, DatumVec, PropRelationDescDiff, ProtoRelationDesc, RelationDescBuilder,
1717        RowArena, SqlScalarType, arb_relation_desc_diff, arb_relation_desc_projection,
1718    };
1719    use proptest::prelude::*;
1720    use proptest::strategy::{Union, ValueTree};
1721
1722    use crate::stats::RelationPartStats;
1723
1724    use super::*;
1725
1726    #[mz_ore::test]
1727    fn test_timeline_parsing() {
1728        assert_eq!(Ok(Timeline::EpochMilliseconds), "M".parse());
1729        assert_eq!(Ok(Timeline::External("JOE".to_string())), "E.JOE".parse());
1730        assert_eq!(Ok(Timeline::User("MIKE".to_string())), "U.MIKE".parse());
1731
1732        assert_err!("Materialize".parse::<Timeline>());
1733        assert_err!("Ejoe".parse::<Timeline>());
1734        assert_err!("Umike".parse::<Timeline>());
1735        assert_err!("Dance".parse::<Timeline>());
1736        assert_err!("".parse::<Timeline>());
1737    }
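
    /// A minimal, hand-rolled sketch of the columnar encode/decode round trip that
    /// `roundtrip_source_data` below exercises exhaustively. The relation, values,
    /// and test name here are illustrative only.
    #[mz_ore::test]
    fn source_data_columnar_roundtrip_sketch() {
        let desc = RelationDescBuilder::default()
            .with_column("a", SqlScalarType::Bool.nullable(true))
            .finish();

        let ok = SourceData(Ok(Row::pack([Datum::True])));
        let err = SourceData(Err(DataflowError::EvalError(
            EvalError::DateOutOfRange.into(),
        )));

        // Encode both values into a single StructArray with "ok"/"err" children.
        let mut encoder = <RelationDesc as Schema<SourceData>>::encoder(&desc).unwrap();
        encoder.append(&ok);
        encoder.append(&err);
        let col = encoder.finish();
        assert_eq!(col.len(), 2);
        assert!(!col.is_nullable());

        // Decode them back out and check that both values round trip.
        let decoder = <RelationDesc as Schema<SourceData>>::decoder(&desc, col).unwrap();
        let mut rnd = SourceData(Ok(Row::default()));
        decoder.decode(0, &mut rnd);
        assert_eq!(rnd, ok);
        decoder.decode(1, &mut rnd);
        assert_eq!(rnd, err);
    }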
1738
1739    #[track_caller]
1740    fn roundtrip_source_data(
1741        desc: &RelationDesc,
1742        datas: Vec<SourceData>,
1743        read_desc: &RelationDesc,
1744        config: &EncodingConfig,
1745    ) {
1746        let metrics = ColumnarMetrics::disconnected();
1747        let mut encoder = <RelationDesc as Schema<SourceData>>::encoder(desc).unwrap();
1748        for data in &datas {
1749            encoder.append(data);
1750        }
1751        let col = encoder.finish();
1752
1753        // The top-level StructArray for SourceData should always be non-nullable.
1754        assert!(!col.is_nullable());
1755
1756        // Reallocate our arrays with lgalloc.
1757        let col = realloc_array(&col, &metrics);
1758
1759        // Roundtrip through the `ProtoArrayData` format.
1760        {
1761            let proto = col.to_data().into_proto();
1762            let bytes = proto.encode_to_vec();
1763            let proto = mz_persist_types::arrow::ProtoArrayData::decode(&bytes[..]).unwrap();
1764            let array_data: ArrayData = proto.into_rust().unwrap();
1765
1766            let col_rnd = StructArray::from(array_data.clone());
1767            assert_eq!(col, col_rnd);
1768
1769            let col_dyn = arrow::array::make_array(array_data);
1770            let col_dyn = col_dyn.as_any().downcast_ref::<StructArray>().unwrap();
1771            assert_eq!(&col, col_dyn);
1772        }
1773
1774        // Encode to Parquet.
1775        let mut buf = Vec::new();
1776        let fields = Fields::from(vec![Field::new("k", col.data_type().clone(), false)]);
1777        let arrays: Vec<Arc<dyn Array>> = vec![Arc::new(col.clone())];
1778        mz_persist_types::parquet::encode_arrays(&mut buf, fields, arrays, config).unwrap();
1779
1780        // Decode from Parquet.
1781        let buf = Bytes::from(buf);
1782        let mut reader = mz_persist_types::parquet::decode_arrays(buf).unwrap();
1783        let maybe_batch = reader.next();
1784
1785        // If we didn't encode any data then the reader yields no record batch at all.
1786        let Some(record_batch) = maybe_batch else {
1787            assert!(datas.is_empty());
1788            return;
1789        };
1790        let record_batch = record_batch.unwrap();
1791
1792        assert_eq!(record_batch.columns().len(), 1);
1793        let rnd_col = &record_batch.columns()[0];
1794        let rnd_col = realloc_any(Arc::clone(rnd_col), &metrics);
1795        let rnd_col = rnd_col
1796            .as_any()
1797            .downcast_ref::<StructArray>()
1798            .unwrap()
1799            .clone();
1800
1801        // Generate stats for the data; they're validated against the decoded rows below.
1802        let stats = <RelationDesc as Schema<SourceData>>::decoder_any(desc, &rnd_col)
1803            .expect("valid decoder")
1804            .stats();
1805
1806        // Read back all of our data and assert it roundtrips.
1807        let mut rnd_data = SourceData(Ok(Row::default()));
1808        let decoder = <RelationDesc as Schema<SourceData>>::decoder(desc, rnd_col.clone()).unwrap();
1809        for (idx, og_data) in datas.iter().enumerate() {
1810            decoder.decode(idx, &mut rnd_data);
1811            assert_eq!(og_data, &rnd_data);
1812        }
1813
1814        // Read back all of our data a second time with a projection applied, and make sure the
1815        // stats are valid.
1816        let stats_metrics = PartStatsMetrics::new(&MetricsRegistry::new());
1817        let stats = RelationPartStats {
1818            name: "test",
1819            metrics: &stats_metrics,
1820            stats: &PartStats { key: stats },
1821            desc: read_desc,
1822        };
1823        let mut datum_vec = DatumVec::new();
1824        let arena = RowArena::default();
1825        let decoder = <RelationDesc as Schema<SourceData>>::decoder(read_desc, rnd_col).unwrap();
1826
1827        for (idx, og_data) in datas.iter().enumerate() {
1828            decoder.decode(idx, &mut rnd_data);
1829            match (&og_data.0, &rnd_data.0) {
1830                (Ok(og_row), Ok(rnd_row)) => {
1831                    // Filter down to just the Datums in the projection schema.
1832                    {
1833                        let datums = datum_vec.borrow_with(og_row);
1834                        let projected_datums =
1835                            datums.iter().enumerate().filter_map(|(idx, datum)| {
1836                                read_desc
1837                                    .contains_index(&ColumnIndex::from_raw(idx))
1838                                    .then_some(datum)
1839                            });
1840                        let og_projected_row = Row::pack(projected_datums);
1841                        assert_eq!(&og_projected_row, rnd_row);
1842                    }
1843
1844                    // Validate the stats for all of our projected columns.
1845                    {
1846                        let proj_datums = datum_vec.borrow_with(rnd_row);
1847                        for (pos, (idx, _, _)) in read_desc.iter_all().enumerate() {
1848                            let spec = stats.col_stats(idx, &arena);
1849                            assert!(spec.may_contain(proj_datums[pos]));
1850                        }
1851                    }
1852                }
1853                (Err(_), Err(_)) => assert_eq!(og_data, &rnd_data),
1854                (_, _) => panic!("decoded to a different type? {og_data:?} {rnd_data:?}"),
1855            }
1856        }
1857
1858        // Verify that the RelationDesc itself roundtrips through
1859        // {encode,decode}_schema.
1860        let encoded_schema = SourceData::encode_schema(desc);
1861        let roundtrip_desc = SourceData::decode_schema(&encoded_schema);
1862        assert_eq!(desc, &roundtrip_desc);
1863
1864        // Verify that the RelationDesc is backward compatible with itself (this
1865        // mostly checks for `unimplemented!` type panics).
1866        let migration =
1867            mz_persist_types::schema::backward_compatible(col.data_type(), col.data_type());
1868        let migration = migration.expect("should be backward compatible with self");
1869        // Also verify that the Fn doesn't do anything wonky.
1870        let migrated = migration.migrate(Arc::new(col.clone()));
1871        assert_eq!(col.data_type(), migrated.data_type());
1872    }
1873
1874    #[mz_ore::test]
1875    #[cfg_attr(miri, ignore)] // unsupported operation: can't call foreign function `decContextDefault` on OS `linux`
1876    fn all_source_data_roundtrips() {
1877        let mut weights = vec![(500, Just(0..8)), (50, Just(8..32))];
1878        if std::env::var("PROPTEST_LARGE_DATA").is_ok() {
1879            weights.extend([
1880                (10, Just(32..128)),
1881                (5, Just(128..512)),
1882                (3, Just(512..2048)),
1883                (1, Just(2048..8192)),
1884            ]);
1885        }
1886        let num_rows = Union::new_weighted(weights);
1887
1888        // TODO(parkmycar): There are a lot of clones going on here; maybe we can avoid some of them?
1889        let strat = (any::<RelationDesc>(), num_rows)
1890            .prop_flat_map(|(desc, num_rows)| {
1891                arb_relation_desc_projection(desc.clone())
1892                    .prop_map(move |read_desc| (desc.clone(), read_desc, num_rows.clone()))
1893            })
1894            .prop_flat_map(|(desc, read_desc, num_rows)| {
1895                proptest::collection::vec(arb_source_data_for_relation_desc(&desc), num_rows)
1896                    .prop_map(move |datas| (desc.clone(), datas, read_desc.clone()))
1897            });
1898
1899        let combined_strat = (any::<EncodingConfig>(), strat);
1900        proptest!(|((config, (desc, source_datas, read_desc)) in combined_strat)| {
1901            roundtrip_source_data(&desc, source_datas, &read_desc, &config);
1902        });
1903    }
1904
1905    #[mz_ore::test]
1906    fn roundtrip_error_nulls() {
1907        let desc = RelationDescBuilder::default()
1908            .with_column(
1909                "ts",
1910                SqlScalarType::TimestampTz { precision: None }.nullable(false),
1911            )
1912            .finish();
1913        let source_datas = vec![SourceData(Err(DataflowError::EvalError(
1914            EvalError::DateOutOfRange.into(),
1915        )))];
1916        let config = EncodingConfig::default();
1917        roundtrip_source_data(&desc, source_datas, &desc, &config);
1918    }
1919
1920    fn is_sorted(array: &dyn Array) -> bool {
1921        let sort_options = arrow::compute::SortOptions::default();
1922        let Ok(cmp) = make_comparator(array, array, sort_options) else {
1923            // TODO: arrow v51.0.0 doesn't support comparing structs, so we
1924            // conservatively return `false` if the comparator can't be built.
1925            // Once we're on v52+, where `make_comparator` (the replacement for
1926            // the deprecated `build_compare`) supports structs, this early
1927            // return can likely become an `expect`.
1928            return false;
1929        };
1930        (0..array.len())
1931            .tuple_windows()
1932            .all(|(i, j)| cmp(i, j).is_le())
1933    }
1934
1935    fn get_data_type(schema: &impl Schema<SourceData>) -> arrow::datatypes::DataType {
1936        use mz_persist_types::columnar::ColumnEncoder;
1937        let array = Schema::encoder(schema).expect("valid schema").finish();
1938        Array::data_type(&array).clone()
1939    }
1940
1941    #[track_caller]
1942    fn backward_compatible_testcase(
1943        old: &RelationDesc,
1944        new: &RelationDesc,
1945        migration: Migration,
1946        datas: &[SourceData],
1947    ) {
1948        let mut encoder = Schema::<SourceData>::encoder(old).expect("valid schema");
1949        for data in datas {
1950            encoder.append(data);
1951        }
1952        let old = encoder.finish();
1953        let new = Schema::<SourceData>::encoder(new)
1954            .expect("valid schema")
1955            .finish();
1956        let old: Arc<dyn Array> = Arc::new(old);
1957        let new: Arc<dyn Array> = Arc::new(new);
1958        let migrated = migration.migrate(Arc::clone(&old));
1959        assert_eq!(migrated.data_type(), new.data_type());
1960
1961        // Check that sortedness is preserved by the migration, if we can.
1962        if migration.preserves_order() && is_sorted(&old) {
1963            assert!(is_sorted(&migrated))
1964        }
1965    }
1966
1967    #[mz_ore::test]
1968    fn backward_compatible_empty_add_column() {
1969        let old = RelationDesc::empty();
1970        let new = RelationDesc::from_names_and_types([("a", SqlScalarType::Bool.nullable(true))]);
1971
1972        let old_data_type = get_data_type(&old);
1973        let new_data_type = get_data_type(&new);
1974
1975        let migration = backward_compatible(&old_data_type, &new_data_type);
1976        assert!(migration.is_some());
1977    }
1978
1979    #[mz_ore::test]
1980    fn backward_compatible_project_away_all() {
1981        let old = RelationDesc::from_names_and_types([("a", SqlScalarType::Bool.nullable(true))]);
1982        let new = RelationDesc::empty();
1983
1984        let old_data_type = get_data_type(&old);
1985        let new_data_type = get_data_type(&new);
1986
1987        let migration = backward_compatible(&old_data_type, &new_data_type);
1988        assert!(migration.is_some());
1989    }
1990
1991    #[mz_ore::test]
1992    #[cfg_attr(miri, ignore)]
1993    fn backward_compatible_migrate() {
1994        let strat = (any::<RelationDesc>(), any::<RelationDesc>()).prop_flat_map(|(old, new)| {
1995            proptest::collection::vec(arb_source_data_for_relation_desc(&old), 2)
1996                .prop_map(move |datas| (old.clone(), new.clone(), datas))
1997        });
1998
1999        proptest!(|((old, new, datas) in strat)| {
2000            let old_data_type = get_data_type(&old);
2001            let new_data_type = get_data_type(&new);
2002
2003            if let Some(migration) = backward_compatible(&old_data_type, &new_data_type) {
2004                backward_compatible_testcase(&old, &new, migration, &datas);
2005            };
2006        });
2007    }
2008
2009    #[mz_ore::test]
2010    #[cfg_attr(miri, ignore)]
2011    fn backward_compatible_migrate_from_common() {
2012        use mz_repr::SqlColumnType;
2013        fn test_case(old: RelationDesc, diffs: Vec<PropRelationDescDiff>, datas: Vec<SourceData>) {
2014            // TODO(parkmycar): As we iterate on schema migrations more things should become compatible.
2015            let should_be_compatible = diffs.iter().all(|diff| match diff {
2016                // We only support adding nullable columns.
2017                PropRelationDescDiff::AddColumn {
2018                    typ: SqlColumnType { nullable, .. },
2019                    ..
2020                } => *nullable,
2021                PropRelationDescDiff::DropColumn { .. } => true,
2022                _ => false,
2023            });
2024
2025            let mut new = old.clone();
2026            for diff in diffs.into_iter() {
2027                diff.apply(&mut new)
2028            }
2029
2030            let old_data_type = get_data_type(&old);
2031            let new_data_type = get_data_type(&new);
2032
2033            if let Some(migration) = backward_compatible(&old_data_type, &new_data_type) {
2034                backward_compatible_testcase(&old, &new, migration, &datas);
2035            } else if should_be_compatible {
2036                panic!("new DataType was not compatible when it should have been!");
2037            }
2038        }
2039
2040        let strat = any::<RelationDesc>()
2041            .prop_flat_map(|desc| {
2042                proptest::collection::vec(arb_source_data_for_relation_desc(&desc), 2)
2043                    .no_shrink()
2044                    .prop_map(move |datas| (desc.clone(), datas))
2045            })
2046            .prop_flat_map(|(desc, datas)| {
2047                arb_relation_desc_diff(&desc)
2048                    .prop_map(move |diffs| (desc.clone(), diffs, datas.clone()))
2049            });
2050
2051        proptest!(|((old, diffs, datas) in strat)| {
2052            test_case(old, diffs, datas);
2053        });
2054    }
2055
2056    #[mz_ore::test]
2057    #[cfg_attr(miri, ignore)] // unsupported operation: can't call foreign function `decContextDefault` on OS `linux`
2058    fn empty_relation_desc_roundtrips() {
2059        let empty = RelationDesc::empty();
2060        let rows = proptest::collection::vec(arb_source_data_for_relation_desc(&empty), 0..8)
2061            .prop_map(move |datas| (empty.clone(), datas));
2062
2063        // Note: This case should be covered by the `all_source_data_roundtrips` test above, but
2064        // it's a special case that we explicitly want to exercise.
2065        proptest!(|((config, (desc, source_datas)) in (any::<EncodingConfig>(), rows))| {
2066            roundtrip_source_data(&desc, source_datas, &desc, &config);
2067        });
2068    }
2069
2070    #[mz_ore::test]
2071    #[cfg_attr(miri, ignore)] // unsupported operation: can't call foreign function `decContextDefault` on OS `linux`
2072    fn arrow_datatype_consistent() {
2073        fn test_case(desc: RelationDesc, datas: Vec<SourceData>) {
2074            let half = datas.len() / 2;
2075
2076            let mut encoder_a = <RelationDesc as Schema<SourceData>>::encoder(&desc).unwrap();
2077            for data in &datas[..half] {
2078                encoder_a.append(data);
2079            }
2080            let col_a = encoder_a.finish();
2081
2082            let mut encoder_b = <RelationDesc as Schema<SourceData>>::encoder(&desc).unwrap();
2083            for data in &datas[half..] {
2084                encoder_b.append(data);
2085            }
2086            let col_b = encoder_b.finish();
2087
2088            // The DataType of the resulting column should not change based on what data was
2089            // encoded.
2090            assert_eq!(col_a.data_type(), col_b.data_type());
2091        }
2092
2093        let num_rows = 12;
2094        let strat = any::<RelationDesc>().prop_flat_map(|desc| {
2095            proptest::collection::vec(arb_source_data_for_relation_desc(&desc), num_rows)
2096                .prop_map(move |datas| (desc.clone(), datas))
2097        });
2098
2099        proptest!(|((desc, data) in strat)| {
2100            test_case(desc, data);
2101        });
2102    }
2103
2104    #[mz_ore::test]
2105    #[cfg_attr(miri, ignore)] // too slow
2106    fn source_proto_serialization_stability() {
2107        let min_protos = 10;
2108        let encoded = include_str!("snapshots/source-datas.txt");
2109
2110        // Decode the pre-generated source datas
2111        let mut decoded: Vec<(RelationDesc, SourceData)> = encoded
2112            .lines()
2113            .map(|s| {
2114                let (desc, data) = s.split_once(',').expect("comma separated data");
2115                let desc = base64::engine::general_purpose::STANDARD
2116                    .decode(desc)
2117                    .expect("valid base64");
2118                let data = base64::engine::general_purpose::STANDARD
2119                    .decode(data)
2120                    .expect("valid base64");
2121                (desc, data)
2122            })
2123            .map(|(desc, data)| {
2124                let desc = ProtoRelationDesc::decode(&desc[..]).expect("valid proto");
2125                let desc = desc.into_rust().expect("valid proto");
2126                let data = SourceData::decode(&data, &desc).expect("valid proto");
2127                (desc, data)
2128            })
2129            .collect();
2130
2131        // If there are fewer than the minimum number of examples, generate new ones arbitrarily.
2132        let mut runner = proptest::test_runner::TestRunner::deterministic();
2133        let strategy = RelationDesc::arbitrary().prop_flat_map(|desc| {
2134            arb_source_data_for_relation_desc(&desc).prop_map(move |data| (desc.clone(), data))
2135        });
2136        while decoded.len() < min_protos {
2137            let arbitrary_data = strategy
2138                .new_tree(&mut runner)
2139                .expect("source data")
2140                .current();
2141            decoded.push(arbitrary_data);
2142        }
2143
2144        // Reencode and compare the strings
2145        let mut reencoded = String::new();
2146        let mut buf = vec![];
2147        for (desc, data) in decoded {
2148            buf.clear();
2149            desc.into_proto().encode(&mut buf).expect("success");
2150            base64::engine::general_purpose::STANDARD.encode_string(buf.as_slice(), &mut reencoded);
2151            reencoded.push(',');
2152
2153            buf.clear();
2154            data.encode(&mut buf);
2155            base64::engine::general_purpose::STANDARD.encode_string(buf.as_slice(), &mut reencoded);
2156            reencoded.push('\n');
2157        }
2158
2159        // Optimizations in Persist, particularly consolidation on read,
2160        // depend on a stable serialization for the serialized data.
2161        // For example, reordering proto fields could cause us
2162        // to generate a different (equivalent) serialization for a record,
2163        // and the two versions would not consolidate out.
2164        // This can impact correctness!
2165        //
2166        // If you need to change how SourceDatas are encoded, that's still fine...
2167        // but we'll also need to increase
2168        // the MINIMUM_CONSOLIDATED_VERSION as part of the same release.
2169        assert_eq!(
2170            encoded,
2171            reencoded.as_str(),
2172            "SourceData serde should be stable"
2173        )
2174    }
2175}