use std::collections::BTreeMap;
use std::fmt::Debug;
use std::hash::Hash;
use std::ops::{Add, AddAssign, Deref, DerefMut};
use std::str::FromStr;
use std::sync::Arc;
use std::time::Duration;

use arrow::array::{Array, ArrayRef, BinaryArray, BinaryBuilder, NullArray, StructArray};
use arrow::datatypes::{Field, Fields};
use bytes::{BufMut, Bytes};
use columnation::Columnation;
use itertools::EitherOrBoth::Both;
use itertools::Itertools;
use kafka::KafkaSourceExportDetails;
use load_generator::{LoadGeneratorOutput, LoadGeneratorSourceExportDetails};
use mz_ore::assert_none;
use mz_persist_types::Codec;
use mz_persist_types::arrow::ArrayOrd;
use mz_persist_types::columnar::{ColumnDecoder, ColumnEncoder, Schema};
use mz_persist_types::stats::{
    ColumnNullStats, ColumnStatKinds, ColumnarStats, ColumnarStatsBuilder, PrimitiveStats,
    StructStats,
};
use mz_proto::{IntoRustIfSome, ProtoType, RustType, TryFromProtoError};
use mz_repr::{
    CatalogItemId, Datum, GlobalId, ProtoRelationDesc, ProtoRow, RelationDesc, Row,
    RowColumnarDecoder, RowColumnarEncoder, arb_row_for_relation,
};
use mz_sql_parser::ast::{Ident, IdentError, UnresolvedItemName};
use proptest::prelude::any;
use proptest::strategy::Strategy;
use prost::Message;
use serde::{Deserialize, Serialize};
use timely::order::{PartialOrder, TotalOrder};
use timely::progress::timestamp::Refines;
use timely::progress::{PathSummary, Timestamp};

use crate::AlterCompatible;
use crate::connections::inline::{
    ConnectionAccess, ConnectionResolver, InlinedConnection, IntoInlineConnection,
    ReferencedConnection,
};
use crate::controller::AlterError;
use crate::errors::{DataflowError, ProtoDataflowError};
use crate::instances::StorageInstanceId;
use crate::sources::sql_server::SqlServerSourceExportDetails;

pub mod casts;
pub mod encoding;
pub mod envelope;
pub mod kafka;
pub mod load_generator;
pub mod mysql;
pub mod postgres;
pub mod sql_server;

pub use crate::sources::envelope::SourceEnvelope;
pub use crate::sources::kafka::KafkaSourceConnection;
pub use crate::sources::load_generator::LoadGeneratorSourceConnection;
pub use crate::sources::mysql::{MySqlSourceConnection, MySqlSourceExportDetails};
pub use crate::sources::postgres::{PostgresSourceConnection, PostgresSourceExportDetails};
pub use crate::sources::sql_server::{SqlServerSourceConnection, SqlServerSourceExtras};

include!(concat!(env!("OUT_DIR"), "/mz_storage_types.sources.rs"));

#[derive(Clone, Debug, Serialize, Deserialize, Eq, PartialEq)]
pub struct IngestionDescription<S: 'static = (), C: ConnectionAccess = InlinedConnection> {
    /// A description of the source to ingest data from.
    pub desc: SourceDesc<C>,
    /// The collections this ingestion exports data to, keyed by their `GlobalId`.
    pub source_exports: BTreeMap<GlobalId, SourceExport<S>>,
    /// The storage instance this ingestion runs on.
    pub instance_id: StorageInstanceId,
    /// The ID of the collection that tracks the remapping between source
    /// timestamps and Materialize timestamps.
    pub remap_collection_id: GlobalId,
    /// Metadata for the remap collection.
    pub remap_metadata: S,
}

impl IngestionDescription {
    pub fn new(
        desc: SourceDesc,
        instance_id: StorageInstanceId,
        remap_collection_id: GlobalId,
    ) -> Self {
        Self {
            desc,
            remap_metadata: (),
            source_exports: BTreeMap::new(),
            instance_id,
            remap_collection_id,
        }
    }
}

impl<S> IngestionDescription<S> {
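    /// Returns the IDs of all collections this ingestion writes to: every
    /// source export plus the remap collection.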
    pub fn collection_ids(&self) -> impl Iterator<Item = GlobalId> + '_ {
        // Destructure `self` so that adding a field forces this method to be
        // reconsidered.
        let IngestionDescription {
            desc: _,
            remap_metadata: _,
            source_exports,
            instance_id: _,
            remap_collection_id,
        } = &self;

        source_exports
            .keys()
            .copied()
            .chain(std::iter::once(*remap_collection_id))
    }
}

impl<S: Debug + Eq + PartialEq + AlterCompatible> AlterCompatible for IngestionDescription<S> {
    fn alter_compatible(
        &self,
        id: GlobalId,
        other: &IngestionDescription<S>,
    ) -> Result<(), AlterError> {
        if self == other {
            return Ok(());
        }
        let IngestionDescription {
            desc,
            remap_metadata,
            source_exports,
            instance_id,
            remap_collection_id,
        } = self;

        let compatibility_checks = [
            (desc.alter_compatible(id, &other.desc).is_ok(), "desc"),
            (remap_metadata == &other.remap_metadata, "remap_metadata"),
            (
                source_exports
                    .iter()
                    .merge_join_by(&other.source_exports, |(l_key, _), (r_key, _)| {
                        l_key.cmp(r_key)
                    })
                    .all(|r| match r {
                        Both(
                            (
                                _,
                                SourceExport {
                                    storage_metadata: l_metadata,
                                    details: l_details,
                                    data_config: l_data_config,
                                },
                            ),
                            (
                                _,
                                SourceExport {
                                    storage_metadata: r_metadata,
                                    details: r_details,
                                    data_config: r_data_config,
                                },
                            ),
                        ) => {
                            l_metadata.alter_compatible(id, r_metadata).is_ok()
                                && l_details.alter_compatible(id, r_details).is_ok()
                                && l_data_config.alter_compatible(id, r_data_config).is_ok()
                        }
                        _ => true,
                    }),
                "source_exports",
            ),
            (instance_id == &other.instance_id, "instance_id"),
            (
                remap_collection_id == &other.remap_collection_id,
                "remap_collection_id",
            ),
        ];
        for (compatible, field) in compatibility_checks {
            if !compatible {
                tracing::warn!(
                    "IngestionDescription incompatible at {field}:\nself:\n{:#?}\n\nother\n{:#?}",
                    self,
                    other
                );

                return Err(AlterError { id });
            }
        }

        Ok(())
    }
}

impl<R: ConnectionResolver> IntoInlineConnection<IngestionDescription, R>
    for IngestionDescription<(), ReferencedConnection>
{
    fn into_inline_connection(self, r: R) -> IngestionDescription {
        let IngestionDescription {
            desc,
            remap_metadata,
            source_exports,
            instance_id,
            remap_collection_id,
        } = self;

        IngestionDescription {
            desc: desc.into_inline_connection(r),
            remap_metadata,
            source_exports,
            instance_id,
            remap_collection_id,
        }
    }
}

#[derive(Clone, Debug, Serialize, Deserialize, Eq, PartialEq)]
pub struct SourceExport<S = (), C: ConnectionAccess = InlinedConnection> {
    /// The collection metadata needed to write the exported data.
    pub storage_metadata: S,
    /// Connection-specific details for this export.
    pub details: SourceExportDetails,
    /// The envelope and encoding configuration for this export's data.
    pub data_config: SourceExportDataConfig<C>,
}

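/// A timestamp in a source's native time domain that can round-trip through a
/// [`Row`], which is how per-source progress is durably recorded.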
pub trait SourceTimestamp:
    Timestamp + Columnation + Refines<()> + std::fmt::Display + Sync
{
    fn encode_row(&self) -> Row;
    fn decode_row(row: &Row) -> Self;
}

impl SourceTimestamp for MzOffset {
    fn encode_row(&self) -> Row {
        Row::pack([Datum::UInt64(self.offset)])
    }

    fn decode_row(row: &Row) -> Self {
        let mut datums = row.iter();
        match (datums.next(), datums.next()) {
            (Some(Datum::UInt64(offset)), None) => MzOffset::from(offset),
            _ => panic!("invalid row {row:?}"),
        }
    }
}

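/// A `u64` offset in a source's native time domain, e.g. a Kafka partition
/// offset or a Postgres LSN.
///
/// A minimal sketch of its arithmetic, assuming this module's public path is
/// `mz_storage_types::sources`:
///
/// ```ignore
/// use mz_storage_types::sources::MzOffset;
///
/// let a = MzOffset::from(3);
/// // `Add`/`AddAssign` are implemented for both `u64` and `MzOffset`.
/// let b = a + 4u64;
/// assert_eq!(b, MzOffset::from(7));
/// // Subtraction is only exposed in checked form.
/// assert_eq!(b.checked_sub(a), Some(MzOffset::from(4)));
/// assert_eq!(a.checked_sub(b), None);
/// ```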
#[derive(
    Copy,
    Clone,
    Default,
    Debug,
    PartialEq,
    PartialOrd,
    Eq,
    Ord,
    Hash,
    Serialize,
    Deserialize,
)]
pub struct MzOffset {
    pub offset: u64,
}

impl differential_dataflow::difference::Semigroup for MzOffset {
    fn plus_equals(&mut self, rhs: &Self) {
        self.offset.plus_equals(&rhs.offset)
    }
}

impl differential_dataflow::difference::IsZero for MzOffset {
    fn is_zero(&self) -> bool {
        self.offset.is_zero()
    }
}

impl mz_persist_types::Codec64 for MzOffset {
    fn codec_name() -> String {
        "MzOffset".to_string()
    }

    fn encode(&self) -> [u8; 8] {
        mz_persist_types::Codec64::encode(&self.offset)
    }

    fn decode(buf: [u8; 8]) -> Self {
        Self {
            offset: mz_persist_types::Codec64::decode(buf),
        }
    }
}

impl columnation::Columnation for MzOffset {
    type InnerRegion = columnation::CopyRegion<MzOffset>;
}

impl MzOffset {
    pub fn checked_sub(self, other: Self) -> Option<Self> {
        self.offset
            .checked_sub(other.offset)
            .map(|offset| Self { offset })
    }
}

impl From<u64> for MzOffset {
    fn from(offset: u64) -> Self {
        Self { offset }
    }
}

impl std::fmt::Display for MzOffset {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}", self.offset)
    }
}

impl Add<u64> for MzOffset {
    type Output = MzOffset;

    fn add(self, x: u64) -> MzOffset {
        MzOffset {
            offset: self.offset + x,
        }
    }
}

impl Add<Self> for MzOffset {
    type Output = Self;

    fn add(self, x: Self) -> Self {
        MzOffset {
            offset: self.offset + x.offset,
        }
    }
}

impl AddAssign<u64> for MzOffset {
    fn add_assign(&mut self, x: u64) {
        self.offset += x;
    }
}

impl AddAssign<Self> for MzOffset {
    fn add_assign(&mut self, x: Self) {
        self.offset += x.offset;
    }
}

impl From<tokio_postgres::types::PgLsn> for MzOffset {
    fn from(lsn: tokio_postgres::types::PgLsn) -> Self {
        MzOffset { offset: lsn.into() }
    }
}

impl Timestamp for MzOffset {
    type Summary = MzOffset;

    fn minimum() -> Self {
        MzOffset {
            offset: Timestamp::minimum(),
        }
    }
}

impl PathSummary<MzOffset> for MzOffset {
    fn results_in(&self, src: &MzOffset) -> Option<MzOffset> {
        Some(MzOffset {
            offset: self.offset.results_in(&src.offset)?,
        })
    }

    fn followed_by(&self, other: &Self) -> Option<Self> {
        Some(MzOffset {
            offset: PathSummary::<u64>::followed_by(&self.offset, &other.offset)?,
        })
    }
}

impl Refines<()> for MzOffset {
    fn to_inner(_: ()) -> Self {
        MzOffset::minimum()
    }
    fn to_outer(self) {}
    fn summarize(_: Self::Summary) {}
}

impl PartialOrder for MzOffset {
    #[inline]
    fn less_equal(&self, other: &Self) -> bool {
        self.offset.less_equal(&other.offset)
    }
}

impl TotalOrder for MzOffset {}

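/// The timelines in which a collection's timestamps may be interpreted.
///
/// A minimal sketch of the string encoding implemented by the `Display` and
/// `FromStr` impls below, assuming this module's public path is
/// `mz_storage_types::sources`:
///
/// ```ignore
/// use mz_storage_types::sources::Timeline;
///
/// assert_eq!("M".parse(), Ok(Timeline::EpochMilliseconds));
/// assert_eq!("E.JOE".parse(), Ok(Timeline::External("JOE".to_string())));
/// assert_eq!("U.MIKE".parse(), Ok(Timeline::User("MIKE".to_string())));
/// assert_eq!(Timeline::EpochMilliseconds.to_string(), "M");
/// ```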
#[derive(
    Clone,
    Debug,
    Ord,
    PartialOrd,
    Eq,
    PartialEq,
    Serialize,
    Deserialize,
    Hash,
)]
pub enum Timeline {
    /// The default timeline, whose timestamps are milliseconds since the Unix
    /// epoch.
    EpochMilliseconds,
    /// A timeline driven by an external system, identified by the given string.
    External(String),
    /// A user-defined timeline, identified by the given string.
    User(String),
}

impl Timeline {
    const EPOCH_MILLISECOND_ID_CHAR: char = 'M';
    const EXTERNAL_ID_CHAR: char = 'E';
    const USER_ID_CHAR: char = 'U';

    fn id_char(&self) -> char {
        match self {
            Self::EpochMilliseconds => Self::EPOCH_MILLISECOND_ID_CHAR,
            Self::External(_) => Self::EXTERNAL_ID_CHAR,
            Self::User(_) => Self::USER_ID_CHAR,
        }
    }
}

impl std::fmt::Display for Timeline {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            Self::EpochMilliseconds => write!(f, "{}", self.id_char()),
            Self::External(id) => write!(f, "{}.{id}", self.id_char()),
            Self::User(id) => write!(f, "{}.{id}", self.id_char()),
        }
    }
}

impl FromStr for Timeline {
    type Err = String;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        if s.is_empty() {
            return Err("empty timeline".to_string());
        }
        let mut chars = s.chars();
        match chars.next().expect("non-empty string") {
            Self::EPOCH_MILLISECOND_ID_CHAR => match chars.next() {
                None => Ok(Self::EpochMilliseconds),
                Some(_) => Err(format!("unknown timeline: {s}")),
            },
            Self::EXTERNAL_ID_CHAR => match chars.next() {
                Some('.') => Ok(Self::External(chars.as_str().to_string())),
                _ => Err(format!("unknown timeline: {s}")),
            },
            Self::USER_ID_CHAR => match chars.next() {
                Some('.') => Ok(Self::User(chars.as_str().to_string())),
                _ => Err(format!("unknown timeline: {s}")),
            },
            _ => Err(format!("unknown timeline: {s}")),
        }
    }
}

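/// The common behavior of all source connection types.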
pub trait SourceConnection: Debug + Clone + PartialEq + AlterCompatible {
    /// The name of the external system this connection ingests from, e.g.
    /// `"kafka"`.
    fn name(&self) -> &'static str;

    /// The name of the referenced resource in the external system (e.g. a
    /// Kafka topic), if any.
    fn external_reference(&self) -> Option<&str>;

    /// The schema of this connection's key rows, before any encoding or
    /// envelope is applied.
    fn default_key_desc(&self) -> RelationDesc;

    /// The schema of this connection's value rows, before any encoding or
    /// envelope is applied.
    fn default_value_desc(&self) -> RelationDesc;

    /// The schema of this connection's timestamp type, used to track
    /// ingestion progress.
    fn timestamp_desc(&self) -> RelationDesc;

    /// The ID of the catalog connection item this source connection uses, if
    /// any.
    fn connection_id(&self) -> Option<CatalogItemId>;

    /// Whether this source can be ingested while in read-only mode.
    fn supports_read_only(&self) -> bool;

    /// Whether this source prefers to run on a single replica.
    fn prefers_single_replica(&self) -> bool;
}

#[derive(Clone, Copy, Debug, Eq, PartialEq, Serialize, Deserialize)]
pub enum Compression {
    Gzip,
    None,
}

/// The encoding and envelope configuration for the data produced by a source
/// export.
#[derive(Clone, Debug, Serialize, Deserialize, Eq, PartialEq)]
pub struct SourceExportDataConfig<C: ConnectionAccess = InlinedConnection> {
    pub encoding: Option<encoding::SourceDataEncoding<C>>,
    pub envelope: SourceEnvelope,
}

impl<R: ConnectionResolver> IntoInlineConnection<SourceExportDataConfig, R>
    for SourceExportDataConfig<ReferencedConnection>
{
    fn into_inline_connection(self, r: R) -> SourceExportDataConfig {
        let SourceExportDataConfig { encoding, envelope } = self;

        SourceExportDataConfig {
            encoding: encoding.map(|e| e.into_inline_connection(r)),
            envelope,
        }
    }
}

impl<C: ConnectionAccess> AlterCompatible for SourceExportDataConfig<C> {
    fn alter_compatible(&self, id: GlobalId, other: &Self) -> Result<(), AlterError> {
        if self == other {
            return Ok(());
        }
        let Self { encoding, envelope } = &self;

        let compatibility_checks = [
            (
                match (encoding, &other.encoding) {
                    (Some(s), Some(o)) => s.alter_compatible(id, o).is_ok(),
                    (s, o) => s == o,
                },
                "encoding",
            ),
            (envelope == &other.envelope, "envelope"),
        ];

        for (compatible, field) in compatibility_checks {
            if !compatible {
                tracing::warn!(
                    "SourceExportDataConfig incompatible at {field}:\nself:\n{:#?}\n\nother\n{:#?}",
                    self,
                    other
                );

                return Err(AlterError { id });
            }
        }
        Ok(())
    }
}

impl<C: ConnectionAccess> SourceExportDataConfig<C> {
    /// Returns whether the data produced by this export is known to be
    /// appended to without retractions, i.e. whether its output is monotonic.
    pub fn monotonic(&self, connection: &GenericSourceConnection<C>) -> bool {
        match &self.envelope {
            // Upsert and CDCv2 envelopes may produce retractions.
            SourceEnvelope::Upsert(_) | SourceEnvelope::CdcV2 => false,
            SourceEnvelope::None(_) => {
                match connection {
                    // Postgres, MySQL, and SQL Server replication streams
                    // contain retractions.
                    GenericSourceConnection::Postgres(_) => false,
                    GenericSourceConnection::MySql(_) => false,
                    GenericSourceConnection::SqlServer(_) => false,
                    // Whether a load generator is monotonic depends on the
                    // generator.
                    GenericSourceConnection::LoadGenerator(g) => g.load_generator.is_monotonic(),
                    // Kafka topics are append-only.
                    GenericSourceConnection::Kafka(_) => true,
                }
            }
        }
    }
}

/// A description of a source ingestion: the connection to ingest from and how
/// frequently to assign new timestamps to its data.
#[derive(Clone, Debug, Serialize, Deserialize, Eq, PartialEq)]
pub struct SourceDesc<C: ConnectionAccess = InlinedConnection> {
    pub connection: GenericSourceConnection<C>,
    pub timestamp_interval: Duration,
}

impl<R: ConnectionResolver> IntoInlineConnection<SourceDesc, R>
    for SourceDesc<ReferencedConnection>
{
    fn into_inline_connection(self, r: R) -> SourceDesc {
        let SourceDesc {
            connection,
            timestamp_interval,
        } = self;

        SourceDesc {
            connection: connection.into_inline_connection(&r),
            timestamp_interval,
        }
    }
}

impl<C: ConnectionAccess> AlterCompatible for SourceDesc<C> {
    fn alter_compatible(&self, id: GlobalId, other: &Self) -> Result<(), AlterError> {
        if self == other {
            return Ok(());
        }
        let Self {
            connection,
            // The timestamp interval is not compared because it is allowed to
            // change.
            timestamp_interval: _,
        } = &self;

        let compatibility_checks = [(
            connection.alter_compatible(id, &other.connection).is_ok(),
            "connection",
        )];

        for (compatible, field) in compatibility_checks {
            if !compatible {
                tracing::warn!(
                    "SourceDesc incompatible {field}:\nself:\n{:#?}\n\nother\n{:#?}",
                    self,
                    other
                );

                return Err(AlterError { id });
            }
        }

        Ok(())
    }
}

#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)]
pub enum GenericSourceConnection<C: ConnectionAccess = InlinedConnection> {
    Kafka(KafkaSourceConnection<C>),
    Postgres(PostgresSourceConnection<C>),
    MySql(MySqlSourceConnection<C>),
    SqlServer(SqlServerSourceConnection<C>),
    LoadGenerator(LoadGeneratorSourceConnection),
}

impl<C: ConnectionAccess> From<KafkaSourceConnection<C>> for GenericSourceConnection<C> {
    fn from(conn: KafkaSourceConnection<C>) -> Self {
        Self::Kafka(conn)
    }
}

impl<C: ConnectionAccess> From<PostgresSourceConnection<C>> for GenericSourceConnection<C> {
    fn from(conn: PostgresSourceConnection<C>) -> Self {
        Self::Postgres(conn)
    }
}

impl<C: ConnectionAccess> From<MySqlSourceConnection<C>> for GenericSourceConnection<C> {
    fn from(conn: MySqlSourceConnection<C>) -> Self {
        Self::MySql(conn)
    }
}

impl<C: ConnectionAccess> From<SqlServerSourceConnection<C>> for GenericSourceConnection<C> {
    fn from(conn: SqlServerSourceConnection<C>) -> Self {
        Self::SqlServer(conn)
    }
}

impl<C: ConnectionAccess> From<LoadGeneratorSourceConnection> for GenericSourceConnection<C> {
    fn from(conn: LoadGeneratorSourceConnection) -> Self {
        Self::LoadGenerator(conn)
    }
}

impl<R: ConnectionResolver> IntoInlineConnection<GenericSourceConnection, R>
    for GenericSourceConnection<ReferencedConnection>
{
    fn into_inline_connection(self, r: R) -> GenericSourceConnection {
        match self {
            GenericSourceConnection::Kafka(kafka) => {
                GenericSourceConnection::Kafka(kafka.into_inline_connection(r))
            }
            GenericSourceConnection::Postgres(pg) => {
                GenericSourceConnection::Postgres(pg.into_inline_connection(r))
            }
            GenericSourceConnection::MySql(mysql) => {
                GenericSourceConnection::MySql(mysql.into_inline_connection(r))
            }
            GenericSourceConnection::SqlServer(sql_server) => {
                GenericSourceConnection::SqlServer(sql_server.into_inline_connection(r))
            }
            GenericSourceConnection::LoadGenerator(lg) => {
                GenericSourceConnection::LoadGenerator(lg)
            }
        }
    }
}

impl<C: ConnectionAccess> SourceConnection for GenericSourceConnection<C> {
    fn name(&self) -> &'static str {
        match self {
            Self::Kafka(conn) => conn.name(),
            Self::Postgres(conn) => conn.name(),
            Self::MySql(conn) => conn.name(),
            Self::SqlServer(conn) => conn.name(),
            Self::LoadGenerator(conn) => conn.name(),
        }
    }

    fn external_reference(&self) -> Option<&str> {
        match self {
            Self::Kafka(conn) => conn.external_reference(),
            Self::Postgres(conn) => conn.external_reference(),
            Self::MySql(conn) => conn.external_reference(),
            Self::SqlServer(conn) => conn.external_reference(),
            Self::LoadGenerator(conn) => conn.external_reference(),
        }
    }

    fn default_key_desc(&self) -> RelationDesc {
        match self {
            Self::Kafka(conn) => conn.default_key_desc(),
            Self::Postgres(conn) => conn.default_key_desc(),
            Self::MySql(conn) => conn.default_key_desc(),
            Self::SqlServer(conn) => conn.default_key_desc(),
            Self::LoadGenerator(conn) => conn.default_key_desc(),
        }
    }

    fn default_value_desc(&self) -> RelationDesc {
        match self {
            Self::Kafka(conn) => conn.default_value_desc(),
            Self::Postgres(conn) => conn.default_value_desc(),
            Self::MySql(conn) => conn.default_value_desc(),
            Self::SqlServer(conn) => conn.default_value_desc(),
            Self::LoadGenerator(conn) => conn.default_value_desc(),
        }
    }

    fn timestamp_desc(&self) -> RelationDesc {
        match self {
            Self::Kafka(conn) => conn.timestamp_desc(),
            Self::Postgres(conn) => conn.timestamp_desc(),
            Self::MySql(conn) => conn.timestamp_desc(),
            Self::SqlServer(conn) => conn.timestamp_desc(),
            Self::LoadGenerator(conn) => conn.timestamp_desc(),
        }
    }

    fn connection_id(&self) -> Option<CatalogItemId> {
        match self {
            Self::Kafka(conn) => conn.connection_id(),
            Self::Postgres(conn) => conn.connection_id(),
            Self::MySql(conn) => conn.connection_id(),
            Self::SqlServer(conn) => conn.connection_id(),
            Self::LoadGenerator(conn) => conn.connection_id(),
        }
    }

    fn supports_read_only(&self) -> bool {
        match self {
            GenericSourceConnection::Kafka(conn) => conn.supports_read_only(),
            GenericSourceConnection::Postgres(conn) => conn.supports_read_only(),
            GenericSourceConnection::MySql(conn) => conn.supports_read_only(),
            GenericSourceConnection::SqlServer(conn) => conn.supports_read_only(),
            GenericSourceConnection::LoadGenerator(conn) => conn.supports_read_only(),
        }
    }

    fn prefers_single_replica(&self) -> bool {
        match self {
            GenericSourceConnection::Kafka(conn) => conn.prefers_single_replica(),
            GenericSourceConnection::Postgres(conn) => conn.prefers_single_replica(),
            GenericSourceConnection::MySql(conn) => conn.prefers_single_replica(),
            GenericSourceConnection::SqlServer(conn) => conn.prefers_single_replica(),
            GenericSourceConnection::LoadGenerator(conn) => conn.prefers_single_replica(),
        }
    }
}

impl<C: ConnectionAccess> crate::AlterCompatible for GenericSourceConnection<C> {
    fn alter_compatible(&self, id: GlobalId, other: &Self) -> Result<(), AlterError> {
        if self == other {
            return Ok(());
        }
        let r = match (self, other) {
            (Self::Kafka(conn), Self::Kafka(other)) => conn.alter_compatible(id, other),
            (Self::Postgres(conn), Self::Postgres(other)) => conn.alter_compatible(id, other),
            (Self::MySql(conn), Self::MySql(other)) => conn.alter_compatible(id, other),
            (Self::SqlServer(conn), Self::SqlServer(other)) => conn.alter_compatible(id, other),
            (Self::LoadGenerator(conn), Self::LoadGenerator(other)) => {
                conn.alter_compatible(id, other)
            }
            _ => Err(AlterError { id }),
        };

        if r.is_err() {
            tracing::warn!(
                "GenericSourceConnection incompatible:\nself:\n{:#?}\n\nother\n{:#?}",
                self,
                other
            );
        }

        r
    }
}

/// The connection-specific details a source export needs in order to ingest
/// the data it references.
#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)]
pub enum SourceExportDetails {
    /// The export requires no details.
    None,
    Kafka(KafkaSourceExportDetails),
    Postgres(PostgresSourceExportDetails),
    MySql(MySqlSourceExportDetails),
    SqlServer(SqlServerSourceExportDetails),
    LoadGenerator(LoadGeneratorSourceExportDetails),
}

impl crate::AlterCompatible for SourceExportDetails {
    fn alter_compatible(&self, id: GlobalId, other: &Self) -> Result<(), AlterError> {
        if self == other {
            return Ok(());
        }
        let r = match (self, other) {
            (Self::None, Self::None) => Ok(()),
            (Self::Kafka(s), Self::Kafka(o)) => s.alter_compatible(id, o),
            (Self::Postgres(s), Self::Postgres(o)) => s.alter_compatible(id, o),
            (Self::MySql(s), Self::MySql(o)) => s.alter_compatible(id, o),
            (Self::SqlServer(s), Self::SqlServer(o)) => s.alter_compatible(id, o),
            (Self::LoadGenerator(s), Self::LoadGenerator(o)) => s.alter_compatible(id, o),
            _ => Err(AlterError { id }),
        };

        if r.is_err() {
            tracing::warn!(
                "SourceExportDetails incompatible:\nself:\n{:#?}\n\nother\n{:#?}",
                self,
                other
            );
        }

        r
    }
}

/// The connection-specific details recorded on source export statements.
pub enum SourceExportStatementDetails {
    Postgres {
        table: mz_postgres_util::desc::PostgresTableDesc,
    },
    MySql {
        table: mz_mysql_util::MySqlTableDesc,
        initial_gtid_set: String,
        binlog_full_metadata: bool,
    },
    SqlServer {
        table: mz_sql_server_util::desc::SqlServerTableDesc,
        capture_instance: Arc<str>,
        initial_lsn: mz_sql_server_util::cdc::Lsn,
    },
    LoadGenerator {
        output: LoadGeneratorOutput,
    },
    Kafka {},
}

impl RustType<ProtoSourceExportStatementDetails> for SourceExportStatementDetails {
    fn into_proto(&self) -> ProtoSourceExportStatementDetails {
        match self {
            SourceExportStatementDetails::Postgres { table } => ProtoSourceExportStatementDetails {
                kind: Some(proto_source_export_statement_details::Kind::Postgres(
                    postgres::ProtoPostgresSourceExportStatementDetails {
                        table: Some(table.into_proto()),
                    },
                )),
            },
            SourceExportStatementDetails::MySql {
                table,
                initial_gtid_set,
                binlog_full_metadata,
            } => ProtoSourceExportStatementDetails {
                kind: Some(proto_source_export_statement_details::Kind::Mysql(
                    mysql::ProtoMySqlSourceExportStatementDetails {
                        table: Some(table.into_proto()),
                        initial_gtid_set: initial_gtid_set.clone(),
                        binlog_full_metadata: *binlog_full_metadata,
                    },
                )),
            },
            SourceExportStatementDetails::SqlServer {
                table,
                capture_instance,
                initial_lsn,
            } => ProtoSourceExportStatementDetails {
                kind: Some(proto_source_export_statement_details::Kind::SqlServer(
                    sql_server::ProtoSqlServerSourceExportStatementDetails {
                        table: Some(table.into_proto()),
                        capture_instance: capture_instance.to_string(),
                        initial_lsn: initial_lsn.as_bytes().to_vec(),
                    },
                )),
            },
            SourceExportStatementDetails::LoadGenerator { output } => {
                ProtoSourceExportStatementDetails {
                    kind: Some(proto_source_export_statement_details::Kind::Loadgen(
                        load_generator::ProtoLoadGeneratorSourceExportStatementDetails {
                            output: output.into_proto().into(),
                        },
                    )),
                }
            }
            SourceExportStatementDetails::Kafka {} => ProtoSourceExportStatementDetails {
                kind: Some(proto_source_export_statement_details::Kind::Kafka(
                    kafka::ProtoKafkaSourceExportStatementDetails {},
                )),
            },
        }
    }

    fn from_proto(proto: ProtoSourceExportStatementDetails) -> Result<Self, TryFromProtoError> {
        use proto_source_export_statement_details::Kind;
        Ok(match proto.kind {
            Some(Kind::Postgres(details)) => SourceExportStatementDetails::Postgres {
                table: details
                    .table
                    .into_rust_if_some("ProtoPostgresSourceExportStatementDetails::table")?,
            },
            Some(Kind::Mysql(details)) => SourceExportStatementDetails::MySql {
                table: details
                    .table
                    .into_rust_if_some("ProtoMySqlSourceExportStatementDetails::table")?,
                initial_gtid_set: details.initial_gtid_set,
                binlog_full_metadata: details.binlog_full_metadata,
            },
            Some(Kind::SqlServer(details)) => SourceExportStatementDetails::SqlServer {
                table: details
                    .table
                    .into_rust_if_some("ProtoSqlServerSourceExportStatementDetails::table")?,
                capture_instance: details.capture_instance.into(),
                initial_lsn: mz_sql_server_util::cdc::Lsn::try_from(details.initial_lsn.as_slice())
                    .map_err(|e| TryFromProtoError::InvalidFieldError(e.to_string()))?,
            },
            Some(Kind::Loadgen(details)) => SourceExportStatementDetails::LoadGenerator {
                output: details
                    .output
                    .into_rust_if_some("ProtoLoadGeneratorSourceExportStatementDetails::output")?,
            },
            Some(Kind::Kafka(_details)) => SourceExportStatementDetails::Kafka {},
            None => {
                return Err(TryFromProtoError::missing_field(
                    "ProtoSourceExportStatementDetails::kind",
                ));
            }
        })
    }
}

/// The unit of data stored for sources: either a successfully decoded [`Row`]
/// or a [`DataflowError`] produced while ingesting or decoding.
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)]
#[repr(transparent)]
pub struct SourceData(pub Result<Row, DataflowError>);

impl Default for SourceData {
    fn default() -> Self {
        SourceData(Ok(Row::default()))
    }
}

impl Deref for SourceData {
    type Target = Result<Row, DataflowError>;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

impl DerefMut for SourceData {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0
    }
}

impl RustType<ProtoSourceData> for SourceData {
    fn into_proto(&self) -> ProtoSourceData {
        use proto_source_data::Kind;
        ProtoSourceData {
            kind: Some(match &**self {
                Ok(row) => Kind::Ok(row.into_proto()),
                Err(err) => Kind::Err(err.into_proto()),
            }),
        }
    }

    fn from_proto(proto: ProtoSourceData) -> Result<Self, TryFromProtoError> {
        use proto_source_data::Kind;
        match proto.kind {
            Some(kind) => match kind {
                Kind::Ok(row) => Ok(SourceData(Ok(row.into_rust()?))),
                Kind::Err(err) => Ok(SourceData(Err(err.into_rust()?))),
            },
            None => Result::Err(TryFromProtoError::missing_field("ProtoSourceData::kind")),
        }
    }
}

impl Codec for SourceData {
    type Storage = ProtoRow;
    type Schema = RelationDesc;

    fn codec_name() -> String {
        "protobuf[SourceData]".into()
    }

    fn encode<B: BufMut>(&self, buf: &mut B) {
        self.into_proto()
            .encode(buf)
            .expect("no required fields means no initialization errors");
    }

    fn decode(buf: &[u8], schema: &RelationDesc) -> Result<Self, String> {
        let mut val = SourceData::default();
        <Self as Codec>::decode_from(&mut val, buf, &mut None, schema)?;
        Ok(val)
    }

    fn decode_from<'a>(
        &mut self,
        buf: &'a [u8],
        storage: &mut Option<ProtoRow>,
        schema: &RelationDesc,
    ) -> Result<(), String> {
        // Reuse the `ProtoRow` from `storage`, if there is one, to avoid an
        // allocation.
        let mut proto = storage.take().unwrap_or_default();
        proto.clear();
        let mut proto = ProtoSourceData {
            kind: Some(proto_source_data::Kind::Ok(proto)),
        };
        proto.merge(buf).map_err(|err| err.to_string())?;
        match (proto.kind, &mut self.0) {
            (Some(proto_source_data::Kind::Ok(proto)), Ok(row)) => {
                let ret = row.decode_from_proto(&proto, schema);
                // Hand the `ProtoRow` back for reuse by the next decode.
                storage.replace(proto);
                ret
            }
            (kind, _) => {
                // Fall back to the slow path for errors and kind mismatches.
                let proto = ProtoSourceData { kind };
                *self = proto.into_rust().map_err(|err| err.to_string())?;
                Ok(())
            }
        }
    }

    fn validate(val: &Self, desc: &Self::Schema) -> Result<(), String> {
        match &val.0 {
            Ok(row) => Row::validate(row, desc),
            Err(_) => Ok(()),
        }
    }

    fn encode_schema(schema: &Self::Schema) -> Bytes {
        schema.into_proto().encode_to_vec().into()
    }

    fn decode_schema(buf: &Bytes) -> Self::Schema {
        let proto = ProtoRelationDesc::decode(buf.as_ref()).expect("valid schema");
        proto.into_rust().expect("valid schema")
    }
}

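/// Returns a [`Strategy`] for generating [`SourceData`] whose rows conform to
/// `desc`, weighted heavily towards `Ok` rows over errors.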
pub fn arb_source_data_for_relation_desc(
    desc: &RelationDesc,
) -> impl Strategy<Value = SourceData> + use<> {
    let row_strat = arb_row_for_relation(desc).no_shrink();

    proptest::strategy::Union::new_weighted(vec![
        (50, row_strat.prop_map(|row| SourceData(Ok(row))).boxed()),
        (
            1,
            any::<DataflowError>()
                .prop_map(|err| SourceData(Err(err)))
                .no_shrink()
                .boxed(),
        ),
    ])
}

/// An item in an external system (e.g. a table) that a source can reference,
/// identified by a schema name and an item name.
pub trait ExternalCatalogReference {
    /// The name of the schema (namespace) containing the item.
    fn schema_name(&self) -> &str;
    /// The name of the item itself.
    fn item_name(&self) -> &str;
}

impl ExternalCatalogReference for &mz_mysql_util::MySqlTableDesc {
    fn schema_name(&self) -> &str {
        &self.schema_name
    }

    fn item_name(&self) -> &str {
        &self.name
    }
}

impl ExternalCatalogReference for mz_postgres_util::desc::PostgresTableDesc {
    fn schema_name(&self) -> &str {
        &self.namespace
    }

    fn item_name(&self) -> &str {
        &self.name
    }
}

impl ExternalCatalogReference for &mz_sql_server_util::desc::SqlServerTableDesc {
    fn schema_name(&self) -> &str {
        &*self.schema_name
    }

    fn item_name(&self) -> &str {
        &*self.name
    }
}

impl<'a> ExternalCatalogReference for (&'a str, &'a str) {
    fn schema_name(&self) -> &str {
        self.0
    }

    fn item_name(&self) -> &str {
        self.1
    }
}

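/// Resolves partially-qualified external references (`item`, `schema.item`,
/// or `database.schema.item`) against the set of items a source makes
/// referenceable.
///
/// Internally the items are indexed as item name → schema name → database
/// name → position in the original slice, which is what allows names to be
/// resolved from their least-qualified forms.
///
/// A minimal usage sketch, assuming this module's public path is
/// `mz_storage_types::sources`; `(&str, &str)` pairs of `(schema, item)`
/// implement [`ExternalCatalogReference`]:
///
/// ```ignore
/// use mz_sql_parser::ast::Ident;
/// use mz_storage_types::sources::SourceReferenceResolver;
///
/// let resolver = SourceReferenceResolver::new("db", &[("public", "users")])?;
/// // Resolve the bare item name to a fully-qualified, 3-part name plus the
/// // index of the matching item.
/// let name = [Ident::new("users")?];
/// let (qualified, idx) = resolver.resolve(&name, 3)?;
/// assert_eq!(idx, 0);
/// ```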
#[derive(Debug, Clone, Default, PartialEq, Eq, Serialize, Deserialize)]
pub struct SourceReferenceResolver {
    inner: BTreeMap<Ident, BTreeMap<Ident, BTreeMap<Ident, usize>>>,
}

#[derive(Debug, Clone, thiserror::Error)]
pub enum ExternalReferenceResolutionError {
    #[error("reference to {name} not found in source")]
    DoesNotExist { name: String },
    #[error(
        "reference {name} is ambiguous, consider specifying an additional \
        layer of qualification"
    )]
    Ambiguous { name: String },
    #[error("invalid identifier: {0}")]
    Ident(#[from] IdentError),
}

impl<'a> SourceReferenceResolver {
    /// Constructs a resolver for `referenceable_items` that live in the given
    /// `database`, erroring if any of the names is not a valid identifier.
    pub fn new<T: ExternalCatalogReference>(
        database: &str,
        referenceable_items: &'a [T],
    ) -> Result<SourceReferenceResolver, ExternalReferenceResolutionError> {
        let mut inner = BTreeMap::new();

        let database = Ident::new(database)?;

        for (reference_idx, item) in referenceable_items.iter().enumerate() {
            let item_name = Ident::new(item.item_name())?;
            let schema_name = Ident::new(item.schema_name())?;

            inner
                .entry(item_name)
                .or_insert_with(BTreeMap::new)
                .entry(schema_name)
                .or_insert_with(BTreeMap::new)
                .entry(database.clone())
                .or_insert(reference_idx);
        }

        Ok(SourceReferenceResolver { inner })
    }

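    /// Resolves `name` to a canonical [`UnresolvedItemName`] with
    /// `canonicalize_to_width` parts (1 = `item`, 2 = `schema.item`,
    /// 3 = `database.schema.item`), along with the index of the referenced
    /// item.
    ///
    /// # Panics
    ///
    /// Panics if `canonicalize_to_width` is not in `1..=3`.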
    pub fn resolve(
        &self,
        name: &[Ident],
        canonicalize_to_width: usize,
    ) -> Result<(UnresolvedItemName, usize), ExternalReferenceResolutionError> {
        let (db, schema, idx) = self.resolve_inner(name)?;

        let item = name.last().expect("must have provided at least 1 element");

        let canonical_name = match canonicalize_to_width {
            1 => vec![item.clone()],
            2 => vec![schema.clone(), item.clone()],
            3 => vec![db.clone(), schema.clone(), item.clone()],
            o => panic!("canonicalize_to_width values must be 1..=3, but got {}", o),
        };

        Ok((UnresolvedItemName(canonical_name), idx))
    }

    /// Resolves `name` to the index of the referenced item, without
    /// canonicalizing the name itself.
    pub fn resolve_idx(&self, name: &[Ident]) -> Result<usize, ExternalReferenceResolutionError> {
        let (_db, _schema, idx) = self.resolve_inner(name)?;
        Ok(idx)
    }

    /// The shared implementation of `resolve` and `resolve_idx`: resolves
    /// `name` to the database and schema identifiers it refers to, plus the
    /// index of the referenced item.
    fn resolve_inner<'name: 'a>(
        &'a self,
        name: &'name [Ident],
    ) -> Result<(&'a Ident, &'a Ident, usize), ExternalReferenceResolutionError> {
        let get_provided_name = || UnresolvedItemName(name.to_vec()).to_string();

        // References must have between one and three components.
        if !(1..=3).contains(&name.len()) {
            Err(ExternalReferenceResolutionError::DoesNotExist {
                name: get_provided_name(),
            })?;
        }

        // Pad the name on the left with `None` so it always has exactly three
        // components.
        let mut names = std::iter::repeat(None)
            .take(3 - name.len())
            .chain(name.iter().map(Some));

        let database = names.next().flatten();
        let schema = names.next().flatten();
        let item = names
            .next()
            .flatten()
            .expect("must have provided the item name");

        assert_none!(names.next(), "expected a 3-element iterator");

        let schemas =
            self.inner
                .get(item)
                .ok_or_else(|| ExternalReferenceResolutionError::DoesNotExist {
                    name: get_provided_name(),
                })?;

        let schema = match schema {
            Some(schema) => schema,
            None => schemas.keys().exactly_one().map_err(|_e| {
                ExternalReferenceResolutionError::Ambiguous {
                    name: get_provided_name(),
                }
            })?,
        };

        let databases =
            schemas
                .get(schema)
                .ok_or_else(|| ExternalReferenceResolutionError::DoesNotExist {
                    name: get_provided_name(),
                })?;

        let database = match database {
            Some(database) => database,
            None => databases.keys().exactly_one().map_err(|_e| {
                ExternalReferenceResolutionError::Ambiguous {
                    name: get_provided_name(),
                }
            })?,
        };

        let reference_idx = databases.get(database).ok_or_else(|| {
            ExternalReferenceResolutionError::DoesNotExist {
                name: get_provided_name(),
            }
        })?;

        Ok((database, schema, *reference_idx))
    }
}

/// Decodes the `Ok` rows of [`SourceData`] from their columnar representation.
#[derive(Debug)]
pub enum SourceDataRowColumnarDecoder {
    Row(RowColumnarDecoder),
    /// A specialization for relations with no columns, whose rows carry no
    /// data.
    EmptyRow,
}

impl SourceDataRowColumnarDecoder {
    pub fn decode(&self, idx: usize, row: &mut Row) {
        match self {
            SourceDataRowColumnarDecoder::Row(decoder) => decoder.decode(idx, row),
            SourceDataRowColumnarDecoder::EmptyRow => {
                // Creating a packer clears the row, leaving it empty.
                row.packer();
            }
        }
    }

    pub fn goodbytes(&self) -> usize {
        match self {
            SourceDataRowColumnarDecoder::Row(decoder) => decoder.goodbytes(),
            SourceDataRowColumnarDecoder::EmptyRow => 0,
        }
    }
}

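/// Decodes [`SourceData`] from the two-column arrow representation produced
/// by [`SourceDataColumnarEncoder`]: a nullable "ok" struct column carrying
/// rows and a nullable "err" binary column carrying protobuf-encoded
/// [`DataflowError`]s, exactly one of which is set for each entry.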
#[derive(Debug)]
pub struct SourceDataColumnarDecoder {
    row_decoder: SourceDataRowColumnarDecoder,
    err_decoder: BinaryArray,
}

impl SourceDataColumnarDecoder {
    pub fn new(col: StructArray, desc: &RelationDesc) -> Result<Self, anyhow::Error> {
        let (_fields, arrays, nullability) = col.into_parts();

        if nullability.is_some() {
            anyhow::bail!("SourceData is not nullable, but found {nullability:?}");
        }
        if arrays.len() != 2 {
            anyhow::bail!("SourceData should only have two fields, found {arrays:?}");
        }

        let errs = arrays[1]
            .as_any()
            .downcast_ref::<BinaryArray>()
            .ok_or_else(|| anyhow::anyhow!("expected BinaryArray, found {:?}", arrays[1]))?;

        let row_decoder = match arrays[0].data_type() {
            arrow::datatypes::DataType::Struct(_) => {
                let rows = arrays[0]
                    .as_any()
                    .downcast_ref::<StructArray>()
                    .ok_or_else(|| {
                        anyhow::anyhow!("expected StructArray, found {:?}", arrays[0])
                    })?;
                let decoder = RowColumnarDecoder::new(rows.clone(), desc)?;
                SourceDataRowColumnarDecoder::Row(decoder)
            }
            arrow::datatypes::DataType::Null => SourceDataRowColumnarDecoder::EmptyRow,
            other => anyhow::bail!("expected Struct or Null Array, found {other:?}"),
        };

        Ok(SourceDataColumnarDecoder {
            row_decoder,
            err_decoder: errs.clone(),
        })
    }
}

impl ColumnDecoder<SourceData> for SourceDataColumnarDecoder {
    fn decode(&self, idx: usize, val: &mut SourceData) {
        let err_null = self.err_decoder.is_null(idx);
        let row_null = match &self.row_decoder {
            SourceDataRowColumnarDecoder::Row(decoder) => decoder.is_null(idx),
            SourceDataRowColumnarDecoder::EmptyRow => !err_null,
        };

        match (row_null, err_null) {
            (true, false) => {
                let err = self.err_decoder.value(idx);
                let err = ProtoDataflowError::decode(err)
                    .expect("proto should be valid")
                    .into_rust()
                    .expect("error should be valid");
                val.0 = Err(err);
            }
            (false, true) => {
                // Reuse the existing `Row` allocation, if there is one.
                let row = match val.0.as_mut() {
                    Ok(row) => row,
                    Err(_) => {
                        val.0 = Ok(Row::default());
                        val.0.as_mut().unwrap()
                    }
                };
                self.row_decoder.decode(idx, row);
            }
            (true, true) => panic!("should have one of 'ok' or 'err'"),
            (false, false) => panic!("cannot have both 'ok' and 'err'"),
        }
    }

    fn is_null(&self, idx: usize) -> bool {
        let err_null = self.err_decoder.is_null(idx);
        let row_null = match &self.row_decoder {
            SourceDataRowColumnarDecoder::Row(decoder) => decoder.is_null(idx),
            SourceDataRowColumnarDecoder::EmptyRow => !err_null,
        };
        assert!(!err_null || !row_null, "SourceData should never be null!");

        false
    }

    fn goodbytes(&self) -> usize {
        self.row_decoder.goodbytes() + ArrayOrd::Binary(self.err_decoder.clone()).goodbytes()
    }

    fn stats(&self) -> StructStats {
        let len = self.err_decoder.len();
        let err_stats = ColumnarStats {
            nulls: Some(ColumnNullStats {
                count: self.err_decoder.null_count(),
            }),
            values: PrimitiveStats::<Vec<u8>>::from_column(&self.err_decoder).into(),
        };
        // The "ok" column is null exactly where the "err" column is set, so
        // its null count can be derived without inspecting it.
        let row_null_count = len - self.err_decoder.null_count();
        let row_stats = match &self.row_decoder {
            SourceDataRowColumnarDecoder::Row(encoder) => {
                assert_eq!(encoder.null_count(), row_null_count);
                encoder.stats()
            }
            SourceDataRowColumnarDecoder::EmptyRow => StructStats {
                len,
                cols: BTreeMap::default(),
            },
        };
        let row_stats = ColumnarStats {
            nulls: Some(ColumnNullStats {
                count: row_null_count,
            }),
            values: ColumnStatKinds::Struct(row_stats),
        };

        let stats = [
            (
                SourceDataColumnarEncoder::OK_COLUMN_NAME.to_string(),
                row_stats,
            ),
            (
                SourceDataColumnarEncoder::ERR_COLUMN_NAME.to_string(),
                err_stats,
            ),
        ];
        StructStats {
            len,
            cols: stats.into_iter().collect(),
        }
    }
}

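/// Encodes the `Ok` rows of [`SourceData`] into a columnar representation,
/// with an `EmptyRow` specialization (mirrored by
/// [`SourceDataRowColumnarDecoder`]) for relations that have no columns.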
#[derive(Debug)]
pub enum SourceDataRowColumnarEncoder {
    Row(RowColumnarEncoder),
    EmptyRow,
}

impl SourceDataRowColumnarEncoder {
    pub(crate) fn goodbytes(&self) -> usize {
        match self {
            SourceDataRowColumnarEncoder::Row(e) => e.goodbytes(),
            SourceDataRowColumnarEncoder::EmptyRow => 0,
        }
    }

    pub fn append(&mut self, row: &Row) {
        match self {
            SourceDataRowColumnarEncoder::Row(encoder) => encoder.append(row),
            SourceDataRowColumnarEncoder::EmptyRow => {
                assert_eq!(row.iter().count(), 0)
            }
        }
    }

    pub fn append_null(&mut self) {
        match self {
            SourceDataRowColumnarEncoder::Row(encoder) => encoder.append_null(),
            SourceDataRowColumnarEncoder::EmptyRow => (),
        }
    }
}
1605
1606#[derive(Debug)]
1607pub struct SourceDataColumnarEncoder {
1608 row_encoder: SourceDataRowColumnarEncoder,
1609 err_encoder: BinaryBuilder,
1610}
1611
1612impl SourceDataColumnarEncoder {
1613 const OK_COLUMN_NAME: &'static str = "ok";
1614 const ERR_COLUMN_NAME: &'static str = "err";
1615
1616 pub fn new(desc: &RelationDesc) -> Self {
1617 let row_encoder = match RowColumnarEncoder::new(desc) {
1618 Some(encoder) => SourceDataRowColumnarEncoder::Row(encoder),
1619 None => {
1620 assert!(desc.typ().columns().is_empty());
1621 SourceDataRowColumnarEncoder::EmptyRow
1622 }
1623 };
1624 let err_encoder = BinaryBuilder::new();
1625
1626 SourceDataColumnarEncoder {
1627 row_encoder,
1628 err_encoder,
1629 }
1630 }
1631}
1632
1633impl ColumnEncoder<SourceData> for SourceDataColumnarEncoder {
1634 type FinishedColumn = StructArray;
1635
1636 fn goodbytes(&self) -> usize {
1637 self.row_encoder.goodbytes() + self.err_encoder.values_slice().len()
1638 }
1639
1640 #[inline]
1641 fn append(&mut self, val: &SourceData) {
1642 match val.0.as_ref() {
1643 Ok(row) => {
1644 self.row_encoder.append(row);
1645 self.err_encoder.append_null();
1646 }
1647 Err(err) => {
1648 self.row_encoder.append_null();
1649 self.err_encoder
1650 .append_value(err.into_proto().encode_to_vec());
1651 }
1652 }
1653 }
1654
1655 #[inline]
1656 fn append_null(&mut self) {
1657 panic!("appending a null into SourceDataColumnarEncoder is not supported");
1658 }
1659
1660 fn finish(self) -> Self::FinishedColumn {
1661 let SourceDataColumnarEncoder {
1662 row_encoder,
1663 mut err_encoder,
1664 } = self;
1665
1666 let err_column = BinaryBuilder::finish(&mut err_encoder);
1667 let row_column: ArrayRef = match row_encoder {
1668 SourceDataRowColumnarEncoder::Row(encoder) => {
1669 let column = encoder.finish();
1670 Arc::new(column)
1671 }
1672 SourceDataRowColumnarEncoder::EmptyRow => Arc::new(NullArray::new(err_column.len())),
1673 };
1674
1675 assert_eq!(row_column.len(), err_column.len());
1676
1677 let fields = vec![
1678 Field::new(Self::OK_COLUMN_NAME, row_column.data_type().clone(), true),
1679 Field::new(Self::ERR_COLUMN_NAME, err_column.data_type().clone(), true),
1680 ];
1681 let arrays: Vec<Arc<dyn Array>> = vec![row_column, Arc::new(err_column)];
1682 StructArray::new(Fields::from(fields), arrays, None)
1683 }
1684}

impl Schema<SourceData> for RelationDesc {
    type ArrowColumn = StructArray;
    type Statistics = StructStats;

    type Decoder = SourceDataColumnarDecoder;
    type Encoder = SourceDataColumnarEncoder;

    fn decoder(&self, col: Self::ArrowColumn) -> Result<Self::Decoder, anyhow::Error> {
        SourceDataColumnarDecoder::new(col, self)
    }

    fn encoder(&self) -> Result<Self::Encoder, anyhow::Error> {
        Ok(SourceDataColumnarEncoder::new(self))
    }
}

#[cfg(test)]
mod tests {
    use arrow::array::{ArrayData, make_comparator};
    use base64::Engine;
    use bytes::Bytes;
    use mz_expr::EvalError;
    use mz_ore::assert_err;
    use mz_ore::metrics::MetricsRegistry;
    use mz_persist::indexed::columnar::arrow::{realloc_any, realloc_array};
    use mz_persist::metrics::ColumnarMetrics;
    use mz_persist_types::parquet::EncodingConfig;
    use mz_persist_types::schema::{Migration, backward_compatible};
    use mz_persist_types::stats::{PartStats, PartStatsMetrics};
    use mz_repr::{
        ColumnIndex, DatumVec, PropRelationDescDiff, ProtoRelationDesc, RelationDescBuilder,
        RowArena, SqlScalarType, arb_relation_desc_diff, arb_relation_desc_projection,
    };
    use proptest::prelude::*;
    use proptest::strategy::{Union, ValueTree};

    use crate::stats::RelationPartStats;

    use super::*;

    #[mz_ore::test]
    fn test_timeline_parsing() {
        assert_eq!(Ok(Timeline::EpochMilliseconds), "M".parse());
        assert_eq!(Ok(Timeline::External("JOE".to_string())), "E.JOE".parse());
        assert_eq!(Ok(Timeline::User("MIKE".to_string())), "U.MIKE".parse());

        assert_err!("Materialize".parse::<Timeline>());
        assert_err!("Ejoe".parse::<Timeline>());
        assert_err!("Umike".parse::<Timeline>());
        assert_err!("Dance".parse::<Timeline>());
        assert_err!("".parse::<Timeline>());
    }

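    // A minimal sketch of the `Codec` round-trip contract for `SourceData`,
    // using only APIs already exercised elsewhere in this file: encoding a
    // value and decoding it with the same `RelationDesc` should be lossless.
    #[mz_ore::test]
    fn source_data_codec_roundtrip_smoke() {
        let desc = RelationDesc::empty();
        let data = SourceData::default();

        let mut buf = Vec::new();
        data.encode(&mut buf);
        let decoded = SourceData::decode(&buf, &desc).expect("valid encoding");

        assert_eq!(data, decoded);
    }
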
    #[track_caller]
    fn roundtrip_source_data(
        desc: &RelationDesc,
        datas: Vec<SourceData>,
        read_desc: &RelationDesc,
        config: &EncodingConfig,
    ) {
        let metrics = ColumnarMetrics::disconnected();
        let mut encoder = <RelationDesc as Schema<SourceData>>::encoder(desc).unwrap();
        for data in &datas {
            encoder.append(data);
        }
        let col = encoder.finish();

        // The top-level struct array should never be nullable.
        assert!(!col.is_nullable());

        // Reallocate the array through persist's columnar allocator.
        let col = realloc_array(&col, &metrics);

        // Roundtrip through the arrow proto representation.
        {
            let proto = col.to_data().into_proto();
            let bytes = proto.encode_to_vec();
            let proto = mz_persist_types::arrow::ProtoArrayData::decode(&bytes[..]).unwrap();
            let array_data: ArrayData = proto.into_rust().unwrap();

            let col_rnd = StructArray::from(array_data.clone());
            assert_eq!(col, col_rnd);

            let col_dyn = arrow::array::make_array(array_data);
            let col_dyn = col_dyn.as_any().downcast_ref::<StructArray>().unwrap();
            assert_eq!(&col, col_dyn);
        }

        // Encode to parquet.
        let mut buf = Vec::new();
        let fields = Fields::from(vec![Field::new("k", col.data_type().clone(), false)]);
        let arrays: Vec<Arc<dyn Array>> = vec![Arc::new(col.clone())];
        mz_persist_types::parquet::encode_arrays(&mut buf, fields, arrays, config).unwrap();

        // Decode from parquet.
        let buf = Bytes::from(buf);
        let mut reader = mz_persist_types::parquet::decode_arrays(buf).unwrap();
        let maybe_batch = reader.next();

        // If we didn't encode any data then the record batch will be empty.
        let Some(record_batch) = maybe_batch else {
            assert!(datas.is_empty());
            return;
        };
        let record_batch = record_batch.unwrap();

        assert_eq!(record_batch.columns().len(), 1);
        let rnd_col = &record_batch.columns()[0];
        let rnd_col = realloc_any(Arc::clone(rnd_col), &metrics);
        let rnd_col = rnd_col
            .as_any()
            .downcast_ref::<StructArray>()
            .unwrap()
            .clone();

        // Collect statistics on the decoded column.
        let stats = <RelationDesc as Schema<SourceData>>::decoder_any(desc, &rnd_col)
            .expect("valid decoder")
            .stats();

        // Read back all of the data and assert it roundtrips.
        let mut rnd_data = SourceData(Ok(Row::default()));
        let decoder = <RelationDesc as Schema<SourceData>>::decoder(desc, rnd_col.clone()).unwrap();
        for (idx, og_data) in datas.iter().enumerate() {
            decoder.decode(idx, &mut rnd_data);
            assert_eq!(og_data, &rnd_data);
        }

        // Read the data back a second time with a projection applied, and
        // check the decoded values against the collected stats.
        let stats_metrics = PartStatsMetrics::new(&MetricsRegistry::new());
        let stats = RelationPartStats {
            name: "test",
            metrics: &stats_metrics,
            stats: &PartStats { key: stats },
            desc: read_desc,
        };
        let mut datum_vec = DatumVec::new();
        let arena = RowArena::default();
        let decoder = <RelationDesc as Schema<SourceData>>::decoder(read_desc, rnd_col).unwrap();

        for (idx, og_data) in datas.iter().enumerate() {
            decoder.decode(idx, &mut rnd_data);
            match (&og_data.0, &rnd_data.0) {
                (Ok(og_row), Ok(rnd_row)) => {
                    // The decoded row should equal the original row projected
                    // to the columns in `read_desc`.
                    {
                        let datums = datum_vec.borrow_with(og_row);
                        let projected_datums =
                            datums.iter().enumerate().filter_map(|(idx, datum)| {
                                read_desc
                                    .contains_index(&ColumnIndex::from_raw(idx))
                                    .then_some(datum)
                            });
                        let og_projected_row = Row::pack(projected_datums);
                        assert_eq!(&og_projected_row, rnd_row);
                    }

                    // The column stats must claim to contain every datum we
                    // decoded.
                    {
                        let proj_datums = datum_vec.borrow_with(rnd_row);
                        for (pos, (idx, _, _)) in read_desc.iter_all().enumerate() {
                            let spec = stats.col_stats(idx, &arena);
                            assert!(spec.may_contain(proj_datums[pos]));
                        }
                    }
                }
                (Err(_), Err(_)) => assert_eq!(og_data, &rnd_data),
                (_, _) => panic!("decoded to a different type? {og_data:?} {rnd_data:?}"),
            }
        }

        // The RelationDesc itself should roundtrip through its encoded
        // schema.
        let encoded_schema = SourceData::encode_schema(desc);
        let roundtrip_desc = SourceData::decode_schema(&encoded_schema);
        assert_eq!(desc, &roundtrip_desc);

        // A schema should always be backward compatible with itself, and
        // migrating should leave the data type unchanged.
        let migration =
            mz_persist_types::schema::backward_compatible(col.data_type(), col.data_type());
        let migration = migration.expect("should be backward compatible with self");
        let migrated = migration.migrate(Arc::new(col.clone()));
        assert_eq!(col.data_type(), migrated.data_type());
    }

    #[mz_ore::test]
    #[cfg_attr(miri, ignore)]
    fn all_source_data_roundtrips() {
        let mut weights = vec![(500, Just(0..8)), (50, Just(8..32))];
        if std::env::var("PROPTEST_LARGE_DATA").is_ok() {
            weights.extend([
                (10, Just(32..128)),
                (5, Just(128..512)),
                (3, Just(512..2048)),
                (1, Just(2048..8192)),
            ]);
        }
        let num_rows = Union::new_weighted(weights);

        let strat = (any::<RelationDesc>(), num_rows)
            .prop_flat_map(|(desc, num_rows)| {
                arb_relation_desc_projection(desc.clone())
                    .prop_map(move |read_desc| (desc.clone(), read_desc, num_rows.clone()))
            })
            .prop_flat_map(|(desc, read_desc, num_rows)| {
                proptest::collection::vec(arb_source_data_for_relation_desc(&desc), num_rows)
                    .prop_map(move |datas| (desc.clone(), datas, read_desc.clone()))
            });

        let combined_strat = (any::<EncodingConfig>(), strat);
        proptest!(|((config, (desc, source_datas, read_desc)) in combined_strat)| {
            roundtrip_source_data(&desc, source_datas, &read_desc, &config);
        });
    }

    #[mz_ore::test]
    fn roundtrip_error_nulls() {
        let desc = RelationDescBuilder::default()
            .with_column(
                "ts",
                SqlScalarType::TimestampTz { precision: None }.nullable(false),
            )
            .finish();
        let source_datas = vec![SourceData(Err(DataflowError::EvalError(
            EvalError::DateOutOfRange.into(),
        )))];
        let config = EncodingConfig::default();
        roundtrip_source_data(&desc, source_datas, &desc, &config);
    }

    fn is_sorted(array: &dyn Array) -> bool {
        let sort_options = arrow::compute::SortOptions::default();
        // Not all arrow arrays support comparison; treat those that don't as
        // unsorted.
        let Ok(cmp) = make_comparator(array, array, sort_options) else {
            return false;
        };
        (0..array.len())
            .tuple_windows()
            .all(|(i, j)| cmp(i, j).is_le())
    }

    fn get_data_type(schema: &impl Schema<SourceData>) -> arrow::datatypes::DataType {
        use mz_persist_types::columnar::ColumnEncoder;
        let array = Schema::encoder(schema).expect("valid schema").finish();
        Array::data_type(&array).clone()
    }

    #[track_caller]
    fn backward_compatible_testcase(
        old: &RelationDesc,
        new: &RelationDesc,
        migration: Migration,
        datas: &[SourceData],
    ) {
        let mut encoder = Schema::<SourceData>::encoder(old).expect("valid schema");
        for data in datas {
            encoder.append(data);
        }
        let old = encoder.finish();
        let new = Schema::<SourceData>::encoder(new)
            .expect("valid schema")
            .finish();
        let old: Arc<dyn Array> = Arc::new(old);
        let new: Arc<dyn Array> = Arc::new(new);
        let migrated = migration.migrate(Arc::clone(&old));
        assert_eq!(migrated.data_type(), new.data_type());

        // If the migration claims to preserve ordering, the migrated data
        // must still be sorted.
        if migration.preserves_order() && is_sorted(&old) {
            assert!(is_sorted(&migrated))
        }
    }

    #[mz_ore::test]
    fn backward_compatible_empty_add_column() {
        let old = RelationDesc::empty();
        let new = RelationDesc::from_names_and_types([("a", SqlScalarType::Bool.nullable(true))]);

        let old_data_type = get_data_type(&old);
        let new_data_type = get_data_type(&new);

        let migration = backward_compatible(&old_data_type, &new_data_type);
        assert!(migration.is_some());
    }

    #[mz_ore::test]
    fn backward_compatible_project_away_all() {
        let old = RelationDesc::from_names_and_types([("a", SqlScalarType::Bool.nullable(true))]);
        let new = RelationDesc::empty();

        let old_data_type = get_data_type(&old);
        let new_data_type = get_data_type(&new);

        let migration = backward_compatible(&old_data_type, &new_data_type);
        assert!(migration.is_some());
    }

    #[mz_ore::test]
    #[cfg_attr(miri, ignore)]
    fn backward_compatible_migrate() {
        let strat = (any::<RelationDesc>(), any::<RelationDesc>()).prop_flat_map(|(old, new)| {
            proptest::collection::vec(arb_source_data_for_relation_desc(&old), 2)
                .prop_map(move |datas| (old.clone(), new.clone(), datas))
        });

        proptest!(|((old, new, datas) in strat)| {
            let old_data_type = get_data_type(&old);
            let new_data_type = get_data_type(&new);

            if let Some(migration) = backward_compatible(&old_data_type, &new_data_type) {
                backward_compatible_testcase(&old, &new, migration, &datas);
            };
        });
    }

    #[mz_ore::test]
    #[cfg_attr(miri, ignore)]
    fn backward_compatible_migrate_from_common() {
        use mz_repr::SqlColumnType;
        fn test_case(old: RelationDesc, diffs: Vec<PropRelationDescDiff>, datas: Vec<SourceData>) {
            // Adding a nullable column or dropping a column should always be
            // backward compatible.
            let should_be_compatible = diffs.iter().all(|diff| match diff {
                PropRelationDescDiff::AddColumn {
                    typ: SqlColumnType { nullable, .. },
                    ..
                } => *nullable,
                PropRelationDescDiff::DropColumn { .. } => true,
                _ => false,
            });

            let mut new = old.clone();
            for diff in diffs.into_iter() {
                diff.apply(&mut new)
            }

            let old_data_type = get_data_type(&old);
            let new_data_type = get_data_type(&new);

            if let Some(migration) = backward_compatible(&old_data_type, &new_data_type) {
                backward_compatible_testcase(&old, &new, migration, &datas);
            } else if should_be_compatible {
                panic!("new DataType was not compatible when it should have been!");
            }
        }

        let strat = any::<RelationDesc>()
            .prop_flat_map(|desc| {
                proptest::collection::vec(arb_source_data_for_relation_desc(&desc), 2)
                    .no_shrink()
                    .prop_map(move |datas| (desc.clone(), datas))
            })
            .prop_flat_map(|(desc, datas)| {
                arb_relation_desc_diff(&desc)
                    .prop_map(move |diffs| (desc.clone(), diffs, datas.clone()))
            });

        proptest!(|((old, diffs, datas) in strat)| {
            test_case(old, diffs, datas);
        });
    }

    #[mz_ore::test]
    #[cfg_attr(miri, ignore)]
    fn empty_relation_desc_roundtrips() {
        let empty = RelationDesc::empty();
        let rows = proptest::collection::vec(arb_source_data_for_relation_desc(&empty), 0..8)
            .prop_map(move |datas| (empty.clone(), datas));

        proptest!(|((config, (desc, source_datas)) in (any::<EncodingConfig>(), rows))| {
            roundtrip_source_data(&desc, source_datas, &desc, &config);
        });
    }

    #[mz_ore::test]
    #[cfg_attr(miri, ignore)]
    fn arrow_datatype_consistent() {
        fn test_case(desc: RelationDesc, datas: Vec<SourceData>) {
            let half = datas.len() / 2;

            let mut encoder_a = <RelationDesc as Schema<SourceData>>::encoder(&desc).unwrap();
            for data in &datas[..half] {
                encoder_a.append(data);
            }
            let col_a = encoder_a.finish();

            let mut encoder_b = <RelationDesc as Schema<SourceData>>::encoder(&desc).unwrap();
            for data in &datas[half..] {
                encoder_b.append(data);
            }
            let col_b = encoder_b.finish();

            // The arrow DataType should depend only on the schema, not on
            // which data happened to be encoded.
            assert_eq!(col_a.data_type(), col_b.data_type());
        }

        let num_rows = 12;
        let strat = any::<RelationDesc>().prop_flat_map(|desc| {
            proptest::collection::vec(arb_source_data_for_relation_desc(&desc), num_rows)
                .prop_map(move |datas| (desc.clone(), datas))
        });

        proptest!(|((desc, data) in strat)| {
            test_case(desc, data);
        });
    }

    #[mz_ore::test]
    #[cfg_attr(miri, ignore)]
    fn source_proto_serialization_stability() {
        let min_protos = 10;
        let encoded = include_str!("snapshots/source-datas.txt");

        // Decode the pre-recorded (RelationDesc, SourceData) pairs.
        let mut decoded: Vec<(RelationDesc, SourceData)> = encoded
            .lines()
            .map(|s| {
                let (desc, data) = s.split_once(',').expect("comma separated data");
                let desc = base64::engine::general_purpose::STANDARD
                    .decode(desc)
                    .expect("valid base64");
                let data = base64::engine::general_purpose::STANDARD
                    .decode(data)
                    .expect("valid base64");
                (desc, data)
            })
            .map(|(desc, data)| {
                let desc = ProtoRelationDesc::decode(&desc[..]).expect("valid proto");
                let desc = desc.into_rust().expect("valid proto");
                let data = SourceData::decode(&data, &desc).expect("valid proto");
                (desc, data)
            })
            .collect();

        // If there are fewer than the minimum number of examples, generate
        // new ones deterministically.
        let mut runner = proptest::test_runner::TestRunner::deterministic();
        let strategy = RelationDesc::arbitrary().prop_flat_map(|desc| {
            arb_source_data_for_relation_desc(&desc).prop_map(move |data| (desc.clone(), data))
        });
        while decoded.len() < min_protos {
            let arbitrary_data = strategy
                .new_tree(&mut runner)
                .expect("source data")
                .current();
            decoded.push(arbitrary_data);
        }

        // Reencode and compare the strings.
        let mut reencoded = String::new();
        let mut buf = vec![];
        for (desc, data) in decoded {
            buf.clear();
            desc.into_proto().encode(&mut buf).expect("success");
            base64::engine::general_purpose::STANDARD.encode_string(buf.as_slice(), &mut reencoded);
            reencoded.push(',');

            buf.clear();
            data.encode(&mut buf);
            base64::engine::general_purpose::STANDARD.encode_string(buf.as_slice(), &mut reencoded);
            reencoded.push('\n');
        }

        // A failure here means the serialized representation of `SourceData`
        // or `RelationDesc` has changed and may no longer roundtrip with
        // previously written data.
        assert_eq!(
            encoded,
            reencoded.as_str(),
            "SourceData serde should be stable"
        )
    }
}