Skip to main content

mz_catalog/memory/
objects.rs

1// Copyright Materialize, Inc. and contributors. All rights reserved.
2//
3// Use of this software is governed by the Business Source License
4// included in the LICENSE file.
5//
6// As of the Change Date specified in that file, in accordance with
7// the Business Source License, use of this software will be governed
8// by the Apache License, Version 2.0.
9
10//! The current types used by the in-memory Catalog. Many of the objects in this module are
11//! extremely similar to the objects found in [`crate::durable::objects`] but in a format that is
12//! easier consumed by higher layers.
13
14use std::borrow::Cow;
15use std::collections::{BTreeMap, BTreeSet};
16use std::ops::{Deref, DerefMut};
17use std::sync::{Arc, LazyLock};
18use std::time::Duration;
19
20use chrono::{DateTime, Utc};
21use mz_adapter_types::compaction::CompactionWindow;
22use mz_adapter_types::connection::ConnectionId;
23use mz_compute_client::logging::LogVariant;
24use mz_controller::clusters::{ClusterRole, ClusterStatus, ReplicaConfig, ReplicaLogging};
25use mz_controller_types::{ClusterId, ReplicaId};
26use mz_expr::{MirScalarExpr, OptimizedMirRelationExpr};
27use mz_ore::collections::CollectionExt;
28use mz_repr::adt::mz_acl_item::{AclMode, MzAclItem, PrivilegeMap};
29use mz_repr::network_policy_id::NetworkPolicyId;
30use mz_repr::optimize::OptimizerFeatureOverrides;
31use mz_repr::refresh_schedule::RefreshSchedule;
32use mz_repr::role_id::RoleId;
33use mz_repr::{
34    CatalogItemId, ColumnName, Diff, GlobalId, RelationDesc, RelationVersion,
35    RelationVersionSelector, SqlColumnType, Timestamp, VersionedRelationDesc,
36};
37use mz_sql::ast::display::AstDisplay;
38use mz_sql::ast::{
39    ColumnDef, ColumnOption, ColumnOptionDef, ColumnVersioned, Expr, Raw, RawDataType, Statement,
40    UnresolvedItemName, Value, WithOptionValue,
41};
42use mz_sql::catalog::{
43    CatalogClusterReplica, CatalogError as SqlCatalogError, CatalogItem as SqlCatalogItem,
44    CatalogItemType as SqlCatalogItemType, CatalogItemType, CatalogSchema, CatalogType,
45    CatalogTypeDetails, DefaultPrivilegeAclItem, DefaultPrivilegeObject, IdReference,
46    RoleAttributes, RoleMembership, RoleVars, SystemObjectType,
47};
48use mz_sql::names::{
49    Aug, CommentObjectId, DatabaseId, DependencyIds, FullItemName, QualifiedItemName,
50    QualifiedSchemaName, ResolvedDatabaseSpecifier, ResolvedIds, SchemaId, SchemaSpecifier,
51};
52use mz_sql::plan::{
53    ClusterSchedule, ComputeReplicaConfig, ComputeReplicaIntrospectionConfig, ConnectionDetails,
54    CreateClusterManagedPlan, CreateClusterPlan, CreateClusterVariant, CreateSourcePlan,
55    HirRelationExpr, NetworkPolicyRule, PlanError, WebhookBodyFormat, WebhookHeaders,
56    WebhookValidation,
57};
58use mz_sql::rbac;
59use mz_sql::session::vars::OwnedVarInput;
60use mz_storage_client::controller::IntrospectionType;
61use mz_storage_types::connections::inline::ReferencedConnection;
62use mz_storage_types::sinks::{SinkEnvelope, StorageSinkConnection};
63use mz_storage_types::sources::load_generator::LoadGenerator;
64use mz_storage_types::sources::{
65    GenericSourceConnection, SourceConnection, SourceDesc, SourceEnvelope, SourceExportDataConfig,
66    SourceExportDetails, Timeline,
67};
68use serde::ser::SerializeSeq;
69use serde::{Deserialize, Serialize};
70use timely::progress::Antichain;
71use tracing::debug;
72
73use crate::builtin::{MZ_CATALOG_SERVER_CLUSTER, MZ_SYSTEM_CLUSTER};
74use crate::durable;
75use crate::durable::objects::item_type;
76
/// Used to update `self` from the input value while consuming the input value.
///
/// Unlike [`From`], this refreshes an *existing* in-memory object in place,
/// allowing fields that only exist in memory (e.g. derived lookup maps) to be
/// preserved across the update.
pub trait UpdateFrom<T>: From<T> {
    /// Updates `self` in place from `from`, consuming `from`.
    fn update_from(&mut self, from: T);
}
81
/// In-memory representation of a database, the top level of the catalog
/// namespace hierarchy (database -> schema -> item).
#[derive(Debug, Serialize, Clone, PartialEq, Eq)]
pub struct Database {
    /// Name of the database.
    pub name: String,
    /// Stable identifier of the database.
    pub id: DatabaseId,
    /// Object identifier — presumably a PostgreSQL-compatible OID; confirm.
    pub oid: u32,
    /// Schemas contained in this database, keyed by schema ID.
    #[serde(serialize_with = "mz_ore::serde::map_key_to_string")]
    pub schemas_by_id: BTreeMap<SchemaId, Schema>,
    /// Index from schema name to schema ID; must stay in sync with
    /// `schemas_by_id`.
    pub schemas_by_name: BTreeMap<String, SchemaId>,
    /// Role that owns this database.
    pub owner_id: RoleId,
    /// Access privileges granted on this database.
    pub privileges: PrivilegeMap,
}
93
94impl From<Database> for durable::Database {
95    fn from(database: Database) -> durable::Database {
96        durable::Database {
97            id: database.id,
98            oid: database.oid,
99            name: database.name,
100            owner_id: database.owner_id,
101            privileges: database.privileges.into_all_values().collect(),
102        }
103    }
104}
105
106impl From<durable::Database> for Database {
107    fn from(
108        durable::Database {
109            id,
110            oid,
111            name,
112            owner_id,
113            privileges,
114        }: durable::Database,
115    ) -> Database {
116        Database {
117            id,
118            oid,
119            schemas_by_id: BTreeMap::new(),
120            schemas_by_name: BTreeMap::new(),
121            name,
122            owner_id,
123            privileges: PrivilegeMap::from_mz_acl_items(privileges),
124        }
125    }
126}
127
128impl UpdateFrom<durable::Database> for Database {
129    fn update_from(
130        &mut self,
131        durable::Database {
132            id,
133            oid,
134            name,
135            owner_id,
136            privileges,
137        }: durable::Database,
138    ) {
139        self.id = id;
140        self.oid = oid;
141        self.name = name;
142        self.owner_id = owner_id;
143        self.privileges = PrivilegeMap::from_mz_acl_items(privileges);
144    }
145}
146
/// In-memory representation of a schema: a namespace of items within a
/// database (or within the ambient catalog).
#[derive(Debug, Serialize, Clone, PartialEq, Eq)]
pub struct Schema {
    /// Fully qualified name (database + schema name).
    pub name: QualifiedSchemaName,
    /// Stable identifier of the schema.
    pub id: SchemaSpecifier,
    /// Object identifier — presumably a PostgreSQL-compatible OID; confirm.
    pub oid: u32,
    /// Non-function, non-type items in this schema, keyed by item name.
    pub items: BTreeMap<String, CatalogItemId>,
    /// Functions in this schema, keyed by function name.
    pub functions: BTreeMap<String, CatalogItemId>,
    /// Types in this schema, keyed by type name.
    pub types: BTreeMap<String, CatalogItemId>,
    /// Role that owns this schema.
    pub owner_id: RoleId,
    /// Access privileges granted on this schema.
    pub privileges: PrivilegeMap,
}
158
159impl From<Schema> for durable::Schema {
160    fn from(schema: Schema) -> durable::Schema {
161        durable::Schema {
162            id: schema.id.into(),
163            oid: schema.oid,
164            name: schema.name.schema,
165            database_id: schema.name.database.id(),
166            owner_id: schema.owner_id,
167            privileges: schema.privileges.into_all_values().collect(),
168        }
169    }
170}
171
172impl From<durable::Schema> for Schema {
173    fn from(
174        durable::Schema {
175            id,
176            oid,
177            name,
178            database_id,
179            owner_id,
180            privileges,
181        }: durable::Schema,
182    ) -> Schema {
183        Schema {
184            name: QualifiedSchemaName {
185                database: database_id.into(),
186                schema: name,
187            },
188            id: id.into(),
189            oid,
190            items: BTreeMap::new(),
191            functions: BTreeMap::new(),
192            types: BTreeMap::new(),
193            owner_id,
194            privileges: PrivilegeMap::from_mz_acl_items(privileges),
195        }
196    }
197}
198
199impl UpdateFrom<durable::Schema> for Schema {
200    fn update_from(
201        &mut self,
202        durable::Schema {
203            id,
204            oid,
205            name,
206            database_id,
207            owner_id,
208            privileges,
209        }: durable::Schema,
210    ) {
211        self.name = QualifiedSchemaName {
212            database: database_id.into(),
213            schema: name,
214        };
215        self.id = id.into();
216        self.oid = oid;
217        self.owner_id = owner_id;
218        self.privileges = PrivilegeMap::from_mz_acl_items(privileges);
219    }
220}
221
/// In-memory representation of a role (SQL user or group).
#[derive(Debug, Serialize, Clone, PartialEq, Eq)]
pub struct Role {
    /// Name of the role.
    pub name: String,
    /// Stable identifier of the role.
    pub id: RoleId,
    /// Object identifier — presumably a PostgreSQL-compatible OID; confirm.
    pub oid: u32,
    /// Role attributes (see [`RoleAttributes`]).
    pub attributes: RoleAttributes,
    /// Roles this role is a member of.
    pub membership: RoleMembership,
    /// Per-role defaults for session variables.
    pub vars: RoleVars,
}
231
232impl Role {
233    pub fn is_user(&self) -> bool {
234        self.id.is_user()
235    }
236
237    pub fn vars<'a>(&'a self) -> impl Iterator<Item = (&'a str, &'a OwnedVarInput)> {
238        self.vars.map.iter().map(|(name, val)| (name.as_str(), val))
239    }
240}
241
242impl From<Role> for durable::Role {
243    fn from(role: Role) -> durable::Role {
244        durable::Role {
245            id: role.id,
246            oid: role.oid,
247            name: role.name,
248            attributes: role.attributes,
249            membership: role.membership,
250            vars: role.vars,
251        }
252    }
253}
254
255impl From<durable::Role> for Role {
256    fn from(
257        durable::Role {
258            id,
259            oid,
260            name,
261            attributes,
262            membership,
263            vars,
264        }: durable::Role,
265    ) -> Self {
266        Role {
267            name,
268            id,
269            oid,
270            attributes,
271            membership,
272            vars,
273        }
274    }
275}
276
277impl UpdateFrom<durable::Role> for Role {
278    fn update_from(
279        &mut self,
280        durable::Role {
281            id,
282            oid,
283            name,
284            attributes,
285            membership,
286            vars,
287        }: durable::Role,
288    ) {
289        self.id = id;
290        self.oid = oid;
291        self.name = name;
292        self.attributes = attributes;
293        self.membership = membership;
294        self.vars = vars;
295    }
296}
297
/// Authentication state associated with a role.
#[derive(Debug, Serialize, Clone, PartialEq, Eq)]
pub struct RoleAuth {
    /// The role this auth record belongs to.
    pub role_id: RoleId,
    /// Hashed password, if one has been set.
    pub password_hash: Option<String>,
    /// Last-updated timestamp (epoch-based integer; units not visible here —
    /// confirm whether seconds or milliseconds).
    pub updated_at: u64,
}
304
305impl From<RoleAuth> for durable::RoleAuth {
306    fn from(role_auth: RoleAuth) -> durable::RoleAuth {
307        durable::RoleAuth {
308            role_id: role_auth.role_id,
309            password_hash: role_auth.password_hash,
310            updated_at: role_auth.updated_at,
311        }
312    }
313}
314
315impl From<durable::RoleAuth> for RoleAuth {
316    fn from(
317        durable::RoleAuth {
318            role_id,
319            password_hash,
320            updated_at,
321        }: durable::RoleAuth,
322    ) -> RoleAuth {
323        RoleAuth {
324            role_id,
325            password_hash,
326            updated_at,
327        }
328    }
329}
330
331impl UpdateFrom<durable::RoleAuth> for RoleAuth {
332    fn update_from(&mut self, from: durable::RoleAuth) {
333        self.role_id = from.role_id;
334        self.password_hash = from.password_hash;
335    }
336}
337
/// In-memory representation of a compute cluster.
#[derive(Debug, Serialize, Clone, PartialEq)]
pub struct Cluster {
    /// Name of the cluster.
    pub name: String,
    /// Stable identifier of the cluster.
    pub id: ClusterId,
    /// Cluster configuration, including the managed/unmanaged variant and the
    /// workload class.
    pub config: ClusterConfig,
    /// Map from introspection log variant to the [`GlobalId`] of its index —
    /// presumably the introspection source indexes excluded from
    /// `bound_objects`; confirm.
    #[serde(skip)]
    pub log_indexes: BTreeMap<LogVariant, GlobalId>,
    /// Objects bound to this cluster. Does not include introspection source
    /// indexes.
    pub bound_objects: BTreeSet<CatalogItemId>,
    /// Index from replica name to replica ID; must stay in sync with
    /// `replicas_by_id_`.
    pub replica_id_by_name_: BTreeMap<String, ReplicaId>,
    /// Replicas of this cluster, keyed by replica ID.
    #[serde(serialize_with = "mz_ore::serde::map_key_to_string")]
    pub replicas_by_id_: BTreeMap<ReplicaId, ClusterReplica>,
    /// Role that owns this cluster.
    pub owner_id: RoleId,
    /// Access privileges granted on this cluster.
    pub privileges: PrivilegeMap,
}
354
impl Cluster {
    /// The role of the cluster. Currently used to set alert severity.
    pub fn role(&self) -> ClusterRole {
        // NOTE - These roles power monitoring systems. Do not change
        // them without talking to the cloud or observability groups.
        if self.name == MZ_SYSTEM_CLUSTER.name {
            ClusterRole::SystemCritical
        } else if self.name == MZ_CATALOG_SERVER_CLUSTER.name {
            ClusterRole::System
        } else {
            ClusterRole::User
        }
    }

    /// Returns `true` if the cluster is a managed cluster.
    pub fn is_managed(&self) -> bool {
        matches!(self.config.variant, ClusterVariant::Managed { .. })
    }

    /// Lists the user replicas, which are those that do not have the internal flag set.
    pub fn user_replicas(&self) -> impl Iterator<Item = &ClusterReplica> {
        self.replicas().filter(|r| !r.config.location.internal())
    }

    /// Lists all replicas in the cluster
    pub fn replicas(&self) -> impl Iterator<Item = &ClusterReplica> {
        self.replicas_by_id_.values()
    }

    /// Lookup a replica by ID.
    pub fn replica(&self, replica_id: ReplicaId) -> Option<&ClusterReplica> {
        self.replicas_by_id_.get(&replica_id)
    }

    /// Lookup a replica ID by name.
    pub fn replica_id(&self, name: &str) -> Option<ReplicaId> {
        self.replica_id_by_name_.get(name).copied()
    }

    /// Returns the availability zones of this cluster, if they exist.
    /// Only managed clusters carry availability zones.
    pub fn availability_zones(&self) -> Option<&[String]> {
        match &self.config.variant {
            ClusterVariant::Managed(managed) => Some(&managed.availability_zones),
            ClusterVariant::Unmanaged => None,
        }
    }

    /// Attempts to reconstruct the [`CreateClusterPlan`] that would produce
    /// this cluster's configuration (e.g. for `SHOW CREATE`).
    ///
    /// Returns a [`PlanError::Unsupported`] for unmanaged clusters, which are
    /// deprecated and cannot be round-tripped through a plan.
    pub fn try_to_plan(&self) -> Result<CreateClusterPlan, PlanError> {
        let name = self.name.clone();
        let variant = match &self.config.variant {
            ClusterVariant::Managed(ClusterVariantManaged {
                size,
                availability_zones,
                logging,
                replication_factor,
                optimizer_feature_overrides,
                schedule,
            }) => {
                // A logging interval of `None` means introspection is
                // disabled; otherwise carry the debug flag and interval over.
                let introspection = match logging {
                    ReplicaLogging {
                        log_logging,
                        interval: Some(interval),
                    } => Some(ComputeReplicaIntrospectionConfig {
                        debugging: *log_logging,
                        interval: interval.clone(),
                    }),
                    ReplicaLogging {
                        log_logging: _,
                        interval: None,
                    } => None,
                };
                let compute = ComputeReplicaConfig { introspection };
                CreateClusterVariant::Managed(CreateClusterManagedPlan {
                    replication_factor: replication_factor.clone(),
                    size: size.clone(),
                    availability_zones: availability_zones.clone(),
                    compute,
                    optimizer_feature_overrides: optimizer_feature_overrides.clone(),
                    schedule: schedule.clone(),
                })
            }
            ClusterVariant::Unmanaged => {
                // Unmanaged clusters are deprecated, so hopefully we can remove
                // them before we have to implement this.
                return Err(PlanError::Unsupported {
                    feature: "SHOW CREATE for unmanaged clusters".to_string(),
                    discussion_no: None,
                });
            }
        };
        let workload_class = self.config.workload_class.clone();
        Ok(CreateClusterPlan {
            name,
            variant,
            workload_class,
        })
    }
}
453
454impl From<Cluster> for durable::Cluster {
455    fn from(cluster: Cluster) -> durable::Cluster {
456        durable::Cluster {
457            id: cluster.id,
458            name: cluster.name,
459            owner_id: cluster.owner_id,
460            privileges: cluster.privileges.into_all_values().collect(),
461            config: cluster.config.into(),
462        }
463    }
464}
465
466impl From<durable::Cluster> for Cluster {
467    fn from(
468        durable::Cluster {
469            id,
470            name,
471            owner_id,
472            privileges,
473            config,
474        }: durable::Cluster,
475    ) -> Self {
476        Cluster {
477            name: name.clone(),
478            id,
479            bound_objects: BTreeSet::new(),
480            log_indexes: BTreeMap::new(),
481            replica_id_by_name_: BTreeMap::new(),
482            replicas_by_id_: BTreeMap::new(),
483            owner_id,
484            privileges: PrivilegeMap::from_mz_acl_items(privileges),
485            config: config.into(),
486        }
487    }
488}
489
490impl UpdateFrom<durable::Cluster> for Cluster {
491    fn update_from(
492        &mut self,
493        durable::Cluster {
494            id,
495            name,
496            owner_id,
497            privileges,
498            config,
499        }: durable::Cluster,
500    ) {
501        self.id = id;
502        self.name = name;
503        self.owner_id = owner_id;
504        self.privileges = PrivilegeMap::from_mz_acl_items(privileges);
505        self.config = config.into();
506    }
507}
508
/// In-memory representation of a single replica of a cluster.
#[derive(Debug, Serialize, Clone, PartialEq)]
pub struct ClusterReplica {
    /// Name of the replica.
    pub name: String,
    /// ID of the cluster this replica belongs to.
    pub cluster_id: ClusterId,
    /// Stable identifier of the replica.
    pub replica_id: ReplicaId,
    /// Replica configuration (location, logging, etc. — see [`ReplicaConfig`]).
    pub config: ReplicaConfig,
    /// Role that owns this replica.
    pub owner_id: RoleId,
}
517
518impl From<ClusterReplica> for durable::ClusterReplica {
519    fn from(replica: ClusterReplica) -> durable::ClusterReplica {
520        durable::ClusterReplica {
521            cluster_id: replica.cluster_id,
522            replica_id: replica.replica_id,
523            name: replica.name,
524            config: replica.config.into(),
525            owner_id: replica.owner_id,
526        }
527    }
528}
529
/// Status of a single process of a cluster replica, as observed at `time`.
#[derive(Debug, Serialize, Clone, PartialEq, Eq)]
pub struct ClusterReplicaProcessStatus {
    /// The reported status of the process.
    pub status: ClusterStatus,
    /// When the status was observed.
    pub time: DateTime<Utc>,
}
535
/// The set of external objects a source can reference, as of `updated_at`.
/// NOTE(review): presumably the references discovered upstream of the source
/// during purification — confirm against the producer of this data.
#[derive(Debug, Serialize, Clone, PartialEq)]
pub struct SourceReferences {
    /// When this set of references was last refreshed (epoch-based integer;
    /// units not visible here — confirm).
    pub updated_at: u64,
    /// The individual referenceable objects.
    pub references: Vec<SourceReference>,
}
541
/// A single external object referenceable by a source.
#[derive(Debug, Serialize, Clone, PartialEq)]
pub struct SourceReference {
    /// Name of the referenced object.
    pub name: String,
    /// Namespace of the referenced object (e.g. an upstream schema), if any.
    pub namespace: Option<String>,
    /// Column names of the referenced object.
    pub columns: Vec<String>,
}
548
549impl From<SourceReference> for durable::SourceReference {
550    fn from(source_reference: SourceReference) -> durable::SourceReference {
551        durable::SourceReference {
552            name: source_reference.name,
553            namespace: source_reference.namespace,
554            columns: source_reference.columns,
555        }
556    }
557}
558
559impl SourceReferences {
560    pub fn to_durable(self, source_id: CatalogItemId) -> durable::SourceReferences {
561        durable::SourceReferences {
562            source_id,
563            updated_at: self.updated_at,
564            references: self.references.into_iter().map(Into::into).collect(),
565        }
566    }
567}
568
569impl From<durable::SourceReference> for SourceReference {
570    fn from(source_reference: durable::SourceReference) -> SourceReference {
571        SourceReference {
572            name: source_reference.name,
573            namespace: source_reference.namespace,
574            columns: source_reference.columns,
575        }
576    }
577}
578
579impl From<durable::SourceReferences> for SourceReferences {
580    fn from(source_references: durable::SourceReferences) -> SourceReferences {
581        SourceReferences {
582            updated_at: source_references.updated_at,
583            references: source_references
584                .references
585                .into_iter()
586                .map(|source_reference| source_reference.into())
587                .collect(),
588        }
589    }
590}
591
592impl From<mz_sql::plan::SourceReference> for SourceReference {
593    fn from(source_reference: mz_sql::plan::SourceReference) -> SourceReference {
594        SourceReference {
595            name: source_reference.name,
596            namespace: source_reference.namespace,
597            columns: source_reference.columns,
598        }
599    }
600}
601
602impl From<mz_sql::plan::SourceReferences> for SourceReferences {
603    fn from(source_references: mz_sql::plan::SourceReferences) -> SourceReferences {
604        SourceReferences {
605            updated_at: source_references.updated_at,
606            references: source_references
607                .references
608                .into_iter()
609                .map(|source_reference| source_reference.into())
610                .collect(),
611        }
612    }
613}
614
615impl From<SourceReferences> for mz_sql::plan::SourceReferences {
616    fn from(source_references: SourceReferences) -> mz_sql::plan::SourceReferences {
617        mz_sql::plan::SourceReferences {
618            updated_at: source_references.updated_at,
619            references: source_references
620                .references
621                .into_iter()
622                .map(|source_reference| source_reference.into())
623                .collect(),
624        }
625    }
626}
627
628impl From<SourceReference> for mz_sql::plan::SourceReference {
629    fn from(source_reference: SourceReference) -> mz_sql::plan::SourceReference {
630        mz_sql::plan::SourceReference {
631            name: source_reference.name,
632            namespace: source_reference.namespace,
633            columns: source_reference.columns,
634        }
635    }
636}
637
/// A named entry in the catalog: an item plus its identity, ownership,
/// privileges, and dependency bookkeeping.
#[derive(Clone, Debug, Serialize)]
pub struct CatalogEntry {
    /// The item itself (table, source, view, etc.).
    pub item: CatalogItem,
    /// Items that reference this entry (the reverse of `references`).
    #[serde(skip)]
    pub referenced_by: Vec<CatalogItemId>,
    // TODO(database-issues#7922)––this should have an invariant tied to it that all
    // dependents (i.e. entries in this field) have IDs greater than this
    // entry's ID.
    /// Items that depend on this entry (the reverse of `uses`).
    #[serde(skip)]
    pub used_by: Vec<CatalogItemId>,
    /// Stable identifier of this entry; never changes for the lifetime of the
    /// object.
    pub id: CatalogItemId,
    /// Object identifier — presumably a PostgreSQL-compatible OID; confirm.
    pub oid: u32,
    /// Fully qualified name (database, schema, item).
    pub name: QualifiedItemName,
    /// Role that owns this entry.
    pub owner_id: RoleId,
    /// Access privileges granted on this entry.
    pub privileges: PrivilegeMap,
}
654
/// A [`CatalogEntry`] that is associated with a specific "collection" of data.
/// A single item in the catalog may be associated with multiple "collections".
///
/// Here "collection" generally means a pTVC, e.g. a Persist Shard, an Index, a
/// currently running dataflow, etc.
///
/// Items in the Catalog have a stable name -> ID mapping, in other words for
/// the entire lifetime of an object its [`CatalogItemId`] will _never_ change.
/// Similarly, we need to maintain a stable mapping from [`GlobalId`] to pTVC.
/// This presents a challenge when `ALTER`-ing an object, e.g. adding columns
/// to a table. We can't just change the schema of the underlying Persist Shard
/// because that would be rebinding the [`GlobalId`] of the pTVC. Instead we
/// allocate a new [`GlobalId`] to refer to the new version of the table, and
/// then the [`CatalogEntry`] tracks the [`GlobalId`] for each version.
///
/// TODO(ct): Add a note here if we end up using this for associating continual
/// tasks with a single catalog item.
#[derive(Clone, Debug)]
pub struct CatalogCollectionEntry {
    /// The underlying catalog entry.
    pub entry: CatalogEntry,
    /// Which version of the entry's relation this collection refers to.
    pub version: RelationVersionSelector,
}
677
678impl CatalogCollectionEntry {
679    pub fn relation_desc(&self) -> Option<Cow<'_, RelationDesc>> {
680        self.item().relation_desc(self.version)
681    }
682}
683
684impl mz_sql::catalog::CatalogCollectionItem for CatalogCollectionEntry {
685    fn relation_desc(&self) -> Option<Cow<'_, RelationDesc>> {
686        self.item().relation_desc(self.version)
687    }
688
689    fn global_id(&self) -> GlobalId {
690        self.entry
691            .item()
692            .global_id_for_version(self.version)
693            .expect("catalog corruption, missing version!")
694    }
695}
696
// Allow a `CatalogCollectionEntry` to be used anywhere a `&CatalogEntry` is
// expected, by dereferencing to the wrapped entry.
impl Deref for CatalogCollectionEntry {
    type Target = CatalogEntry;

    fn deref(&self) -> &CatalogEntry {
        &self.entry
    }
}
704
// Almost every method below delegates directly to the wrapped `CatalogEntry`;
// only `at_version` (which re-wraps the entry with a new version selector)
// involves this type's own state beyond forwarding.
impl mz_sql::catalog::CatalogItem for CatalogCollectionEntry {
    fn name(&self) -> &QualifiedItemName {
        self.entry.name()
    }

    fn id(&self) -> CatalogItemId {
        self.entry.id()
    }

    fn global_ids(&self) -> Box<dyn Iterator<Item = GlobalId> + '_> {
        Box::new(self.entry.global_ids())
    }

    fn oid(&self) -> u32 {
        self.entry.oid()
    }

    fn func(&self) -> Result<&'static mz_sql::func::Func, SqlCatalogError> {
        self.entry.func()
    }

    fn source_desc(&self) -> Result<Option<&SourceDesc<ReferencedConnection>>, SqlCatalogError> {
        self.entry.source_desc()
    }

    fn connection(
        &self,
    ) -> Result<mz_storage_types::connections::Connection<ReferencedConnection>, SqlCatalogError>
    {
        // Fully qualified call to disambiguate from the inherent method on
        // `CatalogEntry`, if any.
        mz_sql::catalog::CatalogItem::connection(&self.entry)
    }

    fn create_sql(&self) -> &str {
        self.entry.create_sql()
    }

    fn item_type(&self) -> SqlCatalogItemType {
        self.entry.item_type()
    }

    fn index_details(&self) -> Option<(&[MirScalarExpr], GlobalId)> {
        self.entry.index_details()
    }

    fn writable_table_details(&self) -> Option<&[Expr<Aug>]> {
        self.entry.writable_table_details()
    }

    fn replacement_target(&self) -> Option<CatalogItemId> {
        self.entry.replacement_target()
    }

    fn type_details(&self) -> Option<&CatalogTypeDetails<IdReference>> {
        self.entry.type_details()
    }

    fn references(&self) -> &ResolvedIds {
        self.entry.references()
    }

    fn uses(&self) -> BTreeSet<CatalogItemId> {
        self.entry.uses()
    }

    fn referenced_by(&self) -> &[CatalogItemId] {
        self.entry.referenced_by()
    }

    fn used_by(&self) -> &[CatalogItemId] {
        self.entry.used_by()
    }

    fn subsource_details(
        &self,
    ) -> Option<(CatalogItemId, &UnresolvedItemName, &SourceExportDetails)> {
        self.entry.subsource_details()
    }

    fn source_export_details(
        &self,
    ) -> Option<(
        CatalogItemId,
        &UnresolvedItemName,
        &SourceExportDetails,
        &SourceExportDataConfig<ReferencedConnection>,
    )> {
        self.entry.source_export_details()
    }

    fn is_progress_source(&self) -> bool {
        self.entry.is_progress_source()
    }

    fn progress_id(&self) -> Option<CatalogItemId> {
        self.entry.progress_id()
    }

    fn owner_id(&self) -> RoleId {
        *self.entry.owner_id()
    }

    fn privileges(&self) -> &PrivilegeMap {
        self.entry.privileges()
    }

    fn cluster_id(&self) -> Option<ClusterId> {
        self.entry.item().cluster_id()
    }

    fn at_version(
        &self,
        version: RelationVersionSelector,
    ) -> Box<dyn mz_sql::catalog::CatalogCollectionItem> {
        // Re-wrap the same entry with the requested version selector.
        Box::new(CatalogCollectionEntry {
            entry: self.entry.clone(),
            version,
        })
    }

    fn latest_version(&self) -> Option<RelationVersion> {
        self.entry.latest_version()
    }
}
828
/// The in-memory representation of a catalog item: one variant per kind of
/// SQL object that can live in a schema.
#[derive(Debug, Clone, Serialize)]
pub enum CatalogItem {
    Table(Table),
    Source(Source),
    Log(Log),
    View(View),
    MaterializedView(MaterializedView),
    Sink(Sink),
    Index(Index),
    Type(Type),
    Func(Func),
    Secret(Secret),
    Connection(Connection),
    ContinualTask(ContinualTask),
}
844
845impl From<CatalogEntry> for durable::Item {
846    fn from(entry: CatalogEntry) -> durable::Item {
847        let (create_sql, global_id, extra_versions) = entry.item.into_serialized();
848        durable::Item {
849            id: entry.id,
850            oid: entry.oid,
851            global_id,
852            schema_id: entry.name.qualifiers.schema_spec.into(),
853            name: entry.name.item,
854            create_sql,
855            owner_id: entry.owner_id,
856            privileges: entry.privileges.into_all_values().collect(),
857            extra_versions,
858        }
859    }
860}
861
/// In-memory representation of a table, covering both writable tables and
/// tables fed by an upstream data source (see [`TableDataSource`]).
#[derive(Debug, Clone, Serialize)]
pub struct Table {
    /// Parse-able SQL that defines this table.
    pub create_sql: Option<String>,
    /// [`VersionedRelationDesc`] of this table, derived from the `create_sql`.
    pub desc: VersionedRelationDesc,
    /// Versions of this table, and the [`GlobalId`]s that refer to them.
    /// Ordered by version, so the last entry is the newest.
    #[serde(serialize_with = "mz_ore::serde::map_key_to_string")]
    pub collections: BTreeMap<RelationVersion, GlobalId>,
    /// If created in the `TEMPORARY` schema, the [`ConnectionId`] for that session.
    #[serde(skip)]
    pub conn_id: Option<ConnectionId>,
    /// Other catalog objects referenced by this table, e.g. custom types.
    pub resolved_ids: ResolvedIds,
    /// Custom compaction window, e.g. set via `ALTER RETAIN HISTORY`.
    pub custom_logical_compaction_window: Option<CompactionWindow>,
    /// Whether the table's logical compaction window is controlled by the ['metrics_retention']
    /// session variable.
    ///
    /// ['metrics_retention']: mz_sql::session::vars::METRICS_RETENTION
    pub is_retained_metrics_object: bool,
    /// Where data for this table comes from, e.g. `INSERT` statements or an upstream source.
    pub data_source: TableDataSource,
}
886
887impl Table {
888    pub fn timeline(&self) -> Timeline {
889        match &self.data_source {
890            // The Coordinator controls insertions for writable tables
891            // (including system tables), so they are realtime.
892            TableDataSource::TableWrites { .. } => Timeline::EpochMilliseconds,
893            TableDataSource::DataSource { timeline, .. } => timeline.clone(),
894        }
895    }
896
897    /// Returns all of the [`GlobalId`]s that this [`Table`] can be referenced by.
898    pub fn global_ids(&self) -> impl Iterator<Item = GlobalId> + '_ {
899        self.collections.values().copied()
900    }
901
902    /// Returns the latest [`GlobalId`] for this [`Table`] which should be used for writes.
903    pub fn global_id_writes(&self) -> GlobalId {
904        *self
905            .collections
906            .last_key_value()
907            .expect("at least one version of a table")
908            .1
909    }
910
911    /// Returns all of the collections and their [`RelationDesc`]s associated with this [`Table`].
912    pub fn collection_descs(
913        &self,
914    ) -> impl Iterator<Item = (GlobalId, RelationVersion, RelationDesc)> + '_ {
915        self.collections.iter().map(|(version, gid)| {
916            let desc = self
917                .desc
918                .at_version(RelationVersionSelector::Specific(*version));
919            (*gid, *version, desc)
920        })
921    }
922
923    /// Returns the [`RelationDesc`] for a specific [`GlobalId`].
924    pub fn desc_for(&self, id: &GlobalId) -> RelationDesc {
925        let (version, _gid) = self
926            .collections
927            .iter()
928            .find(|(_version, gid)| *gid == id)
929            .expect("GlobalId to exist");
930        self.desc
931            .at_version(RelationVersionSelector::Specific(*version))
932    }
933}
934
/// Where the data for a [`Table`] comes from.
#[derive(Clone, Debug, Serialize)]
pub enum TableDataSource {
    /// The table owns data created via INSERT/UPDATE/DELETE statements.
    TableWrites {
        /// Per-column default expressions used for writes.
        #[serde(skip)]
        defaults: Vec<Expr<Aug>>,
    },

    /// The table receives its data from the identified `DataSourceDesc`.
    /// This table type does not support INSERT/UPDATE/DELETE statements.
    DataSource {
        /// Description of the upstream data source.
        desc: DataSourceDesc,
        /// Timeline the upstream data lives on.
        timeline: Timeline,
    },
}
950
#[derive(Debug, Clone, PartialEq, Eq, Serialize)]
pub enum DataSourceDesc {
    /// Receives data from an external system
    Ingestion {
        /// Description of the ingestion, e.g. its external connection.
        desc: SourceDesc<ReferencedConnection>,
        /// Cluster the ingestion runs on.
        cluster_id: ClusterId,
    },
    /// Receives data from an external system; used for ingestions created
    /// with the old source syntax.
    OldSyntaxIngestion {
        desc: SourceDesc<ReferencedConnection>,
        cluster_id: ClusterId,
        // If we're dealing with an old syntax ingestion the progress id will be some other collection
        // and the ingestion itself will have the data from an external reference
        progress_subsource: CatalogItemId,
        /// Configuration (e.g. encoding, envelope) of the exported data.
        data_config: SourceExportDataConfig<ReferencedConnection>,
        /// Source-specific export details.
        details: SourceExportDetails,
    },
    /// This source receives its data from the identified ingestion,
    /// specifically the output identified by `external_reference`.
    /// N.B. that `external_reference` should not be used to identify
    /// anything downstream of purification, as the purification process
    /// encodes source-specific identifiers into the `details` struct.
    /// The `external_reference` field is only used here for displaying
    /// human-readable names in system tables.
    IngestionExport {
        ingestion_id: CatalogItemId,
        external_reference: UnresolvedItemName,
        details: SourceExportDetails,
        data_config: SourceExportDataConfig<ReferencedConnection>,
    },
    /// Receives introspection data from an internal system
    Introspection(IntrospectionType),
    /// Receives data from the source's reclocking/remapping operations.
    Progress,
    /// Receives data from HTTP requests.
    Webhook {
        /// Optional components used to validate a webhook request.
        validate_using: Option<WebhookValidation>,
        /// Describes how we deserialize the body of a webhook request.
        body_format: WebhookBodyFormat,
        /// Describes whether or not to include headers and how to map them.
        headers: WebhookHeaders,
        /// The cluster which this source is associated with.
        cluster_id: ClusterId,
    },
    /// Exposes the contents of the catalog shard.
    Catalog,
}
999
1000impl From<IntrospectionType> for DataSourceDesc {
1001    fn from(typ: IntrospectionType) -> Self {
1002        Self::Introspection(typ)
1003    }
1004}
1005
1006impl DataSourceDesc {
1007    /// The key and value formats of the data source.
1008    pub fn formats(&self) -> (Option<&str>, Option<&str>) {
1009        match &self {
1010            DataSourceDesc::Ingestion { .. } => (None, None),
1011            DataSourceDesc::OldSyntaxIngestion { data_config, .. } => {
1012                match &data_config.encoding.as_ref() {
1013                    Some(encoding) => match &encoding.key {
1014                        Some(key) => (Some(key.type_()), Some(encoding.value.type_())),
1015                        None => (None, Some(encoding.value.type_())),
1016                    },
1017                    None => (None, None),
1018                }
1019            }
1020            DataSourceDesc::IngestionExport { data_config, .. } => match &data_config.encoding {
1021                Some(encoding) => match &encoding.key {
1022                    Some(key) => (Some(key.type_()), Some(encoding.value.type_())),
1023                    None => (None, Some(encoding.value.type_())),
1024                },
1025                None => (None, None),
1026            },
1027            DataSourceDesc::Introspection(_)
1028            | DataSourceDesc::Webhook { .. }
1029            | DataSourceDesc::Progress
1030            | DataSourceDesc::Catalog => (None, None),
1031        }
1032    }
1033
1034    /// Envelope of the data source.
1035    pub fn envelope(&self) -> Option<&str> {
1036        // Note how "none"/"append-only" is different from `None`. Source
1037        // sources don't have an envelope (internal logs, for example), while
1038        // other sources have an envelope that we call the "NONE"-envelope.
1039
1040        fn envelope_string(envelope: &SourceEnvelope) -> &str {
1041            match envelope {
1042                SourceEnvelope::None(_) => "none",
1043                SourceEnvelope::Upsert(upsert_envelope) => match upsert_envelope.style {
1044                    mz_storage_types::sources::envelope::UpsertStyle::Default(_) => "upsert",
1045                    mz_storage_types::sources::envelope::UpsertStyle::Debezium { .. } => {
1046                        // NOTE(aljoscha): Should we somehow mark that this is
1047                        // using upsert internally? See note above about
1048                        // DEBEZIUM.
1049                        "debezium"
1050                    }
1051                    mz_storage_types::sources::envelope::UpsertStyle::ValueErrInline { .. } => {
1052                        "upsert-value-err-inline"
1053                    }
1054                },
1055                SourceEnvelope::CdcV2 => {
1056                    // TODO(aljoscha): Should we even report this? It's
1057                    // currently not exposed.
1058                    "materialize"
1059                }
1060            }
1061        }
1062
1063        match self {
1064            // NOTE(aljoscha): We could move the block for ingestions into
1065            // `SourceEnvelope` itself, but that one feels more like an internal
1066            // thing and adapter should own how we represent envelopes as a
1067            // string? It would not be hard to convince me otherwise, though.
1068            DataSourceDesc::Ingestion { .. } => None,
1069            DataSourceDesc::OldSyntaxIngestion { data_config, .. } => {
1070                Some(envelope_string(&data_config.envelope))
1071            }
1072            DataSourceDesc::IngestionExport { data_config, .. } => {
1073                Some(envelope_string(&data_config.envelope))
1074            }
1075            DataSourceDesc::Introspection(_)
1076            | DataSourceDesc::Webhook { .. }
1077            | DataSourceDesc::Progress
1078            | DataSourceDesc::Catalog => None,
1079        }
1080    }
1081}
1082
#[derive(Debug, Clone, Serialize)]
pub struct Source {
    /// Parse-able SQL that defines this source.
    pub create_sql: Option<String>,
    /// [`GlobalId`] used to reference this source from outside the catalog.
    pub global_id: GlobalId,
    // TODO: Unskip: currently blocked on some inner BTreeMap<X, _> problems.
    #[serde(skip)]
    pub data_source: DataSourceDesc,
    /// [`RelationDesc`] of this source, derived from the `create_sql`.
    pub desc: RelationDesc,
    /// The timeline this source exists on.
    pub timeline: Timeline,
    /// Other catalog objects referenced by this source, e.g. custom types.
    pub resolved_ids: ResolvedIds,
    /// This value is ignored for subsources, i.e. for
    /// [`DataSourceDesc::IngestionExport`]. Instead, it uses the primary
    /// source's logical compaction window.
    pub custom_logical_compaction_window: Option<CompactionWindow>,
    /// Whether the source's logical compaction window is controlled by
    /// METRICS_RETENTION
    pub is_retained_metrics_object: bool,
}
1106
impl Source {
    /// Creates a new `Source`.
    ///
    /// `custom_logical_compaction_window` is a fallback; a compaction window
    /// specified on the plan itself takes precedence.
    ///
    /// # Panics
    /// - If an ingestion-based plan is not given a cluster_id.
    /// - If a non-ingestion-based source has a defined cluster config in its plan.
    /// - If a non-ingestion-based source is given a cluster_id.
    pub fn new(
        plan: CreateSourcePlan,
        global_id: GlobalId,
        resolved_ids: ResolvedIds,
        custom_logical_compaction_window: Option<CompactionWindow>,
        is_retained_metrics_object: bool,
    ) -> Source {
        Source {
            create_sql: Some(plan.source.create_sql),
            // Translate the planner's notion of a data source into the
            // catalog's, enforcing the cluster invariants documented above.
            data_source: match plan.source.data_source {
                mz_sql::plan::DataSourceDesc::Ingestion(desc) => DataSourceDesc::Ingestion {
                    desc,
                    cluster_id: plan
                        .in_cluster
                        .expect("ingestion-based sources must be given a cluster ID"),
                },
                mz_sql::plan::DataSourceDesc::OldSyntaxIngestion {
                    desc,
                    progress_subsource,
                    data_config,
                    details,
                } => DataSourceDesc::OldSyntaxIngestion {
                    desc,
                    cluster_id: plan
                        .in_cluster
                        .expect("ingestion-based sources must be given a cluster ID"),
                    progress_subsource,
                    data_config,
                    details,
                },
                mz_sql::plan::DataSourceDesc::Progress => {
                    assert!(
                        plan.in_cluster.is_none(),
                        "subsources must not have a host config or cluster_id defined"
                    );
                    DataSourceDesc::Progress
                }
                mz_sql::plan::DataSourceDesc::IngestionExport {
                    ingestion_id,
                    external_reference,
                    details,
                    data_config,
                } => {
                    assert!(
                        plan.in_cluster.is_none(),
                        "subsources must not have a host config or cluster_id defined"
                    );
                    DataSourceDesc::IngestionExport {
                        ingestion_id,
                        external_reference,
                        details,
                        data_config,
                    }
                }
                mz_sql::plan::DataSourceDesc::Webhook {
                    validate_using,
                    body_format,
                    headers,
                    cluster_id,
                } => {
                    // Webhooks carry their cluster on the plan (`in_cluster`);
                    // a cluster set on the data source itself is unexpected.
                    mz_ore::soft_assert_or_log!(
                        cluster_id.is_none(),
                        "cluster_id set at Source level for Webhooks"
                    );
                    DataSourceDesc::Webhook {
                        validate_using,
                        body_format,
                        headers,
                        cluster_id: plan
                            .in_cluster
                            .expect("webhook sources must be given a cluster ID"),
                    }
                }
            },
            desc: plan.source.desc,
            global_id,
            timeline: plan.timeline,
            resolved_ids,
            // Prefer the window from the SQL plan over the caller-provided default.
            custom_logical_compaction_window: plan
                .source
                .compaction_window
                .or(custom_logical_compaction_window),
            is_retained_metrics_object,
        }
    }

    /// Type of the source.
    pub fn source_type(&self) -> &str {
        match &self.data_source {
            DataSourceDesc::Ingestion { desc, .. }
            | DataSourceDesc::OldSyntaxIngestion { desc, .. } => desc.connection.name(),
            DataSourceDesc::Progress => "progress",
            DataSourceDesc::IngestionExport { .. } => "subsource",
            DataSourceDesc::Introspection(_) | DataSourceDesc::Catalog => "source",
            DataSourceDesc::Webhook { .. } => "webhook",
        }
    }

    /// Connection ID of the source, if one exists.
    pub fn connection_id(&self) -> Option<CatalogItemId> {
        match &self.data_source {
            DataSourceDesc::Ingestion { desc, .. }
            | DataSourceDesc::OldSyntaxIngestion { desc, .. } => desc.connection.connection_id(),
            DataSourceDesc::IngestionExport { .. }
            | DataSourceDesc::Introspection(_)
            | DataSourceDesc::Webhook { .. }
            | DataSourceDesc::Progress
            | DataSourceDesc::Catalog => None,
        }
    }

    /// The single [`GlobalId`] that refers to this Source.
    pub fn global_id(&self) -> GlobalId {
        self.global_id
    }

    /// The expensive resource that each source consumes is persist shards. To
    /// prevent abuse, we want to prevent users from creating sources that use an
    /// unbounded number of persist shards. But we also don't want to count
    /// persist shards that are mandated by the system (e.g., the progress
    /// shard) so that future versions of Materialize can introduce additional
    /// per-source shards (e.g., a per-source status shard) without impacting
    /// the limit calculation.
    pub fn user_controllable_persist_shard_count(&self) -> i64 {
        match &self.data_source {
            DataSourceDesc::Ingestion { .. } => 0,
            DataSourceDesc::OldSyntaxIngestion { desc, .. } => {
                match &desc.connection {
                    // These multi-output sources do not use their primary
                    // source's data shard, so we don't include it in accounting
                    // for users.
                    GenericSourceConnection::Postgres(_)
                    | GenericSourceConnection::MySql(_)
                    | GenericSourceConnection::SqlServer(_) => 0,
                    GenericSourceConnection::LoadGenerator(lg) => match lg.load_generator {
                        // Load generators that output data in their primary shard
                        LoadGenerator::Clock
                        | LoadGenerator::Counter { .. }
                        | LoadGenerator::Datums
                        | LoadGenerator::KeyValue(_) => 1,
                        // Like the multi-output sources above, these do not
                        // store data in their primary shard.
                        LoadGenerator::Auction
                        | LoadGenerator::Marketing
                        | LoadGenerator::Tpch { .. } => 0,
                    },
                    GenericSourceConnection::Kafka(_) => 1,
                }
            }
            //  DataSourceDesc::IngestionExport represents a subsource, which
            //  use a data shard.
            DataSourceDesc::IngestionExport { .. } => 1,
            DataSourceDesc::Webhook { .. } => 1,
            // Introspection, catalog, and progress subsources are not under the user's control, so
            // shouldn't count toward their quota.
            DataSourceDesc::Introspection(_)
            | DataSourceDesc::Progress
            | DataSourceDesc::Catalog => 0,
        }
    }
}
1273
/// An introspection log source whose contents are described by a [`LogVariant`].
///
/// Note: unlike most catalog items, a log has no `create_sql`.
#[derive(Debug, Clone, Serialize)]
pub struct Log {
    /// The category of data this log stores.
    pub variant: LogVariant,
    /// [`GlobalId`] used to reference this log from outside the catalog.
    pub global_id: GlobalId,
}
1281
1282impl Log {
1283    /// The single [`GlobalId`] that refers to this Log.
1284    pub fn global_id(&self) -> GlobalId {
1285        self.global_id
1286    }
1287}
1288
#[derive(Debug, Clone, Serialize)]
pub struct Sink {
    /// Parse-able SQL that defines this sink.
    pub create_sql: String,
    /// [`GlobalId`] used to reference this sink from outside the catalog, e.g. storage.
    pub global_id: GlobalId,
    /// Collection we read into this sink.
    pub from: GlobalId,
    /// Connection to the external service we're sinking into, e.g. Kafka.
    pub connection: StorageSinkConnection<ReferencedConnection>,
    /// Envelope we use to sink into the external system.
    ///
    /// TODO(guswynn): this probably should just be in the `connection`.
    pub envelope: SinkEnvelope,
    /// Emit an initial snapshot into the sink.
    pub with_snapshot: bool,
    /// Used to fence other writes into this sink as we evolve the upstream materialized view.
    pub version: u64,
    /// Other catalog objects this sink references.
    pub resolved_ids: ResolvedIds,
    /// Cluster this sink runs on.
    pub cluster_id: ClusterId,
    /// Commit interval for the sink (`None` presumably means the default is
    /// used — NOTE(review): confirm).
    pub commit_interval: Option<Duration>,
}
1314
1315impl Sink {
1316    pub fn sink_type(&self) -> &str {
1317        self.connection.name()
1318    }
1319
1320    /// Envelope of the sink.
1321    pub fn envelope(&self) -> Option<&str> {
1322        match &self.envelope {
1323            SinkEnvelope::Debezium => Some("debezium"),
1324            SinkEnvelope::Upsert => Some("upsert"),
1325        }
1326    }
1327
1328    /// Output a combined format string of the sink. For legacy reasons
1329    /// if the key-format is none or the key & value formats are
1330    /// both the same (either avro or json), we return the value format name,
1331    /// otherwise we return a composite name.
1332    pub fn combined_format(&self) -> Option<Cow<'_, str>> {
1333        match &self.connection {
1334            StorageSinkConnection::Kafka(connection) => Some(connection.format.get_format_name()),
1335            _ => None,
1336        }
1337    }
1338
1339    /// Output distinct key_format and value_format of the sink.
1340    pub fn formats(&self) -> Option<(Option<&str>, &str)> {
1341        match &self.connection {
1342            StorageSinkConnection::Kafka(connection) => {
1343                let key_format = connection
1344                    .format
1345                    .key_format
1346                    .as_ref()
1347                    .map(|f| f.get_format_name());
1348                let value_format = connection.format.value_format.get_format_name();
1349                Some((key_format, value_format))
1350            }
1351            _ => None,
1352        }
1353    }
1354
1355    pub fn connection_id(&self) -> Option<CatalogItemId> {
1356        self.connection.connection_id()
1357    }
1358
1359    /// The single [`GlobalId`] that this Sink can be referenced by.
1360    pub fn global_id(&self) -> GlobalId {
1361        self.global_id
1362    }
1363}
1364
#[derive(Debug, Clone, Serialize)]
pub struct View {
    /// Parse-able SQL that defines this view.
    pub create_sql: String,
    /// [`GlobalId`] used to reference this view from outside the catalog, e.g. compute.
    pub global_id: GlobalId,
    /// Unoptimized high-level expression from parsing the `create_sql`
    /// (in an [`Arc`] so clones of the catalog entry are cheap).
    pub raw_expr: Arc<HirRelationExpr>,
    /// Optimized mid-level expression from (locally) optimizing the `raw_expr`.
    pub optimized_expr: Arc<OptimizedMirRelationExpr>,
    /// Columns of this view.
    pub desc: RelationDesc,
    /// If created in the `TEMPORARY` schema, the [`ConnectionId`] for that session.
    pub conn_id: Option<ConnectionId>,
    /// Other catalog objects that are referenced by this view, determined at name resolution.
    pub resolved_ids: ResolvedIds,
    /// All of the catalog objects that are referenced by this view.
    pub dependencies: DependencyIds,
}
1384
1385impl View {
1386    /// The single [`GlobalId`] this [`View`] can be referenced by.
1387    pub fn global_id(&self) -> GlobalId {
1388        self.global_id
1389    }
1390}
1391
#[derive(Debug, Clone, Serialize)]
pub struct MaterializedView {
    /// Parse-able SQL that defines this materialized view.
    pub create_sql: String,
    /// Versions of this materialized view, and the [`GlobalId`]s that refer to them.
    ///
    /// The entry with the greatest [`RelationVersion`] is the one that writes
    /// target (see `global_id_writes`).
    #[serde(serialize_with = "mz_ore::serde::map_key_to_string")]
    pub collections: BTreeMap<RelationVersion, GlobalId>,
    /// Raw high-level expression from planning, derived from the `create_sql`.
    pub raw_expr: Arc<HirRelationExpr>,
    /// Optimized mid-level expression, derived from the `raw_expr`.
    pub optimized_expr: Arc<OptimizedMirRelationExpr>,
    /// [`VersionedRelationDesc`] of this materialized view, derived from the `create_sql`.
    pub desc: VersionedRelationDesc,
    /// Other catalog items that this materialized view references, determined at name resolution.
    pub resolved_ids: ResolvedIds,
    /// All of the catalog objects that are referenced by this view.
    pub dependencies: DependencyIds,
    /// ID of the materialized view this materialized view is intended to replace.
    pub replacement_target: Option<CatalogItemId>,
    /// Cluster that this materialized view runs on.
    pub cluster_id: ClusterId,
    /// If set, only install this materialized view's dataflow on the specified replica.
    pub target_replica: Option<ReplicaId>,
    /// Column indexes that we assert are not `NULL`.
    ///
    /// TODO(parkmycar): Switch this to use the `ColumnIdx` type.
    pub non_null_assertions: Vec<usize>,
    /// Custom compaction window, e.g. set via `ALTER RETAIN HISTORY`.
    pub custom_logical_compaction_window: Option<CompactionWindow>,
    /// Schedule to refresh this materialized view, e.g. set via `REFRESH EVERY` option.
    pub refresh_schedule: Option<RefreshSchedule>,
    /// The initial `as_of` of the storage collection associated with the materialized view.
    ///
    /// Note: This doesn't change upon restarts.
    /// (The dataflow's initial `as_of` can be different.)
    pub initial_as_of: Option<Antichain<mz_repr::Timestamp>>,
}
1429
impl MaterializedView {
    /// Returns all [`GlobalId`]s that this [`MaterializedView`] can be referenced by.
    pub fn global_ids(&self) -> impl Iterator<Item = GlobalId> + '_ {
        self.collections.values().copied()
    }

    /// The latest [`GlobalId`] for this [`MaterializedView`] which represents the writing
    /// version.
    pub fn global_id_writes(&self) -> GlobalId {
        // `BTreeMap` iterates in key order, so the last entry is the newest version.
        *self
            .collections
            .last_key_value()
            .expect("at least one version of a materialized view")
            .1
    }

    /// Returns all collections and their [`RelationDesc`]s associated with this [`MaterializedView`].
    pub fn collection_descs(
        &self,
    ) -> impl Iterator<Item = (GlobalId, RelationVersion, RelationDesc)> + '_ {
        self.collections.iter().map(|(version, gid)| {
            let desc = self
                .desc
                .at_version(RelationVersionSelector::Specific(*version));
            (*gid, *version, desc)
        })
    }

    /// Returns the [`RelationDesc`] for a specific [`GlobalId`].
    ///
    /// # Panics
    /// Panics if `id` is not one of this materialized view's collections.
    pub fn desc_for(&self, id: &GlobalId) -> RelationDesc {
        let (version, _gid) = self
            .collections
            .iter()
            .find(|(_version, gid)| *gid == id)
            .expect("GlobalId to exist");
        self.desc
            .at_version(RelationVersionSelector::Specific(*version))
    }

    /// Apply the given replacement materialized view to this [`MaterializedView`].
    ///
    /// # Panics
    /// Panics if `replacement` has no `replacement_target`, or if either
    /// `create_sql` fails to re-parse as a `CREATE MATERIALIZED VIEW`.
    pub fn apply_replacement(&mut self, replacement: Self) {
        let target_id = replacement
            .replacement_target
            .expect("replacement has target");

        // Re-parse a catalog-persisted `CREATE MATERIALIZED VIEW` statement.
        fn parse(create_sql: &str) -> mz_sql::ast::CreateMaterializedViewStatement<Raw> {
            let res = mz_sql::parse::parse(create_sql).unwrap_or_else(|e| {
                panic!("invalid create_sql persisted in catalog: {e}\n{create_sql}");
            });
            if let Statement::CreateMaterializedView(cmvs) = res.into_element().ast {
                cmvs
            } else {
                panic!("invalid MV create_sql persisted in catalog\n{create_sql}");
            }
        }

        // Keep the original statement's name; take everything else (columns,
        // cluster, query, options) from the replacement. The replacement
        // marker itself is dropped.
        let old_stmt = parse(&self.create_sql);
        let rpl_stmt = parse(&replacement.create_sql);
        let new_stmt = mz_sql::ast::CreateMaterializedViewStatement {
            if_exists: old_stmt.if_exists,
            name: old_stmt.name,
            columns: rpl_stmt.columns,
            replacement_for: None,
            in_cluster: rpl_stmt.in_cluster,
            in_cluster_replica: rpl_stmt.in_cluster_replica,
            query: rpl_stmt.query,
            as_of: rpl_stmt.as_of,
            with_options: rpl_stmt.with_options,
        };
        let create_sql = new_stmt.to_ast_string_stable();

        let mut collections = std::mem::take(&mut self.collections);
        // Note: We can't use `self.desc.latest_version` here because a replacement doesn't
        // necessarily evolve the relation schema, so that version might be lower than the actual
        // latest version.
        let latest_version = collections.keys().max().expect("at least one version");
        let new_version = latest_version.bump();
        collections.insert(new_version, replacement.global_id_writes());

        // The replacement referenced its target (i.e. us); drop that
        // self-reference from the dependency sets before adopting them.
        let mut resolved_ids = replacement.resolved_ids;
        resolved_ids.remove_item(&target_id);
        let mut dependencies = replacement.dependencies;
        dependencies.0.remove(&target_id);

        *self = Self {
            create_sql,
            collections,
            raw_expr: replacement.raw_expr,
            optimized_expr: replacement.optimized_expr,
            desc: replacement.desc,
            resolved_ids,
            dependencies,
            replacement_target: None,
            cluster_id: replacement.cluster_id,
            target_replica: replacement.target_replica,
            non_null_assertions: replacement.non_null_assertions,
            custom_logical_compaction_window: replacement.custom_logical_compaction_window,
            refresh_schedule: replacement.refresh_schedule,
            initial_as_of: replacement.initial_as_of,
        };
    }
}
1532
#[derive(Debug, Clone, Serialize)]
pub struct Index {
    /// Parse-able SQL that defines this index.
    pub create_sql: String,
    /// [`GlobalId`] used to reference this index from outside the catalog, e.g. compute.
    pub global_id: GlobalId,
    /// The [`GlobalId`] this Index is on.
    pub on: GlobalId,
    /// Keys of the index.
    pub keys: Arc<[MirScalarExpr]>,
    /// If created in the `TEMPORARY` schema, the [`ConnectionId`] for that session.
    pub conn_id: Option<ConnectionId>,
    /// Other catalog objects referenced by this index, e.g. the object we're indexing.
    pub resolved_ids: ResolvedIds,
    /// Cluster this index is installed on.
    pub cluster_id: ClusterId,
    /// Custom compaction window, e.g. set via `ALTER RETAIN HISTORY`.
    pub custom_logical_compaction_window: Option<CompactionWindow>,
    /// Whether the index's logical compaction window is controlled by the ['metrics_retention']
    /// session variable.
    ///
    /// ['metrics_retention']: mz_sql::session::vars::METRICS_RETENTION
    pub is_retained_metrics_object: bool,
}
1557
1558impl Index {
1559    /// The [`GlobalId`] that refers to this Index.
1560    pub fn global_id(&self) -> GlobalId {
1561        self.global_id
1562    }
1563}
1564
#[derive(Debug, Clone, Serialize)]
pub struct Type {
    /// Parse-able SQL that defines this type.
    pub create_sql: Option<String>,
    /// [`GlobalId`] used to reference this type from outside the catalog.
    pub global_id: GlobalId,
    /// Structural details of the type (not serialized).
    #[serde(skip)]
    pub details: CatalogTypeDetails<IdReference>,
    /// Other catalog objects referenced by this type.
    pub resolved_ids: ResolvedIds,
}
1576
#[derive(Debug, Clone, Serialize)]
pub struct Func {
    /// Static definition of the function.
    // Not serialized: `inner` is a reference to a `'static` entry defined in `mz_sql::func`.
    #[serde(skip)]
    pub inner: &'static mz_sql::func::Func,
    /// [`GlobalId`] used to reference this function from outside the catalog.
    pub global_id: GlobalId,
}
1585
#[derive(Debug, Clone, Serialize)]
pub struct Secret {
    /// Parse-able SQL that defines this secret.
    ///
    /// NOTE(review): only the definition lives here; the secret's payload is
    /// presumably stored elsewhere — confirm.
    pub create_sql: String,
    /// [`GlobalId`] used to reference this secret from outside the catalog.
    pub global_id: GlobalId,
}
1593
/// A catalog entry for a `CREATE CONNECTION` object.
#[derive(Debug, Clone, Serialize)]
pub struct Connection {
    /// Parse-able SQL that defines this connection.
    pub create_sql: String,
    /// [`GlobalId`] used to reference this connection from the storage layer.
    pub global_id: GlobalId,
    /// The kind of connection, along with its connection-specific details.
    pub details: ConnectionDetails,
    /// Other objects this connection depends on.
    pub resolved_ids: ResolvedIds,
}
1605
1606impl Connection {
1607    /// The single [`GlobalId`] used to reference this connection.
1608    pub fn global_id(&self) -> GlobalId {
1609        self.global_id
1610    }
1611}
1612
#[derive(Debug, Clone, Serialize)]
pub struct ContinualTask {
    /// Parse-able SQL that defines this continual task.
    pub create_sql: String,
    /// [`GlobalId`] used to reference this continual task from outside the catalog.
    pub global_id: GlobalId,
    /// [`GlobalId`] of the collection that we read into this continual task.
    pub input_id: GlobalId,
    /// Whether to include an initial snapshot of the input; presumably
    /// analogous to [`Sink::with_snapshot`] — NOTE(review): confirm.
    pub with_snapshot: bool,
    /// ContinualTasks are self-referential. We make this work by using a
    /// placeholder `LocalId` for the CT itself through name resolution and
    /// planning. Then we fill in the real `GlobalId` before constructing this
    /// catalog item.
    pub raw_expr: Arc<HirRelationExpr>,
    /// Columns for this continual task.
    pub desc: RelationDesc,
    /// Other catalog items that this continual task references, determined at name resolution.
    pub resolved_ids: ResolvedIds,
    /// All of the catalog objects that are referenced by this continual task.
    pub dependencies: DependencyIds,
    /// Cluster that this continual task runs on.
    pub cluster_id: ClusterId,
    /// See the comment on [MaterializedView::initial_as_of].
    pub initial_as_of: Option<Antichain<mz_repr::Timestamp>>,
}
1638
1639impl ContinualTask {
1640    /// The single [`GlobalId`] used to reference this continual task.
1641    pub fn global_id(&self) -> GlobalId {
1642        self.global_id
1643    }
1644}
1645
/// In-memory representation of a network policy.
#[derive(Debug, Clone, Serialize, PartialEq, Eq)]
pub struct NetworkPolicy {
    /// Name of the policy.
    pub name: String,
    /// Stable identifier of the policy.
    pub id: NetworkPolicyId,
    /// OID (object identifier) of the policy.
    pub oid: u32,
    /// The rules that make up this policy.
    pub rules: Vec<NetworkPolicyRule>,
    /// Role that owns this policy.
    pub owner_id: RoleId,
    /// Privileges granted on this policy.
    pub privileges: PrivilegeMap,
}
1655
1656impl From<NetworkPolicy> for durable::NetworkPolicy {
1657    fn from(policy: NetworkPolicy) -> durable::NetworkPolicy {
1658        durable::NetworkPolicy {
1659            id: policy.id,
1660            oid: policy.oid,
1661            name: policy.name,
1662            rules: policy.rules,
1663            owner_id: policy.owner_id,
1664            privileges: policy.privileges.into_all_values().collect(),
1665        }
1666    }
1667}
1668
1669impl From<durable::NetworkPolicy> for NetworkPolicy {
1670    fn from(
1671        durable::NetworkPolicy {
1672            id,
1673            oid,
1674            name,
1675            rules,
1676            owner_id,
1677            privileges,
1678        }: durable::NetworkPolicy,
1679    ) -> Self {
1680        NetworkPolicy {
1681            id,
1682            oid,
1683            name,
1684            rules,
1685            owner_id,
1686            privileges: PrivilegeMap::from_mz_acl_items(privileges),
1687        }
1688    }
1689}
1690
1691impl UpdateFrom<durable::NetworkPolicy> for NetworkPolicy {
1692    fn update_from(
1693        &mut self,
1694        durable::NetworkPolicy {
1695            id,
1696            oid,
1697            name,
1698            rules,
1699            owner_id,
1700            privileges,
1701        }: durable::NetworkPolicy,
1702    ) {
1703        self.id = id;
1704        self.oid = oid;
1705        self.name = name;
1706        self.rules = rules;
1707        self.owner_id = owner_id;
1708        self.privileges = PrivilegeMap::from_mz_acl_items(privileges);
1709    }
1710}
1711
1712impl CatalogItem {
1713    /// Returns a string indicating the type of this catalog entry.
1714    pub fn typ(&self) -> mz_sql::catalog::CatalogItemType {
1715        match self {
1716            CatalogItem::Table(_) => CatalogItemType::Table,
1717            CatalogItem::Source(_) => CatalogItemType::Source,
1718            CatalogItem::Log(_) => CatalogItemType::Source,
1719            CatalogItem::Sink(_) => CatalogItemType::Sink,
1720            CatalogItem::View(_) => CatalogItemType::View,
1721            CatalogItem::MaterializedView(_) => CatalogItemType::MaterializedView,
1722            CatalogItem::Index(_) => CatalogItemType::Index,
1723            CatalogItem::Type(_) => CatalogItemType::Type,
1724            CatalogItem::Func(_) => CatalogItemType::Func,
1725            CatalogItem::Secret(_) => CatalogItemType::Secret,
1726            CatalogItem::Connection(_) => CatalogItemType::Connection,
1727            CatalogItem::ContinualTask(_) => CatalogItemType::ContinualTask,
1728        }
1729    }
1730
1731    /// Returns the [`GlobalId`]s that reference this item, if any.
1732    pub fn global_ids(&self) -> impl Iterator<Item = GlobalId> + '_ {
1733        let gid = match self {
1734            CatalogItem::Source(source) => source.global_id,
1735            CatalogItem::Log(log) => log.global_id,
1736            CatalogItem::Sink(sink) => sink.global_id,
1737            CatalogItem::View(view) => view.global_id,
1738            CatalogItem::MaterializedView(mv) => {
1739                return itertools::Either::Left(mv.collections.values().copied());
1740            }
1741            CatalogItem::ContinualTask(ct) => ct.global_id,
1742            CatalogItem::Index(index) => index.global_id,
1743            CatalogItem::Func(func) => func.global_id,
1744            CatalogItem::Type(ty) => ty.global_id,
1745            CatalogItem::Secret(secret) => secret.global_id,
1746            CatalogItem::Connection(conn) => conn.global_id,
1747            CatalogItem::Table(table) => {
1748                return itertools::Either::Left(table.collections.values().copied());
1749            }
1750        };
1751        itertools::Either::Right(std::iter::once(gid))
1752    }
1753
1754    /// Returns the most up-to-date [`GlobalId`] for this item.
1755    ///
1756    /// Note: The only type of object that can have multiple [`GlobalId`]s are tables.
1757    pub fn latest_global_id(&self) -> GlobalId {
1758        match self {
1759            CatalogItem::Source(source) => source.global_id,
1760            CatalogItem::Log(log) => log.global_id,
1761            CatalogItem::Sink(sink) => sink.global_id,
1762            CatalogItem::View(view) => view.global_id,
1763            CatalogItem::MaterializedView(mv) => mv.global_id_writes(),
1764            CatalogItem::ContinualTask(ct) => ct.global_id,
1765            CatalogItem::Index(index) => index.global_id,
1766            CatalogItem::Func(func) => func.global_id,
1767            CatalogItem::Type(ty) => ty.global_id,
1768            CatalogItem::Secret(secret) => secret.global_id,
1769            CatalogItem::Connection(conn) => conn.global_id,
1770            CatalogItem::Table(table) => table.global_id_writes(),
1771        }
1772    }
1773
1774    /// Whether this item represents a storage collection.
1775    pub fn is_storage_collection(&self) -> bool {
1776        match self {
1777            CatalogItem::Table(_)
1778            | CatalogItem::Source(_)
1779            | CatalogItem::MaterializedView(_)
1780            | CatalogItem::Sink(_)
1781            | CatalogItem::ContinualTask(_) => true,
1782            CatalogItem::Log(_)
1783            | CatalogItem::View(_)
1784            | CatalogItem::Index(_)
1785            | CatalogItem::Type(_)
1786            | CatalogItem::Func(_)
1787            | CatalogItem::Secret(_)
1788            | CatalogItem::Connection(_) => false,
1789        }
1790    }
1791
1792    /// Returns the [`RelationDesc`] for items that yield rows, at the requested
1793    /// version.
1794    ///
1795    /// Some item types honor `version` so callers can ask for the schema that
1796    /// matches a specific [`GlobalId`] or historical definition. Other relation
1797    /// types ignore `version` because they have a single shape. Non-relational
1798    /// items ( for example functions, indexes, sinks, secrets, and connections)
1799    /// return `None`.
1800    pub fn relation_desc(&self, version: RelationVersionSelector) -> Option<Cow<'_, RelationDesc>> {
1801        match &self {
1802            CatalogItem::Source(src) => Some(Cow::Borrowed(&src.desc)),
1803            CatalogItem::Log(log) => Some(Cow::Owned(log.variant.desc())),
1804            CatalogItem::Table(tbl) => Some(Cow::Owned(tbl.desc.at_version(version))),
1805            CatalogItem::View(view) => Some(Cow::Borrowed(&view.desc)),
1806            CatalogItem::MaterializedView(mview) => {
1807                Some(Cow::Owned(mview.desc.at_version(version)))
1808            }
1809            CatalogItem::ContinualTask(ct) => Some(Cow::Borrowed(&ct.desc)),
1810            CatalogItem::Func(_)
1811            | CatalogItem::Index(_)
1812            | CatalogItem::Sink(_)
1813            | CatalogItem::Secret(_)
1814            | CatalogItem::Connection(_)
1815            | CatalogItem::Type(_) => None,
1816        }
1817    }
1818
1819    pub fn func(
1820        &self,
1821        entry: &CatalogEntry,
1822    ) -> Result<&'static mz_sql::func::Func, SqlCatalogError> {
1823        match &self {
1824            CatalogItem::Func(func) => Ok(func.inner),
1825            _ => Err(SqlCatalogError::UnexpectedType {
1826                name: entry.name().item.to_string(),
1827                actual_type: entry.item_type(),
1828                expected_type: CatalogItemType::Func,
1829            }),
1830        }
1831    }
1832
1833    pub fn source_desc(
1834        &self,
1835        entry: &CatalogEntry,
1836    ) -> Result<Option<&SourceDesc<ReferencedConnection>>, SqlCatalogError> {
1837        match &self {
1838            CatalogItem::Source(source) => match &source.data_source {
1839                DataSourceDesc::Ingestion { desc, .. }
1840                | DataSourceDesc::OldSyntaxIngestion { desc, .. } => Ok(Some(desc)),
1841                DataSourceDesc::IngestionExport { .. }
1842                | DataSourceDesc::Introspection(_)
1843                | DataSourceDesc::Webhook { .. }
1844                | DataSourceDesc::Progress
1845                | DataSourceDesc::Catalog => Ok(None),
1846            },
1847            _ => Err(SqlCatalogError::UnexpectedType {
1848                name: entry.name().item.to_string(),
1849                actual_type: entry.item_type(),
1850                expected_type: CatalogItemType::Source,
1851            }),
1852        }
1853    }
1854
1855    /// Reports whether this catalog entry is a progress source.
1856    pub fn is_progress_source(&self) -> bool {
1857        matches!(
1858            self,
1859            CatalogItem::Source(Source {
1860                data_source: DataSourceDesc::Progress,
1861                ..
1862            })
1863        )
1864    }
1865
1866    /// Collects the identifiers of the objects that were encountered when resolving names in the
1867    /// item's DDL statement.
1868    pub fn references(&self) -> &ResolvedIds {
1869        static EMPTY: LazyLock<ResolvedIds> = LazyLock::new(ResolvedIds::empty);
1870        match self {
1871            CatalogItem::Func(_) => &*EMPTY,
1872            CatalogItem::Index(idx) => &idx.resolved_ids,
1873            CatalogItem::Sink(sink) => &sink.resolved_ids,
1874            CatalogItem::Source(source) => &source.resolved_ids,
1875            CatalogItem::Log(_) => &*EMPTY,
1876            CatalogItem::Table(table) => &table.resolved_ids,
1877            CatalogItem::Type(typ) => &typ.resolved_ids,
1878            CatalogItem::View(view) => &view.resolved_ids,
1879            CatalogItem::MaterializedView(mview) => &mview.resolved_ids,
1880            CatalogItem::Secret(_) => &*EMPTY,
1881            CatalogItem::Connection(connection) => &connection.resolved_ids,
1882            CatalogItem::ContinualTask(ct) => &ct.resolved_ids,
1883        }
1884    }
1885
1886    /// Collects the identifiers of the objects used by this [`CatalogItem`].
1887    ///
1888    /// Like [`CatalogItem::references()`] but also includes objects that are not directly
1889    /// referenced. For example this will include any catalog objects used to implement functions
1890    /// and casts in the item.
1891    pub fn uses(&self) -> BTreeSet<CatalogItemId> {
1892        let mut uses: BTreeSet<_> = self.references().items().copied().collect();
1893        match self {
1894            // TODO(jkosh44) This isn't really correct for functions. They may use other objects in
1895            // their implementation. However, currently there's no way to get that information.
1896            CatalogItem::Func(_) => {}
1897            CatalogItem::Index(_) => {}
1898            CatalogItem::Sink(_) => {}
1899            CatalogItem::Source(_) => {}
1900            CatalogItem::Log(_) => {}
1901            CatalogItem::Table(_) => {}
1902            CatalogItem::Type(_) => {}
1903            CatalogItem::View(view) => uses.extend(view.dependencies.0.iter().copied()),
1904            CatalogItem::MaterializedView(mview) => {
1905                uses.extend(mview.dependencies.0.iter().copied())
1906            }
1907            CatalogItem::ContinualTask(ct) => uses.extend(ct.dependencies.0.iter().copied()),
1908            CatalogItem::Secret(_) => {}
1909            CatalogItem::Connection(_) => {}
1910        }
1911        uses
1912    }
1913
1914    /// Returns the connection ID that this item belongs to, if this item is
1915    /// temporary.
1916    pub fn conn_id(&self) -> Option<&ConnectionId> {
1917        match self {
1918            CatalogItem::View(view) => view.conn_id.as_ref(),
1919            CatalogItem::Index(index) => index.conn_id.as_ref(),
1920            CatalogItem::Table(table) => table.conn_id.as_ref(),
1921            CatalogItem::Log(_)
1922            | CatalogItem::Source(_)
1923            | CatalogItem::Sink(_)
1924            | CatalogItem::MaterializedView(_)
1925            | CatalogItem::Secret(_)
1926            | CatalogItem::Type(_)
1927            | CatalogItem::Func(_)
1928            | CatalogItem::Connection(_)
1929            | CatalogItem::ContinualTask(_) => None,
1930        }
1931    }
1932
1933    /// Sets the connection ID that this item belongs to, which makes it a
1934    /// temporary item.
1935    pub fn set_conn_id(&mut self, conn_id: Option<ConnectionId>) {
1936        match self {
1937            CatalogItem::View(view) => view.conn_id = conn_id,
1938            CatalogItem::Index(index) => index.conn_id = conn_id,
1939            CatalogItem::Table(table) => table.conn_id = conn_id,
1940            CatalogItem::Log(_)
1941            | CatalogItem::Source(_)
1942            | CatalogItem::Sink(_)
1943            | CatalogItem::MaterializedView(_)
1944            | CatalogItem::Secret(_)
1945            | CatalogItem::Type(_)
1946            | CatalogItem::Func(_)
1947            | CatalogItem::Connection(_)
1948            | CatalogItem::ContinualTask(_) => (),
1949        }
1950    }
1951
    /// Indicates whether this item is temporary or not.
    ///
    /// An item is temporary exactly when it is bound to a connection; see
    /// [`CatalogItem::conn_id`].
    pub fn is_temporary(&self) -> bool {
        self.conn_id().is_some()
    }
1956
1957    pub fn rename_schema_refs(
1958        &self,
1959        database_name: &str,
1960        cur_schema_name: &str,
1961        new_schema_name: &str,
1962    ) -> Result<CatalogItem, (String, String)> {
1963        let do_rewrite = |create_sql: String| -> Result<String, (String, String)> {
1964            let mut create_stmt = mz_sql::parse::parse(&create_sql)
1965                .expect("invalid create sql persisted to catalog")
1966                .into_element()
1967                .ast;
1968
1969            // Rename all references to cur_schema_name.
1970            mz_sql::ast::transform::create_stmt_rename_schema_refs(
1971                &mut create_stmt,
1972                database_name,
1973                cur_schema_name,
1974                new_schema_name,
1975            )?;
1976
1977            Ok(create_stmt.to_ast_string_stable())
1978        };
1979
1980        match self {
1981            CatalogItem::Table(i) => {
1982                let mut i = i.clone();
1983                i.create_sql = i.create_sql.map(do_rewrite).transpose()?;
1984                Ok(CatalogItem::Table(i))
1985            }
1986            CatalogItem::Log(i) => Ok(CatalogItem::Log(i.clone())),
1987            CatalogItem::Source(i) => {
1988                let mut i = i.clone();
1989                i.create_sql = i.create_sql.map(do_rewrite).transpose()?;
1990                Ok(CatalogItem::Source(i))
1991            }
1992            CatalogItem::Sink(i) => {
1993                let mut i = i.clone();
1994                i.create_sql = do_rewrite(i.create_sql)?;
1995                Ok(CatalogItem::Sink(i))
1996            }
1997            CatalogItem::View(i) => {
1998                let mut i = i.clone();
1999                i.create_sql = do_rewrite(i.create_sql)?;
2000                Ok(CatalogItem::View(i))
2001            }
2002            CatalogItem::MaterializedView(i) => {
2003                let mut i = i.clone();
2004                i.create_sql = do_rewrite(i.create_sql)?;
2005                Ok(CatalogItem::MaterializedView(i))
2006            }
2007            CatalogItem::Index(i) => {
2008                let mut i = i.clone();
2009                i.create_sql = do_rewrite(i.create_sql)?;
2010                Ok(CatalogItem::Index(i))
2011            }
2012            CatalogItem::Secret(i) => {
2013                let mut i = i.clone();
2014                i.create_sql = do_rewrite(i.create_sql)?;
2015                Ok(CatalogItem::Secret(i))
2016            }
2017            CatalogItem::Connection(i) => {
2018                let mut i = i.clone();
2019                i.create_sql = do_rewrite(i.create_sql)?;
2020                Ok(CatalogItem::Connection(i))
2021            }
2022            CatalogItem::Type(i) => {
2023                let mut i = i.clone();
2024                i.create_sql = i.create_sql.map(do_rewrite).transpose()?;
2025                Ok(CatalogItem::Type(i))
2026            }
2027            CatalogItem::Func(i) => Ok(CatalogItem::Func(i.clone())),
2028            CatalogItem::ContinualTask(i) => {
2029                let mut i = i.clone();
2030                i.create_sql = do_rewrite(i.create_sql)?;
2031                Ok(CatalogItem::ContinualTask(i))
2032            }
2033        }
2034    }
2035
2036    /// Returns a clone of `self` with all instances of `from` renamed to `to`
2037    /// (with the option of including the item's own name) or errors if request
2038    /// is ambiguous.
2039    pub fn rename_item_refs(
2040        &self,
2041        from: FullItemName,
2042        to_item_name: String,
2043        rename_self: bool,
2044    ) -> Result<CatalogItem, String> {
2045        let do_rewrite = |create_sql: String| -> Result<String, String> {
2046            let mut create_stmt = mz_sql::parse::parse(&create_sql)
2047                .expect("invalid create sql persisted to catalog")
2048                .into_element()
2049                .ast;
2050            if rename_self {
2051                mz_sql::ast::transform::create_stmt_rename(&mut create_stmt, to_item_name.clone());
2052            }
2053            // Determination of what constitutes an ambiguous request is done here.
2054            mz_sql::ast::transform::create_stmt_rename_refs(&mut create_stmt, from, to_item_name)?;
2055            Ok(create_stmt.to_ast_string_stable())
2056        };
2057
2058        match self {
2059            CatalogItem::Table(i) => {
2060                let mut i = i.clone();
2061                i.create_sql = i.create_sql.map(do_rewrite).transpose()?;
2062                Ok(CatalogItem::Table(i))
2063            }
2064            CatalogItem::Log(i) => Ok(CatalogItem::Log(i.clone())),
2065            CatalogItem::Source(i) => {
2066                let mut i = i.clone();
2067                i.create_sql = i.create_sql.map(do_rewrite).transpose()?;
2068                Ok(CatalogItem::Source(i))
2069            }
2070            CatalogItem::Sink(i) => {
2071                let mut i = i.clone();
2072                i.create_sql = do_rewrite(i.create_sql)?;
2073                Ok(CatalogItem::Sink(i))
2074            }
2075            CatalogItem::View(i) => {
2076                let mut i = i.clone();
2077                i.create_sql = do_rewrite(i.create_sql)?;
2078                Ok(CatalogItem::View(i))
2079            }
2080            CatalogItem::MaterializedView(i) => {
2081                let mut i = i.clone();
2082                i.create_sql = do_rewrite(i.create_sql)?;
2083                Ok(CatalogItem::MaterializedView(i))
2084            }
2085            CatalogItem::Index(i) => {
2086                let mut i = i.clone();
2087                i.create_sql = do_rewrite(i.create_sql)?;
2088                Ok(CatalogItem::Index(i))
2089            }
2090            CatalogItem::Secret(i) => {
2091                let mut i = i.clone();
2092                i.create_sql = do_rewrite(i.create_sql)?;
2093                Ok(CatalogItem::Secret(i))
2094            }
2095            CatalogItem::Func(_) | CatalogItem::Type(_) => {
2096                unreachable!("{}s cannot be renamed", self.typ())
2097            }
2098            CatalogItem::Connection(i) => {
2099                let mut i = i.clone();
2100                i.create_sql = do_rewrite(i.create_sql)?;
2101                Ok(CatalogItem::Connection(i))
2102            }
2103            CatalogItem::ContinualTask(i) => {
2104                let mut i = i.clone();
2105                i.create_sql = do_rewrite(i.create_sql)?;
2106                Ok(CatalogItem::ContinualTask(i))
2107            }
2108        }
2109    }
2110
2111    /// Returns a clone of `self` with all instances of `old_id` replaced with `new_id`.
2112    pub fn replace_item_refs(&self, old_id: CatalogItemId, new_id: CatalogItemId) -> CatalogItem {
2113        let do_rewrite = |create_sql: String| -> String {
2114            let mut create_stmt = mz_sql::parse::parse(&create_sql)
2115                .expect("invalid create sql persisted to catalog")
2116                .into_element()
2117                .ast;
2118            mz_sql::ast::transform::create_stmt_replace_ids(
2119                &mut create_stmt,
2120                &[(old_id, new_id)].into(),
2121            );
2122            create_stmt.to_ast_string_stable()
2123        };
2124
2125        match self {
2126            CatalogItem::Table(i) => {
2127                let mut i = i.clone();
2128                i.create_sql = i.create_sql.map(do_rewrite);
2129                CatalogItem::Table(i)
2130            }
2131            CatalogItem::Log(i) => CatalogItem::Log(i.clone()),
2132            CatalogItem::Source(i) => {
2133                let mut i = i.clone();
2134                i.create_sql = i.create_sql.map(do_rewrite);
2135                CatalogItem::Source(i)
2136            }
2137            CatalogItem::Sink(i) => {
2138                let mut i = i.clone();
2139                i.create_sql = do_rewrite(i.create_sql);
2140                CatalogItem::Sink(i)
2141            }
2142            CatalogItem::View(i) => {
2143                let mut i = i.clone();
2144                i.create_sql = do_rewrite(i.create_sql);
2145                CatalogItem::View(i)
2146            }
2147            CatalogItem::MaterializedView(i) => {
2148                let mut i = i.clone();
2149                i.create_sql = do_rewrite(i.create_sql);
2150                CatalogItem::MaterializedView(i)
2151            }
2152            CatalogItem::Index(i) => {
2153                let mut i = i.clone();
2154                i.create_sql = do_rewrite(i.create_sql);
2155                CatalogItem::Index(i)
2156            }
2157            CatalogItem::Secret(i) => {
2158                let mut i = i.clone();
2159                i.create_sql = do_rewrite(i.create_sql);
2160                CatalogItem::Secret(i)
2161            }
2162            CatalogItem::Func(_) | CatalogItem::Type(_) => {
2163                unreachable!("references of {}s cannot be replaced", self.typ())
2164            }
2165            CatalogItem::Connection(i) => {
2166                let mut i = i.clone();
2167                i.create_sql = do_rewrite(i.create_sql);
2168                CatalogItem::Connection(i)
2169            }
2170            CatalogItem::ContinualTask(i) => {
2171                let mut i = i.clone();
2172                i.create_sql = do_rewrite(i.create_sql);
2173                CatalogItem::ContinualTask(i)
2174            }
2175        }
2176    }
    /// Updates the retain history for an item. Returns the previous retain history value. Returns
    /// an error if this item does not support retain history.
    ///
    /// `value` is the raw SQL option value recorded in the item's `create_sql`
    /// (`None` removes the option), while `window` is the parsed compaction
    /// window stored on the in-memory item.
    pub fn update_retain_history(
        &mut self,
        value: Option<Value>,
        window: CompactionWindow,
    ) -> Result<Option<WithOptionValue<Raw>>, ()> {
        let update = |mut ast: &mut Statement<Raw>| {
            // Each statement type has unique option types. This macro handles them commonly.
            macro_rules! update_retain_history {
                ( $stmt:ident, $opt:ident, $name:ident ) => {{
                    // Replace or add the option.
                    let pos = $stmt
                        .with_options
                        .iter()
                        // In case there are ever multiple, look for the last one.
                        .rposition(|o| o.name == mz_sql_parser::ast::$name::RetainHistory);
                    if let Some(value) = value {
                        let next = mz_sql_parser::ast::$opt {
                            name: mz_sql_parser::ast::$name::RetainHistory,
                            value: Some(WithOptionValue::RetainHistoryFor(value)),
                        };
                        if let Some(idx) = pos {
                            // Overwrite the existing option in place, keeping
                            // the old entry so its value can be returned.
                            let previous = $stmt.with_options[idx].clone();
                            $stmt.with_options[idx] = next;
                            previous.value
                        } else {
                            $stmt.with_options.push(next);
                            None
                        }
                    } else {
                        // `value` is None: remove the option if present.
                        if let Some(idx) = pos {
                            // NOTE: `swap_remove` may reorder the remaining
                            // options.
                            $stmt.with_options.swap_remove(idx).value
                        } else {
                            None
                        }
                    }
                }};
            }
            // Only these statement kinds support RETAIN HISTORY; anything
            // else is an error.
            let previous = match &mut ast {
                Statement::CreateTable(stmt) => {
                    update_retain_history!(stmt, TableOption, TableOptionName)
                }
                Statement::CreateIndex(stmt) => {
                    update_retain_history!(stmt, IndexOption, IndexOptionName)
                }
                Statement::CreateSource(stmt) => {
                    update_retain_history!(stmt, CreateSourceOption, CreateSourceOptionName)
                }
                Statement::CreateMaterializedView(stmt) => {
                    update_retain_history!(stmt, MaterializedViewOption, MaterializedViewOptionName)
                }
                _ => {
                    return Err(());
                }
            };
            Ok(previous)
        };

        let res = self.update_sql(update)?;
        // Keep the in-memory compaction window in sync with the rewritten SQL.
        let cw = self
            .custom_logical_compaction_window_mut()
            .expect("item must have compaction window");
        *cw = Some(window);
        Ok(res)
    }
2243
2244    /// Updates the timestamp interval for a source. Returns an error if this item is not a source.
2245    pub fn update_timestamp_interval(
2246        &mut self,
2247        value: Option<Value>,
2248        interval: Duration,
2249    ) -> Result<(), ()> {
2250        let update = |ast: &mut Statement<Raw>| {
2251            match ast {
2252                Statement::CreateSource(stmt) => {
2253                    let pos = stmt.with_options.iter().rposition(|o| {
2254                        o.name == mz_sql_parser::ast::CreateSourceOptionName::TimestampInterval
2255                    });
2256                    if let Some(value) = value {
2257                        let next = mz_sql_parser::ast::CreateSourceOption {
2258                            name: mz_sql_parser::ast::CreateSourceOptionName::TimestampInterval,
2259                            value: Some(WithOptionValue::Value(value)),
2260                        };
2261                        if let Some(idx) = pos {
2262                            stmt.with_options[idx] = next;
2263                        } else {
2264                            stmt.with_options.push(next);
2265                        }
2266                    } else {
2267                        if let Some(idx) = pos {
2268                            stmt.with_options.swap_remove(idx);
2269                        }
2270                    }
2271                }
2272                _ => return Err(()),
2273            };
2274            Ok(())
2275        };
2276
2277        self.update_sql(update)?;
2278
2279        // Update the in-memory SourceDesc timestamp_interval.
2280        match self {
2281            CatalogItem::Source(source) => {
2282                match &mut source.data_source {
2283                    DataSourceDesc::Ingestion { desc, .. }
2284                    | DataSourceDesc::OldSyntaxIngestion { desc, .. } => {
2285                        desc.timestamp_interval = interval;
2286                    }
2287                    _ => return Err(()),
2288                }
2289                Ok(())
2290            }
2291            _ => Err(()),
2292        }
2293    }
2294
2295    pub fn add_column(
2296        &mut self,
2297        name: ColumnName,
2298        typ: SqlColumnType,
2299        sql: RawDataType,
2300    ) -> Result<RelationVersion, PlanError> {
2301        let CatalogItem::Table(table) = self else {
2302            return Err(PlanError::Unsupported {
2303                feature: "adding columns to a non-Table".to_string(),
2304                discussion_no: None,
2305            });
2306        };
2307        let next_version = table.desc.add_column(name.clone(), typ);
2308
2309        let update = |mut ast: &mut Statement<Raw>| match &mut ast {
2310            Statement::CreateTable(stmt) => {
2311                let version = ColumnOptionDef {
2312                    name: None,
2313                    option: ColumnOption::Versioned {
2314                        action: ColumnVersioned::Added,
2315                        version: next_version.into(),
2316                    },
2317                };
2318                let column = ColumnDef {
2319                    name: name.into(),
2320                    data_type: sql,
2321                    collation: None,
2322                    options: vec![version],
2323                };
2324                stmt.columns.push(column);
2325                Ok(())
2326            }
2327            _ => Err(()),
2328        };
2329
2330        self.update_sql(update)
2331            .map_err(|()| PlanError::Unstructured("expected CREATE TABLE statement".to_string()))?;
2332        Ok(next_version)
2333    }
2334
2335    /// Updates the create_sql field of this item. Returns an error if this is a builtin item,
2336    /// otherwise returns f's result.
2337    pub fn update_sql<F, T>(&mut self, f: F) -> Result<T, ()>
2338    where
2339        F: FnOnce(&mut Statement<Raw>) -> Result<T, ()>,
2340    {
2341        let create_sql = match self {
2342            CatalogItem::Table(Table { create_sql, .. })
2343            | CatalogItem::Type(Type { create_sql, .. })
2344            | CatalogItem::Source(Source { create_sql, .. }) => create_sql.as_mut(),
2345            CatalogItem::Sink(Sink { create_sql, .. })
2346            | CatalogItem::View(View { create_sql, .. })
2347            | CatalogItem::MaterializedView(MaterializedView { create_sql, .. })
2348            | CatalogItem::Index(Index { create_sql, .. })
2349            | CatalogItem::Secret(Secret { create_sql, .. })
2350            | CatalogItem::Connection(Connection { create_sql, .. })
2351            | CatalogItem::ContinualTask(ContinualTask { create_sql, .. }) => Some(create_sql),
2352            CatalogItem::Func(_) | CatalogItem::Log(_) => None,
2353        };
2354        let Some(create_sql) = create_sql else {
2355            return Err(());
2356        };
2357        let mut ast = mz_sql_parser::parser::parse_statements(create_sql)
2358            .expect("non-system items must be parseable")
2359            .into_element()
2360            .ast;
2361        debug!("rewrite: {}", ast.to_ast_string_redacted());
2362        let t = f(&mut ast)?;
2363        *create_sql = ast.to_ast_string_stable();
2364        debug!("rewrote: {}", ast.to_ast_string_redacted());
2365        Ok(t)
2366    }
2367
2368    /// If the object is considered a "compute object"
2369    /// (i.e., it is managed by the compute controller),
2370    /// this function returns its cluster ID. Otherwise, it returns nothing.
2371    ///
2372    /// This function differs from `cluster_id` because while all
2373    /// compute objects run on a cluster, the converse is not true.
2374    pub fn is_compute_object_on_cluster(&self) -> Option<ClusterId> {
2375        match self {
2376            CatalogItem::Index(index) => Some(index.cluster_id),
2377            CatalogItem::Table(_)
2378            | CatalogItem::Source(_)
2379            | CatalogItem::Log(_)
2380            | CatalogItem::View(_)
2381            | CatalogItem::MaterializedView(_)
2382            | CatalogItem::Sink(_)
2383            | CatalogItem::Type(_)
2384            | CatalogItem::Func(_)
2385            | CatalogItem::Secret(_)
2386            | CatalogItem::Connection(_)
2387            | CatalogItem::ContinualTask(_) => None,
2388        }
2389    }
2390
    /// Returns the ID of the cluster this item runs on, if it runs on a cluster.
    ///
    /// Contrast with `is_compute_object_on_cluster`, which reports only
    /// compute-managed objects; sinks, ingestions, webhooks, and continual
    /// tasks also run on clusters and are reported here.
    pub fn cluster_id(&self) -> Option<ClusterId> {
        match self {
            CatalogItem::MaterializedView(mv) => Some(mv.cluster_id),
            CatalogItem::Index(index) => Some(index.cluster_id),
            CatalogItem::Source(source) => match &source.data_source {
                DataSourceDesc::Ingestion { cluster_id, .. }
                | DataSourceDesc::OldSyntaxIngestion { cluster_id, .. } => Some(*cluster_id),
                // This is somewhat of a lie because the export runs on the same
                // cluster as its ingestion but we don't yet have a way of
                // cross-referencing the items
                DataSourceDesc::IngestionExport { .. } => None,
                DataSourceDesc::Webhook { cluster_id, .. } => Some(*cluster_id),
                // These source kinds have no associated user cluster.
                DataSourceDesc::Introspection(_)
                | DataSourceDesc::Progress
                | DataSourceDesc::Catalog => None,
            },
            CatalogItem::Sink(sink) => Some(sink.cluster_id),
            CatalogItem::ContinualTask(ct) => Some(ct.cluster_id),
            CatalogItem::Table(_)
            | CatalogItem::Log(_)
            | CatalogItem::View(_)
            | CatalogItem::Type(_)
            | CatalogItem::Func(_)
            | CatalogItem::Secret(_)
            | CatalogItem::Connection(_) => None,
        }
    }
2418
2419    /// The custom compaction window, if any has been set. This does not reflect any propagated
2420    /// compaction window (i.e., source -> subsource).
2421    pub fn custom_logical_compaction_window(&self) -> Option<CompactionWindow> {
2422        match self {
2423            CatalogItem::Table(table) => table.custom_logical_compaction_window,
2424            CatalogItem::Source(source) => source.custom_logical_compaction_window,
2425            CatalogItem::Index(index) => index.custom_logical_compaction_window,
2426            CatalogItem::MaterializedView(mview) => mview.custom_logical_compaction_window,
2427            CatalogItem::Log(_)
2428            | CatalogItem::View(_)
2429            | CatalogItem::Sink(_)
2430            | CatalogItem::Type(_)
2431            | CatalogItem::Func(_)
2432            | CatalogItem::Secret(_)
2433            | CatalogItem::Connection(_)
2434            | CatalogItem::ContinualTask(_) => None,
2435        }
2436    }
2437
2438    /// Mutable access to the custom compaction window, or None if this type does not support custom
2439    /// compaction windows. This does not reflect any propagated compaction window (i.e., source ->
2440    /// subsource).
2441    pub fn custom_logical_compaction_window_mut(
2442        &mut self,
2443    ) -> Option<&mut Option<CompactionWindow>> {
2444        let cw = match self {
2445            CatalogItem::Table(table) => &mut table.custom_logical_compaction_window,
2446            CatalogItem::Source(source) => &mut source.custom_logical_compaction_window,
2447            CatalogItem::Index(index) => &mut index.custom_logical_compaction_window,
2448            CatalogItem::MaterializedView(mview) => &mut mview.custom_logical_compaction_window,
2449            CatalogItem::Log(_)
2450            | CatalogItem::View(_)
2451            | CatalogItem::Sink(_)
2452            | CatalogItem::Type(_)
2453            | CatalogItem::Func(_)
2454            | CatalogItem::Secret(_)
2455            | CatalogItem::Connection(_)
2456            | CatalogItem::ContinualTask(_) => return None,
2457        };
2458        Some(cw)
2459    }
2460
2461    /// The initial compaction window, for objects that have one; that is, tables, sources, indexes,
2462    /// and MVs. This does not reflect any propagated compaction window (i.e., source -> subsource).
2463    ///
2464    /// If `custom_logical_compaction_window()` returns something, use that.  Otherwise, use a
2465    /// sensible default (currently 1s).
2466    ///
2467    /// For objects that do not have the concept of compaction window, return None.
2468    pub fn initial_logical_compaction_window(&self) -> Option<CompactionWindow> {
2469        let custom_logical_compaction_window = match self {
2470            CatalogItem::Table(_)
2471            | CatalogItem::Source(_)
2472            | CatalogItem::Index(_)
2473            | CatalogItem::MaterializedView(_)
2474            | CatalogItem::ContinualTask(_) => self.custom_logical_compaction_window(),
2475            CatalogItem::Log(_)
2476            | CatalogItem::View(_)
2477            | CatalogItem::Sink(_)
2478            | CatalogItem::Type(_)
2479            | CatalogItem::Func(_)
2480            | CatalogItem::Secret(_)
2481            | CatalogItem::Connection(_) => return None,
2482        };
2483        Some(custom_logical_compaction_window.unwrap_or(CompactionWindow::Default))
2484    }
2485
2486    /// Whether the item's logical compaction window
2487    /// is controlled by the METRICS_RETENTION
2488    /// system var.
2489    pub fn is_retained_metrics_object(&self) -> bool {
2490        match self {
2491            CatalogItem::Table(table) => table.is_retained_metrics_object,
2492            CatalogItem::Source(source) => source.is_retained_metrics_object,
2493            CatalogItem::Index(index) => index.is_retained_metrics_object,
2494            CatalogItem::Log(_)
2495            | CatalogItem::View(_)
2496            | CatalogItem::MaterializedView(_)
2497            | CatalogItem::Sink(_)
2498            | CatalogItem::Type(_)
2499            | CatalogItem::Func(_)
2500            | CatalogItem::Secret(_)
2501            | CatalogItem::Connection(_)
2502            | CatalogItem::ContinualTask(_) => false,
2503        }
2504    }
2505
    /// Serializes this item into its durable representation: the `CREATE` SQL
    /// statement, the [`GlobalId`] of the primary (root-version) collection,
    /// and any additional versioned collections (non-empty only for tables and
    /// materialized views).
    ///
    /// See also `into_serialized`, which consumes `self` and avoids the clones.
    ///
    /// # Panics
    ///
    /// Panics if called on a builtin item (log, introspection source, or any
    /// item without a `create_sql`) or on a function.
    pub fn to_serialized(&self) -> (String, GlobalId, BTreeMap<RelationVersion, GlobalId>) {
        match self {
            CatalogItem::Table(table) => {
                let create_sql = table
                    .create_sql
                    .clone()
                    .expect("builtin tables cannot be serialized");
                // Split the root version out as the primary collection; the
                // remaining entries are the table's historical versions.
                let mut collections = table.collections.clone();
                let global_id = collections
                    .remove(&RelationVersion::root())
                    .expect("at least one version");
                (create_sql, global_id, collections)
            }
            CatalogItem::Log(_) => unreachable!("builtin logs cannot be serialized"),
            CatalogItem::Source(source) => {
                assert!(
                    !matches!(source.data_source, DataSourceDesc::Introspection(_)),
                    "cannot serialize introspection/builtin sources",
                );
                let create_sql = source
                    .create_sql
                    .clone()
                    .expect("builtin sources cannot be serialized");
                (create_sql, source.global_id, BTreeMap::new())
            }
            CatalogItem::View(view) => (view.create_sql.clone(), view.global_id, BTreeMap::new()),
            CatalogItem::MaterializedView(mview) => {
                let mut collections = mview.collections.clone();
                let global_id = collections
                    .remove(&RelationVersion::root())
                    .expect("at least one version");
                (mview.create_sql.clone(), global_id, collections)
            }
            CatalogItem::Index(index) => {
                (index.create_sql.clone(), index.global_id, BTreeMap::new())
            }
            CatalogItem::Sink(sink) => (sink.create_sql.clone(), sink.global_id, BTreeMap::new()),
            CatalogItem::Type(typ) => {
                let create_sql = typ
                    .create_sql
                    .clone()
                    .expect("builtin types cannot be serialized");
                (create_sql, typ.global_id, BTreeMap::new())
            }
            CatalogItem::Secret(secret) => {
                (secret.create_sql.clone(), secret.global_id, BTreeMap::new())
            }
            CatalogItem::Connection(connection) => (
                connection.create_sql.clone(),
                connection.global_id,
                BTreeMap::new(),
            ),
            CatalogItem::Func(_) => unreachable!("cannot serialize functions yet"),
            CatalogItem::ContinualTask(ct) => {
                (ct.create_sql.clone(), ct.global_id, BTreeMap::new())
            }
        }
    }
2564
    /// Consuming variant of `to_serialized`: moves the `CREATE` SQL statement,
    /// primary (root-version) [`GlobalId`], and any additional versioned
    /// collections out of `self` without cloning.
    ///
    /// # Panics
    ///
    /// Panics if called on a builtin item (log, introspection source, or any
    /// item without a `create_sql`) or on a function.
    pub fn into_serialized(self) -> (String, GlobalId, BTreeMap<RelationVersion, GlobalId>) {
        match self {
            CatalogItem::Table(mut table) => {
                let create_sql = table
                    .create_sql
                    .expect("builtin tables cannot be serialized");
                // The root version is the primary collection; the rest are the
                // table's historical versions.
                let global_id = table
                    .collections
                    .remove(&RelationVersion::root())
                    .expect("at least one version");
                (create_sql, global_id, table.collections)
            }
            CatalogItem::Log(_) => unreachable!("builtin logs cannot be serialized"),
            CatalogItem::Source(source) => {
                assert!(
                    !matches!(source.data_source, DataSourceDesc::Introspection(_)),
                    "cannot serialize introspection/builtin sources",
                );
                let create_sql = source
                    .create_sql
                    .expect("builtin sources cannot be serialized");
                (create_sql, source.global_id, BTreeMap::new())
            }
            CatalogItem::View(view) => (view.create_sql, view.global_id, BTreeMap::new()),
            CatalogItem::MaterializedView(mut mview) => {
                let global_id = mview
                    .collections
                    .remove(&RelationVersion::root())
                    .expect("at least one version");
                (mview.create_sql, global_id, mview.collections)
            }
            CatalogItem::Index(index) => (index.create_sql, index.global_id, BTreeMap::new()),
            CatalogItem::Sink(sink) => (sink.create_sql, sink.global_id, BTreeMap::new()),
            CatalogItem::Type(typ) => {
                let create_sql = typ.create_sql.expect("builtin types cannot be serialized");
                (create_sql, typ.global_id, BTreeMap::new())
            }
            CatalogItem::Secret(secret) => (secret.create_sql, secret.global_id, BTreeMap::new()),
            CatalogItem::Connection(connection) => {
                (connection.create_sql, connection.global_id, BTreeMap::new())
            }
            CatalogItem::Func(_) => unreachable!("cannot serialize functions yet"),
            CatalogItem::ContinualTask(ct) => (ct.create_sql, ct.global_id, BTreeMap::new()),
        }
    }
2610
2611    /// Returns a global ID for a specific version selector. Returns `None` if the item does
2612    /// not have versions or if the version does not exist.
2613    pub fn global_id_for_version(&self, version: RelationVersionSelector) -> Option<GlobalId> {
2614        let collections = match self {
2615            CatalogItem::MaterializedView(mv) => &mv.collections,
2616            CatalogItem::Table(table) => &table.collections,
2617            CatalogItem::Source(source) => return Some(source.global_id),
2618            CatalogItem::Log(log) => return Some(log.global_id),
2619            CatalogItem::View(view) => return Some(view.global_id),
2620            CatalogItem::Sink(sink) => return Some(sink.global_id),
2621            CatalogItem::Index(index) => return Some(index.global_id),
2622            CatalogItem::Type(ty) => return Some(ty.global_id),
2623            CatalogItem::Func(func) => return Some(func.global_id),
2624            CatalogItem::Secret(secret) => return Some(secret.global_id),
2625            CatalogItem::Connection(conn) => return Some(conn.global_id),
2626            CatalogItem::ContinualTask(ct) => return Some(ct.global_id),
2627        };
2628        match version {
2629            RelationVersionSelector::Latest => collections.values().last().copied(),
2630            RelationVersionSelector::Specific(version) => collections.get(&version).copied(),
2631        }
2632    }
2633}
2634
impl CatalogEntry {
    /// Reports the latest [`RelationDesc`] of the rows produced by this [`CatalogEntry`], if it
    /// produces rows.
    pub fn relation_desc_latest(&self) -> Option<Cow<'_, RelationDesc>> {
        self.item.relation_desc(RelationVersionSelector::Latest)
    }

    /// Reports if the item has columns.
    ///
    /// Record types report columns even though they do not produce rows; all
    /// other items have columns exactly when they have a [`RelationDesc`].
    pub fn has_columns(&self) -> bool {
        match self.item() {
            CatalogItem::Type(Type { details, .. }) => {
                matches!(details.typ, CatalogType::Record { .. })
            }
            _ => self.relation_desc_latest().is_some(),
        }
    }

    /// Returns the [`mz_sql::func::Func`] associated with this `CatalogEntry`.
    pub fn func(&self) -> Result<&'static mz_sql::func::Func, SqlCatalogError> {
        self.item.func(self)
    }

    /// Returns the inner [`Index`] if this entry is an index, else `None`.
    pub fn index(&self) -> Option<&Index> {
        match self.item() {
            CatalogItem::Index(idx) => Some(idx),
            _ => None,
        }
    }

    /// Returns the inner [`MaterializedView`] if this entry is a materialized view, else `None`.
    pub fn materialized_view(&self) -> Option<&MaterializedView> {
        match self.item() {
            CatalogItem::MaterializedView(mv) => Some(mv),
            _ => None,
        }
    }

    /// Returns the inner [`Table`] if this entry is a table, else `None`.
    pub fn table(&self) -> Option<&Table> {
        match self.item() {
            CatalogItem::Table(tbl) => Some(tbl),
            _ => None,
        }
    }

    /// Returns the inner [`Source`] if this entry is a source, else `None`.
    pub fn source(&self) -> Option<&Source> {
        match self.item() {
            CatalogItem::Source(src) => Some(src),
            _ => None,
        }
    }

    /// Returns the inner [`Sink`] if this entry is a sink, else `None`.
    pub fn sink(&self) -> Option<&Sink> {
        match self.item() {
            CatalogItem::Sink(sink) => Some(sink),
            _ => None,
        }
    }

    /// Returns the inner [`Secret`] if this entry is a secret, else `None`.
    pub fn secret(&self) -> Option<&Secret> {
        match self.item() {
            CatalogItem::Secret(secret) => Some(secret),
            _ => None,
        }
    }

    /// Returns the inner [`Connection`] if this entry is a connection, else an
    /// `UnknownConnection` error that names the entry's fully qualified name.
    pub fn connection(&self) -> Result<&Connection, SqlCatalogError> {
        match self.item() {
            CatalogItem::Connection(connection) => Ok(connection),
            _ => {
                // Reconstruct a human-readable `database.schema.item` name for
                // the error; ambient (system) databases contribute no prefix.
                let db_name = match self.name().qualifiers.database_spec {
                    ResolvedDatabaseSpecifier::Ambient => "".to_string(),
                    ResolvedDatabaseSpecifier::Id(id) => format!("{id}."),
                };
                Err(SqlCatalogError::UnknownConnection(format!(
                    "{}{}.{}",
                    db_name,
                    self.name().qualifiers.schema_spec,
                    self.name().item
                )))
            }
        }
    }

    /// Returns the [`mz_storage_types::sources::SourceDesc`] associated with
    /// this `CatalogEntry`, if any.
    pub fn source_desc(
        &self,
    ) -> Result<Option<&SourceDesc<ReferencedConnection>>, SqlCatalogError> {
        self.item.source_desc(self)
    }

    /// Reports whether this catalog entry is a connection.
    pub fn is_connection(&self) -> bool {
        matches!(self.item(), CatalogItem::Connection(_))
    }

    /// Reports whether this catalog entry is a table.
    pub fn is_table(&self) -> bool {
        matches!(self.item(), CatalogItem::Table(_))
    }

    /// Reports whether this catalog entry is a source. Note that this includes
    /// subsources.
    pub fn is_source(&self) -> bool {
        matches!(self.item(), CatalogItem::Source(_))
    }

    /// Reports whether this catalog entry is a subsource and, if it is, the
    /// ingestion it is an export of, as well as the item it exports.
    pub fn subsource_details(
        &self,
    ) -> Option<(CatalogItemId, &UnresolvedItemName, &SourceExportDetails)> {
        match &self.item() {
            CatalogItem::Source(source) => match &source.data_source {
                DataSourceDesc::IngestionExport {
                    ingestion_id,
                    external_reference,
                    details,
                    data_config: _,
                } => Some((*ingestion_id, external_reference, details)),
                _ => None,
            },
            _ => None,
        }
    }

    /// Reports whether this catalog entry is a source export and, if it is, the
    /// ingestion it is an export of, as well as the item it exports.
    ///
    /// Unlike [`CatalogEntry::subsource_details`], this also covers tables
    /// whose data source is an ingestion export, and additionally returns the
    /// export's data config.
    pub fn source_export_details(
        &self,
    ) -> Option<(
        CatalogItemId,
        &UnresolvedItemName,
        &SourceExportDetails,
        &SourceExportDataConfig<ReferencedConnection>,
    )> {
        match &self.item() {
            CatalogItem::Source(source) => match &source.data_source {
                DataSourceDesc::IngestionExport {
                    ingestion_id,
                    external_reference,
                    details,
                    data_config,
                } => Some((*ingestion_id, external_reference, details, data_config)),
                _ => None,
            },
            CatalogItem::Table(table) => match &table.data_source {
                TableDataSource::DataSource {
                    desc:
                        DataSourceDesc::IngestionExport {
                            ingestion_id,
                            external_reference,
                            details,
                            data_config,
                        },
                    timeline: _,
                } => Some((*ingestion_id, external_reference, details, data_config)),
                _ => None,
            },
            _ => None,
        }
    }

    /// Reports whether this catalog entry is a progress source.
    pub fn is_progress_source(&self) -> bool {
        self.item().is_progress_source()
    }

    /// Returns the [`CatalogItemId`] of this entry's progress collection, if
    /// it has one (i.e., if the entry is an ingestion-style source).
    pub fn progress_id(&self) -> Option<CatalogItemId> {
        match &self.item() {
            CatalogItem::Source(source) => match &source.data_source {
                // New-syntax ingestions track progress under their own ID;
                // old-syntax ingestions use a dedicated progress subsource.
                DataSourceDesc::Ingestion { .. } => Some(self.id),
                DataSourceDesc::OldSyntaxIngestion {
                    progress_subsource, ..
                } => Some(*progress_subsource),
                DataSourceDesc::IngestionExport { .. }
                | DataSourceDesc::Introspection(_)
                | DataSourceDesc::Progress
                | DataSourceDesc::Webhook { .. }
                | DataSourceDesc::Catalog => None,
            },
            CatalogItem::Table(_)
            | CatalogItem::Log(_)
            | CatalogItem::View(_)
            | CatalogItem::MaterializedView(_)
            | CatalogItem::Sink(_)
            | CatalogItem::Index(_)
            | CatalogItem::Type(_)
            | CatalogItem::Func(_)
            | CatalogItem::Secret(_)
            | CatalogItem::Connection(_)
            | CatalogItem::ContinualTask(_) => None,
        }
    }

    /// Reports whether this catalog entry is a sink.
    pub fn is_sink(&self) -> bool {
        matches!(self.item(), CatalogItem::Sink(_))
    }

    /// Reports whether this catalog entry is a materialized view.
    pub fn is_materialized_view(&self) -> bool {
        matches!(self.item(), CatalogItem::MaterializedView(_))
    }

    /// Reports whether this catalog entry is a view.
    pub fn is_view(&self) -> bool {
        matches!(self.item(), CatalogItem::View(_))
    }

    /// Reports whether this catalog entry is a secret.
    pub fn is_secret(&self) -> bool {
        matches!(self.item(), CatalogItem::Secret(_))
    }

    /// Reports whether this catalog entry is an introspection source.
    pub fn is_introspection_source(&self) -> bool {
        matches!(self.item(), CatalogItem::Log(_))
    }

    /// Reports whether this catalog entry is an index.
    pub fn is_index(&self) -> bool {
        matches!(self.item(), CatalogItem::Index(_))
    }

    /// Reports whether this catalog entry is a continual task.
    pub fn is_continual_task(&self) -> bool {
        matches!(self.item(), CatalogItem::ContinualTask(_))
    }

    /// Reports whether this catalog entry can be treated as a relation, it can produce rows.
    pub fn is_relation(&self) -> bool {
        mz_sql::catalog::ObjectType::from(self.item_type()).is_relation()
    }

    /// Collects the identifiers of the objects that were encountered when
    /// resolving names in the item's DDL statement.
    pub fn references(&self) -> &ResolvedIds {
        self.item.references()
    }

    /// Collects the identifiers of the objects used by this [`CatalogEntry`].
    ///
    /// Like [`CatalogEntry::references()`] but also includes objects that are not directly
    /// referenced. For example this will include any catalog objects used to implement functions
    /// and casts in the item.
    pub fn uses(&self) -> BTreeSet<CatalogItemId> {
        self.item.uses()
    }

    /// Returns the `CatalogItem` associated with this catalog entry.
    pub fn item(&self) -> &CatalogItem {
        &self.item
    }

    /// Returns the [`CatalogItemId`] of this catalog entry.
    pub fn id(&self) -> CatalogItemId {
        self.id
    }

    /// Returns all of the [`GlobalId`]s associated with this item.
    pub fn global_ids(&self) -> impl Iterator<Item = GlobalId> + '_ {
        self.item().global_ids()
    }

    /// Returns the most recent [`GlobalId`] associated with this item.
    pub fn latest_global_id(&self) -> GlobalId {
        self.item().latest_global_id()
    }

    /// Returns the OID of this catalog entry.
    pub fn oid(&self) -> u32 {
        self.oid
    }

    /// Returns the fully qualified name of this catalog entry.
    pub fn name(&self) -> &QualifiedItemName {
        &self.name
    }

    /// Returns the identifiers of the items that directly reference this item
    /// by name (i.e., this item's by-name dependents).
    pub fn referenced_by(&self) -> &[CatalogItemId] {
        &self.referenced_by
    }

    /// Returns the identifiers of the items that depend upon this item.
    pub fn used_by(&self) -> &[CatalogItemId] {
        &self.used_by
    }

    /// Returns the connection ID that this item belongs to, if this item is
    /// temporary.
    pub fn conn_id(&self) -> Option<&ConnectionId> {
        self.item.conn_id()
    }

    /// Returns the role ID of the entry owner.
    pub fn owner_id(&self) -> &RoleId {
        &self.owner_id
    }

    /// Returns the privileges of the entry.
    pub fn privileges(&self) -> &PrivilegeMap {
        &self.privileges
    }

    /// Returns the comment object ID for this entry.
    pub fn comment_object_id(&self) -> CommentObjectId {
        use CatalogItemType::*;
        match self.item_type() {
            Table => CommentObjectId::Table(self.id),
            Source => CommentObjectId::Source(self.id),
            Sink => CommentObjectId::Sink(self.id),
            View => CommentObjectId::View(self.id),
            MaterializedView => CommentObjectId::MaterializedView(self.id),
            Index => CommentObjectId::Index(self.id),
            Func => CommentObjectId::Func(self.id),
            Connection => CommentObjectId::Connection(self.id),
            Type => CommentObjectId::Type(self.id),
            Secret => CommentObjectId::Secret(self.id),
            ContinualTask => CommentObjectId::ContinualTask(self.id),
        }
    }
}
2964
/// In-memory store for comments on catalog objects.
///
/// Comments are keyed first by the object they are attached to, then by an
/// optional sub-component (e.g., a column of a relation); a `None`
/// sub-component addresses the object itself.
#[derive(Debug, Clone, Default)]
pub struct CommentsMap {
    map: BTreeMap<CommentObjectId, BTreeMap<Option<usize>, String>>,
}
2969
2970impl CommentsMap {
2971    pub fn update_comment(
2972        &mut self,
2973        object_id: CommentObjectId,
2974        sub_component: Option<usize>,
2975        comment: Option<String>,
2976    ) -> Option<String> {
2977        let object_comments = self.map.entry(object_id).or_default();
2978
2979        // Either replace the existing comment, or remove it if comment is None/NULL.
2980        let (empty, prev) = if let Some(comment) = comment {
2981            let prev = object_comments.insert(sub_component, comment);
2982            (false, prev)
2983        } else {
2984            let prev = object_comments.remove(&sub_component);
2985            (object_comments.is_empty(), prev)
2986        };
2987
2988        // Cleanup entries that are now empty.
2989        if empty {
2990            self.map.remove(&object_id);
2991        }
2992
2993        // Return the previous comment, if there was one, for easy removal.
2994        prev
2995    }
2996
2997    /// Remove all comments for `object_id` from the map.
2998    ///
2999    /// Generally there is one comment for a given [`CommentObjectId`], but in the case of
3000    /// relations you can also have comments on the individual columns. Dropping the comments for a
3001    /// relation will also drop all of the comments on any columns.
3002    pub fn drop_comments(
3003        &mut self,
3004        object_ids: &BTreeSet<CommentObjectId>,
3005    ) -> Vec<(CommentObjectId, Option<usize>, String)> {
3006        let mut removed_comments = Vec::new();
3007
3008        for object_id in object_ids {
3009            if let Some(comments) = self.map.remove(object_id) {
3010                let removed = comments
3011                    .into_iter()
3012                    .map(|(sub_comp, comment)| (object_id.clone(), sub_comp, comment));
3013                removed_comments.extend(removed);
3014            }
3015        }
3016
3017        removed_comments
3018    }
3019
3020    pub fn iter(&self) -> impl Iterator<Item = (CommentObjectId, Option<usize>, &str)> {
3021        self.map
3022            .iter()
3023            .map(|(id, comments)| {
3024                comments
3025                    .iter()
3026                    .map(|(pos, comment)| (*id, *pos, comment.as_str()))
3027            })
3028            .flatten()
3029    }
3030
3031    pub fn get_object_comments(
3032        &self,
3033        object_id: CommentObjectId,
3034    ) -> Option<&BTreeMap<Option<usize>, String>> {
3035        self.map.get(&object_id)
3036    }
3037}
3038
3039impl Serialize for CommentsMap {
3040    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
3041    where
3042        S: serde::Serializer,
3043    {
3044        let comment_count = self
3045            .map
3046            .iter()
3047            .map(|(_object_id, comments)| comments.len())
3048            .sum();
3049
3050        let mut seq = serializer.serialize_seq(Some(comment_count))?;
3051        for (object_id, sub) in &self.map {
3052            for (sub_component, comment) in sub {
3053                seq.serialize_element(&(
3054                    format!("{object_id:?}"),
3055                    format!("{sub_component:?}"),
3056                    comment,
3057                ))?;
3058            }
3059        }
3060        seq.end()
3061    }
3062}
3063
/// All default privileges in the catalog, keyed by the object specification
/// (role, database, schema, object type) they apply to.
#[derive(Debug, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, Serialize, Default)]
pub struct DefaultPrivileges {
    #[serde(serialize_with = "mz_ore::serde::map_key_to_string")]
    privileges: BTreeMap<DefaultPrivilegeObject, RoleDefaultPrivileges>,
}
3069
// Use a new type here because otherwise we have two levels of BTreeMap, both needing
// map_key_to_string.
/// The per-grantee default privileges for a single [`DefaultPrivilegeObject`].
#[derive(Debug, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, Serialize, Default)]
struct RoleDefaultPrivileges(
    /// Denormalized, the key is the grantee Role.
    #[serde(serialize_with = "mz_ore::serde::map_key_to_string")]
    BTreeMap<RoleId, DefaultPrivilegeAclItem>,
);
3078
// Deref to the inner map so callers can use `BTreeMap` methods directly.
impl Deref for RoleDefaultPrivileges {
    type Target = BTreeMap<RoleId, DefaultPrivilegeAclItem>;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
3086
// Mutable counterpart of the `Deref` impl above the newtype's inner map.
impl DerefMut for RoleDefaultPrivileges {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0
    }
}
3092
3093impl DefaultPrivileges {
3094    /// Add a new default privilege into the set of all default privileges.
3095    pub fn grant(&mut self, object: DefaultPrivilegeObject, privilege: DefaultPrivilegeAclItem) {
3096        if privilege.acl_mode.is_empty() {
3097            return;
3098        }
3099
3100        let privileges = self.privileges.entry(object).or_default();
3101        if let Some(default_privilege) = privileges.get_mut(&privilege.grantee) {
3102            default_privilege.acl_mode |= privilege.acl_mode;
3103        } else {
3104            privileges.insert(privilege.grantee, privilege);
3105        }
3106    }
3107
3108    /// Revoke a default privilege from the set of all default privileges.
3109    pub fn revoke(&mut self, object: &DefaultPrivilegeObject, privilege: &DefaultPrivilegeAclItem) {
3110        if let Some(privileges) = self.privileges.get_mut(object) {
3111            if let Some(default_privilege) = privileges.get_mut(&privilege.grantee) {
3112                default_privilege.acl_mode =
3113                    default_privilege.acl_mode.difference(privilege.acl_mode);
3114                if default_privilege.acl_mode.is_empty() {
3115                    privileges.remove(&privilege.grantee);
3116                }
3117            }
3118            if privileges.is_empty() {
3119                self.privileges.remove(object);
3120            }
3121        }
3122    }
3123
3124    /// Get the privileges that will be granted on all objects matching `object` to `grantee`, if
3125    /// any exist.
3126    pub fn get_privileges_for_grantee(
3127        &self,
3128        object: &DefaultPrivilegeObject,
3129        grantee: &RoleId,
3130    ) -> Option<&AclMode> {
3131        self.privileges
3132            .get(object)
3133            .and_then(|privileges| privileges.get(grantee))
3134            .map(|privilege| &privilege.acl_mode)
3135    }
3136
    /// Get all default privileges that apply to the provided object details.
    ///
    /// Looks up every default-privilege entry that could match the object: the
    /// specific `(role_id, database_id, schema_id)` combination as well as the
    /// broader fallbacks (no schema, no database, and the same set again for
    /// `RoleId::Public`). Matching entries are merged per grantee and then
    /// restricted to privileges that are valid for `object_type`.
    pub fn get_applicable_privileges(
        &self,
        role_id: RoleId,
        database_id: Option<DatabaseId>,
        schema_id: Option<SchemaId>,
        object_type: mz_sql::catalog::ObjectType,
    ) -> impl Iterator<Item = DefaultPrivilegeAclItem> + '_ {
        // Privileges consider all relations to be of type table due to PostgreSQL compatibility. We
        // don't require the caller to worry about that and we will map their `object_type` to the
        // correct type for privileges.
        let privilege_object_type = if object_type.is_relation() {
            mz_sql::catalog::ObjectType::Table
        } else {
            object_type
        };
        // NOTE: deliberately derived from the caller's `object_type`, not the
        // table-mapped `privilege_object_type`, so that the final mask only
        // allows privileges valid for the actual object kind.
        let valid_acl_mode = rbac::all_object_privileges(SystemObjectType::Object(object_type));

        // Collect all entries that apply to the provided object details.
        // If either `database_id` or `schema_id` are `None`, then we might end up with duplicate
        // entries in the vec below. That's OK because we consolidate the results after.
        [
            DefaultPrivilegeObject {
                role_id,
                database_id,
                schema_id,
                object_type: privilege_object_type,
            },
            DefaultPrivilegeObject {
                role_id,
                database_id,
                schema_id: None,
                object_type: privilege_object_type,
            },
            DefaultPrivilegeObject {
                role_id,
                database_id: None,
                schema_id: None,
                object_type: privilege_object_type,
            },
            DefaultPrivilegeObject {
                role_id: RoleId::Public,
                database_id,
                schema_id,
                object_type: privilege_object_type,
            },
            DefaultPrivilegeObject {
                role_id: RoleId::Public,
                database_id,
                schema_id: None,
                object_type: privilege_object_type,
            },
            DefaultPrivilegeObject {
                role_id: RoleId::Public,
                database_id: None,
                schema_id: None,
                object_type: privilege_object_type,
            },
        ]
        .into_iter()
        .filter_map(|object| self.privileges.get(&object))
        .flat_map(|acl_map| acl_map.values())
        // Consolidate privileges with a common grantee.
        .fold(
            BTreeMap::new(),
            |mut accum, DefaultPrivilegeAclItem { grantee, acl_mode }| {
                let accum_acl_mode = accum.entry(grantee).or_insert_with(AclMode::empty);
                *accum_acl_mode |= *acl_mode;
                accum
            },
        )
        .into_iter()
        // Restrict the acl_mode to only privileges valid for the provided object type. If the
        // default privilege has an object type of Table, then it may contain privileges valid for
        // tables but not other relations. If the passed in object type is another relation, then
        // we need to remove any privilege that is not valid for the specified relation.
        .map(move |(grantee, acl_mode)| (grantee, acl_mode & valid_acl_mode))
        // Filter out empty privileges.
        .filter(|(_, acl_mode)| !acl_mode.is_empty())
        .map(|(grantee, acl_mode)| DefaultPrivilegeAclItem {
            grantee: *grantee,
            acl_mode,
        })
    }
3221
3222    pub fn iter(
3223        &self,
3224    ) -> impl Iterator<
3225        Item = (
3226            &DefaultPrivilegeObject,
3227            impl Iterator<Item = &DefaultPrivilegeAclItem>,
3228        ),
3229    > {
3230        self.privileges
3231            .iter()
3232            .map(|(object, acl_map)| (object, acl_map.values()))
3233    }
3234}
3235
/// In-memory configuration of a cluster; mirrors [`durable::ClusterConfig`]
/// (see the `From` conversions between the two).
#[derive(Clone, Debug, Deserialize, Serialize, PartialOrd, PartialEq, Eq, Ord)]
pub struct ClusterConfig {
    /// Whether the cluster is managed by Materialize or unmanaged by the user.
    pub variant: ClusterVariant,
    /// Optional workload class label, carried through to/from the durable
    /// catalog unchanged.
    pub workload_class: Option<String>,
}
3241
3242impl ClusterConfig {
3243    pub fn features(&self) -> Option<&OptimizerFeatureOverrides> {
3244        match &self.variant {
3245            ClusterVariant::Managed(managed) => Some(&managed.optimizer_feature_overrides),
3246            ClusterVariant::Unmanaged => None,
3247        }
3248    }
3249}
3250
3251impl From<ClusterConfig> for durable::ClusterConfig {
3252    fn from(config: ClusterConfig) -> Self {
3253        Self {
3254            variant: config.variant.into(),
3255            workload_class: config.workload_class,
3256        }
3257    }
3258}
3259
3260impl From<durable::ClusterConfig> for ClusterConfig {
3261    fn from(config: durable::ClusterConfig) -> Self {
3262        Self {
3263            variant: config.variant.into(),
3264            workload_class: config.workload_class,
3265        }
3266    }
3267}
3268
/// Configuration of a managed cluster; mirrors
/// [`durable::ClusterVariantManaged`] (see the `From` conversions).
#[derive(Clone, Debug, Deserialize, Serialize, PartialOrd, PartialEq, Eq, Ord)]
pub struct ClusterVariantManaged {
    /// The size designation of the cluster's replicas.
    pub size: String,
    /// Availability zones associated with the cluster's replicas.
    pub availability_zones: Vec<String>,
    /// Logging configuration for the cluster's replicas.
    pub logging: ReplicaLogging,
    /// The number of replicas the cluster maintains.
    pub replication_factor: u32,
    /// Optimizer feature overrides applied to this cluster (exposed via
    /// `ClusterConfig::features`).
    pub optimizer_feature_overrides: OptimizerFeatureOverrides,
    /// The cluster's schedule (see [`ClusterSchedule`]).
    pub schedule: ClusterSchedule,
}
3278
3279impl From<ClusterVariantManaged> for durable::ClusterVariantManaged {
3280    fn from(managed: ClusterVariantManaged) -> Self {
3281        Self {
3282            size: managed.size,
3283            availability_zones: managed.availability_zones,
3284            logging: managed.logging,
3285            replication_factor: managed.replication_factor,
3286            optimizer_feature_overrides: managed.optimizer_feature_overrides.into(),
3287            schedule: managed.schedule,
3288        }
3289    }
3290}
3291
3292impl From<durable::ClusterVariantManaged> for ClusterVariantManaged {
3293    fn from(managed: durable::ClusterVariantManaged) -> Self {
3294        Self {
3295            size: managed.size,
3296            availability_zones: managed.availability_zones,
3297            logging: managed.logging,
3298            replication_factor: managed.replication_factor,
3299            optimizer_feature_overrides: managed.optimizer_feature_overrides.into(),
3300            schedule: managed.schedule,
3301        }
3302    }
3303}
3304
/// Whether a cluster's replicas are managed by Materialize or by the user.
#[derive(Clone, Debug, Deserialize, Serialize, PartialOrd, PartialEq, Eq, Ord)]
pub enum ClusterVariant {
    /// A managed cluster, with the managed configuration attached.
    Managed(ClusterVariantManaged),
    /// An unmanaged cluster; no managed configuration is stored.
    Unmanaged,
}
3310
3311impl From<ClusterVariant> for durable::ClusterVariant {
3312    fn from(variant: ClusterVariant) -> Self {
3313        match variant {
3314            ClusterVariant::Managed(managed) => Self::Managed(managed.into()),
3315            ClusterVariant::Unmanaged => Self::Unmanaged,
3316        }
3317    }
3318}
3319
3320impl From<durable::ClusterVariant> for ClusterVariant {
3321    fn from(variant: durable::ClusterVariant) -> Self {
3322        match variant {
3323            durable::ClusterVariant::Managed(managed) => Self::Managed(managed.into()),
3324            durable::ClusterVariant::Unmanaged => Self::Unmanaged,
3325        }
3326    }
3327}
3328
3329impl mz_sql::catalog::CatalogDatabase for Database {
3330    fn name(&self) -> &str {
3331        &self.name
3332    }
3333
3334    fn id(&self) -> DatabaseId {
3335        self.id
3336    }
3337
3338    fn has_schemas(&self) -> bool {
3339        !self.schemas_by_name.is_empty()
3340    }
3341
3342    fn schema_ids(&self) -> &BTreeMap<String, SchemaId> {
3343        &self.schemas_by_name
3344    }
3345
3346    // `as` is ok to use to cast to a trait object.
3347    #[allow(clippy::as_conversions)]
3348    fn schemas(&self) -> Vec<&dyn CatalogSchema> {
3349        self.schemas_by_id
3350            .values()
3351            .map(|schema| schema as &dyn CatalogSchema)
3352            .collect()
3353    }
3354
3355    fn owner_id(&self) -> RoleId {
3356        self.owner_id
3357    }
3358
3359    fn privileges(&self) -> &PrivilegeMap {
3360        &self.privileges
3361    }
3362}
3363
3364impl mz_sql::catalog::CatalogSchema for Schema {
3365    fn database(&self) -> &ResolvedDatabaseSpecifier {
3366        &self.name.database
3367    }
3368
3369    fn name(&self) -> &QualifiedSchemaName {
3370        &self.name
3371    }
3372
3373    fn id(&self) -> &SchemaSpecifier {
3374        &self.id
3375    }
3376
3377    fn has_items(&self) -> bool {
3378        !self.items.is_empty()
3379    }
3380
3381    fn item_ids(&self) -> Box<dyn Iterator<Item = CatalogItemId> + '_> {
3382        Box::new(
3383            self.items
3384                .values()
3385                .chain(self.functions.values())
3386                .chain(self.types.values())
3387                .copied(),
3388        )
3389    }
3390
3391    fn owner_id(&self) -> RoleId {
3392        self.owner_id
3393    }
3394
3395    fn privileges(&self) -> &PrivilegeMap {
3396        &self.privileges
3397    }
3398}
3399
3400impl mz_sql::catalog::CatalogRole for Role {
3401    fn name(&self) -> &str {
3402        &self.name
3403    }
3404
3405    fn id(&self) -> RoleId {
3406        self.id
3407    }
3408
3409    fn membership(&self) -> &BTreeMap<RoleId, RoleId> {
3410        &self.membership.map
3411    }
3412
3413    fn attributes(&self) -> &RoleAttributes {
3414        &self.attributes
3415    }
3416
3417    fn vars(&self) -> &BTreeMap<String, OwnedVarInput> {
3418        &self.vars.map
3419    }
3420}
3421
3422impl mz_sql::catalog::CatalogNetworkPolicy for NetworkPolicy {
3423    fn name(&self) -> &str {
3424        &self.name
3425    }
3426
3427    fn id(&self) -> NetworkPolicyId {
3428        self.id
3429    }
3430
3431    fn owner_id(&self) -> RoleId {
3432        self.owner_id
3433    }
3434
3435    fn privileges(&self) -> &PrivilegeMap {
3436        &self.privileges
3437    }
3438}
3439
// NOTE: several methods below (`replicas`, `replica`, `is_managed`,
// `try_to_plan`) share a name with inherent methods on `Cluster`; the bodies
// resolve to the inherent methods because inherent methods take precedence
// over trait methods in Rust's method resolution.
impl mz_sql::catalog::CatalogCluster<'_> for Cluster {
    /// The cluster's name.
    fn name(&self) -> &str {
        &self.name
    }

    /// The cluster's unique identifier.
    fn id(&self) -> ClusterId {
        self.id
    }

    /// The catalog items bound to (i.e. installed on) this cluster.
    fn bound_objects(&self) -> &BTreeSet<CatalogItemId> {
        &self.bound_objects
    }

    /// Map from replica name to replica ID.
    fn replica_ids(&self) -> &BTreeMap<String, ReplicaId> {
        &self.replica_id_by_name_
    }

    // `as` is ok to use to cast to a trait object.
    #[allow(clippy::as_conversions)]
    fn replicas(&self) -> Vec<&dyn CatalogClusterReplica<'_>> {
        // Delegates to the inherent `Cluster::replicas`, not this trait method.
        self.replicas()
            .map(|replica| replica as &dyn CatalogClusterReplica)
            .collect()
    }

    /// Looks up a replica by ID.
    ///
    /// # Panics
    ///
    /// Panics if the replica is not found, which indicates the catalog is out
    /// of sync.
    fn replica(&self, id: ReplicaId) -> &dyn CatalogClusterReplica<'_> {
        self.replica(id).expect("catalog out of sync")
    }

    /// The role that owns the cluster.
    fn owner_id(&self) -> RoleId {
        self.owner_id
    }

    /// The privileges granted on the cluster.
    fn privileges(&self) -> &PrivilegeMap {
        &self.privileges
    }

    /// Whether the cluster is managed (delegates to the inherent method).
    fn is_managed(&self) -> bool {
        self.is_managed()
    }

    /// The configured size, if the cluster is managed; `None` for unmanaged.
    fn managed_size(&self) -> Option<&str> {
        match &self.config.variant {
            ClusterVariant::Managed(ClusterVariantManaged { size, .. }) => Some(size),
            _ => None,
        }
    }

    /// The configured schedule, if the cluster is managed; `None` for unmanaged.
    fn schedule(&self) -> Option<&ClusterSchedule> {
        match &self.config.variant {
            ClusterVariant::Managed(ClusterVariantManaged { schedule, .. }) => Some(schedule),
            _ => None,
        }
    }

    /// Converts the cluster into a `CREATE CLUSTER` plan (delegates to the
    /// inherent method).
    fn try_to_plan(&self) -> Result<CreateClusterPlan, PlanError> {
        self.try_to_plan()
    }
}
3499
3500impl mz_sql::catalog::CatalogClusterReplica<'_> for ClusterReplica {
3501    fn name(&self) -> &str {
3502        &self.name
3503    }
3504
3505    fn cluster_id(&self) -> ClusterId {
3506        self.cluster_id
3507    }
3508
3509    fn replica_id(&self) -> ReplicaId {
3510        self.replica_id
3511    }
3512
3513    fn owner_id(&self) -> RoleId {
3514        self.owner_id
3515    }
3516
3517    fn internal(&self) -> bool {
3518        self.config.location.internal()
3519    }
3520}
3521
// NOTE: most methods below delegate to same-named inherent methods on
// `CatalogEntry`; inherent methods take precedence over trait methods, so
// these calls do not recurse.
impl mz_sql::catalog::CatalogItem for CatalogEntry {
    /// The item's fully qualified name.
    fn name(&self) -> &QualifiedItemName {
        self.name()
    }

    /// The item's catalog identifier.
    fn id(&self) -> CatalogItemId {
        self.id()
    }

    /// All global IDs associated with the item (one per relation version).
    fn global_ids(&self) -> Box<dyn Iterator<Item = GlobalId> + '_> {
        Box::new(self.global_ids())
    }

    /// The item's PostgreSQL-compatible OID.
    fn oid(&self) -> u32 {
        self.oid()
    }

    fn func(&self) -> Result<&'static mz_sql::func::Func, SqlCatalogError> {
        self.func()
    }

    fn source_desc(&self) -> Result<Option<&SourceDesc<ReferencedConnection>>, SqlCatalogError> {
        self.source_desc()
    }

    /// The item's connection details, if it is a connection; errors otherwise.
    fn connection(
        &self,
    ) -> Result<mz_storage_types::connections::Connection<ReferencedConnection>, SqlCatalogError>
    {
        Ok(self.connection()?.details.to_connection())
    }

    /// The SQL that created the item.
    ///
    /// Items that can be builtins (tables, sources, types) store their SQL as
    /// an `Option` and report `"<builtin>"` when it is absent; funcs and logs
    /// are always builtins.
    fn create_sql(&self) -> &str {
        match self.item() {
            CatalogItem::Table(Table { create_sql, .. }) => {
                create_sql.as_deref().unwrap_or("<builtin>")
            }
            CatalogItem::Source(Source { create_sql, .. }) => {
                create_sql.as_deref().unwrap_or("<builtin>")
            }
            CatalogItem::Sink(Sink { create_sql, .. }) => create_sql,
            CatalogItem::View(View { create_sql, .. }) => create_sql,
            CatalogItem::MaterializedView(MaterializedView { create_sql, .. }) => create_sql,
            CatalogItem::Index(Index { create_sql, .. }) => create_sql,
            CatalogItem::Type(Type { create_sql, .. }) => {
                create_sql.as_deref().unwrap_or("<builtin>")
            }
            CatalogItem::Secret(Secret { create_sql, .. }) => create_sql,
            CatalogItem::Connection(Connection { create_sql, .. }) => create_sql,
            CatalogItem::Func(_) => "<builtin>",
            CatalogItem::Log(_) => "<builtin>",
            CatalogItem::ContinualTask(ContinualTask { create_sql, .. }) => create_sql,
        }
    }

    /// The item's catalog type (table, source, view, ...).
    fn item_type(&self) -> SqlCatalogItemType {
        self.item().typ()
    }

    /// For an index: its key expressions and the global ID it is built on.
    fn index_details(&self) -> Option<(&[MirScalarExpr], GlobalId)> {
        if let CatalogItem::Index(Index { keys, on, .. }) = self.item() {
            Some((keys, *on))
        } else {
            None
        }
    }

    /// For a writable table (one backed by table writes): its column defaults.
    fn writable_table_details(&self) -> Option<&[Expr<Aug>]> {
        if let CatalogItem::Table(Table {
            data_source: TableDataSource::TableWrites { defaults },
            ..
        }) = self.item()
        {
            Some(defaults.as_slice())
        } else {
            None
        }
    }

    /// For a materialized view: the item it replaces, if any.
    fn replacement_target(&self) -> Option<CatalogItemId> {
        if let CatalogItem::MaterializedView(mv) = self.item() {
            mv.replacement_target
        } else {
            None
        }
    }

    /// For a type: its type details.
    fn type_details(&self) -> Option<&CatalogTypeDetails<IdReference>> {
        if let CatalogItem::Type(Type { details, .. }) = self.item() {
            Some(details)
        } else {
            None
        }
    }

    fn references(&self) -> &ResolvedIds {
        self.references()
    }

    fn uses(&self) -> BTreeSet<CatalogItemId> {
        self.uses()
    }

    fn referenced_by(&self) -> &[CatalogItemId] {
        self.referenced_by()
    }

    fn used_by(&self) -> &[CatalogItemId] {
        self.used_by()
    }

    fn subsource_details(
        &self,
    ) -> Option<(CatalogItemId, &UnresolvedItemName, &SourceExportDetails)> {
        self.subsource_details()
    }

    fn source_export_details(
        &self,
    ) -> Option<(
        CatalogItemId,
        &UnresolvedItemName,
        &SourceExportDetails,
        &SourceExportDataConfig<ReferencedConnection>,
    )> {
        self.source_export_details()
    }

    fn is_progress_source(&self) -> bool {
        self.is_progress_source()
    }

    fn progress_id(&self) -> Option<CatalogItemId> {
        self.progress_id()
    }

    /// The role that owns the item.
    fn owner_id(&self) -> RoleId {
        self.owner_id
    }

    /// The privileges granted on the item.
    fn privileges(&self) -> &PrivilegeMap {
        &self.privileges
    }

    /// The cluster the item runs on, if any.
    fn cluster_id(&self) -> Option<ClusterId> {
        self.item().cluster_id()
    }

    /// A view of the item pinned at the given relation version.
    ///
    /// Note that this clones the entire entry.
    fn at_version(
        &self,
        version: RelationVersionSelector,
    ) -> Box<dyn mz_sql::catalog::CatalogCollectionItem> {
        Box::new(CatalogCollectionEntry {
            entry: self.clone(),
            version,
        })
    }

    /// The latest relation version, for tables only; `None` for other items.
    fn latest_version(&self) -> Option<RelationVersion> {
        self.table().map(|t| t.desc.latest_version())
    }
}
3684
/// A single update to the catalog state.
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)]
pub struct StateUpdate {
    /// What changed.
    pub kind: StateUpdateKind,
    /// The timestamp at which the change occurred.
    pub ts: Timestamp,
    /// Whether the object was added or retracted.
    pub diff: StateDiff,
}
3692
/// The contents of a single state update.
///
/// Variants are listed in dependency order.
///
/// Payloads are durable-catalog types ([`crate::durable::objects`]), except
/// for [`StateUpdateKind::TemporaryItem`], which never touches the durable
/// catalog (see the comment on that variant).
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)]
pub enum StateUpdateKind {
    Role(durable::objects::Role),
    RoleAuth(durable::objects::RoleAuth),
    Database(durable::objects::Database),
    Schema(durable::objects::Schema),
    DefaultPrivilege(durable::objects::DefaultPrivilege),
    SystemPrivilege(MzAclItem),
    SystemConfiguration(durable::objects::SystemConfiguration),
    Cluster(durable::objects::Cluster),
    NetworkPolicy(durable::objects::NetworkPolicy),
    IntrospectionSourceIndex(durable::objects::IntrospectionSourceIndex),
    ClusterReplica(durable::objects::ClusterReplica),
    SourceReferences(durable::objects::SourceReferences),
    SystemObjectMapping(durable::objects::SystemObjectMapping),
    // Temporary items are not actually updated via the durable catalog, but
    // this allows us to model them the same way as all other items in parts of
    // the pipeline.
    TemporaryItem(TemporaryItem),
    Item(durable::objects::Item),
    Comment(durable::objects::Comment),
    AuditLog(durable::objects::AuditLog),
    // Storage updates.
    StorageCollectionMetadata(durable::objects::StorageCollectionMetadata),
    UnfinalizedShard(durable::objects::UnfinalizedShard),
}
3722
/// Valid diffs for catalog state updates.
///
/// Catalog state updates are differential: an object is either added or
/// retracted, so only unit diffs (`-1`/`+1`) are representable (see the
/// `From`/`TryFrom` conversions with [`Diff`]).
#[derive(Debug, Clone, Copy, Eq, PartialEq, Ord, PartialOrd)]
pub enum StateDiff {
    /// The object is removed from the catalog state (`-1`).
    Retraction,
    /// The object is added to the catalog state (`+1`).
    Addition,
}
3729
3730impl From<StateDiff> for Diff {
3731    fn from(diff: StateDiff) -> Self {
3732        match diff {
3733            StateDiff::Retraction => Diff::MINUS_ONE,
3734            StateDiff::Addition => Diff::ONE,
3735        }
3736    }
3737}
3738impl TryFrom<Diff> for StateDiff {
3739    type Error = String;
3740
3741    fn try_from(diff: Diff) -> Result<Self, Self::Error> {
3742        match diff {
3743            Diff::MINUS_ONE => Ok(Self::Retraction),
3744            Diff::ONE => Ok(Self::Addition),
3745            diff => Err(format!("invalid diff {diff}")),
3746        }
3747    }
3748}
3749
/// Information needed to process an update to a temporary item.
#[derive(Debug, Clone, Ord, PartialOrd, PartialEq, Eq)]
pub struct TemporaryItem {
    // Stable catalog identifier of the item.
    pub id: CatalogItemId,
    // PostgreSQL-compatible object identifier.
    pub oid: u32,
    // Global ID of the item's primary collection.
    pub global_id: GlobalId,
    // Schema the item lives in.
    pub schema_id: SchemaId,
    // Unqualified item name.
    pub name: String,
    // Connection the temporary item belongs to, if any.
    pub conn_id: Option<ConnectionId>,
    // The `CREATE` SQL for the item, as produced by `to_serialized`.
    pub create_sql: String,
    // The role that owns the item.
    pub owner_id: RoleId,
    // Privileges granted on the item.
    pub privileges: Vec<MzAclItem>,
    // Global IDs for additional relation versions beyond the primary one.
    pub extra_versions: BTreeMap<RelationVersion, GlobalId>,
}
3764
3765impl From<CatalogEntry> for TemporaryItem {
3766    fn from(entry: CatalogEntry) -> Self {
3767        let conn_id = entry.conn_id().cloned();
3768        let (create_sql, global_id, extra_versions) = entry.item.to_serialized();
3769
3770        TemporaryItem {
3771            id: entry.id,
3772            oid: entry.oid,
3773            global_id,
3774            schema_id: entry.name.qualifiers.schema_spec.into(),
3775            name: entry.name.item,
3776            conn_id,
3777            create_sql,
3778            owner_id: entry.owner_id,
3779            privileges: entry.privileges.into_all_values().collect(),
3780            extra_versions,
3781        }
3782    }
3783}
3784
impl TemporaryItem {
    /// The item's catalog type, derived from its `CREATE` SQL via the
    /// free function `item_type` (not a recursive call to this method).
    pub fn item_type(&self) -> CatalogItemType {
        item_type(&self.create_sql)
    }
}
3790
/// The same as [`StateUpdateKind`], but without `TemporaryItem` so we can derive [`Ord`].
///
/// Convert to/from [`StateUpdateKind`] via the `From`/`TryFrom` impls below;
/// only `StateUpdateKind::TemporaryItem` has no counterpart here.
#[derive(Debug, Clone, Ord, PartialOrd, Eq, PartialEq)]
pub enum BootstrapStateUpdateKind {
    Role(durable::objects::Role),
    RoleAuth(durable::objects::RoleAuth),
    Database(durable::objects::Database),
    Schema(durable::objects::Schema),
    DefaultPrivilege(durable::objects::DefaultPrivilege),
    SystemPrivilege(MzAclItem),
    SystemConfiguration(durable::objects::SystemConfiguration),
    Cluster(durable::objects::Cluster),
    NetworkPolicy(durable::objects::NetworkPolicy),
    IntrospectionSourceIndex(durable::objects::IntrospectionSourceIndex),
    ClusterReplica(durable::objects::ClusterReplica),
    SourceReferences(durable::objects::SourceReferences),
    SystemObjectMapping(durable::objects::SystemObjectMapping),
    Item(durable::objects::Item),
    Comment(durable::objects::Comment),
    AuditLog(durable::objects::AuditLog),
    // Storage updates.
    StorageCollectionMetadata(durable::objects::StorageCollectionMetadata),
    UnfinalizedShard(durable::objects::UnfinalizedShard),
}
3814
3815impl From<BootstrapStateUpdateKind> for StateUpdateKind {
3816    fn from(value: BootstrapStateUpdateKind) -> Self {
3817        match value {
3818            BootstrapStateUpdateKind::Role(kind) => StateUpdateKind::Role(kind),
3819            BootstrapStateUpdateKind::RoleAuth(kind) => StateUpdateKind::RoleAuth(kind),
3820            BootstrapStateUpdateKind::Database(kind) => StateUpdateKind::Database(kind),
3821            BootstrapStateUpdateKind::Schema(kind) => StateUpdateKind::Schema(kind),
3822            BootstrapStateUpdateKind::DefaultPrivilege(kind) => {
3823                StateUpdateKind::DefaultPrivilege(kind)
3824            }
3825            BootstrapStateUpdateKind::SystemPrivilege(kind) => {
3826                StateUpdateKind::SystemPrivilege(kind)
3827            }
3828            BootstrapStateUpdateKind::SystemConfiguration(kind) => {
3829                StateUpdateKind::SystemConfiguration(kind)
3830            }
3831            BootstrapStateUpdateKind::SourceReferences(kind) => {
3832                StateUpdateKind::SourceReferences(kind)
3833            }
3834            BootstrapStateUpdateKind::Cluster(kind) => StateUpdateKind::Cluster(kind),
3835            BootstrapStateUpdateKind::NetworkPolicy(kind) => StateUpdateKind::NetworkPolicy(kind),
3836            BootstrapStateUpdateKind::IntrospectionSourceIndex(kind) => {
3837                StateUpdateKind::IntrospectionSourceIndex(kind)
3838            }
3839            BootstrapStateUpdateKind::ClusterReplica(kind) => StateUpdateKind::ClusterReplica(kind),
3840            BootstrapStateUpdateKind::SystemObjectMapping(kind) => {
3841                StateUpdateKind::SystemObjectMapping(kind)
3842            }
3843            BootstrapStateUpdateKind::Item(kind) => StateUpdateKind::Item(kind),
3844            BootstrapStateUpdateKind::Comment(kind) => StateUpdateKind::Comment(kind),
3845            BootstrapStateUpdateKind::AuditLog(kind) => StateUpdateKind::AuditLog(kind),
3846            BootstrapStateUpdateKind::StorageCollectionMetadata(kind) => {
3847                StateUpdateKind::StorageCollectionMetadata(kind)
3848            }
3849            BootstrapStateUpdateKind::UnfinalizedShard(kind) => {
3850                StateUpdateKind::UnfinalizedShard(kind)
3851            }
3852        }
3853    }
3854}
3855
3856impl TryFrom<StateUpdateKind> for BootstrapStateUpdateKind {
3857    type Error = TemporaryItem;
3858
3859    fn try_from(value: StateUpdateKind) -> Result<Self, Self::Error> {
3860        match value {
3861            StateUpdateKind::Role(kind) => Ok(BootstrapStateUpdateKind::Role(kind)),
3862            StateUpdateKind::RoleAuth(kind) => Ok(BootstrapStateUpdateKind::RoleAuth(kind)),
3863            StateUpdateKind::Database(kind) => Ok(BootstrapStateUpdateKind::Database(kind)),
3864            StateUpdateKind::Schema(kind) => Ok(BootstrapStateUpdateKind::Schema(kind)),
3865            StateUpdateKind::DefaultPrivilege(kind) => {
3866                Ok(BootstrapStateUpdateKind::DefaultPrivilege(kind))
3867            }
3868            StateUpdateKind::SystemPrivilege(kind) => {
3869                Ok(BootstrapStateUpdateKind::SystemPrivilege(kind))
3870            }
3871            StateUpdateKind::SystemConfiguration(kind) => {
3872                Ok(BootstrapStateUpdateKind::SystemConfiguration(kind))
3873            }
3874            StateUpdateKind::Cluster(kind) => Ok(BootstrapStateUpdateKind::Cluster(kind)),
3875            StateUpdateKind::NetworkPolicy(kind) => {
3876                Ok(BootstrapStateUpdateKind::NetworkPolicy(kind))
3877            }
3878            StateUpdateKind::IntrospectionSourceIndex(kind) => {
3879                Ok(BootstrapStateUpdateKind::IntrospectionSourceIndex(kind))
3880            }
3881            StateUpdateKind::ClusterReplica(kind) => {
3882                Ok(BootstrapStateUpdateKind::ClusterReplica(kind))
3883            }
3884            StateUpdateKind::SourceReferences(kind) => {
3885                Ok(BootstrapStateUpdateKind::SourceReferences(kind))
3886            }
3887            StateUpdateKind::SystemObjectMapping(kind) => {
3888                Ok(BootstrapStateUpdateKind::SystemObjectMapping(kind))
3889            }
3890            StateUpdateKind::TemporaryItem(kind) => Err(kind),
3891            StateUpdateKind::Item(kind) => Ok(BootstrapStateUpdateKind::Item(kind)),
3892            StateUpdateKind::Comment(kind) => Ok(BootstrapStateUpdateKind::Comment(kind)),
3893            StateUpdateKind::AuditLog(kind) => Ok(BootstrapStateUpdateKind::AuditLog(kind)),
3894            StateUpdateKind::StorageCollectionMetadata(kind) => {
3895                Ok(BootstrapStateUpdateKind::StorageCollectionMetadata(kind))
3896            }
3897            StateUpdateKind::UnfinalizedShard(kind) => {
3898                Ok(BootstrapStateUpdateKind::UnfinalizedShard(kind))
3899            }
3900        }
3901    }
3902}