use std::borrow::Cow;
use std::collections::{BTreeMap, BTreeSet, VecDeque};
use std::fmt::Debug;
use std::sync::Arc;
use std::sync::LazyLock;
use std::time::Instant;

use ipnet::IpNet;
use itertools::Itertools;
use mz_adapter_types::compaction::CompactionWindow;
use mz_adapter_types::connection::ConnectionId;
use mz_audit_log::{EventDetails, EventType, ObjectType, VersionedEvent};
use mz_build_info::DUMMY_BUILD_INFO;
use mz_catalog::SYSTEM_CONN_ID;
use mz_catalog::builtin::{
    BUILTINS, Builtin, BuiltinCluster, BuiltinLog, BuiltinSource, BuiltinTable, BuiltinType,
};
use mz_catalog::config::{AwsPrincipalContext, ClusterReplicaSizeMap};
use mz_catalog::expr_cache::LocalExpressions;
use mz_catalog::memory::error::{Error, ErrorKind};
use mz_catalog::memory::objects::{
    CatalogCollectionEntry, CatalogEntry, CatalogItem, Cluster, ClusterReplica, CommentsMap,
    Connection, DataSourceDesc, Database, DefaultPrivileges, Index, MaterializedView,
    NetworkPolicy, Role, RoleAuth, Schema, Secret, Sink, Source, SourceReferences, Table,
    TableDataSource, Type, View,
};
use mz_controller::clusters::{
    ManagedReplicaAvailabilityZones, ManagedReplicaLocation, ReplicaAllocation, ReplicaLocation,
    UnmanagedReplicaLocation,
};
use mz_controller_types::{ClusterId, ReplicaId};
use mz_expr::{CollectionPlan, OptimizedMirRelationExpr};
use mz_license_keys::ValidatedLicenseKey;
use mz_orchestrator::DiskLimit;
use mz_ore::collections::CollectionExt;
use mz_ore::now::NOW_ZERO;
use mz_ore::soft_assert_no_log;
use mz_ore::str::StrExt;
use mz_pgrepr::oid::INVALID_OID;
use mz_repr::adt::mz_acl_item::PrivilegeMap;
use mz_repr::namespaces::{
    INFORMATION_SCHEMA, MZ_CATALOG_SCHEMA, MZ_CATALOG_UNSTABLE_SCHEMA, MZ_INTERNAL_SCHEMA,
    MZ_INTROSPECTION_SCHEMA, MZ_TEMP_SCHEMA, MZ_UNSAFE_SCHEMA, PG_CATALOG_SCHEMA, SYSTEM_SCHEMAS,
    UNSTABLE_SCHEMAS,
};
use mz_repr::network_policy_id::NetworkPolicyId;
use mz_repr::optimize::{OptimizerFeatures, OverrideFrom};
use mz_repr::role_id::RoleId;
use mz_repr::{
    CatalogItemId, GlobalId, RelationDesc, RelationVersion, RelationVersionSelector,
    VersionedRelationDesc,
};
use mz_secrets::InMemorySecretsController;
use mz_sql::ast::Ident;
use mz_sql::catalog::{
    CatalogCluster, CatalogClusterReplica, CatalogDatabase, CatalogError as SqlCatalogError,
    CatalogItem as SqlCatalogItem, CatalogItemType, CatalogRecordField, CatalogRole, CatalogSchema,
    CatalogType, CatalogTypeDetails, IdReference, NameReference, SessionCatalog, SystemObjectType,
    TypeReference,
};
use mz_sql::catalog::{CatalogConfig, EnvironmentId};
use mz_sql::names::{
    CommentObjectId, DatabaseId, DependencyIds, FullItemName, FullSchemaName, ObjectId,
    PartialItemName, QualifiedItemName, QualifiedSchemaName, RawDatabaseSpecifier,
    ResolvedDatabaseSpecifier, ResolvedIds, SchemaId, SchemaSpecifier, SystemObjectId,
};
use mz_sql::plan::{
    CreateConnectionPlan, CreateIndexPlan, CreateMaterializedViewPlan, CreateSecretPlan,
    CreateSinkPlan, CreateSourcePlan, CreateTablePlan, CreateTypePlan, CreateViewPlan, Params,
    Plan, PlanContext,
};
use mz_sql::rbac;
use mz_sql::session::metadata::SessionMetadata;
use mz_sql::session::user::MZ_SYSTEM_ROLE_ID;
use mz_sql::session::vars::{DEFAULT_DATABASE_NAME, SystemVars, Var, VarInput};
use mz_sql_parser::ast::QualifiedReplica;
use mz_storage_client::controller::StorageMetadata;
use mz_storage_types::connections::ConnectionContext;
use mz_storage_types::connections::inline::{
    ConnectionResolver, InlinedConnection, IntoInlineConnection,
};
use mz_transform::notice::OptimizerNotice;
use serde::Serialize;
use timely::progress::Antichain;
use tokio::sync::mpsc;
use tracing::{debug, warn};

use crate::AdapterError;
use crate::catalog::{Catalog, ConnCatalog};
use crate::coord::{ConnMeta, infer_sql_type_for_catalog};
use crate::optimize::{self, Optimize, OptimizerCatalog};
use crate::session::Session;

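/// The in-memory state of the catalog: databases, schemas, items, roles,
/// clusters, network policies, and system configuration, indexed both by name
/// and by ID. The maps are persistent (`imbl::OrdMap`), so cloning the state
/// for a snapshot is cheap.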
#[derive(Debug, Clone, Serialize)]
pub struct CatalogState {
    pub(super) database_by_name: imbl::OrdMap<String, DatabaseId>,
    #[serde(serialize_with = "mz_ore::serde::map_key_to_string")]
    pub(super) database_by_id: imbl::OrdMap<DatabaseId, Database>,
    #[serde(serialize_with = "skip_temp_items")]
    pub(super) entry_by_id: imbl::OrdMap<CatalogItemId, CatalogEntry>,
    #[serde(serialize_with = "mz_ore::serde::map_key_to_string")]
    pub(super) entry_by_global_id: imbl::OrdMap<GlobalId, CatalogItemId>,
    pub(super) ambient_schemas_by_name: imbl::OrdMap<String, SchemaId>,
    #[serde(serialize_with = "mz_ore::serde::map_key_to_string")]
    pub(super) ambient_schemas_by_id: imbl::OrdMap<SchemaId, Schema>,
    pub(super) clusters_by_name: imbl::OrdMap<String, ClusterId>,
    #[serde(serialize_with = "mz_ore::serde::map_key_to_string")]
    pub(super) clusters_by_id: imbl::OrdMap<ClusterId, Cluster>,
    pub(super) roles_by_name: imbl::OrdMap<String, RoleId>,
    #[serde(serialize_with = "mz_ore::serde::map_key_to_string")]
    pub(super) roles_by_id: imbl::OrdMap<RoleId, Role>,
    pub(super) network_policies_by_name: imbl::OrdMap<String, NetworkPolicyId>,
    #[serde(serialize_with = "mz_ore::serde::map_key_to_string")]
    pub(super) network_policies_by_id: imbl::OrdMap<NetworkPolicyId, NetworkPolicy>,
    #[serde(serialize_with = "mz_ore::serde::map_key_to_string")]
    pub(super) role_auth_by_id: imbl::OrdMap<RoleId, RoleAuth>,

    #[serde(skip)]
    pub(super) system_configuration: Arc<SystemVars>,
    pub(super) default_privileges: Arc<DefaultPrivileges>,
    pub(super) system_privileges: Arc<PrivilegeMap>,
    pub(super) comments: Arc<CommentsMap>,
    #[serde(serialize_with = "mz_ore::serde::map_key_to_string")]
    pub(super) source_references: imbl::OrdMap<CatalogItemId, SourceReferences>,
    pub(super) storage_metadata: Arc<StorageMetadata>,
    pub(super) mock_authentication_nonce: Option<String>,

    #[serde(skip)]
    pub(super) notices_by_dep_id: imbl::OrdMap<GlobalId, Vec<Arc<OptimizerNotice>>>,

    #[serde(skip)]
    pub(super) temporary_schemas: imbl::OrdMap<ConnectionId, Schema>,

    #[serde(skip)]
    pub(super) config: mz_sql::catalog::CatalogConfig,
    pub(super) cluster_replica_sizes: ClusterReplicaSizeMap,
    #[serde(skip)]
    pub(crate) availability_zones: Vec<String>,

    #[serde(skip)]
    pub(super) egress_addresses: Vec<IpNet>,
    pub(super) aws_principal_context: Option<AwsPrincipalContext>,
    pub(super) aws_privatelink_availability_zones: Option<BTreeSet<String>>,
    pub(super) http_host_name: Option<String>,

    #[serde(skip)]
    pub(super) license_key: ValidatedLicenseKey,
}

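/// A cache of locally optimized expressions, keyed by `GlobalId`.
///
/// While `Open`, `cached_exprs` holds expressions supplied at construction
/// that can be reused instead of re-optimizing, and `uncached_exprs` collects
/// newly optimized expressions to be written back to the cache. When
/// `Closed`, every operation is a no-op.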
#[derive(Debug, Clone, Serialize)]
pub(crate) enum LocalExpressionCache {
    Open {
        cached_exprs: BTreeMap<GlobalId, LocalExpressions>,
        uncached_exprs: BTreeMap<GlobalId, LocalExpressions>,
    },
    Closed,
}

impl LocalExpressionCache {
    pub(super) fn new(cached_exprs: BTreeMap<GlobalId, LocalExpressions>) -> Self {
        Self::Open {
            cached_exprs,
            uncached_exprs: BTreeMap::new(),
        }
    }

    pub(super) fn remove_cached_expression(&mut self, id: &GlobalId) -> Option<LocalExpressions> {
        match self {
            LocalExpressionCache::Open { cached_exprs, .. } => cached_exprs.remove(id),
            LocalExpressionCache::Closed => None,
        }
    }

    pub(super) fn insert_cached_expression(
        &mut self,
        id: GlobalId,
        local_expressions: LocalExpressions,
    ) {
        match self {
            LocalExpressionCache::Open { cached_exprs, .. } => {
                cached_exprs.insert(id, local_expressions);
            }
            LocalExpressionCache::Closed => {}
        }
    }

    pub(super) fn insert_uncached_expression(
        &mut self,
        id: GlobalId,
        local_mir: OptimizedMirRelationExpr,
        optimizer_features: OptimizerFeatures,
    ) {
        match self {
            LocalExpressionCache::Open { uncached_exprs, .. } => {
                let local_expr = LocalExpressions {
                    local_mir,
                    optimizer_features,
                };
                // Only keep the expression if it is new or identical to what
                // we saw before; conflicting entries for the same id are
                // dropped entirely.
                let prev = uncached_exprs.remove(&id);
                match prev {
                    Some(prev) if prev == local_expr => {
                        uncached_exprs.insert(id, local_expr);
                    }
                    None => {
                        uncached_exprs.insert(id, local_expr);
                    }
                    Some(_) => {}
                }
            }
            LocalExpressionCache::Closed => {}
        }
    }

    pub(super) fn into_uncached_exprs(self) -> BTreeMap<GlobalId, LocalExpressions> {
        match self {
            LocalExpressionCache::Open { uncached_exprs, .. } => uncached_exprs,
            LocalExpressionCache::Closed => BTreeMap::new(),
        }
    }
}

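/// Serializes `entries` with keys rendered as strings, skipping temporary
/// items (entries associated with a connection).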
fn skip_temp_items<S>(
    entries: &imbl::OrdMap<CatalogItemId, CatalogEntry>,
    serializer: S,
) -> Result<S::Ok, S::Error>
where
    S: serde::Serializer,
{
    mz_ore::serde::map_key_to_string(
        entries.iter().filter(|(_k, v)| v.conn_id().is_none()),
        serializer,
    )
}

impl CatalogState {
    pub fn empty_test() -> Self {
        CatalogState {
            database_by_name: Default::default(),
            database_by_id: Default::default(),
            entry_by_id: Default::default(),
            entry_by_global_id: Default::default(),
            notices_by_dep_id: Default::default(),
            ambient_schemas_by_name: Default::default(),
            ambient_schemas_by_id: Default::default(),
            temporary_schemas: Default::default(),
            clusters_by_id: Default::default(),
            clusters_by_name: Default::default(),
            network_policies_by_name: Default::default(),
            roles_by_name: Default::default(),
            roles_by_id: Default::default(),
            network_policies_by_id: Default::default(),
            role_auth_by_id: Default::default(),
            config: CatalogConfig {
                start_time: Default::default(),
                start_instant: Instant::now(),
                nonce: Default::default(),
                environment_id: EnvironmentId::for_tests(),
                session_id: Default::default(),
                build_info: &DUMMY_BUILD_INFO,
                now: NOW_ZERO.clone(),
                connection_context: ConnectionContext::for_tests(Arc::new(
                    InMemorySecretsController::new(),
                )),
                helm_chart_version: None,
            },
            cluster_replica_sizes: ClusterReplicaSizeMap::for_tests(),
            availability_zones: Default::default(),
            system_configuration: Arc::new(SystemVars::default()),
            egress_addresses: Default::default(),
            aws_principal_context: Default::default(),
            aws_privatelink_availability_zones: Default::default(),
            http_host_name: Default::default(),
            default_privileges: Arc::new(DefaultPrivileges::default()),
            system_privileges: Arc::new(PrivilegeMap::default()),
            comments: Arc::new(CommentsMap::default()),
            source_references: Default::default(),
            storage_metadata: Arc::new(StorageMetadata::default()),
            license_key: ValidatedLicenseKey::for_tests(),
            mock_authentication_nonce: Default::default(),
        }
    }

    pub fn for_session<'a>(&'a self, session: &'a Session) -> ConnCatalog<'a> {
        let search_path = self.resolve_search_path(session);
        let database = self
            .database_by_name
            .get(session.vars().database())
            .map(|id| id.clone());
        let state = match session.transaction().catalog_state() {
            Some(txn_catalog_state) => Cow::Borrowed(txn_catalog_state),
            None => Cow::Borrowed(self),
        };
        ConnCatalog {
            state,
            unresolvable_ids: BTreeSet::new(),
            conn_id: session.conn_id().clone(),
            cluster: session.vars().cluster().into(),
            database,
            search_path,
            role_id: session.current_role_id().clone(),
            prepared_statements: Some(session.prepared_statements()),
            portals: Some(session.portals()),
            notices_tx: session.retain_notice_transmitter(),
        }
    }

    pub fn for_sessionless_user(&self, role_id: RoleId) -> ConnCatalog<'_> {
        let (notices_tx, _notices_rx) = mpsc::unbounded_channel();
        let cluster = self.system_configuration.default_cluster();

        ConnCatalog {
            state: Cow::Borrowed(self),
            unresolvable_ids: BTreeSet::new(),
            conn_id: SYSTEM_CONN_ID.clone(),
            cluster,
            database: self
                .resolve_database(DEFAULT_DATABASE_NAME)
                .ok()
                .map(|db| db.id()),
            search_path: Vec::new(),
            role_id,
            prepared_statements: None,
            portals: None,
            notices_tx,
        }
    }

    pub fn for_system_session(&self) -> ConnCatalog<'_> {
        self.for_sessionless_user(MZ_SYSTEM_ROLE_ID)
    }

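    /// Returns an iterator over `id` and every item that `id` transitively
    /// uses, in breadth-first order.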
    pub fn transitive_uses(&self, id: CatalogItemId) -> impl Iterator<Item = CatalogItemId> + '_ {
        struct I<'a> {
            queue: VecDeque<CatalogItemId>,
            seen: BTreeSet<CatalogItemId>,
            this: &'a CatalogState,
        }
        impl<'a> Iterator for I<'a> {
            type Item = CatalogItemId;
            fn next(&mut self) -> Option<Self::Item> {
                if let Some(next) = self.queue.pop_front() {
                    for child in self.this.get_entry(&next).item().uses() {
                        if !self.seen.contains(&child) {
                            self.queue.push_back(child);
                            self.seen.insert(child);
                        }
                    }
                    Some(next)
                } else {
                    None
                }
            }
        }

        I {
            queue: [id].into_iter().collect(),
            seen: [id].into_iter().collect(),
            this: self,
        }
    }

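    /// Returns the IDs of all introspection sources (logs) that the given
    /// item transitively depends on, via views, materialized views,
    /// connections, sinks, and indexes.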
    pub fn introspection_dependencies(&self, id: CatalogItemId) -> Vec<CatalogItemId> {
        let mut out = Vec::new();
        self.introspection_dependencies_inner(id, &mut out);
        out
    }

    fn introspection_dependencies_inner(&self, id: CatalogItemId, out: &mut Vec<CatalogItemId>) {
        match self.get_entry(&id).item() {
            CatalogItem::Log(_) => out.push(id),
            item @ (CatalogItem::View(_)
            | CatalogItem::MaterializedView(_)
            | CatalogItem::Connection(_)) => {
                for item_id in item.references().items() {
                    self.introspection_dependencies_inner(*item_id, out);
                }
            }
            CatalogItem::Sink(sink) => {
                let from_item_id = self.get_entry_by_global_id(&sink.from).id();
                self.introspection_dependencies_inner(from_item_id, out)
            }
            CatalogItem::Index(idx) => {
                let on_item_id = self.get_entry_by_global_id(&idx.on).id();
                self.introspection_dependencies_inner(on_item_id, out)
            }
            CatalogItem::Table(_)
            | CatalogItem::Source(_)
            | CatalogItem::Type(_)
            | CatalogItem::Func(_)
            | CatalogItem::Secret(_) => (),
        }
    }

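    /// Returns all objects that depend on the given `object_ids`, including
    /// the objects themselves. `seen` tracks objects already accounted for,
    /// so dependents can be accumulated across multiple calls.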
    pub(super) fn object_dependents(
        &self,
        object_ids: &Vec<ObjectId>,
        conn_id: &ConnectionId,
        seen: &mut BTreeSet<ObjectId>,
    ) -> Vec<ObjectId> {
        let mut dependents = Vec::new();
        for object_id in object_ids {
            match object_id {
                ObjectId::Cluster(id) => {
                    dependents.extend_from_slice(&self.cluster_dependents(*id, seen));
                }
                ObjectId::ClusterReplica((cluster_id, replica_id)) => dependents.extend_from_slice(
                    &self.cluster_replica_dependents(*cluster_id, *replica_id, seen),
                ),
                ObjectId::Database(id) => {
                    dependents.extend_from_slice(&self.database_dependents(*id, conn_id, seen))
                }
                ObjectId::Schema((database_spec, schema_spec)) => {
                    dependents.extend_from_slice(&self.schema_dependents(
                        database_spec.clone(),
                        schema_spec.clone(),
                        conn_id,
                        seen,
                    ));
                }
                ObjectId::NetworkPolicy(id) => {
                    dependents.extend_from_slice(&self.network_policy_dependents(*id, seen));
                }
                id @ ObjectId::Role(_) => {
                    let unseen = seen.insert(id.clone());
                    if unseen {
                        dependents.push(id.clone());
                    }
                }
                ObjectId::Item(id) => {
                    dependents.extend_from_slice(&self.item_dependents(*id, seen))
                }
            }
        }
        dependents
    }

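    /// Returns all objects that depend on the given cluster, including the
    /// cluster itself: its bound items, its replicas, and their dependents.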
    fn cluster_dependents(
        &self,
        cluster_id: ClusterId,
        seen: &mut BTreeSet<ObjectId>,
    ) -> Vec<ObjectId> {
        let mut dependents = Vec::new();
        let object_id = ObjectId::Cluster(cluster_id);
        if !seen.contains(&object_id) {
            seen.insert(object_id.clone());
            let cluster = self.get_cluster(cluster_id);
            for item_id in cluster.bound_objects() {
                dependents.extend_from_slice(&self.item_dependents(*item_id, seen));
            }
            for replica_id in cluster.replica_ids().values() {
                dependents.extend_from_slice(&self.cluster_replica_dependents(
                    cluster_id,
                    *replica_id,
                    seen,
                ));
            }
            dependents.push(object_id);
        }
        dependents
    }

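    /// Returns all objects that depend on the given cluster replica,
    /// including the replica itself. Materialized views that target this
    /// specific replica are dropped along with it.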
    pub(super) fn cluster_replica_dependents(
        &self,
        cluster_id: ClusterId,
        replica_id: ReplicaId,
        seen: &mut BTreeSet<ObjectId>,
    ) -> Vec<ObjectId> {
        let mut dependents = Vec::new();
        let object_id = ObjectId::ClusterReplica((cluster_id, replica_id));
        if !seen.contains(&object_id) {
            seen.insert(object_id.clone());
            let cluster = self.get_cluster(cluster_id);
            for item_id in cluster.bound_objects() {
                if let CatalogItem::MaterializedView(mv) = self.get_entry(item_id).item()
                    && mv.target_replica == Some(replica_id)
                {
                    dependents.extend_from_slice(&self.item_dependents(*item_id, seen));
                }
            }
            dependents.push(object_id);
        }
        dependents
    }

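    /// Returns all objects that depend on the given database, including the
    /// database itself and everything in its schemas.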
    fn database_dependents(
        &self,
        database_id: DatabaseId,
        conn_id: &ConnectionId,
        seen: &mut BTreeSet<ObjectId>,
    ) -> Vec<ObjectId> {
        let mut dependents = Vec::new();
        let object_id = ObjectId::Database(database_id);
        if !seen.contains(&object_id) {
            seen.insert(object_id.clone());
            let database = self.get_database(&database_id);
            for schema_id in database.schema_ids().values() {
                dependents.extend_from_slice(&self.schema_dependents(
                    ResolvedDatabaseSpecifier::Id(database_id),
                    SchemaSpecifier::Id(*schema_id),
                    conn_id,
                    seen,
                ));
            }
            dependents.push(object_id);
        }
        dependents
    }

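    /// Returns all objects that depend on the given schema, including the
    /// schema itself and all of its items.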
    fn schema_dependents(
        &self,
        database_spec: ResolvedDatabaseSpecifier,
        schema_spec: SchemaSpecifier,
        conn_id: &ConnectionId,
        seen: &mut BTreeSet<ObjectId>,
    ) -> Vec<ObjectId> {
        let mut dependents = Vec::new();
        let object_id = ObjectId::Schema((database_spec, schema_spec.clone()));
        if !seen.contains(&object_id) {
            seen.insert(object_id.clone());
            let schema = self.get_schema(&database_spec, &schema_spec, conn_id);
            for item_id in schema.item_ids() {
                dependents.extend_from_slice(&self.item_dependents(item_id, seen));
            }
            dependents.push(object_id)
        }
        dependents
    }

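    /// Returns all objects that depend on the given item, including the item
    /// itself and, where present, its associated progress collection.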
    pub(super) fn item_dependents(
        &self,
        item_id: CatalogItemId,
        seen: &mut BTreeSet<ObjectId>,
    ) -> Vec<ObjectId> {
        let mut dependents = Vec::new();
        let object_id = ObjectId::Item(item_id);
        if !seen.contains(&object_id) {
            seen.insert(object_id.clone());
            let entry = self.get_entry(&item_id);
            for dependent_id in entry.used_by() {
                dependents.extend_from_slice(&self.item_dependents(*dependent_id, seen));
            }
            dependents.push(object_id);
            // If the item has an associated progress collection, it is
            // dropped along with the item.
            if let Some(progress_id) = entry.progress_id() {
                dependents.extend_from_slice(&self.item_dependents(progress_id, seen));
            }
        }
        dependents
    }

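    /// Returns all objects that depend on the given network policy, which is
    /// currently just the policy itself.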
    pub(super) fn network_policy_dependents(
        &self,
        network_policy_id: NetworkPolicyId,
        _seen: &mut BTreeSet<ObjectId>,
    ) -> Vec<ObjectId> {
        let object_id = ObjectId::NetworkPolicy(network_policy_id);
        // Network policies have no dependents beyond themselves.
        vec![object_id]
    }

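    /// Reports whether the given item lives in a stable schema, i.e. not in
    /// one of the unstable schemas whose contents may change between
    /// releases.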
    fn is_stable(&self, id: CatalogItemId) -> bool {
        let spec = self.get_entry(&id).name().qualifiers.schema_spec;
        !self.is_unstable_schema_specifier(spec)
    }

    pub(super) fn check_unstable_dependencies(&self, item: &CatalogItem) -> Result<(), Error> {
        if self.system_config().unsafe_enable_unstable_dependencies() {
            return Ok(());
        }

        let unstable_dependencies: Vec<_> = item
            .references()
            .items()
            .filter(|id| !self.is_stable(**id))
            .map(|id| self.get_entry(id).name().item.clone())
            .collect();

        // Temporary items are free to depend on unstable items.
        if unstable_dependencies.is_empty() || item.is_temporary() {
            Ok(())
        } else {
            let object_type = item.typ().to_string();
            Err(Error {
                kind: ErrorKind::UnstableDependency {
                    object_type,
                    unstable_dependencies,
                },
            })
        }
    }

    pub fn resolve_full_name(
        &self,
        name: &QualifiedItemName,
        conn_id: Option<&ConnectionId>,
    ) -> FullItemName {
        let conn_id = conn_id.unwrap_or(&SYSTEM_CONN_ID);

        let database = match &name.qualifiers.database_spec {
            ResolvedDatabaseSpecifier::Ambient => RawDatabaseSpecifier::Ambient,
            ResolvedDatabaseSpecifier::Id(id) => {
                RawDatabaseSpecifier::Name(self.get_database(id).name().to_string())
            }
        };
        let schema = match &name.qualifiers.schema_spec {
            SchemaSpecifier::Temporary => MZ_TEMP_SCHEMA.to_string(),
            SchemaSpecifier::Id(_) => self
                .get_schema(
                    &name.qualifiers.database_spec,
                    &name.qualifiers.schema_spec,
                    conn_id,
                )
                .name()
                .schema
                .clone(),
        };
        FullItemName {
            database,
            schema,
            item: name.item.clone(),
        }
    }

    pub(super) fn resolve_full_schema_name(&self, name: &QualifiedSchemaName) -> FullSchemaName {
        let database = match &name.database {
            ResolvedDatabaseSpecifier::Ambient => RawDatabaseSpecifier::Ambient,
            ResolvedDatabaseSpecifier::Id(id) => {
                RawDatabaseSpecifier::Name(self.get_database(id).name().to_string())
            }
        };
        FullSchemaName {
            database,
            schema: name.schema.clone(),
        }
    }

    pub fn get_entry(&self, id: &CatalogItemId) -> &CatalogEntry {
        self.entry_by_id
            .get(id)
            .unwrap_or_else(|| panic!("catalog out of sync, missing id {id:?}"))
    }

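    /// Returns the catalog entry for the collection identified by the given
    /// `GlobalId`, pinned to the matching relation version. Tables may have
    /// multiple versions, so the `GlobalId` determines which version is
    /// selected; all other items use the latest version.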
    pub fn get_entry_by_global_id(&self, id: &GlobalId) -> CatalogCollectionEntry {
        let item_id = self
            .entry_by_global_id
            .get(id)
            .unwrap_or_else(|| panic!("catalog out of sync, missing id {id:?}"));

        let entry = self.get_entry(item_id).clone();
        let version = match entry.item() {
            CatalogItem::Table(table) => {
                let (version, _) = table
                    .collections
                    .iter()
                    .find(|(_version, gid)| *gid == id)
                    .expect("version to exist");
                RelationVersionSelector::Specific(*version)
            }
            _ => RelationVersionSelector::Latest,
        };
        CatalogCollectionEntry { entry, version }
    }

    pub fn get_entries(&self) -> impl Iterator<Item = (&CatalogItemId, &CatalogEntry)> + '_ {
        self.entry_by_id.iter()
    }

    pub fn get_temp_items(&self, conn: &ConnectionId) -> impl Iterator<Item = ObjectId> + '_ {
        self.temporary_schemas
            .get(conn)
            .into_iter()
            .flat_map(|schema| schema.items.values().copied().map(ObjectId::from))
    }

    pub fn has_temporary_schema(&self, conn: &ConnectionId) -> bool {
        self.temporary_schemas.contains_key(conn)
    }

    pub(super) fn get_system_type(&self, name: &str) -> &CatalogEntry {
        let mut res = None;
        for schema_id in self.system_schema_ids() {
            let schema = &self.ambient_schemas_by_id[&schema_id];
            if let Some(global_id) = schema.types.get(name) {
                match res {
                    None => res = Some(self.get_entry(global_id)),
                    Some(_) => panic!(
                        "only call get_system_type on objects uniquely identifiable in one system schema"
                    ),
                }
            }
        }

        res.unwrap_or_else(|| panic!("cannot find type {} in system schema", name))
    }

    pub fn get_item_by_name(
        &self,
        name: &QualifiedItemName,
        conn_id: &ConnectionId,
    ) -> Option<&CatalogEntry> {
        self.get_schema(
            &name.qualifiers.database_spec,
            &name.qualifiers.schema_spec,
            conn_id,
        )
        .items
        .get(&name.item)
        .and_then(|id| self.try_get_entry(id))
    }

    pub fn get_type_by_name(
        &self,
        name: &QualifiedItemName,
        conn_id: &ConnectionId,
    ) -> Option<&CatalogEntry> {
        self.get_schema(
            &name.qualifiers.database_spec,
            &name.qualifiers.schema_spec,
            conn_id,
        )
        .types
        .get(&name.item)
        .and_then(|id| self.try_get_entry(id))
    }

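    /// Returns a name based on `name` that is not already taken, appending an
    /// increasing integer suffix until the name is free (e.g. `t`, `t1`,
    /// `t2`, ...).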
    pub(super) fn find_available_name(
        &self,
        mut name: QualifiedItemName,
        conn_id: &ConnectionId,
    ) -> QualifiedItemName {
        let mut i = 0;
        let orig_item_name = name.item.clone();
        while self.get_item_by_name(&name, conn_id).is_some() {
            i += 1;
            name.item = format!("{}{}", orig_item_name, i);
        }
        name
    }

    pub fn try_get_entry(&self, id: &CatalogItemId) -> Option<&CatalogEntry> {
        self.entry_by_id.get(id)
    }

    pub fn try_get_entry_by_global_id(&self, id: &GlobalId) -> Option<&CatalogEntry> {
        let item_id = self.entry_by_global_id.get(id)?;
        self.try_get_entry(item_id)
    }

    pub fn try_get_desc_by_global_id(&self, id: &GlobalId) -> Option<Cow<'_, RelationDesc>> {
        let entry = self.try_get_entry_by_global_id(id)?;
        let desc = match entry.item() {
            // Tables can have multiple versions, so resolve the description
            // for this specific `GlobalId`.
            CatalogItem::Table(table) => Cow::Owned(table.desc_for(id)),
            other => other.relation_desc(RelationVersionSelector::Latest)?,
        };
        Some(desc)
    }

    pub(crate) fn get_cluster(&self, cluster_id: ClusterId) -> &Cluster {
        self.try_get_cluster(cluster_id)
            .unwrap_or_else(|| panic!("unknown cluster {cluster_id}"))
    }

    pub(super) fn try_get_cluster(&self, cluster_id: ClusterId) -> Option<&Cluster> {
        self.clusters_by_id.get(&cluster_id)
    }

    pub(super) fn try_get_role(&self, id: &RoleId) -> Option<&Role> {
        self.roles_by_id.get(id)
    }

    pub fn get_role(&self, id: &RoleId) -> &Role {
        self.roles_by_id.get(id).expect("catalog out of sync")
    }

    pub fn get_roles(&self) -> impl Iterator<Item = &RoleId> {
        self.roles_by_id.keys()
    }

    pub(super) fn try_get_role_by_name(&self, role_name: &str) -> Option<&Role> {
        self.roles_by_name
            .get(role_name)
            .map(|id| &self.roles_by_id[id])
    }

    pub(super) fn get_role_auth(&self, id: &RoleId) -> &RoleAuth {
        self.role_auth_by_id
            .get(id)
            .unwrap_or_else(|| panic!("catalog out of sync, missing role auth for {id}"))
    }

    pub(super) fn try_get_role_auth_by_id(&self, id: &RoleId) -> Option<&RoleAuth> {
        self.role_auth_by_id.get(id)
    }

    pub(super) fn try_get_network_policy_by_name(
        &self,
        policy_name: &str,
    ) -> Option<&NetworkPolicy> {
        self.network_policies_by_name
            .get(policy_name)
            .map(|id| &self.network_policies_by_id[id])
    }

    pub(crate) fn collect_role_membership(&self, id: &RoleId) -> BTreeSet<RoleId> {
        let mut membership = BTreeSet::new();
        let mut queue = VecDeque::from(vec![id]);
        while let Some(cur_id) = queue.pop_front() {
            if !membership.contains(cur_id) {
                membership.insert(cur_id.clone());
                let role = self.get_role(cur_id);
                soft_assert_no_log!(
                    !role.membership().keys().contains(id),
                    "circular membership exists in the catalog"
                );
                queue.extend(role.membership().keys());
            }
        }
        // Every role is implicitly a member of the `PUBLIC` role.
        membership.insert(RoleId::Public);
        membership
    }

    pub fn get_network_policy(&self, id: &NetworkPolicyId) -> &NetworkPolicy {
        self.network_policies_by_id
            .get(id)
            .expect("catalog out of sync")
    }

    pub fn get_network_policies(&self) -> impl Iterator<Item = &NetworkPolicyId> {
        self.network_policies_by_id.keys()
    }

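    /// Returns the URL at which the given webhook source accepts requests, of
    /// the form `https://<host>/api/webhook/<database>/<schema>/<item>`, or
    /// `None` if the item cannot be resolved to a named database.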
    pub fn try_get_webhook_url(&self, id: &CatalogItemId) -> Option<url::Url> {
        let entry = self.try_get_entry(id)?;
        let name = self.resolve_full_name(entry.name(), None);
        let host_name = self
            .http_host_name
            .as_ref()
            .map(|x| x.as_str())
            .unwrap_or_else(|| "HOST");

        // Webhook URLs only exist for items in a named database.
        let RawDatabaseSpecifier::Name(database) = name.database else {
            return None;
        };

        let mut url = url::Url::parse(&format!("https://{host_name}/api/webhook")).ok()?;
        url.path_segments_mut()
            .ok()?
            .push(&database)
            .push(&name.schema)
            .push(&name.item);

        Some(url)
    }

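    /// Parses and plans `create_sql` with all item-parsing feature flags
    /// enabled, optionally ignoring errors that `IF EXISTS`-style semantics
    /// would skip. Intended for re-planning statements that are already
    /// durably recorded in the catalog.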
    pub(crate) fn deserialize_plan_with_enable_for_item_parsing(
        &mut self,
        create_sql: &str,
        force_if_exists_skip: bool,
    ) -> Result<(Plan, ResolvedIds), AdapterError> {
        self.with_enable_for_item_parsing(|state| {
            let pcx = PlanContext::zero().with_ignore_if_exists_errors(force_if_exists_skip);
            let pcx = Some(&pcx);
            let session_catalog = state.for_system_session();

            let stmt = mz_sql::parse::parse(create_sql)?.into_element().ast;
            let (stmt, resolved_ids) = mz_sql::names::resolve(&session_catalog, stmt)?;
            let (plan, _sql_impl_ids) =
                mz_sql::plan::plan(pcx, &session_catalog, stmt, &Params::empty(), &resolved_ids)?;

            Ok((plan, resolved_ids))
        })
    }

    /// Parses and plans `create_sql` against the given catalog.
    #[mz_ore::instrument]
    pub(crate) fn parse_plan(
        create_sql: &str,
        pcx: Option<&PlanContext>,
        catalog: &ConnCatalog,
    ) -> Result<(Plan, ResolvedIds), AdapterError> {
        let stmt = mz_sql::parse::parse(create_sql)?.into_element().ast;
        let (stmt, resolved_ids) = mz_sql::names::resolve(catalog, stmt)?;
        let (plan, _sql_impl_ids) =
            mz_sql::plan::plan(pcx, catalog, stmt, &Params::empty(), &resolved_ids)?;

        Ok((plan, resolved_ids))
    }

    pub(crate) fn deserialize_item(
        &self,
        global_id: GlobalId,
        create_sql: &str,
        extra_versions: &BTreeMap<RelationVersion, GlobalId>,
        local_expression_cache: &mut LocalExpressionCache,
        previous_item: Option<CatalogItem>,
    ) -> Result<CatalogItem, AdapterError> {
        self.parse_item(
            global_id,
            create_sql,
            extra_versions,
            None,
            false,
            None,
            local_expression_cache,
            previous_item,
        )
    }

    #[mz_ore::instrument]
    pub(crate) fn parse_item(
        &self,
        global_id: GlobalId,
        create_sql: &str,
        extra_versions: &BTreeMap<RelationVersion, GlobalId>,
        pcx: Option<&PlanContext>,
        is_retained_metrics_object: bool,
        custom_logical_compaction_window: Option<CompactionWindow>,
        local_expression_cache: &mut LocalExpressionCache,
        previous_item: Option<CatalogItem>,
    ) -> Result<CatalogItem, AdapterError> {
        let cached_expr = local_expression_cache.remove_cached_expression(&global_id);
        match self.parse_item_inner(
            global_id,
            create_sql,
            extra_versions,
            pcx,
            is_retained_metrics_object,
            custom_logical_compaction_window,
            cached_expr,
            previous_item,
        ) {
            Ok((item, uncached_expr)) => {
                if let Some((uncached_expr, optimizer_features)) = uncached_expr {
                    local_expression_cache.insert_uncached_expression(
                        global_id,
                        uncached_expr,
                        optimizer_features,
                    );
                }
                Ok(item)
            }
            Err((err, cached_expr)) => {
                // Put the unused cached expression back so it can be reused.
                if let Some(local_expr) = cached_expr {
                    local_expression_cache.insert_cached_expression(global_id, local_expr);
                }
                Err(err)
            }
        }
    }

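    /// Parses and plans `create_sql`, then converts the resulting plan into a
    /// `CatalogItem`. On success, additionally returns any newly optimized
    /// expression that should be cached; on failure, returns the unused
    /// `cached_expr` alongside the error so the caller can put it back.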
    #[mz_ore::instrument]
    pub(crate) fn parse_item_inner(
        &self,
        global_id: GlobalId,
        create_sql: &str,
        extra_versions: &BTreeMap<RelationVersion, GlobalId>,
        pcx: Option<&PlanContext>,
        is_retained_metrics_object: bool,
        custom_logical_compaction_window: Option<CompactionWindow>,
        cached_expr: Option<LocalExpressions>,
        previous_item: Option<CatalogItem>,
    ) -> Result<
        (
            CatalogItem,
            Option<(OptimizedMirRelationExpr, OptimizerFeatures)>,
        ),
        (AdapterError, Option<LocalExpressions>),
    > {
        let session_catalog = self.for_system_session();

        let (plan, resolved_ids) = match Self::parse_plan(create_sql, pcx, &session_catalog) {
            Ok((plan, resolved_ids)) => (plan, resolved_ids),
            Err(err) => return Err((err, cached_expr)),
        };

        let mut uncached_expr = None;

        let previous_plans = previous_item.as_ref().map(|item| {
            (
                item.optimized_plan().cloned(),
                item.physical_plan().cloned(),
                item.dataflow_metainfo().cloned(),
            )
        });

        let mut item = match plan {
            Plan::CreateTable(CreateTablePlan { table, .. }) => {
                let collections = extra_versions
                    .iter()
                    .map(|(version, gid)| (*version, *gid))
                    .chain([(RelationVersion::root(), global_id)].into_iter())
                    .collect();

                CatalogItem::Table(Table {
                    create_sql: Some(table.create_sql),
                    desc: table.desc,
                    collections,
                    conn_id: None,
                    resolved_ids,
                    custom_logical_compaction_window: custom_logical_compaction_window
                        .or(table.compaction_window),
                    is_retained_metrics_object,
                    data_source: match table.data_source {
                        mz_sql::plan::TableDataSource::TableWrites { defaults } => {
                            TableDataSource::TableWrites { defaults }
                        }
                        mz_sql::plan::TableDataSource::DataSource {
                            desc: data_source_desc,
                            timeline,
                        } => match data_source_desc {
                            mz_sql::plan::DataSourceDesc::IngestionExport {
                                ingestion_id,
                                external_reference,
                                details,
                                data_config,
                            } => TableDataSource::DataSource {
                                desc: DataSourceDesc::IngestionExport {
                                    ingestion_id,
                                    external_reference,
                                    details,
                                    data_config,
                                },
                                timeline,
                            },
                            mz_sql::plan::DataSourceDesc::Webhook {
                                validate_using,
                                body_format,
                                headers,
                                cluster_id,
                            } => TableDataSource::DataSource {
                                desc: DataSourceDesc::Webhook {
                                    validate_using,
                                    body_format,
                                    headers,
                                    cluster_id: cluster_id
                                        .expect("Webhook Tables must have a cluster_id set"),
                                },
                                timeline,
                            },
                            _ => {
                                return Err((
                                    AdapterError::Unstructured(anyhow::anyhow!(
                                        "unsupported data source for table"
                                    )),
                                    cached_expr,
                                ));
                            }
                        },
                    },
                })
            }
            Plan::CreateSource(CreateSourcePlan {
                source,
                timeline,
                in_cluster,
                ..
            }) => CatalogItem::Source(Source {
                create_sql: Some(source.create_sql),
                data_source: match source.data_source {
                    mz_sql::plan::DataSourceDesc::Ingestion(desc) => DataSourceDesc::Ingestion {
                        desc,
                        cluster_id: match in_cluster {
                            Some(id) => id,
                            None => {
                                return Err((
                                    AdapterError::Unstructured(anyhow::anyhow!(
                                        "ingestion-based sources must have cluster specified"
                                    )),
                                    cached_expr,
                                ));
                            }
                        },
                    },
                    mz_sql::plan::DataSourceDesc::OldSyntaxIngestion {
                        desc,
                        progress_subsource,
                        data_config,
                        details,
                    } => DataSourceDesc::OldSyntaxIngestion {
                        desc,
                        progress_subsource,
                        data_config,
                        details,
                        cluster_id: match in_cluster {
                            Some(id) => id,
                            None => {
                                return Err((
                                    AdapterError::Unstructured(anyhow::anyhow!(
                                        "ingestion-based sources must have cluster specified"
                                    )),
                                    cached_expr,
                                ));
                            }
                        },
                    },
                    mz_sql::plan::DataSourceDesc::IngestionExport {
                        ingestion_id,
                        external_reference,
                        details,
                        data_config,
                    } => DataSourceDesc::IngestionExport {
                        ingestion_id,
                        external_reference,
                        details,
                        data_config,
                    },
                    mz_sql::plan::DataSourceDesc::Progress => DataSourceDesc::Progress,
                    mz_sql::plan::DataSourceDesc::Webhook {
                        validate_using,
                        body_format,
                        headers,
                        cluster_id,
                    } => {
                        mz_ore::soft_assert_or_log!(
                            cluster_id.is_none(),
                            "cluster_id set at Source level for Webhooks"
                        );
                        DataSourceDesc::Webhook {
                            validate_using,
                            body_format,
                            headers,
                            cluster_id: in_cluster
                                .expect("webhook sources must use an existing cluster"),
                        }
                    }
                },
                desc: source.desc,
                global_id,
                timeline,
                resolved_ids,
                custom_logical_compaction_window: source
                    .compaction_window
                    .or(custom_logical_compaction_window),
                is_retained_metrics_object,
            }),
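            // For views and materialized views, reuse a cached or previously
            // optimized expression when it is still valid for the current
            // optimizer features; otherwise re-run local optimization and
            // report the new expression for caching.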
            Plan::CreateView(CreateViewPlan { view, .. }) => {
                let optimizer_config =
                    optimize::OptimizerConfig::from(session_catalog.system_vars());
                let previous_exprs = previous_item.map(|item| match item {
                    CatalogItem::View(view) => Some((view.raw_expr, view.locally_optimized_expr)),
                    _ => None,
                });

                let (raw_expr, optimized_expr) = match (cached_expr, previous_exprs) {
                    (Some(local_expr), _)
                        if local_expr.optimizer_features == optimizer_config.features =>
                    {
                        debug!("local expression cache hit for {global_id:?}");
                        (Arc::new(view.expr), Arc::new(local_expr.local_mir))
                    }
                    (_, Some(Some((raw_expr, optimized_expr)))) if *raw_expr == view.expr => {
                        (Arc::clone(&raw_expr), Arc::clone(&optimized_expr))
                    }
                    (cached_expr, _) => {
                        let optimizer_features = optimizer_config.features.clone();
                        let mut optimizer = optimize::view::Optimizer::new(optimizer_config, None);

                        let raw_expr = view.expr;
                        let optimized_expr = match optimizer.optimize(raw_expr.clone()) {
                            Ok(optimized_expr) => optimized_expr,
                            Err(err) => return Err((err.into(), cached_expr)),
                        };

                        uncached_expr = Some((optimized_expr.clone(), optimizer_features));

                        (Arc::new(raw_expr), Arc::new(optimized_expr))
                    }
                };

                let dependencies: BTreeSet<_> = raw_expr
                    .depends_on()
                    .into_iter()
                    .map(|gid| self.get_entry_by_global_id(&gid).id())
                    .collect();

                let typ = infer_sql_type_for_catalog(&raw_expr, &optimized_expr);
                CatalogItem::View(View {
                    create_sql: view.create_sql,
                    global_id,
                    raw_expr,
                    desc: RelationDesc::new(typ, view.column_names),
                    locally_optimized_expr: optimized_expr,
                    conn_id: None,
                    resolved_ids,
                    dependencies: DependencyIds(dependencies),
                })
            }
            Plan::CreateMaterializedView(CreateMaterializedViewPlan {
                materialized_view, ..
            }) => {
                let collections = extra_versions
                    .iter()
                    .map(|(version, gid)| (*version, *gid))
                    .chain([(RelationVersion::root(), global_id)].into_iter())
                    .collect();

                let system_vars = session_catalog.system_vars();
                let overrides = self
                    .get_cluster(materialized_view.cluster_id)
                    .config
                    .features();
                let optimizer_config =
                    optimize::OptimizerConfig::from(system_vars).override_from(&overrides);
                let previous_exprs = previous_item.map(|item| match item {
                    CatalogItem::MaterializedView(materialized_view) => (
                        materialized_view.raw_expr,
                        materialized_view.locally_optimized_expr,
                    ),
                    item => unreachable!("expected materialized view, found: {item:#?}"),
                });

                let (raw_expr, optimized_expr) = match (cached_expr, previous_exprs) {
                    (Some(local_expr), _)
                        if local_expr.optimizer_features == optimizer_config.features =>
                    {
                        debug!("local expression cache hit for {global_id:?}");
                        (
                            Arc::new(materialized_view.expr),
                            Arc::new(local_expr.local_mir),
                        )
                    }
                    (_, Some((raw_expr, optimized_expr)))
                        if *raw_expr == materialized_view.expr =>
                    {
                        (Arc::clone(&raw_expr), Arc::clone(&optimized_expr))
                    }
                    (cached_expr, _) => {
                        let optimizer_features = optimizer_config.features.clone();
                        let mut optimizer = optimize::view::Optimizer::new(optimizer_config, None);

                        let raw_expr = materialized_view.expr;
                        let optimized_expr = match optimizer.optimize(raw_expr.clone()) {
                            Ok(optimized_expr) => optimized_expr,
                            Err(err) => return Err((err.into(), cached_expr)),
                        };

                        uncached_expr = Some((optimized_expr.clone(), optimizer_features));

                        (Arc::new(raw_expr), Arc::new(optimized_expr))
                    }
                };
                let mut typ = infer_sql_type_for_catalog(&raw_expr, &optimized_expr);
                // Apply any non-null assertions to the inferred type.
                for &i in &materialized_view.non_null_assertions {
                    typ.column_types[i].nullable = false;
                }
                let desc = RelationDesc::new(typ, materialized_view.column_names);
                let desc = VersionedRelationDesc::new(desc);

                let initial_as_of = materialized_view.as_of.map(Antichain::from_elem);

                let dependencies = raw_expr
                    .depends_on()
                    .into_iter()
                    .map(|gid| self.get_entry_by_global_id(&gid).id())
                    .collect();

                CatalogItem::MaterializedView(MaterializedView {
                    create_sql: materialized_view.create_sql,
                    collections,
                    raw_expr,
                    locally_optimized_expr: optimized_expr,
                    desc,
                    resolved_ids,
                    dependencies,
                    replacement_target: materialized_view.replacement_target,
                    cluster_id: materialized_view.cluster_id,
                    target_replica: materialized_view.target_replica,
                    non_null_assertions: materialized_view.non_null_assertions,
                    custom_logical_compaction_window: materialized_view.compaction_window,
                    refresh_schedule: materialized_view.refresh_schedule,
                    initial_as_of,
                    optimized_plan: None,
                    physical_plan: None,
                    dataflow_metainfo: None,
                })
            }
            Plan::CreateIndex(CreateIndexPlan { index, .. }) => CatalogItem::Index(Index {
                create_sql: index.create_sql,
                global_id,
                on: index.on,
                keys: index.keys.into(),
                conn_id: None,
                resolved_ids,
                cluster_id: index.cluster_id,
                custom_logical_compaction_window: custom_logical_compaction_window
                    .or(index.compaction_window),
                is_retained_metrics_object,
                optimized_plan: None,
                physical_plan: None,
                dataflow_metainfo: None,
            }),
            Plan::CreateSink(CreateSinkPlan {
                sink,
                with_snapshot,
                in_cluster,
                ..
            }) => CatalogItem::Sink(Sink {
                create_sql: sink.create_sql,
                global_id,
                from: sink.from,
                connection: sink.connection,
                envelope: sink.envelope,
                version: sink.version,
                with_snapshot,
                resolved_ids,
                cluster_id: in_cluster,
                commit_interval: sink.commit_interval,
            }),
            Plan::CreateType(CreateTypePlan { typ, .. }) => {
                if let Err(err) = typ.inner.desc(&session_catalog) {
                    return Err((err.into(), cached_expr));
                }
                CatalogItem::Type(Type {
                    create_sql: Some(typ.create_sql),
                    global_id,
                    details: CatalogTypeDetails {
                        array_id: None,
                        typ: typ.inner,
                        pg_metadata: None,
                    },
                    resolved_ids,
                })
            }
            Plan::CreateSecret(CreateSecretPlan { secret, .. }) => CatalogItem::Secret(Secret {
                create_sql: secret.create_sql,
                global_id,
            }),
            Plan::CreateConnection(CreateConnectionPlan {
                connection:
                    mz_sql::plan::Connection {
                        create_sql,
                        details,
                    },
                ..
            }) => CatalogItem::Connection(Connection {
                create_sql,
                global_id,
                details,
                resolved_ids,
            }),
            _ => {
                return Err((
                    Error::new(ErrorKind::Corruption {
                        detail: "catalog entry generated inappropriate plan".to_string(),
                    })
                    .into(),
                    cached_expr,
                ));
            }
        };

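        // Carry forward any already-computed plans from the previous version
        // of this item, so they do not need to be recomputed.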
        if let Some((prev_optimized, prev_physical, prev_metainfo)) = previous_plans {
            if let Some((optimized_plan, physical_plan, dataflow_metainfo)) = item.plan_fields_mut()
            {
                *optimized_plan = prev_optimized;
                *physical_plan = prev_physical;
                *dataflow_metainfo = prev_metainfo;
            }
        }

        Ok((item, uncached_expr))
    }

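    /// Runs `f` with all system configuration flags required for item parsing
    /// enabled, restoring the original configuration afterwards.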
    pub fn with_enable_for_item_parsing<T>(&mut self, f: impl FnOnce(&mut Self) -> T) -> T {
        let restore = Arc::clone(&self.system_configuration);
        Arc::make_mut(&mut self.system_configuration).enable_for_item_parsing();
        let res = f(self);
        self.system_configuration = restore;
        res
    }

    pub fn get_indexes_on(
        &self,
        id: GlobalId,
        cluster: ClusterId,
    ) -> impl Iterator<Item = (GlobalId, &Index)> {
        let index_matches = move |idx: &Index| idx.on == id && idx.cluster_id == cluster;

        self.try_get_entry_by_global_id(&id)
            .into_iter()
            .map(move |e| {
                e.used_by()
                    .iter()
                    .filter_map(move |uses_id| match self.get_entry(uses_id).item() {
                        CatalogItem::Index(index) if index_matches(index) => {
                            Some((index.global_id(), index))
                        }
                        _ => None,
                    })
            })
            .flatten()
    }

    pub(super) fn get_database(&self, database_id: &DatabaseId) -> &Database {
        &self.database_by_id[database_id]
    }

    pub(super) fn try_get_cluster_replica(
        &self,
        id: ClusterId,
        replica_id: ReplicaId,
    ) -> Option<&ClusterReplica> {
        self.try_get_cluster(id)
            .and_then(|cluster| cluster.replica(replica_id))
    }

    pub(crate) fn get_cluster_replica(
        &self,
        cluster_id: ClusterId,
        replica_id: ReplicaId,
    ) -> &ClusterReplica {
        self.try_get_cluster_replica(cluster_id, replica_id)
            .unwrap_or_else(|| panic!("unknown cluster replica: {cluster_id}.{replica_id}"))
    }

    pub(super) fn resolve_replica_in_cluster(
        &self,
        cluster_id: &ClusterId,
        replica_name: &str,
    ) -> Result<&ClusterReplica, SqlCatalogError> {
        let cluster = self.get_cluster(*cluster_id);
        let replica_id = cluster
            .replica_id_by_name_
            .get(replica_name)
            .ok_or_else(|| SqlCatalogError::UnknownClusterReplica(replica_name.to_string()))?;
        Ok(&cluster.replicas_by_id_[replica_id])
    }

    pub fn get_system_configuration(&self, name: &str) -> Result<&dyn Var, Error> {
        Ok(self.system_configuration.get(name)?)
    }

    pub(super) fn parse_system_configuration(
        &self,
        name: &str,
        value: VarInput,
    ) -> Result<String, Error> {
        let value = self.system_configuration.parse(name, value)?;
        Ok(value.format())
    }

    pub(super) fn resolve_schema_in_database(
        &self,
        database_spec: &ResolvedDatabaseSpecifier,
        schema_name: &str,
        conn_id: &ConnectionId,
    ) -> Result<&Schema, SqlCatalogError> {
        let schema = match database_spec {
            ResolvedDatabaseSpecifier::Ambient if schema_name == MZ_TEMP_SCHEMA => {
                self.temporary_schemas.get(conn_id)
            }
            ResolvedDatabaseSpecifier::Ambient => self
                .ambient_schemas_by_name
                .get(schema_name)
                .and_then(|id| self.ambient_schemas_by_id.get(id)),
            ResolvedDatabaseSpecifier::Id(id) => self.database_by_id.get(id).and_then(|db| {
                db.schemas_by_name
                    .get(schema_name)
                    .and_then(|id| db.schemas_by_id.get(id))
            }),
        };
        schema.ok_or_else(|| SqlCatalogError::UnknownSchema(schema_name.into()))
    }

    pub fn try_get_schema(
        &self,
        database_spec: &ResolvedDatabaseSpecifier,
        schema_spec: &SchemaSpecifier,
        conn_id: &ConnectionId,
    ) -> Option<&Schema> {
        match (database_spec, schema_spec) {
            (ResolvedDatabaseSpecifier::Ambient, SchemaSpecifier::Temporary) => {
                self.temporary_schemas.get(conn_id)
            }
            (ResolvedDatabaseSpecifier::Ambient, SchemaSpecifier::Id(id)) => {
                self.ambient_schemas_by_id.get(id)
            }
            (ResolvedDatabaseSpecifier::Id(database_id), SchemaSpecifier::Id(schema_id)) => self
                .database_by_id
                .get(database_id)
                .and_then(|db| db.schemas_by_id.get(schema_id)),
            (ResolvedDatabaseSpecifier::Id(_), SchemaSpecifier::Temporary) => {
                unreachable!("temporary schemas are in the ambient database")
            }
        }
    }

    pub fn get_schema(
        &self,
        database_spec: &ResolvedDatabaseSpecifier,
        schema_spec: &SchemaSpecifier,
        conn_id: &ConnectionId,
    ) -> &Schema {
        self.try_get_schema(database_spec, schema_spec, conn_id)
            .expect("schema must exist")
    }

    pub(super) fn find_non_temp_schema(&self, schema_id: &SchemaId) -> &Schema {
        self.database_by_id
            .values()
            .filter_map(|database| database.schemas_by_id.get(schema_id))
            .chain(self.ambient_schemas_by_id.values())
            .filter(|schema| schema.id() == &SchemaSpecifier::from(*schema_id))
            .into_first()
    }

    pub(super) fn find_temp_schema(&self, schema_id: &SchemaId) -> &Schema {
        self.temporary_schemas
            .values()
            .filter(|schema| schema.id() == &SchemaSpecifier::from(*schema_id))
            .into_first()
    }

    pub fn get_mz_catalog_schema_id(&self) -> SchemaId {
        self.ambient_schemas_by_name[MZ_CATALOG_SCHEMA]
    }

    pub fn get_mz_catalog_unstable_schema_id(&self) -> SchemaId {
        self.ambient_schemas_by_name[MZ_CATALOG_UNSTABLE_SCHEMA]
    }

    pub fn get_pg_catalog_schema_id(&self) -> SchemaId {
        self.ambient_schemas_by_name[PG_CATALOG_SCHEMA]
    }

    pub fn get_information_schema_id(&self) -> SchemaId {
        self.ambient_schemas_by_name[INFORMATION_SCHEMA]
    }

    pub fn get_mz_internal_schema_id(&self) -> SchemaId {
        self.ambient_schemas_by_name[MZ_INTERNAL_SCHEMA]
    }

    pub fn get_mz_introspection_schema_id(&self) -> SchemaId {
        self.ambient_schemas_by_name[MZ_INTROSPECTION_SCHEMA]
    }

    pub fn get_mz_unsafe_schema_id(&self) -> SchemaId {
        self.ambient_schemas_by_name[MZ_UNSAFE_SCHEMA]
    }

    pub fn system_schema_ids(&self) -> impl Iterator<Item = SchemaId> + '_ {
        SYSTEM_SCHEMAS
            .iter()
            .map(|name| self.ambient_schemas_by_name[*name])
    }

    pub fn is_system_schema_id(&self, id: SchemaId) -> bool {
        self.system_schema_ids().contains(&id)
    }

    pub fn is_system_schema_specifier(&self, spec: SchemaSpecifier) -> bool {
        match spec {
            SchemaSpecifier::Temporary => false,
            SchemaSpecifier::Id(id) => self.is_system_schema_id(id),
        }
    }

    pub fn unstable_schema_ids(&self) -> impl Iterator<Item = SchemaId> + '_ {
        UNSTABLE_SCHEMAS
            .iter()
            .map(|name| self.ambient_schemas_by_name[*name])
    }

    pub fn is_unstable_schema_id(&self, id: SchemaId) -> bool {
        self.unstable_schema_ids().contains(&id)
    }

    pub fn is_unstable_schema_specifier(&self, spec: SchemaSpecifier) -> bool {
        match spec {
            SchemaSpecifier::Temporary => false,
            SchemaSpecifier::Id(id) => self.is_unstable_schema_id(id),
        }
    }

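    /// Creates the temporary schema for `conn_id`, owned by `owner_id`.
    /// Temporary schemas are not assigned a real OID, since they are never
    /// persisted.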
1789 pub fn create_temporary_schema(
1792 &mut self,
1793 conn_id: &ConnectionId,
1794 owner_id: RoleId,
1795 ) -> Result<(), Error> {
1796 let oid = INVALID_OID;
1801 self.temporary_schemas.insert(
1802 conn_id.clone(),
1803 Schema {
1804 name: QualifiedSchemaName {
1805 database: ResolvedDatabaseSpecifier::Ambient,
1806 schema: MZ_TEMP_SCHEMA.into(),
1807 },
1808 id: SchemaSpecifier::Temporary,
1809 oid,
1810 items: BTreeMap::new(),
1811 functions: BTreeMap::new(),
1812 types: BTreeMap::new(),
1813 owner_id,
1814 privileges: PrivilegeMap::from_mz_acl_items(vec![rbac::owner_privilege(
1815 mz_sql::catalog::ObjectType::Schema,
1816 owner_id,
1817 )]),
1818 },
1819 );
1820 Ok(())
1821 }
1822
1823 pub(crate) fn get_temporary_oids(&self) -> impl Iterator<Item = u32> + '_ {
1825 std::iter::empty()
1826 .chain(self.ambient_schemas_by_id.values().filter_map(|schema| {
1827 if schema.id.is_temporary() {
1828 Some(schema.oid)
1829 } else {
1830 None
1831 }
1832 }))
1833 .chain(self.entry_by_id.values().filter_map(|entry| {
1834 if entry.item().is_temporary() {
1835 Some(entry.oid)
1836 } else {
1837 None
1838 }
1839 }))
1840 }
1841
1842 pub fn resolve_builtin_table(&self, builtin: &'static BuiltinTable) -> CatalogItemId {
1846 self.resolve_builtin_object(&Builtin::<IdReference>::Table(builtin))
1847 }
1848
1849 pub fn resolve_builtin_log(&self, builtin: &'static BuiltinLog) -> (CatalogItemId, GlobalId) {
1853 let item_id = self.resolve_builtin_object(&Builtin::<IdReference>::Log(builtin));
1854 let log = match self.get_entry(&item_id).item() {
1855 CatalogItem::Log(log) => log,
1856 other => unreachable!("programming error, expected BuiltinLog, found {other:?}"),
1857 };
1858 (item_id, log.global_id)
1859 }
1860
1861 pub fn resolve_builtin_source(&self, builtin: &'static BuiltinSource) -> CatalogItemId {
1865 self.resolve_builtin_object(&Builtin::<IdReference>::Source(builtin))
1866 }
1867
1868 pub fn resolve_builtin_object<T: TypeReference>(&self, builtin: &Builtin<T>) -> CatalogItemId {
1872 let schema_id = &self.ambient_schemas_by_name[builtin.schema()];
1873 let schema = &self.ambient_schemas_by_id[schema_id];
1874 match builtin.catalog_item_type() {
1875 CatalogItemType::Type => schema.types[builtin.name()],
1876 CatalogItemType::Func => schema.functions[builtin.name()],
1877 CatalogItemType::Table
1878 | CatalogItemType::Source
1879 | CatalogItemType::Sink
1880 | CatalogItemType::View
1881 | CatalogItemType::MaterializedView
1882 | CatalogItemType::Index
1883 | CatalogItemType::Secret
1884 | CatalogItemType::Connection => schema.items[builtin.name()],
1885 }
1886 }
1887
1888 pub fn resolve_builtin_type_references(
1890 &self,
1891 builtin: &BuiltinType<NameReference>,
1892 ) -> BuiltinType<IdReference> {
1893 let typ: CatalogType<IdReference> = match &builtin.details.typ {
1894 CatalogType::AclItem => CatalogType::AclItem,
1895 CatalogType::Array { element_reference } => CatalogType::Array {
1896 element_reference: self.get_system_type(element_reference).id,
1897 },
1898 CatalogType::List {
1899 element_reference,
1900 element_modifiers,
1901 } => CatalogType::List {
1902 element_reference: self.get_system_type(element_reference).id,
1903 element_modifiers: element_modifiers.clone(),
1904 },
1905 CatalogType::Map {
1906 key_reference,
1907 value_reference,
1908 key_modifiers,
1909 value_modifiers,
1910 } => CatalogType::Map {
1911 key_reference: self.get_system_type(key_reference).id,
1912 value_reference: self.get_system_type(value_reference).id,
1913 key_modifiers: key_modifiers.clone(),
1914 value_modifiers: value_modifiers.clone(),
1915 },
1916 CatalogType::Range { element_reference } => CatalogType::Range {
1917 element_reference: self.get_system_type(element_reference).id,
1918 },
1919 CatalogType::Record { fields } => CatalogType::Record {
1920 fields: fields
1921 .into_iter()
1922 .map(|f| CatalogRecordField {
1923 name: f.name.clone(),
1924 type_reference: self.get_system_type(f.type_reference).id,
1925 type_modifiers: f.type_modifiers.clone(),
1926 })
1927 .collect(),
1928 },
1929 CatalogType::Bool => CatalogType::Bool,
1930 CatalogType::Bytes => CatalogType::Bytes,
1931 CatalogType::Char => CatalogType::Char,
1932 CatalogType::Date => CatalogType::Date,
1933 CatalogType::Float32 => CatalogType::Float32,
1934 CatalogType::Float64 => CatalogType::Float64,
1935 CatalogType::Int16 => CatalogType::Int16,
1936 CatalogType::Int32 => CatalogType::Int32,
1937 CatalogType::Int64 => CatalogType::Int64,
1938 CatalogType::UInt16 => CatalogType::UInt16,
1939 CatalogType::UInt32 => CatalogType::UInt32,
1940 CatalogType::UInt64 => CatalogType::UInt64,
1941 CatalogType::MzTimestamp => CatalogType::MzTimestamp,
1942 CatalogType::Interval => CatalogType::Interval,
1943 CatalogType::Jsonb => CatalogType::Jsonb,
1944 CatalogType::Numeric => CatalogType::Numeric,
1945 CatalogType::Oid => CatalogType::Oid,
1946 CatalogType::PgLegacyChar => CatalogType::PgLegacyChar,
1947 CatalogType::PgLegacyName => CatalogType::PgLegacyName,
1948 CatalogType::Pseudo => CatalogType::Pseudo,
1949 CatalogType::RegClass => CatalogType::RegClass,
1950 CatalogType::RegProc => CatalogType::RegProc,
1951 CatalogType::RegType => CatalogType::RegType,
1952 CatalogType::String => CatalogType::String,
1953 CatalogType::Time => CatalogType::Time,
1954 CatalogType::Timestamp => CatalogType::Timestamp,
1955 CatalogType::TimestampTz => CatalogType::TimestampTz,
1956 CatalogType::Uuid => CatalogType::Uuid,
1957 CatalogType::VarChar => CatalogType::VarChar,
1958 CatalogType::Int2Vector => CatalogType::Int2Vector,
1959 CatalogType::MzAclItem => CatalogType::MzAclItem,
1960 };
1961
1962 BuiltinType {
1963 name: builtin.name,
1964 schema: builtin.schema,
1965 oid: builtin.oid,
1966 details: CatalogTypeDetails {
1967 array_id: builtin.details.array_id,
1968 typ,
1969 pg_metadata: builtin.details.pg_metadata.clone(),
1970 },
1971 }
1972 }
1973
1974 pub fn config(&self) -> &mz_sql::catalog::CatalogConfig {
1975 &self.config
1976 }
1977
1978 pub fn resolve_database(&self, database_name: &str) -> Result<&Database, SqlCatalogError> {
1979 match self.database_by_name.get(database_name) {
1980 Some(id) => Ok(&self.database_by_id[id]),
1981 None => Err(SqlCatalogError::UnknownDatabase(database_name.into())),
1982 }
1983 }
1984
1985 pub fn resolve_schema(
1986 &self,
1987 current_database: Option<&DatabaseId>,
1988 database_name: Option<&str>,
1989 schema_name: &str,
1990 conn_id: &ConnectionId,
1991 ) -> Result<&Schema, SqlCatalogError> {
1992 let database_spec = match database_name {
1993 Some(database) => Some(ResolvedDatabaseSpecifier::Id(
1998 self.resolve_database(database)?.id().clone(),
1999 )),
2000 None => current_database.map(|id| ResolvedDatabaseSpecifier::Id(id.clone())),
2001 };
2002
2003 if let Some(database_spec) = database_spec {
2005 if let Ok(schema) =
2006 self.resolve_schema_in_database(&database_spec, schema_name, conn_id)
2007 {
2008 return Ok(schema);
2009 }
2010 }
2011
2012 if let Ok(schema) = self.resolve_schema_in_database(
2014 &ResolvedDatabaseSpecifier::Ambient,
2015 schema_name,
2016 conn_id,
2017 ) {
2018 return Ok(schema);
2019 }
2020
2021 Err(SqlCatalogError::UnknownSchema(schema_name.into()))
2022 }
2023
2024 pub fn resolve_system_schema(&self, name: &'static str) -> SchemaId {
2028 self.ambient_schemas_by_name[name]
2029 }
2030
    pub fn resolve_search_path(
        &self,
        session: &dyn SessionMetadata,
    ) -> Vec<(ResolvedDatabaseSpecifier, SchemaSpecifier)> {
        let database = self
            .database_by_name
            .get(session.database())
            .map(|id| id.clone());

        session
            .search_path()
            .iter()
            .map(|schema| {
                self.resolve_schema(database.as_ref(), None, schema.as_str(), session.conn_id())
            })
            .filter_map(|schema| schema.ok())
            .map(|schema| (schema.name().database.clone(), schema.id().clone()))
            .collect()
    }

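    /// Prepends the implicit schemas to `search_path`: the temporary schema
    /// (when `include_temp_schema` is set) followed by `mz_catalog` and
    /// `pg_catalog`, each added only if not already present.
    ///
    /// A hypothetical sketch (assuming `state` is a `CatalogState` and
    /// `path` came from `resolve_search_path`; not from the original
    /// source):
    ///
    /// ```ignore
    /// // A search path of [public] becomes, in order:
    /// // [temp, mz_catalog, pg_catalog, public]
    /// let effective = state.effective_search_path(&path, true);
    /// ```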
    pub fn effective_search_path(
        &self,
        search_path: &[(ResolvedDatabaseSpecifier, SchemaSpecifier)],
        include_temp_schema: bool,
    ) -> Vec<(ResolvedDatabaseSpecifier, SchemaSpecifier)> {
        let mut v = Vec::with_capacity(search_path.len() + 3);
        // Include the temporary schema first, if requested.
        let temp_schema = (
            ResolvedDatabaseSpecifier::Ambient,
            SchemaSpecifier::Temporary,
        );
        if include_temp_schema && !search_path.contains(&temp_schema) {
            v.push(temp_schema);
        }
        let default_schemas = [
            (
                ResolvedDatabaseSpecifier::Ambient,
                SchemaSpecifier::Id(self.get_mz_catalog_schema_id()),
            ),
            (
                ResolvedDatabaseSpecifier::Ambient,
                SchemaSpecifier::Id(self.get_pg_catalog_schema_id()),
            ),
        ];
        for schema in default_schemas.into_iter() {
            if !search_path.contains(&schema) {
                v.push(schema);
            }
        }
        v.extend_from_slice(search_path);
        v
    }

    pub fn resolve_cluster(&self, name: &str) -> Result<&Cluster, SqlCatalogError> {
        let id = self
            .clusters_by_name
            .get(name)
            .ok_or_else(|| SqlCatalogError::UnknownCluster(name.to_string()))?;
        Ok(&self.clusters_by_id[id])
    }

    /// Resolves a builtin cluster by name. Panics if the cluster is not
    /// present in the catalog, since builtin clusters must always exist.
    pub fn resolve_builtin_cluster(&self, cluster: &BuiltinCluster) -> &Cluster {
        let id = self
            .clusters_by_name
            .get(cluster.name)
            .expect("failed to lookup BuiltinCluster by name");
        self.clusters_by_id
            .get(id)
            .expect("failed to lookup BuiltinCluster by ID")
    }

    /// Resolves a qualified `cluster.replica` name to the corresponding
    /// replica.
    pub fn resolve_cluster_replica(
        &self,
        cluster_replica_name: &QualifiedReplica,
    ) -> Result<&ClusterReplica, SqlCatalogError> {
        let cluster = self.resolve_cluster(cluster_replica_name.cluster.as_str())?;
        let replica_name = cluster_replica_name.replica.as_str();
        let replica_id = cluster
            .replica_id(replica_name)
            .ok_or_else(|| SqlCatalogError::UnknownClusterReplica(replica_name.to_string()))?;
        Ok(cluster.replica(replica_id).expect("Must exist"))
    }

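    /// Resolves a `PartialItemName` to a `CatalogEntry` within the namespace
    /// selected by `get_schema_entries` (items, functions, or types).
    ///
    /// If the name is schema-qualified, only that schema is searched;
    /// otherwise the connection's temporary schema is checked first, then
    /// each schema in `search_path` in order. `err_gen` builds the error
    /// returned when no entry matches.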
    #[allow(clippy::useless_let_if_seq)]
    pub fn resolve(
        &self,
        get_schema_entries: fn(&Schema) -> &BTreeMap<String, CatalogItemId>,
        current_database: Option<&DatabaseId>,
        search_path: &Vec<(ResolvedDatabaseSpecifier, SchemaSpecifier)>,
        name: &PartialItemName,
        conn_id: &ConnectionId,
        err_gen: fn(String) -> SqlCatalogError,
    ) -> Result<&CatalogEntry, SqlCatalogError> {
        // If a schema was specified, search only that schema. Otherwise
        // check the temporary schema, then every schema in the search path.
        let schemas = match &name.schema {
            Some(schema_name) => {
                match self.resolve_schema(
                    current_database,
                    name.database.as_deref(),
                    schema_name,
                    conn_id,
                ) {
                    Ok(schema) => vec![(schema.name.database.clone(), schema.id.clone())],
                    Err(e) => return Err(e),
                }
            }
            None => match self
                .try_get_schema(
                    &ResolvedDatabaseSpecifier::Ambient,
                    &SchemaSpecifier::Temporary,
                    conn_id,
                )
                .and_then(|schema| schema.items.get(&name.item))
            {
                Some(id) => return Ok(self.get_entry(id)),
                None => search_path.to_vec(),
            },
        };

        for (database_spec, schema_spec) in &schemas {
            let Some(schema) = self.try_get_schema(database_spec, schema_spec, conn_id) else {
                continue;
            };

            if let Some(id) = get_schema_entries(schema).get(&name.item) {
                return Ok(&self.entry_by_id[id]);
            }
        }

        // Some relations that once lived in `mz_internal` have moved to
        // `mz_catalog_unstable` or `mz_introspection`. To smooth the
        // transition, also check those schemas whenever the search included
        // `mz_internal`, logging when the fallback is hit.
        let mz_internal_schema = SchemaSpecifier::Id(self.get_mz_internal_schema_id());
        if schemas.iter().any(|(_, spec)| *spec == mz_internal_schema) {
            for schema_id in [
                self.get_mz_catalog_unstable_schema_id(),
                self.get_mz_introspection_schema_id(),
            ] {
                let schema = self.get_schema(
                    &ResolvedDatabaseSpecifier::Ambient,
                    &SchemaSpecifier::Id(schema_id),
                    conn_id,
                );

                if let Some(id) = get_schema_entries(schema).get(&name.item) {
                    debug!(
                        github_27831 = true,
                        "encountered use of outdated schema `mz_internal` for relation: {name}",
                    );
                    return Ok(&self.entry_by_id[id]);
                }
            }
        }

        Err(err_gen(name.to_string()))
    }

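    /// Resolves `name` to an item entry (table, view, source, and so on) by
    /// searching the schema `items` namespace; returns
    /// `SqlCatalogError::UnknownItem` if nothing matches.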
    pub fn resolve_entry(
        &self,
        current_database: Option<&DatabaseId>,
        search_path: &Vec<(ResolvedDatabaseSpecifier, SchemaSpecifier)>,
        name: &PartialItemName,
        conn_id: &ConnectionId,
    ) -> Result<&CatalogEntry, SqlCatalogError> {
        self.resolve(
            |schema| &schema.items,
            current_database,
            search_path,
            name,
            conn_id,
            SqlCatalogError::UnknownItem,
        )
    }

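    /// Resolves `name` to a function entry by searching the schema
    /// `functions` namespace; returns `SqlCatalogError::UnknownFunction` if
    /// nothing matches.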
    pub fn resolve_function(
        &self,
        current_database: Option<&DatabaseId>,
        search_path: &Vec<(ResolvedDatabaseSpecifier, SchemaSpecifier)>,
        name: &PartialItemName,
        conn_id: &ConnectionId,
    ) -> Result<&CatalogEntry, SqlCatalogError> {
        self.resolve(
            |schema| &schema.functions,
            current_database,
            search_path,
            name,
            conn_id,
            |name| SqlCatalogError::UnknownFunction {
                name,
                alternative: None,
            },
        )
    }

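    /// Resolves `name` to a type entry by searching the schema `types`
    /// namespace. Additionally warns when a user qualifies a
    /// Materialize-specific type with `pg_catalog`, which resolves today
    /// only because of a bug.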
    pub fn resolve_type(
        &self,
        current_database: Option<&DatabaseId>,
        search_path: &Vec<(ResolvedDatabaseSpecifier, SchemaSpecifier)>,
        name: &PartialItemName,
        conn_id: &ConnectionId,
    ) -> Result<&CatalogEntry, SqlCatalogError> {
        // Builtin types that do not live in `pg_catalog`, keyed by name.
        static NON_PG_CATALOG_TYPES: LazyLock<
            BTreeMap<&'static str, &'static BuiltinType<NameReference>>,
        > = LazyLock::new(|| {
            BUILTINS::types()
                .filter(|typ| typ.schema != PG_CATALOG_SCHEMA)
                .map(|typ| (typ.name, typ))
                .collect()
        });

        let entry = self.resolve(
            |schema| &schema.types,
            current_database,
            search_path,
            name,
            conn_id,
            |name| SqlCatalogError::UnknownType { name },
        )?;

        if conn_id != &SYSTEM_CONN_ID && name.schema.as_deref() == Some(PG_CATALOG_SCHEMA) {
            if let Some(typ) = NON_PG_CATALOG_TYPES.get(entry.name().item.as_str()) {
                warn!(
                    "user specified an incorrect schema of {} for the type {}, which should be in \
                    the {} schema. This works now due to a bug but will be fixed in a later release.",
                    PG_CATALOG_SCHEMA.quoted(),
                    typ.name.quoted(),
                    typ.schema.quoted(),
                )
            }
        }

        Ok(entry)
    }

    pub(super) fn get_comment_id(&self, object_id: ObjectId) -> CommentObjectId {
        match object_id {
            ObjectId::Item(item_id) => self.get_entry(&item_id).comment_object_id(),
            ObjectId::Role(role_id) => CommentObjectId::Role(role_id),
            ObjectId::Database(database_id) => CommentObjectId::Database(database_id),
            ObjectId::Schema((database, schema)) => CommentObjectId::Schema((database, schema)),
            ObjectId::Cluster(cluster_id) => CommentObjectId::Cluster(cluster_id),
            ObjectId::ClusterReplica(cluster_replica_id) => {
                CommentObjectId::ClusterReplica(cluster_replica_id)
            }
            ObjectId::NetworkPolicy(network_policy_id) => {
                CommentObjectId::NetworkPolicy(network_policy_id)
            }
        }
    }

    pub fn system_config(&self) -> &SystemVars {
        &self.system_configuration
    }

    /// Returns a mutable reference to the system configuration, cloning the
    /// shared contents first if other references exist (copy-on-write via
    /// `Arc::make_mut`).
    pub fn system_config_mut(&mut self) -> &mut SystemVars {
        Arc::make_mut(&mut self.system_configuration)
    }

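    /// Serializes the catalog state to a pretty-printed JSON string for
    /// debugging. The dump additionally includes the current system
    /// parameter defaults and, when provided, the set of unfinalized storage
    /// shards; temporary (per-connection) global IDs and role authentication
    /// data are stripped from the output.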
    pub fn dump(&self, unfinalized_shards: Option<BTreeSet<String>>) -> Result<String, Error> {
        // Dump the base state.
        let mut dump = serde_json::to_value(&self).map_err(|e| {
            Error::new(ErrorKind::Unstructured(format!(
                "internal error: could not dump catalog: {}",
                e
            )))
        })?;

        let dump_obj = dump.as_object_mut().expect("state must have been dumped");
        // Stitch in the current system parameter defaults.
        dump_obj.insert(
            "system_parameter_defaults".into(),
            serde_json::json!(self.system_config().defaults()),
        );
        // Stitch in the unfinalized shards, if provided.
        if let Some(unfinalized_shards) = unfinalized_shards {
            dump_obj
                .get_mut("storage_metadata")
                .expect("known to exist")
                .as_object_mut()
                .expect("storage_metadata is an object")
                .insert(
                    "unfinalized_shards".into(),
                    serde_json::json!(unfinalized_shards),
                );
        }
        // Remove the global IDs of temporary (per-connection) objects, which
        // are not stable across environments.
        let temporary_gids: Vec<_> = self
            .entry_by_global_id
            .iter()
            .filter(|(_gid, item_id)| self.get_entry(item_id).conn_id().is_some())
            .map(|(gid, _item_id)| *gid)
            .collect();
        if !temporary_gids.is_empty() {
            let gids = dump_obj
                .get_mut("entry_by_global_id")
                .expect("known to exist")
                .as_object_mut()
                .expect("entry_by_global_id is an object");
            for gid in temporary_gids {
                gids.remove(&gid.to_string());
            }
        }
        // Remove role authentication information, which is sensitive.
        dump_obj.remove("role_auth_by_id");

        Ok(serde_json::to_string_pretty(&dump).expect("cannot fail on serde_json::Value"))
    }

    pub fn availability_zones(&self) -> &[String] {
        &self.availability_zones
    }

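    /// Converts a durable replica location into a controller
    /// `ReplicaLocation`, validating the requested size against
    /// `allowed_sizes` and reconciling the replica's own availability zone
    /// with the cluster-level `allowed_availability_zones` (the two are
    /// mutually exclusive).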
    pub fn concretize_replica_location(
        &self,
        location: mz_catalog::durable::ReplicaLocation,
        allowed_sizes: &Vec<String>,
        allowed_availability_zones: Option<&[String]>,
    ) -> Result<ReplicaLocation, Error> {
        let location = match location {
            mz_catalog::durable::ReplicaLocation::Unmanaged {
                storagectl_addrs,
                computectl_addrs,
            } => {
                if allowed_availability_zones.is_some() {
                    return Err(Error {
                        kind: ErrorKind::Internal(
                            "tried to concretize an unmanaged replica with specific availability zones"
                                .to_string(),
                        ),
                    });
                }
                ReplicaLocation::Unmanaged(UnmanagedReplicaLocation {
                    storagectl_addrs,
                    computectl_addrs,
                })
            }
            mz_catalog::durable::ReplicaLocation::Managed {
                size,
                availability_zone,
                billed_as,
                internal,
                pending,
            } => {
                if allowed_availability_zones.is_some() && availability_zone.is_some() {
                    let message = "tried to concretize a managed replica with both \
                        cluster-allowed availability zones and a specific availability zone";
                    return Err(Error {
                        kind: ErrorKind::Internal(message.to_string()),
                    });
                }
                self.ensure_valid_replica_size(allowed_sizes, &size)?;
                let cluster_replica_sizes = &self.cluster_replica_sizes;

                ReplicaLocation::Managed(ManagedReplicaLocation {
                    allocation: cluster_replica_sizes
                        .0
                        .get(&size)
                        .expect("catalog out of sync")
                        .clone(),
                    availability_zones: match (availability_zone, allowed_availability_zones) {
                        (Some(az), _) => ManagedReplicaAvailabilityZones::FromReplica(Some(az)),
                        (None, Some([])) => ManagedReplicaAvailabilityZones::FromCluster(None),
                        (None, Some(azs)) => {
                            ManagedReplicaAvailabilityZones::FromCluster(Some(azs.to_vec()))
                        }
                        (None, None) => ManagedReplicaAvailabilityZones::FromReplica(None),
                    },
                    size,
                    billed_as,
                    internal,
                    pending,
                })
            }
        };
        Ok(location)
    }

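    /// Returns whether the named replica size is provisioned with its own
    /// disk: sizes with swap enabled do not count as having a disk, and
    /// neither do sizes with a disk limit of zero.
    ///
    /// Panics if `size` is not present in the cluster replica size map, so
    /// callers must validate the size first (e.g. via
    /// `ensure_valid_replica_size`).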
    pub(crate) fn cluster_replica_size_has_disk(&self, size: &str) -> bool {
        let alloc = &self.cluster_replica_sizes.0[size];
        !alloc.swap_enabled && alloc.disk_limit != Some(DiskLimit::ZERO)
    }

    pub(crate) fn ensure_valid_replica_size(
        &self,
        allowed_sizes: &[String],
        size: &String,
    ) -> Result<(), Error> {
        let cluster_replica_sizes = &self.cluster_replica_sizes;

        if !cluster_replica_sizes.0.contains_key(size)
            || (!allowed_sizes.is_empty() && !allowed_sizes.contains(size))
            || cluster_replica_sizes.0[size].disabled
        {
            // Build the list of valid sizes to include in the error.
            let mut entries = cluster_replica_sizes
                .enabled_allocations()
                .collect::<Vec<_>>();

            if !allowed_sizes.is_empty() {
                let allowed_sizes = BTreeSet::<&String>::from_iter(allowed_sizes.iter());
                entries.retain(|(name, _)| allowed_sizes.contains(name));
            }

            // Present the sizes in a sensible order: by scale, then by CPU.
            entries.sort_by_key(
                |(
                    _name,
                    ReplicaAllocation {
                        scale, cpu_limit, ..
                    },
                )| (scale, cpu_limit),
            );

            Err(Error {
                kind: ErrorKind::InvalidClusterReplicaSize {
                    size: size.to_owned(),
                    expected: entries.into_iter().map(|(name, _)| name.clone()).collect(),
                },
            })
        } else {
            Ok(())
        }
    }

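    /// Returns an error if `role_id` refers to a builtin role, whose name is
    /// reserved.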
    pub fn ensure_not_reserved_role(&self, role_id: &RoleId) -> Result<(), Error> {
        if role_id.is_builtin() {
            let role = self.get_role(role_id);
            Err(Error::new(ErrorKind::ReservedRoleName(
                role.name().to_string(),
            )))
        } else {
            Ok(())
        }
    }

    pub fn ensure_not_reserved_network_policy(
        &self,
        network_policy_id: &NetworkPolicyId,
    ) -> Result<(), Error> {
        if network_policy_id.is_builtin() {
            let policy = self.get_network_policy(network_policy_id);
            Err(Error::new(ErrorKind::ReservedNetworkPolicyName(
                policy.name.clone(),
            )))
        } else {
            Ok(())
        }
    }

    pub fn ensure_grantable_role(&self, role_id: &RoleId) -> Result<(), Error> {
        let is_grantable = !role_id.is_public() && !role_id.is_system();
        if is_grantable {
            Ok(())
        } else {
            let role = self.get_role(role_id);
            Err(Error::new(ErrorKind::UngrantableRoleName(
                role.name().to_string(),
            )))
        }
    }

    pub fn ensure_not_system_role(&self, role_id: &RoleId) -> Result<(), Error> {
        if role_id.is_system() {
            let role = self.get_role(role_id);
            Err(Error::new(ErrorKind::ReservedSystemRoleName(
                role.name().to_string(),
            )))
        } else {
            Ok(())
        }
    }

    pub fn ensure_not_predefined_role(&self, role_id: &RoleId) -> Result<(), Error> {
        if role_id.is_predefined() {
            let role = self.get_role(role_id);
            Err(Error::new(ErrorKind::ReservedSystemRoleName(
                role.name().to_string(),
            )))
        } else {
            Ok(())
        }
    }

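    /// Appends a new event to the durable audit log inside `tx`, also pushing
    /// it onto `audit_events` so the in-memory state can be updated once the
    /// transaction commits. The event timestamp comes from `oracle_write_ts`
    /// unless a mock timestamp is configured (tests only).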
    pub(crate) fn add_to_audit_log(
        system_configuration: &SystemVars,
        oracle_write_ts: mz_repr::Timestamp,
        session: Option<&ConnMeta>,
        tx: &mut mz_catalog::durable::Transaction,
        audit_events: &mut Vec<VersionedEvent>,
        event_type: EventType,
        object_type: ObjectType,
        details: EventDetails,
    ) -> Result<(), Error> {
        let user = session.map(|session| session.user().name.to_string());

        // Use the mocked timestamp, if one is set; otherwise use the
        // oracle's write timestamp.
        let occurred_at = match system_configuration.unsafe_mock_audit_event_timestamp() {
            Some(ts) => ts.into(),
            _ => oracle_write_ts.into(),
        };
        let id = tx.allocate_audit_log_id()?;
        let event = VersionedEvent::new(id, event_type, object_type, details, user, occurred_at);
        audit_events.push(event.clone());
        tx.insert_audit_log_event(event);
        Ok(())
    }

    pub(super) fn get_owner_id(&self, id: &ObjectId, conn_id: &ConnectionId) -> Option<RoleId> {
        match id {
            ObjectId::Cluster(id) => Some(self.get_cluster(*id).owner_id()),
            ObjectId::ClusterReplica((cluster_id, replica_id)) => Some(
                self.get_cluster_replica(*cluster_id, *replica_id)
                    .owner_id(),
            ),
            ObjectId::Database(id) => Some(self.get_database(id).owner_id()),
            ObjectId::Schema((database_spec, schema_spec)) => Some(
                self.get_schema(database_spec, schema_spec, conn_id)
                    .owner_id(),
            ),
            ObjectId::Item(id) => Some(*self.get_entry(id).owner_id()),
            // Roles do not have owners.
            ObjectId::Role(_) => None,
            ObjectId::NetworkPolicy(id) => Some(self.get_network_policy(id).owner_id.clone()),
        }
    }

    pub(super) fn get_object_type(&self, object_id: &ObjectId) -> mz_sql::catalog::ObjectType {
        match object_id {
            ObjectId::Cluster(_) => mz_sql::catalog::ObjectType::Cluster,
            ObjectId::ClusterReplica(_) => mz_sql::catalog::ObjectType::ClusterReplica,
            ObjectId::Database(_) => mz_sql::catalog::ObjectType::Database,
            ObjectId::Schema(_) => mz_sql::catalog::ObjectType::Schema,
            ObjectId::Role(_) => mz_sql::catalog::ObjectType::Role,
            ObjectId::Item(id) => self.get_entry(id).item_type().into(),
            ObjectId::NetworkPolicy(_) => mz_sql::catalog::ObjectType::NetworkPolicy,
        }
    }

    pub(super) fn get_system_object_type(
        &self,
        id: &SystemObjectId,
    ) -> mz_sql::catalog::SystemObjectType {
        match id {
            SystemObjectId::Object(object_id) => {
                SystemObjectType::Object(self.get_object_type(object_id))
            }
            SystemObjectId::System => SystemObjectType::System,
        }
    }

    pub fn storage_metadata(&self) -> &StorageMetadata {
        &self.storage_metadata
    }

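    /// Groups the given items by their effective compaction window,
    /// deduplicating the input IDs. Only sources and source-fed tables
    /// (ingestion exports and webhooks) are included; tables backed by
    /// ordinary table writes and all other item types are skipped.
    ///
    /// A hypothetical sketch (assuming `state` is a `CatalogState` and
    /// `source_id`/`table_id` are `CatalogItemId`s; not from the original
    /// source):
    ///
    /// ```ignore
    /// let windows = state.source_compaction_windows([source_id, table_id]);
    /// for (window, items) in windows {
    ///     // ... apply `window` to every item in `items` ...
    /// }
    /// ```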
    pub fn source_compaction_windows(
        &self,
        ids: impl IntoIterator<Item = CatalogItemId>,
    ) -> BTreeMap<CompactionWindow, BTreeSet<CatalogItemId>> {
        let mut cws: BTreeMap<CompactionWindow, BTreeSet<CatalogItemId>> = BTreeMap::new();
        let mut seen = BTreeSet::new();
        for item_id in ids {
            if !seen.insert(item_id) {
                continue;
            }
            let entry = self.get_entry(&item_id);
            match entry.item() {
                CatalogItem::Source(source) => {
                    let source_cw = source.custom_logical_compaction_window.unwrap_or_default();
                    cws.entry(source_cw).or_default().insert(item_id);
                }
                CatalogItem::Table(table) => {
                    let table_cw = table.custom_logical_compaction_window.unwrap_or_default();
                    match &table.data_source {
                        TableDataSource::DataSource {
                            desc:
                                DataSourceDesc::IngestionExport { .. }
                                | DataSourceDesc::Webhook { .. },
                            timeline: _,
                        } => {
                            cws.entry(table_cw).or_default().insert(item_id);
                        }
                        // Tables written to directly are not source-fed and
                        // are ignored here.
                        TableDataSource::TableWrites { .. } => {}
                        // These data sources cannot back a table.
                        TableDataSource::DataSource {
                            desc:
                                DataSourceDesc::Ingestion { .. }
                                | DataSourceDesc::OldSyntaxIngestion { .. }
                                | DataSourceDesc::Introspection(_)
                                | DataSourceDesc::Progress
                                | DataSourceDesc::Catalog,
                            ..
                        } => {
                            unreachable!(
                                "unexpected DataSourceDesc for table {item_id}: {:?}",
                                table.data_source
                            )
                        }
                    }
                }
                _ => {
                    // Other item types have no compaction window of interest.
                    continue;
                }
            }
        }
        cws
    }

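    /// Maps a `CommentObjectId` back to the `CatalogItemId` it wraps, when it
    /// refers to a schema item; comments on roles, databases, schemas,
    /// clusters, cluster replicas, and network policies carry no item ID.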
    pub fn comment_id_to_item_id(id: &CommentObjectId) -> Option<CatalogItemId> {
        match id {
            CommentObjectId::Table(id)
            | CommentObjectId::View(id)
            | CommentObjectId::MaterializedView(id)
            | CommentObjectId::Source(id)
            | CommentObjectId::Sink(id)
            | CommentObjectId::Index(id)
            | CommentObjectId::Func(id)
            | CommentObjectId::Connection(id)
            | CommentObjectId::Type(id)
            | CommentObjectId::Secret(id) => Some(*id),
            CommentObjectId::Role(_)
            | CommentObjectId::Database(_)
            | CommentObjectId::Schema(_)
            | CommentObjectId::Cluster(_)
            | CommentObjectId::ClusterReplica(_)
            | CommentObjectId::NetworkPolicy(_) => None,
        }
    }

    pub fn get_comment_id_entry(&self, id: &CommentObjectId) -> Option<&CatalogEntry> {
        Self::comment_id_to_item_id(id).map(|id| self.get_entry(&id))
    }

    pub fn comment_id_to_audit_log_name(
        &self,
        id: CommentObjectId,
        conn_id: &ConnectionId,
    ) -> String {
        match id {
            CommentObjectId::Table(id)
            | CommentObjectId::View(id)
            | CommentObjectId::MaterializedView(id)
            | CommentObjectId::Source(id)
            | CommentObjectId::Sink(id)
            | CommentObjectId::Index(id)
            | CommentObjectId::Func(id)
            | CommentObjectId::Connection(id)
            | CommentObjectId::Type(id)
            | CommentObjectId::Secret(id) => {
                let item = self.get_entry(&id);
                let name = self.resolve_full_name(item.name(), Some(conn_id));
                name.to_string()
            }
            CommentObjectId::Role(id) => self.get_role(&id).name.clone(),
            CommentObjectId::Database(id) => self.get_database(&id).name.clone(),
            CommentObjectId::Schema((spec, schema_id)) => {
                let schema = self.get_schema(&spec, &schema_id, conn_id);
                self.resolve_full_schema_name(&schema.name).to_string()
            }
            CommentObjectId::Cluster(id) => self.get_cluster(id).name.clone(),
            CommentObjectId::ClusterReplica((cluster_id, replica_id)) => {
                let cluster = self.get_cluster(cluster_id);
                let replica = self.get_cluster_replica(cluster_id, replica_id);
                QualifiedReplica {
                    cluster: Ident::new_unchecked(cluster.name.clone()),
                    replica: Ident::new_unchecked(replica.name.clone()),
                }
                .to_string()
            }
            CommentObjectId::NetworkPolicy(id) => self.get_network_policy(&id).name.clone(),
        }
    }

    pub fn mock_authentication_nonce(&self) -> String {
        self.mock_authentication_nonce.clone().unwrap_or_default()
    }
}

impl ConnectionResolver for CatalogState {
    fn resolve_connection(
        &self,
        id: CatalogItemId,
    ) -> mz_storage_types::connections::Connection<InlinedConnection> {
        use mz_storage_types::connections::Connection::*;
        match self
            .get_entry(&id)
            .connection()
            .expect("catalog out of sync")
            .details
            .to_connection()
        {
            Kafka(conn) => Kafka(conn.into_inline_connection(self)),
            Postgres(conn) => Postgres(conn.into_inline_connection(self)),
            Csr(conn) => Csr(conn.into_inline_connection(self)),
            Ssh(conn) => Ssh(conn),
            Aws(conn) => Aws(conn),
            AwsPrivatelink(conn) => AwsPrivatelink(conn),
            MySql(conn) => MySql(conn.into_inline_connection(self)),
            SqlServer(conn) => SqlServer(conn.into_inline_connection(self)),
            IcebergCatalog(conn) => IcebergCatalog(conn.into_inline_connection(self)),
        }
    }
}

impl OptimizerCatalog for CatalogState {
    fn get_entry(&self, id: &GlobalId) -> CatalogCollectionEntry {
        CatalogState::get_entry_by_global_id(self, id)
    }

    fn get_entry_by_item_id(&self, id: &CatalogItemId) -> &CatalogEntry {
        CatalogState::get_entry(self, id)
    }

    fn resolve_full_name(
        &self,
        name: &QualifiedItemName,
        conn_id: Option<&ConnectionId>,
    ) -> FullItemName {
        CatalogState::resolve_full_name(self, name, conn_id)
    }

    fn get_indexes_on(
        &self,
        id: GlobalId,
        cluster: ClusterId,
    ) -> Box<dyn Iterator<Item = (GlobalId, &Index)> + '_> {
        Box::new(CatalogState::get_indexes_on(self, id, cluster))
    }
}

impl OptimizerCatalog for Catalog {
    fn get_entry(&self, id: &GlobalId) -> CatalogCollectionEntry {
        self.state.get_entry_by_global_id(id)
    }

    fn get_entry_by_item_id(&self, id: &CatalogItemId) -> &CatalogEntry {
        self.state.get_entry(id)
    }

    fn resolve_full_name(
        &self,
        name: &QualifiedItemName,
        conn_id: Option<&ConnectionId>,
    ) -> FullItemName {
        self.state.resolve_full_name(name, conn_id)
    }

    fn get_indexes_on(
        &self,
        id: GlobalId,
        cluster: ClusterId,
    ) -> Box<dyn Iterator<Item = (GlobalId, &Index)> + '_> {
        Box::new(self.state.get_indexes_on(id, cluster))
    }
}

impl Catalog {
    /// Upcasts an `Arc<Catalog>` to an `Arc<dyn OptimizerCatalog>`.
    pub fn as_optimizer_catalog(self: Arc<Self>) -> Arc<dyn OptimizerCatalog> {
        self
    }
}