use anyhow::anyhow;
use differential_dataflow::lattice::Lattice;
use maplit::btreemap;
use maplit::btreeset;
use mz_adapter_types::compaction::CompactionWindow;
use mz_catalog::memory::objects::{CatalogItem, MaterializedView};
use mz_expr::{CollectionPlan, ResultSpec};
use mz_ore::collections::CollectionExt;
use mz_ore::instrument;
use mz_ore::soft_panic_or_log;
use mz_repr::explain::{ExprHumanizerExt, TransientItem};
use mz_repr::optimize::OptimizerFeatures;
use mz_repr::optimize::OverrideFrom;
use mz_repr::refresh_schedule::RefreshSchedule;
use mz_repr::{CatalogItemId, Datum, RelationVersion, Row, VersionedRelationDesc};
use mz_sql::ast::ExplainStage;
use mz_sql::catalog::CatalogError;
use mz_sql::names::ResolvedIds;
use mz_sql::plan;
use mz_sql::session::metadata::SessionMetadata;
use mz_sql_parser::ast;
use mz_sql_parser::ast::display::AstDisplay;
use mz_storage_client::controller::CollectionDescription;
use std::collections::BTreeMap;
use timely::progress::Antichain;
use tracing::Span;

use crate::ReadHolds;
use crate::command::ExecuteResponse;
use crate::coord::sequencer::inner::return_if_err;
use crate::coord::{
    Coordinator, CreateMaterializedViewExplain, CreateMaterializedViewFinish,
    CreateMaterializedViewOptimize, CreateMaterializedViewStage, ExplainContext,
    ExplainPlanContext, Message, PlanValidity, StageResult, Staged,
};
use crate::error::AdapterError;
use crate::explain::explain_dataflow;
use crate::explain::explain_plan;
use crate::explain::optimizer_trace::OptimizerTrace;
use crate::optimize::dataflows::dataflow_import_id_bundle;
use crate::optimize::{self, Optimize};
use crate::session::Session;
use crate::util::ResultExt;
use crate::{AdapterNotice, CollectionIdBundle, ExecuteContext, TimestampProvider, catalog};

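// Wires `CreateMaterializedViewStage` into the coordinator's staged execution machinery:
// each stage exposes its `PlanValidity`, is dispatched to the corresponding
// `create_materialized_view_*` method, and is re-enqueued as a `Message` when it is
// ready to run on the coordinator task again.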
impl Staged for CreateMaterializedViewStage {
    type Ctx = ExecuteContext;

    fn validity(&mut self) -> &mut PlanValidity {
        match self {
            Self::Optimize(stage) => &mut stage.validity,
            Self::Finish(stage) => &mut stage.validity,
            Self::Explain(stage) => &mut stage.validity,
        }
    }

    async fn stage(
        self,
        coord: &mut Coordinator,
        ctx: &mut ExecuteContext,
    ) -> Result<StageResult<Box<Self>>, AdapterError> {
        match self {
            CreateMaterializedViewStage::Optimize(stage) => {
                coord.create_materialized_view_optimize(stage).await
            }
            CreateMaterializedViewStage::Finish(stage) => {
                coord.create_materialized_view_finish(ctx, stage).await
            }
            CreateMaterializedViewStage::Explain(stage) => {
                coord
                    .create_materialized_view_explain(ctx.session(), stage)
                    .await
            }
        }
    }

    fn message(self, ctx: ExecuteContext, span: Span) -> Message {
        Message::CreateMaterializedViewStageReady {
            ctx,
            span,
            stage: self,
        }
    }

    fn cancel_enabled(&self) -> bool {
        true
    }
}

impl Coordinator {
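    /// Sequences a `CREATE MATERIALIZED VIEW` statement: validates the plan and then
    /// drives the resulting stages (optimize, finish) to completion.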
    #[instrument]
    pub(crate) async fn sequence_create_materialized_view(
        &mut self,
        ctx: ExecuteContext,
        plan: plan::CreateMaterializedViewPlan,
        resolved_ids: ResolvedIds,
    ) {
        let stage = return_if_err!(
            self.create_materialized_view_validate(
                ctx.session(),
                plan,
                resolved_ids,
                ExplainContext::None
            ),
            ctx
        );
        self.sequence_staged(ctx, Span::current(), stage).await;
    }

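    /// Sequences an `EXPLAIN` of a `CREATE MATERIALIZED VIEW` statement, running the
    /// same validation and optimization stages but ending in an explain stage instead
    /// of creating the object.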
    #[instrument]
    pub(crate) async fn explain_create_materialized_view(
        &mut self,
        ctx: ExecuteContext,
        plan::ExplainPlanPlan {
            stage,
            format,
            config,
            explainee,
        }: plan::ExplainPlanPlan,
    ) {
        let plan::Explainee::Statement(stmt) = explainee else {
            unreachable!()
        };
        let plan::ExplaineeStatement::CreateMaterializedView { broken, plan } = stmt else {
            unreachable!()
        };

        let optimizer_trace = OptimizerTrace::new(stage.paths());

        let resolved_ids = ResolvedIds::empty();

        let explain_ctx = ExplainContext::Plan(ExplainPlanContext {
            broken,
            config,
            format,
            stage,
            replan: None,
            desc: None,
            optimizer_trace,
        });
        let stage = return_if_err!(
            self.create_materialized_view_validate(ctx.session(), plan, resolved_ids, explain_ctx),
            ctx
        );
        self.sequence_staged(ctx, Span::current(), stage).await;
    }

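    /// Sequences an `EXPLAIN REPLAN` for an existing materialized view: re-plans the
    /// view from its stored `CREATE` SQL and explains what the optimizer would produce
    /// today.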
    #[instrument]
    pub(crate) async fn explain_replan_materialized_view(
        &mut self,
        ctx: ExecuteContext,
        plan::ExplainPlanPlan {
            stage,
            format,
            config,
            explainee,
        }: plan::ExplainPlanPlan,
    ) {
        let plan::Explainee::ReplanMaterializedView(id) = explainee else {
            unreachable!()
        };
        let CatalogItem::MaterializedView(item) = self.catalog().get_entry(&id).item() else {
            unreachable!()
        };
        let gid = item.global_id_writes();

        let create_sql = item.create_sql.clone();
        let plan_result = self
            .catalog_mut()
            .deserialize_plan_with_enable_for_item_parsing(&create_sql, true);
        let (plan, resolved_ids) = return_if_err!(plan_result, ctx);

        let plan::Plan::CreateMaterializedView(plan) = plan else {
            unreachable!()
        };

        let broken = false;

        let optimizer_trace = OptimizerTrace::new(stage.paths());

        let explain_ctx = ExplainContext::Plan(ExplainPlanContext {
            broken,
            config,
            format,
            stage,
            replan: Some(gid),
            desc: None,
            optimizer_trace,
        });
        let stage = return_if_err!(
            self.create_materialized_view_validate(ctx.session(), plan, resolved_ids, explain_ctx),
            ctx
        );
        self.sequence_staged(ctx, Span::current(), stage).await;
    }

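    /// Explains an existing materialized view from catalog state without re-optimizing:
    /// raw and local plans come from the catalog item itself, global and physical plans
    /// from the cached dataflow plans.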
    #[instrument]
    pub(super) fn explain_materialized_view(
        &self,
        ctx: &ExecuteContext,
        plan::ExplainPlanPlan {
            stage,
            format,
            config,
            explainee,
        }: plan::ExplainPlanPlan,
    ) -> Result<ExecuteResponse, AdapterError> {
        let plan::Explainee::MaterializedView(id) = explainee else {
            unreachable!()
        };
        let CatalogItem::MaterializedView(view) = self.catalog().get_entry(&id).item() else {
            unreachable!()
        };
        let gid = view.global_id_writes();

        let Some(dataflow_metainfo) = self.catalog().try_get_dataflow_metainfo(&gid) else {
            if !id.is_system() {
                tracing::error!(
                    "cannot find dataflow metainformation for materialized view {id} in catalog"
                );
            }
            coord_bail!(
                "cannot find dataflow metainformation for materialized view {id} in catalog"
            );
        };

        let target_cluster = self.catalog().get_cluster(view.cluster_id);

        let features = OptimizerFeatures::from(self.catalog().system_config())
            .override_from(&target_cluster.config.features())
            .override_from(&config.features);

        let cardinality_stats = BTreeMap::new();

        let explain = match stage {
            ExplainStage::RawPlan => explain_plan(
                view.raw_expr.as_ref().clone(),
                format,
                &config,
                &features,
                &self.catalog().for_session(ctx.session()),
                cardinality_stats,
                Some(target_cluster.name.as_str()),
            )?,
            ExplainStage::LocalPlan => explain_plan(
                view.optimized_expr.as_inner().clone(),
                format,
                &config,
                &features,
                &self.catalog().for_session(ctx.session()),
                cardinality_stats,
                Some(target_cluster.name.as_str()),
            )?,
            ExplainStage::GlobalPlan => {
                let Some(plan) = self.catalog().try_get_optimized_plan(&gid).cloned() else {
                    tracing::error!("cannot find {stage} for materialized view {id} in catalog");
                    coord_bail!("cannot find {stage} for materialized view in catalog");
                };
                explain_dataflow(
                    plan,
                    format,
                    &config,
                    &features,
                    &self.catalog().for_session(ctx.session()),
                    cardinality_stats,
                    Some(target_cluster.name.as_str()),
                    dataflow_metainfo,
                )?
            }
            ExplainStage::PhysicalPlan => {
                let Some(plan) = self.catalog().try_get_physical_plan(&gid).cloned() else {
                    tracing::error!("cannot find {stage} for materialized view {id} in catalog");
                    coord_bail!("cannot find {stage} for materialized view in catalog");
                };
                explain_dataflow(
                    plan,
                    format,
                    &config,
                    &features,
                    &self.catalog().for_session(ctx.session()),
                    cardinality_stats,
                    Some(target_cluster.name.as_str()),
                    dataflow_metainfo,
                )?
            }
            _ => {
                coord_bail!("cannot EXPLAIN {} FOR MATERIALIZED VIEW", stage);
            }
        };

        let row = Row::pack_slice(&[Datum::from(explain.as_str())]);

        Ok(Self::send_immediate_rows(row))
    }

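    /// Validates the plan before optimization: checks timeline consistency and system
    /// column references of the view's dependencies, rejects dependencies on
    /// introspection (log) sources, and, for `REFRESH AT` schedules, verifies that the
    /// requested refresh times are still readable under the held read holds.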
    #[instrument]
    fn create_materialized_view_validate(
        &self,
        session: &Session,
        plan: plan::CreateMaterializedViewPlan,
        resolved_ids: ResolvedIds,
        explain_ctx: ExplainContext,
    ) -> Result<CreateMaterializedViewStage, AdapterError> {
        let plan::CreateMaterializedViewPlan {
            materialized_view:
                plan::MaterializedView {
                    expr,
                    cluster_id,
                    refresh_schedule,
                    ..
                },
            ambiguous_columns,
            ..
        } = &plan;

        let expr_depends_on = expr.depends_on();
        self.catalog()
            .validate_timeline_context(expr_depends_on.iter().copied())?;
        self.validate_system_column_references(*ambiguous_columns, &expr_depends_on)?;
        let log_names = expr_depends_on
            .iter()
            .map(|gid| self.catalog.resolve_item_id(gid))
            .flat_map(|item_id| self.catalog().introspection_dependencies(item_id))
            .map(|item_id| self.catalog().get_entry(&item_id).name().item.clone())
            .collect::<Vec<_>>();
        if !log_names.is_empty() {
            return Err(AdapterError::InvalidLogDependency {
                object_type: "materialized view".into(),
                log_names,
            });
        }

        let validity =
            PlanValidity::require_transient_revision(self.catalog().transient_revision());

        if let Some(refresh_schedule) = refresh_schedule {
            if !refresh_schedule.ats.is_empty() && matches!(explain_ctx, ExplainContext::None) {
                let read_holds = self
                    .txn_read_holds
                    .get(session.conn_id())
                    .expect("purification acquired read holds if there are REFRESH ATs");
                let least_valid_read = read_holds.least_valid_read();
                for refresh_at_ts in &refresh_schedule.ats {
                    if !least_valid_read.less_equal(refresh_at_ts) {
                        return Err(AdapterError::InputNotReadableAtRefreshAtTime(
                            *refresh_at_ts,
                            least_valid_read,
                        ));
                    }
                }
                let ids = self
                    .index_oracle(*cluster_id)
                    .sufficient_collections(resolved_ids.collections().copied());
                if !ids.difference(&read_holds.id_bundle()).is_empty() {
                    return Err(AdapterError::ChangedPlan(
                        "the set of possible inputs changed during the creation of the \
                         materialized view"
                            .to_string(),
                    ));
                }
            }
        }

        Ok(CreateMaterializedViewStage::Optimize(
            CreateMaterializedViewOptimize {
                validity,
                plan,
                resolved_ids,
                explain_ctx,
            },
        ))
    }

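    /// Optimizes the materialized view's query through the local MIR, global MIR, and
    /// global LIR stages, producing either a finish stage (for real creation) or an
    /// explain stage (when running under `EXPLAIN`).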
    #[instrument]
    async fn create_materialized_view_optimize(
        &mut self,
        CreateMaterializedViewOptimize {
            validity,
            plan,
            resolved_ids,
            explain_ctx,
        }: CreateMaterializedViewOptimize,
    ) -> Result<StageResult<Box<CreateMaterializedViewStage>>, AdapterError> {
        let plan::CreateMaterializedViewPlan {
            name,
            materialized_view:
                plan::MaterializedView {
                    column_names,
                    cluster_id,
                    non_null_assertions,
                    refresh_schedule,
                    ..
                },
            ..
        } = &plan;

        let compute_instance = self
            .instance_snapshot(*cluster_id)
            .expect("compute instance does not exist");
        let (item_id, global_id) = if let ExplainContext::None = explain_ctx {
            let id_ts = self.get_catalog_write_ts().await;
            self.catalog().allocate_user_id(id_ts).await?
        } else {
            self.allocate_transient_id()
        };

        let (_, view_id) = self.allocate_transient_id();
        let debug_name = self.catalog().resolve_full_name(name, None).to_string();
        let optimizer_config = optimize::OptimizerConfig::from(self.catalog().system_config())
            .override_from(&self.catalog.get_cluster(*cluster_id).config.features())
            .override_from(&explain_ctx);
        let force_non_monotonic = Default::default();

        let mut optimizer = optimize::materialized_view::Optimizer::new(
            self.owned_catalog().as_optimizer_catalog(),
            compute_instance,
            global_id,
            view_id,
            column_names.clone(),
            non_null_assertions.clone(),
            refresh_schedule.clone(),
            debug_name,
            optimizer_config,
            self.optimizer_metrics(),
            force_non_monotonic,
        );

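        // Optimization can take a while, so run the pipeline in a blocking task rather
        // than on the coordinator's main task.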
        let span = Span::current();
        Ok(StageResult::Handle(mz_ore::task::spawn_blocking(
            || "optimize create materialized view",
            move || {
                span.in_scope(|| {
                    let mut pipeline = || -> Result<(
                        optimize::materialized_view::LocalMirPlan,
                        optimize::materialized_view::GlobalMirPlan,
                        optimize::materialized_view::GlobalLirPlan,
                    ), AdapterError> {
                        let _dispatch_guard = explain_ctx.dispatch_guard();

                        let raw_expr = plan.materialized_view.expr.clone();

                        let local_mir_plan = optimizer.catch_unwind_optimize(raw_expr)?;
                        let global_mir_plan =
                            optimizer.catch_unwind_optimize(local_mir_plan.clone())?;
                        let global_lir_plan =
                            optimizer.catch_unwind_optimize(global_mir_plan.clone())?;

                        Ok((local_mir_plan, global_mir_plan, global_lir_plan))
                    };

                    let stage = match pipeline() {
                        Ok((local_mir_plan, global_mir_plan, global_lir_plan)) => {
                            if let ExplainContext::Plan(explain_ctx) = explain_ctx {
                                let (_, df_meta) = global_lir_plan.unapply();
                                CreateMaterializedViewStage::Explain(
                                    CreateMaterializedViewExplain {
                                        validity,
                                        global_id,
                                        plan,
                                        df_meta,
                                        explain_ctx,
                                    },
                                )
                            } else {
                                CreateMaterializedViewStage::Finish(CreateMaterializedViewFinish {
                                    item_id,
                                    global_id,
                                    validity,
                                    plan,
                                    resolved_ids,
                                    local_mir_plan,
                                    global_mir_plan,
                                    global_lir_plan,
                                })
                            }
                        }
                        Err(err) => {
                            let ExplainContext::Plan(explain_ctx) = explain_ctx else {
                                return Err(err);
                            };

                            if explain_ctx.broken {
                                tracing::error!("error while handling EXPLAIN statement: {}", err);
                                CreateMaterializedViewStage::Explain(
                                    CreateMaterializedViewExplain {
                                        global_id,
                                        validity,
                                        plan,
                                        df_meta: Default::default(),
                                        explain_ctx,
                                    },
                                )
                            } else {
                                return Err(err);
                            }
                        }
                    };

                    Ok(Box::new(stage))
                })
            },
        )))
    }

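    /// Finishes creating the materialized view: records the selected `as_of` in the
    /// `CREATE` SQL, writes the new catalog item (dropping any replaced objects),
    /// creates the backing storage collection, installs read policies, and ships the
    /// dataflow.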
    #[instrument]
    async fn create_materialized_view_finish(
        &mut self,
        ctx: &mut ExecuteContext,
        stage: CreateMaterializedViewFinish,
    ) -> Result<StageResult<Box<CreateMaterializedViewStage>>, AdapterError> {
        let CreateMaterializedViewFinish {
            item_id,
            global_id,
            plan:
                plan::CreateMaterializedViewPlan {
                    name,
                    materialized_view:
                        plan::MaterializedView {
                            mut create_sql,
                            expr: raw_expr,
                            dependencies,
                            replacement_target,
                            cluster_id,
                            non_null_assertions,
                            compaction_window,
                            refresh_schedule,
                            ..
                        },
                    drop_ids,
                    if_not_exists,
                    ..
                },
            resolved_ids,
            local_mir_plan,
            global_mir_plan,
            global_lir_plan,
            ..
        } = stage;

        if let Some(target_id) = replacement_target {
            let Some(target) = self.catalog().get_entry(&target_id).materialized_view() else {
                return Err(AdapterError::internal(
                    "create materialized view",
                    "replacement target not a materialized view",
                ));
            };

            let schema_diff = target.desc.latest().diff(global_lir_plan.desc());
            if !schema_diff.is_empty() {
                return Err(AdapterError::ReplacementSchemaMismatch(schema_diff));
            }
        }

        let id_bundle = dataflow_import_id_bundle(global_lir_plan.df_desc(), cluster_id);

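        // Timestamp selection below requires read holds on all inputs. Reuse the
        // transaction's read holds if it has them; otherwise acquire fresh ones here.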
        let read_holds_owned;
        let read_holds = if let Some(txn_reads) = self.txn_read_holds.get(ctx.session().conn_id()) {
            txn_reads
        } else {
            read_holds_owned = self.acquire_read_holds(&id_bundle);
            &read_holds_owned
        };

        let (dataflow_as_of, storage_as_of, until) =
            self.select_timestamps(id_bundle, refresh_schedule.as_ref(), read_holds)?;

        tracing::info!(
            dataflow_as_of = ?dataflow_as_of,
            storage_as_of = ?storage_as_of,
            until = ?until,
            "materialized view timestamp selection",
        );

        let initial_as_of = storage_as_of.clone();

        if let Some(storage_as_of_ts) = storage_as_of.as_option() {
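            // Record the chosen `as_of` in the catalog-stored `CREATE` SQL by re-parsing
            // the statement, setting its `AS OF`, and re-serializing it.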
            let stmt = mz_sql::parse::parse(&create_sql)
                .map_err(|_| {
                    AdapterError::internal(
                        "create materialized view",
                        "original SQL should roundtrip",
                    )
                })?
                .into_element()
                .ast;
            let ast::Statement::CreateMaterializedView(mut stmt) = stmt else {
                panic!("unexpected statement type");
            };
            stmt.as_of = Some(storage_as_of_ts.into());
            create_sql = stmt.to_ast_string_stable();
        }

        let desc = VersionedRelationDesc::new(global_lir_plan.desc().clone());
        let collections = [(RelationVersion::root(), global_id)].into_iter().collect();

        let ops = vec![
            catalog::Op::DropObjects(
                drop_ids
                    .into_iter()
                    .map(catalog::DropObjectInfo::Item)
                    .collect(),
            ),
            catalog::Op::CreateItem {
                id: item_id,
                name: name.clone(),
                item: CatalogItem::MaterializedView(MaterializedView {
                    create_sql,
                    raw_expr: raw_expr.into(),
                    optimized_expr: local_mir_plan.expr().into(),
                    desc,
                    collections,
                    resolved_ids,
                    dependencies,
                    replacement_target,
                    cluster_id,
                    non_null_assertions,
                    custom_logical_compaction_window: compaction_window,
                    refresh_schedule: refresh_schedule.clone(),
                    initial_as_of: Some(initial_as_of.clone()),
                }),
                owner_id: *ctx.session().current_role_id(),
            },
        ];

        let notice_ids = std::iter::repeat_with(|| self.allocate_transient_id())
            .map(|(_item_id, global_id)| global_id)
            .take(global_lir_plan.df_meta().optimizer_notices.len())
            .collect::<Vec<_>>();

        let transact_result = self
            .catalog_transact_with_side_effects(Some(ctx), ops, move |coord, ctx| {
                Box::pin(async move {
                    let output_desc = global_lir_plan.desc().clone();
                    let (mut df_desc, df_meta) = global_lir_plan.unapply();

                    coord
                        .catalog_mut()
                        .set_optimized_plan(global_id, global_mir_plan.df_desc().clone());
                    coord
                        .catalog_mut()
                        .set_physical_plan(global_id, df_desc.clone());

                    let notice_builtin_updates_fut = coord
                        .process_dataflow_metainfo(df_meta, global_id, ctx, notice_ids)
                        .await;

                    df_desc.set_as_of(dataflow_as_of.clone());
                    df_desc.set_initial_as_of(initial_as_of);
                    df_desc.until = until;

                    let storage_metadata = coord.catalog.state().storage_metadata();

                    let mut collection_desc =
                        CollectionDescription::for_other(output_desc, Some(storage_as_of));
                    let mut allow_writes = true;

                    if let Some(target_id) = replacement_target {
                        let target_gid = coord.catalog.get_entry(&target_id).latest_global_id();
                        collection_desc.primary = Some(target_gid);
                        allow_writes = false;
                    }

                    coord
                        .controller
                        .storage
                        .create_collections(
                            storage_metadata,
                            None,
                            vec![(global_id, collection_desc)],
                        )
                        .await
                        .unwrap_or_terminate("cannot fail to append");

                    coord
                        .initialize_storage_read_policies(
                            btreeset![item_id],
                            compaction_window.unwrap_or(CompactionWindow::Default),
                        )
                        .await;

                    coord
                        .ship_dataflow_and_notice_builtin_table_updates(
                            df_desc,
                            cluster_id,
                            notice_builtin_updates_fut,
                        )
                        .await;

                    if allow_writes {
                        coord.allow_writes(cluster_id, global_id);
                    }
                })
            })
            .await;

        match transact_result {
            Ok(_) => Ok(ExecuteResponse::CreatedMaterializedView),
            Err(AdapterError::Catalog(mz_catalog::memory::error::Error {
                kind:
                    mz_catalog::memory::error::ErrorKind::Sql(
                        CatalogError::ItemAlreadyExists(_, _),
                    ),
            })) if if_not_exists => {
                ctx.session()
                    .add_notice(AdapterNotice::ObjectAlreadyExists {
                        name: name.item,
                        ty: "materialized view",
                    });
                Ok(ExecuteResponse::CreatedMaterializedView)
            }
            Err(err) => Err(err),
        }
        .map(StageResult::Response)
    }

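    /// Selects the timestamps for a new materialized view: the dataflow `as_of`, the
    /// storage collection's `as_of` (rounded up to the first refresh when a refresh
    /// schedule is present), and the `until`, which lies just past the last refresh.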
    fn select_timestamps(
        &self,
        id_bundle: CollectionIdBundle,
        refresh_schedule: Option<&RefreshSchedule>,
        read_holds: &ReadHolds<mz_repr::Timestamp>,
    ) -> Result<
        (
            Antichain<mz_repr::Timestamp>,
            Antichain<mz_repr::Timestamp>,
            Antichain<mz_repr::Timestamp>,
        ),
        AdapterError,
    > {
        assert!(
            id_bundle.difference(&read_holds.id_bundle()).is_empty(),
            "we must have read holds for all involved collections"
        );

        let least_valid_read = read_holds.least_valid_read();
        let mut dataflow_as_of = least_valid_read.clone();
        let mut storage_as_of = least_valid_read.clone();

        if let Some(refresh_schedule) = &refresh_schedule {
            if let Some(least_valid_read_ts) = least_valid_read.as_option() {
                if let Some(first_refresh_ts) =
                    refresh_schedule.round_up_timestamp(*least_valid_read_ts)
                {
                    storage_as_of = Antichain::from_elem(first_refresh_ts);
                    dataflow_as_of.join_assign(
                        &self
                            .greatest_available_read(&id_bundle)
                            .meet(&storage_as_of),
                    );
                } else {
                    let last_refresh = refresh_schedule.last_refresh().expect(
                        "if round_up_timestamp returned None, then there should be a last refresh",
                    );

                    return Err(AdapterError::MaterializedViewWouldNeverRefresh(
                        last_refresh,
                        *least_valid_read_ts,
                    ));
                }
            } else {
                soft_panic_or_log!("creating a materialized view with an empty `as_of`");
            }
        }

        let until_ts = refresh_schedule
            .and_then(|s| s.last_refresh())
            .and_then(|r| r.try_step_forward());
        let until = Antichain::from_iter(until_ts);

        Ok((dataflow_as_of, storage_as_of, until))
    }

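    /// Renders the `EXPLAIN` output for a `CREATE MATERIALIZED VIEW` statement from the
    /// optimizer trace collected during the optimize stage.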
    #[instrument]
    async fn create_materialized_view_explain(
        &self,
        session: &Session,
        CreateMaterializedViewExplain {
            global_id,
            plan:
                plan::CreateMaterializedViewPlan {
                    name,
                    materialized_view:
                        plan::MaterializedView {
                            column_names,
                            cluster_id,
                            ..
                        },
                    ..
                },
            df_meta,
            explain_ctx:
                ExplainPlanContext {
                    config,
                    format,
                    stage,
                    optimizer_trace,
                    ..
                },
            ..
        }: CreateMaterializedViewExplain,
    ) -> Result<StageResult<Box<CreateMaterializedViewStage>>, AdapterError> {
        let session_catalog = self.catalog().for_session(session);
        let expr_humanizer = {
            let full_name = self.catalog().resolve_full_name(&name, None);
            let transient_items = btreemap! {
                global_id => TransientItem::new(
                    Some(full_name.into_parts()),
                    Some(column_names.iter().map(|c| c.to_string()).collect()),
                )
            };
            ExprHumanizerExt::new(transient_items, &session_catalog)
        };

        let target_cluster = self.catalog().get_cluster(cluster_id);

        let features = OptimizerFeatures::from(self.catalog().system_config())
            .override_from(&target_cluster.config.features())
            .override_from(&config.features);

        let rows = optimizer_trace
            .into_rows(
                format,
                &config,
                &features,
                &expr_humanizer,
                None,
                Some(target_cluster),
                df_meta,
                stage,
                plan::ExplaineeStatementKind::CreateMaterializedView,
                None,
            )
            .await?;

        Ok(StageResult::Response(Self::send_immediate_rows(rows)))
    }

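    /// Explains filter pushdown for an existing materialized view, using its cached
    /// physical plan and the collection's current read frontier to bound `mz_now()` for
    /// each source import.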
    pub(crate) async fn explain_pushdown_materialized_view(
        &self,
        ctx: ExecuteContext,
        item_id: CatalogItemId,
    ) {
        let CatalogItem::MaterializedView(mview) = self.catalog().get_entry(&item_id).item() else {
            unreachable!()
        };
        let gid = mview.global_id_writes();
        let mview = mview.clone();

        let Some(plan) = self.catalog().try_get_physical_plan(&gid).cloned() else {
            let msg = format!("cannot find plan for materialized view {item_id} in catalog");
            tracing::error!("{msg}");
            ctx.retire(Err(anyhow!("{msg}").into()));
            return;
        };

        let read_holds =
            Some(self.acquire_read_holds(&dataflow_import_id_bundle(&plan, mview.cluster_id)));

        let frontiers = self
            .controller
            .compute
            .collection_frontiers(gid, Some(mview.cluster_id))
            .expect("materialized view exists");

        let as_of = frontiers.read_frontier.to_owned();

        let until = mview
            .refresh_schedule
            .as_ref()
            .and_then(|s| s.last_refresh())
            .unwrap_or(mz_repr::Timestamp::MAX);

        let mz_now = match as_of.as_option() {
            Some(&as_of) => {
                ResultSpec::value_between(Datum::MzTimestamp(as_of), Datum::MzTimestamp(until))
            }
            None => ResultSpec::value_all(),
        };

        self.execute_explain_pushdown_with_read_holds(
            ctx,
            as_of,
            mz_now,
            read_holds,
            plan.source_imports
                .into_iter()
                .filter_map(|(id, import)| import.desc.arguments.operators.map(|mfp| (id, mfp))),
        )
        .await
    }
}