Skip to main content

mz_compute_types/
plan.rs

1// Copyright Materialize, Inc. and contributors. All rights reserved.
2//
3// Use of this software is governed by the Business Source License
4// included in the LICENSE file.
5//
6// As of the Change Date specified in that file, in accordance with
7// the Business Source License, use of this software will be governed
8// by the Apache License, Version 2.0.
9
10//! An explicit representation of a rendering plan for provided dataflows.
11
12#![warn(missing_debug_implementations)]
13
14use std::collections::{BTreeMap, BTreeSet};
15
16use columnar::Columnar;
17use mz_expr::{
18    CollectionPlan, EvalError, Id, LetRecLimit, LocalId, MapFilterProject, MirScalarExpr,
19    OptimizedMirRelationExpr, TableFunc,
20};
21use mz_ore::soft_assert_eq_no_log;
22use mz_ore::str::Indent;
23use mz_repr::explain::text::text_string_at;
24use mz_repr::explain::{DummyHumanizer, ExplainConfig, ExprHumanizer, PlanRenderingContext};
25use mz_repr::optimize::OptimizerFeatures;
26use mz_repr::{Diff, GlobalId, Row};
27use serde::{Deserialize, Serialize};
28
29use crate::dataflows::DataflowDescription;
30use crate::plan::join::JoinPlan;
31use crate::plan::reduce::{KeyValPlan, ReducePlan};
32use crate::plan::threshold::ThresholdPlan;
33use crate::plan::top_k::TopKPlan;
34use crate::plan::transform::{Transform, TransformConfig};
35
36mod lowering;
37
38pub mod interpret;
39pub mod join;
40pub mod reduce;
41pub mod render_plan;
42pub mod threshold;
43pub mod top_k;
44pub mod transform;
45
/// The forms in which an operator's output is available.
///
/// These forms may include "raw", meaning as a streamed collection, but also any
/// number of "arranged" representations.
///
/// Each arranged representation is described by a `KeyValRowMapping`, or rather
/// at the moment by its three fields in a triple. These fields explain how to form
/// a "key" by applying some expressions to each row, how to select "values" from
/// columns not explicitly captured by the key, and how to return to the original
/// row from the concatenation of key and value. Further explanation is available
/// in the documentation for `KeyValRowMapping`.
///
/// Invariant: at least one form must exist, i.e. either `raw` is true or
/// `arranged` is non-empty (asserted in `arbitrary_arrangement`).
#[derive(
    Clone,
    Debug,
    Default,
    Deserialize,
    Eq,
    Ord,
    PartialEq,
    PartialOrd,
    Serialize
)]
pub struct AvailableCollections {
    /// Whether the collection exists in unarranged form.
    pub raw: bool,
    /// The list of available arrangements, presented as a `KeyValRowMapping`,
    /// but here represented by a triple `(to_key, to_val, to_row)` instead.
    /// The documentation for `KeyValRowMapping` explains these fields better.
    pub arranged: Vec<(Vec<MirScalarExpr>, Vec<usize>, Vec<usize>)>,
}
76
77impl AvailableCollections {
78    /// Represent a collection that has no arrangements.
79    pub fn new_raw() -> Self {
80        Self {
81            raw: true,
82            arranged: Vec::new(),
83        }
84    }
85
86    /// Represent a collection that is arranged in the specified ways.
87    pub fn new_arranged(arranged: Vec<(Vec<MirScalarExpr>, Vec<usize>, Vec<usize>)>) -> Self {
88        assert!(
89            !arranged.is_empty(),
90            "Invariant violated: at least one collection must exist"
91        );
92        Self {
93            raw: false,
94            arranged,
95        }
96    }
97
98    /// Get some arrangement, if one exists.
99    pub fn arbitrary_arrangement(&self) -> Option<&(Vec<MirScalarExpr>, Vec<usize>, Vec<usize>)> {
100        assert!(
101            self.raw || !self.arranged.is_empty(),
102            "Invariant violated: at least one collection must exist"
103        );
104        self.arranged.get(0)
105    }
106}
107
/// An identifier for an LIR node.
///
/// Identifiers are dataflow-local (see [`Plan::lir_id`]); the wrapped `u64`
/// is exposed via `From<LirId> for u64` and the `Display` implementation.
#[derive(
    Clone,
    Copy,
    Debug,
    Deserialize,
    Eq,
    Ord,
    PartialEq,
    PartialOrd,
    Serialize,
    Columnar
)]
pub struct LirId(u64);
122
123impl LirId {
124    fn as_u64(&self) -> u64 {
125        self.0
126    }
127}
128
129impl From<LirId> for u64 {
130    fn from(value: LirId) -> Self {
131        value.as_u64()
132    }
133}
134
135impl std::fmt::Display for LirId {
136    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
137        write!(f, "{}", self.0)
138    }
139}
140
/// A rendering plan with as much conditional logic as possible removed.
///
/// A `Plan` pairs an identifier with the operator itself; the tree structure
/// lives in the [`PlanNode`] variants.
#[derive(Clone, Debug, Deserialize, Eq, Ord, PartialEq, PartialOrd, Serialize)]
pub struct Plan<T = mz_repr::Timestamp> {
    /// A dataflow-local identifier.
    pub lir_id: LirId,
    /// The underlying operator.
    pub node: PlanNode<T>,
}
149
/// The actual AST node of the `Plan`.
#[derive(Clone, Debug, Deserialize, Eq, Ord, PartialEq, PartialOrd, Serialize)]
pub enum PlanNode<T = mz_repr::Timestamp> {
    /// A collection containing a pre-determined collection.
    Constant {
        /// Explicit update triples for the collection.
        rows: Result<Vec<(Row, T, Diff)>, EvalError>,
    },
    /// A reference to a bound collection.
    ///
    /// This is commonly either an external reference to an existing source or
    /// maintained arrangement, or an internal reference to a `Let` identifier.
    Get {
        /// A global or local identifier naming the collection.
        id: Id,
        /// Arrangements that will be available.
        ///
        /// The collection will also be loaded if available, which it will
        /// not be for imported data, but which it may be for locally defined
        /// data.
        // TODO: Be more explicit about whether a collection is available,
        // although one can always produce it from an arrangement, and it
        // seems generally advantageous to do that instead (to avoid cloning
        // rows, by using `mfp` first on borrowed data).
        keys: AvailableCollections,
        /// The actions to take when introducing the collection.
        plan: GetPlan,
    },
    /// Binds `value` to `id`, and then results in `body` with that binding.
    ///
    /// This stage has the effect of sharing `value` across multiple possible
    /// uses in `body`, and is the only mechanism we have for sharing collection
    /// information across parts of a dataflow.
    ///
    /// The binding is not available outside of `body`.
    Let {
        /// The local identifier to be used, available to `body` as `Id::Local(id)`.
        id: LocalId,
        /// The collection that should be bound to `id`.
        value: Box<Plan<T>>,
        /// The collection that results, which is allowed to contain `Get` stages
        /// that reference `Id::Local(id)`.
        body: Box<Plan<T>>,
    },
    /// Binds `values` to `ids`, evaluates them potentially recursively, and returns `body`.
    ///
    /// All bindings are available to all bindings, and to `body`.
    /// The contents of each binding are initially empty, and then updated through a sequence
    /// of iterations in which each binding is updated in sequence, from the most recent values
    /// of all bindings.
    LetRec {
        /// The local identifiers to be used, available to `body` as `Id::Local(id)`.
        ids: Vec<LocalId>,
        /// The collections that should be bound to the corresponding entries of `ids`.
        values: Vec<Plan<T>>,
        /// Maximum number of iterations. See further info on the MIR `LetRec`.
        limits: Vec<Option<LetRecLimit>>,
        /// The collection that results, which is allowed to contain `Get` stages
        /// that reference `Id::Local(id)`.
        body: Box<Plan<T>>,
    },
    /// Map, Filter, and Project operators.
    ///
    /// This stage contains work that we would ideally like to fuse to other plan
    /// stages, but for practical reasons cannot. For example: threshold, topk,
    /// and sometimes reduce stages are not able to absorb this operator.
    Mfp {
        /// The input collection.
        input: Box<Plan<T>>,
        /// Linear operator to apply to each record.
        mfp: MapFilterProject,
        /// Whether the input is from an arrangement, and if so,
        /// whether we can seek to a specific value therein
        input_key_val: Option<(Vec<MirScalarExpr>, Option<Row>)>,
    },
    /// A variable number of output records for each input record.
    ///
    /// This stage is a bit of a catch-all for logic that does not easily fit in
    /// map stages. This includes table valued functions, but also functions of
    /// multiple arguments, and functions that modify the sign of updates.
    ///
    /// This stage allows a `MapFilterProject` operator to be fused to its output,
    /// and this can be very important as otherwise the output of `func` is just
    /// appended to the input record, for as many outputs as it has. This has the
    /// unpleasant default behavior of repeating potentially large records that
    /// are being unpacked, producing quadratic output in those cases. Instead,
    /// in these cases use a `mfp` member that projects away these large fields.
    FlatMap {
        /// The particular arrangement of the input we expect to use,
        /// if any
        input_key: Option<Vec<MirScalarExpr>>,
        /// The input collection.
        input: Box<Plan<T>>,
        /// Expressions that for each row prepare the arguments to `func`.
        exprs: Vec<MirScalarExpr>,
        /// The variable-record emitting function.
        func: TableFunc,
        /// Linear operator to apply to each record produced by `func`.
        mfp_after: MapFilterProject,
    },
    /// A multiway relational equijoin, with fused map, filter, and projection.
    ///
    /// This stage performs a multiway join among `inputs`, using the equality
    /// constraints expressed in `plan`. The plan also describes the implementation
    /// strategy we will use, and any pushed down per-record work.
    Join {
        /// An ordered list of inputs that will be joined.
        inputs: Vec<Plan<T>>,
        /// Detailed information about the implementation of the join.
        ///
        /// This includes information about the implementation strategy, but also
        /// any map, filter, project work that we might follow the join with, but
        /// potentially pushed down into the implementation of the join.
        plan: JoinPlan,
    },
    /// Aggregation by key.
    Reduce {
        /// The particular arrangement of the input we expect to use,
        /// if any
        input_key: Option<Vec<MirScalarExpr>>,
        /// The input collection.
        input: Box<Plan<T>>,
        /// A plan for changing input records into key, value pairs.
        key_val_plan: KeyValPlan,
        /// A plan for performing the reduce.
        ///
        /// The implementation of reduction has several different strategies based
        /// on the properties of the reduction, and the input itself. Please check
        /// out the documentation for this type for more detail.
        plan: ReducePlan,
        /// An MFP that must be applied to results. The projection part of this
        /// MFP must preserve the key for the reduction; otherwise, the results
        /// become undefined. Additionally, the MFP must be free from temporal
        /// predicates so that it can be readily evaluated.
        /// TODO(ggevay): should we wrap this in [`mz_expr::SafeMfpPlan`]?
        mfp_after: MapFilterProject,
    },
    /// Key-based "Top K" operator, retaining the first K records in each group.
    TopK {
        /// The input collection.
        input: Box<Plan<T>>,
        /// A plan for performing the Top-K.
        ///
        /// The implementation of reduction has several different strategies based
        /// on the properties of the reduction, and the input itself. Please check
        /// out the documentation for this type for more detail.
        top_k_plan: TopKPlan,
    },
    /// Inverts the sign of each update.
    Negate {
        /// The input collection.
        input: Box<Plan<T>>,
    },
    /// Filters records that accumulate negatively.
    ///
    /// Although the operator suppresses updates, it is a stateful operator taking
    /// resources proportional to the number of records with non-zero accumulation.
    Threshold {
        /// The input collection.
        input: Box<Plan<T>>,
        /// A plan for performing the threshold.
        ///
        /// The implementation of reduction has several different strategies based
        /// on the properties of the reduction, and the input itself. Please check
        /// out the documentation for this type for more detail.
        threshold_plan: ThresholdPlan,
    },
    /// Adds the contents of the input collections.
    ///
    /// Importantly, this is *multiset* union, so the multiplicities of records will
    /// add. This is in contrast to *set* union, where the multiplicities would be
    /// capped at one. A set union can be formed with `Union` followed by `Reduce`
    /// implementing the "distinct" operator.
    Union {
        /// The input collections
        inputs: Vec<Plan<T>>,
        /// Whether to consolidate the output, e.g., cancel negated records.
        consolidate_output: bool,
    },
    /// The `input` plan, but with additional arrangements.
    ///
    /// This operator does not change the logical contents of `input`, but ensures
    /// that certain arrangements are available in the results. This operator can
    /// be important for e.g. the `Join` stage which benefits from multiple arrangements
    /// or to cap a `Plan` so that indexes can be exported.
    ArrangeBy {
        /// The key that must be used to access the input.
        input_key: Option<Vec<MirScalarExpr>>,
        /// The input collection.
        input: Box<Plan<T>>,
        /// The MFP that must be applied to the input.
        input_mfp: MapFilterProject,
        /// A list of arrangement keys, and possibly a raw collection,
        /// that will be added to those of the input. Does not include
        /// any other existing arrangements.
        forms: AvailableCollections,
    },
}
348
349impl<T> PlanNode<T> {
350    /// Iterates through references to child expressions.
351    pub fn children(&self) -> impl Iterator<Item = &Plan<T>> {
352        let mut first = None;
353        let mut second = None;
354        let mut rest = None;
355        let mut last = None;
356
357        use PlanNode::*;
358        match self {
359            Constant { .. } | Get { .. } => (),
360            Let { value, body, .. } => {
361                first = Some(&**value);
362                second = Some(&**body);
363            }
364            LetRec { values, body, .. } => {
365                rest = Some(values);
366                last = Some(&**body);
367            }
368            Mfp { input, .. }
369            | FlatMap { input, .. }
370            | Reduce { input, .. }
371            | TopK { input, .. }
372            | Negate { input, .. }
373            | Threshold { input, .. }
374            | ArrangeBy { input, .. } => {
375                first = Some(&**input);
376            }
377            Join { inputs, .. } | Union { inputs, .. } => {
378                rest = Some(inputs);
379            }
380        }
381
382        first
383            .into_iter()
384            .chain(second)
385            .chain(rest.into_iter().flatten())
386            .chain(last)
387    }
388
389    /// Iterates through mutable references to child expressions.
390    pub fn children_mut(&mut self) -> impl Iterator<Item = &mut Plan<T>> {
391        let mut first = None;
392        let mut second = None;
393        let mut rest = None;
394        let mut last = None;
395
396        use PlanNode::*;
397        match self {
398            Constant { .. } | Get { .. } => (),
399            Let { value, body, .. } => {
400                first = Some(&mut **value);
401                second = Some(&mut **body);
402            }
403            LetRec { values, body, .. } => {
404                rest = Some(values);
405                last = Some(&mut **body);
406            }
407            Mfp { input, .. }
408            | FlatMap { input, .. }
409            | Reduce { input, .. }
410            | TopK { input, .. }
411            | Negate { input, .. }
412            | Threshold { input, .. }
413            | ArrangeBy { input, .. } => {
414                first = Some(&mut **input);
415            }
416            Join { inputs, .. } | Union { inputs, .. } => {
417                rest = Some(inputs);
418            }
419        }
420
421        first
422            .into_iter()
423            .chain(second)
424            .chain(rest.into_iter().flatten())
425            .chain(last)
426    }
427}
428
429impl<T> PlanNode<T> {
430    /// Attach an `lir_id` to a `PlanNode` to make a complete `Plan`.
431    pub fn as_plan(self, lir_id: LirId) -> Plan<T> {
432        Plan { lir_id, node: self }
433    }
434}
435
436impl Plan {
437    /// Pretty-print this [Plan] to a string.
438    pub fn pretty(&self) -> String {
439        let config = ExplainConfig::default();
440        self.explain(&config, None)
441    }
442
443    /// Pretty-print this [Plan] to a string using a custom
444    /// [ExplainConfig] and an optionally provided [ExprHumanizer].
445    pub fn explain(&self, config: &ExplainConfig, humanizer: Option<&dyn ExprHumanizer>) -> String {
446        text_string_at(self, || PlanRenderingContext {
447            indent: Indent::default(),
448            humanizer: humanizer.unwrap_or(&DummyHumanizer),
449            annotations: BTreeMap::default(),
450            config,
451            ambiguous_ids: BTreeSet::default(),
452        })
453    }
454}
455
/// How a `Get` stage will be rendered.
#[derive(Clone, Debug, Serialize, Deserialize, Eq, PartialEq, Ord, PartialOrd)]
pub enum GetPlan {
    /// Simply pass input arrangements on to the next stage.
    PassArrangements,
    /// Using the supplied key, optionally seek the row, and apply the MFP.
    Arrangement(Vec<MirScalarExpr>, Option<Row>, MapFilterProject),
    /// Scan the input collection (unarranged) and apply the MFP.
    Collection(MapFilterProject),
}
466
impl<T: timely::progress::Timestamp> Plan<T> {
    /// Convert the dataflow description into one that uses render plans.
    ///
    /// Lowers MIR to LIR and then applies a sequence of LIR-level refinements;
    /// errors from lowering or from the consolidation-relaxation transform
    /// are propagated as `String`s.
    #[mz_ore::instrument(
        target = "optimizer",
        level = "debug",
        fields(path.segment = "finalize_dataflow")
    )]
    pub fn finalize_dataflow(
        desc: DataflowDescription<OptimizedMirRelationExpr>,
        features: &OptimizerFeatures,
    ) -> Result<DataflowDescription<Self>, String> {
        // First, we lower the dataflow description from MIR to LIR.
        let mut dataflow = Self::lower_dataflow(desc, features)?;

        // Subsequently, we perform plan refinements for the dataflow.
        Self::refine_source_mfps(&mut dataflow);

        if features.enable_consolidate_after_union_negate {
            Self::refine_union_negate_consolidation(&mut dataflow);
        }

        // Single-time (one-shot SELECT) dataflows admit monotonic operators.
        if dataflow.is_single_time() {
            Self::refine_single_time_operator_selection(&mut dataflow);

            // The relaxation of the `must_consolidate` flag performs an LIR-based
            // analysis and transform under checked recursion. By a similar argument
            // made in `from_mir`, we do not expect the recursion limit to be hit.
            // However, if that happens, we propagate an error to the caller.
            // To apply the transform, we first obtain monotonic source and index
            // global IDs and add them to a `TransformConfig` instance.
            let monotonic_ids = dataflow
                .source_imports
                .iter()
                .filter_map(|(id, source_import)| source_import.monotonic.then_some(*id))
                .chain(
                    dataflow
                        .index_imports
                        .iter()
                        .filter_map(|(_id, index_import)| {
                            if index_import.monotonic {
                                Some(index_import.desc.on_id)
                            } else {
                                None
                            }
                        }),
                )
                .collect::<BTreeSet<_>>();

            let config = TransformConfig { monotonic_ids };
            Self::refine_single_time_consolidation(&mut dataflow, &config)?;
        }

        // Validate invariants of the refined dataflow (soft assertion only).
        soft_assert_eq_no_log!(dataflow.check_invariants(), Ok(()));

        mz_repr::explain::trace_plan(&dataflow);

        Ok(dataflow)
    }

    /// Lowers the dataflow description from MIR to LIR. To this end, the
    /// method collects all available arrangements and based on this information
    /// creates plans for every object to be built for the dataflow.
    #[mz_ore::instrument(
        target = "optimizer",
        level = "debug",
        fields(path.segment ="mir_to_lir")
    )]
    fn lower_dataflow(
        desc: DataflowDescription<OptimizedMirRelationExpr>,
        features: &OptimizerFeatures,
    ) -> Result<DataflowDescription<Self>, String> {
        let context = lowering::Context::new(desc.debug_name.clone(), features);
        let dataflow = context.lower(desc)?;

        mz_repr::explain::trace_plan(&dataflow);

        Ok(dataflow)
    }

    /// Refines the source instance descriptions for sources imported by `dataflow` to
    /// push down common MFP expressions.
    #[mz_ore::instrument(
        target = "optimizer",
        level = "debug",
        fields(path.segment = "refine_source_mfps")
    )]
    fn refine_source_mfps(dataflow: &mut DataflowDescription<Self>) {
        // Extract MFPs from Get operators for sources, and extract what we can for the source.
        // For each source, we want to find `&mut MapFilterProject` for each `Get` expression.
        for (source_id, source_import) in dataflow.source_imports.iter_mut() {
            let source = &mut source_import.desc;
            let mut identity_present = false;
            let mut mfps = Vec::new();
            // Walk each built object's plan tree looking for `Get`s of this source.
            for build_desc in dataflow.objects_to_build.iter_mut() {
                let mut todo = vec![&mut build_desc.plan];
                while let Some(expression) = todo.pop() {
                    let node = &mut expression.node;
                    if let PlanNode::Get { id, plan, .. } = node {
                        if *id == mz_expr::Id::Global(*source_id) {
                            match plan {
                                GetPlan::Collection(mfp) => mfps.push(mfp),
                                GetPlan::PassArrangements => {
                                    identity_present = true;
                                }
                                GetPlan::Arrangement(..) => {
                                    panic!("Surprising `GetPlan` for imported source: {:?}", plan);
                                }
                            }
                        }
                    } else {
                        todo.extend(node.children_mut());
                    }
                }
            }

            // Direct exports of sources are possible, and prevent pushdown.
            identity_present |= dataflow
                .index_exports
                .values()
                .any(|(x, _)| x.on_id == *source_id);
            identity_present |= dataflow.sink_exports.values().any(|x| x.from == *source_id);

            if !identity_present && !mfps.is_empty() {
                // Extract a common prefix `MapFilterProject` from `mfps`.
                let common = MapFilterProject::extract_common(&mut mfps[..]);
                // Apply common expressions to the source's `MapFilterProject`.
                let mut mfp = if let Some(mfp) = source.arguments.operators.take() {
                    MapFilterProject::compose(mfp, common)
                } else {
                    common
                };
                mfp.optimize();
                source.arguments.operators = Some(mfp);
            }
        }
        mz_repr::explain::trace_plan(dataflow);
    }

    /// Changes the `consolidate_output` flag of such Unions that have at least one Negated input.
    #[mz_ore::instrument(
        target = "optimizer",
        level = "debug",
        fields(path.segment = "refine_union_negate_consolidation")
    )]
    fn refine_union_negate_consolidation(dataflow: &mut DataflowDescription<Self>) {
        for build_desc in dataflow.objects_to_build.iter_mut() {
            let mut todo = vec![&mut build_desc.plan];
            while let Some(expression) = todo.pop() {
                let node = &mut expression.node;
                match node {
                    PlanNode::Union {
                        inputs,
                        consolidate_output,
                        ..
                    } => {
                        if inputs
                            .iter()
                            .any(|input| matches!(input.node, PlanNode::Negate { .. }))
                        {
                            *consolidate_output = true;
                        }
                    }
                    _ => {}
                }
                todo.extend(node.children_mut());
            }
        }
        mz_repr::explain::trace_plan(dataflow);
    }

    /// Refines the plans of objects to be built as part of `dataflow` to take advantage
    /// of monotonic operators if the dataflow refers to a single-time, i.e., is for a
    /// one-shot SELECT query.
    #[mz_ore::instrument(
        target = "optimizer",
        level = "debug",
        fields(path.segment = "refine_single_time_operator_selection")
    )]
    fn refine_single_time_operator_selection(dataflow: &mut DataflowDescription<Self>) {
        // We should only reach here if we have a one-shot SELECT query, i.e.,
        // a single-time dataflow.
        assert!(dataflow.is_single_time());

        // Upgrade single-time plans to monotonic.
        for build_desc in dataflow.objects_to_build.iter_mut() {
            let mut todo = vec![&mut build_desc.plan];
            while let Some(expression) = todo.pop() {
                let node = &mut expression.node;
                match node {
                    PlanNode::Reduce { plan, .. } => {
                        // Upgrade non-monotonic hierarchical plans to monotonic with mandatory consolidation.
                        match plan {
                            ReducePlan::Hierarchical(hierarchical) => {
                                hierarchical.as_monotonic(true);
                            }
                            _ => {
                                // Nothing to do for other plans, and doing nothing is safe for future variants.
                            }
                        }
                        todo.extend(node.children_mut());
                    }
                    PlanNode::TopK { top_k_plan, .. } => {
                        top_k_plan.as_monotonic(true);
                        todo.extend(node.children_mut());
                    }
                    PlanNode::LetRec { body, .. } => {
                        // Only the non-recursive `body` is restricted to a single time.
                        todo.push(body);
                    }
                    _ => {
                        // Nothing to do for other expressions, and doing nothing is safe for future expressions.
                        todo.extend(node.children_mut());
                    }
                }
            }
        }
        mz_repr::explain::trace_plan(dataflow);
    }

    /// Refines the plans of objects to be built as part of a single-time `dataflow` to relax
    /// the setting of the `must_consolidate` attribute of monotonic operators, if necessary,
    /// whenever the input is deemed to be physically monotonic.
    #[mz_ore::instrument(
        target = "optimizer",
        level = "debug",
        fields(path.segment = "refine_single_time_consolidation")
    )]
    fn refine_single_time_consolidation(
        dataflow: &mut DataflowDescription<Self>,
        config: &TransformConfig,
    ) -> Result<(), String> {
        // We should only reach here if we have a one-shot SELECT query, i.e.,
        // a single-time dataflow.
        assert!(dataflow.is_single_time());

        let transform = transform::RelaxMustConsolidate::<T>::new();
        for build_desc in dataflow.objects_to_build.iter_mut() {
            transform
                .transform(config, &mut build_desc.plan)
                .map_err(|_| "Maximum recursion limit error in consolidation relaxation.")?;
        }
        mz_repr::explain::trace_plan(dataflow);
        Ok(())
    }
}
712
713impl<T> CollectionPlan for PlanNode<T> {
714    fn depends_on_into(&self, out: &mut BTreeSet<GlobalId>) {
715        match self {
716            PlanNode::Constant { rows: _ } => (),
717            PlanNode::Get {
718                id,
719                keys: _,
720                plan: _,
721            } => match id {
722                Id::Global(id) => {
723                    out.insert(*id);
724                }
725                Id::Local(_) => (),
726            },
727            PlanNode::Let { id: _, value, body } => {
728                value.depends_on_into(out);
729                body.depends_on_into(out);
730            }
731            PlanNode::LetRec {
732                ids: _,
733                values,
734                limits: _,
735                body,
736            } => {
737                for value in values.iter() {
738                    value.depends_on_into(out);
739                }
740                body.depends_on_into(out);
741            }
742            PlanNode::Join { inputs, plan: _ }
743            | PlanNode::Union {
744                inputs,
745                consolidate_output: _,
746            } => {
747                for input in inputs {
748                    input.depends_on_into(out);
749                }
750            }
751            PlanNode::Mfp {
752                input,
753                mfp: _,
754                input_key_val: _,
755            }
756            | PlanNode::FlatMap {
757                input_key: _,
758                input,
759                exprs: _,
760                func: _,
761                mfp_after: _,
762            }
763            | PlanNode::ArrangeBy {
764                input_key: _,
765                input,
766                input_mfp: _,
767                forms: _,
768            }
769            | PlanNode::Reduce {
770                input_key: _,
771                input,
772                key_val_plan: _,
773                plan: _,
774                mfp_after: _,
775            }
776            | PlanNode::TopK {
777                input,
778                top_k_plan: _,
779            }
780            | PlanNode::Negate { input }
781            | PlanNode::Threshold {
782                input,
783                threshold_plan: _,
784            } => {
785                input.depends_on_into(out);
786            }
787        }
788    }
789}
790
791impl<T> CollectionPlan for Plan<T> {
792    fn depends_on_into(&self, out: &mut BTreeSet<GlobalId>) {
793        self.node.depends_on_into(out);
794    }
795}
796
/// Returns bucket sizes, descending, suitable for hierarchical decomposition of an operator, based
/// on the expected number of rows that will have the same group key.
fn bucketing_of_expected_group_size(expected_group_size: Option<u64>) -> Vec<u64> {
    // Plan for 4B records in the expected case if the user didn't specify a group size.
    let limit = expected_group_size.unwrap_or(4_000_000_000);

    // NOTE(vmarcos): The fan-in of 16 defined below is used in the tuning advice built-in view
    // mz_introspection.mz_expected_group_size_advice.
    //
    // Distribute buckets in powers of 16, so that we can strike a balance between how many inputs
    // each layer gets from the preceding layer, while also limiting the number of layers.
    // `saturating_mul` pins the sequence at `u64::MAX`, where `take_while` then stops it.
    let mut buckets: Vec<u64> =
        std::iter::successors(Some(16u64), |b| Some(b.saturating_mul(16)))
            .take_while(|b| *b < limit)
            .collect();

    // Largest bucket first.
    buckets.reverse();
    buckets
}