mz_expr/
relation.rs

1// Copyright Materialize, Inc. and contributors. All rights reserved.
2//
3// Use of this software is governed by the Business Source License
4// included in the LICENSE file.
5//
6// As of the Change Date specified in that file, in accordance with
7// the Business Source License, use of this software will be governed
8// by the Apache License, Version 2.0.
9
10#![warn(missing_docs)]
11
12use std::cmp::{Ordering, max};
13use std::collections::{BTreeMap, BTreeSet};
14use std::fmt;
15use std::fmt::{Display, Formatter};
16use std::hash::{DefaultHasher, Hash, Hasher};
17use std::num::NonZeroU64;
18use std::time::Instant;
19
20use bytesize::ByteSize;
21use differential_dataflow::containers::{Columnation, CopyRegion};
22use itertools::Itertools;
23use mz_lowertest::MzReflect;
24use mz_ore::cast::CastFrom;
25use mz_ore::collections::CollectionExt;
26use mz_ore::id_gen::IdGen;
27use mz_ore::metrics::Histogram;
28use mz_ore::num::NonNeg;
29use mz_ore::soft_assert_no_log;
30use mz_ore::stack::RecursionLimitError;
31use mz_ore::str::Indent;
32use mz_repr::adt::numeric::NumericMaxScale;
33use mz_repr::explain::text::text_string_at;
34use mz_repr::explain::{
35    DummyHumanizer, ExplainConfig, ExprHumanizer, IndexUsageType, PlanRenderingContext,
36};
37use mz_repr::{
38    ColumnName, Datum, Diff, GlobalId, IntoRowIterator, Row, RowIterator, SqlColumnType,
39    SqlRelationType, SqlScalarType,
40};
41use serde::{Deserialize, Serialize};
42
43use crate::Id::Local;
44use crate::explain::{HumanizedExpr, HumanizerMode};
45use crate::relation::func::{AggregateFunc, LagLeadType, TableFunc};
46use crate::row::{RowCollection, SortedRowCollectionIter};
47use crate::visit::{Visit, VisitChildren};
48use crate::{
49    EvalError, FilterCharacteristics, Id, LocalId, MirScalarExpr, UnaryFunc, VariadicFunc,
50    func as scalar_func,
51};
52
53pub mod canonicalize;
54pub mod func;
55pub mod join_input_mapper;
56
57/// A recursion limit to be used for stack-safe traversals of [`MirRelationExpr`] trees.
58///
59/// The recursion limit must be large enough to accommodate for the linear representation
60/// of some pathological but frequently occurring query fragments.
61///
62/// For example, in MIR we could have long chains of
63/// - (1) `Let` bindings,
64/// - (2) `CallBinary` calls with associative functions such as `+`
65///
66/// Until we fix those, we need to stick with the larger recursion limit.
67pub const RECURSION_LIMIT: usize = 2048;
68
69/// A trait for types that describe how to build a collection.
70pub trait CollectionPlan {
71    /// Collects the set of global identifiers from dataflows referenced in Get.
72    fn depends_on_into(&self, out: &mut BTreeSet<GlobalId>);
73
74    /// Returns the set of global identifiers from dataflows referenced in Get.
75    ///
76    /// See [`CollectionPlan::depends_on_into`] to reuse an existing `BTreeSet`.
77    fn depends_on(&self) -> BTreeSet<GlobalId> {
78        let mut out = BTreeSet::new();
79        self.depends_on_into(&mut out);
80        out
81    }
82}
83
84/// An abstract syntax tree which defines a collection.
85///
86/// The AST is meant to reflect the capabilities of the `differential_dataflow::Collection` type,
87/// written generically enough to avoid run-time compilation work.
88///
89/// `derived_hash_with_manual_eq` was complaining for the wrong reason: This lint exists because
90/// it's bad when `Eq` doesn't agree with `Hash`, which is often quite likely if one of them is
91/// implemented manually. However, our manual implementation of `Eq` _will_ agree with the derived
92/// one. This is because the reason for the manual implementation is not to change the semantics
93/// from the derived one, but to avoid stack overflows.
94#[allow(clippy::derived_hash_with_manual_eq)]
95#[derive(Clone, Debug, Ord, PartialOrd, Serialize, Deserialize, MzReflect, Hash)]
96pub enum MirRelationExpr {
97    /// A constant relation containing specified rows.
98    ///
99    /// The runtime memory footprint of this operator is zero.
100    ///
101    /// When you would like to pattern match on this, consider using `MirRelationExpr::as_const`
102    /// instead, which looks behind `ArrangeBy`s. You might want this matching behavior because
103    /// constant folding doesn't remove `ArrangeBy`s.
104    Constant {
105        /// Rows of the constant collection and their multiplicities.
106        rows: Result<Vec<(Row, Diff)>, EvalError>,
107        /// Schema of the collection.
108        typ: SqlRelationType,
109    },
110    /// Get an existing dataflow.
111    ///
112    /// The runtime memory footprint of this operator is zero.
113    Get {
114        /// The identifier for the collection to load.
115        #[mzreflect(ignore)]
116        id: Id,
117        /// Schema of the collection.
118        typ: SqlRelationType,
119        /// If this is a global Get, this will indicate whether we are going to read from Persist or
120        /// from an index, or from a different object in `objects_to_build`. If it's an index, then
121        /// how downstream dataflow operations will use this index is also recorded. This is filled
122        /// by `prune_and_annotate_dataflow_index_imports`. Note that this is not used by the
123        /// lowering to LIR, but is used only by EXPLAIN.
124        #[mzreflect(ignore)]
125        access_strategy: AccessStrategy,
126    },
127    /// Introduce a temporary dataflow.
128    ///
129    /// The runtime memory footprint of this operator is zero.
130    Let {
131        /// The identifier to be used in `Get` variants to retrieve `value`.
132        #[mzreflect(ignore)]
133        id: LocalId,
134        /// The collection to be bound to `id`.
135        value: Box<MirRelationExpr>,
136        /// The result of the `Let`, evaluated with `id` bound to `value`.
137        body: Box<MirRelationExpr>,
138    },
139    /// Introduce mutually recursive bindings.
140    ///
141    /// Each `LocalId` is immediately bound to an initially empty  collection
142    /// with the type of its corresponding `MirRelationExpr`. Repeatedly, each
143    /// binding is evaluated using the current contents of each other binding,
144    /// and is refreshed to contain the new evaluation. This process continues
145    /// through all bindings, and repeats as long as changes continue to occur.
146    ///
147    /// The resulting value of the expression is `body` evaluated once in the
148    /// context of the final iterates.
149    ///
150    /// A zero-binding instance can be replaced by `body`.
151    /// A single-binding instance is equivalent to `MirRelationExpr::Let`.
152    ///
153    /// The runtime memory footprint of this operator is zero.
154    LetRec {
155        /// The identifiers to be used in `Get` variants to retrieve each `value`.
156        #[mzreflect(ignore)]
157        ids: Vec<LocalId>,
158        /// The collections to be bound to each `id`.
159        values: Vec<MirRelationExpr>,
160        /// Maximum number of iterations, after which we should artificially force a fixpoint.
161        /// (Whether we error or just stop is configured by `LetRecLimit::return_at_limit`.)
162        /// The per-`LetRec` limit that the user specified is initially copied to each binding to
163        /// accommodate slicing and merging of `LetRec`s in MIR transforms (e.g., `NormalizeLets`).
164        #[mzreflect(ignore)]
165        limits: Vec<Option<LetRecLimit>>,
166        /// The result of the `Let`, evaluated with `id` bound to `value`.
167        body: Box<MirRelationExpr>,
168    },
169    /// Project out some columns from a dataflow
170    ///
171    /// The runtime memory footprint of this operator is zero.
172    Project {
173        /// The source collection.
174        input: Box<MirRelationExpr>,
175        /// Indices of columns to retain.
176        outputs: Vec<usize>,
177    },
178    /// Append new columns to a dataflow
179    ///
180    /// The runtime memory footprint of this operator is zero.
181    Map {
182        /// The source collection.
183        input: Box<MirRelationExpr>,
184        /// Expressions which determine values to append to each row.
185        /// An expression may refer to columns in `input` or
186        /// expressions defined earlier in the vector
187        scalars: Vec<MirScalarExpr>,
188    },
189    /// Like Map, but yields zero-or-more output rows per input row
190    ///
191    /// The runtime memory footprint of this operator is zero.
192    FlatMap {
193        /// The source collection
194        input: Box<MirRelationExpr>,
195        /// The table func to apply
196        func: TableFunc,
197        /// The argument to the table func
198        exprs: Vec<MirScalarExpr>,
199    },
200    /// Keep rows from a dataflow where all the predicates are true
201    ///
202    /// The runtime memory footprint of this operator is zero.
203    Filter {
204        /// The source collection.
205        input: Box<MirRelationExpr>,
206        /// Predicates, each of which must be true.
207        predicates: Vec<MirScalarExpr>,
208    },
209    /// Join several collections, where some columns must be equal.
210    ///
211    /// For further details consult the documentation for [`MirRelationExpr::join`].
212    ///
213    /// The runtime memory footprint of this operator can be proportional to
214    /// the sizes of all inputs and the size of all joins of prefixes.
215    /// This may be reduced due to arrangements available at rendering time.
216    Join {
217        /// A sequence of input relations.
218        inputs: Vec<MirRelationExpr>,
219        /// A sequence of equivalence classes of expressions on the cross product of inputs.
220        ///
221        /// Each equivalence class is a list of scalar expressions, where for each class the
222        /// intended interpretation is that all evaluated expressions should be equal.
223        ///
224        /// Each scalar expression is to be evaluated over the cross-product of all records
225        /// from all inputs. In many cases this may just be column selection from specific
226        /// inputs, but more general cases exist (e.g. complex functions of multiple columns
227        /// from multiple inputs, or just constant literals).
228        equivalences: Vec<Vec<MirScalarExpr>>,
229        /// Join implementation information.
230        #[serde(default)]
231        implementation: JoinImplementation,
232    },
233    /// Group a dataflow by some columns and aggregate over each group
234    ///
235    /// The runtime memory footprint of this operator is at most proportional to the
236    /// number of distinct records in the input and output. The actual requirements
237    /// can be less: the number of distinct inputs to each aggregate, summed across
238    /// each aggregate, plus the output size. For more details consult the code that
239    /// builds the associated dataflow.
240    Reduce {
241        /// The source collection.
242        input: Box<MirRelationExpr>,
243        /// Column indices used to form groups.
244        group_key: Vec<MirScalarExpr>,
245        /// Expressions which determine values to append to each row, after the group keys.
246        aggregates: Vec<AggregateExpr>,
247        /// True iff the input is known to monotonically increase (only addition of records).
248        #[serde(default)]
249        monotonic: bool,
250        /// User hint: expected number of values per group key. Used to optimize physical rendering.
251        #[serde(default)]
252        expected_group_size: Option<u64>,
253    },
254    /// Groups and orders within each group, limiting output.
255    ///
256    /// The runtime memory footprint of this operator is proportional to its input and output.
257    TopK {
258        /// The source collection.
259        input: Box<MirRelationExpr>,
260        /// Column indices used to form groups.
261        group_key: Vec<usize>,
262        /// Column indices used to order rows within groups.
263        order_key: Vec<ColumnOrder>,
264        /// Number of records to retain
265        #[serde(default)]
266        limit: Option<MirScalarExpr>,
267        /// Number of records to skip
268        #[serde(default)]
269        offset: usize,
270        /// True iff the input is known to monotonically increase (only addition of records).
271        #[serde(default)]
272        monotonic: bool,
273        /// User-supplied hint: how many rows will have the same group key.
274        #[serde(default)]
275        expected_group_size: Option<u64>,
276    },
277    /// Return a dataflow where the row counts are negated
278    ///
279    /// The runtime memory footprint of this operator is zero.
280    Negate {
281        /// The source collection.
282        input: Box<MirRelationExpr>,
283    },
284    /// Keep rows from a dataflow where the row counts are positive
285    ///
286    /// The runtime memory footprint of this operator is proportional to its input and output.
287    Threshold {
288        /// The source collection.
289        input: Box<MirRelationExpr>,
290    },
291    /// Adds the frequencies of elements in contained sets.
292    ///
293    /// The runtime memory footprint of this operator is zero.
294    Union {
295        /// A source collection.
296        base: Box<MirRelationExpr>,
297        /// Source collections to union.
298        inputs: Vec<MirRelationExpr>,
299    },
300    /// Technically a no-op. Used to render an index. Will be used to optimize queries
301    /// on finer grain. Each `keys` item represents a different index that should be
302    /// produced from the `keys`.
303    ///
304    /// The runtime memory footprint of this operator is proportional to its input.
305    ArrangeBy {
306        /// The source collection
307        input: Box<MirRelationExpr>,
308        /// Columns to arrange `input` by, in order of decreasing primacy
309        keys: Vec<Vec<MirScalarExpr>>,
310    },
311}
312
313impl PartialEq for MirRelationExpr {
314    fn eq(&self, other: &Self) -> bool {
315        // Capture the result and test it wrt `Ord` implementation in test environments.
316        let result = structured_diff::MreDiff::new(self, other).next().is_none();
317        mz_ore::soft_assert_eq_no_log!(result, self.cmp(other) == Ordering::Equal);
318        result
319    }
320}
321impl Eq for MirRelationExpr {}
322
323impl MirRelationExpr {
324    /// Reports the schema of the relation.
325    ///
326    /// This method determines the type through recursive traversal of the
327    /// relation expression, drawing from the types of base collections.
328    /// As such, this is not an especially cheap method, and should be used
329    /// judiciously.
330    ///
331    /// The relation type is computed incrementally with a recursive post-order
332    /// traversal, that accumulates the input types for the relations yet to be
333    /// visited in `type_stack`.
334    pub fn typ(&self) -> SqlRelationType {
335        let mut type_stack = Vec::new();
336        #[allow(deprecated)]
337        self.visit_pre_post_nolimit(
338            &mut |e: &MirRelationExpr| -> Option<Vec<&MirRelationExpr>> {
339                match &e {
340                    MirRelationExpr::Let { body, .. } => {
341                        // Do not traverse the value sub-graph, since it's not relevant for
342                        // determining the relation type of Let operators.
343                        Some(vec![&*body])
344                    }
345                    MirRelationExpr::LetRec { body, .. } => {
346                        // Do not traverse the value sub-graph, since it's not relevant for
347                        // determining the relation type of Let operators.
348                        Some(vec![&*body])
349                    }
350                    _ => None,
351                }
352            },
353            &mut |e: &MirRelationExpr| {
354                match e {
355                    MirRelationExpr::Let { .. } => {
356                        let body_typ = type_stack.pop().unwrap();
357                        // Insert a dummy relation type for the value, since `typ_with_input_types`
358                        // won't look at it, but expects the relation type of the body to be second.
359                        type_stack.push(SqlRelationType::empty());
360                        type_stack.push(body_typ);
361                    }
362                    MirRelationExpr::LetRec { values, .. } => {
363                        let body_typ = type_stack.pop().unwrap();
364                        // Insert dummy relation types for the values, since `typ_with_input_types`
365                        // won't look at them, but expects the relation type of the body to be last.
366                        type_stack
367                            .extend(std::iter::repeat(SqlRelationType::empty()).take(values.len()));
368                        type_stack.push(body_typ);
369                    }
370                    _ => {}
371                }
372                let num_inputs = e.num_inputs();
373                let relation_type =
374                    e.typ_with_input_types(&type_stack[type_stack.len() - num_inputs..]);
375                type_stack.truncate(type_stack.len() - num_inputs);
376                type_stack.push(relation_type);
377            },
378        );
379        assert_eq!(type_stack.len(), 1);
380        type_stack.pop().unwrap()
381    }
382
383    /// Reports the schema of the relation given the schema of the input relations.
384    ///
385    /// `input_types` is required to contain the schemas for the input relations of
386    /// the current relation in the same order as they are visited by `try_visit_children`
387    /// method, even though not all may be used for computing the schema of the
388    /// current relation. For example, `Let` expects two input types, one for the
389    /// value relation and one for the body, in that order, but only the one for the
390    /// body is used to determine the type of the `Let` relation.
391    ///
392    /// It is meant to be used during post-order traversals to compute relation
393    /// schemas incrementally.
394    pub fn typ_with_input_types(&self, input_types: &[SqlRelationType]) -> SqlRelationType {
395        let column_types = self.col_with_input_cols(input_types.iter().map(|i| &i.column_types));
396        let unique_keys = self.keys_with_input_keys(
397            input_types.iter().map(|i| i.arity()),
398            input_types.iter().map(|i| &i.keys),
399        );
400        SqlRelationType::new(column_types).with_keys(unique_keys)
401    }
402
403    /// Reports the column types of the relation given the column types of the
404    /// input relations.
405    ///
406    /// This method delegates to `try_col_with_input_cols`, panicking if an `Err`
407    /// variant is returned.
408    pub fn col_with_input_cols<'a, I>(&self, input_types: I) -> Vec<SqlColumnType>
409    where
410        I: Iterator<Item = &'a Vec<SqlColumnType>>,
411    {
412        match self.try_col_with_input_cols(input_types) {
413            Ok(col_types) => col_types,
414            Err(err) => panic!("{err}"),
415        }
416    }
417
418    /// Reports the column types of the relation given the column types of the input relations.
419    ///
420    /// `input_types` is required to contain the column types for the input relations of
421    /// the current relation in the same order as they are visited by `try_visit_children`
422    /// method, even though not all may be used for computing the schema of the
423    /// current relation. For example, `Let` expects two input types, one for the
424    /// value relation and one for the body, in that order, but only the one for the
425    /// body is used to determine the type of the `Let` relation.
426    ///
427    /// It is meant to be used during post-order traversals to compute column types
428    /// incrementally.
429    pub fn try_col_with_input_cols<'a, I>(
430        &self,
431        mut input_types: I,
432    ) -> Result<Vec<SqlColumnType>, String>
433    where
434        I: Iterator<Item = &'a Vec<SqlColumnType>>,
435    {
436        use MirRelationExpr::*;
437
438        let col_types = match self {
439            Constant { rows, typ } => {
440                let mut col_types = typ.column_types.clone();
441                let mut seen_null = vec![false; typ.arity()];
442                if let Ok(rows) = rows {
443                    for (row, _diff) in rows {
444                        for (datum, i) in row.iter().zip_eq(0..typ.arity()) {
445                            if datum.is_null() {
446                                seen_null[i] = true;
447                            }
448                        }
449                    }
450                }
451                for (&seen_null, i) in seen_null.iter().zip_eq(0..typ.arity()) {
452                    if !seen_null {
453                        col_types[i].nullable = false;
454                    } else {
455                        assert!(col_types[i].nullable);
456                    }
457                }
458                col_types
459            }
460            Get { typ, .. } => typ.column_types.clone(),
461            Project { outputs, .. } => {
462                let input = input_types.next().unwrap();
463                outputs.iter().map(|&i| input[i].clone()).collect()
464            }
465            Map { scalars, .. } => {
466                let mut result = input_types.next().unwrap().clone();
467                for scalar in scalars.iter() {
468                    result.push(scalar.typ(&result))
469                }
470                result
471            }
472            FlatMap { func, .. } => {
473                let mut result = input_types.next().unwrap().clone();
474                result.extend(func.output_type().column_types);
475                result
476            }
477            Filter { predicates, .. } => {
478                let mut result = input_types.next().unwrap().clone();
479
480                // Set as nonnull any columns where null values would cause
481                // any predicate to evaluate to null.
482                for column in non_nullable_columns(predicates) {
483                    result[column].nullable = false;
484                }
485                result
486            }
487            Join { equivalences, .. } => {
488                // Concatenate input column types
489                let mut types = input_types.flat_map(|cols| cols.to_owned()).collect_vec();
490                // In an equivalence class, if any column is non-null, then make all non-null
491                for equivalence in equivalences {
492                    let col_inds = equivalence
493                        .iter()
494                        .filter_map(|expr| match expr {
495                            MirScalarExpr::Column(col, _name) => Some(*col),
496                            _ => None,
497                        })
498                        .collect_vec();
499                    if col_inds.iter().any(|i| !types.get(*i).unwrap().nullable) {
500                        for i in col_inds {
501                            types.get_mut(i).unwrap().nullable = false;
502                        }
503                    }
504                }
505                types
506            }
507            Reduce {
508                group_key,
509                aggregates,
510                ..
511            } => {
512                let input = input_types.next().unwrap();
513                group_key
514                    .iter()
515                    .map(|e| e.typ(input))
516                    .chain(aggregates.iter().map(|agg| agg.typ(input)))
517                    .collect()
518            }
519            TopK { .. } | Negate { .. } | Threshold { .. } | ArrangeBy { .. } => {
520                input_types.next().unwrap().clone()
521            }
522            Let { .. } => {
523                // skip over the input types for `value`.
524                input_types.nth(1).unwrap().clone()
525            }
526            LetRec { values, .. } => {
527                // skip over the input types for `values`.
528                input_types.nth(values.len()).unwrap().clone()
529            }
530            Union { .. } => {
531                let mut result = input_types.next().unwrap().clone();
532                for input_col_types in input_types {
533                    for (base_col, col) in result.iter_mut().zip_eq(input_col_types) {
534                        *base_col = base_col
535                            .union(col)
536                            .map_err(|e| format!("{}\nin plan:\n{}", e, self.pretty()))?;
537                    }
538                }
539                result
540            }
541        };
542
543        Ok(col_types)
544    }
545
546    /// Reports the unique keys of the relation given the arities and the unique
547    /// keys of the input relations.
548    ///
549    /// `input_arities` and `input_keys` are required to contain the
550    /// corresponding info for the input relations of
551    /// the current relation in the same order as they are visited by `try_visit_children`
552    /// method, even though not all may be used for computing the schema of the
553    /// current relation. For example, `Let` expects two input types, one for the
554    /// value relation and one for the body, in that order, but only the one for the
555    /// body is used to determine the type of the `Let` relation.
556    ///
557    /// It is meant to be used during post-order traversals to compute unique keys
558    /// incrementally.
559    pub fn keys_with_input_keys<'a, I, J>(
560        &self,
561        mut input_arities: I,
562        mut input_keys: J,
563    ) -> Vec<Vec<usize>>
564    where
565        I: Iterator<Item = usize>,
566        J: Iterator<Item = &'a Vec<Vec<usize>>>,
567    {
568        use MirRelationExpr::*;
569
570        let mut keys = match self {
571            Constant {
572                rows: Ok(rows),
573                typ,
574            } => {
575                let n_cols = typ.arity();
576                // If the `i`th entry is `Some`, then we have not yet observed non-uniqueness in the `i`th column.
577                let mut unique_values_per_col = vec![Some(BTreeSet::<Datum>::default()); n_cols];
578                for (row, diff) in rows {
579                    for (i, datum) in row.iter().enumerate() {
580                        if datum != Datum::Dummy {
581                            if let Some(unique_vals) = &mut unique_values_per_col[i] {
582                                let is_dupe = *diff != Diff::ONE || !unique_vals.insert(datum);
583                                if is_dupe {
584                                    unique_values_per_col[i] = None;
585                                }
586                            }
587                        }
588                    }
589                }
590                if rows.len() == 0 || (rows.len() == 1 && rows[0].1 == Diff::ONE) {
591                    vec![vec![]]
592                } else {
593                    // XXX - Multi-column keys are not detected.
594                    typ.keys
595                        .iter()
596                        .cloned()
597                        .chain(
598                            unique_values_per_col
599                                .into_iter()
600                                .enumerate()
601                                .filter(|(_idx, unique_vals)| unique_vals.is_some())
602                                .map(|(idx, _)| vec![idx]),
603                        )
604                        .collect()
605                }
606            }
607            Constant { rows: Err(_), typ } | Get { typ, .. } => typ.keys.clone(),
608            Threshold { .. } | ArrangeBy { .. } => input_keys.next().unwrap().clone(),
609            Let { .. } => {
610                // skip over the unique keys for value
611                input_keys.nth(1).unwrap().clone()
612            }
613            LetRec { values, .. } => {
614                // skip over the unique keys for value
615                input_keys.nth(values.len()).unwrap().clone()
616            }
617            Project { outputs, .. } => {
618                let input = input_keys.next().unwrap();
619                input
620                    .iter()
621                    .filter_map(|key_set| {
622                        if key_set.iter().all(|k| outputs.contains(k)) {
623                            Some(
624                                key_set
625                                    .iter()
626                                    .map(|c| outputs.iter().position(|o| o == c).unwrap())
627                                    .collect(),
628                            )
629                        } else {
630                            None
631                        }
632                    })
633                    .collect()
634            }
635            Map { scalars, .. } => {
636                let mut remappings = Vec::new();
637                let arity = input_arities.next().unwrap();
638                for (column, scalar) in scalars.iter().enumerate() {
639                    // assess whether the scalar preserves uniqueness,
640                    // and could participate in a key!
641
642                    fn uniqueness(expr: &MirScalarExpr) -> Option<usize> {
643                        match expr {
644                            MirScalarExpr::CallUnary { func, expr } => {
645                                if func.preserves_uniqueness() {
646                                    uniqueness(expr)
647                                } else {
648                                    None
649                                }
650                            }
651                            MirScalarExpr::Column(c, _name) => Some(*c),
652                            _ => None,
653                        }
654                    }
655
656                    if let Some(c) = uniqueness(scalar) {
657                        remappings.push((c, column + arity));
658                    }
659                }
660
661                let mut result = input_keys.next().unwrap().clone();
662                let mut new_keys = Vec::new();
663                // Any column in `remappings` could be replaced in a key
664                // by the corresponding c. This could lead to combinatorial
665                // explosion using our current representation, so we wont
666                // do that. Instead, we'll handle the case of one remapping.
667                if remappings.len() == 1 {
668                    let (old, new) = remappings.pop().unwrap();
669                    for key in &result {
670                        if key.contains(&old) {
671                            let mut new_key: Vec<usize> =
672                                key.iter().cloned().filter(|k| k != &old).collect();
673                            new_key.push(new);
674                            new_key.sort_unstable();
675                            new_keys.push(new_key);
676                        }
677                    }
678                    result.append(&mut new_keys);
679                }
680                result
681            }
682            FlatMap { .. } => {
683                // FlatMap can add duplicate rows, so input keys are no longer
684                // valid
685                vec![]
686            }
687            Negate { .. } => {
688                // Although negate may have distinct records for each key,
689                // the multiplicity is -1 rather than 1. This breaks many
690                // of the optimization uses of "keys".
691                vec![]
692            }
693            Filter { predicates, .. } => {
694                // A filter inherits the keys of its input unless the filters
695                // have reduced the input to a single row, in which case the
696                // keys of the input are `()`.
697                let mut input = input_keys.next().unwrap().clone();
698
699                if !input.is_empty() {
700                    // Track columns equated to literals, which we can prune.
701                    let mut cols_equal_to_literal = BTreeSet::new();
702
703                    // Perform union find on `col1 = col2` to establish
704                    // connected components of equated columns. Absent any
705                    // equalities, this will be `0 .. #c` (where #c is the
706                    // greatest column referenced by a predicate), but each
707                    // equality will orient the root of the greater to the root
708                    // of the lesser.
709                    let mut union_find = Vec::new();
710
711                    for expr in predicates.iter() {
712                        if let MirScalarExpr::CallBinary {
713                            func: crate::BinaryFunc::Eq(_),
714                            expr1,
715                            expr2,
716                        } = expr
717                        {
718                            if let MirScalarExpr::Column(c, _name) = &**expr1 {
719                                if expr2.is_literal_ok() {
720                                    cols_equal_to_literal.insert(c);
721                                }
722                            }
723                            if let MirScalarExpr::Column(c, _name) = &**expr2 {
724                                if expr1.is_literal_ok() {
725                                    cols_equal_to_literal.insert(c);
726                                }
727                            }
728                            // Perform union-find to equate columns.
729                            if let (Some(c1), Some(c2)) = (expr1.as_column(), expr2.as_column()) {
730                                if c1 != c2 {
731                                    // Ensure union_find has entries up to
732                                    // max(c1, c2) by filling up missing
733                                    // positions with identity mappings.
734                                    while union_find.len() <= std::cmp::max(c1, c2) {
735                                        union_find.push(union_find.len());
736                                    }
737                                    let mut r1 = c1; // Find the representative column of [c1].
738                                    while r1 != union_find[r1] {
739                                        assert!(union_find[r1] < r1);
740                                        r1 = union_find[r1];
741                                    }
742                                    let mut r2 = c2; // Find the representative column of [c2].
743                                    while r2 != union_find[r2] {
744                                        assert!(union_find[r2] < r2);
745                                        r2 = union_find[r2];
746                                    }
747                                    // Union [c1] and [c2] by pointing the
748                                    // larger to the smaller representative (we
749                                    // update the remaining equivalence class
750                                    // members only once after this for-loop).
751                                    union_find[std::cmp::max(r1, r2)] = std::cmp::min(r1, r2);
752                                }
753                            }
754                        }
755                    }
756
757                    // Complete union-find by pointing each element at its representative column.
758                    for i in 0..union_find.len() {
759                        // Iteration not required, as each prior already references the right column.
760                        union_find[i] = union_find[union_find[i]];
761                    }
762
763                    // Remove columns bound to literals, and remap columns equated to earlier columns.
764                    // We will re-expand remapped columns in a moment, but this avoids exponential work.
765                    for key_set in &mut input {
766                        key_set.retain(|k| !cols_equal_to_literal.contains(&k));
767                        for col in key_set.iter_mut() {
768                            if let Some(equiv) = union_find.get(*col) {
769                                *col = *equiv;
770                            }
771                        }
772                        key_set.sort();
773                        key_set.dedup();
774                    }
775                    input.sort();
776                    input.dedup();
777
778                    // Expand out each key to each of its equivalent forms.
779                    // Each instance of `col` can be replaced by any equivalent column.
780                    // This has the potential to result in exponentially sized number of unique keys,
781                    // and in the future we should probably maintain unique keys modulo equivalence.
782
783                    // First, compute an inverse map from each representative
784                    // column `sub` to all other equivalent columns `col`.
785                    let mut subs = Vec::new();
786                    for (col, sub) in union_find.iter().enumerate() {
787                        if *sub != col {
788                            assert!(*sub < col);
789                            while subs.len() <= *sub {
790                                subs.push(Vec::new());
791                            }
792                            subs[*sub].push(col);
793                        }
794                    }
795                    // For each column, substitute for it in each occurrence.
796                    let mut to_add = Vec::new();
797                    for (col, subs) in subs.iter().enumerate() {
798                        if !subs.is_empty() {
799                            for key_set in input.iter() {
800                                if key_set.contains(&col) {
801                                    let mut to_extend = key_set.clone();
802                                    to_extend.retain(|c| c != &col);
803                                    for sub in subs {
804                                        to_extend.push(*sub);
805                                        to_add.push(to_extend.clone());
806                                        to_extend.pop();
807                                    }
808                                }
809                            }
810                        }
811                        // No deduplication, as we cannot introduce duplicates.
812                        input.append(&mut to_add);
813                    }
814                    for key_set in input.iter_mut() {
815                        key_set.sort();
816                        key_set.dedup();
817                    }
818                }
819                input
820            }
821            Join { equivalences, .. } => {
822                // It is important the `new_from_input_arities` constructor is
823                // used. Otherwise, Materialize may potentially end up in an
824                // infinite loop.
825                let input_mapper = crate::JoinInputMapper::new_from_input_arities(input_arities);
826
827                input_mapper.global_keys(input_keys, equivalences)
828            }
829            Reduce { group_key, .. } => {
830                // The group key should form a key, but we might already have
831                // keys that are subsets of the group key, and should retain
832                // those instead, if so.
833                let mut result = Vec::new();
834                for key_set in input_keys.next().unwrap() {
835                    if key_set
836                        .iter()
837                        .all(|k| group_key.contains(&MirScalarExpr::column(*k)))
838                    {
839                        result.push(
840                            key_set
841                                .iter()
842                                .map(|i| {
843                                    group_key
844                                        .iter()
845                                        .position(|k| k == &MirScalarExpr::column(*i))
846                                        .unwrap()
847                                })
848                                .collect::<Vec<_>>(),
849                        );
850                    }
851                }
852                if result.is_empty() {
853                    result.push((0..group_key.len()).collect());
854                }
855                result
856            }
857            TopK {
858                group_key, limit, ..
859            } => {
860                // If `limit` is `Some(1)` then the group key will become
861                // a unique key, as there will be only one record with that key.
862                let mut result = input_keys.next().unwrap().clone();
863                if limit.as_ref().and_then(|x| x.as_literal_int64()) == Some(1) {
864                    result.push(group_key.clone())
865                }
866                result
867            }
868            Union { base, inputs } => {
869                // Generally, unions do not have any unique keys, because
870                // each input might duplicate some. However, there is at
871                // least one idiomatic structure that does preserve keys,
872                // which results from SQL aggregations that must populate
873                // absent records with default values. In that pattern,
874                // the union of one GET with its negation, which has first
875                // been subjected to a projection and map, we can remove
876                // their influence on the key structure.
877                //
878                // If there are A, B, each with a unique `key` such that
879                // we are looking at
880                //
881                //     A.proj(set_containing_key) + (B - A.proj(key)).map(stuff)
882                //
883                // Then we can report `key` as a unique key.
884                //
885                // TODO: make unique key structure an optimization analysis
886                // rather than part of the type information.
887                // TODO: perhaps ensure that (above) A.proj(key) is a
888                // subset of B, as otherwise there are negative records
889                // and who knows what is true (not expected, but again
890                // who knows what the query plan might look like).
891
892                let arity = input_arities.next().unwrap();
893                let (base_projection, base_with_project_stripped) =
894                    if let MirRelationExpr::Project { input, outputs } = &**base {
895                        (outputs.clone(), &**input)
896                    } else {
897                        // A input without a project is equivalent to an input
898                        // with the project being all columns in the input in order.
899                        ((0..arity).collect::<Vec<_>>(), &**base)
900                    };
901                let mut result = Vec::new();
902                if let MirRelationExpr::Get {
903                    id: first_id,
904                    typ: _,
905                    ..
906                } = base_with_project_stripped
907                {
908                    if inputs.len() == 1 {
909                        if let MirRelationExpr::Map { input, .. } = &inputs[0] {
910                            if let MirRelationExpr::Union { base, inputs } = &**input {
911                                if inputs.len() == 1 {
912                                    if let Some((input, outputs)) = base.is_negated_project() {
913                                        if let MirRelationExpr::Get {
914                                            id: second_id,
915                                            typ: _,
916                                            ..
917                                        } = input
918                                        {
919                                            if first_id == second_id {
920                                                result.extend(
921                                                    input_keys
922                                                        .next()
923                                                        .unwrap()
924                                                        .into_iter()
925                                                        .filter(|key| {
926                                                            key.iter().all(|c| {
927                                                                outputs.get(*c) == Some(c)
928                                                                    && base_projection.get(*c)
929                                                                        == Some(c)
930                                                            })
931                                                        })
932                                                        .cloned(),
933                                                );
934                                            }
935                                        }
936                                    }
937                                }
938                            }
939                        }
940                    }
941                }
942                // Important: do not inherit keys of either input, as not unique.
943                result
944            }
945        };
946        keys.sort();
947        keys.dedup();
948        keys
949    }
950
951    /// The number of columns in the relation.
952    ///
953    /// This number is determined from the type, which is determined recursively
954    /// at non-trivial cost.
955    ///
956    /// The arity is computed incrementally with a recursive post-order
957    /// traversal, that accumulates the arities for the relations yet to be
958    /// visited in `arity_stack`.
959    pub fn arity(&self) -> usize {
960        let mut arity_stack = Vec::new();
961        #[allow(deprecated)]
962        self.visit_pre_post_nolimit(
963            &mut |e: &MirRelationExpr| -> Option<Vec<&MirRelationExpr>> {
964                match &e {
965                    MirRelationExpr::Let { body, .. } => {
966                        // Do not traverse the value sub-graph, since it's not relevant for
967                        // determining the arity of Let operators.
968                        Some(vec![&*body])
969                    }
970                    MirRelationExpr::LetRec { body, .. } => {
971                        // Do not traverse the value sub-graph, since it's not relevant for
972                        // determining the arity of Let operators.
973                        Some(vec![&*body])
974                    }
975                    MirRelationExpr::Project { .. } | MirRelationExpr::Reduce { .. } => {
976                        // No further traversal is required; these operators know their arity.
977                        Some(Vec::new())
978                    }
979                    _ => None,
980                }
981            },
982            &mut |e: &MirRelationExpr| {
983                match &e {
984                    MirRelationExpr::Let { .. } => {
985                        let body_arity = arity_stack.pop().unwrap();
986                        arity_stack.push(0);
987                        arity_stack.push(body_arity);
988                    }
989                    MirRelationExpr::LetRec { values, .. } => {
990                        let body_arity = arity_stack.pop().unwrap();
991                        arity_stack.extend(std::iter::repeat(0).take(values.len()));
992                        arity_stack.push(body_arity);
993                    }
994                    MirRelationExpr::Project { .. } | MirRelationExpr::Reduce { .. } => {
995                        arity_stack.push(0);
996                    }
997                    _ => {}
998                }
999                let num_inputs = e.num_inputs();
1000                let input_arities = arity_stack.drain(arity_stack.len() - num_inputs..);
1001                let arity = e.arity_with_input_arities(input_arities);
1002                arity_stack.push(arity);
1003            },
1004        );
1005        assert_eq!(arity_stack.len(), 1);
1006        arity_stack.pop().unwrap()
1007    }
1008
1009    /// Reports the arity of the relation given the schema of the input relations.
1010    ///
1011    /// `input_arities` is required to contain the arities for the input relations of
1012    /// the current relation in the same order as they are visited by `try_visit_children`
1013    /// method, even though not all may be used for computing the schema of the
1014    /// current relation. For example, `Let` expects two input types, one for the
1015    /// value relation and one for the body, in that order, but only the one for the
1016    /// body is used to determine the type of the `Let` relation.
1017    ///
1018    /// It is meant to be used during post-order traversals to compute arities
1019    /// incrementally.
1020    pub fn arity_with_input_arities<I>(&self, mut input_arities: I) -> usize
1021    where
1022        I: Iterator<Item = usize>,
1023    {
1024        use MirRelationExpr::*;
1025
1026        match self {
1027            Constant { rows: _, typ } => typ.arity(),
1028            Get { typ, .. } => typ.arity(),
1029            Let { .. } => {
1030                input_arities.next();
1031                input_arities.next().unwrap()
1032            }
1033            LetRec { values, .. } => {
1034                for _ in 0..values.len() {
1035                    input_arities.next();
1036                }
1037                input_arities.next().unwrap()
1038            }
1039            Project { outputs, .. } => outputs.len(),
1040            Map { scalars, .. } => input_arities.next().unwrap() + scalars.len(),
1041            FlatMap { func, .. } => input_arities.next().unwrap() + func.output_arity(),
1042            Join { .. } => input_arities.sum(),
1043            Reduce {
1044                input: _,
1045                group_key,
1046                aggregates,
1047                ..
1048            } => group_key.len() + aggregates.len(),
1049            Filter { .. }
1050            | TopK { .. }
1051            | Negate { .. }
1052            | Threshold { .. }
1053            | Union { .. }
1054            | ArrangeBy { .. } => input_arities.next().unwrap(),
1055        }
1056    }
1057
1058    /// The number of child relations this relation has.
1059    pub fn num_inputs(&self) -> usize {
1060        let mut count = 0;
1061
1062        self.visit_children(|_| count += 1);
1063
1064        count
1065    }
1066
1067    /// Constructs a constant collection from specific rows and schema, where
1068    /// each row will have a multiplicity of one.
1069    pub fn constant(rows: Vec<Vec<Datum>>, typ: SqlRelationType) -> Self {
1070        let rows = rows.into_iter().map(|row| (row, Diff::ONE)).collect();
1071        MirRelationExpr::constant_diff(rows, typ)
1072    }
1073
1074    /// Constructs a constant collection from specific rows and schema, where
1075    /// each row can have an arbitrary multiplicity.
1076    pub fn constant_diff(rows: Vec<(Vec<Datum>, Diff)>, typ: SqlRelationType) -> Self {
1077        for (row, _diff) in &rows {
1078            for (datum, column_typ) in row.iter().zip_eq(typ.column_types.iter()) {
1079                assert!(
1080                    datum.is_instance_of_sql(column_typ),
1081                    "Expected datum of type {:?}, got value {:?}",
1082                    column_typ,
1083                    datum
1084                );
1085            }
1086        }
1087        let rows = Ok(rows
1088            .into_iter()
1089            .map(move |(row, diff)| (Row::pack_slice(&row), diff))
1090            .collect());
1091        MirRelationExpr::Constant { rows, typ }
1092    }
1093
1094    /// If self is a constant, return the value and the type, otherwise `None`.
1095    /// Looks behind `ArrangeBy`s.
1096    pub fn as_const(&self) -> Option<(&Result<Vec<(Row, Diff)>, EvalError>, &SqlRelationType)> {
1097        match self {
1098            MirRelationExpr::Constant { rows, typ } => Some((rows, typ)),
1099            MirRelationExpr::ArrangeBy { input, .. } => input.as_const(),
1100            _ => None,
1101        }
1102    }
1103
1104    /// If self is a constant, mutably return the value and the type, otherwise `None`.
1105    /// Looks behind `ArrangeBy`s.
1106    pub fn as_const_mut(
1107        &mut self,
1108    ) -> Option<(
1109        &mut Result<Vec<(Row, Diff)>, EvalError>,
1110        &mut SqlRelationType,
1111    )> {
1112        match self {
1113            MirRelationExpr::Constant { rows, typ } => Some((rows, typ)),
1114            MirRelationExpr::ArrangeBy { input, .. } => input.as_const_mut(),
1115            _ => None,
1116        }
1117    }
1118
1119    /// If self is a constant error, return the error, otherwise `None`.
1120    /// Looks behind `ArrangeBy`s.
1121    pub fn as_const_err(&self) -> Option<&EvalError> {
1122        match self {
1123            MirRelationExpr::Constant { rows: Err(e), .. } => Some(e),
1124            MirRelationExpr::ArrangeBy { input, .. } => input.as_const_err(),
1125            _ => None,
1126        }
1127    }
1128
1129    /// Checks if `self` is the single element collection with no columns.
1130    pub fn is_constant_singleton(&self) -> bool {
1131        if let Some((Ok(rows), typ)) = self.as_const() {
1132            rows.len() == 1 && typ.column_types.len() == 0 && rows[0].1 == Diff::ONE
1133        } else {
1134            false
1135        }
1136    }
1137
1138    /// Constructs the expression for getting a local collection.
1139    pub fn local_get(id: LocalId, typ: SqlRelationType) -> Self {
1140        MirRelationExpr::Get {
1141            id: Id::Local(id),
1142            typ,
1143            access_strategy: AccessStrategy::UnknownOrLocal,
1144        }
1145    }
1146
1147    /// Constructs the expression for getting a global collection
1148    pub fn global_get(id: GlobalId, typ: SqlRelationType) -> Self {
1149        MirRelationExpr::Get {
1150            id: Id::Global(id),
1151            typ,
1152            access_strategy: AccessStrategy::UnknownOrLocal,
1153        }
1154    }
1155
1156    /// Retains only the columns specified by `output`.
1157    pub fn project(mut self, mut outputs: Vec<usize>) -> Self {
1158        if let MirRelationExpr::Project {
1159            outputs: columns, ..
1160        } = &mut self
1161        {
1162            // Update `outputs` to reference base columns of `input`.
1163            for column in outputs.iter_mut() {
1164                *column = columns[*column];
1165            }
1166            *columns = outputs;
1167            self
1168        } else {
1169            MirRelationExpr::Project {
1170                input: Box::new(self),
1171                outputs,
1172            }
1173        }
1174    }
1175
1176    /// Append to each row the results of applying elements of `scalar`.
1177    pub fn map(mut self, scalars: Vec<MirScalarExpr>) -> Self {
1178        if let MirRelationExpr::Map { scalars: s, .. } = &mut self {
1179            s.extend(scalars);
1180            self
1181        } else if !scalars.is_empty() {
1182            MirRelationExpr::Map {
1183                input: Box::new(self),
1184                scalars,
1185            }
1186        } else {
1187            self
1188        }
1189    }
1190
1191    /// Append to each row a single `scalar`.
1192    pub fn map_one(self, scalar: MirScalarExpr) -> Self {
1193        self.map(vec![scalar])
1194    }
1195
1196    /// Like `map`, but yields zero-or-more output rows per input row
1197    pub fn flat_map(self, func: TableFunc, exprs: Vec<MirScalarExpr>) -> Self {
1198        MirRelationExpr::FlatMap {
1199            input: Box::new(self),
1200            func,
1201            exprs,
1202        }
1203    }
1204
1205    /// Retain only the rows satisfying each of several predicates.
1206    pub fn filter<I>(mut self, predicates: I) -> Self
1207    where
1208        I: IntoIterator<Item = MirScalarExpr>,
1209    {
1210        // Extract existing predicates
1211        let mut new_predicates = if let MirRelationExpr::Filter { input, predicates } = self {
1212            self = *input;
1213            predicates
1214        } else {
1215            Vec::new()
1216        };
1217        // Normalize collection of predicates.
1218        new_predicates.extend(predicates);
1219        new_predicates.retain(|p| !p.is_literal_true());
1220        new_predicates.sort();
1221        new_predicates.dedup();
1222        // Introduce a `Filter` only if we have predicates.
1223        if !new_predicates.is_empty() {
1224            self = MirRelationExpr::Filter {
1225                input: Box::new(self),
1226                predicates: new_predicates,
1227            };
1228        }
1229
1230        self
1231    }
1232
1233    /// Form the Cartesian outer-product of rows in both inputs.
1234    pub fn product(mut self, right: Self) -> Self {
1235        if right.is_constant_singleton() {
1236            self
1237        } else if self.is_constant_singleton() {
1238            right
1239        } else if let MirRelationExpr::Join { inputs, .. } = &mut self {
1240            inputs.push(right);
1241            self
1242        } else {
1243            MirRelationExpr::join(vec![self, right], vec![])
1244        }
1245    }
1246
1247    /// Performs a relational equijoin among the input collections.
1248    ///
1249    /// Each element of `inputs` describes a different input collection, and `variables` describes
1250    /// equality constraints that some of their columns must satisfy. Each element of `variables`
1251    /// describes a set of pairs `(input_index, column_index)` whose values must all be equal.
1252    ///
1253    /// For example, the pair `(input, column)` indexes into `inputs[input][column]`, extracting the `input`th
1254    /// input collection and for each row examining its `column`th column.
1255    ///
1256    /// # Example
1257    ///
1258    /// ```rust
1259    /// use mz_repr::{Datum, SqlColumnType, SqlRelationType, SqlScalarType};
1260    /// use mz_expr::MirRelationExpr;
1261    ///
1262    /// // A common schema for each input.
1263    /// let schema = SqlRelationType::new(vec![
1264    ///     SqlScalarType::Int32.nullable(false),
1265    ///     SqlScalarType::Int32.nullable(false),
1266    /// ]);
1267    ///
1268    /// // the specific data are not important here.
1269    /// let data = vec![Datum::Int32(0), Datum::Int32(1)];
1270    ///
1271    /// // Three collections that could have been different.
1272    /// let input0 = MirRelationExpr::constant(vec![data.clone()], schema.clone());
1273    /// let input1 = MirRelationExpr::constant(vec![data.clone()], schema.clone());
1274    /// let input2 = MirRelationExpr::constant(vec![data.clone()], schema.clone());
1275    ///
1276    /// // Join the three relations looking for triangles, like so.
1277    /// //
1278    /// //     Output(A,B,C) := Input0(A,B), Input1(B,C), Input2(A,C)
1279    /// let joined = MirRelationExpr::join(
1280    ///     vec![input0, input1, input2],
1281    ///     vec![
1282    ///         vec![(0,0), (2,0)], // fields A of inputs 0 and 2.
1283    ///         vec![(0,1), (1,0)], // fields B of inputs 0 and 1.
1284    ///         vec![(1,1), (2,1)], // fields C of inputs 1 and 2.
1285    ///     ],
1286    /// );
1287    ///
1288    /// // Technically the above produces `Output(A,B,B,C,A,C)` because the columns are concatenated.
1289    /// // A projection resolves this and produces the correct output.
1290    /// let result = joined.project(vec![0, 1, 3]);
1291    /// ```
1292    pub fn join(inputs: Vec<MirRelationExpr>, variables: Vec<Vec<(usize, usize)>>) -> Self {
1293        let input_mapper = join_input_mapper::JoinInputMapper::new(&inputs);
1294
1295        let equivalences = variables
1296            .into_iter()
1297            .map(|vs| {
1298                vs.into_iter()
1299                    .map(|(r, c)| input_mapper.map_expr_to_global(MirScalarExpr::column(c), r))
1300                    .collect::<Vec<_>>()
1301            })
1302            .collect::<Vec<_>>();
1303
1304        Self::join_scalars(inputs, equivalences)
1305    }
1306
1307    /// Constructs a join operator from inputs and required-equal scalar expressions.
1308    pub fn join_scalars(
1309        mut inputs: Vec<MirRelationExpr>,
1310        equivalences: Vec<Vec<MirScalarExpr>>,
1311    ) -> Self {
1312        // Remove all constant inputs that are the identity for join.
1313        // They neither introduce nor modify any column references.
1314        inputs.retain(|i| !i.is_constant_singleton());
1315        MirRelationExpr::Join {
1316            inputs,
1317            equivalences,
1318            implementation: JoinImplementation::Unimplemented,
1319        }
1320    }
1321
1322    /// Perform a key-wise reduction / aggregation.
1323    ///
1324    /// The `group_key` argument indicates columns in the input collection that should
1325    /// be grouped, and `aggregates` lists aggregation functions each of which produces
1326    /// one output column in addition to the keys.
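    ///
    /// A sketch (illustrative, not compiled as a doctest; `expr` is a hypothetical expression):
    /// ```ignore
    /// // Group by column 0 with no aggregates, i.e. keep the distinct values of that column.
    /// let distinct_keys = expr.reduce(vec![0], vec![], None);
    /// ```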
1327    pub fn reduce(
1328        self,
1329        group_key: Vec<usize>,
1330        aggregates: Vec<AggregateExpr>,
1331        expected_group_size: Option<u64>,
1332    ) -> Self {
1333        MirRelationExpr::Reduce {
1334            input: Box::new(self),
1335            group_key: group_key.into_iter().map(MirScalarExpr::column).collect(),
1336            aggregates,
1337            monotonic: false,
1338            expected_group_size,
1339        }
1340    }
1341
1342    /// Perform a key-wise reduction that orders and limits the records within each group.
1343    ///
1344    /// The `group_key` argument indicates columns in the input collection that should
1345    /// be grouped, the `order_key` argument indicates columns that should be further
1346    /// used to order records within groups, and the `limit` argument constrains the
1347    /// total number of records that should be produced in each group.
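    ///
    /// A sketch (illustrative, not compiled as a doctest; `expr` is a hypothetical expression):
    /// ```ignore
    /// // Within each group of column 0, keep the 3 rows with the largest values in column 1.
    /// let top3 = expr.top_k(
    ///     vec![0],
    ///     vec![ColumnOrder { column: 1, desc: true, nulls_last: true }],
    ///     Some(MirScalarExpr::literal_ok(Datum::Int64(3), SqlScalarType::Int64)),
    ///     0,
    ///     None,
    /// );
    /// ```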
1348    pub fn top_k(
1349        self,
1350        group_key: Vec<usize>,
1351        order_key: Vec<ColumnOrder>,
1352        limit: Option<MirScalarExpr>,
1353        offset: usize,
1354        expected_group_size: Option<u64>,
1355    ) -> Self {
1356        MirRelationExpr::TopK {
1357            input: Box::new(self),
1358            group_key,
1359            order_key,
1360            limit,
1361            offset,
1362            expected_group_size,
1363            monotonic: false,
1364        }
1365    }
1366
1367    /// Negates the occurrences of each row.
1368    pub fn negate(self) -> Self {
1369        if let MirRelationExpr::Negate { input } = self {
1370            *input
1371        } else {
1372            MirRelationExpr::Negate {
1373                input: Box::new(self),
1374            }
1375        }
1376    }
1377
1378    /// Removes all but the first occurrence of each row.
1379    pub fn distinct(self) -> Self {
1380        let arity = self.arity();
1381        self.distinct_by((0..arity).collect())
1382    }
1383
1384    /// Removes all but the first occurrence of each key. Columns not included
1385    /// in the `group_key` are discarded.
1386    pub fn distinct_by(self, group_key: Vec<usize>) -> Self {
1387        self.reduce(group_key, vec![], None)
1388    }
1389
1390    /// Discards rows with a negative frequency.
1391    pub fn threshold(self) -> Self {
1392        if let MirRelationExpr::Threshold { .. } = &self {
1393            self
1394        } else {
1395            MirRelationExpr::Threshold {
1396                input: Box::new(self),
1397            }
1398        }
1399    }
1400
1401    /// Unions together any number of inputs.
1402    ///
1403    /// If `inputs` is empty, then an empty relation of type `typ` is
1404    /// constructed.
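    ///
    /// A sketch (illustrative, not compiled as a doctest; `a`, `b`, `c`, and `typ` are hypothetical):
    /// ```ignore
    /// // Nested `Union`s among the inputs are flattened into a single `Union` node.
    /// let unioned = MirRelationExpr::union_many(vec![a.union(b), c], typ);
    /// ```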
1405    pub fn union_many(mut inputs: Vec<Self>, typ: SqlRelationType) -> Self {
1406        // Deconstruct `inputs` as `Union`s and reconstitute.
1407        let mut flat_inputs = Vec::with_capacity(inputs.len());
1408        for input in inputs {
1409            if let MirRelationExpr::Union { base, inputs } = input {
1410                flat_inputs.push(*base);
1411                flat_inputs.extend(inputs);
1412            } else {
1413                flat_inputs.push(input);
1414            }
1415        }
1416        inputs = flat_inputs;
1417        if inputs.is_empty() {
1418            MirRelationExpr::Constant {
1419                rows: Ok(vec![]),
1420                typ,
1421            }
1422        } else if inputs.len() == 1 {
1423            inputs.into_element()
1424        } else {
1425            MirRelationExpr::Union {
1426                base: Box::new(inputs.remove(0)),
1427                inputs,
1428            }
1429        }
1430    }
1431
1432    /// Produces one collection where each row is present with the sum of its frequencies in each input.
1433    pub fn union(self, other: Self) -> Self {
1434        // Deconstruct `self` and `other` as `Union`s and reconstitute.
1435        let mut flat_inputs = Vec::with_capacity(2);
1436        if let MirRelationExpr::Union { base, inputs } = self {
1437            flat_inputs.push(*base);
1438            flat_inputs.extend(inputs);
1439        } else {
1440            flat_inputs.push(self);
1441        }
1442        if let MirRelationExpr::Union { base, inputs } = other {
1443            flat_inputs.push(*base);
1444            flat_inputs.extend(inputs);
1445        } else {
1446            flat_inputs.push(other);
1447        }
1448
1449        MirRelationExpr::Union {
1450            base: Box::new(flat_inputs.remove(0)),
1451            inputs: flat_inputs,
1452        }
1453    }
1454
1455    /// Arranges the collection by the specified keys.
1456    pub fn arrange_by(self, keys: &[Vec<MirScalarExpr>]) -> Self {
1457        MirRelationExpr::ArrangeBy {
1458            input: Box::new(self),
1459            keys: keys.to_owned(),
1460        }
1461    }
1462
1463    /// Indicates if this is a constant empty collection.
1464    ///
1465    /// A false value does not mean the collection is known to be non-empty,
1466    /// only that we cannot currently determine that it is statically empty.
1467    pub fn is_empty(&self) -> bool {
1468        if let Some((Ok(rows), ..)) = self.as_const() {
1469            rows.is_empty()
1470        } else {
1471            false
1472        }
1473    }
1474
1475    /// If the expression is a negated project, return the input and the projection.
1476    pub fn is_negated_project(&self) -> Option<(&MirRelationExpr, &[usize])> {
1477        if let MirRelationExpr::Negate { input } = self {
1478            if let MirRelationExpr::Project { input, outputs } = &**input {
1479                return Some((&**input, outputs));
1480            }
1481        }
1482        if let MirRelationExpr::Project { input, outputs } = self {
1483            if let MirRelationExpr::Negate { input } = &**input {
1484                return Some((&**input, outputs));
1485            }
1486        }
1487        None
1488    }
1489
1490    /// Pretty-print this [MirRelationExpr] to a string.
1491    pub fn pretty(&self) -> String {
1492        let config = ExplainConfig::default();
1493        self.explain(&config, None)
1494    }
1495
1496    /// Pretty-print this [MirRelationExpr] to a string using a custom
1497    /// [ExplainConfig] and an optionally provided [ExprHumanizer].
1498    pub fn explain(&self, config: &ExplainConfig, humanizer: Option<&dyn ExprHumanizer>) -> String {
1499        text_string_at(self, || PlanRenderingContext {
1500            indent: Indent::default(),
1501            humanizer: humanizer.unwrap_or(&DummyHumanizer),
1502            annotations: BTreeMap::default(),
1503            config,
1504        })
1505    }
1506
1507    /// Take ownership of `self`, leaving an empty `MirRelationExpr::Constant` with the optionally
1508    /// given scalar types. The given scalar types should be `base_eq` with the types that `typ()`
1509    /// would find. Keys and nullability are ignored in the given `SqlRelationType`, and instead we set
1510    /// the best possible key and nullability, since we are making an empty collection.
1511    ///
1512    /// If `typ` is not given, then this calls `.typ()` (which is possibly expensive) to determine
1513    /// the correct type.
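    ///
    /// A sketch (illustrative, not compiled as a doctest; `expr` is a hypothetical expression):
    /// ```ignore
    /// // Take the expression by value, leaving behind an empty constant of the same base type.
    /// let original = expr.take_safely(None);
    /// ```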
1514    pub fn take_safely(&mut self, typ: Option<SqlRelationType>) -> MirRelationExpr {
1515        if let Some(typ) = &typ {
1516            soft_assert_no_log!(
1517                self.typ()
1518                    .column_types
1519                    .iter()
1520                    .zip_eq(typ.column_types.iter())
1521                    .all(|(t1, t2)| t1.scalar_type.base_eq(&t2.scalar_type))
1522            );
1523        }
1524        let mut typ = typ.unwrap_or_else(|| self.typ());
1525        typ.keys = vec![vec![]];
1526        for ct in typ.column_types.iter_mut() {
1527            ct.nullable = false;
1528        }
1529        std::mem::replace(
1530            self,
1531            MirRelationExpr::Constant {
1532                rows: Ok(vec![]),
1533                typ,
1534            },
1535        )
1536    }
1537
1538    /// Take ownership of `self`, leaving an empty `MirRelationExpr::Constant` with the given scalar
1539    /// types. Nullability is ignored in the given `SqlColumnType`s, and instead we set the best
1540    /// possible nullability, since we are making an empty collection.
1541    pub fn take_safely_with_col_types(&mut self, typ: Vec<SqlColumnType>) -> MirRelationExpr {
1542        self.take_safely(Some(SqlRelationType::new(typ)))
1543    }
1544
1545    /// Take ownership of `self`, leaving an empty `MirRelationExpr::Constant` with an **incorrect** type.
1546    ///
1547    /// This should only be used if `self` is about to be dropped or otherwise overwritten.
1548    pub fn take_dangerous(&mut self) -> MirRelationExpr {
1549        let empty = MirRelationExpr::Constant {
1550            rows: Ok(vec![]),
1551            typ: SqlRelationType::new(Vec::new()),
1552        };
1553        std::mem::replace(self, empty)
1554    }
1555
1556    /// Replaces `self` with some logic applied to `self`.
1557    pub fn replace_using<F>(&mut self, logic: F)
1558    where
1559        F: FnOnce(MirRelationExpr) -> MirRelationExpr,
1560    {
1561        let empty = MirRelationExpr::Constant {
1562            rows: Ok(vec![]),
1563            typ: SqlRelationType::new(Vec::new()),
1564        };
1565        let expr = std::mem::replace(self, empty);
1566        *self = logic(expr);
1567    }
1568
1569    /// Store `self` in a `Let` and pass the corresponding `Get` to `body`.
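    ///
    /// A sketch (illustrative, not compiled as a doctest; `expr` and `id_gen` are hypothetical):
    /// ```ignore
    /// // Bind `expr` once and use the corresponding `Get` twice in the body.
    /// let doubled = expr.let_in(id_gen, |_id_gen, get_expr| {
    ///     Ok::<_, RecursionLimitError>(get_expr.clone().union(get_expr))
    /// })?;
    /// ```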
1570    pub fn let_in<Body, E>(self, id_gen: &mut IdGen, body: Body) -> Result<MirRelationExpr, E>
1571    where
1572        Body: FnOnce(&mut IdGen, MirRelationExpr) -> Result<MirRelationExpr, E>,
1573    {
1574        if let MirRelationExpr::Get { .. } = self {
1575            // already done
1576            body(id_gen, self)
1577        } else {
1578            let id = LocalId::new(id_gen.allocate_id());
1579            let get = MirRelationExpr::Get {
1580                id: Id::Local(id),
1581                typ: self.typ(),
1582                access_strategy: AccessStrategy::UnknownOrLocal,
1583            };
1584            let body = (body)(id_gen, get)?;
1585            Ok(MirRelationExpr::Let {
1586                id,
1587                value: Box::new(self),
1588                body: Box::new(body),
1589            })
1590        }
1591    }
1592
1593    /// Return every row in `self` that does not have a matching row in the first columns of `keys_and_values`, using `default` to fill in the remaining columns.
1594    /// (If `default` is a row of nulls, this is the 'outer' part of a LEFT OUTER JOIN.)
1595    pub fn anti_lookup<E>(
1596        self,
1597        id_gen: &mut IdGen,
1598        keys_and_values: MirRelationExpr,
1599        default: Vec<(Datum, SqlScalarType)>,
1600    ) -> Result<MirRelationExpr, E> {
1601        let (data, column_types): (Vec<_>, Vec<_>) = default
1602            .into_iter()
1603            .map(|(datum, scalar_type)| (datum, scalar_type.nullable(datum.is_null())))
1604            .unzip();
1605        assert_eq!(keys_and_values.arity() - self.arity(), data.len());
1606        self.let_in(id_gen, |_id_gen, get_keys| {
1607            let get_keys_arity = get_keys.arity();
1608            Ok(MirRelationExpr::join(
1609                vec![
1610                    // all the missing keys (with count 1)
1611                    keys_and_values
1612                        .distinct_by((0..get_keys_arity).collect())
1613                        .negate()
1614                        .union(get_keys.clone().distinct()),
1615                    // join with keys to get the correct counts
1616                    get_keys.clone(),
1617                ],
1618                (0..get_keys_arity).map(|i| vec![(0, i), (1, i)]).collect(),
1619            )
1620            // get rid of the extra copies of columns from keys
1621            .project((0..get_keys_arity).collect())
1622            // This join is logically equivalent to
1623            // `.map(<default_expr>)`, but using a join allows for
1624            // potential predicate pushdown and elision in the
1625            // optimizer.
1626            .product(MirRelationExpr::constant(
1627                vec![data],
1628                SqlRelationType::new(column_types),
1629            )))
1630        })
1631    }
1632
1633    /// Return:
1634    /// * every row in `keys_and_values`, and
1635    /// * every row in `self` that does not have a matching row in the first columns of
1636    ///   `keys_and_values`, using `default` to fill in the remaining columns.
1637    /// (This is a LEFT OUTER JOIN if:
1638    /// 1) `default` is a row of nulls, and
1639    /// 2) matching rows in `keys_and_values` and `self` have the same multiplicity.)
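    ///
    /// A sketch of building a LEFT OUTER JOIN this way (illustrative, not compiled as a doctest;
    /// `left`, `matches`, and `id_gen` are hypothetical, with `matches` having `left`'s columns
    /// as a prefix plus one extra column):
    /// ```ignore
    /// let padded = left.lookup(id_gen, matches, vec![(Datum::Null, SqlScalarType::Int64)])?;
    /// ```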
1640    pub fn lookup<E>(
1641        self,
1642        id_gen: &mut IdGen,
1643        keys_and_values: MirRelationExpr,
1644        default: Vec<(Datum<'static>, SqlScalarType)>,
1645    ) -> Result<MirRelationExpr, E> {
1646        keys_and_values.let_in(id_gen, |id_gen, get_keys_and_values| {
1647            Ok(get_keys_and_values.clone().union(self.anti_lookup(
1648                id_gen,
1649                get_keys_and_values,
1650                default,
1651            )?))
1652        })
1653    }
1654
1655    /// True iff the expression contains a temporal scalar expression (see `MirScalarExpr::contains_temporal`).
1656    pub fn contains_temporal(&self) -> bool {
1657        let mut contains = false;
1658        self.visit_scalars(&mut |e| contains = contains || e.contains_temporal());
1659        contains
1660    }
1661
1662    /// Fallible visitor for the [`MirScalarExpr`]s directly owned by this relation expression.
1663    ///
1664    /// The `f` visitor should not recursively descend into owned [`MirRelationExpr`]s.
1665    pub fn try_visit_scalars_mut1<F, E>(&mut self, f: &mut F) -> Result<(), E>
1666    where
1667        F: FnMut(&mut MirScalarExpr) -> Result<(), E>,
1668    {
1669        use MirRelationExpr::*;
1670        match self {
1671            Map { scalars, .. } => {
1672                for s in scalars {
1673                    f(s)?;
1674                }
1675            }
1676            Filter { predicates, .. } => {
1677                for p in predicates {
1678                    f(p)?;
1679                }
1680            }
1681            FlatMap { exprs, .. } => {
1682                for expr in exprs {
1683                    f(expr)?;
1684                }
1685            }
1686            Join {
1687                inputs: _,
1688                equivalences,
1689                implementation,
1690            } => {
1691                for equivalence in equivalences {
1692                    for expr in equivalence {
1693                        f(expr)?;
1694                    }
1695                }
1696                match implementation {
1697                    JoinImplementation::Differential((_, start_key, _), order) => {
1698                        if let Some(start_key) = start_key {
1699                            for k in start_key {
1700                                f(k)?;
1701                            }
1702                        }
1703                        for (_, lookup_key, _) in order {
1704                            for k in lookup_key {
1705                                f(k)?;
1706                            }
1707                        }
1708                    }
1709                    JoinImplementation::DeltaQuery(paths) => {
1710                        for path in paths {
1711                            for (_, lookup_key, _) in path {
1712                                for k in lookup_key {
1713                                    f(k)?;
1714                                }
1715                            }
1716                        }
1717                    }
1718                    JoinImplementation::IndexedFilter(_coll_id, _idx_id, index_key, _) => {
1719                        for k in index_key {
1720                            f(k)?;
1721                        }
1722                    }
1723                    JoinImplementation::Unimplemented => {} // No scalar exprs
1724                }
1725            }
1726            ArrangeBy { keys, .. } => {
1727                for key in keys {
1728                    for s in key {
1729                        f(s)?;
1730                    }
1731                }
1732            }
1733            Reduce {
1734                group_key,
1735                aggregates,
1736                ..
1737            } => {
1738                for s in group_key {
1739                    f(s)?;
1740                }
1741                for agg in aggregates {
1742                    f(&mut agg.expr)?;
1743                }
1744            }
1745            TopK { limit, .. } => {
1746                if let Some(s) = limit {
1747                    f(s)?;
1748                }
1749            }
1750            Constant { .. }
1751            | Get { .. }
1752            | Let { .. }
1753            | LetRec { .. }
1754            | Project { .. }
1755            | Negate { .. }
1756            | Threshold { .. }
1757            | Union { .. } => (),
1758        }
1759        Ok(())
1760    }
1761
1762    /// Fallible mutable visitor for the [`MirScalarExpr`]s in the [`MirRelationExpr`] subtree
1763    /// rooted at `self`.
1764    ///
1765    /// Note that this does not recurse into [`MirRelationExpr`] subtrees within [`MirScalarExpr`]
1766    /// nodes.
1767    pub fn try_visit_scalars_mut<F, E>(&mut self, f: &mut F) -> Result<(), E>
1768    where
1769        F: FnMut(&mut MirScalarExpr) -> Result<(), E>,
1770        E: From<RecursionLimitError>,
1771    {
1772        self.try_visit_mut_post(&mut |expr| expr.try_visit_scalars_mut1(f))
1773    }
1774
1775    /// Infallible mutable visitor for the [`MirScalarExpr`]s in the [`MirRelationExpr`] subtree
1776    /// rooted at `self`.
1777    ///
1778    /// Note that this does not recurse into [`MirRelationExpr`] subtrees within [`MirScalarExpr`]
1779    /// nodes.
1780    pub fn visit_scalars_mut<F>(&mut self, f: &mut F)
1781    where
1782        F: FnMut(&mut MirScalarExpr),
1783    {
1784        self.try_visit_scalars_mut(&mut |s| {
1785            f(s);
1786            Ok::<_, RecursionLimitError>(())
1787        })
1788        .expect("Unexpected error in `visit_scalars_mut` call");
1789    }
1790
1791    /// Fallible visitor for the [`MirScalarExpr`]s directly owned by this relation expression.
1792    ///
1793    /// The `f` visitor should not recursively descend into owned [`MirRelationExpr`]s.
1794    pub fn try_visit_scalars_1<F, E>(&self, f: &mut F) -> Result<(), E>
1795    where
1796        F: FnMut(&MirScalarExpr) -> Result<(), E>,
1797    {
1798        use MirRelationExpr::*;
1799        match self {
1800            Map { scalars, .. } => {
1801                for s in scalars {
1802                    f(s)?;
1803                }
1804            }
1805            Filter { predicates, .. } => {
1806                for p in predicates {
1807                    f(p)?;
1808                }
1809            }
1810            FlatMap { exprs, .. } => {
1811                for expr in exprs {
1812                    f(expr)?;
1813                }
1814            }
1815            Join {
1816                inputs: _,
1817                equivalences,
1818                implementation,
1819            } => {
1820                for equivalence in equivalences {
1821                    for expr in equivalence {
1822                        f(expr)?;
1823                    }
1824                }
1825                match implementation {
1826                    JoinImplementation::Differential((_, start_key, _), order) => {
1827                        if let Some(start_key) = start_key {
1828                            for k in start_key {
1829                                f(k)?;
1830                            }
1831                        }
1832                        for (_, lookup_key, _) in order {
1833                            for k in lookup_key {
1834                                f(k)?;
1835                            }
1836                        }
1837                    }
1838                    JoinImplementation::DeltaQuery(paths) => {
1839                        for path in paths {
1840                            for (_, lookup_key, _) in path {
1841                                for k in lookup_key {
1842                                    f(k)?;
1843                                }
1844                            }
1845                        }
1846                    }
1847                    JoinImplementation::IndexedFilter(_coll_id, _idx_id, index_key, _) => {
1848                        for k in index_key {
1849                            f(k)?;
1850                        }
1851                    }
1852                    JoinImplementation::Unimplemented => {} // No scalar exprs
1853                }
1854            }
1855            ArrangeBy { keys, .. } => {
1856                for key in keys {
1857                    for s in key {
1858                        f(s)?;
1859                    }
1860                }
1861            }
1862            Reduce {
1863                group_key,
1864                aggregates,
1865                ..
1866            } => {
1867                for s in group_key {
1868                    f(s)?;
1869                }
1870                for agg in aggregates {
1871                    f(&agg.expr)?;
1872                }
1873            }
1874            TopK { limit, .. } => {
1875                if let Some(s) = limit {
1876                    f(s)?;
1877                }
1878            }
1879            Constant { .. }
1880            | Get { .. }
1881            | Let { .. }
1882            | LetRec { .. }
1883            | Project { .. }
1884            | Negate { .. }
1885            | Threshold { .. }
1886            | Union { .. } => (),
1887        }
1888        Ok(())
1889    }
1890
1891    /// Fallible immutable visitor for the [`MirScalarExpr`]s in the [`MirRelationExpr`] subtree
1892    /// rooted at `self`.
1893    ///
1894    /// Note that this does not recurse into [`MirRelationExpr`] subtrees within [`MirScalarExpr`]
1895    /// nodes.
1896    pub fn try_visit_scalars<F, E>(&self, f: &mut F) -> Result<(), E>
1897    where
1898        F: FnMut(&MirScalarExpr) -> Result<(), E>,
1899        E: From<RecursionLimitError>,
1900    {
1901        self.try_visit_post(&mut |expr| expr.try_visit_scalars_1(f))
1902    }
1903
1904    /// Infallible immutable visitor for the [`MirScalarExpr`]s in the [`MirRelationExpr`] subtree
1905    /// rooted at `self`.
1906    ///
1907    /// Note that this does not recurse into [`MirRelationExpr`] subtrees within [`MirScalarExpr`]
1908    /// nodes.
1909    pub fn visit_scalars<F>(&self, f: &mut F)
1910    where
1911        F: FnMut(&MirScalarExpr),
1912    {
1913        self.try_visit_scalars(&mut |s| {
1914            f(s);
1915            Ok::<_, RecursionLimitError>(())
1916        })
1917        .expect("Unexpected error in `visit_scalars` call");
1918    }
1919
1920    /// Clears the contents of `self` even if it's so deep that simply dropping it would cause a
1921    /// stack overflow in `drop_in_place`.
1922    ///
1923    /// Leaves `self` in an unusable state, so this should only be used if `self` is about to be
1924    /// dropped or otherwise overwritten.
1925    pub fn destroy_carefully(&mut self) {
1926        let mut todo = vec![self.take_dangerous()];
1927        while let Some(mut expr) = todo.pop() {
1928            for child in expr.children_mut() {
1929                todo.push(child.take_dangerous());
1930            }
1931        }
1932    }
1933
1934    /// Computes the size (total number of nodes) and maximum depth of a MirRelationExpr for
1935    /// debug printing purposes.
1936    pub fn debug_size_and_depth(&self) -> (usize, usize) {
1937        let mut size = 0;
1938        let mut max_depth = 0;
1939        let mut todo = vec![(self, 1)];
1940        while let Some((expr, depth)) = todo.pop() {
1941            size += 1;
1942            max_depth = max(max_depth, depth);
1943            todo.extend(expr.children().map(|c| (c, depth + 1)));
1944        }
1945        (size, max_depth)
1946    }
1947
1948    /// The MirRelationExpr is considered potentially expensive if and only if
1949    /// at least one of the following conditions is true:
1950    ///
1951    ///  - It contains at least one FlatMap or a Reduce operator.
1952    ///  - It contains at least one MirScalarExpr with a function call.
1953    ///
1954    /// !!!WARNING!!!: this method has an HirRelationExpr counterpart. The two
1955    /// should be kept in sync w.r.t. HIR ⇒ MIR lowering!
1956    pub fn could_run_expensive_function(&self) -> bool {
1957        let mut result = false;
1958        self.visit_pre(|e: &MirRelationExpr| {
1959            use MirRelationExpr::*;
1960            use MirScalarExpr::*;
1961            if let Err(_) = self.try_visit_scalars::<_, RecursionLimitError>(&mut |scalar| {
1962                result |= match scalar {
1963                    Column(_, _) | Literal(_, _) | CallUnmaterializable(_) | If { .. } => false,
1964                    // Function calls are considered expensive
1965                    CallUnary { .. } | CallBinary { .. } | CallVariadic { .. } => true,
1966                };
1967                Ok(())
1968            }) {
1969                // Conservatively set `true` if on RecursionLimitError.
1970                result = true;
1971            }
1972            // FlatMap has a table function; Reduce has an aggregate function.
1973            // Other constructs use MirScalarExpr to run a function
1974            result |= matches!(e, FlatMap { .. } | Reduce { .. });
1975        });
1976        result
1977    }
1978
1979    /// Hash to a u64 using Rust's default Hasher. (This is somewhat slower than
1980    /// `Hashable::hashed`, but a better hasher.)
1981    pub fn hash_to_u64(&self) -> u64 {
1982        let mut h = DefaultHasher::new();
1983        self.hash(&mut h);
1984        h.finish()
1985    }
1986}
1987
1988// `LetRec` helpers
1989impl MirRelationExpr {
1990    /// True when `expr` contains a `LetRec` AST node.
1991    pub fn is_recursive(self: &MirRelationExpr) -> bool {
1992        let mut worklist = vec![self];
1993        while let Some(expr) = worklist.pop() {
1994            if let MirRelationExpr::LetRec { .. } = expr {
1995                return true;
1996            }
1997            worklist.extend(expr.children());
1998        }
1999        false
2000    }
2001
2002    /// Return the number of sub-expressions in the tree (including self).
2003    pub fn size(&self) -> usize {
2004        let mut size = 0;
2005        self.visit_pre(|_| size += 1);
2006        size
2007    }
2008
2009    /// Given the ids and values of a LetRec, it computes the subset of ids that are used across
2010    /// iterations. These are those ids that have a reference before they are defined, when reading
2011    /// all the bindings in order.
2012    ///
2013    /// For example:
2014    /// ```SQL
2015    /// WITH MUTUALLY RECURSIVE
2016    ///     x(...) AS f(z),
2017    ///     y(...) AS g(x),
2018    ///     z(...) AS h(y)
2019    /// ...;
2020    /// ```
2021    /// Here, only `z` is returned, because `x` and `y` are referenced only within the same
2022    /// iteration.
2023    ///
2024    /// Note that if a binding references itself, that is also returned.
2025    pub fn recursive_ids(ids: &[LocalId], values: &[MirRelationExpr]) -> BTreeSet<LocalId> {
2026        let mut used_across_iterations = BTreeSet::new();
2027        let mut defined = BTreeSet::new();
2028        for (binding_id, value) in itertools::zip_eq(ids.iter(), values.iter()) {
2029            value.visit_pre(|expr| {
2030                if let MirRelationExpr::Get {
2031                    id: Local(get_id), ..
2032                } = expr
2033                {
2034                    // If we haven't seen a definition for it yet, then this will refer
2035                    // to the previous iteration.
2036                    // The `ids.contains` part of the condition is needed to exclude
2037                    // those ids that are not really in this LetRec, but either an inner
2038                    // or outer one.
2039                    if !defined.contains(get_id) && ids.contains(get_id) {
2040                        used_across_iterations.insert(*get_id);
2041                    }
2042                }
2043            });
2044            defined.insert(*binding_id);
2045        }
2046        used_across_iterations
2047    }
2048
2049    /// Replaces `LetRec` nodes with a stack of `Let` nodes.
2050    ///
2051    /// In each `Let` binding, uses of `Get` in `value` that are not at strictly greater
2052    /// identifiers are rewritten to an empty constant collection.
2053    /// This makes the computation perform exactly "one" iteration.
2054    ///
2055    /// This was used only temporarily while developing `LetRec`.
2056    pub fn make_nonrecursive(self: &mut MirRelationExpr) {
2057        let mut deadlist = BTreeSet::new();
2058        let mut worklist = vec![self];
2059        while let Some(expr) = worklist.pop() {
2060            if let MirRelationExpr::LetRec {
2061                ids,
2062                values,
2063                limits: _,
2064                body,
2065            } = expr
2066            {
2067                let ids_values = values
2068                    .drain(..)
2069                    .zip_eq(ids)
2070                    .map(|(value, id)| (*id, value))
2071                    .collect::<Vec<_>>();
2072                *expr = body.take_dangerous();
2073                for (id, mut value) in ids_values.into_iter().rev() {
2074                    // Remove references to potentially recursive identifiers.
2075                    deadlist.insert(id);
2076                    value.visit_pre_mut(|e| {
2077                        if let MirRelationExpr::Get {
2078                            id: crate::Id::Local(id),
2079                            typ,
2080                            ..
2081                        } = e
2082                        {
2083                            let typ = typ.clone();
2084                            if deadlist.contains(id) {
2085                                e.take_safely(Some(typ));
2086                            }
2087                        }
2088                    });
2089                    *expr = MirRelationExpr::Let {
2090                        id,
2091                        value: Box::new(value),
2092                        body: Box::new(expr.take_dangerous()),
2093                    };
2094                }
2095                worklist.push(expr);
2096            } else {
2097                worklist.extend(expr.children_mut().rev());
2098            }
2099        }
2100    }
2101
2102    /// For each Id `id'` referenced in `expr`, if it is greater than or equal to `id`, record in
2103    /// `expire_whens` that when `id'` is redefined, we should expire the information that
2104    /// we are holding about `id`. Call `do_expirations` with `expire_whens` at each Id
2105    /// redefinition.
2106    ///
2107    /// IMPORTANT: Relies on the numbering of Ids to be what `renumber_bindings` gives.
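    ///
    /// A sketch of the intended protocol (illustrative, not compiled as a doctest; `expire_whens`
    /// and the analysis map `knowledge` are hypothetical):
    /// ```ignore
    /// // While walking the (renumbered) bindings: the info we hold about `id` depends on every
    /// // id that `value` references at or above `id`.
    /// MirRelationExpr::collect_expirations(id, &value, &mut expire_whens);
    /// // Later, whenever some id is redefined, drop the info that depended on its old definition.
    /// let expired = MirRelationExpr::do_expirations(redefined_id, &mut expire_whens, &mut knowledge);
    /// ```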
2108    pub fn collect_expirations(
2109        id: LocalId,
2110        expr: &MirRelationExpr,
2111        expire_whens: &mut BTreeMap<LocalId, Vec<LocalId>>,
2112    ) {
2113        expr.visit_pre(|e| {
2114            if let MirRelationExpr::Get {
2115                id: Id::Local(referenced_id),
2116                ..
2117            } = e
2118            {
2119                // The following check needs `renumber_bindings` to have run recently
2120                if referenced_id >= &id {
2121                    expire_whens
2122                        .entry(*referenced_id)
2123                        .or_insert_with(Vec::new)
2124                        .push(id);
2125                }
2126            }
2127        });
2128    }
2129
2130    /// Call this function when `id` is redefined. It modifies `id_infos` by removing information
2131    /// about such Ids whose information depended on the earlier definition of `id`, according to
2132    /// `expire_whens`. Also modifies `expire_whens`: it removes the currently processed entry.
2133    pub fn do_expirations<I>(
2134        redefined_id: LocalId,
2135        expire_whens: &mut BTreeMap<LocalId, Vec<LocalId>>,
2136        id_infos: &mut BTreeMap<LocalId, I>,
2137    ) -> Vec<(LocalId, I)> {
2138        let mut expired_infos = Vec::new();
2139        if let Some(expirations) = expire_whens.remove(&redefined_id) {
2140            for expired_id in expirations.into_iter() {
2141                if let Some(offer) = id_infos.remove(&expired_id) {
2142                    expired_infos.push((expired_id, offer));
2143                }
2144            }
2145        }
2146        expired_infos
2147    }
2148}

2149/// Augment non-nullability of columns by observing either
2150/// 1. Predicates that explicitly test for null values, and
2151/// 2. Columns that, if null, would make a predicate be null.
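///
/// A sketch of a qualifying predicate (illustrative, not compiled as a doctest):
/// ```ignore
/// // The predicate `NOT((#0) IS NULL)` marks column 0 as non-null.
/// let not_null_0 = MirScalarExpr::column(0)
///     .call_unary(UnaryFunc::IsNull(mz_expr::func::IsNull))
///     .call_unary(UnaryFunc::Not(mz_expr::func::Not));
/// assert!(non_nullable_columns(&[not_null_0]).contains(&0));
/// ```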
2152pub fn non_nullable_columns(predicates: &[MirScalarExpr]) -> BTreeSet<usize> {
2153    let mut nonnull_required_columns = BTreeSet::new();
2154    for predicate in predicates {
2155        // Add any columns that being null would force the predicate to be null.
2156        // Should that happen, the row would be discarded.
2157        predicate.non_null_requirements(&mut nonnull_required_columns);
2158
2159        /*
2160        Test for explicit checks that a column is non-null.
2161
2162        This analysis is ad hoc, and will miss things:
2163
2164        materialize=> create table a(x int, y int);
2165        CREATE TABLE
2166        materialize=> explain with(types) select x from a where (y=x and y is not null) or x is not null;
2167        Optimized Plan
2168        --------------------------------------------------------------------------------------------------------
2169        Explained Query:                                                                                      +
2170        Project (#0) // { types: "(integer?)" }                                                             +
2171        Filter ((#0) IS NOT NULL OR ((#1) IS NOT NULL AND (#0 = #1))) // { types: "(integer?, integer?)" }+
2172        Get materialize.public.a // { types: "(integer?, integer?)" }                                   +
2173                                                                                  +
2174        Source materialize.public.a                                                                           +
2175        filter=(((#0) IS NOT NULL OR ((#1) IS NOT NULL AND (#0 = #1))))                                     +
2176
2177        (1 row)
2178        */
2179
2180        if let MirScalarExpr::CallUnary {
2181            func: UnaryFunc::Not(scalar_func::Not),
2182            expr,
2183        } = predicate
2184        {
2185            if let MirScalarExpr::CallUnary {
2186                func: UnaryFunc::IsNull(scalar_func::IsNull),
2187                expr,
2188            } = &**expr
2189            {
2190                if let MirScalarExpr::Column(c, _name) = &**expr {
2191                    nonnull_required_columns.insert(*c);
2192                }
2193            }
2194        }
2195    }
2196
2197    nonnull_required_columns
2198}
2199
2200impl CollectionPlan for MirRelationExpr {
2201    // !!!WARNING!!!: this method has an HirRelationExpr counterpart. The two
2202    // should be kept in sync w.r.t. HIR ⇒ MIR lowering!
2203    fn depends_on_into(&self, out: &mut BTreeSet<GlobalId>) {
2204        if let MirRelationExpr::Get {
2205            id: Id::Global(id), ..
2206        } = self
2207        {
2208            out.insert(*id);
2209        }
2210        self.visit_children(|expr| expr.depends_on_into(out))
2211    }
2212}
2213
2214impl MirRelationExpr {
2215    /// Iterates through references to child expressions.
2216    pub fn children(&self) -> impl DoubleEndedIterator<Item = &Self> {
2217        let mut first = None;
2218        let mut second = None;
2219        let mut rest = None;
2220        let mut last = None;
2221
2222        use MirRelationExpr::*;
2223        match self {
2224            Constant { .. } | Get { .. } => (),
2225            Let { value, body, .. } => {
2226                first = Some(&**value);
2227                second = Some(&**body);
2228            }
2229            LetRec { values, body, .. } => {
2230                rest = Some(values);
2231                last = Some(&**body);
2232            }
2233            Project { input, .. }
2234            | Map { input, .. }
2235            | FlatMap { input, .. }
2236            | Filter { input, .. }
2237            | Reduce { input, .. }
2238            | TopK { input, .. }
2239            | Negate { input }
2240            | Threshold { input }
2241            | ArrangeBy { input, .. } => {
2242                first = Some(&**input);
2243            }
2244            Join { inputs, .. } => {
2245                rest = Some(inputs);
2246            }
2247            Union { base, inputs } => {
2248                first = Some(&**base);
2249                rest = Some(inputs);
2250            }
2251        }
2252
2253        first
2254            .into_iter()
2255            .chain(second)
2256            .chain(rest.into_iter().flatten())
2257            .chain(last)
2258    }
2259
2260    /// Iterates through mutable references to child expressions.
2261    pub fn children_mut(&mut self) -> impl DoubleEndedIterator<Item = &mut Self> {
2262        let mut first = None;
2263        let mut second = None;
2264        let mut rest = None;
2265        let mut last = None;
2266
2267        use MirRelationExpr::*;
2268        match self {
2269            Constant { .. } | Get { .. } => (),
2270            Let { value, body, .. } => {
2271                first = Some(&mut **value);
2272                second = Some(&mut **body);
2273            }
2274            LetRec { values, body, .. } => {
2275                rest = Some(values);
2276                last = Some(&mut **body);
2277            }
2278            Project { input, .. }
2279            | Map { input, .. }
2280            | FlatMap { input, .. }
2281            | Filter { input, .. }
2282            | Reduce { input, .. }
2283            | TopK { input, .. }
2284            | Negate { input }
2285            | Threshold { input }
2286            | ArrangeBy { input, .. } => {
2287                first = Some(&mut **input);
2288            }
2289            Join { inputs, .. } => {
2290                rest = Some(inputs);
2291            }
2292            Union { base, inputs } => {
2293                first = Some(&mut **base);
2294                rest = Some(inputs);
2295            }
2296        }
2297
2298        first
2299            .into_iter()
2300            .chain(second)
2301            .chain(rest.into_iter().flatten())
2302            .chain(last)
2303    }
2304
2305    /// Iterative pre-order visitor.
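    ///
    /// A sketch (illustrative, not compiled as a doctest; `expr` is a hypothetical expression):
    /// ```ignore
    /// // Count `Get` nodes without recursing on the call stack.
    /// let mut gets = 0;
    /// expr.visit_pre(|e| {
    ///     if let MirRelationExpr::Get { .. } = e {
    ///         gets += 1;
    ///     }
    /// });
    /// ```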
2306    pub fn visit_pre<'a, F: FnMut(&'a Self)>(&'a self, mut f: F) {
2307        let mut worklist = vec![self];
2308        while let Some(expr) = worklist.pop() {
2309            f(expr);
2310            worklist.extend(expr.children().rev());
2311        }
2312    }
2313
2314    /// Iterative pre-order visitor over mutable references.
2315    pub fn visit_pre_mut<F: FnMut(&mut Self)>(&mut self, mut f: F) {
2316        let mut worklist = vec![self];
2317        while let Some(expr) = worklist.pop() {
2318            f(expr);
2319            worklist.extend(expr.children_mut().rev());
2320        }
2321    }
2322
2323    /// Return a vector of references to the subtrees of this expression
2324    /// in post-visit order (the last element is `&self`).
2325    pub fn post_order_vec(&self) -> Vec<&Self> {
2326        let mut stack = vec![self];
2327        let mut result = vec![];
2328        while let Some(expr) = stack.pop() {
2329            result.push(expr);
2330            stack.extend(expr.children());
2331        }
2332        result.reverse();
2333        result
2334    }
2335}
2336
2337impl VisitChildren<Self> for MirRelationExpr {
2338    fn visit_children<F>(&self, mut f: F)
2339    where
2340        F: FnMut(&Self),
2341    {
2342        for child in self.children() {
2343            f(child)
2344        }
2345    }
2346
2347    fn visit_mut_children<F>(&mut self, mut f: F)
2348    where
2349        F: FnMut(&mut Self),
2350    {
2351        for child in self.children_mut() {
2352            f(child)
2353        }
2354    }
2355
2356    fn try_visit_children<F, E>(&self, mut f: F) -> Result<(), E>
2357    where
2358        F: FnMut(&Self) -> Result<(), E>,
2359        E: From<RecursionLimitError>,
2360    {
2361        for child in self.children() {
2362            f(child)?
2363        }
2364        Ok(())
2365    }
2366
2367    fn try_visit_mut_children<F, E>(&mut self, mut f: F) -> Result<(), E>
2368    where
2369        F: FnMut(&mut Self) -> Result<(), E>,
2370        E: From<RecursionLimitError>,
2371    {
2372        for child in self.children_mut() {
2373            f(child)?
2374        }
2375        Ok(())
2376    }
2377}
2378
2379/// Specification for an ordering by a column.
2380#[derive(
2381    Debug, Clone, Copy, Eq, PartialEq, Ord, PartialOrd, Serialize, Deserialize, Hash, MzReflect,
2382)]
2383pub struct ColumnOrder {
2384    /// The column index.
2385    pub column: usize,
2386    /// Whether to sort in descending order.
2387    #[serde(default)]
2388    pub desc: bool,
2389    /// Whether to sort nulls last.
2390    #[serde(default)]
2391    pub nulls_last: bool,
2392}
2393
2394impl Columnation for ColumnOrder {
2395    type InnerRegion = CopyRegion<Self>;
2396}
2397
2398impl<'a, M> fmt::Display for HumanizedExpr<'a, ColumnOrder, M>
2399where
2400    M: HumanizerMode,
2401{
2402    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2403        // If you modify this, then please also attend to Display for ColumnOrderWithExpr!
2404        write!(
2405            f,
2406            "{} {} {}",
2407            self.child(&self.expr.column),
2408            if self.expr.desc { "desc" } else { "asc" },
2409            if self.expr.nulls_last {
2410                "nulls_last"
2411            } else {
2412                "nulls_first"
2413            },
2414        )
2415    }
2416}
2417
2418/// Describes an aggregation expression.
2419#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Serialize, Deserialize, Hash, MzReflect)]
2420pub struct AggregateExpr {
2421    /// Names the aggregation function.
2422    pub func: AggregateFunc,
2423    /// An expression which extracts from each row the input to `func`.
2424    pub expr: MirScalarExpr,
2425    /// Should the aggregation be applied only to distinct results in each group.
2426    #[serde(default)]
2427    pub distinct: bool,
2428}
2429
2430impl AggregateExpr {
2431    /// Computes the type of this `AggregateExpr`.
2432    pub fn typ(&self, column_types: &[SqlColumnType]) -> SqlColumnType {
2433        self.func.output_type(self.expr.typ(column_types))
2434    }
2435
2436    /// Returns whether the expression has a constant result.
2437    pub fn is_constant(&self) -> bool {
2438        match self.func {
2439            AggregateFunc::MaxInt16
2440            | AggregateFunc::MaxInt32
2441            | AggregateFunc::MaxInt64
2442            | AggregateFunc::MaxUInt16
2443            | AggregateFunc::MaxUInt32
2444            | AggregateFunc::MaxUInt64
2445            | AggregateFunc::MaxMzTimestamp
2446            | AggregateFunc::MaxFloat32
2447            | AggregateFunc::MaxFloat64
2448            | AggregateFunc::MaxBool
2449            | AggregateFunc::MaxString
2450            | AggregateFunc::MaxDate
2451            | AggregateFunc::MaxTimestamp
2452            | AggregateFunc::MaxTimestampTz
2453            | AggregateFunc::MinInt16
2454            | AggregateFunc::MinInt32
2455            | AggregateFunc::MinInt64
2456            | AggregateFunc::MinUInt16
2457            | AggregateFunc::MinUInt32
2458            | AggregateFunc::MinUInt64
2459            | AggregateFunc::MinMzTimestamp
2460            | AggregateFunc::MinFloat32
2461            | AggregateFunc::MinFloat64
2462            | AggregateFunc::MinBool
2463            | AggregateFunc::MinString
2464            | AggregateFunc::MinDate
2465            | AggregateFunc::MinTimestamp
2466            | AggregateFunc::MinTimestampTz
2467            | AggregateFunc::Any
2468            | AggregateFunc::All
2469            | AggregateFunc::Dummy => self.expr.is_literal(),
2470            AggregateFunc::Count => self.expr.is_literal_null(),
2471            _ => self.expr.is_literal_err(),
2472        }
2473    }
2474
2475    /// Returns an expression that computes `self` on a group that has exactly one row.
2476    /// Instead of performing a `Reduce` with `self`, one can perform a `Map` with the expression
2477    /// returned by `on_unique`, which is cheaper. (See `ReduceElision`.)
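    ///
    /// A sketch (illustrative, not compiled as a doctest; `input_type` is a hypothetical
    /// `&[SqlColumnType]` describing the input row):
    /// ```ignore
    /// // On a single-row group, `count(#0)` reduces to `if #0 IS NULL then 0 else 1`,
    /// // which `on_unique` builds as a scalar expression usable in a `Map`.
    /// let agg = AggregateExpr {
    ///     func: AggregateFunc::Count,
    ///     expr: MirScalarExpr::column(0),
    ///     distinct: false,
    /// };
    /// let scalar = agg.on_unique(input_type);
    /// ```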
2478    pub fn on_unique(&self, input_type: &[SqlColumnType]) -> MirScalarExpr {
2479        match &self.func {
2480            // Count is one if non-null, and zero if null.
2481            AggregateFunc::Count => self
2482                .expr
2483                .clone()
2484                .call_unary(UnaryFunc::IsNull(crate::func::IsNull))
2485                .if_then_else(
2486                    MirScalarExpr::literal_ok(Datum::Int64(0), SqlScalarType::Int64),
2487                    MirScalarExpr::literal_ok(Datum::Int64(1), SqlScalarType::Int64),
2488                ),
2489
2490            // SumInt16 takes Int16s as input, but outputs Int64s.
2491            AggregateFunc::SumInt16 => self
2492                .expr
2493                .clone()
2494                .call_unary(UnaryFunc::CastInt16ToInt64(scalar_func::CastInt16ToInt64)),
2495
2496            // SumInt32 takes Int32s as input, but outputs Int64s.
2497            AggregateFunc::SumInt32 => self
2498                .expr
2499                .clone()
2500                .call_unary(UnaryFunc::CastInt32ToInt64(scalar_func::CastInt32ToInt64)),
2501
2502            // SumInt64 takes Int64s as input, but outputs numerics.
2503            AggregateFunc::SumInt64 => self.expr.clone().call_unary(UnaryFunc::CastInt64ToNumeric(
2504                scalar_func::CastInt64ToNumeric(Some(NumericMaxScale::ZERO)),
2505            )),
2506
2507            // SumUInt16 takes UInt16s as input, but outputs UInt64s.
2508            AggregateFunc::SumUInt16 => self.expr.clone().call_unary(
2509                UnaryFunc::CastUint16ToUint64(scalar_func::CastUint16ToUint64),
2510            ),
2511
2512            // SumUInt32 takes UInt32s as input, but outputs UInt64s.
2513            AggregateFunc::SumUInt32 => self.expr.clone().call_unary(
2514                UnaryFunc::CastUint32ToUint64(scalar_func::CastUint32ToUint64),
2515            ),
2516
2517            // SumUInt64 takes UInt64s as input, but outputs numerics.
2518            AggregateFunc::SumUInt64 => {
2519                self.expr.clone().call_unary(UnaryFunc::CastUint64ToNumeric(
2520                    scalar_func::CastUint64ToNumeric(Some(NumericMaxScale::ZERO)),
2521                ))
2522            }
2523
2524            // JsonbAgg takes _anything_ as input, but must output a Jsonb array.
2525            AggregateFunc::JsonbAgg { .. } => MirScalarExpr::CallVariadic {
2526                func: VariadicFunc::JsonbBuildArray,
2527                exprs: vec![
2528                    self.expr
2529                        .clone()
2530                        .call_unary(UnaryFunc::RecordGet(scalar_func::RecordGet(0))),
2531                ],
2532            },
2533
2534            // JsonbObjectAgg takes _anything_ as input, but must output a Jsonb object.
2535            AggregateFunc::JsonbObjectAgg { .. } => {
2536                let record = self
2537                    .expr
2538                    .clone()
2539                    .call_unary(UnaryFunc::RecordGet(scalar_func::RecordGet(0)));
2540                MirScalarExpr::CallVariadic {
2541                    func: VariadicFunc::JsonbBuildObject,
2542                    exprs: (0..2)
2543                        .map(|i| {
2544                            record
2545                                .clone()
2546                                .call_unary(UnaryFunc::RecordGet(scalar_func::RecordGet(i)))
2547                        })
2548                        .collect(),
2549                }
2550            }
2551
2552            AggregateFunc::MapAgg { value_type, .. } => {
2553                let record = self
2554                    .expr
2555                    .clone()
2556                    .call_unary(UnaryFunc::RecordGet(scalar_func::RecordGet(0)));
2557                MirScalarExpr::CallVariadic {
2558                    func: VariadicFunc::MapBuild {
2559                        value_type: value_type.clone(),
2560                    },
2561                    exprs: (0..2)
2562                        .map(|i| {
2563                            record
2564                                .clone()
2565                                .call_unary(UnaryFunc::RecordGet(scalar_func::RecordGet(i)))
2566                        })
2567                        .collect(),
2568                }
2569            }
2570
2571            // StringAgg takes nested records of strings and outputs a string
2572            AggregateFunc::StringAgg { .. } => self
2573                .expr
2574                .clone()
2575                .call_unary(UnaryFunc::RecordGet(scalar_func::RecordGet(0)))
2576                .call_unary(UnaryFunc::RecordGet(scalar_func::RecordGet(0))),
2577
2578            // ListConcat and ArrayConcat take a single level of records and output a list containing exactly 1 element
2579            AggregateFunc::ListConcat { .. } | AggregateFunc::ArrayConcat { .. } => self
2580                .expr
2581                .clone()
2582                .call_unary(UnaryFunc::RecordGet(scalar_func::RecordGet(0))),
2583
2584            // RowNumber, Rank, DenseRank take a list of records and output a list containing exactly 1 element
2585            AggregateFunc::RowNumber { .. } => {
2586                self.on_unique_ranking_window_funcs(input_type, "?row_number?")
2587            }
2588            AggregateFunc::Rank { .. } => self.on_unique_ranking_window_funcs(input_type, "?rank?"),
2589            AggregateFunc::DenseRank { .. } => {
2590                self.on_unique_ranking_window_funcs(input_type, "?dense_rank?")
2591            }
2592
2593            // The input type for LagLead is ((OriginalRow, (InputValue, Offset, Default)), OrderByExprs...)
2594            AggregateFunc::LagLead { lag_lead, .. } => {
2595                let tuple = self
2596                    .expr
2597                    .clone()
2598                    .call_unary(UnaryFunc::RecordGet(scalar_func::RecordGet(0)));
2599
2600                // Get the overall return type
2601                let return_type_with_orig_row = self
2602                    .typ(input_type)
2603                    .scalar_type
2604                    .unwrap_list_element_type()
2605                    .clone();
2606                let lag_lead_return_type =
2607                    return_type_with_orig_row.unwrap_record_element_type()[0].clone();
2608
2609                // Extract the original row
2610                let original_row = tuple
2611                    .clone()
2612                    .call_unary(UnaryFunc::RecordGet(scalar_func::RecordGet(0)));
2613
2614                // Extract the encoded args
2615                let encoded_args =
2616                    tuple.call_unary(UnaryFunc::RecordGet(scalar_func::RecordGet(1)));
2617
2618                let (result_expr, column_name) =
2619                    Self::on_unique_lag_lead(lag_lead, encoded_args, lag_lead_return_type.clone());
2620
2621                MirScalarExpr::CallVariadic {
2622                    func: VariadicFunc::ListCreate {
2623                        elem_type: return_type_with_orig_row,
2624                    },
2625                    exprs: vec![MirScalarExpr::CallVariadic {
2626                        func: VariadicFunc::RecordCreate {
2627                            field_names: vec![column_name, ColumnName::from("?record?")],
2628                        },
2629                        exprs: vec![result_expr, original_row],
2630                    }],
2631                }
2632            }
2633
2634            // The input type for FirstValue is ((OriginalRow, InputValue), OrderByExprs...)
2635            AggregateFunc::FirstValue { window_frame, .. } => {
2636                let tuple = self
2637                    .expr
2638                    .clone()
2639                    .call_unary(UnaryFunc::RecordGet(scalar_func::RecordGet(0)));
2640
2641                // Get the overall return type
2642                let return_type_with_orig_row = self
2643                    .typ(input_type)
2644                    .scalar_type
2645                    .unwrap_list_element_type()
2646                    .clone();
2647                let first_value_return_type =
2648                    return_type_with_orig_row.unwrap_record_element_type()[0].clone();
2649
2650                // Extract the original row
2651                let original_row = tuple
2652                    .clone()
2653                    .call_unary(UnaryFunc::RecordGet(scalar_func::RecordGet(0)));
2654
2655                // Extract the input value
2656                let arg = tuple.call_unary(UnaryFunc::RecordGet(scalar_func::RecordGet(1)));
2657
2658                let (result_expr, column_name) = Self::on_unique_first_value_last_value(
2659                    window_frame,
2660                    arg,
2661                    first_value_return_type,
2662                );
2663
2664                MirScalarExpr::CallVariadic {
2665                    func: VariadicFunc::ListCreate {
2666                        elem_type: return_type_with_orig_row,
2667                    },
2668                    exprs: vec![MirScalarExpr::CallVariadic {
2669                        func: VariadicFunc::RecordCreate {
2670                            field_names: vec![column_name, ColumnName::from("?record?")],
2671                        },
2672                        exprs: vec![result_expr, original_row],
2673                    }],
2674                }
2675            }
2676
2677            // The input type for LastValue is ((OriginalRow, InputValue), OrderByExprs...)
2678            AggregateFunc::LastValue { window_frame, .. } => {
2679                let tuple = self
2680                    .expr
2681                    .clone()
2682                    .call_unary(UnaryFunc::RecordGet(scalar_func::RecordGet(0)));
2683
2684                // Get the overall return type
2685                let return_type_with_orig_row = self
2686                    .typ(input_type)
2687                    .scalar_type
2688                    .unwrap_list_element_type()
2689                    .clone();
2690                let last_value_return_type =
2691                    return_type_with_orig_row.unwrap_record_element_type()[0].clone();
2692
2693                // Extract the original row
2694                let original_row = tuple
2695                    .clone()
2696                    .call_unary(UnaryFunc::RecordGet(scalar_func::RecordGet(0)));
2697
2698                // Extract the input value
2699                let arg = tuple.call_unary(UnaryFunc::RecordGet(scalar_func::RecordGet(1)));
2700
2701                let (result_expr, column_name) = Self::on_unique_first_value_last_value(
2702                    window_frame,
2703                    arg,
2704                    last_value_return_type,
2705                );
2706
2707                MirScalarExpr::CallVariadic {
2708                    func: VariadicFunc::ListCreate {
2709                        elem_type: return_type_with_orig_row,
2710                    },
2711                    exprs: vec![MirScalarExpr::CallVariadic {
2712                        func: VariadicFunc::RecordCreate {
2713                            field_names: vec![column_name, ColumnName::from("?record?")],
2714                        },
2715                        exprs: vec![result_expr, original_row],
2716                    }],
2717                }
2718            }
2719
2720            // The input type for window aggs is ((OriginalRow, InputValue), OrderByExprs...)
2721            // See an example MIR in `window_func_applied_to`.
2722            AggregateFunc::WindowAggregate {
2723                wrapped_aggregate,
2724                window_frame,
2725                order_by: _,
2726            } => {
2727                // TODO: deduplicate code between the various window function cases.
2728
2729                let tuple = self
2730                    .expr
2731                    .clone()
2732                    .call_unary(UnaryFunc::RecordGet(scalar_func::RecordGet(0)));
2733
2734                // Get the overall return type
2735                let return_type = self
2736                    .typ(input_type)
2737                    .scalar_type
2738                    .unwrap_list_element_type()
2739                    .clone();
2740                let window_agg_return_type = return_type.unwrap_record_element_type()[0].clone();
2741
2742                // Extract the original row
2743                let original_row = tuple
2744                    .clone()
2745                    .call_unary(UnaryFunc::RecordGet(scalar_func::RecordGet(0)));
2746
2747                // Extract the input value
2748                let arg_expr = tuple.call_unary(UnaryFunc::RecordGet(scalar_func::RecordGet(1)));
2749
2750                let (result, column_name) = Self::on_unique_window_agg(
2751                    window_frame,
2752                    arg_expr,
2753                    input_type,
2754                    window_agg_return_type,
2755                    wrapped_aggregate,
2756                );
2757
2758                MirScalarExpr::CallVariadic {
2759                    func: VariadicFunc::ListCreate {
2760                        elem_type: return_type,
2761                    },
2762                    exprs: vec![MirScalarExpr::CallVariadic {
2763                        func: VariadicFunc::RecordCreate {
2764                            field_names: vec![column_name, ColumnName::from("?record?")],
2765                        },
2766                        exprs: vec![result, original_row],
2767                    }],
2768                }
2769            }
2770
2771            // The input type is ((OriginalRow, (Arg1, Arg2, ...)), OrderByExprs...)
2772            AggregateFunc::FusedWindowAggregate {
2773                wrapped_aggregates,
2774                order_by: _,
2775                window_frame,
2776            } => {
2777                // Throw away OrderByExprs
2778                let tuple = self
2779                    .expr
2780                    .clone()
2781                    .call_unary(UnaryFunc::RecordGet(scalar_func::RecordGet(0)));
2782
2783                // Extract the original row
2784                let original_row = tuple
2785                    .clone()
2786                    .call_unary(UnaryFunc::RecordGet(scalar_func::RecordGet(0)));
2787
2788                // Extract the args of the fused call
2789                let all_args = tuple.call_unary(UnaryFunc::RecordGet(scalar_func::RecordGet(1)));
2790
2791                let return_type_with_orig_row = self
2792                    .typ(input_type)
2793                    .scalar_type
2794                    .unwrap_list_element_type()
2795                    .clone();
2796
2797                let all_func_return_types =
2798                    return_type_with_orig_row.unwrap_record_element_type()[0].clone();
2799                let mut func_result_exprs = Vec::new();
2800                let mut col_names = Vec::new();
2801                for (idx, wrapped_aggr) in wrapped_aggregates.iter().enumerate() {
2802                    let arg = all_args
2803                        .clone()
2804                        .call_unary(UnaryFunc::RecordGet(scalar_func::RecordGet(idx)));
2805                    let return_type =
2806                        all_func_return_types.unwrap_record_element_type()[idx].clone();
2807                    let (result, column_name) = Self::on_unique_window_agg(
2808                        window_frame,
2809                        arg,
2810                        input_type,
2811                        return_type,
2812                        wrapped_aggr,
2813                    );
2814                    func_result_exprs.push(result);
2815                    col_names.push(column_name);
2816                }
2817
2818                MirScalarExpr::CallVariadic {
2819                    func: VariadicFunc::ListCreate {
2820                        elem_type: return_type_with_orig_row,
2821                    },
2822                    exprs: vec![MirScalarExpr::CallVariadic {
2823                        func: VariadicFunc::RecordCreate {
2824                            field_names: vec![
2825                                ColumnName::from("?fused_window_aggr?"),
2826                                ColumnName::from("?record?"),
2827                            ],
2828                        },
2829                        exprs: vec![
2830                            MirScalarExpr::CallVariadic {
2831                                func: VariadicFunc::RecordCreate {
2832                                    field_names: col_names,
2833                                },
2834                                exprs: func_result_exprs,
2835                            },
2836                            original_row,
2837                        ],
2838                    }],
2839                }
2840            }
2841
2842            // The input type is ((OriginalRow, (Args1, Args2, ...)), OrderByExprs...)
2843            AggregateFunc::FusedValueWindowFunc {
2844                funcs,
2845                order_by: outer_order_by,
2846            } => {
2847                // Throw away OrderByExprs
2848                let tuple = self
2849                    .expr
2850                    .clone()
2851                    .call_unary(UnaryFunc::RecordGet(scalar_func::RecordGet(0)));
2852
2853                // Extract the original row
2854                let original_row = tuple
2855                    .clone()
2856                    .call_unary(UnaryFunc::RecordGet(scalar_func::RecordGet(0)));
2857
2858                // Extract the encoded args of the fused call
2859                let all_encoded_args =
2860                    tuple.call_unary(UnaryFunc::RecordGet(scalar_func::RecordGet(1)));
2861
2862                let return_type_with_orig_row = self
2863                    .typ(input_type)
2864                    .scalar_type
2865                    .unwrap_list_element_type()
2866                    .clone();
2867
2868                let all_func_return_types =
2869                    return_type_with_orig_row.unwrap_record_element_type()[0].clone();
2870                let mut func_result_exprs = Vec::new();
2871                let mut col_names = Vec::new();
2872                for (idx, func) in funcs.iter().enumerate() {
2873                    let args_for_func = all_encoded_args
2874                        .clone()
2875                        .call_unary(UnaryFunc::RecordGet(scalar_func::RecordGet(idx)));
2876                    let return_type_for_func =
2877                        all_func_return_types.unwrap_record_element_type()[idx].clone();
2878                    let (result, column_name) = match func {
2879                        AggregateFunc::LagLead {
2880                            lag_lead,
2881                            order_by,
2882                            ignore_nulls: _,
2883                        } => {
2884                            assert_eq!(order_by, outer_order_by);
2885                            Self::on_unique_lag_lead(lag_lead, args_for_func, return_type_for_func)
2886                        }
2887                        AggregateFunc::FirstValue {
2888                            window_frame,
2889                            order_by,
2890                        } => {
2891                            assert_eq!(order_by, outer_order_by);
2892                            Self::on_unique_first_value_last_value(
2893                                window_frame,
2894                                args_for_func,
2895                                return_type_for_func,
2896                            )
2897                        }
2898                        AggregateFunc::LastValue {
2899                            window_frame,
2900                            order_by,
2901                        } => {
2902                            assert_eq!(order_by, outer_order_by);
2903                            Self::on_unique_first_value_last_value(
2904                                window_frame,
2905                                args_for_func,
2906                                return_type_for_func,
2907                            )
2908                        }
2909                        _ => panic!("unknown function in FusedValueWindowFunc"),
2910                    };
2911                    func_result_exprs.push(result);
2912                    col_names.push(column_name);
2913                }
2914
2915                MirScalarExpr::CallVariadic {
2916                    func: VariadicFunc::ListCreate {
2917                        elem_type: return_type_with_orig_row,
2918                    },
2919                    exprs: vec![MirScalarExpr::CallVariadic {
2920                        func: VariadicFunc::RecordCreate {
2921                            field_names: vec![
2922                                ColumnName::from("?fused_value_window_func?"),
2923                                ColumnName::from("?record?"),
2924                            ],
2925                        },
2926                        exprs: vec![
2927                            MirScalarExpr::CallVariadic {
2928                                func: VariadicFunc::RecordCreate {
2929                                    field_names: col_names,
2930                                },
2931                                exprs: func_result_exprs,
2932                            },
2933                            original_row,
2934                        ],
2935                    }],
2936                }
2937            }
2938
2939            // All other variants should return the argument to the aggregation.
2940            AggregateFunc::MaxNumeric
2941            | AggregateFunc::MaxInt16
2942            | AggregateFunc::MaxInt32
2943            | AggregateFunc::MaxInt64
2944            | AggregateFunc::MaxUInt16
2945            | AggregateFunc::MaxUInt32
2946            | AggregateFunc::MaxUInt64
2947            | AggregateFunc::MaxMzTimestamp
2948            | AggregateFunc::MaxFloat32
2949            | AggregateFunc::MaxFloat64
2950            | AggregateFunc::MaxBool
2951            | AggregateFunc::MaxString
2952            | AggregateFunc::MaxDate
2953            | AggregateFunc::MaxTimestamp
2954            | AggregateFunc::MaxTimestampTz
2955            | AggregateFunc::MaxInterval
2956            | AggregateFunc::MaxTime
2957            | AggregateFunc::MinNumeric
2958            | AggregateFunc::MinInt16
2959            | AggregateFunc::MinInt32
2960            | AggregateFunc::MinInt64
2961            | AggregateFunc::MinUInt16
2962            | AggregateFunc::MinUInt32
2963            | AggregateFunc::MinUInt64
2964            | AggregateFunc::MinMzTimestamp
2965            | AggregateFunc::MinFloat32
2966            | AggregateFunc::MinFloat64
2967            | AggregateFunc::MinBool
2968            | AggregateFunc::MinString
2969            | AggregateFunc::MinDate
2970            | AggregateFunc::MinTimestamp
2971            | AggregateFunc::MinTimestampTz
2972            | AggregateFunc::MinInterval
2973            | AggregateFunc::MinTime
2974            | AggregateFunc::SumFloat32
2975            | AggregateFunc::SumFloat64
2976            | AggregateFunc::SumNumeric
2977            | AggregateFunc::Any
2978            | AggregateFunc::All
2979            | AggregateFunc::Dummy => self.expr.clone(),
2980        }
2981    }
2982
2983    /// `on_unique` for ROW_NUMBER, RANK, DENSE_RANK
2984    fn on_unique_ranking_window_funcs(
2985        &self,
2986        input_type: &[SqlColumnType],
2987        col_name: &str,
2988    ) -> MirScalarExpr {
2989        let list = self
2990            .expr
2991            .clone()
2992            // extract the list within the record
2993            .call_unary(UnaryFunc::RecordGet(scalar_func::RecordGet(0)));
2994
2995        // extract the expression within the list
2996        let record = MirScalarExpr::CallVariadic {
2997            func: VariadicFunc::ListIndex,
2998            exprs: vec![
2999                list,
3000                MirScalarExpr::literal_ok(Datum::Int64(1), SqlScalarType::Int64),
3001            ],
3002        };
3003
3004        MirScalarExpr::CallVariadic {
3005            func: VariadicFunc::ListCreate {
3006                elem_type: self
3007                    .typ(input_type)
3008                    .scalar_type
3009                    .unwrap_list_element_type()
3010                    .clone(),
3011            },
3012            exprs: vec![MirScalarExpr::CallVariadic {
3013                func: VariadicFunc::RecordCreate {
3014                    field_names: vec![ColumnName::from(col_name), ColumnName::from("?record?")],
3015                },
3016                exprs: vec![
3017                    MirScalarExpr::literal_ok(Datum::Int64(1), SqlScalarType::Int64),
3018                    record,
3019                ],
3020            }],
3021        }
3022    }
3023
3024    /// `on_unique` for `lag` and `lead`
3025    fn on_unique_lag_lead(
3026        lag_lead: &LagLeadType,
3027        encoded_args: MirScalarExpr,
3028        return_type: SqlScalarType,
3029    ) -> (MirScalarExpr, ColumnName) {
3030        let expr = encoded_args
3031            .clone()
3032            .call_unary(UnaryFunc::RecordGet(scalar_func::RecordGet(0)));
3033        let offset = encoded_args
3034            .clone()
3035            .call_unary(UnaryFunc::RecordGet(scalar_func::RecordGet(1)));
3036        let default_value =
3037            encoded_args.call_unary(UnaryFunc::RecordGet(scalar_func::RecordGet(2)));
3038
3039        // In this case, the window always has only one element, so if the offset is not null and
3040        // not zero, the default value should be returned instead.
3041        let value = offset
3042            .clone()
3043            .call_binary(
3044                MirScalarExpr::literal_ok(Datum::Int32(0), SqlScalarType::Int32),
3045                crate::func::Eq,
3046            )
3047            .if_then_else(expr, default_value);
3048        let result_expr = offset
3049            .call_unary(UnaryFunc::IsNull(crate::func::IsNull))
3050            .if_then_else(MirScalarExpr::literal_null(return_type), value);
3051
3052        let column_name = ColumnName::from(match lag_lead {
3053            LagLeadType::Lag => "?lag?",
3054            LagLeadType::Lead => "?lead?",
3055        });
3056
3057        (result_expr, column_name)
3058    }
3059
3060    /// `on_unique` for `first_value` and `last_value`
3061    fn on_unique_first_value_last_value(
3062        window_frame: &WindowFrame,
3063        arg: MirScalarExpr,
3064        return_type: SqlScalarType,
3065    ) -> (MirScalarExpr, ColumnName) {
3066        // If the window frame includes the current (single) row, return its value, null otherwise
3067        let result_expr = if window_frame.includes_current_row() {
3068            arg
3069        } else {
3070            MirScalarExpr::literal_null(return_type)
3071        };
3072        (result_expr, ColumnName::from("?first_value?"))
3073    }
3074
3075    /// `on_unique` for window aggregations
3076    fn on_unique_window_agg(
3077        window_frame: &WindowFrame,
3078        arg_expr: MirScalarExpr,
3079        input_type: &[SqlColumnType],
3080        return_type: SqlScalarType,
3081        wrapped_aggr: &AggregateFunc,
3082    ) -> (MirScalarExpr, ColumnName) {
3083        // If the window frame includes the current (single) row, evaluate the wrapped aggregate on
3084        // that row. Otherwise, return the default value for the aggregate.
3085        let result_expr = if window_frame.includes_current_row() {
3086            AggregateExpr {
3087                func: wrapped_aggr.clone(),
3088                expr: arg_expr,
3089                distinct: false, // We have just one input element; DISTINCT doesn't matter.
3090            }
3091            .on_unique(input_type)
3092        } else {
3093            MirScalarExpr::literal_ok(wrapped_aggr.default(), return_type)
3094        };
3095        (result_expr, ColumnName::from("?window_agg?"))
3096    }
3097
3098    /// Returns whether the expression is COUNT(*). Note that
3099    /// when we define the count builtin in sql::func, we convert
3100    /// COUNT(*) to COUNT(true), making it indistinguishable from
3101    /// a literal COUNT(true); we nevertheless prefer to treat
3102    /// both as COUNT(*).
3103    ///
3104    /// (HIR has the same `is_count_asterisk`.)
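    ///
    /// A rough sketch of the intent (not a doctest; construction simplified):
    ///
    /// ```ignore
    /// let count_star = AggregateExpr {
    ///     func: AggregateFunc::Count,
    ///     expr: MirScalarExpr::literal_ok(Datum::True, SqlScalarType::Bool),
    ///     distinct: false,
    /// };
    /// assert!(count_star.is_count_asterisk());
    /// ```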
3105    pub fn is_count_asterisk(&self) -> bool {
3106        self.func == AggregateFunc::Count && self.expr.is_literal_true() && !self.distinct
3107    }
3108}
3109
3110/// Describe a join implementation in dataflow.
3111#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Serialize, Deserialize, Hash, MzReflect)]
3112pub enum JoinImplementation {
3113    /// Perform a sequence of binary differential dataflow joins.
3114    ///
3115    /// The first argument indicates
3116    /// 1) the index of the starting collection,
3117    /// 2) if it should be arranged, the keys to arrange it by, and
3118    /// 3) the characteristics of the starting collection (for EXPLAINing).
3119    /// The sequence that follows lists other relation indexes, and the key for
3120    /// the arrangement we should use when joining it in.
3121    /// The JoinInputCharacteristics are for EXPLAINing the characteristics that
3122    /// were used for join ordering.
3123    ///
3124    /// Each collection index should occur exactly once, either as the starting collection
3125    /// or somewhere in the list.
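    ///
    /// A hedged sketch (not taken from a real plan; `MirScalarExpr::column` is used as an
    /// illustrative key expression) of a three-input plan that starts from input 1 without
    /// arranging it, then joins in inputs 0 and 2, each via an arrangement keyed by its
    /// first column:
    ///
    /// ```ignore
    /// let plan = JoinImplementation::Differential(
    ///     (1, None, None),
    ///     vec![
    ///         (0, vec![MirScalarExpr::column(0)], None),
    ///         (2, vec![MirScalarExpr::column(0)], None),
    ///     ],
    /// );
    /// ```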
3126    Differential(
3127        (
3128            usize,
3129            Option<Vec<MirScalarExpr>>,
3130            Option<JoinInputCharacteristics>,
3131        ),
3132        Vec<(usize, Vec<MirScalarExpr>, Option<JoinInputCharacteristics>)>,
3133    ),
3134    /// Perform independent delta query dataflows for each input.
3135    ///
3136    /// The argument is a sequence of plans, for the input collections in order.
3137    /// Each plan starts from the corresponding index, and then in sequence joins
3138    /// against collections identified by index and with the specified arrangement key.
3139    /// The JoinInputCharacteristics are for EXPLAINing the characteristics that were
3140    /// used for join ordering.
3141    DeltaQuery(Vec<Vec<(usize, Vec<MirScalarExpr>, Option<JoinInputCharacteristics>)>>),
3142    /// Join a user-created index with a constant collection to speed up the evaluation of a
3143    /// predicate such as `(f1 = 3 AND f2 = 5) OR (f1 = 7 AND f2 = 9)`.
3144    /// This gets translated to a Differential join during MIR -> LIR lowering, but we still want
3145    /// to represent it in MIR, because the fast path detection wants to match on this.
3146    ///
3147    /// Consists of (`<coll_id>`, `<index_id>`, `<index_key>`, `<constants>`)
3148    IndexedFilter(
3149        GlobalId,
3150        GlobalId,
3151        Vec<MirScalarExpr>,
3152        #[mzreflect(ignore)] Vec<Row>,
3153    ),
3154    /// No implementation yet selected.
3155    Unimplemented,
3156}
3157
3158impl Default for JoinImplementation {
3159    fn default() -> Self {
3160        JoinImplementation::Unimplemented
3161    }
3162}
3163
3164impl JoinImplementation {
3165    /// Returns `true` iff the value is not [`JoinImplementation::Unimplemented`].
3166    pub fn is_implemented(&self) -> bool {
3167        match self {
3168            Self::Unimplemented => false,
3169            _ => true,
3170        }
3171    }
3172
3173    /// Returns an optional implementation name if the value is not [`JoinImplementation::Unimplemented`].
3174    pub fn name(&self) -> Option<&'static str> {
3175        match self {
3176            Self::Differential(..) => Some("differential"),
3177            Self::DeltaQuery(..) => Some("delta"),
3178            Self::IndexedFilter(..) => Some("indexed_filter"),
3179            Self::Unimplemented => None,
3180        }
3181    }
3182}
3183
3184/// Characteristics of a join order candidate collection.
3185///
3186/// A candidate is described by a collection and a key, and may have various liabilities.
3187/// Primarily, the candidate may risk substantial inflation of records, which is something
3188/// that concerns us greatly. Additionally, the candidate may be unarranged, and we would
3189/// prefer candidates that do not require additional memory. Finally, we prefer lower id
3190/// collections in the interest of consistent tie-breaking. For more characteristics, see
3191/// comments on individual fields.
3192///
3193/// This has more than one version. `new` instantiates the appropriate version based on a
3194/// feature flag.
3195#[derive(Eq, PartialEq, Ord, PartialOrd, Debug, Clone, Serialize, Deserialize, Hash, MzReflect)]
3196pub enum JoinInputCharacteristics {
3197    /// Old version, with `enable_join_prioritize_arranged` turned off.
3198    V1(JoinInputCharacteristicsV1),
3199    /// Newer version, with `enable_join_prioritize_arranged` turned on.
3200    V2(JoinInputCharacteristicsV2),
3201}
3202
3203impl JoinInputCharacteristics {
3204    /// Creates a new instance with the given characteristics.
3205    pub fn new(
3206        unique_key: bool,
3207        key_length: usize,
3208        arranged: bool,
3209        cardinality: Option<usize>,
3210        filters: FilterCharacteristics,
3211        input: usize,
3212        enable_join_prioritize_arranged: bool,
3213    ) -> Self {
3214        if enable_join_prioritize_arranged {
3215            Self::V2(JoinInputCharacteristicsV2::new(
3216                unique_key,
3217                key_length,
3218                arranged,
3219                cardinality,
3220                filters,
3221                input,
3222            ))
3223        } else {
3224            Self::V1(JoinInputCharacteristicsV1::new(
3225                unique_key,
3226                key_length,
3227                arranged,
3228                cardinality,
3229                filters,
3230                input,
3231            ))
3232        }
3233    }
3234
3235    /// Turns the instance into a String to be printed in EXPLAIN.
3236    pub fn explain(&self) -> String {
3237        match self {
3238            Self::V1(jic) => jic.explain(),
3239            Self::V2(jic) => jic.explain(),
3240        }
3241    }
3242
3243    /// Whether the join input described by `self` is arranged.
3244    pub fn arranged(&self) -> bool {
3245        match self {
3246            Self::V1(jic) => jic.arranged,
3247            Self::V2(jic) => jic.arranged,
3248        }
3249    }
3250
3251    /// Returns the `FilterCharacteristics` for the join input described by `self`.
3252    pub fn filters(&mut self) -> &mut FilterCharacteristics {
3253        match self {
3254            Self::V1(jic) => &mut jic.filters,
3255            Self::V2(jic) => &mut jic.filters,
3256        }
3257    }
3258}
3259
3260/// Newer version of `JoinInputCharacteristics`, with `enable_join_prioritize_arranged` turned on.
3261#[derive(Eq, PartialEq, Ord, PartialOrd, Debug, Clone, Serialize, Deserialize, Hash, MzReflect)]
3262pub struct JoinInputCharacteristicsV2 {
3263    /// An excellent indication that record count will not increase.
3264    pub unique_key: bool,
3265    /// Cross joins are bad.
3266    /// (`key_length > 0` also implies that it is not a cross join. However, we need to note cross
3267    /// joins in a separate field, because not being a cross join is more important than `arranged`,
3268    /// but otherwise `key_length` is less important than `arranged`.)
3269    pub not_cross: bool,
3270    /// Indicates that there will be no additional in-memory footprint.
3271    pub arranged: bool,
3272    /// A weaker signal that record count will not increase.
3273    pub key_length: usize,
3274    /// Estimated cardinality (lower is better)
3275    pub cardinality: Option<std::cmp::Reverse<usize>>,
3276    /// Characteristics of the filter that is applied at this input.
3277    pub filters: FilterCharacteristics,
3278    /// We want to prefer inputs that appear earlier in the input list, for stability of ordering.
3279    pub input: std::cmp::Reverse<usize>,
3280}
3281
3282impl JoinInputCharacteristicsV2 {
3283    /// Creates a new instance with the given characteristics.
3284    pub fn new(
3285        unique_key: bool,
3286        key_length: usize,
3287        arranged: bool,
3288        cardinality: Option<usize>,
3289        filters: FilterCharacteristics,
3290        input: usize,
3291    ) -> Self {
3292        Self {
3293            unique_key,
3294            not_cross: key_length > 0,
3295            arranged,
3296            key_length,
3297            cardinality: cardinality.map(std::cmp::Reverse),
3298            filters,
3299            input: std::cmp::Reverse(input),
3300        }
3301    }
3302
3303    /// Turns the instance into a String to be printed in EXPLAIN.
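    ///
    /// The encoding is compact: `U` for a unique key, one `K` per key column, `A` if
    /// arranged, `|n|` for an estimated cardinality of `n`, followed by the filter
    /// characteristics.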
3304    pub fn explain(&self) -> String {
3305        let mut e = "".to_owned();
3306        if self.unique_key {
3307            e.push_str("U");
3308        }
3309        // Don't need to print `not_cross`, because that is visible in the printed key.
3310        // if !self.not_cross {
3311        //     e.push_str("C");
3312        // }
3313        for _ in 0..self.key_length {
3314            e.push_str("K");
3315        }
3316        if self.arranged {
3317            e.push_str("A");
3318        }
3319        if let Some(std::cmp::Reverse(cardinality)) = self.cardinality {
3320            e.push_str(&format!("|{cardinality}|"));
3321        }
3322        e.push_str(&self.filters.explain());
3323        e
3324    }
3325}
3326
3327/// Old version of `JoinInputCharacteristics`, with `enable_join_prioritize_arranged` turned off.
3328#[derive(Eq, PartialEq, Ord, PartialOrd, Debug, Clone, Serialize, Deserialize, Hash, MzReflect)]
3329pub struct JoinInputCharacteristicsV1 {
3330    /// An excellent indication that record count will not increase.
3331    pub unique_key: bool,
3332    /// A weaker signal that record count will not increase.
3333    pub key_length: usize,
3334    /// Indicates that there will be no additional in-memory footprint.
3335    pub arranged: bool,
3336    /// Estimated cardinality (lower is better)
3337    pub cardinality: Option<std::cmp::Reverse<usize>>,
3338    /// Characteristics of the filter that is applied at this input.
3339    pub filters: FilterCharacteristics,
3340    /// We want to prefer inputs that appear earlier in the input list, for stability of ordering.
3341    pub input: std::cmp::Reverse<usize>,
3342}
3343
3344impl JoinInputCharacteristicsV1 {
3345    /// Creates a new instance with the given characteristics.
3346    pub fn new(
3347        unique_key: bool,
3348        key_length: usize,
3349        arranged: bool,
3350        cardinality: Option<usize>,
3351        filters: FilterCharacteristics,
3352        input: usize,
3353    ) -> Self {
3354        Self {
3355            unique_key,
3356            key_length,
3357            arranged,
3358            cardinality: cardinality.map(std::cmp::Reverse),
3359            filters,
3360            input: std::cmp::Reverse(input),
3361        }
3362    }
3363
3364    /// Turns the instance into a String to be printed in EXPLAIN.
3365    pub fn explain(&self) -> String {
3366        let mut e = "".to_owned();
3367        if self.unique_key {
3368            e.push_str("U");
3369        }
3370        for _ in 0..self.key_length {
3371            e.push_str("K");
3372        }
3373        if self.arranged {
3374            e.push_str("A");
3375        }
3376        if let Some(std::cmp::Reverse(cardinality)) = self.cardinality {
3377            e.push_str(&format!("|{cardinality}|"));
3378        }
3379        e.push_str(&self.filters.explain());
3380        e
3381    }
3382}
3383
3384/// Instructions for finishing the result of a query.
3385///
3386/// The primary reason for the existence of this structure and attendant code
3387/// is that SQL's ORDER BY requires sorting rows (as already implied by the
3388/// keywords), whereas much of the rest of SQL is defined in terms of unordered
3389/// multisets. But as it turns out, the same idea can be used to optimize
3390/// trivial peeks.
3391///
3392/// The generic parameters are for accommodating prepared statement parameters in
3393/// `limit` and `offset`: the planner can hold these fields as HirScalarExpr long enough to call
3394/// `bind_parameters` on them.
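///
/// A sketch (values are illustrative) of the finishing for
/// `ORDER BY #2 DESC LIMIT 10 OFFSET 5` over a three-column result:
///
/// ```ignore
/// let finishing = RowSetFinishing {
///     order_by: vec![ColumnOrder { column: 2, desc: true, nulls_last: true }],
///     limit: Some(NonNeg::try_from(10).unwrap()),
///     offset: 5usize,
///     project: vec![0, 1, 2],
/// };
/// assert!(!finishing.is_trivial(3));
/// assert!(!finishing.is_streamable(3));
/// ```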
3395#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
3396pub struct RowSetFinishing<L = NonNeg<i64>, O = usize> {
3397    /// Order rows by the given columns.
3398    pub order_by: Vec<ColumnOrder>,
3399    /// Include at most this many rows (after applying the offset).
3400    pub limit: Option<L>,
3401    /// Skip this many rows before producing output.
3402    pub offset: O,
3403    /// Include only the given columns, in the given order.
3404    pub project: Vec<usize>,
3405}
3406
3407impl<L> RowSetFinishing<L> {
3408    /// Returns a trivial finishing, i.e., one that does nothing to the result set.
3409    pub fn trivial(arity: usize) -> RowSetFinishing<L> {
3410        RowSetFinishing {
3411            order_by: Vec::new(),
3412            limit: None,
3413            offset: 0,
3414            project: (0..arity).collect(),
3415        }
3416    }
3417    /// True if the finishing does nothing to any result set.
3418    pub fn is_trivial(&self, arity: usize) -> bool {
3419        self.limit.is_none()
3420            && self.order_by.is_empty()
3421            && self.offset == 0
3422            && self.project.iter().copied().eq(0..arity)
3423    }
3424    /// True if the finishing does not require an ORDER BY.
3425    ///
3426    /// LIMIT and OFFSET without an ORDER BY _are_ streamable: without an
3427    /// explicit ordering we will skip an arbitrary bag of elements and return
3428    /// the first arbitrary elements in the remaining bag. The result semantics
3429    /// are still correct but may be surprising to some users.
3430    pub fn is_streamable(&self, arity: usize) -> bool {
3431        self.order_by.is_empty() && self.project.iter().copied().eq(0..arity)
3432    }
3433}
3434
3435impl RowSetFinishing<NonNeg<i64>, usize> {
3436    /// The number of input rows (i.e., rows before the finishing is applied) needed
3437    /// to evaluate the finishing: offset + limit.
3438    ///
3439    /// If it returns None, then we need all the rows.
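    ///
    /// For example, `LIMIT 10 OFFSET 5` needs the first 15 rows.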
3440    pub fn num_rows_needed(&self) -> Option<usize> {
3441        self.limit
3442            .as_ref()
3443            .map(|l| usize::cast_from(u64::from(l.clone())) + self.offset)
3444    }
3445}
3446
3447impl RowSetFinishing {
3448    /// Applies finishing actions to a [`RowCollection`], and reports the total
3449    /// time it took to run.
3450    ///
3451    /// Returns a [`SortedRowCollectionIter`] that contains all of the response data, as
3452    /// well as the size of the response in bytes.
3453    pub fn finish(
3454        &self,
3455        rows: RowCollection,
3456        max_result_size: u64,
3457        max_returned_query_size: Option<u64>,
3458        duration_histogram: &Histogram,
3459    ) -> Result<(SortedRowCollectionIter, usize), String> {
3460        let now = Instant::now();
3461        let result = self.finish_inner(rows, max_result_size, max_returned_query_size);
3462        let duration = now.elapsed();
3463        duration_histogram.observe(duration.as_secs_f64());
3464
3465        result
3466    }
3467
3468    /// Implementation for [`RowSetFinishing::finish`].
3469    fn finish_inner(
3470        &self,
3471        rows: RowCollection,
3472        max_result_size: u64,
3473        max_returned_query_size: Option<u64>,
3474    ) -> Result<(SortedRowCollectionIter, usize), String> {
3475        // How much additional memory is required to make a sorted view.
3476        let sorted_view_mem = rows.entries().saturating_mul(std::mem::size_of::<usize>());
3477        let required_memory = rows.byte_len().saturating_add(sorted_view_mem);
3478
3479        // Bail if creating the sorted view would require us to use too much memory.
3480        if required_memory > usize::cast_from(max_result_size) {
3481            let max_bytes = ByteSize::b(max_result_size);
3482            return Err(format!("result exceeds max size of {max_bytes}",));
3483        }
3484
3485        let sorted_view = rows.sorted_view(&self.order_by);
3486        let mut iter = sorted_view
3487            .into_row_iter()
3488            .apply_offset(self.offset)
3489            .with_projection(self.project.clone());
3490
3491        if let Some(limit) = self.limit {
3492            let limit = u64::from(limit);
3493            let limit = usize::cast_from(limit);
3494            iter = iter.with_limit(limit);
3495        };
3496
3497        // TODO(parkmycar): Re-think how we can calculate the total response size without
3498        // having to iterate through the entire collection of Rows, while still
3499        // respecting the LIMIT, OFFSET, and projections.
3500        //
3501        // Note: It feels a bit bad always calculating the response size, but we almost
3502        // always need it to either check the `max_returned_query_size`, or for reporting
3503        // in the query history.
3504        let response_size: usize = iter.clone().map(|row| row.data().len()).sum();
3505
3506        // Bail if we would end up returning more data to the client than they can support.
3507        if let Some(max) = max_returned_query_size {
3508            if response_size > usize::cast_from(max) {
3509                let max_bytes = ByteSize::b(max);
3510                return Err(format!("result exceeds max size of {max_bytes}"));
3511            }
3512        }
3513
3514        Ok((iter, response_size))
3515    }
3516}
3517
3518/// A [RowSetFinishing] that can be repeatedly applied to batches of updates (in
3519/// a [RowCollection]) and keeps track of the remaining limit, offset, and cap
3520/// on query result size.
3521#[derive(Debug)]
3522pub struct RowSetFinishingIncremental {
3523    /// How many more rows may still be returned (after the offset is consumed).
3524    pub remaining_limit: Option<usize>,
3525    /// How many more rows still need to be skipped.
3526    pub remaining_offset: usize,
3527    /// The maximum allowed result size, as requested by the client.
3528    pub max_returned_query_size: Option<u64>,
3529    /// Tracks our remaining allowed budget for result size.
3530    pub remaining_max_returned_query_size: Option<u64>,
3531    /// Include only given columns.
3532    pub project: Vec<usize>,
3533}
3534
3535impl RowSetFinishingIncremental {
3536    /// Builds a [RowSetFinishingIncremental] from the components of a [RowSetFinishing].
3537    /// Can only be used when [is_streamable](RowSetFinishing::is_streamable) is
3538    /// `true`.
3539    ///
3540    /// # Panics
3541    ///
3542    /// Panics if the finishing is not streamable, that is, if it has an ORDER BY.
3543    pub fn new(
3544        offset: usize,
3545        limit: Option<NonNeg<i64>>,
3546        project: Vec<usize>,
3547        max_returned_query_size: Option<u64>,
3548    ) -> Self {
3549        let limit = limit.map(|l| {
3550            let l = u64::from(l);
3551            let l = usize::cast_from(l);
3552            l
3553        });
3554
3555        RowSetFinishingIncremental {
3556            remaining_limit: limit,
3557            remaining_offset: offset,
3558            max_returned_query_size,
3559            remaining_max_returned_query_size: max_returned_query_size,
3560            project,
3561        }
3562    }
3563
3564    /// Applies finishing actions to the given [`RowCollection`], and reports
3565    /// the total time it took to run.
3566    ///
3567    /// Returns a [`SortedRowCollectionIter`] that contains all of the response
3568    /// data.
3569    pub fn finish_incremental(
3570        &mut self,
3571        rows: RowCollection,
3572        max_result_size: u64,
3573        duration_histogram: &Histogram,
3574    ) -> Result<SortedRowCollectionIter, String> {
3575        let now = Instant::now();
3576        let result = self.finish_incremental_inner(rows, max_result_size);
3577        let duration = now.elapsed();
3578        duration_histogram.observe(duration.as_secs_f64());
3579
3580        result
3581    }
3582
3583    fn finish_incremental_inner(
3584        &mut self,
3585        rows: RowCollection,
3586        max_result_size: u64,
3587    ) -> Result<SortedRowCollectionIter, String> {
3588        // How much additional memory is required to make a sorted view.
3589        let sorted_view_mem = rows.entries().saturating_mul(std::mem::size_of::<usize>());
3590        let required_memory = rows.byte_len().saturating_add(sorted_view_mem);
3591
3592        // Bail if creating the sorted view would require us to use too much memory.
3593        if required_memory > usize::cast_from(max_result_size) {
3594            let max_bytes = ByteSize::b(max_result_size);
3595            return Err(format!("total result exceeds max size of {max_bytes}",));
3596        }
3597
3598        let batch_num_rows = rows.count(0, None);
3599
3600        let sorted_view = rows.sorted_view(&[]);
3601        let mut iter = sorted_view
3602            .into_row_iter()
3603            .apply_offset(self.remaining_offset)
3604            .with_projection(self.project.clone());
3605
3606        if let Some(limit) = self.remaining_limit {
3607            iter = iter.with_limit(limit);
3608        };
3609
3610        self.remaining_offset = self.remaining_offset.saturating_sub(batch_num_rows);
3611        if let Some(remaining_limit) = self.remaining_limit.as_mut() {
3612            *remaining_limit -= iter.count();
3613        }
3614
3615        // TODO(parkmycar): Re-think how we can calculate the total response size without
3616        // having to iterate through the entire collection of Rows, while still
3617        // respecting the LIMIT, OFFSET, and projections.
3618        //
3619        // Note: It feels a bit bad always calculating the response size, but we almost
3620        // always need it to either check the `max_returned_query_size`, or for reporting
3621        // in the query history.
3622        let response_size: usize = iter.clone().map(|row| row.data().len()).sum();
3623
3624        // Bail if we would end up returning more data to the client than they can support.
3625        if let Some(max) = self.remaining_max_returned_query_size {
3626            if response_size > usize::cast_from(max) {
3627                let max_bytes = ByteSize::b(self.max_returned_query_size.expect("known to exist"));
3628                return Err(format!("total result exceeds max size of {max_bytes}"));
3629            }
3630        }
3631
3632        Ok(iter)
3633    }
3634}
3635
3636/// Compare `left` and `right` using `order`. If that doesn't produce a strict
3637/// ordering, call `tiebreaker`.
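///
/// A minimal sketch (hypothetical data):
///
/// ```ignore
/// let order = [ColumnOrder { column: 0, desc: false, nulls_last: true }];
/// assert_eq!(
///     compare_columns(&order, &[Datum::Int64(1)], &[Datum::Int64(2)], || Ordering::Equal),
///     Ordering::Less,
/// );
/// ```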
3638pub fn compare_columns<F>(
3639    order: &[ColumnOrder],
3640    left: &[Datum],
3641    right: &[Datum],
3642    tiebreaker: F,
3643) -> Ordering
3644where
3645    F: Fn() -> Ordering,
3646{
3647    for order in order {
3648        let cmp = match (&left[order.column], &right[order.column]) {
3649            (Datum::Null, Datum::Null) => Ordering::Equal,
3650            (Datum::Null, _) => {
3651                if order.nulls_last {
3652                    Ordering::Greater
3653                } else {
3654                    Ordering::Less
3655                }
3656            }
3657            (_, Datum::Null) => {
3658                if order.nulls_last {
3659                    Ordering::Less
3660                } else {
3661                    Ordering::Greater
3662                }
3663            }
3664            (lval, rval) => {
3665                if order.desc {
3666                    rval.cmp(lval)
3667                } else {
3668                    lval.cmp(rval)
3669                }
3670            }
3671        };
3672        if cmp != Ordering::Equal {
3673            return cmp;
3674        }
3675    }
3676    tiebreaker()
3677}
3678
3679/// Describe a window frame, e.g. `RANGE UNBOUNDED PRECEDING` or
3680/// `ROWS BETWEEN 5 PRECEDING AND CURRENT ROW`.
3681///
3682/// Window frames define a subset of the partition, and only a subset of
3683/// window functions make use of the window frame.
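///
/// A construction sketch for `ROWS BETWEEN 5 PRECEDING AND CURRENT ROW`:
///
/// ```ignore
/// let frame = WindowFrame {
///     units: WindowFrameUnits::Rows,
///     start_bound: WindowFrameBound::OffsetPreceding(5),
///     end_bound: WindowFrameBound::CurrentRow,
/// };
/// assert_eq!(frame.to_string(), "rows between 5 preceding and current row");
/// ```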
3684#[derive(Debug, Clone, Eq, PartialEq, Ord, PartialOrd, Serialize, Deserialize, Hash, MzReflect)]
3685pub struct WindowFrame {
3686    /// ROWS, RANGE or GROUPS
3687    pub units: WindowFrameUnits,
3688    /// Where the frame starts
3689    pub start_bound: WindowFrameBound,
3690    /// Where the frame ends
3691    pub end_bound: WindowFrameBound,
3692}
3693
3694impl Display for WindowFrame {
3695    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
3696        write!(
3697            f,
3698            "{} between {} and {}",
3699            self.units, self.start_bound, self.end_bound
3700        )
3701    }
3702}
3703
3704impl WindowFrame {
3705    /// Return the default window frame used when one is not explicitly defined
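    /// (i.e., `RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW`).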
3706    pub fn default() -> Self {
3707        WindowFrame {
3708            units: WindowFrameUnits::Range,
3709            start_bound: WindowFrameBound::UnboundedPreceding,
3710            end_bound: WindowFrameBound::CurrentRow,
3711        }
3712    }
3713
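    /// Whether the frame contains the current row itself. For example,
    /// `ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW` includes it, while
    /// `ROWS BETWEEN UNBOUNDED PRECEDING AND 1 PRECEDING` does not.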
3714    fn includes_current_row(&self) -> bool {
3715        use WindowFrameBound::*;
3716        match self.start_bound {
3717            UnboundedPreceding => match self.end_bound {
3718                UnboundedPreceding => false,
3719                OffsetPreceding(0) => true,
3720                OffsetPreceding(_) => false,
3721                CurrentRow => true,
3722                OffsetFollowing(_) => true,
3723                UnboundedFollowing => true,
3724            },
3725            OffsetPreceding(0) => match self.end_bound {
3726                UnboundedPreceding => unreachable!(),
3727                OffsetPreceding(0) => true,
3728                // Any nonzero offsets here will create an empty window
3729                OffsetPreceding(_) => false,
3730                CurrentRow => true,
3731                OffsetFollowing(_) => true,
3732                UnboundedFollowing => true,
3733            },
3734            OffsetPreceding(_) => match self.end_bound {
3735                UnboundedPreceding => unreachable!(),
3736                // Window ends at the current row
3737                OffsetPreceding(0) => true,
3738                OffsetPreceding(_) => false,
3739                CurrentRow => true,
3740                OffsetFollowing(_) => true,
3741                UnboundedFollowing => true,
3742            },
3743            CurrentRow => true,
3744            OffsetFollowing(0) => match self.end_bound {
3745                UnboundedPreceding => unreachable!(),
3746                OffsetPreceding(_) => unreachable!(),
3747                CurrentRow => unreachable!(),
3748                OffsetFollowing(_) => true,
3749                UnboundedFollowing => true,
3750            },
3751            OffsetFollowing(_) => match self.end_bound {
3752                UnboundedPreceding => unreachable!(),
3753                OffsetPreceding(_) => unreachable!(),
3754                CurrentRow => unreachable!(),
3755                OffsetFollowing(_) => false,
3756                UnboundedFollowing => false,
3757            },
3758            UnboundedFollowing => false,
3759        }
3760    }
3761}
3762
3763/// Describe how frame bounds are interpreted
3764#[derive(Debug, Clone, Eq, PartialEq, Ord, PartialOrd, Serialize, Deserialize, Hash, MzReflect)]
3765pub enum WindowFrameUnits {
3766    /// Each row is treated as the unit of work for bounds
3767    Rows,
3768    /// Each peer group is treated as the unit of work for bounds,
3769    /// and offset-based bounds use the value of the ORDER BY expression
3770    Range,
3771    /// Each peer group is treated as the unit of work for bounds.
3772    /// `Groups` is currently not supported and is rejected during planning.
3773    Groups,
3774}
3775
3776impl Display for WindowFrameUnits {
3777    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
3778        match self {
3779            WindowFrameUnits::Rows => write!(f, "rows"),
3780            WindowFrameUnits::Range => write!(f, "range"),
3781            WindowFrameUnits::Groups => write!(f, "groups"),
3782        }
3783    }
3784}
3785
3786/// Specifies [WindowFrame]'s `start_bound` and `end_bound`
3787///
3788/// The order between frame bounds is significant, as Postgres enforces
3789/// some restrictions there.
3790#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash, MzReflect, PartialOrd, Ord)]
3791pub enum WindowFrameBound {
3792    /// `UNBOUNDED PRECEDING`
3793    UnboundedPreceding,
3794    /// `<N> PRECEDING`
3795    OffsetPreceding(u64),
3796    /// `CURRENT ROW`
3797    CurrentRow,
3798    /// `<N> FOLLOWING`
3799    OffsetFollowing(u64),
3800    /// `UNBOUNDED FOLLOWING`.
3801    UnboundedFollowing,
3802}
3803
3804impl Display for WindowFrameBound {
3805    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
3806        match self {
3807            WindowFrameBound::UnboundedPreceding => write!(f, "unbounded preceding"),
3808            WindowFrameBound::OffsetPreceding(offset) => write!(f, "{} preceding", offset),
3809            WindowFrameBound::CurrentRow => write!(f, "current row"),
3810            WindowFrameBound::OffsetFollowing(offset) => write!(f, "{} following", offset),
3811            WindowFrameBound::UnboundedFollowing => write!(f, "unbounded following"),
3812        }
3813    }
3814}
3815
3816/// Maximum iterations for a LetRec.
3817#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
3818pub struct LetRecLimit {
3819    /// Maximum number of iterations to evaluate.
3820    pub max_iters: NonZeroU64,
3821    /// Whether to return the current result, rather than throw an error, when reaching the above limit.
3822    /// If true, we simply use the current contents of each Id as the final result.
3823    pub return_at_limit: bool,
3824}
3825
3826impl LetRecLimit {
3827    /// Computes the smallest `max_iters` value among the given (optional) limits.
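    ///
    /// A minimal sketch:
    ///
    /// ```ignore
    /// let limits = vec![
    ///     None,
    ///     Some(LetRecLimit { max_iters: NonZeroU64::new(5).unwrap(), return_at_limit: false }),
    ///     Some(LetRecLimit { max_iters: NonZeroU64::new(3).unwrap(), return_at_limit: true }),
    /// ];
    /// assert_eq!(LetRecLimit::min_max_iter(&limits), Some(3));
    /// ```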
3828    pub fn min_max_iter(limits: &Vec<Option<LetRecLimit>>) -> Option<u64> {
3829        limits
3830            .iter()
3831            .filter_map(|l| l.as_ref().map(|l| l.max_iters.get()))
3832            .min()
3833    }
3834
3835    /// The default value of `LetRecLimit::return_at_limit` when using the RECURSION LIMIT option of
3836    /// WMR without ERROR AT or RETURN AT.
3837    pub const RETURN_AT_LIMIT_DEFAULT: bool = false;
3838}
3839
3840impl Display for LetRecLimit {
3841    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
3842        write!(f, "[recursion_limit={}", self.max_iters)?;
3843        if self.return_at_limit != LetRecLimit::RETURN_AT_LIMIT_DEFAULT {
3844            write!(f, ", return_at_limit")?;
3845        }
3846        write!(f, "]")
3847    }
3848}
3849
3850/// For a global Get, this indicates whether we are going to read from Persist or from an index.
3851/// (See comment in MirRelationExpr::Get.)
3852#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Serialize, Deserialize, Hash)]
3853pub enum AccessStrategy {
3854    /// It's either a local Get (a CTE), or the strategy is not yet known.
3855    /// `prune_and_annotate_dataflow_index_imports` decides it for global Gets, and thus switches to
3856    /// one of the other variants.
3857    UnknownOrLocal,
3858    /// The Get will read from Persist.
3859    Persist,
3860    /// The Get will read from an index or indexes: (index id, how the index will be used).
3861    Index(Vec<(GlobalId, IndexUsageType)>),
3862    /// The Get will read a collection that is computed by the same dataflow, but in a different
3863    /// `BuildDesc` in `objects_to_build`.
3864    SameDataflow,
3865}
3866
3867#[cfg(test)]
3868mod tests {
3869    use mz_repr::explain::text::text_string_at;
3870
3871    use crate::explain::HumanizedExplain;
3872
3873    use super::*;
3874
3875    #[mz_ore::test]
3876    fn test_row_set_finishing_as_text() {
3877        let finishing = RowSetFinishing {
3878            order_by: vec![ColumnOrder {
3879                column: 4,
3880                desc: true,
3881                nulls_last: true,
3882            }],
3883            limit: Some(NonNeg::try_from(7).unwrap()),
3884            offset: Default::default(),
3885            project: vec![1, 3, 4, 5],
3886        };
3887
3888        let mode = HumanizedExplain::new(false);
3889        let expr = mode.expr(&finishing, None);
3890
3891        let act = text_string_at(&expr, mz_ore::str::Indent::default);
3892
3893        let exp = {
3894            use mz_ore::fmt::FormatBuffer;
3895            let mut s = String::new();
3896            write!(&mut s, "Finish");
3897            write!(&mut s, " order_by=[#4 desc nulls_last]");
3898            write!(&mut s, " limit=7");
3899            write!(&mut s, " output=[#1, #3..=#5]");
3900            writeln!(&mut s, "");
3901            s
3902        };
3903
3904        assert_eq!(act, exp);
3905    }
3906}
3907
3908/// An iterator over AST structures that calls out nodes that differ.
3909///
3910/// The iterators visit two ASTs in tandem, continuing as long as the AST node data matches,
3911/// and yielding an output pair as soon as the AST nodes do not match. Their intent is to call
3912/// attention to the places where the ASTs differ, and they incidentally provide a stack-free
3913/// way to compare two ASTs.
3914mod structured_diff {
3915
3916    use super::MirRelationExpr;
3917    use itertools::Itertools;
3918
3919    ///  An iterator over structured differences between two `MirRelationExpr` instances.
3920    pub struct MreDiff<'a> {
3921        /// Pairs of expressions that must still be compared.
3922        todo: Vec<(&'a MirRelationExpr, &'a MirRelationExpr)>,
3923    }
3924
3925    impl<'a> MreDiff<'a> {
3926        /// Create a new `MirRelationExpr` structured difference.
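        ///
        /// A usage sketch (`expr1` and `expr2` are hypothetical expressions):
        ///
        /// ```ignore
        /// if let Some((lhs, rhs)) = MreDiff::new(&expr1, &expr2).next() {
        ///     println!("first differing nodes: {:?} vs {:?}", lhs, rhs);
        /// }
        /// ```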
3927        pub fn new(expr1: &'a MirRelationExpr, expr2: &'a MirRelationExpr) -> Self {
3928            MreDiff {
3929                todo: vec![(expr1, expr2)],
3930            }
3931        }
3932    }
3933
3934    impl<'a> Iterator for MreDiff<'a> {
3935        // Pairs of expressions that do not match.
3936        type Item = (&'a MirRelationExpr, &'a MirRelationExpr);
3937
3938        fn next(&mut self) -> Option<Self::Item> {
            while let Some((expr1, expr2)) = self.todo.pop() {
                match (expr1, expr2) {
                    (
                        MirRelationExpr::Constant {
                            rows: rows1,
                            typ: typ1,
                        },
                        MirRelationExpr::Constant {
                            rows: rows2,
                            typ: typ2,
                        },
                    ) => {
                        if rows1 != rows2 || typ1 != typ2 {
                            return Some((expr1, expr2));
                        }
                    }
                    (
                        MirRelationExpr::Get {
                            id: id1,
                            typ: typ1,
                            access_strategy: as1,
                        },
                        MirRelationExpr::Get {
                            id: id2,
                            typ: typ2,
                            access_strategy: as2,
                        },
                    ) => {
                        if id1 != id2 || typ1 != typ2 || as1 != as2 {
                            return Some((expr1, expr2));
                        }
                    }
                    (
                        MirRelationExpr::Let {
                            id: id1,
                            body: body1,
                            value: value1,
                        },
                        MirRelationExpr::Let {
                            id: id2,
                            body: body2,
                            value: value2,
                        },
                    ) => {
                        if id1 != id2 {
                            return Some((expr1, expr2));
                        } else {
                            self.todo.push((body1, body2));
                            self.todo.push((value1, value2));
                        }
                    }
                    (
                        MirRelationExpr::LetRec {
                            ids: ids1,
                            body: body1,
                            values: values1,
                            limits: limits1,
                        },
                        MirRelationExpr::LetRec {
                            ids: ids2,
                            body: body2,
                            values: values2,
                            limits: limits2,
                        },
                    ) => {
                        if ids1 != ids2 || values1.len() != values2.len() || limits1 != limits2 {
                            return Some((expr1, expr2));
                        } else {
                            self.todo.push((body1, body2));
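                            // `zip_eq` panics on unequal lengths, but the binding counts
                            // were checked to match just above.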
                            self.todo.extend(values1.iter().zip_eq(values2.iter()));
                        }
                    }
                    (
                        MirRelationExpr::Project {
                            outputs: outputs1,
                            input: input1,
                        },
                        MirRelationExpr::Project {
                            outputs: outputs2,
                            input: input2,
                        },
                    ) => {
                        if outputs1 != outputs2 {
                            return Some((expr1, expr2));
                        } else {
                            self.todo.push((input1, input2));
                        }
                    }
                    (
                        MirRelationExpr::Map {
                            scalars: scalars1,
                            input: input1,
                        },
                        MirRelationExpr::Map {
                            scalars: scalars2,
                            input: input2,
                        },
                    ) => {
                        if scalars1 != scalars2 {
                            return Some((expr1, expr2));
                        } else {
                            self.todo.push((input1, input2));
                        }
                    }
                    (
                        MirRelationExpr::Filter {
                            predicates: predicates1,
                            input: input1,
                        },
                        MirRelationExpr::Filter {
                            predicates: predicates2,
                            input: input2,
                        },
                    ) => {
                        if predicates1 != predicates2 {
                            return Some((expr1, expr2));
                        } else {
                            self.todo.push((input1, input2));
                        }
                    }
                    (
                        MirRelationExpr::FlatMap {
                            input: input1,
                            func: func1,
                            exprs: exprs1,
                        },
                        MirRelationExpr::FlatMap {
                            input: input2,
                            func: func2,
                            exprs: exprs2,
                        },
                    ) => {
                        if func1 != func2 || exprs1 != exprs2 {
                            return Some((expr1, expr2));
                        } else {
                            self.todo.push((input1, input2));
                        }
                    }
                    (
                        MirRelationExpr::Join {
                            inputs: inputs1,
                            equivalences: eq1,
                            implementation: impl1,
                        },
                        MirRelationExpr::Join {
                            inputs: inputs2,
                            equivalences: eq2,
                            implementation: impl2,
                        },
                    ) => {
                        if inputs1.len() != inputs2.len() || eq1 != eq2 || impl1 != impl2 {
                            return Some((expr1, expr2));
                        } else {
                            self.todo.extend(inputs1.iter().zip_eq(inputs2.iter()));
                        }
                    }
                    (
                        MirRelationExpr::Reduce {
                            aggregates: aggregates1,
                            input: inputs1,
                            group_key: gk1,
                            monotonic: m1,
                            expected_group_size: egs1,
                        },
                        MirRelationExpr::Reduce {
                            aggregates: aggregates2,
                            input: inputs2,
                            group_key: gk2,
                            monotonic: m2,
                            expected_group_size: egs2,
                        },
                    ) => {
                        if aggregates1 != aggregates2 || gk1 != gk2 || m1 != m2 || egs1 != egs2 {
                            return Some((expr1, expr2));
                        } else {
                            self.todo.push((inputs1, inputs2));
                        }
                    }
                    (
                        MirRelationExpr::TopK {
                            group_key: gk1,
                            order_key: order1,
                            input: input1,
                            limit: l1,
                            offset: o1,
                            monotonic: m1,
                            expected_group_size: egs1,
                        },
                        MirRelationExpr::TopK {
                            group_key: gk2,
                            order_key: order2,
                            input: input2,
                            limit: l2,
                            offset: o2,
                            monotonic: m2,
                            expected_group_size: egs2,
                        },
                    ) => {
                        if order1 != order2
                            || gk1 != gk2
                            || l1 != l2
                            || o1 != o2
                            || m1 != m2
                            || egs1 != egs2
                        {
                            return Some((expr1, expr2));
                        } else {
                            self.todo.push((input1, input2));
                        }
                    }
                    (
                        MirRelationExpr::Negate { input: input1 },
                        MirRelationExpr::Negate { input: input2 },
                    ) => {
                        self.todo.push((input1, input2));
                    }
                    (
                        MirRelationExpr::Threshold { input: input1 },
                        MirRelationExpr::Threshold { input: input2 },
                    ) => {
                        self.todo.push((input1, input2));
                    }
                    (
                        MirRelationExpr::Union {
                            base: base1,
                            inputs: inputs1,
                        },
                        MirRelationExpr::Union {
                            base: base2,
                            inputs: inputs2,
                        },
                    ) => {
                        if inputs1.len() != inputs2.len() {
                            return Some((expr1, expr2));
                        } else {
                            self.todo.push((base1, base2));
                            self.todo.extend(inputs1.iter().zip_eq(inputs2.iter()));
                        }
                    }
                    (
                        MirRelationExpr::ArrangeBy {
                            keys: keys1,
                            input: input1,
                        },
                        MirRelationExpr::ArrangeBy {
                            keys: keys2,
                            input: input2,
                        },
                    ) => {
                        if keys1 != keys2 {
                            return Some((expr1, expr2));
                        } else {
                            self.todo.push((input1, input2));
                        }
                    }
                    _ => {
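                        // The two nodes have different variants (no arm above matched),
                        // so report them as a difference.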
                        return Some((expr1, expr2));
                    }
                }
            }
            None
        }
    }
}