mz_adapter/optimize/view.rs

// Copyright Materialize, Inc. and contributors. All rights reserved.
//
// Use of this software is governed by the Business Source License
// included in the LICENSE file.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0.
9
//! An Optimizer that
//! 1. Optimistically calls `optimize_mir_constant`.
//! 2. Then, if we haven't arrived at a constant, it does real optimization:
//!    - applies an [`ExprPrep`].
//!    - calls [`optimize_mir_local`], i.e., the logical optimizer.
//!
//! This is used for `CREATE VIEW` statements and in various other situations where no physical
//! optimization is needed, such as for `INSERT` statements.
//!
//! TODO: We should split this into an optimizer that is just for views, and another optimizer
//! for various other ad hoc things, such as `INSERT`, `COPY FROM`, etc.
22use std::time::Instant;
23
24use mz_expr::OptimizedMirRelationExpr;
25use mz_sql::optimizer_metrics::OptimizerMetrics;
26use mz_sql::plan::HirRelationExpr;
27use mz_transform::TransformCtx;
28use mz_transform::dataflow::DataflowMetainfo;
29use mz_transform::reprtypecheck::{
30    SharedContext as ReprTypecheckContext, empty_context as empty_repr_context,
31};
32
33use crate::optimize::dataflows::{ExprPrep, ExprPrepNoop};
34use crate::optimize::{
35    Optimize, OptimizerConfig, OptimizerError, optimize_mir_constant, optimize_mir_local,
36    trace_plan,
37};
38
/// An optimizer for plans that need only HIR⇒MIR lowering, constant folding, and
/// (optionally) local logical optimization — no physical optimization.
///
/// The type parameter `S` selects the [`ExprPrep`] strategy applied before the
/// logical optimizer runs; see [`Optimize::optimize`] for the pipeline itself.
pub struct Optimizer<S> {
    /// A representation typechecking context to use throughout the optimizer pipeline.
    repr_typecheck_ctx: ReprTypecheckContext,
    /// Optimizer config.
    config: OptimizerConfig,
    /// Optimizer metrics.
    ///
    /// Allowed to be `None` for cases where view optimization is invoked outside the
    /// coordinator context, and the metrics are not available.
    metrics: Option<OptimizerMetrics>,
    /// Expression preparation style to use. Can be `NoopExprPrepStyle` to skip expression
    /// preparation.
    expr_prep_style: S,
    /// Whether to call `FoldConstants` with a size limit, or try to fold constants of any size.
    fold_constants_limit: bool,
}
55
56impl Optimizer<ExprPrepNoop> {
57    /// Creates an optimizer instance that does not perform any expression
58    /// preparation. Additionally, this instance calls constant folding with a size limit.
59    pub fn new(config: OptimizerConfig, metrics: Option<OptimizerMetrics>) -> Self {
60        Self {
61            repr_typecheck_ctx: empty_repr_context(),
62            config,
63            metrics,
64            expr_prep_style: ExprPrepNoop,
65            fold_constants_limit: true,
66        }
67    }
68}
69
70impl<S> Optimizer<S> {
71    /// Creates an optimizer instance that takes an [`ExprPrep`] to handle
72    /// unmaterializable functions. Additionally, this instance calls constant
73    /// folding without a size limit.
74    pub fn new_with_prep_no_limit(
75        config: OptimizerConfig,
76        metrics: Option<OptimizerMetrics>,
77        expr_prep_style: S,
78    ) -> Optimizer<S> {
79        Self {
80            repr_typecheck_ctx: empty_repr_context(),
81            config,
82            metrics,
83            expr_prep_style,
84            fold_constants_limit: false,
85        }
86    }
87}
88
89impl<S: ExprPrep> Optimize<HirRelationExpr> for Optimizer<S> {
90    type To = OptimizedMirRelationExpr;
91
92    fn optimize(&mut self, expr: HirRelationExpr) -> Result<Self::To, OptimizerError> {
93        let time = Instant::now();
94
95        // Trace the pipeline input under `optimize/raw`.
96        trace_plan!(at: "raw", &expr);
97
98        // HIR ⇒ MIR lowering and decorrelation
99        let mut expr = expr.lower(&self.config, self.metrics.as_ref())?;
100
101        let mut df_meta = DataflowMetainfo::default();
102        let mut transform_ctx = TransformCtx::local(
103            &self.config.features,
104            &self.repr_typecheck_ctx,
105            &mut df_meta,
106            self.metrics.as_mut(),
107            None,
108        );
109
110        // First, we run a very simple optimizer pipeline, which only folds constants. This takes
111        // care of constant INSERTs. (This optimizer is also used for INSERTs, not just VIEWs.)
112        expr = optimize_mir_constant(expr, &mut transform_ctx, self.fold_constants_limit)?;
113
114        // MIR ⇒ MIR optimization (local)
115        let expr = if expr.as_const().is_some() {
116            // No need to optimize further, because we already have a constant.
117            // But trace this at "local", so that `EXPLAIN LOCALLY OPTIMIZED PLAN` can pick it up.
118            trace_plan!(at: "local", &expr);
119            OptimizedMirRelationExpr(expr)
120        } else {
121            // Do the real optimization (starting with `expr_prep_style`).
122            let mut opt_expr = OptimizedMirRelationExpr(expr);
123            self.expr_prep_style.prep_relation_expr(&mut opt_expr)?;
124            expr = opt_expr.into_inner();
125            optimize_mir_local(expr, &mut transform_ctx)?
126        };
127
128        if let Some(metrics) = &self.metrics {
129            metrics.observe_e2e_optimization_time("view", time.elapsed());
130        }
131
132        // Return the resulting OptimizedMirRelationExpr.
133        Ok(expr)
134    }
135}