// mz_adapter/src/optimize/view.rs
// Copyright Materialize, Inc. and contributors. All rights reserved.
//
// Use of this software is governed by the Business Source License
// included in the LICENSE file.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0.

//! An Optimizer that
//! 1. Optimistically calls `optimize_mir_constant`.
//! 2. Then, if we haven't arrived at a constant, it does real optimization:
//!    - applies an [`ExprPrep`].
//!    - calls [`optimize_mir_local`], i.e., the logical optimizer.
//!
//! This is used for `CREATE VIEW` statements and in various other situations where no physical
//! optimization is needed, such as for `INSERT` statements.
//!
//! TODO: We should split this into an optimizer that is just for views, and another optimizer
//! for various other ad hoc things, such as `INSERT`, `COPY FROM`, etc.
use std::time::Instant;

use mz_expr::OptimizedMirRelationExpr;
use mz_sql::optimizer_metrics::OptimizerMetrics;
use mz_sql::plan::HirRelationExpr;
use mz_transform::TransformCtx;
use mz_transform::dataflow::DataflowMetainfo;
use mz_transform::typecheck::{SharedTypecheckingContext, empty_typechecking_context};

use crate::optimize::dataflows::{ExprPrep, ExprPrepNoop};
use crate::optimize::{
    Optimize, OptimizerConfig, OptimizerError, optimize_mir_constant, optimize_mir_local,
    trace_plan,
};
37pub struct Optimizer<S> {
38    /// A representation typechecking context to use throughout the optimizer pipeline.
39    typecheck_ctx: SharedTypecheckingContext,
40    /// Optimizer config.
41    config: OptimizerConfig,
42    /// Optimizer metrics.
43    ///
44    /// Allowed to be `None` for cases where view optimization is invoked outside the
45    /// coordinator context, and the metrics are not available.
46    metrics: Option<OptimizerMetrics>,
47    /// Expression preparation style to use. Can be `NoopExprPrepStyle` to skip expression
48    /// preparation.
49    expr_prep_style: S,
50    /// Whether to call `FoldConstants` with a size limit, or try to fold constants of any size.
51    fold_constants_limit: bool,
52}
53
54impl Optimizer<ExprPrepNoop> {
55    /// Creates an optimizer instance that does not perform any expression
56    /// preparation. Additionally, this instance calls constant folding with a size limit.
57    pub fn new(config: OptimizerConfig, metrics: Option<OptimizerMetrics>) -> Self {
58        Self {
59            typecheck_ctx: empty_typechecking_context(),
60            config,
61            metrics,
62            expr_prep_style: ExprPrepNoop,
63            fold_constants_limit: true,
64        }
65    }
66}
67
68impl<S> Optimizer<S> {
69    /// Creates an optimizer instance that takes an [`ExprPrep`] to handle
70    /// unmaterializable functions. Additionally, this instance calls constant
71    /// folding without a size limit.
72    pub fn new_with_prep_no_limit(
73        config: OptimizerConfig,
74        metrics: Option<OptimizerMetrics>,
75        expr_prep_style: S,
76    ) -> Optimizer<S> {
77        Self {
78            typecheck_ctx: empty_typechecking_context(),
79            config,
80            metrics,
81            expr_prep_style,
82            fold_constants_limit: false,
83        }
84    }
85}
86
87impl<S: ExprPrep> Optimize<HirRelationExpr> for Optimizer<S> {
88    type To = OptimizedMirRelationExpr;
89
90    fn optimize(&mut self, expr: HirRelationExpr) -> Result<Self::To, OptimizerError> {
91        let time = Instant::now();
92
93        // Trace the pipeline input under `optimize/raw`.
94        trace_plan!(at: "raw", &expr);
95
96        // HIR ⇒ MIR lowering and decorrelation
97        let mut expr = expr.lower(&self.config, self.metrics.as_ref())?;
98
99        let mut df_meta = DataflowMetainfo::default();
100        let mut transform_ctx = TransformCtx::local(
101            &self.config.features,
102            &self.typecheck_ctx,
103            &mut df_meta,
104            self.metrics.as_mut(),
105            None,
106        );
107
108        // First, we run a very simple optimizer pipeline, which only folds constants. This takes
109        // care of constant INSERTs. (This optimizer is also used for INSERTs, not just VIEWs.)
110        expr = optimize_mir_constant(expr, &mut transform_ctx, self.fold_constants_limit)?;
111
112        // MIR ⇒ MIR optimization (local)
113        let expr = if expr.as_const().is_some() {
114            // No need to optimize further, because we already have a constant.
115            // But trace this at "local", so that `EXPLAIN LOCALLY OPTIMIZED PLAN` can pick it up.
116            trace_plan!(at: "local", &expr);
117            OptimizedMirRelationExpr(expr)
118        } else {
119            // Do the real optimization (starting with `expr_prep_style`).
120            let mut opt_expr = OptimizedMirRelationExpr(expr);
121            self.expr_prep_style.prep_relation_expr(&mut opt_expr)?;
122            expr = opt_expr.into_inner();
123            optimize_mir_local(expr, &mut transform_ctx)?
124        };
125
126        if let Some(metrics) = &self.metrics {
127            metrics.observe_e2e_optimization_time("view", time.elapsed());
128        }
129
130        // TODO: Handle the `optimizer_notices` in `df_meta`.
131        // https://github.com/MaterializeInc/database-issues/issues/10012
132
133        // Return the resulting OptimizedMirRelationExpr.
134        Ok(expr)
135    }
136}