// Copyright Materialize, Inc. and contributors. All rights reserved.
//
// Use of this software is governed by the Business Source License
// included in the LICENSE file.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0.

//! Optimizer implementation for `SUBSCRIBE` statements.
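//!
//! The optimizer runs in two stages that are connected by an explicit
//! timestamp resolution step. A rough sketch of how a caller might drive the
//! pipeline end to end (the bindings fed into it, such as `subscribe_from` and
//! `as_of`, are assumptions for illustration and not defined in this module):
//!
//! ```ignore
//! // Stage 1: `SubscribeFrom` ⇒ `GlobalMirPlan<Unresolved>`.
//! let mut optimizer = Optimizer::new(
//!     catalog, compute_instance, view_id, sink_id, conn_id,
//!     with_snapshot, up_to, debug_name, config, metrics,
//! );
//! let global_mir_plan = optimizer.optimize(subscribe_from)?;
//!
//! // Timestamp resolution: `GlobalMirPlan<Unresolved>` ⇒ `GlobalMirPlan<Resolved>`.
//! let global_mir_plan = global_mir_plan.resolve(as_of);
//!
//! // Stage 2: `GlobalMirPlan<Resolved>` ⇒ `GlobalLirPlan`.
//! let global_lir_plan = optimizer.optimize(global_mir_plan)?;
//! let (df_desc, df_meta) = global_lir_plan.unapply();
//! ```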

use std::marker::PhantomData;
use std::sync::Arc;
use std::time::{Duration, Instant};

use differential_dataflow::lattice::Lattice;
use mz_adapter_types::connection::ConnectionId;
use mz_compute_types::plan::Plan;
use mz_compute_types::sinks::{ComputeSinkConnection, ComputeSinkDesc, SubscribeSinkConnection};
use mz_compute_types::ComputeInstanceId;
use mz_ore::collections::CollectionExt;
use mz_ore::soft_assert_or_log;
use mz_repr::{GlobalId, RelationDesc, Timestamp};
use mz_sql::optimizer_metrics::OptimizerMetrics;
use mz_sql::plan::SubscribeFrom;
use mz_transform::dataflow::DataflowMetainfo;
use mz_transform::normalize_lets::normalize_lets;
use mz_transform::typecheck::{empty_context, SharedContext as TypecheckContext};
use mz_transform::TransformCtx;
use timely::progress::Antichain;

use crate::catalog::Catalog;
use crate::optimize::dataflows::{
    dataflow_import_id_bundle, prep_relation_expr, prep_scalar_expr, ComputeInstanceSnapshot,
    DataflowBuilder, ExprPrepStyle,
};
use crate::optimize::{
    optimize_mir_local, trace_plan, LirDataflowDescription, MirDataflowDescription, Optimize,
    OptimizeMode, OptimizerConfig, OptimizerError,
};
use crate::CollectionIdBundle;

pub struct Optimizer {
    /// A typechecking context to use throughout the optimizer pipeline.
    typecheck_ctx: TypecheckContext,
    /// A snapshot of the catalog state.
    catalog: Arc<Catalog>,
    /// A snapshot of the cluster that will run the dataflows.
    compute_instance: ComputeInstanceSnapshot,
    /// A transient GlobalId to be used for the exported sink.
    sink_id: GlobalId,
    /// A transient GlobalId to be used when constructing a dataflow for
    /// `SUBSCRIBE FROM <SELECT>` variants.
    view_id: GlobalId,
    /// The id of the session connection in which the optimizer will run.
    conn_id: Option<ConnectionId>,
    /// Should the plan produce an initial snapshot?
    with_snapshot: bool,
    /// The `UP TO` frontier for the sink, if any. Updates at or beyond this
    /// timestamp are not emitted, allowing the dataflow to shut down early.
    up_to: Option<Timestamp>,
    /// A human-readable name exposed internally (useful for debugging).
    debug_name: String,
    /// Optimizer config.
    config: OptimizerConfig,
    /// Optimizer metrics.
    metrics: OptimizerMetrics,
    /// The time spent performing optimization so far.
    duration: Duration,
}

// A minimal `Debug` implementation that hides fields. This is needed so that
// the `event!` call in `sequence_peek_stage` does not emit a lot of data.
//
// For now, we skip almost all fields, but we might revisit that if it turns
// out that we really need them for debugging purposes.
impl std::fmt::Debug for Optimizer {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("Optimizer")
            .field("config", &self.config)
            .finish_non_exhaustive()
    }
}

impl Optimizer {
    pub fn new(
        catalog: Arc<Catalog>,
        compute_instance: ComputeInstanceSnapshot,
        view_id: GlobalId,
        sink_id: GlobalId,
        conn_id: Option<ConnectionId>,
        with_snapshot: bool,
        up_to: Option<Timestamp>,
        debug_name: String,
        config: OptimizerConfig,
        metrics: OptimizerMetrics,
    ) -> Self {
        Self {
            typecheck_ctx: empty_context(),
            catalog,
            compute_instance,
            view_id,
            sink_id,
            conn_id,
            with_snapshot,
            up_to,
            debug_name,
            config,
            metrics,
            duration: Default::default(),
        }
    }

    pub fn cluster_id(&self) -> ComputeInstanceId {
        self.compute_instance.instance_id()
    }

    pub fn up_to(&self) -> Option<Timestamp> {
        self.up_to
    }
}

/// The (sealed intermediate) result after:
///
/// 1. embedding a [`SubscribeFrom`] plan into a [`MirDataflowDescription`],
/// 2. transitively inlining referenced views, and
/// 3. jointly optimizing the `MIR` plans in the [`MirDataflowDescription`].
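///
/// The type parameter `T` is a typestate marker ([`Unresolved`] or
/// [`Resolved`]) recording whether a timestamp has already been resolved for
/// this plan.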
#[derive(Clone, Debug)]
pub struct GlobalMirPlan<T: Clone> {
    df_desc: MirDataflowDescription,
    df_meta: DataflowMetainfo,
    phantom: PhantomData<T>,
}

impl<T: Clone> GlobalMirPlan<T> {
    /// Computes the [`CollectionIdBundle`] of the wrapped dataflow.
    pub fn id_bundle(&self, compute_instance_id: ComputeInstanceId) -> CollectionIdBundle {
        dataflow_import_id_bundle(&self.df_desc, compute_instance_id)
    }
}

/// The (final) result after MIR ⇒ LIR lowering and optimizing the resulting
/// `DataflowDescription` with `LIR` plans.
#[derive(Clone, Debug)]
pub struct GlobalLirPlan {
    df_desc: LirDataflowDescription,
    df_meta: DataflowMetainfo,
}

impl GlobalLirPlan {
    pub fn sink_id(&self) -> GlobalId {
        let sink_exports = &self.df_desc.sink_exports;
        let sink_id = sink_exports.keys().next().expect("valid sink");
        *sink_id
    }

    pub fn as_of(&self) -> Option<Timestamp> {
        self.df_desc.as_of.clone().map(|as_of| as_of.into_element())
    }

    pub fn sink_desc(&self) -> &ComputeSinkDesc {
        let sink_exports = &self.df_desc.sink_exports;
        let sink_desc = sink_exports.values().next().expect("valid sink");
        sink_desc
    }
}

/// Marker type for [`GlobalMirPlan`] structs representing an optimization
/// result without a resolved timestamp.
#[derive(Clone, Debug)]
pub struct Unresolved;

/// Marker type for [`GlobalMirPlan`] structs representing an optimization
/// result with a resolved timestamp.
///
/// The actual timestamp value is set in the [`MirDataflowDescription`] of the
/// surrounding [`GlobalMirPlan`] when we call `resolve()`.
#[derive(Clone, Debug)]
pub struct Resolved;

impl Optimize<SubscribeFrom> for Optimizer {
    type To = GlobalMirPlan<Unresolved>;

    fn optimize(&mut self, plan: SubscribeFrom) -> Result<Self::To, OptimizerError> {
        let time = Instant::now();

        let mut df_builder = {
            let catalog = self.catalog.state();
            let compute = self.compute_instance.clone();
            DataflowBuilder::new(catalog, compute).with_config(&self.config)
        };
        let mut df_desc = MirDataflowDescription::new(self.debug_name.clone());
        let mut df_meta = DataflowMetainfo::default();

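        // Assemble the dataflow: either import an existing collection by its
        // id, or locally optimize the ad-hoc query and import it under the
        // transient `view_id`.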
        match plan {
            SubscribeFrom::Id(from_id) => {
                let from = self.catalog.get_entry(&from_id);
                let from_desc = from
                    .desc(
                        &self
                            .catalog
                            .state()
                            .resolve_full_name(from.name(), self.conn_id.as_ref()),
                    )
                    .expect("subscribes can only be run on items with descs")
                    .into_owned();

                df_builder.import_into_dataflow(&from_id, &mut df_desc, &self.config.features)?;
                df_builder.maybe_reoptimize_imported_views(&mut df_desc, &self.config)?;

                // Make SinkDesc
                let sink_description = ComputeSinkDesc {
                    from: from_id,
                    from_desc,
                    connection: ComputeSinkConnection::Subscribe(SubscribeSinkConnection::default()),
                    with_snapshot: self.with_snapshot,
                    up_to: self.up_to.map(Antichain::from_elem).unwrap_or_default(),
                    // No `FORCE NOT NULL` for subscribes
                    non_null_assertions: vec![],
                    // No `REFRESH` for subscribes
                    refresh_schedule: None,
                };
                df_desc.export_sink(self.sink_id, sink_description);
            }
            SubscribeFrom::Query { expr, desc } => {
                // TODO: Change the `expr` type to be `HirRelationExpr` and run
                // HIR ⇒ MIR lowering and decorrelation here. This would allow
                // us to implement something like `EXPLAIN RAW PLAN FOR SUBSCRIBE`.
                //
                // let expr = expr.lower(&self.config)?;

                // MIR ⇒ MIR optimization (local)
                let mut transform_ctx =
                    TransformCtx::local(&self.config.features, &self.typecheck_ctx, &mut df_meta);
                let expr = optimize_mir_local(expr, &mut transform_ctx)?;

                df_builder.import_view_into_dataflow(
                    &self.view_id,
                    &expr,
                    &mut df_desc,
                    &self.config.features,
                )?;
                df_builder.maybe_reoptimize_imported_views(&mut df_desc, &self.config)?;

                // Make SinkDesc
                let sink_description = ComputeSinkDesc {
                    from: self.view_id,
                    from_desc: RelationDesc::new(expr.typ(), desc.iter_names()),
                    connection: ComputeSinkConnection::Subscribe(SubscribeSinkConnection::default()),
                    with_snapshot: self.with_snapshot,
                    up_to: self.up_to.map(Antichain::from_elem).unwrap_or_default(),
                    // No `FORCE NOT NULL` for subscribes
                    non_null_assertions: vec![],
                    // No `REFRESH` for subscribes
                    refresh_schedule: None,
                };
                df_desc.export_sink(self.sink_id, sink_description);
            }
        };

        // Prepare expressions in the assembled dataflow.
        let style = ExprPrepStyle::Index;
        df_desc.visit_children(
            |r| prep_relation_expr(r, style),
            |s| prep_scalar_expr(s, style),
        )?;

        // Construct TransformCtx for global optimization.
        let mut transform_ctx = TransformCtx::global(
            &df_builder,
            &mz_transform::EmptyStatisticsOracle, // TODO: wire proper stats
            &self.config.features,
            &self.typecheck_ctx,
            &mut df_meta,
        );
        // Run global optimization.
        mz_transform::optimize_dataflow(&mut df_desc, &mut transform_ctx, false)?;

        if self.config.mode == OptimizeMode::Explain {
            // Collect the list of indexes used by the dataflow at this point.
            trace_plan!(at: "global", &df_meta.used_indexes(&df_desc));
        }

        self.duration += time.elapsed();

        // Return the (sealed) plan at the end of this optimization step.
        Ok(GlobalMirPlan {
            df_desc,
            df_meta,
            phantom: PhantomData::<Unresolved>,
        })
    }
}

impl GlobalMirPlan<Unresolved> {
    /// Produces the [`GlobalMirPlan`] with [`Resolved`] timestamp.
    ///
    /// We need to resolve timestamps before the `GlobalMirPlan ⇒ GlobalLirPlan`
    /// optimization stage in order to benefit from possible single-time
    /// optimizations in the `Plan::finalize_dataflow` call.
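    ///
    /// A hypothetical call site (the `as_of` frontier itself is chosen by the
    /// caller, outside this module):
    ///
    /// ```ignore
    /// let resolved = global_mir_plan.resolve(Antichain::from_elem(chosen_as_of));
    /// let global_lir_plan = optimizer.optimize(resolved)?;
    /// ```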
    pub fn resolve(mut self, as_of: Antichain<Timestamp>) -> GlobalMirPlan<Resolved> {
        // A dataflow description for a `SUBSCRIBE` statement should not have
        // index exports.
        soft_assert_or_log!(
            self.df_desc.index_exports.is_empty(),
            "unexpectedly setting until for a DataflowDescription with an index",
        );

        // Set the `as_of` timestamp for the dataflow.
        self.df_desc.set_as_of(as_of);

        // The only outputs of the dataflow are sinks, so we might be able to
        // turn off the computation early if they all have non-trivial
        // `up_to`s.
        self.df_desc.until = Antichain::from_elem(Timestamp::MIN);
        for (_, sink) in &self.df_desc.sink_exports {
            self.df_desc.until.join_assign(&sink.up_to);
        }

        GlobalMirPlan {
            df_desc: self.df_desc,
            df_meta: self.df_meta,
            phantom: PhantomData::<Resolved>,
        }
    }
}

impl Optimize<GlobalMirPlan<Resolved>> for Optimizer {
    type To = GlobalLirPlan;

    fn optimize(&mut self, plan: GlobalMirPlan<Resolved>) -> Result<Self::To, OptimizerError> {
        let time = Instant::now();

        let GlobalMirPlan {
            mut df_desc,
            df_meta,
            phantom: _,
        } = plan;

        // Ensure all expressions are normalized before finalizing.
        for build in df_desc.objects_to_build.iter_mut() {
            normalize_lets(&mut build.plan.0, &self.config.features)?
        }

        // Finalize the dataflow. This includes:
        // - MIR ⇒ LIR lowering
        // - LIR ⇒ LIR transforms
        let df_desc = Plan::finalize_dataflow(df_desc, &self.config.features)?;

        self.duration += time.elapsed();
        self.metrics
            .observe_e2e_optimization_time("subscribe", self.duration);

        // Return the plan at the end of this `optimize` step.
        Ok(GlobalLirPlan { df_desc, df_meta })
    }
}

impl GlobalLirPlan {
    /// Unwraps the parts of the final result of the optimization pipeline.
    pub fn unapply(self) -> (LirDataflowDescription, DataflowMetainfo) {
        (self.df_desc, self.df_meta)
    }
}