mz_compute_types/dataflows.rs

// Copyright Materialize, Inc. and contributors. All rights reserved.
//
// Use of this software is governed by the Business Source License
// included in the LICENSE file.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0.

//! Types for describing dataflows.

use std::collections::{BTreeMap, BTreeSet};
use std::fmt;

use mz_expr::{CollectionPlan, MirRelationExpr, MirScalarExpr, OptimizedMirRelationExpr};
use mz_ore::collections::CollectionExt;
use mz_ore::soft_assert_or_log;
use mz_repr::refresh_schedule::RefreshSchedule;
use mz_repr::{GlobalId, SqlRelationType};
use mz_storage_types::time_dependence::TimeDependence;
use serde::{Deserialize, Serialize};
use timely::progress::Antichain;

use crate::plan::Plan;
use crate::plan::render_plan::RenderPlan;
use crate::sinks::{ComputeSinkConnection, ComputeSinkDesc};
use crate::sources::{SourceInstanceArguments, SourceInstanceDesc};
/// A description of a dataflow to construct and results to surface.
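///
/// # Example
///
/// A minimal, hedged sketch of assembling an empty description and pinning its
/// `as_of` (the name and timestamp are illustrative):
///
/// ```ignore
/// let mut df: DataflowDesc = DataflowDescription::new("example".into());
/// df.set_as_of(Antichain::from_elem(mz_repr::Timestamp::from(0u64)));
/// assert_eq!(df.export_ids().count(), 0);
/// ```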
#[derive(Clone, Debug, Serialize, Deserialize, Eq, PartialEq)]
pub struct DataflowDescription<P, S: 'static = (), T = mz_repr::Timestamp> {
    /// Source instantiations made available to the dataflow, paired with
    /// monotonicity information.
    pub source_imports: BTreeMap<GlobalId, SourceImport<S, T>>,
    /// Indexes made available to the dataflow.
    /// (id of index, import)
    pub index_imports: BTreeMap<GlobalId, IndexImport>,
    /// Views and indexes to be built and stored in the local context.
    /// Objects must be built in the specified order, as later objects may
    /// depend on identifiers of prior ones.
    pub objects_to_build: Vec<BuildDesc<P>>,
    /// Indexes to be made available for sharing with other dataflows.
    /// (id of new index, description of index, relation type of base source/view/table)
    pub index_exports: BTreeMap<GlobalId, (IndexDesc, SqlRelationType)>,
    /// Sinks to be created.
    /// (id of new sink, description of sink)
    pub sink_exports: BTreeMap<GlobalId, ComputeSinkDesc<S, T>>,
    /// An optional frontier to which inputs should be advanced.
    ///
    /// If this is set, it should override the default setting determined by
    /// the upper bound of `since` frontiers contributing to the dataflow.
    /// It is an error for this to be set to a frontier not beyond that default.
    pub as_of: Option<Antichain<T>>,
    /// Frontier beyond which the dataflow should not execute.
    /// Specifically, updates at times greater or equal to this frontier are suppressed.
    /// This is often set to `as_of + 1` to enable "batch" computations.
    /// Note that frontier advancements might still happen to times after the `until`;
    /// it is only the data that is suppressed. (This is consistent with how frontier
    /// advancements can also happen before the `as_of`.)
    pub until: Antichain<T>,
    /// The initial as_of when the collection is first created. Filled only for
    /// materialized views. Note that this doesn't change upon restarts.
    pub initial_storage_as_of: Option<Antichain<T>>,
    /// The schedule of REFRESH materialized views.
    pub refresh_schedule: Option<RefreshSchedule>,
    /// Human-readable name.
    pub debug_name: String,
    /// Description of how the dataflow's progress relates to wall-clock time.
    /// `None` for unknown.
    pub time_dependence: Option<TimeDependence>,
}

impl<P, S> DataflowDescription<P, S, mz_repr::Timestamp> {
    /// Tests whether the dataflow refers to a single timestamp: either `as_of`
    /// has a single coordinate and `until` corresponds to the `as_of` value
    /// plus one, or `as_of` is the maximum timestamp (and is thus also single,
    /// with an empty `until`).
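    ///
    /// A hedged sketch of the semantics (`df` and the timestamps are illustrative):
    ///
    /// ```ignore
    /// df.set_as_of(Antichain::from_elem(mz_repr::Timestamp::from(5u64)));
    /// df.until = Antichain::from_elem(mz_repr::Timestamp::from(6u64));
    /// assert!(df.is_single_time()); // `until` is exactly `as_of + 1`
    /// ```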
    pub fn is_single_time(&self) -> bool {
        // TODO: this would be much easier to check if `until` was a strict lower bound,
        // and we would be testing that `until == as_of`.

        let until = &self.until;

        // If `as_of` is not set at all, this can't be a single-time dataflow.
        let Some(as_of) = self.as_of.as_ref() else {
            return false;
        };
        // Ensure that as_of <= until.
        soft_assert_or_log!(
            timely::PartialOrder::less_equal(as_of, until),
            "expected `as_of ≤ until`, got `{as_of:?} ≰ {until:?}`",
        );
        // If `as_of` is not a single timestamp, this can't be a single-time dataflow.
        let Some(as_of) = as_of.as_option() else {
            return false;
        };
        // Ensure that `as_of = MAX` implies `until.is_empty()`.
        soft_assert_or_log!(
            as_of != &mz_repr::Timestamp::MAX || until.is_empty(),
            "expected `until = {{}}` due to `as_of = MAX`, got `until = {until:?}`",
        );
        // Note that the `(as_of = MAX, until = {})` case also returns `true`
        // here (as expected) since we are going to compare two `None` values.
        as_of.try_step_forward().as_ref() == until.as_option()
    }
}

impl<T> DataflowDescription<Plan<T>, (), mz_repr::Timestamp> {
    /// Check invariants expected to be true about `DataflowDescription`s.
    pub fn check_invariants(&self) -> Result<(), String> {
        let mut plans: Vec<_> = self.objects_to_build.iter().map(|o| &o.plan).collect();
        let mut lir_ids = BTreeSet::new();

        while let Some(plan) = plans.pop() {
            let lir_id = plan.lir_id;
            if !lir_ids.insert(lir_id) {
                return Err(format!(
                    "duplicate `LirId` in `DataflowDescription`: {lir_id}"
                ));
            }
            plans.extend(plan.node.children());
        }

        Ok(())
    }
}

impl<T> DataflowDescription<OptimizedMirRelationExpr, (), T> {
    /// Imports a previously exported index.
    ///
    /// This method makes available an index previously exported as `id`, identified
    /// to the query by `desc` (which names the view the index arranges, and the
    /// keys by which it is arranged).
    pub fn import_index(
        &mut self,
        id: GlobalId,
        desc: IndexDesc,
        typ: SqlRelationType,
        monotonic: bool,
    ) {
        self.index_imports.insert(
            id,
            IndexImport {
                desc,
                typ,
                monotonic,
                with_snapshot: true,
            },
        );
    }

    /// Imports a source and makes it available as `id`.
    pub fn import_source(&mut self, id: GlobalId, typ: SqlRelationType, monotonic: bool) {
        // Import the source with no linear operators applied to it.
        // They may be populated by whole-dataflow optimization.
        // Similarly, we require the snapshot by default, though optimization may choose to skip it.
        self.source_imports.insert(
            id,
            SourceImport {
                desc: SourceInstanceDesc {
                    storage_metadata: (),
                    arguments: SourceInstanceArguments { operators: None },
                    typ,
                },
                monotonic,
                with_snapshot: true,
                upper: Antichain::new(),
            },
        );
    }

    /// Binds to `id` the relation expression `plan`.
    pub fn insert_plan(&mut self, id: GlobalId, plan: OptimizedMirRelationExpr) {
        self.objects_to_build.push(BuildDesc { id, plan });
    }

    /// Exports as `id` an index described by `description`.
    ///
    /// Future uses of `import_index` in other dataflow descriptions may use `id`,
    /// as long as this dataflow has not been terminated in the meantime.
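    ///
    /// A hedged sketch (`df`, the IDs, and `on_type` are illustrative):
    ///
    /// ```ignore
    /// let desc = IndexDesc { on_id: view_id, key: vec![MirScalarExpr::column(0)] };
    /// df.export_index(index_id, desc, on_type);
    /// ```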
    pub fn export_index(&mut self, id: GlobalId, description: IndexDesc, on_type: SqlRelationType) {
        // We first create a "view" named `id` that ensures that the
        // data are correctly arranged and available for export.
        self.insert_plan(
            id,
            OptimizedMirRelationExpr::declare_optimized(MirRelationExpr::ArrangeBy {
                input: Box::new(MirRelationExpr::global_get(
                    description.on_id,
                    on_type.clone(),
                )),
                keys: vec![description.key.clone()],
            }),
        );
        self.index_exports.insert(id, (description, on_type));
    }

    /// Exports as `id` a sink described by `description`.
    pub fn export_sink(&mut self, id: GlobalId, description: ComputeSinkDesc<(), T>) {
        self.sink_exports.insert(id, description);
    }

    /// Returns true iff `id` is already imported into, or built by, this dataflow.
    pub fn is_imported(&self, id: &GlobalId) -> bool {
        self.objects_to_build.iter().any(|bd| &bd.id == id)
            || self.index_imports.keys().any(|i| i == id)
            || self.source_imports.keys().any(|i| i == id)
    }

    /// The number of columns associated with an identifier in the dataflow.
    pub fn arity_of(&self, id: &GlobalId) -> usize {
        for (source_id, source_import) in self.source_imports.iter() {
            let source = &source_import.desc;
            if source_id == id {
                return source.typ.arity();
            }
        }
        for IndexImport { desc, typ, .. } in self.index_imports.values() {
            if &desc.on_id == id {
                return typ.arity();
            }
        }
        for desc in self.objects_to_build.iter() {
            if &desc.id == id {
                return desc.plan.arity();
            }
        }
        panic!("GlobalId {} not found in DataflowDesc", id);
    }

    /// Applies `r` to each relation expression and `s` to each scalar expression
    /// contained in `self`, halting at the first error return.
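    ///
    /// A hedged sketch that counts visited scalar expressions (`df` is illustrative;
    /// the `Cell` works around the `Fn` bounds):
    ///
    /// ```ignore
    /// use std::cell::Cell;
    /// let scalars = Cell::new(0usize);
    /// df.visit_children(
    ///     |_relation| Ok::<_, ()>(()),
    ///     |_scalar| { scalars.set(scalars.get() + 1); Ok(()) },
    /// )?;
    /// ```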
    pub fn visit_children<R, S, E>(&mut self, r: R, s: S) -> Result<(), E>
    where
        R: Fn(&mut OptimizedMirRelationExpr) -> Result<(), E>,
        S: Fn(&mut MirScalarExpr) -> Result<(), E>,
    {
        for BuildDesc { plan, .. } in &mut self.objects_to_build {
            r(plan)?;
        }
        for source_import in self.source_imports.values_mut() {
            let Some(mfp) = source_import.desc.arguments.operators.as_mut() else {
                continue;
            };
            for expr in mfp.expressions.iter_mut() {
                s(expr)?;
            }
            for (_, expr) in mfp.predicates.iter_mut() {
                s(expr)?;
            }
        }
        Ok(())
    }
}

impl<P, S, T> DataflowDescription<P, S, T> {
    /// Creates a new dataflow description with a human-readable name.
    pub fn new(name: String) -> Self {
        Self {
            source_imports: Default::default(),
            index_imports: Default::default(),
            objects_to_build: Vec::new(),
            index_exports: Default::default(),
            sink_exports: Default::default(),
            as_of: Default::default(),
            until: Antichain::new(),
            initial_storage_as_of: None,
            refresh_schedule: None,
            debug_name: name,
            time_dependence: None,
        }
    }

    /// Sets the `as_of` frontier to the supplied argument.
    ///
    /// This method allows the dataflow to indicate a frontier up through
    /// which all times should be advanced. This can be done for at least
    /// two reasons: correctness and performance.
    ///
    /// Correctness may require an `as_of` to ensure that historical detail
    /// is consolidated at representative times, rather than exposing specific
    /// times that are not themselves meaningful. For example, updates may be
    /// compacted to times that are no longer the source times, but instead
    /// some byproduct of when compaction was executed; we should not present
    /// those specific times as meaningfully different from other equivalent
    /// times.
    ///
    /// Performance may benefit from an aggressive `as_of`, as it reduces the
    /// number of distinct moments at which collections vary. Differential
    /// dataflow will refresh its outputs at each time its inputs change; to
    /// moderate that, we minimize the volume of distinct input times as much
    /// as possible.
    ///
    /// Generally, one should consider setting `as_of` at least to the `since`
    /// frontiers of contributing data sources, and as aggressively as the
    /// computation permits.
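    ///
    /// A hedged sketch of a one-shot ("batch") dataflow at a single time `t`
    /// (`df` and `t` are illustrative), pairing `as_of` with `until = t + 1`:
    ///
    /// ```ignore
    /// df.set_as_of(Antichain::from_elem(t));
    /// df.until = Antichain::from_elem(t.step_forward());
    /// assert!(df.is_single_time());
    /// ```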
    pub fn set_as_of(&mut self, as_of: Antichain<T>) {
        self.as_of = Some(as_of);
    }

    /// Records the initial `as_of` of the storage collection associated with a materialized view.
    pub fn set_initial_as_of(&mut self, initial_as_of: Antichain<T>) {
        self.initial_storage_as_of = Some(initial_as_of);
    }

    /// Identifiers of imported objects (indexes and sources).
    pub fn import_ids(&self) -> impl Iterator<Item = GlobalId> + Clone + '_ {
        self.imported_index_ids().chain(self.imported_source_ids())
    }

    /// Identifiers of imported indexes.
    pub fn imported_index_ids(&self) -> impl Iterator<Item = GlobalId> + Clone + '_ {
        self.index_imports.keys().copied()
    }

    /// Identifiers of imported sources.
    pub fn imported_source_ids(&self) -> impl Iterator<Item = GlobalId> + Clone + '_ {
        self.source_imports.keys().copied()
    }

    /// Identifiers of exported objects (indexes and sinks).
    pub fn export_ids(&self) -> impl Iterator<Item = GlobalId> + Clone + '_ {
        self.exported_index_ids().chain(self.exported_sink_ids())
    }

    /// Identifiers of exported indexes.
    pub fn exported_index_ids(&self) -> impl Iterator<Item = GlobalId> + Clone + '_ {
        self.index_exports.keys().copied()
    }

    /// Identifiers of exported sinks.
    pub fn exported_sink_ids(&self) -> impl Iterator<Item = GlobalId> + Clone + '_ {
        self.sink_exports.keys().copied()
    }

    /// Identifiers of exported persist sinks.
    pub fn persist_sink_ids(&self) -> impl Iterator<Item = GlobalId> + '_ {
        self.sink_exports
            .iter()
            .filter_map(|(id, desc)| match desc.connection {
                ComputeSinkConnection::MaterializedView(_) => Some(*id),
                ComputeSinkConnection::ContinualTask(_) => Some(*id),
                _ => None,
            })
    }

    /// Identifiers of exported subscribe sinks.
    pub fn subscribe_ids(&self) -> impl Iterator<Item = GlobalId> + '_ {
        self.sink_exports
            .iter()
            .filter_map(|(id, desc)| match desc.connection {
                ComputeSinkConnection::Subscribe(_) => Some(*id),
                _ => None,
            })
    }

    /// Identifiers of exported continual tasks.
    pub fn continual_task_ids(&self) -> impl Iterator<Item = GlobalId> + '_ {
        self.sink_exports
            .iter()
            .filter_map(|(id, desc)| match desc.connection {
                ComputeSinkConnection::ContinualTask(_) => Some(*id),
                _ => None,
            })
    }

    /// Identifiers of exported copy to sinks.
    pub fn copy_to_ids(&self) -> impl Iterator<Item = GlobalId> + '_ {
        self.sink_exports
            .iter()
            .filter_map(|(id, desc)| match desc.connection {
                ComputeSinkConnection::CopyToS3Oneshot(_) => Some(*id),
                _ => None,
            })
    }

    /// Produce a `Display`able value containing the import IDs of this dataflow.
    pub fn display_import_ids(&self) -> impl fmt::Display + '_ {
        use mz_ore::str::{bracketed, separated};
        bracketed("[", "]", separated(", ", self.import_ids()))
    }

    /// Produce a `Display`able value containing the export IDs of this dataflow.
    pub fn display_export_ids(&self) -> impl fmt::Display + '_ {
        use mz_ore::str::{bracketed, separated};
        bracketed("[", "]", separated(", ", self.export_ids()))
    }

    /// Whether this dataflow installs transient collections.
    pub fn is_transient(&self) -> bool {
        self.export_ids().all(|id| id.is_transient())
    }

    /// Returns the description of the object to build with the specified
    /// identifier.
    ///
    /// # Panics
    ///
    /// Panics if `id` is not present in `objects_to_build` exactly once.
    pub fn build_desc(&self, id: GlobalId) -> &BuildDesc<P> {
        let mut builds = self.objects_to_build.iter().filter(|build| build.id == id);
        let build = builds
            .next()
            .unwrap_or_else(|| panic!("object to build id {id} unexpectedly missing"));
        assert!(builds.next().is_none());
        build
    }

    /// Returns the id of the dataflow's sink export.
    ///
    /// # Panics
    ///
    /// Panics if the dataflow has no sink exports or has more than one.
    pub fn sink_id(&self) -> GlobalId {
        let sink_exports = &self.sink_exports;
        let sink_id = sink_exports.keys().into_element();
        *sink_id
    }
}

impl<P, S, T> DataflowDescription<P, S, T>
where
    P: CollectionPlan,
{
    /// Computes the set of identifiers upon which the specified collection
    /// identifier depends.
    ///
    /// `collection_id` must specify a valid object in `objects_to_build`.
    ///
    /// This method includes identifiers for e.g. intermediate views, and should be filtered
    /// if one only wants sources and indexes.
    ///
    /// This method is safe for mutually recursive view definitions.
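    ///
    /// A hedged sketch (`df` and `view_id` are illustrative):
    ///
    /// ```ignore
    /// let all_deps = df.depends_on(view_id);        // includes intermediate views
    /// let imports = df.depends_on_imports(view_id); // sources and indexes only
    /// assert!(imports.is_subset(&all_deps));
    /// ```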
    pub fn depends_on(&self, collection_id: GlobalId) -> BTreeSet<GlobalId> {
        let mut out = BTreeSet::new();
        self.depends_on_into(collection_id, &mut out);
        out
    }

    /// Like `depends_on`, but appends to an existing `BTreeSet`.
    pub fn depends_on_into(&self, collection_id: GlobalId, out: &mut BTreeSet<GlobalId>) {
        out.insert(collection_id);
        if self.source_imports.contains_key(&collection_id) {
            // The collection is provided by an imported source. Report the
            // dependency on the source.
            out.insert(collection_id);
            return;
        }

        // NOTE(benesch): we're not smart enough here to know *which* index
        // for the collection will be used, if one exists, so we have to report
        // the dependency on all of them.
        let mut found_index = false;
        for (index_id, IndexImport { desc, .. }) in &self.index_imports {
            if desc.on_id == collection_id {
                // The collection is provided by an imported index. Report the
                // dependency on the index.
                out.insert(*index_id);
                found_index = true;
            }
        }
        if found_index {
            return;
        }

        // The collection is not provided by a source or imported index.
        // It must be a collection whose plan we have handy. Recurse.
        let build = self.build_desc(collection_id);
        for id in build.plan.depends_on() {
            if !out.contains(&id) {
                self.depends_on_into(id, out)
            }
        }
    }

    /// Computes the set of imports upon which the specified collection depends.
    ///
    /// This method behaves like `depends_on` but filters out internal dependencies that are not
    /// included in the dataflow imports.
    pub fn depends_on_imports(&self, collection_id: GlobalId) -> BTreeSet<GlobalId> {
        let is_import = |id: &GlobalId| {
            self.source_imports.contains_key(id) || self.index_imports.contains_key(id)
        };

        let deps = self.depends_on(collection_id);
        deps.into_iter().filter(is_import).collect()
    }
}

impl<S, T> DataflowDescription<RenderPlan, S, T>
where
    S: Clone + PartialEq,
    T: Clone + timely::PartialOrder,
{
    /// Determine if a dataflow description is compatible with this dataflow description.
    ///
    /// Compatible dataflows have structurally equal exports, imports, and objects to build.
    /// The `as_of` of the receiver has to be less than or equal to the `other` `as_of`.
    ///
    /// Note that this method performs normalization as part of the structural equality checking,
    /// which involves cloning both `self` and `other`. It is therefore relatively expensive and
    /// should only be used on cold code paths.
    ///
    // TODO: The semantics of this function are only useful for command reconciliation at the moment.
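    //
    // A hedged sketch of the reconciliation use case (`old` and `new` are
    // illustrative descriptions of the same dataflow across a restart):
    //
    //     if old.compatible_with(&new) {
    //         // Keep the running dataflow; at most its `as_of` has advanced.
    //     }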
    pub fn compatible_with(&self, other: &Self) -> bool {
        let old = self.as_comparable();
        let new = other.as_comparable();

        let equality = old.index_exports == new.index_exports
            && old.sink_exports == new.sink_exports
            && old.objects_to_build == new.objects_to_build
            && old.index_imports == new.index_imports
            && old.source_imports == new.source_imports
            && old.time_dependence == new.time_dependence;

        let partial = if let (Some(old_as_of), Some(new_as_of)) = (&old.as_of, &new.as_of) {
            timely::PartialOrder::less_equal(old_as_of, new_as_of)
        } else {
            false
        };

        equality && partial
    }

    /// Returns a `DataflowDescription` that has the same structure as `self` and can be
    /// structurally compared to other `DataflowDescription`s.
    ///
    /// The function normalizes several properties. It replaces transient `GlobalId`s
    /// that are only used internally (i.e. neither imported nor exported) with consecutive
    /// IDs starting from `t1`, and replaces each source import's `upper` with a dummy value.
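    ///
    /// A hedged sketch: two otherwise identical dataflows whose only internal view
    /// is bound to `t42` in one and `t17` in the other normalize that binding to
    /// `t1`, so (`df_a` and `df_b` being illustrative) the normalized forms compare equal:
    ///
    /// ```ignore
    /// assert_eq!(
    ///     df_a.as_comparable().objects_to_build,
    ///     df_b.as_comparable().objects_to_build,
    /// );
    /// ```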
    fn as_comparable(&self) -> Self {
        let external_ids: BTreeSet<_> = self.import_ids().chain(self.export_ids()).collect();

        let mut id_counter = 0;
        let mut replacements = BTreeMap::new();

        let mut maybe_replace = |id: GlobalId| {
            if id.is_transient() && !external_ids.contains(&id) {
                *replacements.entry(id).or_insert_with(|| {
                    id_counter += 1;
                    GlobalId::Transient(id_counter)
                })
            } else {
                id
            }
        };

        let mut source_imports = self.source_imports.clone();
        for import in source_imports.values_mut() {
            import.upper = Antichain::new();
        }

        let mut objects_to_build = self.objects_to_build.clone();
        for object in &mut objects_to_build {
            object.id = maybe_replace(object.id);
            object.plan.replace_ids(&mut maybe_replace);
        }

        let mut index_exports = self.index_exports.clone();
        for (desc, _typ) in index_exports.values_mut() {
            desc.on_id = maybe_replace(desc.on_id);
        }

        let mut sink_exports = self.sink_exports.clone();
        for desc in sink_exports.values_mut() {
            desc.from = maybe_replace(desc.from);
        }

        DataflowDescription {
            source_imports,
            index_imports: self.index_imports.clone(),
            objects_to_build,
            index_exports,
            sink_exports,
            as_of: self.as_of.clone(),
            until: self.until.clone(),
            initial_storage_as_of: self.initial_storage_as_of.clone(),
            refresh_schedule: self.refresh_schedule.clone(),
            debug_name: self.debug_name.clone(),
            time_dependence: self.time_dependence.clone(),
        }
    }
}

/// A commonly used name for dataflows containing MIR expressions.
pub type DataflowDesc = DataflowDescription<OptimizedMirRelationExpr, ()>;

/// An index storing processed updates so they can be queried
/// or reused in other computations.
#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize, Hash)]
pub struct IndexDesc {
    /// Identity of the collection the index is on.
    pub on_id: GlobalId,
    /// Expressions to be arranged, in order of decreasing primacy.
    pub key: Vec<MirScalarExpr>,
}

/// Information about an imported index, and how it will be used by the dataflow.
#[derive(Clone, Debug, Serialize, Deserialize, Eq, PartialEq)]
pub struct IndexImport {
    /// Description of index.
    pub desc: IndexDesc,
    /// Schema and keys of the object the index is on.
    pub typ: SqlRelationType,
    /// Whether the index will supply monotonic data.
    pub monotonic: bool,
    /// Whether this import must include the snapshot data.
    pub with_snapshot: bool,
}

/// Information about an imported source, and how it will be used by the dataflow.
#[derive(Clone, Debug, Serialize, Deserialize, Eq, PartialEq)]
pub struct SourceImport<S: 'static = (), T = mz_repr::Timestamp> {
    /// Description of the source instance to import.
    pub desc: SourceInstanceDesc<S>,
    /// Whether the source will supply monotonic data.
    pub monotonic: bool,
    /// Whether this import must include the snapshot data.
    pub with_snapshot: bool,
    /// The initial known upper frontier for the source.
    pub upper: Antichain<T>,
}

/// An association of a global identifier to an expression.
#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)]
pub struct BuildDesc<P> {
    /// TODO(database-issues#7533): Add documentation.
    pub id: GlobalId,
    /// TODO(database-issues#7533): Add documentation.
    pub plan: P,
}