mz_timely_util/operator.rs

// Copyright Materialize, Inc. and contributors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License in the LICENSE file at the
// root of this repository, or online at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Common operator transformations on timely streams and differential collections.

use std::hash::{BuildHasher, Hash, Hasher};
use std::marker::PhantomData;

use differential_dataflow::consolidation::ConsolidatingContainerBuilder;
use differential_dataflow::containers::{Columnation, TimelyStack};
use differential_dataflow::difference::{Multiply, Semigroup};
use differential_dataflow::lattice::Lattice;
use differential_dataflow::trace::{Batcher, Builder, Description};
use differential_dataflow::{AsCollection, Collection, Hashable, VecCollection};
use timely::container::{DrainContainer, PushInto};
use timely::dataflow::channels::pact::{Exchange, ParallelizationContract, Pipeline};
use timely::dataflow::operators::Capability;
use timely::dataflow::operators::generic::builder_rc::{
    OperatorBuilder as OperatorBuilderRc, OperatorBuilder,
};
use timely::dataflow::operators::generic::operator::{self, Operator};
use timely::dataflow::operators::generic::{
    InputHandleCore, OperatorInfo, OutputBuilder, OutputBuilderSession,
};
use timely::dataflow::{Scope, Stream, StreamCore};
use timely::progress::{Antichain, Timestamp};
use timely::{Container, ContainerBuilder, Data, PartialOrder};

/// Extension methods for timely [`StreamCore`]s.
pub trait StreamExt<G, C1>
where
    C1: Container + DrainContainer,
    G: Scope,
{
    /// Like `timely::dataflow::operators::generic::operator::Operator::unary`,
    /// but the logic function can handle failures.
    ///
    /// Creates a new dataflow operator that partitions its input stream by a
    /// parallelization strategy `pact` and repeatedly invokes `logic`, the
    /// function returned by `constructor`. The `logic` function can read from
    /// the input stream and write to either of two output streams, where the
    /// first output stream represents successful computations and the second
    /// represents failed computations.
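    ///
    /// For a complete example that drives both output sessions, see the
    /// implementation of [`Self::flat_map_fallible`] in this module.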
    fn unary_fallible<DCB, ECB, B, P>(
        &self,
        pact: P,
        name: &str,
        constructor: B,
    ) -> (StreamCore<G, DCB::Container>, StreamCore<G, ECB::Container>)
    where
        DCB: ContainerBuilder,
        ECB: ContainerBuilder,
        B: FnOnce(
            Capability<G::Timestamp>,
            OperatorInfo,
        ) -> Box<
            dyn FnMut(
                    &mut InputHandleCore<G::Timestamp, C1, P::Puller>,
                    &mut OutputBuilderSession<'_, G::Timestamp, DCB>,
                    &mut OutputBuilderSession<'_, G::Timestamp, ECB>,
                ) + 'static,
        >,
        P: ParallelizationContract<G::Timestamp, C1>;

    /// Like [`timely::dataflow::operators::map::Map::flat_map`], but `logic`
    /// is allowed to fail. The first returned stream will contain the
    /// successful applications of `logic`, while the second returned stream
    /// will contain the failed applications.
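    ///
    /// A sketch of a possible use (marked `ignore`; the container builder
    /// choices here are illustrative, not required by the API):
    /// ```ignore
    /// use timely::container::CapacityContainerBuilder;
    ///
    /// // Route parsed integers to the first stream and parse errors to the second.
    /// let (oks, errs) = stream.flat_map_fallible::<
    ///     CapacityContainerBuilder<Vec<i64>>,
    ///     CapacityContainerBuilder<Vec<String>>,
    ///     _, _, _, _,
    /// >("ParseNumbers", |text: String| {
    ///     Some(text.parse::<i64>().map_err(|e| e.to_string()))
    /// });
    /// ```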
    fn flat_map_fallible<DCB, ECB, D2, E, I, L>(
        &self,
        name: &str,
        logic: L,
    ) -> (StreamCore<G, DCB::Container>, StreamCore<G, ECB::Container>)
    where
        DCB: ContainerBuilder + PushInto<D2>,
        ECB: ContainerBuilder + PushInto<E>,
        I: IntoIterator<Item = Result<D2, E>>,
        L: for<'a> FnMut(C1::Item<'a>) -> I + 'static;

    /// Blocks progress of the frontier at the `expiration` time, preventing
    /// downstream operators from observing the expiration time or any time
    /// beyond it.
    fn expire_stream_at(&self, name: &str, expiration: G::Timestamp) -> StreamCore<G, C1>;
}

/// Extension methods for differential [`Collection`]s.
pub trait CollectionExt<G, D1, R>
where
    G: Scope,
    R: Semigroup,
{
    /// Creates a new empty collection in `scope`.
    fn empty(scope: &G) -> VecCollection<G, D1, R>;

    /// Like [`Collection::map`], but `logic` is allowed to fail. The first
    /// returned collection will contain successful applications of `logic`,
    /// while the second returned collection will contain the failed
    /// applications.
    ///
    /// Callers need to specify the following type parameters:
    /// * `DCB`: The container builder for the `Ok` output.
    /// * `ECB`: The container builder for the `Err` output.
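    ///
    /// A sketch of a possible use (marked `ignore`; the builder alias is
    /// illustrative, not part of this crate):
    /// ```ignore
    /// use timely::container::CapacityContainerBuilder;
    ///
    /// type Cb<D, T, R> = CapacityContainerBuilder<Vec<(D, T, R)>>;
    /// // Keep parse failures in a separate error collection.
    /// let (oks, errs) = collection.map_fallible::<Cb<i64, _, _>, Cb<String, _, _>, _, _, _>(
    ///     "ParseNumbers",
    ///     |text: String| text.parse::<i64>().map_err(|e| e.to_string()),
    /// );
    /// ```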
    fn map_fallible<DCB, ECB, D2, E, L>(
        &self,
        name: &str,
        mut logic: L,
    ) -> (VecCollection<G, D2, R>, VecCollection<G, E, R>)
    where
        DCB: ContainerBuilder<Container = Vec<(D2, G::Timestamp, R)>>
            + PushInto<(D2, G::Timestamp, R)>,
        ECB: ContainerBuilder<Container = Vec<(E, G::Timestamp, R)>>
            + PushInto<(E, G::Timestamp, R)>,
        D2: Data,
        E: Data,
        L: FnMut(D1) -> Result<D2, E> + 'static,
    {
        self.flat_map_fallible::<DCB, ECB, _, _, _, _>(name, move |record| Some(logic(record)))
    }

    /// Like [`Collection::flat_map`], but `logic` is allowed to fail. The first
    /// returned collection will contain the successful applications of `logic`,
    /// while the second returned collection will contain the failed
    /// applications.
    fn flat_map_fallible<DCB, ECB, D2, E, I, L>(
        &self,
        name: &str,
        logic: L,
    ) -> (Collection<G, DCB::Container>, Collection<G, ECB::Container>)
    where
        DCB: ContainerBuilder + PushInto<(D2, G::Timestamp, R)>,
        ECB: ContainerBuilder + PushInto<(E, G::Timestamp, R)>,
        D2: Data,
        E: Data,
        I: IntoIterator<Item = Result<D2, E>>,
        L: FnMut(D1) -> I + 'static;

    /// Blocks progress of the frontier at the `expiration` time, preventing
    /// downstream operators from observing the expiration time or any time
    /// beyond it.
    fn expire_collection_at(&self, name: &str, expiration: G::Timestamp)
    -> VecCollection<G, D1, R>;

    /// Replaces each record with another, with a new difference type.
    ///
    /// This method is most commonly used to take records containing aggregatable data
    /// (e.g. numbers to be summed) and move the data into the difference component.
    /// This allows differential dataflow to update in place.
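    ///
    /// A sketch of a possible use (marked `ignore`; the record type is illustrative):
    /// ```ignore
    /// // Move each record's `amount` into the difference component, so that
    /// // differential dataflow can sum the amounts in place.
    /// let sums = collection.explode_one(|(key, amount): (String, i64)| (key, amount));
    /// ```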
    fn explode_one<D2, R2, L>(&self, logic: L) -> VecCollection<G, D2, <R2 as Multiply<R>>::Output>
    where
        D2: differential_dataflow::Data,
        R2: Semigroup + Multiply<R>,
        <R2 as Multiply<R>>::Output: Data + Semigroup,
        L: FnMut(D1) -> (D2, R2) + 'static,
        G::Timestamp: Lattice;

    /// Partitions the input into a monotonic collection, containing all records
    /// with positive differences, and non-monotonic exceptions, containing the
    /// remaining records.
    ///
    /// The exceptions are transformed by `into_err`.
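    ///
    /// A sketch of a possible use (marked `ignore`; the error type is illustrative):
    /// ```ignore
    /// // Keep positively-signed records; report everything else as an error.
    /// let (oks, errs) = collection.ensure_monotonic(|record, diff| {
    ///     (format!("unexpected non-positive diff {diff:?} for {record:?}"), 1)
    /// });
    /// ```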
    fn ensure_monotonic<E, IE>(
        &self,
        into_err: IE,
    ) -> (VecCollection<G, D1, R>, VecCollection<G, E, R>)
    where
        E: Data,
        IE: Fn(D1, R) -> (E, R) + 'static,
        R: num_traits::sign::Signed;

    /// Consolidates the collection if `must_consolidate` is `true` and leaves it
    /// untouched otherwise.
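    ///
    /// A sketch of a possible use (marked `ignore`; `MyBatcher` is a placeholder
    /// for a batcher satisfying the bounds below):
    /// ```ignore
    /// // Consolidate only when the input may contain redundant updates.
    /// let output = collection.consolidate_named_if::<MyBatcher>(must_consolidate, "Consolidate");
    /// ```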
    fn consolidate_named_if<Ba>(self, must_consolidate: bool, name: &str) -> Self
    where
        D1: differential_dataflow::ExchangeData + Hash + Columnation,
        R: Semigroup + differential_dataflow::ExchangeData + Columnation,
        G::Timestamp: Lattice + Columnation,
        Ba: Batcher<
                Input = Vec<((D1, ()), G::Timestamp, R)>,
                Output = TimelyStack<((D1, ()), G::Timestamp, R)>,
                Time = G::Timestamp,
            > + 'static;

    /// Consolidates the collection.
    fn consolidate_named<Ba>(self, name: &str) -> Self
    where
        D1: differential_dataflow::ExchangeData + Hash + Columnation,
        R: Semigroup + differential_dataflow::ExchangeData + Columnation,
        G::Timestamp: Lattice + Columnation,
        Ba: Batcher<
                Input = Vec<((D1, ()), G::Timestamp, R)>,
                Output = TimelyStack<((D1, ()), G::Timestamp, R)>,
                Time = G::Timestamp,
            > + 'static;
}

impl<G, C1> StreamExt<G, C1> for StreamCore<G, C1>
where
    C1: Container + DrainContainer,
    G: Scope,
{
    fn unary_fallible<DCB, ECB, B, P>(
        &self,
        pact: P,
        name: &str,
        constructor: B,
    ) -> (StreamCore<G, DCB::Container>, StreamCore<G, ECB::Container>)
    where
        DCB: ContainerBuilder,
        ECB: ContainerBuilder,
        B: FnOnce(
            Capability<G::Timestamp>,
            OperatorInfo,
        ) -> Box<
            dyn FnMut(
                    &mut InputHandleCore<G::Timestamp, C1, P::Puller>,
                    &mut OutputBuilderSession<'_, G::Timestamp, DCB>,
                    &mut OutputBuilderSession<'_, G::Timestamp, ECB>,
                ) + 'static,
        >,
        P: ParallelizationContract<G::Timestamp, C1>,
    {
        let mut builder = OperatorBuilderRc::new(name.into(), self.scope());
        builder.set_notify(false);

        let operator_info = builder.operator_info();

        let mut input = builder.new_input(self, pact);
        let (ok_output, ok_stream) = builder.new_output();
        let mut ok_output = OutputBuilder::from(ok_output);
        let (err_output, err_stream) = builder.new_output();
        let mut err_output = OutputBuilder::from(err_output);

        builder.build(move |mut capabilities| {
            // `capabilities` should be a single-element vector.
            let capability = capabilities.pop().unwrap();
            let mut logic = constructor(capability, operator_info);
            move |_frontiers| {
                let mut ok_output_handle = ok_output.activate();
                let mut err_output_handle = err_output.activate();
                logic(&mut input, &mut ok_output_handle, &mut err_output_handle);
            }
        });

        (ok_stream, err_stream)
    }

    // XXX(guswynn): file a minimization bug report for the `logic` `flat_map`
    // false positive here.
    // TODO(guswynn): remove this after https://github.com/rust-lang/rust-clippy/issues/8098 is
    // resolved. The `logic` `FnMut` needs to be borrowed in the `flat_map` call, not moved in,
    // so the simple `|d1| logic(d1)` closure is load-bearing.
    #[allow(clippy::redundant_closure)]
    fn flat_map_fallible<DCB, ECB, D2, E, I, L>(
        &self,
        name: &str,
        mut logic: L,
    ) -> (StreamCore<G, DCB::Container>, StreamCore<G, ECB::Container>)
    where
        DCB: ContainerBuilder + PushInto<D2>,
        ECB: ContainerBuilder + PushInto<E>,
        I: IntoIterator<Item = Result<D2, E>>,
        L: for<'a> FnMut(C1::Item<'a>) -> I + 'static,
    {
        self.unary_fallible::<DCB, ECB, _, _>(Pipeline, name, move |_, _| {
            Box::new(move |input, ok_output, err_output| {
                input.for_each_time(|time, data| {
                    let mut ok_session = ok_output.session_with_builder(&time);
                    let mut err_session = err_output.session_with_builder(&time);
                    for r in data
                        .flat_map(DrainContainer::drain)
                        .flat_map(|d1| logic(d1))
                    {
                        match r {
                            Ok(d2) => ok_session.give(d2),
                            Err(e) => err_session.give(e),
                        }
                    }
                })
            })
        })
    }

    fn expire_stream_at(&self, name: &str, expiration: G::Timestamp) -> StreamCore<G, C1> {
        let name = format!("expire_stream_at({name})");
        self.unary_frontier(Pipeline, &name.clone(), move |cap, _| {
            // Retain a capability for the expiration time, which we'll only drop if the token
            // is dropped. Else, block progress at the expiration time to prevent downstream
            // operators from making any statement about the expiration time or any following
            // time.
            let cap = Some(cap.delayed(&expiration));
            let mut warned = false;
            move |(input, frontier), output| {
                let _ = &cap;
                let frontier = frontier.frontier();
                if !frontier.less_than(&expiration) && !warned {
                    // Here, we print a warning, not an error. The state is only a liveness
                    // concern, but not relevant for correctness. Additionally, a race between
                    // shutting down the dataflow and dropping the token can cause the dataflow
                    // to shut down before we drop the token. This can happen when dropping
                    // the last remaining capability on a different worker. We do not want to
                    // log an error every time this happens.
                    tracing::warn!(
                        name = name,
                        frontier = ?frontier,
                        expiration = ?expiration,
                        "frontier not less than expiration"
                    );
                    warned = true;
                }
                input.for_each(|time, data| {
                    let mut session = output.session(&time);
                    session.give_container(data);
                });
            }
        })
    }
}

impl<G, D1, R> CollectionExt<G, D1, R> for VecCollection<G, D1, R>
where
    G: Scope,
    G::Timestamp: Data,
    D1: Data,
    R: Semigroup + 'static,
{
    fn empty(scope: &G) -> VecCollection<G, D1, R> {
        operator::empty(scope).as_collection()
    }

    fn flat_map_fallible<DCB, ECB, D2, E, I, L>(
        &self,
        name: &str,
        mut logic: L,
    ) -> (Collection<G, DCB::Container>, Collection<G, ECB::Container>)
    where
        DCB: ContainerBuilder + PushInto<(D2, G::Timestamp, R)>,
        ECB: ContainerBuilder + PushInto<(E, G::Timestamp, R)>,
        D2: Data,
        E: Data,
        I: IntoIterator<Item = Result<D2, E>>,
        L: FnMut(D1) -> I + 'static,
    {
        let (ok_stream, err_stream) =
            self.inner
                .flat_map_fallible::<DCB, ECB, _, _, _, _>(name, move |(d1, t, r)| {
                    logic(d1).into_iter().map(move |res| match res {
                        Ok(d2) => Ok((d2, t.clone(), r.clone())),
                        Err(e) => Err((e, t.clone(), r.clone())),
                    })
                });
        (ok_stream.as_collection(), err_stream.as_collection())
    }

    fn expire_collection_at(
        &self,
        name: &str,
        expiration: G::Timestamp,
    ) -> VecCollection<G, D1, R> {
        self.inner
            .expire_stream_at(name, expiration)
            .as_collection()
    }

    fn explode_one<D2, R2, L>(
        &self,
        mut logic: L,
    ) -> VecCollection<G, D2, <R2 as Multiply<R>>::Output>
    where
        D2: differential_dataflow::Data,
        R2: Semigroup + Multiply<R>,
        <R2 as Multiply<R>>::Output: Data + Semigroup,
        L: FnMut(D1) -> (D2, R2) + 'static,
        G::Timestamp: Lattice,
    {
        self.inner
            .unary::<ConsolidatingContainerBuilder<_>, _, _, _>(
                Pipeline,
                "ExplodeOne",
                move |_, _| {
                    move |input, output| {
                        input.for_each(|time, data| {
                            output
                                .session_with_builder(&time)
                                .give_iterator(data.drain(..).map(|(x, t, d)| {
                                    let (x, d2) = logic(x);
                                    (x, t, d2.multiply(&d))
                                }));
                        });
                    }
                },
            )
            .as_collection()
    }

    fn ensure_monotonic<E, IE>(
        &self,
        into_err: IE,
    ) -> (VecCollection<G, D1, R>, VecCollection<G, E, R>)
    where
        E: Data,
        IE: Fn(D1, R) -> (E, R) + 'static,
        R: num_traits::sign::Signed,
    {
        let (oks, errs) = self
            .inner
            .unary_fallible(Pipeline, "EnsureMonotonic", move |_, _| {
                Box::new(move |input, ok_output, err_output| {
                    input.for_each(|time, data| {
                        let mut ok_session = ok_output.session(&time);
                        let mut err_session = err_output.session(&time);
                        for (x, t, d) in data.drain(..) {
                            if d.is_positive() {
                                ok_session.give((x, t, d))
                            } else {
                                let (e, d2) = into_err(x, d);
                                err_session.give((e, t, d2))
                            }
                        }
                    })
                })
            });
        (oks.as_collection(), errs.as_collection())
    }

    fn consolidate_named_if<Ba>(self, must_consolidate: bool, name: &str) -> Self
    where
        D1: differential_dataflow::ExchangeData + Hash + Columnation,
        R: Semigroup + differential_dataflow::ExchangeData + Columnation,
        G::Timestamp: Lattice + Ord + Columnation,
        Ba: Batcher<
                Input = Vec<((D1, ()), G::Timestamp, R)>,
                Output = TimelyStack<((D1, ()), G::Timestamp, R)>,
                Time = G::Timestamp,
            > + 'static,
    {
        if must_consolidate {
            // We employ AHash below instead of the default hasher in DD to obtain
            // a better distribution of data to workers. AHash claims empirically
            // both speed and high quality, according to
            // https://github.com/tkaitchuck/aHash/blob/master/compare/readme.md.
            // TODO(vmarcos): Consider here if it is worth it to spend the time to
            // implement twisted tabulation hashing as proposed in Mihai Patrascu,
            // Mikkel Thorup: Twisted Tabulation Hashing. SODA 2013: 209-228, available
            // at https://epubs.siam.org/doi/epdf/10.1137/1.9781611973105.16. The latter
            // would provide good bounds for balls-into-bins problems when the number of
            // bins is small (as is our case), so we'd have a theoretical guarantee.
            // NOTE: We fix the seeds of a RandomState instance explicitly with the same
            // seeds that would be given by `AHash` via ahash::AHasher::default() so as
            // to avoid a different selection due to compile-time features being differently
            // selected in other dependencies using `AHash` vis-à-vis cargo's strategy
            // of unioning features.
            // NOTE: Depending on target features, we may end up employing the fallback
            // hasher of `AHash`, but it should be sufficient for our needs.
            let random_state = ahash::RandomState::with_seeds(
                0x243f_6a88_85a3_08d3,
                0x1319_8a2e_0370_7344,
                0xa409_3822_299f_31d0,
                0x082e_fa98_ec4e_6c89,
            );
            let exchange = Exchange::new(move |update: &((D1, _), G::Timestamp, R)| {
                let data = &(update.0).0;
                let mut h = random_state.build_hasher();
                data.hash(&mut h);
                h.finish()
            });
            consolidate_pact::<Ba, _, _>(&self.map(|k| (k, ())).inner, exchange, name)
                .unary(Pipeline, "unpack consolidated", |_, _| {
                    |input, output| {
                        input.for_each(|time, data| {
                            let mut session = output.session(&time);
                            for ((k, ()), t, d) in
                                data.iter().flatten().flat_map(|chunk| chunk.iter())
                            {
                                session.give((k.clone(), t.clone(), d.clone()))
                            }
                        })
                    }
                })
                .as_collection()
        } else {
            self
        }
    }

    fn consolidate_named<Ba>(self, name: &str) -> Self
    where
        D1: differential_dataflow::ExchangeData + Hash + Columnation,
        R: Semigroup + differential_dataflow::ExchangeData + Columnation,
        G::Timestamp: Lattice + Ord + Columnation,
        Ba: Batcher<
                Input = Vec<((D1, ()), G::Timestamp, R)>,
                Output = TimelyStack<((D1, ()), G::Timestamp, R)>,
                Time = G::Timestamp,
            > + 'static,
    {
        let exchange =
            Exchange::new(move |update: &((D1, ()), G::Timestamp, R)| (update.0).0.hashed());

        consolidate_pact::<Ba, _, _>(&self.map(|k| (k, ())).inner, exchange, name)
            .unary(Pipeline, &format!("Unpack {name}"), |_, _| {
                |input, output| {
                    input.for_each(|time, data| {
                        let mut session = output.session(&time);
                        for ((k, ()), t, d) in data.iter().flatten().flat_map(|chunk| chunk.iter())
                        {
                            session.give((k.clone(), t.clone(), d.clone()))
                        }
                    })
                }
            })
            .as_collection()
    }
}

/// Aggregates the weights of equal records into at most one record.
///
/// Produces a stream of chains of records, partitioned according to `pact`. The
/// data is sorted according to `Ba`. For each timestamp, it produces at most one chain.
///
/// Updates are accumulated in place, and each update is held back until its
/// timestamp has completed.
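///
/// A sketch of a possible use (marked `ignore`; `MyBatcher` stands in for a batcher
/// whose input matches the stream's container type):
/// ```ignore
/// // Partition updates by key so equal records meet at the same worker.
/// let exchange = Exchange::new(|update: &((K, ()), T, R)| (update.0).0.hashed());
/// let consolidated = consolidate_pact::<MyBatcher, _, _>(&keyed_stream, exchange, "Consolidate");
/// ```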
pub fn consolidate_pact<Ba, P, G>(
    stream: &StreamCore<G, Ba::Input>,
    pact: P,
    name: &str,
) -> Stream<G, Vec<Ba::Output>>
where
    G: Scope,
    Ba: Batcher<Time = G::Timestamp> + 'static,
    Ba::Input: Container + Clone + 'static,
    Ba::Output: Clone,
    P: ParallelizationContract<G::Timestamp, Ba::Input>,
{
    stream.unary_frontier(pact, name, |_cap, info| {
        // Acquire a logger for arrange events.
        let logger = stream
            .scope()
            .logger_for("differential/arrange")
            .map(Into::into);

        let mut batcher = Ba::new(logger, info.global_id);
        // Capabilities for the lower envelope of updates in `batcher`.
        let mut capabilities = Antichain::<Capability<G::Timestamp>>::new();
        let mut prev_frontier = Antichain::from_elem(G::Timestamp::minimum());

        move |(input, frontier), output| {
            input.for_each(|cap, data| {
                capabilities.insert(cap.retain());
                batcher.push_container(data);
            });

            if prev_frontier.borrow() != frontier.frontier() {
                if capabilities
                    .elements()
                    .iter()
                    .any(|c| !frontier.less_equal(c.time()))
                {
                    let mut upper = Antichain::new(); // re-used allocation for sealing batches.

                    // For each capability not in advance of the input frontier ...
                    for (index, capability) in capabilities.elements().iter().enumerate() {
                        if !frontier.less_equal(capability.time()) {
                            // Assemble the upper bound on times we can commit with this capability.
                            // We must respect the input frontier, and *subsequent* capabilities, as
                            // we are pretending to retire the capability changes one by one.
                            upper.clear();
                            for time in frontier.frontier().iter() {
                                upper.insert(time.clone());
                            }
                            for other_capability in &capabilities.elements()[(index + 1)..] {
                                upper.insert(other_capability.time().clone());
                            }

                            // Send the batch to downstream consumers, empty or not.
                            let mut session = output.session(&capabilities.elements()[index]);
                            // Extract updates not in advance of `upper`.
                            let batch =
                                batcher.seal::<ConsolidateBuilder<_, Ba::Output>>(upper.clone());
                            session.give(batch);
                        }
                    }

                    // Having extracted and sent batches between each capability and the input frontier,
                    // we should downgrade all capabilities to match the batcher's lower update frontier.
                    // This may involve discarding capabilities, which is fine as any new updates arrive
                    // in messages with new capabilities.
                    let mut new_capabilities = Antichain::new();
                    for time in batcher.frontier().iter() {
                        if let Some(capability) = capabilities
                            .elements()
                            .iter()
                            .find(|c| c.time().less_equal(time))
                        {
                            new_capabilities.insert(capability.delayed(time));
                        } else {
                            panic!("failed to find capability");
                        }
                    }

                    capabilities = new_capabilities;
                }

                prev_frontier.clear();
                prev_frontier.extend(frontier.frontier().iter().cloned());
            }
        }
    })
}

/// A builder that wraps a session for direct output to a stream.
struct ConsolidateBuilder<T, I> {
    _marker: PhantomData<(T, I)>,
}

impl<T, I> Builder for ConsolidateBuilder<T, I>
where
    T: Timestamp,
    I: Clone,
{
    type Input = I;
    type Time = T;
    type Output = Vec<I>;

    fn new() -> Self {
        Self {
            _marker: PhantomData,
        }
    }

    fn with_capacity(_keys: usize, _vals: usize, _upds: usize) -> Self {
        Self::new()
    }

    fn push(&mut self, _chunk: &mut Self::Input) {
        unimplemented!("ConsolidateBuilder::push")
    }

    fn done(self, _: Description<Self::Time>) -> Self::Output {
        unimplemented!("ConsolidateBuilder::done")
    }

    fn seal(chain: &mut Vec<Self::Input>, _description: Description<Self::Time>) -> Self::Output {
        std::mem::take(chain)
    }
}

/// Merge the contents of multiple streams and combine the containers using a container builder.
pub trait ConcatenateFlatten<G: Scope, C: Container + DrainContainer> {
    /// Merge the contents of multiple streams and use the provided container builder to form
    /// output containers.
    ///
    /// # Examples
    /// ```
    /// use timely::container::CapacityContainerBuilder;
    /// use timely::dataflow::operators::{ToStream, Inspect};
    /// use mz_timely_util::operator::ConcatenateFlatten;
    ///
    /// timely::example(|scope| {
    ///
    ///     let streams = vec![(0..10).to_stream(scope),
    ///                        (0..10).to_stream(scope),
    ///                        (0..10).to_stream(scope)];
    ///
    ///     scope.concatenate_flatten::<_, CapacityContainerBuilder<Vec<_>>>(streams)
    ///          .inspect(|x| println!("seen: {:?}", x));
    /// });
    /// ```
    fn concatenate_flatten<I, CB>(&self, sources: I) -> StreamCore<G, CB::Container>
    where
        I: IntoIterator<Item = StreamCore<G, C>>,
        CB: ContainerBuilder + for<'a> PushInto<C::Item<'a>>;
}

impl<G, C> ConcatenateFlatten<G, C> for StreamCore<G, C>
where
    G: Scope,
    C: Container + DrainContainer,
{
    fn concatenate_flatten<I, CB>(&self, sources: I) -> StreamCore<G, CB::Container>
    where
        I: IntoIterator<Item = StreamCore<G, C>>,
        CB: ContainerBuilder + for<'a> PushInto<C::Item<'a>>,
    {
        let clone = self.clone();
        self.scope()
            .concatenate_flatten::<_, CB>(Some(clone).into_iter().chain(sources))
    }
}

impl<G, C> ConcatenateFlatten<G, C> for G
where
    G: Scope,
    C: Container + DrainContainer,
{
    fn concatenate_flatten<I, CB>(&self, sources: I) -> StreamCore<G, CB::Container>
    where
        I: IntoIterator<Item = StreamCore<G, C>>,
        CB: ContainerBuilder + for<'a> PushInto<C::Item<'a>>,
    {
        let mut builder = OperatorBuilder::new("ConcatenateFlatten".to_string(), self.clone());
        builder.set_notify(false);

        // Create new input handles for each input stream.
        let mut handles = sources
            .into_iter()
            .map(|s| builder.new_input(&s, Pipeline))
            .collect::<Vec<_>>();

        // Create one output handle for the concatenated results.
        let (output, result) = builder.new_output::<CB::Container>();
        let mut output = OutputBuilder::<_, CB>::from(output);

        builder.build(move |_capability| {
            move |_frontier| {
                let mut output = output.activate();
                for handle in handles.iter_mut() {
                    handle.for_each_time(|time, data| {
                        output
                            .session_with_builder(&time)
                            .give_iterator(data.flat_map(DrainContainer::drain));
                    })
                }
            }
        });

        result
    }
}