differential_dataflow/operators/
consolidate.rs

//! Aggregates the weights of equal records into at most one record.
//!
//! As differential dataflow streams are unordered and taken to be the accumulation of all records,
//! no semantic change happens via `consolidate`. However, there is a practical difference between
//! a collection that aggregates down to zero records, and one that actually has no records. The
//! underlying system can more clearly see that no work must be done in the latter case, and we can
//! drop out of, e.g. iterative computations.

9use timely::dataflow::Scope;
10
11use crate::{Collection, ExchangeData, Hashable};
12use crate::consolidation::ConsolidatingContainerBuilder;
13use crate::difference::Semigroup;
14
15use crate::Data;
16use crate::lattice::Lattice;
17use crate::trace::{Batcher, Builder};
18
/// Methods which require data be arrangeable.
impl<G, D, R> Collection<G, D, R>
where
    G: Scope<Timestamp: Data+Lattice>,
    D: ExchangeData+Hashable,
    R: Semigroup+ExchangeData,
{
    /// Aggregates the weights of equal records into at most one record.
    ///
    /// This method uses the type `D`'s `hashed()` method to partition the data. The data are
    /// accumulated in place, each held back until their timestamp has completed.
    ///
    /// # Examples
    ///
    /// ```
    /// use differential_dataflow::input::Input;
    ///
    /// ::timely::example(|scope| {
    ///
    ///     let x = scope.new_collection_from(1 .. 10u32).1;
    ///
    ///     x.negate()
    ///      .concat(&x)
    ///      .consolidate() // <-- ensures cancellation occurs
    ///      .assert_empty();
    /// });
    /// ```
    pub fn consolidate(&self) -> Self {
        use crate::trace::implementations::{KeyBatcher, KeyBuilder, KeySpine};
        // Delegate to `consolidate_named` with the default key-only batcher, builder,
        // and spine; records carry a unit value, so reification just clones the key.
        self.consolidate_named::<KeyBatcher<_, _, _>,KeyBuilder<_,_,_>, KeySpine<_,_,_>,_>("Consolidate", |key,&()| key.clone())
    }

    /// As `consolidate` but with the ability to name the operator, specify the trace type,
    /// and provide the function `reify` to produce owned keys and values.
    pub fn consolidate_named<Ba, Bu, Tr, F>(&self, name: &str, reify: F) -> Self
    where
        Ba: Batcher<Input=Vec<((D,()),G::Timestamp,R)>, Time=G::Timestamp> + 'static,
        Tr: for<'a> crate::trace::Trace<Time=G::Timestamp,Diff=R>+'static,
        Bu: Builder<Time=Tr::Time, Input=Ba::Output, Output=Tr::Batch>,
        F: Fn(Tr::Key<'_>, Tr::Val<'_>) -> D + 'static,
    {
        use crate::operators::arrange::arrangement::Arrange;
        // Pair each record with a unit value so the collection can be arranged as a
        // keyed trace (which performs the exchange and per-key accumulation), then
        // read the arrangement back out as a collection using `reify` to rebuild
        // owned records from the trace's borrowed key/value representation.
        self.map(|k| (k, ()))
            .arrange_named::<Ba, Bu, Tr>(name)
            .as_collection(reify)
    }

    /// Aggregates the weights of equal records.
    ///
    /// Unlike `consolidate`, this method does not exchange data and does not
    /// ensure that at most one copy of each `(data, time)` pair exists in the
    /// results. Instead, it acts on each batch of data and collapses equivalent
    /// `(data, time)` pairs found therein, suppressing any that accumulate to
    /// zero.
    ///
    /// # Examples
    ///
    /// ```
    /// use differential_dataflow::input::Input;
    ///
    /// ::timely::example(|scope| {
    ///
    ///     let x = scope.new_collection_from(1 .. 10u32).1;
    ///
    ///     // nothing to assert, as no particular guarantees.
    ///     x.negate()
    ///      .concat(&x)
    ///      .consolidate_stream();
    /// });
    /// ```
    pub fn consolidate_stream(&self) -> Self {

        use timely::dataflow::channels::pact::Pipeline;
        use timely::dataflow::operators::Operator;
        use crate::collection::AsCollection;

        // The `Pipeline` pact performs no data exchange, so consolidation is
        // best-effort: only records that happen to meet within a worker's batch
        // can cancel. The `ConsolidatingContainerBuilder` does the collapsing as
        // records are appended to each output session.
        self.inner
            .unary::<ConsolidatingContainerBuilder<_>, _, _, _>(Pipeline, "ConsolidateStream", |_cap, _info| {

                move |input, output| {
                    input.for_each(|time, data| {
                        // Drain the input batch straight into a consolidating
                        // session at the same timestamp; no capability is held
                        // across activations.
                        output.session_with_builder(&time).give_iterator(data.drain(..));
                    })
                }
            })
            .as_collection()
    }
}