differential_dataflow/operators/arrange/upsert.rs
//! Support for forming collections from streams of upserts.
//!
//! Upserts are sequences of keyed optional values, and they define a collection of
//! the pairs of keys and each key's most recent value, if one is present. Each element
//! in the sequence effectively overwrites the previous value at the key, if present,
//! and if the value is not present it uninstalls the key.
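//!
//! For example (a sketch, with a hypothetical key and values), the upsert sequence
//!
//! ```ignore
//! ("k".to_string(), Some(1), 0)   // installs the value 1 at key "k".
//! ("k".to_string(), Some(2), 1)   // overwrites value 1 with value 2.
//! ("k".to_string(), None, 2)      // uninstalls the key entirely.
//! ```
//!
//! leaves the collection empty as of time 2: each upsert supersedes the one before it.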
//!
//! Upserts are non-trivial because they do not themselves describe the deletions that
//! the `Collection` update stream must present. However, if one creates an `Arrangement`
//! then this state provides sufficient information. The arrangement will continue to
//! exist even if it is dropped, until the input or dataflow shuts down, as the upsert
//! operator itself needs access to its accumulated state.
//!
//! # Notes
//!
//! Upserts currently only work with totally ordered timestamps.
//!
//! In the case of ties in timestamps (concurrent updates to the same key), the operator
//! chooses the *greatest* value according to `Option<Val>` ordering, which prefers any
//! `Some(val)` to `None` and takes the greatest value (informally, as if the upserts
//! were applied in order of value).
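//!
//! For example (hypothetical values), two concurrent upserts to the same key resolve
//! in favor of the greater value:
//!
//! ```ignore
//! // Both updates carry time 3; Some("b") > Some("a") in `Option<Val>` order,
//! // so "b" is the value installed at time 3.
//! input.send(("key".to_string(), Some("a".to_string()), 3));
//! input.send(("key".to_string(), Some("b".to_string()), 3));
//! ```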
//!
//! If the same value is repeated, no change will occur in the output. That may make this
//! operator effective at determining the difference between collections of keyed values,
//! but note that it will not notice the absence of keys in a collection.
//!
//! To effect "filtering" in a way that reduces the arrangement footprint, apply a map to
//! the input stream, mapping values that fail the predicate to `None` values, like so:
//!
//! ```ignore
//! // Dropped values should be retained as "uninstall" upserts.
//! upserts.map(|(key, opt_val)| (key, opt_val.filter(predicate)))
//! ```
//!
//! # Example
//!
//! ```rust
//! // define a new timely dataflow computation.
//! timely::execute_from_args(std::env::args().skip(1), move |worker| {
//!
//!     type Key = String;
//!     type Val = String;
//!
//!     let mut input = timely::dataflow::InputHandle::new();
//!     let mut probe = timely::dataflow::ProbeHandle::new();
//!
//!     // Create a dataflow demonstrating upserts.
//!     //
//!     // Upserts are a sequence of records (key, option<val>) where the intended
//!     // value associated with a key is the most recent value, and if that is a
//!     // `none` then the key is removed (until a new value shows up).
//!     //
//!     // The challenge with upserts is that the value to *retract* isn't supplied
//!     // as part of the input stream. We have to determine what it should be!
//!
//!     worker.dataflow(|scope| {
//!
//!         use timely::dataflow::operators::Input;
//!         use differential_dataflow::trace::implementations::{ValBuilder, ValSpine};
//!         use differential_dataflow::operators::arrange::upsert;
//!
//!         let stream = scope.input_from(&mut input);
//!         let arranged = upsert::arrange_from_upsert::<_, ValBuilder<Key, Val, _, _>, ValSpine<Key, Val, _, _>>(&stream, &"test");
//!
//!         arranged
//!             .as_collection(|k,v| (k.clone(), v.clone()))
//!             .inspect(|x| println!("Observed: {:?}", x))
//!             .probe_with(&mut probe);
//!     });
//!
//!     // Introduce the key, with a specific value.
//!     input.send(("frank".to_string(), Some("mcsherry".to_string()), 3));
//!     input.advance_to(4);
//!     while probe.less_than(input.time()) { worker.step(); }
//!
//!     // Change the value to a different value.
//!     input.send(("frank".to_string(), Some("zappa".to_string()), 4));
//!     input.advance_to(5);
//!     while probe.less_than(input.time()) { worker.step(); }
//!
//!     // Remove the key and its value.
//!     input.send(("frank".to_string(), None, 5));
//!     input.advance_to(9);
//!     while probe.less_than(input.time()) { worker.step(); }
//!
//!     // Introduce a new, totally different value.
//!     input.send(("frank".to_string(), Some("oz".to_string()), 9));
//!     input.advance_to(10);
//!     while probe.less_than(input.time()) { worker.step(); }
//!
//!     // Repeat the value, which should produce no output.
//!     input.send(("frank".to_string(), Some("oz".to_string()), 11));
//!     input.advance_to(12);
//!     while probe.less_than(input.time()) { worker.step(); }
//!
//!     // Remove the key and value.
//!     input.send(("frank".to_string(), None, 15));
//!     input.close();
//!
//! }).unwrap();
//! ```
use std::collections::{BinaryHeap, BTreeMap};

use timely::order::{PartialOrder, TotalOrder};
use timely::dataflow::{Scope, Stream};
use timely::dataflow::operators::generic::Operator;
use timely::dataflow::channels::pact::Exchange;
use timely::progress::Timestamp;
use timely::progress::Antichain;
use timely::dataflow::operators::Capability;

use crate::operators::arrange::arrangement::Arranged;
use crate::trace::{Builder, Description};
use crate::trace::{self, Trace, TraceReader, Cursor};
use crate::{ExchangeData, Hashable};

use crate::trace::implementations::containers::BatchContainer;

use super::TraceAgent;

/// Arrange data from a stream of keyed upserts.
///
/// The input should be a stream of timestamped pairs of `Key` and `Option<Val>`.
/// The contents of the collection are defined key-by-key, where each optional
/// value in sequence either replaces or removes the existing value, should it
/// exist.
///
/// This method is only implemented for totally ordered times, as we do not yet
/// understand what a "sequence" of upserts would mean for partially ordered
/// timestamps.
pub fn arrange_from_upsert<G, Bu, Tr>(
    stream: &Stream<G, (Tr::KeyOwn, Option<Tr::ValOwn>, G::Timestamp)>,
    name: &str,
) -> Arranged<G, TraceAgent<Tr>>
where
    G: Scope<Timestamp=Tr::Time>,
    Tr: for<'a> Trace<
        KeyOwn: ExchangeData+Hashable+std::hash::Hash,
        ValOwn: ExchangeData,
        Time: TotalOrder+ExchangeData,
        Diff=isize,
    >+'static,
    Bu: Builder<Time=G::Timestamp, Input = Vec<((Tr::KeyOwn, Tr::ValOwn), Tr::Time, Tr::Diff)>, Output = Tr::Batch>,
{
    let mut reader: Option<TraceAgent<Tr>> = None;

    // Fabricate a data-parallel operator using the `unary_frontier` pattern.
    let stream = {

        let reader = &mut reader;

        let exchange = Exchange::new(move |update: &(Tr::KeyOwn,Option<Tr::ValOwn>,G::Timestamp)| (update.0).hashed().into());

        stream.unary_frontier(exchange, name, move |_capability, info| {

            // Acquire a logger for arrange events.
            let logger = stream.scope().logger_for::<crate::logging::DifferentialEventBuilder>("differential/arrange").map(Into::into);

            // Tracks the lower envelope of times in `priority_queue`.
            let mut capabilities = Antichain::<Capability<G::Timestamp>>::new();
            // Form the trace we will both use internally and publish.
            let activator = Some(stream.scope().activator_for(info.address.clone()));
            let mut empty_trace = Tr::new(info.clone(), logger.clone(), activator);

            if let Some(exert_logic) = stream.scope().config().get::<trace::ExertionLogic>("differential/default_exert_logic").cloned() {
                empty_trace.set_exert_logic(exert_logic);
            }

            let (mut reader_local, mut writer) = TraceAgent::new(empty_trace, info, logger);
            // Capture the reader outside the builder scope.
            *reader = Some(reader_local.clone());

            // Tracks the input frontier, used to populate the lower bound of new batches.
            let mut prev_frontier = Antichain::from_elem(<G::Timestamp as Timestamp>::minimum());

            // For stashing input upserts, ordered increasing by time (`BinaryHeap` is a max-heap).
            let mut priority_queue = BinaryHeap::<std::cmp::Reverse<(G::Timestamp, Tr::KeyOwn, Option<Tr::ValOwn>)>>::new();
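            // Staging buffer for ((key, val), time, diff) updates; drained by `builder.push`.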
            let mut updates = Vec::new();

            move |input, output| {

                // Stash capabilities and associated data (ordered by time).
                input.for_each(|cap, data| {
                    capabilities.insert(cap.retain());
                    for (key, val, time) in data.drain(..) {
                        priority_queue.push(std::cmp::Reverse((time, key, val)))
                    }
                });

                // Assert that the frontier never regresses.
                assert!(PartialOrder::less_equal(&prev_frontier.borrow(), &input.frontier().frontier()));

                // Test to see if strict progress has occurred, which happens whenever the new
                // frontier isn't equal to the previous. It is only in this case that we have any
                // data processing to do.
                if prev_frontier.borrow() != input.frontier().frontier() {

                    // If there is at least one capability not in advance of the input frontier ...
                    if capabilities.elements().iter().any(|c| !input.frontier().less_equal(c.time())) {

                        let mut upper = Antichain::new();   // re-used allocation for sealing batches.

                        // For each capability not in advance of the input frontier ...
                        for (index, capability) in capabilities.elements().iter().enumerate() {

                            if !input.frontier().less_equal(capability.time()) {

                                // Assemble the upper bound on times we can commit with this capability.
                                // We must respect the input frontier, and *subsequent* capabilities, as
                                // we are pretending to retire the capability changes one by one.
                                upper.clear();
                                for time in input.frontier().frontier().iter() {
                                    upper.insert(time.clone());
                                }
                                for other_capability in &capabilities.elements()[(index + 1) .. ] {
                                    upper.insert(other_capability.time().clone());
                                }

                                // Extract upserts available to process as of this `upper`.
                                let mut to_process = BTreeMap::new();
                                while priority_queue.peek().map(|std::cmp::Reverse((t,_k,_v))| !upper.less_equal(t)).unwrap_or(false) {
                                    let std::cmp::Reverse((time, key, val)) = priority_queue.pop().expect("Priority queue just ensured non-empty");
                                    to_process.entry(key).or_insert(Vec::new()).push((time, std::cmp::Reverse(val)));
                                }
                                // Reduce the allocation behind the priority queue if it is presently excessive.
                                // A factor of four is used to avoid repeated doubling and shrinking.
                                // TODO: if the queue were a sequence of geometrically sized allocations, we could
                                //       shed the additional capacity without copying any data.
                                if priority_queue.capacity() > 4 * priority_queue.len() {
                                    priority_queue.shrink_to_fit();
                                }

                                // Prepare a cursor to the existing arrangement, and a batch builder for
                                // new stuff that we add.
                                let (mut trace_cursor, trace_storage) = reader_local.cursor();
                                let mut builder = Bu::new();
                                let mut key_con = Tr::KeyContainer::with_capacity(1);
                                for (key, mut list) in to_process {

                                    key_con.clear(); key_con.push_own(&key);

                                    // The prior value associated with the key.
                                    let mut prev_value: Option<Tr::ValOwn> = None;

                                    // Attempt to find the key in the trace.
                                    trace_cursor.seek_key(&trace_storage, key_con.index(0));
                                    if trace_cursor.get_key(&trace_storage).map(|k| k.eq(&key_con.index(0))).unwrap_or(false) {
                                        // Determine the prior value associated with the key.
                                        while let Some(val) = trace_cursor.get_val(&trace_storage) {
                                            let mut count = 0;
                                            trace_cursor.map_times(&trace_storage, |_time, diff| count += Tr::owned_diff(diff));
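                                            // Upsert semantics: each (key, val) pair accumulates to multiplicity zero or one.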
                                            assert!(count == 0 || count == 1);
                                            if count == 1 {
                                                assert!(prev_value.is_none());
                                                prev_value = Some(Tr::owned_val(val));
                                            }
                                            trace_cursor.step_val(&trace_storage);
                                        }
                                        trace_cursor.step_key(&trace_storage);
                                    }

                                    // Sort the list of upserts to `key` by their time, suppressing all but
                                    // one update at each time.
                                    list.sort();
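                                    // Sorting pairs of (time, Reverse(val)) places the greatest value first within
                                    // each time, so `dedup_by` retains exactly the greatest value per time.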
                                    list.dedup_by(|(t1,_), (t2,_)| t1 == t2);
                                    for (time, std::cmp::Reverse(next)) in list {
                                        if prev_value != next {
                                            if let Some(prev) = prev_value {
                                                updates.push(((key.clone(), prev), time.clone(), -1));
                                            }
                                            if let Some(next) = next.as_ref() {
                                                updates.push(((key.clone(), next.clone()), time.clone(), 1));
                                            }
                                            prev_value = next;
                                        }
                                    }
                                    // Must insert updates in (key, val, time) order.
                                    updates.sort();
                                    builder.push(&mut updates);
                                }
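                                // The batch spans times from the previous frontier up to `upper`; its `since`
                                // frontier is minimal, as no compaction has been applied to these updates.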
                                let description = Description::new(prev_frontier.clone(), upper.clone(), Antichain::from_elem(G::Timestamp::minimum()));
                                let batch = builder.done(description);
                                prev_frontier.clone_from(&upper);

                                // Communicate `batch` to the arrangement and the stream.
                                writer.insert(batch.clone(), Some(capability.time().clone()));
                                output.session(&capabilities.elements()[index]).give(batch);
                            }
                        }

                        // Having extracted and sent batches between each capability and the input frontier,
                        // we should downgrade all capabilities to match the batcher's lower update frontier.
                        // This may involve discarding capabilities, which is fine as any new updates arrive
                        // in messages with new capabilities.

                        let mut new_capabilities = Antichain::new();
                        if let Some(std::cmp::Reverse((time, _, _))) = priority_queue.peek() {
                            if let Some(capability) = capabilities.elements().iter().find(|c| c.time().less_equal(time)) {
                                new_capabilities.insert(capability.delayed(time));
                            }
                            else {
                                panic!("failed to find capability");
                            }
                        }

                        capabilities = new_capabilities;
                    }
                    else {
                        // Announce progress updates, even without data.
                        writer.seal(input.frontier().frontier().to_owned());
                    }

                    // Update our view of the input frontier.
                    prev_frontier.clear();
                    prev_frontier.extend(input.frontier().frontier().iter().cloned());

                    // Downgrade capabilities for `reader_local`.
                    reader_local.set_logical_compaction(prev_frontier.borrow());
                    reader_local.set_physical_compaction(prev_frontier.borrow());
                }
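                // Apply any outstanding merge (exertion) effort to the trace.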
                writer.exert();
            }
        })
    };

    Arranged { stream, trace: reader.unwrap() }
}