differential_dataflow/trace/mod.rs
//! Traits and data structures representing a collection trace.
//!
//! A collection trace is a set of updates of the form `(key, val, time, diff)`, which determine the contents
//! of a collection at given times by accumulating updates whose time field is less than or equal to the target time.
//!
//! The `Trace` trait describes those types and methods that a data structure must implement to be viewed as a
//! collection trace. This trait allows operator implementations to be generic with respect to the type of trace,
//! and allows various data structures to be interpretable as multiple different types of trace.
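//!
//! As an illustrative sketch (not tied to any particular trace implementation), with totally ordered
//! integer times the updates
//!
//! ```text
//! ("key", "val", 1, +1)
//! ("key", "val", 3, -1)
//! ```
//!
//! accumulate to a collection containing `("key", "val")` at times `1` and `2`, and to an empty
//! collection at time `3` and beyond, where the two diffs cancel.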

pub mod cursor;
pub mod description;
pub mod implementations;
pub mod wrappers;

use timely::progress::{Antichain, frontier::AntichainRef};
use timely::progress::Timestamp;

use crate::logging::Logger;
pub use self::cursor::Cursor;
pub use self::description::Description;

use crate::trace::implementations::LayoutExt;

/// A type used to express how much effort a trace should exert even in the absence of updates.
pub type ExertionLogic = std::sync::Arc<dyn for<'a> Fn(&'a [(usize, usize, usize)])->Option<usize>+Send+Sync>;

// The traces and batches and cursors want the flexibility to appear as if they manage certain types of keys and
// values and such, while perhaps using other representations; I'm thinking mostly of wrappers around the keys
// and vals that change the `Ord` implementation, or stash hash codes, or the like.
//
// This complicates what requirements we make so that the trace is still usable by someone who knows only about
// the base key and value types. For example, the complex types should likely dereference to the simpler types,
// so that the user can make sense of the result as if they were given references to the simpler types. At the
// same time, the collection should be formable from base types (perhaps we need an `Into` or `From` constraint)
// and we should, somehow, be able to take a reference to the simple types to compare against the more complex
// types. This second one is also like an `Into` or `From` constraint, except that we start with a reference and
// really don't need anything more complex than a reference, but we can't form an owned copy of the complex type
// without cloning it.
//
// We could just start by cloning things. Worry about wrapping references later on.

/// A trace whose contents may be read.
///
/// This is a restricted interface to the more general `Trace` trait, which extends this trait with further methods
/// to update the contents of the trace. These methods are used to examine the contents, and to update the reader's
/// capabilities (which may release restrictions on the mutations to the underlying trace and cause work to happen).
pub trait TraceReader : LayoutExt {

    /// The type of an immutable collection of updates.
    type Batch:
        'static +
        Clone +
        BatchReader +
        WithLayout<Layout = Self::Layout> +
        for<'a> LayoutExt<
            Key<'a> = Self::Key<'a>,
            KeyOwn = Self::KeyOwn,
            Val<'a> = Self::Val<'a>,
            ValOwn = Self::ValOwn,
            Time = Self::Time,
            TimeGat<'a> = Self::TimeGat<'a>,
            Diff = Self::Diff,
            DiffGat<'a> = Self::DiffGat<'a>,
            KeyContainer = Self::KeyContainer,
            ValContainer = Self::ValContainer,
            TimeContainer = Self::TimeContainer,
            DiffContainer = Self::DiffContainer,
        >;

    /// Storage type for `Self::Cursor`. Likely related to `Self::Batch`.
    type Storage;

    /// The type used to enumerate the collection's contents.
    type Cursor:
        Cursor<Storage=Self::Storage> +
        WithLayout<Layout = Self::Layout> +
        for<'a> LayoutExt<
            Key<'a> = Self::Key<'a>,
            KeyOwn = Self::KeyOwn,
            Val<'a> = Self::Val<'a>,
            ValOwn = Self::ValOwn,
            Time = Self::Time,
            TimeGat<'a> = Self::TimeGat<'a>,
            Diff = Self::Diff,
            DiffGat<'a> = Self::DiffGat<'a>,
            KeyContainer = Self::KeyContainer,
            ValContainer = Self::ValContainer,
            TimeContainer = Self::TimeContainer,
            DiffContainer = Self::DiffContainer,
        >;

    /// Provides a cursor over updates contained in the trace.
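    ///
    /// A sketch of enumerating a trace's contents with the acquired cursor; `trace` is assumed to be
    /// a mutable handle to some concrete `TraceReader` implementation:
    ///
    /// ```ignore
    /// let (mut cursor, storage) = trace.cursor();
    /// // Walk keys, then values within each key, then the (time, diff) pairs for each (key, val).
    /// while cursor.key_valid(&storage) {
    ///     while cursor.val_valid(&storage) {
    ///         // `cursor.key(&storage)` and `cursor.val(&storage)` name the current pair.
    ///         cursor.map_times(&storage, |time, diff| {
    ///             // Each call of this closure observes one `(time, diff)` for the current pair.
    ///             let _ = (time, diff);
    ///         });
    ///         cursor.step_val(&storage);
    ///     }
    ///     cursor.step_key(&storage);
    /// }
    /// ```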
    fn cursor(&mut self) -> (Self::Cursor, Self::Storage) {
        if let Some(cursor) = self.cursor_through(Antichain::new().borrow()) {
            cursor
        }
        else {
            panic!("unable to acquire complete cursor for trace; is it closed?");
        }
    }

    /// Acquires a cursor to the restriction of the collection's contents to updates at times not greater or
    /// equal to an element of `upper`.
    ///
    /// This method is expected to work if called with an `upper` that (i) was an observed bound in batches from
    /// the trace, and (ii) the trace has not been advanced beyond `upper`. Practically, the implementation should
    /// be expected to look for a "clean cut" using `upper`, and if it finds such a cut can return a cursor. This
    /// should allow `upper` such as `&[]` as used by `self.cursor()`, though it is difficult to imagine other uses.
    fn cursor_through(&mut self, upper: AntichainRef<Self::Time>) -> Option<(Self::Cursor, Self::Storage)>;

    /// Advances the frontier that constrains logical compaction.
    ///
    /// Logical compaction is the ability of the trace to change the times of the updates it contains.
    /// Update times may be changed as long as their comparison to all query times beyond the logical compaction
    /// frontier remains unchanged. Practically, this means that groups of timestamps not beyond the frontier can
    /// be coalesced into fewer representative times.
    ///
    /// Logical compaction is important, as it allows the trace to forget historical distinctions between update
    /// times, and maintain a compact memory footprint over an unbounded update history.
    ///
    /// By advancing the logical compaction frontier, the caller unblocks merging of otherwise equivalent updates,
    /// but loses the ability to observe historical detail that is not beyond `frontier`.
    ///
    /// It is an error to call this method with a frontier not equal to or beyond the most recent arguments to
    /// this method, or the initial value of `get_logical_compaction()` if this method has not yet been called.
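    ///
    /// As a sketch of the effect (with totally ordered `u64` times, and `Lattice::advance_by` as one way
    /// an implementation might realize logical compaction):
    ///
    /// ```ignore
    /// use differential_dataflow::lattice::Lattice;
    /// use timely::progress::Antichain;
    ///
    /// // Once the logical compaction frontier is `[5]`, the update time `3` may be advanced to `5`,
    /// // because `3` and `5` compare identically against every query time greater or equal to `5`.
    /// let frontier = Antichain::from_elem(5u64);
    /// let mut time = 3u64;
    /// time.advance_by(frontier.borrow());
    /// assert_eq!(time, 5);
    /// ```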
    fn set_logical_compaction(&mut self, frontier: AntichainRef<Self::Time>);

    /// Reports the logical compaction frontier.
    ///
    /// All update times beyond this frontier will be presented with their original times, and all update times
    /// not beyond this frontier will present as a time that compares identically with all query times beyond
    /// this frontier. Practically, update times not beyond this frontier should not be taken to be accurate as
    /// presented, and should be used carefully, only in accumulation to times that are beyond the frontier.
    fn get_logical_compaction(&mut self) -> AntichainRef<'_, Self::Time>;

    /// Advances the frontier that constrains physical compaction.
    ///
    /// Physical compaction is the ability of the trace to merge the batches of updates it maintains. Physical
    /// compaction does not change the updates or their timestamps, although it is also the moment at which
    /// logical compaction is most likely to happen.
    ///
    /// Physical compaction allows the trace to maintain a logarithmic number of batches of updates, which is
    /// what allows the trace to provide efficient random access by keys and values.
    ///
    /// By advancing the physical compaction frontier, the caller unblocks the merging of batches of updates,
    /// but loses the ability to create a cursor through any frontier not beyond `frontier`.
    ///
    /// It is an error to call this method with a frontier not equal to or beyond the most recent arguments to
    /// this method, or the initial value of `get_physical_compaction()` if this method has not yet been called.
    fn set_physical_compaction(&mut self, frontier: AntichainRef<'_, Self::Time>);

    /// Reports the physical compaction frontier.
    ///
    /// All batches containing updates beyond this frontier will not be merged with other batches. This allows
    /// the caller to create a cursor through any frontier beyond the physical compaction frontier, with the
    /// `cursor_through()` method. This functionality is primarily of interest to the `join` operator, and any
    /// other operators who need to take notice of the physical structure of update batches.
    fn get_physical_compaction(&mut self) -> AntichainRef<'_, Self::Time>;

    /// Maps logic across the non-empty sequence of batches in the trace.
    ///
    /// This is currently used only to extract historical data to prime late-starting operators who want to reproduce
    /// the stream of batches moving past the trace. It could also be a fine basis for a default implementation of the
    /// cursor methods, as they (by default) just move through batches accumulating cursors into a cursor list.
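    ///
    /// A small sketch, with `trace` assumed to be a handle to some concrete `TraceReader` implementation:
    ///
    /// ```ignore
    /// // Count the updates currently held across all batches of the trace.
    /// let mut updates = 0;
    /// trace.map_batches(|batch| updates += batch.len());
    /// ```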
    fn map_batches<F: FnMut(&Self::Batch)>(&self, f: F);

    /// Reads the upper frontier of committed times.
    #[inline]
    fn read_upper(&mut self, target: &mut Antichain<Self::Time>) {
        target.clear();
        target.insert(<Self::Time as timely::progress::Timestamp>::minimum());
        self.map_batches(|batch| {
            target.clone_from(batch.upper());
        });
    }

    /// Advances `upper` by any empty batches.
    ///
    /// An empty batch whose `batch.lower` bound equals the current
    /// contents of `upper` will advance `upper` to `batch.upper`.
    /// Taken across all batches, this should advance `upper` across
    /// empty batch regions.
    fn advance_upper(&mut self, upper: &mut Antichain<Self::Time>) {
        self.map_batches(|batch| {
            if batch.is_empty() && batch.lower() == upper {
                upper.clone_from(batch.upper());
            }
        });
    }

}

/// An append-only collection of `(key, val, time, diff)` tuples.
///
/// The trace must pretend to look like a collection of `(Key, Val, Time, isize)` tuples, but is permitted
/// to introduce new types `KeyRef`, `ValRef`, and `TimeRef` which can be dereferenced to the types above.
///
/// The trace must be constructible from, and navigable by, the `Key`, `Val`, `Time` types, but does not need
/// to return them.
pub trait Trace : TraceReader<Batch: Batch> {

    /// Allocates a new empty trace.
    fn new(
        info: ::timely::dataflow::operators::generic::OperatorInfo,
        logging: Option<crate::logging::Logger>,
        activator: Option<timely::scheduling::activate::Activator>,
    ) -> Self;

    /// Exert merge effort, even without updates.
    fn exert(&mut self);

    /// Sets the logic for exertion in the absence of updates.
    ///
    /// The function receives a slice of batch levels, from large to small, as triples `(level, count, length)`,
    /// indicating the level, the number of batches, and their total length in updates. It should return a number of
    /// updates to perform, or `None` if no work is required.
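    ///
    /// A sketch of one possible policy, with `trace` assumed to be a handle to a concrete `Trace`
    /// implementation (the policy itself is illustrative, not prescribed by this interface):
    ///
    /// ```ignore
    /// use std::sync::Arc;
    /// use differential_dataflow::trace::ExertionLogic;
    ///
    /// // Request work equal to the update count of the first level that holds more than one batch.
    /// let logic: ExertionLogic = Arc::new(|levels: &[(usize, usize, usize)]| {
    ///     levels.iter()
    ///           .find(|level| level.1 > 1)   // more than one batch at this level
    ///           .map(|level| level.2)        // request work equal to its total updates
    /// });
    /// trace.set_exert_logic(logic);
    /// ```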
    fn set_exert_logic(&mut self, logic: ExertionLogic);

    /// Introduces a batch of updates to the trace.
    ///
    /// Batches describe the time intervals they contain, and they should be added to the trace in contiguous
    /// intervals. If a batch arrives with a lower bound that does not equal the upper bound of the most recent
    /// addition, the trace will add an empty batch. It is an error to then try to populate that region of time.
    ///
    /// This restriction could be relaxed, especially if we discover ways in which batch interval order could
    /// commute. For now, the trace should complain, to the extent that it cares about contiguous intervals.
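    ///
    /// A sketch of the contiguity requirement, with `trace`, `batch_a`, and `batch_b` assumed to be a
    /// trace handle and two batches of its batch type:
    ///
    /// ```ignore
    /// // `batch_a` covers times in `[a_lower, a_upper)` and `batch_b` covers `[b_lower, b_upper)`.
    /// // Batches are introduced in interval order, and each lower bound should equal the previous upper bound.
    /// assert_eq!(batch_a.upper(), batch_b.lower());
    /// trace.insert(batch_a);
    /// trace.insert(batch_b);
    /// ```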
    fn insert(&mut self, batch: Self::Batch);

    /// Introduces an empty batch concluding the trace.
    ///
    /// This method should be logically equivalent to introducing an empty batch whose lower frontier equals
    /// the upper frontier of the most recently introduced batch, and whose upper frontier is empty.
    fn close(&mut self);
}

use crate::trace::implementations::WithLayout;

/// A batch of updates whose contents may be read.
///
/// This is a restricted interface to batches of updates, which support the reading of the batch's contents,
/// but do not expose ways to construct the batches. This trait is appropriate for views of the batch, and is
/// especially useful for views derived from other sources in ways that prevent the construction of batches
/// from the type of data in the view (for example, filtered views, or views with extended time coordinates).
pub trait BatchReader : LayoutExt + Sized {

    /// The type used to enumerate the batch's contents.
    type Cursor:
        Cursor<Storage=Self> +
        WithLayout<Layout = Self::Layout> +
        for<'a> LayoutExt<
            Key<'a> = Self::Key<'a>,
            KeyOwn = Self::KeyOwn,
            Val<'a> = Self::Val<'a>,
            ValOwn = Self::ValOwn,
            Time = Self::Time,
            TimeGat<'a> = Self::TimeGat<'a>,
            Diff = Self::Diff,
            DiffGat<'a> = Self::DiffGat<'a>,
            KeyContainer = Self::KeyContainer,
            ValContainer = Self::ValContainer,
            TimeContainer = Self::TimeContainer,
            DiffContainer = Self::DiffContainer,
        >;

    /// Acquires a cursor to the batch's contents.
    fn cursor(&self) -> Self::Cursor;
    /// The number of updates in the batch.
    fn len(&self) -> usize;
    /// True if the batch is empty.
    fn is_empty(&self) -> bool { self.len() == 0 }
    /// Describes the times of the updates in the batch.
    fn description(&self) -> &Description<Self::Time>;

    /// All times in the batch are greater or equal to an element of `lower`.
    fn lower(&self) -> &Antichain<Self::Time> { self.description().lower() }
    /// All times in the batch are not greater or equal to any element of `upper`.
    fn upper(&self) -> &Antichain<Self::Time> { self.description().upper() }
}

/// An immutable collection of updates.
pub trait Batch : BatchReader + Sized {
    /// A type used to progressively merge batches.
    type Merger: Merger<Self>;

    /// Initiates the merging of consecutive batches.
    ///
    /// The result of this method can be exercised to eventually produce the same result
    /// that a call to `self.merge(other)` would produce, but it can be done in a measured
    /// fashion. This can help to avoid latency spikes where a large merge needs to happen.
    fn begin_merge(&self, other: &Self, compaction_frontier: AntichainRef<Self::Time>) -> Self::Merger {
        Self::Merger::new(self, other, compaction_frontier)
    }

    /// Produce an empty batch over the indicated interval.
    fn empty(lower: Antichain<Self::Time>, upper: Antichain<Self::Time>) -> Self;
}

/// Functionality for collecting and batching updates.
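///
/// A sketch of the intended call pattern, with `MyBatcher` and `MyBuilder` standing in for concrete
/// batcher and builder implementations, and `container` for an unordered run of input updates:
///
/// ```ignore
/// let mut batcher = MyBatcher::new(None, operator_id);
/// // Push unordered containers of updates as they arrive.
/// batcher.push_container(&mut container);
/// // Later, extract a batch of all updates at times not greater or equal to an element of `upper`.
/// let batch = batcher.seal::<MyBuilder>(upper);
/// ```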
pub trait Batcher {
    /// Type pushed into the batcher.
    type Input;
    /// Type produced by the batcher.
    type Output;
    /// Times at which batches are formed.
    type Time: Timestamp;
    /// Allocates a new empty batcher.
    fn new(logger: Option<Logger>, operator_id: usize) -> Self;
    /// Adds an unordered container of elements to the batcher.
    fn push_container(&mut self, batch: &mut Self::Input);
    /// Returns all updates not greater or equal to an element of `upper`.
    fn seal<B: Builder<Input=Self::Output, Time=Self::Time>>(&mut self, upper: Antichain<Self::Time>) -> B::Output;
    /// Returns the lower envelope of contained update times.
    fn frontier(&mut self) -> AntichainRef<'_, Self::Time>;
}

/// Functionality for building batches from ordered update sequences.
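///
/// A sketch of direct use, with `MyBuilder` standing in for a concrete builder, `chunk` for a sorted and
/// consolidated run of its input type, and `lower`/`upper`/`since` for the relevant frontiers:
///
/// ```ignore
/// use differential_dataflow::trace::Description;
///
/// let mut builder = MyBuilder::with_capacity(keys, vals, upds);
/// builder.push(&mut chunk);
/// // The description records the batch's lower and upper bounds and its compaction frontier.
/// let batch = builder.done(Description::new(lower, upper, since));
/// ```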
pub trait Builder: Sized {
    /// Input item type.
    type Input;
    /// Timestamp type.
    type Time: Timestamp;
    /// Output batch type.
    type Output;

    /// Allocates an empty builder.
    ///
    /// Ideally we deprecate this and insist all non-trivial building happens via `with_capacity()`.
    // #[deprecated]
    fn new() -> Self { Self::with_capacity(0, 0, 0) }
    /// Allocates an empty builder with capacity for the specified keys, values, and updates.
    ///
    /// They represent respectively the number of distinct `key`, `(key, val)`, and total updates.
    fn with_capacity(keys: usize, vals: usize, upds: usize) -> Self;
    /// Adds a chunk of elements to the batch.
    ///
    /// Adds all elements from `chunk` to the builder and leaves `chunk` in an undefined state.
    fn push(&mut self, chunk: &mut Self::Input);
    /// Completes building and returns the batch.
    fn done(self, description: Description<Self::Time>) -> Self::Output;

    /// Builds a batch from a chain of updates corresponding to the indicated lower and upper bounds.
    ///
    /// This method relies on the chain only containing updates greater or equal to the lower frontier,
    /// and not greater or equal to the upper frontier, as encoded in the description. Chains must also
    /// be sorted and consolidated.
    fn seal(chain: &mut Vec<Self::Input>, description: Description<Self::Time>) -> Self::Output;
}

/// Represents a merge in progress.
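///
/// A sketch of the fuel-limited merge protocol, with `batch1` and `batch2` assumed to be consecutive
/// batches of the same batch type, and `compaction_frontier` a frontier for logical compaction:
///
/// ```ignore
/// let mut merger = batch1.begin_merge(&batch2, compaction_frontier);
/// // Repeat with fresh fuel while each call exhausts it; leftover fuel indicates the merge is complete.
/// let mut fuel = 1_000_000;
/// merger.work(&batch1, &batch2, &mut fuel);
/// while fuel <= 0 {
///     fuel = 1_000_000;
///     merger.work(&batch1, &batch2, &mut fuel);
/// }
/// let merged = merger.done();
/// ```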
pub trait Merger<Output: Batch> {
    /// Creates a new merger to merge the supplied batches, optionally compacting
    /// up to the supplied frontier.
    fn new(source1: &Output, source2: &Output, compaction_frontier: AntichainRef<Output::Time>) -> Self;
    /// Perform some amount of work, decrementing `fuel`.
    ///
    /// If `fuel` is non-zero after the call, the merging is complete and
    /// one should call `done` to extract the merged results.
    fn work(&mut self, source1: &Output, source2: &Output, fuel: &mut isize);
    /// Extracts merged results.
    ///
    /// This method should only be called after `work` has been called and
    /// has not brought `fuel` to zero. Otherwise, the merge is still in
    /// progress.
    fn done(self) -> Output;
}

/// Blanket implementations for reference counted batches.
pub mod rc_blanket_impls {

    use std::rc::Rc;

    use timely::progress::{Antichain, frontier::AntichainRef};
    use super::{Batch, BatchReader, Builder, Merger, Cursor, Description};

    impl<B: BatchReader> WithLayout for Rc<B> {
        type Layout = B::Layout;
    }

    impl<B: BatchReader> BatchReader for Rc<B> {

        /// The type used to enumerate the batch's contents.
        type Cursor = RcBatchCursor<B::Cursor>;
        /// Acquires a cursor to the batch's contents.
        fn cursor(&self) -> Self::Cursor {
            RcBatchCursor::new((**self).cursor())
        }

        /// The number of updates in the batch.
        fn len(&self) -> usize { (**self).len() }
        /// Describes the times of the updates in the batch.
        fn description(&self) -> &Description<Self::Time> { (**self).description() }
    }

    /// Wrapper to provide cursor to nested scope.
    pub struct RcBatchCursor<C> {
        cursor: C,
    }

    use crate::trace::implementations::WithLayout;
    impl<C: Cursor> WithLayout for RcBatchCursor<C> {
        type Layout = C::Layout;
    }

    impl<C> RcBatchCursor<C> {
        fn new(cursor: C) -> Self {
            RcBatchCursor {
                cursor,
            }
        }
    }

    impl<C: Cursor> Cursor for RcBatchCursor<C> {

        type Storage = Rc<C::Storage>;

        #[inline] fn key_valid(&self, storage: &Self::Storage) -> bool { self.cursor.key_valid(storage) }
        #[inline] fn val_valid(&self, storage: &Self::Storage) -> bool { self.cursor.val_valid(storage) }

        #[inline] fn key<'a>(&self, storage: &'a Self::Storage) -> Self::Key<'a> { self.cursor.key(storage) }
        #[inline] fn val<'a>(&self, storage: &'a Self::Storage) -> Self::Val<'a> { self.cursor.val(storage) }

        #[inline] fn get_key<'a>(&self, storage: &'a Self::Storage) -> Option<Self::Key<'a>> { self.cursor.get_key(storage) }
        #[inline] fn get_val<'a>(&self, storage: &'a Self::Storage) -> Option<Self::Val<'a>> { self.cursor.get_val(storage) }

        #[inline]
        fn map_times<L: FnMut(Self::TimeGat<'_>, Self::DiffGat<'_>)>(&mut self, storage: &Self::Storage, logic: L) {
            self.cursor.map_times(storage, logic)
        }

        #[inline] fn step_key(&mut self, storage: &Self::Storage) { self.cursor.step_key(storage) }
        #[inline] fn seek_key(&mut self, storage: &Self::Storage, key: Self::Key<'_>) { self.cursor.seek_key(storage, key) }

        #[inline] fn step_val(&mut self, storage: &Self::Storage) { self.cursor.step_val(storage) }
        #[inline] fn seek_val(&mut self, storage: &Self::Storage, val: Self::Val<'_>) { self.cursor.seek_val(storage, val) }

        #[inline] fn rewind_keys(&mut self, storage: &Self::Storage) { self.cursor.rewind_keys(storage) }
        #[inline] fn rewind_vals(&mut self, storage: &Self::Storage) { self.cursor.rewind_vals(storage) }
    }

    /// An immutable collection of updates.
    impl<B: Batch> Batch for Rc<B> {
        type Merger = RcMerger<B>;
        fn empty(lower: Antichain<Self::Time>, upper: Antichain<Self::Time>) -> Self {
            Rc::new(B::empty(lower, upper))
        }
    }

    /// Wrapper type for building reference counted batches.
    pub struct RcBuilder<B: Builder> { builder: B }

    /// Functionality for building batches from ordered update sequences.
    impl<B: Builder> Builder for RcBuilder<B> {
        type Input = B::Input;
        type Time = B::Time;
        type Output = Rc<B::Output>;
        fn with_capacity(keys: usize, vals: usize, upds: usize) -> Self { RcBuilder { builder: B::with_capacity(keys, vals, upds) } }
        fn push(&mut self, input: &mut Self::Input) { self.builder.push(input) }
        fn done(self, description: Description<Self::Time>) -> Rc<B::Output> { Rc::new(self.builder.done(description)) }
        fn seal(chain: &mut Vec<Self::Input>, description: Description<Self::Time>) -> Self::Output {
            Rc::new(B::seal(chain, description))
        }
    }

    /// Wrapper type for merging reference counted batches.
    pub struct RcMerger<B:Batch> { merger: B::Merger }

    /// Represents a merge in progress.
    impl<B:Batch> Merger<Rc<B>> for RcMerger<B> {
        fn new(source1: &Rc<B>, source2: &Rc<B>, compaction_frontier: AntichainRef<B::Time>) -> Self { RcMerger { merger: B::begin_merge(source1, source2, compaction_frontier) } }
        fn work(&mut self, source1: &Rc<B>, source2: &Rc<B>, fuel: &mut isize) { self.merger.work(source1, source2, fuel) }
        fn done(self) -> Rc<B> { Rc::new(self.merger.done()) }
    }
}