differential_dataflow/trace/implementations/rhh.rs

//! Batch implementation based on Robin Hood Hashing.
//!
//! Items are ordered by `(hash(Key), Key)` rather than `Key`, which means
//! that these implementations should only be used with each other, under
//! the same `hash` function, or for types that also order by `(hash(X), X)`,
//! for example wrapped types that implement `Ord` that way.
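//!
//! As a minimal illustration (the hash values below are invented, not produced by any
//! particular hasher), a batch holding keys `"a"`, `"b"`, and `"c"` compares them by
//! hash first, so a cursor yields them in hash order rather than `Ord`-by-key order:
//!
//! ```text
//! key    hash(key)   position in an RHH batch
//! "b"    0x13..      first  (smallest hash)
//! "a"    0x7f..      second
//! "c"    0xe2..      third  (largest hash)
//! ```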

use std::rc::Rc;
use std::cmp::Ordering;

use serde::{Deserialize, Serialize};

use crate::Hashable;
use crate::containers::TimelyStack;
use crate::trace::implementations::chunker::{ColumnationChunker, VecChunker};
use crate::trace::implementations::merge_batcher::{MergeBatcher, VecMerger, ColMerger};
use crate::trace::implementations::spine_fueled::Spine;
use crate::trace::rc_blanket_impls::RcBuilder;

use super::{Layout, Vector, TStack};

use self::val_batch::{RhhValBatch, RhhValBuilder};

/// A trace implementation using a spine of ordered lists.
pub type VecSpine<K, V, T, R> = Spine<Rc<RhhValBatch<Vector<((K,V),T,R)>>>>;
/// A batcher for ordered lists.
pub type VecBatcher<K,V,T,R> = MergeBatcher<Vec<((K,V),T,R)>, VecChunker<((K,V),T,R)>, VecMerger<(K, V), T, R>>;
/// A builder for ordered lists.
pub type VecBuilder<K,V,T,R> = RcBuilder<RhhValBuilder<Vector<((K,V),T,R)>, Vec<((K,V),T,R)>>>;

// /// A trace implementation for empty values using a spine of ordered lists.
// pub type OrdKeySpine<K, T, R> = Spine<Rc<OrdKeyBatch<Vector<((K,()),T,R)>>>>;

/// A trace implementation backed by columnar storage.
pub type ColSpine<K, V, T, R> = Spine<Rc<RhhValBatch<TStack<((K,V),T,R)>>>>;
/// A batcher for columnar storage.
pub type ColBatcher<K,V,T,R> = MergeBatcher<Vec<((K,V),T,R)>, ColumnationChunker<((K,V),T,R)>, ColMerger<(K,V),T,R>>;
/// A builder for columnar storage.
pub type ColBuilder<K,V,T,R> = RcBuilder<RhhValBuilder<TStack<((K,V),T,R)>, TimelyStack<((K,V),T,R)>>>;

// /// A trace implementation backed by columnar storage.
// pub type ColKeySpine<K, T, R> = Spine<Rc<OrdKeyBatch<TStack<((K,()),T,R)>>>>;

/// A carrier trait indicating that the type's `Ord` and `PartialOrd` implementations are by `Hashable::hashed()`.
pub trait HashOrdered: Hashable { }

impl<'a, T: std::hash::Hash + HashOrdered> HashOrdered for &'a T { }

/// A hash-ordered wrapper that modifies `Ord` and `PartialOrd`.
#[derive(Copy, Clone, Eq, PartialEq, Debug, Default, Serialize, Deserialize)]
pub struct HashWrapper<T: std::hash::Hash + Hashable> {
    /// The inner value, freely modifiable.
    pub inner: T
}

impl<T: PartialOrd + std::hash::Hash + Hashable<Output: PartialOrd>> PartialOrd for HashWrapper<T> {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        let this_hash = self.inner.hashed();
        let that_hash = other.inner.hashed();
        (this_hash, &self.inner).partial_cmp(&(that_hash, &other.inner))
    }
}

impl<T: Ord + PartialOrd + std::hash::Hash + Hashable<Output: PartialOrd>> Ord for HashWrapper<T> {
    fn cmp(&self, other: &Self) -> Ordering {
        self.partial_cmp(other).unwrap()
    }
}

impl<T: std::hash::Hash + Hashable> HashOrdered for HashWrapper<T> { }

impl<T: std::hash::Hash + Hashable> Hashable for HashWrapper<T> {
    type Output = T::Output;
    fn hashed(&self) -> Self::Output { self.inner.hashed() }
}

impl<T: std::hash::Hash + Hashable> HashOrdered for &HashWrapper<T> { }

impl<T: std::hash::Hash + Hashable> Hashable for &HashWrapper<T> {
    type Output = T::Output;
    fn hashed(&self) -> Self::Output { self.inner.hashed() }
}

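// A minimal sketch of how `HashWrapper` reorders values (hypothetical values, not part
// of this module): wrapping two `u64`s and comparing the wrappers compares
// `(hashed(), value)` rather than the values themselves, so the result can disagree
// with plain `Ord`.
//
//     let a = HashWrapper { inner: 1u64 };
//     let b = HashWrapper { inner: 2u64 };
//     // `a.cmp(&b)` orders by `(a.inner.hashed(), 1)` vs `(b.inner.hashed(), 2)`,
//     // which may be `Greater` even though `1 < 2`.
//
// This is exactly the `(hash(X), X)` ordering the RHH batches below rely on.
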
mod val_batch {

    use std::convert::TryInto;
    use std::marker::PhantomData;
    use serde::{Deserialize, Serialize};
    use timely::container::PushInto;
    use timely::progress::{Antichain, frontier::AntichainRef};

    use crate::hashable::Hashable;
    use crate::trace::{Batch, BatchReader, Builder, Cursor, Description, Merger};
    use crate::trace::implementations::{BatchContainer, BuilderInput};
    use crate::trace::implementations::layout;

    use super::{Layout, HashOrdered};

    /// Update tuples organized as a Robin Hood Hash map, ordered by `(hash(Key), Key, Val, Time)`.
    ///
    /// Specifically, this means that we attempt to place any `Key` at `alloc_len * (hash(Key) / 2^64)`,
    /// and spill onward if the slot is occupied. The cleverness of RHH is that you may instead evict
    /// someone else, in order to maintain the ordering up above. In fact, that is basically the rule:
    /// when there is a conflict, evict the greater of the two and attempt to place it in the next slot.
    ///
    /// This RHH implementation uses a repeated `keys_offs` offset to indicate an absent element, as all
    /// keys for valid updates must have some associated values with updates. This is the same type of
    /// optimization made for repeated updates, and it rules out (here) using that trick for repeated values.
    ///
    /// We will use the `Hashable` trait here, but any consistent hash function should work out ok.
    /// We specifically want to use the highest bits of the result (we will) because the low bits have
    /// likely been spent shuffling the data between workers (by key), and are likely low entropy.
    #[derive(Debug, Serialize, Deserialize)]
    pub struct RhhValStorage<L: Layout>
    where
        layout::Key<L>: Default + HashOrdered,
    {

        /// The requested capacity for `keys`. We use this when determining where a key with a certain hash
        /// would most like to end up. The `BatchContainer` trait does not provide a `capacity()` method,
        /// otherwise we would just use that.
        pub key_capacity: usize,
        /// A number large enough that when it divides any `u64` the result is at most `self.key_capacity`.
        /// When that capacity is zero or one, this is set to zero instead.
        pub divisor: u64,
        /// The number of present keys, distinct from `keys.len()`, which also counts the dead (padding) entries.
        pub key_count: usize,

        /// An ordered list of keys, corresponding to entries in `keys_offs`.
        pub keys: L::KeyContainer,
        /// Offsets used to provide indexes from keys to values.
        ///
        /// The length of this list is one longer than `keys`, so that we can avoid bounds logic.
        pub keys_offs: L::OffsetContainer,
        /// Concatenated ordered lists of values, bracketed by offsets in `keys_offs`.
        pub vals: L::ValContainer,
        /// Offsets used to provide indexes from values to updates.
        ///
        /// This list has a special representation that any empty range indicates the singleton
        /// element just before the range, as if the start were decremented by one. The empty
        /// range is otherwise an invalid representation, and we borrow it to compactly encode
        /// single common update values (e.g. in a snapshot, the minimal time and a diff of one).
        ///
        /// The length of this list is one longer than `vals`, so that we can avoid bounds logic.
        pub vals_offs: L::OffsetContainer,
        /// Concatenated ordered lists of update times, bracketed by offsets in `vals_offs`.
        pub times: L::TimeContainer,
        /// Concatenated ordered lists of update diffs, bracketed by offsets in `vals_offs`.
        pub diffs: L::DiffContainer,
    }
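
    // A small, made-up illustration of the storage layout (not real data): with
    // `key_capacity = 4` and two live keys whose desired slots are 0 and 3, slots 1
    // and 2 hold default "dummy" keys whose `keys_offs` range is empty, marking them
    // as dead space.
    //
    //     keys:      [k0, default, default, k3]
    //     keys_offs: [0,  2,       2,       2,  3]   // repeated `2` => slots 1, 2 are absent
    //     vals:      [v0, v1, v2]                    // k0 -> v0, v1; k3 -> v2
    //
    // `live_key(i)` below checks exactly this: `keys_offs[i] != keys_offs[i+1]`.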

    impl<L: Layout> RhhValStorage<L>
    where
        layout::Key<L>: Default + HashOrdered,
        for<'a> layout::KeyRef<'a, L>: HashOrdered,
    {
        /// Lower and upper bounds in `self.vals` corresponding to the key at `index`.
        fn values_for_key(&self, index: usize) -> (usize, usize) {
            let lower = self.keys_offs.index(index);
            let upper = self.keys_offs.index(index+1);
            // Looking up values for an invalid key indicates something is wrong.
            assert!(lower < upper, "{:?} v {:?} at {:?}", lower, upper, index);
            (lower, upper)
        }
        /// Lower and upper bounds in `self.updates` corresponding to the value at `index`.
        fn updates_for_value(&self, index: usize) -> (usize, usize) {
            let mut lower = self.vals_offs.index(index);
            let upper = self.vals_offs.index(index+1);
            // We use equal lower and upper to encode "singleton update; just before here".
            // It should only apply when there is a prior element, so `lower` should be greater than zero.
            if lower == upper {
                assert!(lower > 0);
                lower -= 1;
            }
            (lower, upper)
        }
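
        // A hypothetical walk-through of the singleton encoding read path (values are
        // illustrative only): with `vals_offs = [0, 2, 2]`, value 0 owns updates `0..2`,
        // and value 1 has an empty range `2..2`, which `updates_for_value` rewrites to
        // `1..2`, i.e. "repeat the single update just before here".
        //
        //     updates_for_value(0) == (0, 2)
        //     updates_for_value(1) == (1, 2)   // empty range decoded as a singleton
        //
        // The writer side of this trick is `push_update` in `RhhValBuilder` below.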

        /// Inserts the key at its desired location, or nearby.
        ///
        /// Because there may be collisions, the key may be placed just after its desired location.
        /// If necessary, this method will introduce default keys and copy the offsets to create space
        /// after which to insert the key. Such padding entries are indicated by repeated offsets in `keys_offs`.
        ///
        /// If `offset` is specified, we will insert it at the appropriate location. If it is not specified,
        /// we leave `keys_offs` ready to receive it as the next `push`. This is so that builders that may
        /// not know the final offset at the moment of key insertion can prepare for receiving the offset.
        fn insert_key(&mut self, key: layout::KeyRef<'_, L>, offset: Option<usize>) {
            let desired = self.desired_location(&key);
            // Were we to push the key now, it would land at `self.keys.len()`; while that is
            // short of `desired`, push additional blank entries in.
            while self.keys.len() < desired {
                // We insert a default (dummy) key and repeat the offset to indicate this.
                let current_offset = self.keys_offs.index(self.keys.len());
                self.keys.push_own(&<layout::Key<L> as Default>::default());
                self.keys_offs.push_ref(current_offset);
            }

            // Now we insert the key, even if this is no longer its desired location because of contention.
            // If an offset has been supplied we insert it, and otherwise leave it for future determination.
            self.keys.push_ref(key);
            if let Some(offset) = offset {
                self.keys_offs.push_ref(offset);
            }
            self.key_count += 1;
        }

        /// Inserts a reference to an owned key, inefficiently. Should be removed.
        fn insert_key_own(&mut self, key: &layout::Key<L>, offset: Option<usize>) {
            let mut key_con = L::KeyContainer::with_capacity(1);
            key_con.push_own(&key);
            self.insert_key(key_con.index(0), offset)
        }

        /// Indicates both the desired location and the hash signature of the key.
        fn desired_location<K: Hashable>(&self, key: &K) -> usize {
            if self.divisor == 0 { 0 }
            else {
                (key.hashed().into() / self.divisor).try_into().expect("divisor not large enough to force u64 into usize")
            }
        }

        /// Returns true if one should advance one's index in the search for `key`.
        fn advance_key(&self, index: usize, key: layout::KeyRef<'_, L>) -> bool {
            // Ideally this short-circuits, as `self.keys[index]` is bogus data.
            !self.live_key(index) || self.keys.index(index).lt(&<L::KeyContainer as BatchContainer>::reborrow(key))
        }

        /// Indicates that a key is valid, rather than dead space, by looking for a valid offset range.
        fn live_key(&self, index: usize) -> bool {
            self.keys_offs.index(index) != self.keys_offs.index(index+1)
        }

        /// Advances `index` until it references a live key, or is `keys.len()`.
        fn advance_to_live_key(&self, index: &mut usize) {
            while *index < self.keys.len() && !self.live_key(*index) {
                *index += 1;
            }
        }

        /// A value large enough that any `u64` divided by it is less than `capacity`.
        ///
        /// This is `2^64 / capacity`, except in the cases where `capacity` is zero or one.
        /// In those cases, we'll return `0` to communicate the exception, for which we should
        /// just return `0` when announcing a target location (and a zero capacity that we insert
        /// into becomes a bug).
        fn divisor_for_capacity(capacity: usize) -> u64 {
            let capacity: u64 = capacity.try_into().expect("usize exceeds u64");
            if capacity == 0 || capacity == 1 { 0 }
            else {
                ((1 << 63) / capacity) << 1
            }
        }
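
        // A worked example of the divisor arithmetic (purely illustrative numbers):
        // with `capacity = 4`, `divisor = ((1 << 63) / 4) << 1 = 2^62`, and
        // `desired_location` computes `hash / 2^62`, mapping the full `u64` hash range
        // onto slots `0 ..= 3`. The high bits of the hash thus choose the slot, which
        // is the property the module documentation asks of the hash function.
        //
        //     hash = 0x0000_..._0000  ->  slot 0
        //     hash = 0x7fff_..._ffff  ->  slot 1
        //     hash = 0xffff_..._ffff  ->  slot 3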
    }

    /// An immutable collection of update tuples, from a contiguous interval of logical times.
    ///
    /// The `L` parameter captures how the updates should be laid out.
    #[derive(Serialize, Deserialize)]
    #[serde(bound = "
        L::KeyContainer: Serialize + for<'a> Deserialize<'a>,
        L::ValContainer: Serialize + for<'a> Deserialize<'a>,
        L::OffsetContainer: Serialize + for<'a> Deserialize<'a>,
        L::TimeContainer: Serialize + for<'a> Deserialize<'a>,
        L::DiffContainer: Serialize + for<'a> Deserialize<'a>,
    ")]
    pub struct RhhValBatch<L: Layout>
    where
        layout::Key<L>: Default + HashOrdered,
    {
        /// The updates themselves.
        pub storage: RhhValStorage<L>,
        /// Description of the update times this layer represents.
        pub description: Description<layout::Time<L>>,
        /// The number of updates reflected in the batch.
        ///
        /// We track this separately from `storage` because, due to the singleton optimization,
        /// we may have many more updates than `storage.times.len()`. It should equal that
        /// length, plus the number of singleton optimizations employed.
        pub updates: usize,
    }

    impl<L: Layout> WithLayout for RhhValBatch<L>
    where
        layout::Key<L>: Default + HashOrdered,
        for<'a> layout::KeyRef<'a, L>: HashOrdered,
    {
        type Layout = L;
    }

    impl<L: Layout> BatchReader for RhhValBatch<L>
    where
        layout::Key<L>: Default + HashOrdered,
        for<'a> layout::KeyRef<'a, L>: HashOrdered,
    {
        type Cursor = RhhValCursor<L>;
        fn cursor(&self) -> Self::Cursor {
            let mut cursor = RhhValCursor {
                key_cursor: 0,
                val_cursor: 0,
                phantom: std::marker::PhantomData,
            };
            cursor.step_key(self);
            cursor
        }
        fn len(&self) -> usize {
            // Normally this would be the length of the stored updates, but the singleton encoding
            // means `storage.times.len()` undercounts; we track the true count separately.
            self.updates
        }
        fn description(&self) -> &Description<layout::Time<L>> { &self.description }
    }

    impl<L: Layout> Batch for RhhValBatch<L>
    where
        layout::Key<L>: Default + HashOrdered,
        for<'a> layout::KeyRef<'a, L>: HashOrdered,
    {
        type Merger = RhhValMerger<L>;

        fn begin_merge(&self, other: &Self, compaction_frontier: AntichainRef<layout::Time<L>>) -> Self::Merger {
            RhhValMerger::new(self, other, compaction_frontier)
        }

        fn empty(lower: Antichain<Self::Time>, upper: Antichain<Self::Time>) -> Self {
            use timely::progress::Timestamp;
            Self {
                storage: RhhValStorage {
                    keys: L::KeyContainer::with_capacity(0),
                    keys_offs: L::OffsetContainer::with_capacity(0),
                    vals: L::ValContainer::with_capacity(0),
                    vals_offs: L::OffsetContainer::with_capacity(0),
                    times: L::TimeContainer::with_capacity(0),
                    diffs: L::DiffContainer::with_capacity(0),
                    key_count: 0,
                    key_capacity: 0,
                    divisor: 0,
                },
                description: Description::new(lower, upper, Antichain::from_elem(Self::Time::minimum())),
                updates: 0,
            }
        }
    }

    /// State for an in-progress merge.
    pub struct RhhValMerger<L: Layout>
    where
        layout::Key<L>: Default + HashOrdered,
    {
        /// Key position to merge next in the first batch.
        key_cursor1: usize,
        /// Key position to merge next in the second batch.
        key_cursor2: usize,
        /// Result that we are currently assembling.
        result: RhhValStorage<L>,
        /// Description of the result batch.
        description: Description<layout::Time<L>>,

        /// Local stash of updates, to use for consolidation.
        ///
        /// We could emulate a `ChangeBatch` here, with related compaction smarts.
        /// A `ChangeBatch` itself needs an `i64` diff type, which we do not have.
        update_stash: Vec<(layout::Time<L>, layout::Diff<L>)>,
        /// Counts the number of singleton-optimized entries, so that we may correctly count the updates.
        singletons: usize,
    }

    impl<L: Layout> Merger<RhhValBatch<L>> for RhhValMerger<L>
    where
        layout::Key<L>: Default + HashOrdered,
        RhhValBatch<L>: Batch<Time=layout::Time<L>>,
        for<'a> layout::KeyRef<'a, L>: HashOrdered,
    {
        fn new(batch1: &RhhValBatch<L>, batch2: &RhhValBatch<L>, compaction_frontier: AntichainRef<layout::Time<L>>) -> Self {

            assert!(batch1.upper() == batch2.lower());
            use crate::lattice::Lattice;
            let mut since = batch1.description().since().join(batch2.description().since());
            since = since.join(&compaction_frontier.to_owned());

            let description = Description::new(batch1.lower().clone(), batch2.upper().clone(), since);

            // This is a massive overestimate on the number of keys, but we don't have better information.
            // An over-estimate can be a massive problem as well, with sparse regions being hard to cross.
            let max_cap = batch1.len() + batch2.len();
            let rhh_cap = 2 * max_cap;

            let batch1 = &batch1.storage;
            let batch2 = &batch2.storage;

            let mut storage = RhhValStorage {
                keys: L::KeyContainer::merge_capacity(&batch1.keys, &batch2.keys),
                keys_offs: L::OffsetContainer::with_capacity(batch1.keys_offs.len() + batch2.keys_offs.len()),
                vals: L::ValContainer::merge_capacity(&batch1.vals, &batch2.vals),
                vals_offs: L::OffsetContainer::with_capacity(batch1.vals_offs.len() + batch2.vals_offs.len()),
                times: L::TimeContainer::merge_capacity(&batch1.times, &batch2.times),
                diffs: L::DiffContainer::merge_capacity(&batch1.diffs, &batch2.diffs),
                key_count: 0,
                key_capacity: rhh_cap,
                divisor: RhhValStorage::<L>::divisor_for_capacity(rhh_cap),
            };

            // Mark explicit types because type inference fails to resolve it.
            let keys_offs: &mut L::OffsetContainer = &mut storage.keys_offs;
            keys_offs.push_ref(0);
            let vals_offs: &mut L::OffsetContainer = &mut storage.vals_offs;
            vals_offs.push_ref(0);

            RhhValMerger {
                key_cursor1: 0,
                key_cursor2: 0,
                result: storage,
                description,
                update_stash: Vec::new(),
                singletons: 0,
            }
        }
        fn done(self) -> RhhValBatch<L> {
            RhhValBatch {
                updates: self.result.times.len() + self.singletons,
                storage: self.result,
                description: self.description,
            }
        }
        fn work(&mut self, source1: &RhhValBatch<L>, source2: &RhhValBatch<L>, fuel: &mut isize) {

            // An (incomplete) indication of the amount of work we've done so far.
            let starting_updates = self.result.times.len();
            let mut effort = 0isize;

            source1.storage.advance_to_live_key(&mut self.key_cursor1);
            source2.storage.advance_to_live_key(&mut self.key_cursor2);

            // While both mergees are still active, perform single-key merges.
            while self.key_cursor1 < source1.storage.keys.len() && self.key_cursor2 < source2.storage.keys.len() && effort < *fuel {
                self.merge_key(&source1.storage, &source2.storage);
                source1.storage.advance_to_live_key(&mut self.key_cursor1);
                source2.storage.advance_to_live_key(&mut self.key_cursor2);
                // An (incomplete) accounting of the work we've done.
                effort = (self.result.times.len() - starting_updates) as isize;
            }

            // Merging is complete, and only copying remains.
            // Key-by-key copying allows effort interruption, and compaction.
            while self.key_cursor1 < source1.storage.keys.len() && effort < *fuel {
                self.copy_key(&source1.storage, self.key_cursor1);
                self.key_cursor1 += 1;
                source1.storage.advance_to_live_key(&mut self.key_cursor1);
                effort = (self.result.times.len() - starting_updates) as isize;
            }
            while self.key_cursor2 < source2.storage.keys.len() && effort < *fuel {
                self.copy_key(&source2.storage, self.key_cursor2);
                self.key_cursor2 += 1;
                source2.storage.advance_to_live_key(&mut self.key_cursor2);
                effort = (self.result.times.len() - starting_updates) as isize;
            }

            *fuel -= effort;
        }
    }

    // Helper methods in support of merging batches.
    impl<L: Layout> RhhValMerger<L>
    where
        layout::Key<L>: Default + HashOrdered,
        for<'a> layout::KeyRef<'a, L>: HashOrdered,
    {
        /// Copy the next key in `source`.
        ///
        /// The method extracts the key in `source` at `cursor`, and merges it into `self`.
        /// If the result does not wholly cancel, the key will be present in `self` with the
        /// compacted values and updates.
        ///
        /// The caller should be certain to update the cursor, as this method does not do this.
        fn copy_key(&mut self, source: &RhhValStorage<L>, cursor: usize) {
            // Capture the initial number of values to determine if the merge was ultimately non-empty.
            let init_vals = self.result.vals.len();
            let (mut lower, upper) = source.values_for_key(cursor);
            while lower < upper {
                self.stash_updates_for_val(source, lower);
                if let Some(off) = self.consolidate_updates() {
                    self.result.vals_offs.push_ref(off);
                    self.result.vals.push_ref(source.vals.index(lower));
                }
                lower += 1;
            }

            // If we have pushed any values, copy the key as well.
            if self.result.vals.len() > init_vals {
                self.result.insert_key(source.keys.index(cursor), Some(self.result.vals.len()));
            }
        }
        /// Merge the next key in each of `source1` and `source2` into `self`, updating the appropriate cursors.
        ///
        /// This method only merges a single key. It applies all compaction necessary, and may result in no output
        /// if the updates cancel either directly or after compaction.
        fn merge_key(&mut self, source1: &RhhValStorage<L>, source2: &RhhValStorage<L>) {

            use ::std::cmp::Ordering;
            match source1.keys.index(self.key_cursor1).cmp(&source2.keys.index(self.key_cursor2)) {
                Ordering::Less => {
                    self.copy_key(source1, self.key_cursor1);
                    self.key_cursor1 += 1;
                },
                Ordering::Equal => {
                    // Keys are equal; must merge all values from both sources for this one key.
                    let (lower1, upper1) = source1.values_for_key(self.key_cursor1);
                    let (lower2, upper2) = source2.values_for_key(self.key_cursor2);
                    if let Some(off) = self.merge_vals((source1, lower1, upper1), (source2, lower2, upper2)) {
                        self.result.insert_key(source1.keys.index(self.key_cursor1), Some(off));
                    }
                    // Increment cursors in either case; the keys are merged.
                    self.key_cursor1 += 1;
                    self.key_cursor2 += 1;
                },
                Ordering::Greater => {
                    self.copy_key(source2, self.key_cursor2);
                    self.key_cursor2 += 1;
                },
            }
        }
        /// Merge two ranges of values into `self`.
        ///
        /// If the compacted result contains values with non-empty updates, the function returns
        /// an offset that should be recorded to indicate the upper extent of the result values.
        fn merge_vals(
            &mut self,
            (source1, mut lower1, upper1): (&RhhValStorage<L>, usize, usize),
            (source2, mut lower2, upper2): (&RhhValStorage<L>, usize, usize),
        ) -> Option<usize> {
            // Capture the initial number of values to determine if the merge was ultimately non-empty.
            let init_vals = self.result.vals.len();
            while lower1 < upper1 && lower2 < upper2 {
                // We compare values, and fold in updates for the lowest values;
                // if they are non-empty post-consolidation, we write the value.
                // We could multi-way merge and it wouldn't be very complicated.
                use ::std::cmp::Ordering;
                match source1.vals.index(lower1).cmp(&source2.vals.index(lower2)) {
                    Ordering::Less => {
                        // Extend stash by updates, with logical compaction applied.
                        self.stash_updates_for_val(source1, lower1);
                        if let Some(off) = self.consolidate_updates() {
                            self.result.vals_offs.push_ref(off);
                            self.result.vals.push_ref(source1.vals.index(lower1));
                        }
                        lower1 += 1;
                    },
                    Ordering::Equal => {
                        self.stash_updates_for_val(source1, lower1);
                        self.stash_updates_for_val(source2, lower2);
                        if let Some(off) = self.consolidate_updates() {
                            self.result.vals_offs.push_ref(off);
                            self.result.vals.push_ref(source1.vals.index(lower1));
                        }
                        lower1 += 1;
                        lower2 += 1;
                    },
                    Ordering::Greater => {
                        // Extend stash by updates, with logical compaction applied.
                        self.stash_updates_for_val(source2, lower2);
                        if let Some(off) = self.consolidate_updates() {
                            self.result.vals_offs.push_ref(off);
                            self.result.vals.push_ref(source2.vals.index(lower2));
                        }
                        lower2 += 1;
                    },
                }
            }
            // Merging is complete, but we may have remaining elements to push.
            while lower1 < upper1 {
                self.stash_updates_for_val(source1, lower1);
                if let Some(off) = self.consolidate_updates() {
                    self.result.vals_offs.push_ref(off);
                    self.result.vals.push_ref(source1.vals.index(lower1));
                }
                lower1 += 1;
            }
            while lower2 < upper2 {
                self.stash_updates_for_val(source2, lower2);
                if let Some(off) = self.consolidate_updates() {
                    self.result.vals_offs.push_ref(off);
                    self.result.vals.push_ref(source2.vals.index(lower2));
                }
                lower2 += 1;
            }

            // Values being pushed indicate non-emptiness.
            if self.result.vals.len() > init_vals {
                Some(self.result.vals.len())
            } else {
                None
            }
        }

        /// Transfer updates for an indexed value in `source` into `self`, with compaction applied.
        fn stash_updates_for_val(&mut self, source: &RhhValStorage<L>, index: usize) {
            let (lower, upper) = source.updates_for_value(index);
            for i in lower .. upper {
                // NB: Here is where we would need to look back if `lower == upper`.
                let time = source.times.index(i);
                let diff = source.diffs.index(i);
                let mut new_time = L::TimeContainer::into_owned(time);
                use crate::lattice::Lattice;
                new_time.advance_by(self.description.since().borrow());
                self.update_stash.push((new_time, L::DiffContainer::into_owned(diff)));
            }
        }

        /// Consolidates `self.update_stash` and produces the offset to record, if any.
        fn consolidate_updates(&mut self) -> Option<usize> {
            use crate::consolidation;
            consolidation::consolidate(&mut self.update_stash);
            if !self.update_stash.is_empty() {
                // If there is a single element, equal to a just-prior recorded update,
                // we push nothing and report an unincremented offset to encode this case.
                let time_diff = self.result.times.last().zip(self.result.diffs.last());
                let last_eq = self.update_stash.last().zip(time_diff).map(|((t1, d1), (t2, d2))| {
                    // TODO: The use of `into_owned` is a work-around for not having reference types.
                    *t1 == L::TimeContainer::into_owned(t2) && *d1 == L::DiffContainer::into_owned(d2)
                });
                if self.update_stash.len() == 1 && last_eq.unwrap_or(false) {
                    // Just clear out update_stash, as we won't drain it here.
                    self.update_stash.clear();
                    self.singletons += 1;
                }
                else {
                    // Conventional; move `update_stash` into `updates`.
                    for (time, diff) in self.update_stash.drain(..) {
                        self.result.times.push_own(&time);
                        self.result.diffs.push_own(&diff);
                    }
                }
                Some(self.result.times.len())
            } else {
                None
            }
        }
    }


    /// A cursor through a Robin Hood Hashed list of keys, vals, and such.
    ///
    /// The important detail is that not all of `keys` represent valid keys.
    /// We must consult the key offsets (see `live_key`) to see if the associated data is valid.
    /// Importantly, we should skip over invalid keys, rather than report them as
    /// invalid through `key_valid`: that method is meant to indicate the end of
    /// the cursor, rather than internal state.
    pub struct RhhValCursor<L: Layout>
    where
        layout::Key<L>: Default + HashOrdered,
    {
        /// Absolute position of the current key.
        key_cursor: usize,
        /// Absolute position of the current value.
        val_cursor: usize,
        /// Phantom marker for Rust happiness.
        phantom: PhantomData<L>,
    }

    use crate::trace::implementations::WithLayout;
    impl<L: Layout> WithLayout for RhhValCursor<L>
    where
        layout::Key<L>: Default + HashOrdered,
        for<'a> layout::KeyRef<'a, L>: HashOrdered,
    {
        type Layout = L;
    }

    impl<L: Layout> Cursor for RhhValCursor<L>
    where
        layout::Key<L>: Default + HashOrdered,
        for<'a> layout::KeyRef<'a, L>: HashOrdered,
    {
        type Storage = RhhValBatch<L>;

        fn get_key<'a>(&self, storage: &'a RhhValBatch<L>) -> Option<Self::Key<'a>> { storage.storage.keys.get(self.key_cursor) }
        fn get_val<'a>(&self, storage: &'a RhhValBatch<L>) -> Option<Self::Val<'a>> { if self.val_valid(storage) { storage.storage.vals.get(self.val_cursor) } else { None } }
        fn key<'a>(&self, storage: &'a RhhValBatch<L>) -> Self::Key<'a> { storage.storage.keys.index(self.key_cursor) }
        fn val<'a>(&self, storage: &'a RhhValBatch<L>) -> Self::Val<'a> { storage.storage.vals.index(self.val_cursor) }
        fn map_times<L2: FnMut(Self::TimeGat<'_>, Self::DiffGat<'_>)>(&mut self, storage: &RhhValBatch<L>, mut logic: L2) {
            let (lower, upper) = storage.storage.updates_for_value(self.val_cursor);
            for index in lower .. upper {
                let time = storage.storage.times.index(index);
                let diff = storage.storage.diffs.index(index);
                logic(time, diff);
            }
        }
        fn key_valid(&self, storage: &RhhValBatch<L>) -> bool { self.key_cursor < storage.storage.keys.len() }
        fn val_valid(&self, storage: &RhhValBatch<L>) -> bool { self.val_cursor < storage.storage.values_for_key(self.key_cursor).1 }
        fn step_key(&mut self, storage: &RhhValBatch<L>) {
            // We advance the cursor by one for certain, and then as long as we need to find a valid key.
            self.key_cursor += 1;
            storage.storage.advance_to_live_key(&mut self.key_cursor);

            if self.key_valid(storage) {
                self.rewind_vals(storage);
            }
            else {
                self.key_cursor = storage.storage.keys.len();
            }
        }
        fn seek_key(&mut self, storage: &RhhValBatch<L>, key: Self::Key<'_>) {
            // self.key_cursor += storage.storage.keys.advance(self.key_cursor, storage.storage.keys.len(), |x| x.lt(key));
            let desired = storage.storage.desired_location(&key);
            // Advance the cursor, if `desired` is ahead of it.
            if self.key_cursor < desired {
                self.key_cursor = desired;
            }
            // Advance the cursor as long as we have not found a value greater or equal to `key`.
            // We may have already passed `key`, and confirmed its absence, but our goal is to
            // find the next key afterwards so that users can, for example, alternately iterate.
            while self.key_valid(storage) && storage.storage.advance_key(self.key_cursor, key) {
                // TODO: Based on our encoding, we could skip logarithmically over empty regions by galloping
                //       through `storage.keys_offs`, which stays put for dead space.
                self.key_cursor += 1;
            }

            if self.key_valid(storage) {
                self.rewind_vals(storage);
            }
        }
        fn step_val(&mut self, storage: &RhhValBatch<L>) {
            self.val_cursor += 1;
            if !self.val_valid(storage) {
                self.val_cursor = storage.storage.values_for_key(self.key_cursor).1;
            }
        }
        fn seek_val(&mut self, storage: &RhhValBatch<L>, val: Self::Val<'_>) {
            self.val_cursor += storage.storage.vals.advance(self.val_cursor, storage.storage.values_for_key(self.key_cursor).1, |x| <L::ValContainer as BatchContainer>::reborrow(x).lt(&<L::ValContainer as BatchContainer>::reborrow(val)));
        }
        fn rewind_keys(&mut self, storage: &RhhValBatch<L>) {
            self.key_cursor = 0;
            storage.storage.advance_to_live_key(&mut self.key_cursor);

            if self.key_valid(storage) {
                self.rewind_vals(storage)
            }
        }
        fn rewind_vals(&mut self, storage: &RhhValBatch<L>) {
            self.val_cursor = storage.storage.values_for_key(self.key_cursor).0;
        }
    }
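
    // A hedged sketch of how a cursor over an `RhhValBatch` is typically driven (the
    // batch construction itself is elided; this only exercises methods defined above):
    //
    //     let mut cursor = batch.cursor();
    //     while cursor.key_valid(&batch) {
    //         while cursor.val_valid(&batch) {
    //             cursor.map_times(&batch, |time, diff| { /* inspect updates */ });
    //             cursor.step_val(&batch);
    //         }
    //         cursor.step_key(&batch);
    //     }
    //
    // Keys arrive in `(hash(Key), Key)` order, and dead RHH slots are skipped by
    // `step_key` / `advance_to_live_key`, so the loop never observes padding entries.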

    /// A builder for creating layers from unsorted update tuples.
    pub struct RhhValBuilder<L: Layout, CI>
    where
        layout::Key<L>: Default + HashOrdered,
    {
        result: RhhValStorage<L>,
        singleton: Option<(layout::Time<L>, layout::Diff<L>)>,
        /// Counts the number of singleton optimizations we performed.
        ///
        /// This number allows us to correctly gauge the total number of updates reflected in a batch,
        /// even though `updates.len()` may be much shorter than this amount.
        singletons: usize,
        _marker: PhantomData<CI>,
    }

    impl<L: Layout, CI> RhhValBuilder<L, CI>
    where
        layout::Key<L>: Default + HashOrdered,
    {
        /// Pushes a single update, which may set `self.singleton` rather than push.
        ///
        /// This operation is meant to be equivalent to `self.result.updates.push((time, diff))`.
        /// However, for "clever" reasons it does not do this. Instead, it looks for opportunities
        /// to encode a singleton update as an "absent" update: repeating the most recent offset.
        /// This otherwise invalid state encodes "look back one element".
        ///
        /// When `self.singleton` is `Some`, it means that we have seen one update and it matched the
        /// previously pushed update exactly. In that case, we do not push the update into `updates`.
        /// The update tuple is retained in `self.singleton` in case we see another update and need
        /// to recover the singleton to push it into `updates` to join the second update.
        fn push_update(&mut self, time: layout::Time<L>, diff: layout::Diff<L>) {
            // If a just-pushed update exactly equals `(time, diff)` we can avoid pushing it.
            // TODO: The use of `into_owned` is a bandage for not having references we can compare.
            if self.result.times.last().map(|t| L::TimeContainer::into_owned(t) == time).unwrap_or(false) && self.result.diffs.last().map(|d| L::DiffContainer::into_owned(d) == diff).unwrap_or(false) {
                assert!(self.singleton.is_none());
                self.singleton = Some((time, diff));
            }
            else {
                // If we have pushed a single element, we need to copy it out to meet this one.
                if let Some((time, diff)) = self.singleton.take() {
                    self.result.times.push_own(&time);
                    self.result.diffs.push_own(&diff);
                }
                self.result.times.push_own(&time);
                self.result.diffs.push_own(&diff);
            }
        }
    }
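
    // A worked example of the singleton optimization above (an illustrative update
    // stream, not taken from any real collection): suppose value `a` ends with update
    // `(t0, +1)` and the next value `b` has exactly one update, also `(t0, +1)`.
    //
    //     push_update(t0, +1)   // for `a`: stored, times = [.., t0], diffs = [.., +1]
    //     push_update(t0, +1)   // for `b`: equals the last stored update, so it is stashed
    //                           // in `self.singleton` instead of being pushed again; when `b`
    //                           // is sealed, `vals_offs` repeats its offset, and the reader's
    //                           // `updates_for_value` decodes that empty range as "look back one".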

    impl<L: Layout, CI> Builder for RhhValBuilder<L, CI>
    where
        layout::Key<L>: Default + HashOrdered,
        CI: for<'a> BuilderInput<L::KeyContainer, L::ValContainer, Key<'a> = layout::Key<L>, Time=layout::Time<L>, Diff=layout::Diff<L>>,
        for<'a> L::ValContainer: PushInto<CI::Val<'a>>,
        for<'a> layout::KeyRef<'a, L>: HashOrdered,
    {
        type Input = CI;
        type Time = layout::Time<L>;
        type Output = RhhValBatch<L>;

        fn with_capacity(keys: usize, vals: usize, upds: usize) -> Self {

            // Double the capacity for RHH; probably excessive.
            let rhh_capacity = 2 * keys;
            let divisor = RhhValStorage::<L>::divisor_for_capacity(rhh_capacity);
            // We want some additive slop, in case we spill over.
            // This number magically chosen based on nothing in particular.
            // Worst case, we will re-alloc and copy if we spill beyond this.
            let keys = rhh_capacity + 10;

            // We don't introduce zero offsets as they will be introduced by the first `push` call.
            Self {
                result: RhhValStorage {
                    keys: L::KeyContainer::with_capacity(keys),
                    keys_offs: L::OffsetContainer::with_capacity(keys + 1),
                    vals: L::ValContainer::with_capacity(vals),
                    vals_offs: L::OffsetContainer::with_capacity(vals + 1),
                    times: L::TimeContainer::with_capacity(upds),
                    diffs: L::DiffContainer::with_capacity(upds),
                    key_count: 0,
                    key_capacity: rhh_capacity,
                    divisor,
                },
                singleton: None,
                singletons: 0,
                _marker: PhantomData,
            }
        }

        #[inline]
        fn push(&mut self, chunk: &mut Self::Input) {
            for item in chunk.drain() {
                let (key, val, time, diff) = CI::into_parts(item);
                // Perhaps this is a continuation of an already received key.
                if self.result.keys.last().map(|k| CI::key_eq(&key, k)).unwrap_or(false) {
                    // Perhaps this is a continuation of an already received value.
                    if self.result.vals.last().map(|v| CI::val_eq(&val, v)).unwrap_or(false) {
                        self.push_update(time, diff);
                    } else {
                        // New value; complete representation of prior value.
                        self.result.vals_offs.push_ref(self.result.times.len());
                        if self.singleton.take().is_some() { self.singletons += 1; }
                        self.push_update(time, diff);
                        self.result.vals.push_into(val);
                    }
                } else {
                    // New key; complete representation of prior key.
                    self.result.vals_offs.push_ref(self.result.times.len());
                    if self.singleton.take().is_some() { self.singletons += 1; }
                    self.result.keys_offs.push_ref(self.result.vals.len());
                    self.push_update(time, diff);
                    self.result.vals.push_into(val);
                    // Insert the key, but with no specified offset.
                    self.result.insert_key_own(&key, None);
                }
            }
        }

        #[inline(never)]
        fn done(mut self, description: Description<Self::Time>) -> RhhValBatch<L> {
            // Record the final offsets.
            self.result.vals_offs.push_ref(self.result.times.len());
            // Remove any pending singleton, and if it was set increment our count.
            if self.singleton.take().is_some() { self.singletons += 1; }
            self.result.keys_offs.push_ref(self.result.vals.len());
            RhhValBatch {
                updates: self.result.times.len() + self.singletons,
                storage: self.result,
                description,
            }
        }

        fn seal(chain: &mut Vec<Self::Input>, description: Description<Self::Time>) -> Self::Output {
            let (keys, vals, upds) = Self::Input::key_val_upd_counts(&chain[..]);
            let mut builder = Self::with_capacity(keys, vals, upds);
            for mut chunk in chain.drain(..) {
                builder.push(&mut chunk);
            }

            builder.done(description)
        }
    }

}

mod key_batch {

    // Copy the above, once it works!

}