mz_persist_client/internal/
trace.rs

1// Copyright Materialize, Inc. and contributors. All rights reserved.
2//
3// Use of this software is governed by the Business Source License
4// included in the LICENSE file.
5//
6// As of the Change Date specified in that file, in accordance with
7// the Business Source License, use of this software will be governed
8// by the Apache License, Version 2.0.
9
10//! An append-only collection of compactable update batches. The Spine below is
11//! a fork of Differential Dataflow's [Spine] with minimal modifications. The
12//! original Spine code is designed for incremental (via "fuel"ing) synchronous
13//! merge of in-memory batches. Persist doesn't want compaction to block
14//! incoming writes and, in fact, may in the future elect to push the work of
15//! compaction onto another machine entirely via RPC. As a result, we abuse the
16//! Spine code as follows:
17//!
18//! [Spine]: differential_dataflow::trace::implementations::spine_fueled::Spine
19//!
20//! - The normal Spine works in terms of [Batch] impls. A `Batch` is added to
21//!   the Spine. As progress is made, the Spine will merge two batches together
22//!   by: constructing a [Batch::Merger], giving it bits of fuel to
23//!   incrementally perform the merge (which spreads out the work, keeping
24//!   latencies even), and then once it's done fueling extracting the new single
25//!   output `Batch` and discarding the inputs.
26//! - Persist instead represents a batch of blob data with a [HollowBatch]
27//!   pointer which contains the normal `Batch` metadata plus the keys necessary
28//!   to retrieve the updates.
29//! - [SpineBatch] wraps `HollowBatch` and has a [FuelingMerge] companion
30//!   (analogous to `Batch::Merger`) that allows us to represent a merge as it
31//!   is fueling. Normally, this would represent real incremental compaction
32//!   progress, but in persist, it's simply a bookkeeping mechanism. Once fully
33//!   fueled, the `FuelingMerge` is turned into a fueled [SpineBatch],
34//!   which to the Spine is indistinguishable from a merged batch. At this
35//!   point, it is eligible for asynchronous compaction and a `FueledMergeReq`
36//!   is generated.
37//! - At any later point, this request may be answered via
38//!   [Trace::apply_merge_res_checked] or [Trace::apply_merge_res_unchecked].
39//!   This internally replaces the`SpineBatch`, which has no
40//!   effect on the structure of `Spine` but replaces the metadata
41//!   in persist's state to point at the new batch.
42//! - `SpineBatch` is explictly allowed to accumulate a list of `HollowBatch`s.
43//!   This decouples compaction from Spine progress and also allows us to reduce
44//!   write amplification by merging `N` batches at once where `N` can be
45//!   greater than 2.
46//!
47//! [Batch]: differential_dataflow::trace::Batch
48//! [Batch::Merger]: differential_dataflow::trace::Batch::Merger
49
50use std::cmp::Ordering;
51use std::collections::{BTreeMap, BTreeSet};
52use std::fmt::Debug;
53use std::mem;
54use std::ops::Range;
55use std::sync::Arc;
56
57use arrayvec::ArrayVec;
58use differential_dataflow::difference::Monoid;
59use differential_dataflow::lattice::Lattice;
60use differential_dataflow::trace::Description;
61use itertools::Itertools;
62use mz_ore::cast::CastFrom;
63use mz_persist::metrics::ColumnarMetrics;
64use mz_persist_types::Codec64;
65use serde::{Serialize, Serializer};
66use timely::PartialOrder;
67use timely::progress::frontier::AntichainRef;
68use timely::progress::{Antichain, Timestamp};
69use tracing::warn;
70
71use crate::internal::paths::WriterKey;
72use crate::internal::state::{HollowBatch, RunId};
73
74use super::state::RunPart;
75
76#[derive(Debug, Clone, PartialEq)]
77pub struct FueledMergeReq<T> {
78    pub id: SpineId,
79    pub desc: Description<T>,
80    pub inputs: Vec<IdHollowBatch<T>>,
81}
82
83#[derive(Debug)]
84pub struct FueledMergeRes<T> {
85    pub output: HollowBatch<T>,
86    pub input: CompactionInput,
87    pub new_active_compaction: Option<ActiveCompaction>,
88}
89
90/// An append-only collection of compactable update batches.
91///
92/// In an effort to keep our fork of Spine as close as possible to the original,
93/// we push as many changes as possible into this wrapper.
94#[derive(Debug, Clone)]
95pub struct Trace<T> {
96    spine: Spine<T>,
97    pub(crate) roundtrip_structure: bool,
98}
99
100#[cfg(any(test, debug_assertions))]
101impl<T: PartialEq> PartialEq for Trace<T> {
102    fn eq(&self, other: &Self) -> bool {
103        // Deconstruct self and other so we get a compile failure if new fields
104        // are added.
105        let Trace {
106            spine: _,
107            roundtrip_structure: _,
108        } = self;
109        let Trace {
110            spine: _,
111            roundtrip_structure: _,
112        } = other;
113
114        // Intentionally use HollowBatches for this comparison so we ignore
115        // differences in spine layers.
116        self.batches().eq(other.batches())
117    }
118}
119
120impl<T: Timestamp + Lattice> Default for Trace<T> {
121    fn default() -> Self {
122        Self {
123            spine: Spine::new(),
124            roundtrip_structure: true,
125        }
126    }
127}
128
129#[derive(Clone, Debug, Serialize)]
130pub struct ThinSpineBatch<T> {
131    pub(crate) level: usize,
132    pub(crate) desc: Description<T>,
133    pub(crate) parts: Vec<SpineId>,
134    /// NB: this exists to validate legacy batch bounds during the migration;
135    /// it can be deleted once the roundtrip_structure flag is permanently rolled out.
136    pub(crate) descs: Vec<Description<T>>,
137}
138
139impl<T: PartialEq> PartialEq for ThinSpineBatch<T> {
140    fn eq(&self, other: &Self) -> bool {
141        // Ignore the temporary descs vector when comparing for equality.
142        (self.level, &self.desc, &self.parts).eq(&(other.level, &other.desc, &other.parts))
143    }
144}
145
146#[derive(Clone, Debug, Eq, PartialEq, Serialize)]
147pub struct ThinMerge<T> {
148    pub(crate) since: Antichain<T>,
149    pub(crate) remaining_work: usize,
150    pub(crate) active_compaction: Option<ActiveCompaction>,
151}
152
153impl<T: Clone> ThinMerge<T> {
154    fn fueling(merge: &FuelingMerge<T>) -> Self {
155        ThinMerge {
156            since: merge.since.clone(),
157            remaining_work: merge.remaining_work,
158            active_compaction: None,
159        }
160    }
161
162    fn fueled(batch: &SpineBatch<T>) -> Self {
163        ThinMerge {
164            since: batch.desc.since().clone(),
165            remaining_work: 0,
166            active_compaction: batch.active_compaction.clone(),
167        }
168    }
169}
170
171/// This is a "flattened" representation of a Trace. Goals:
172/// - small updates to the trace should result in small differences in the `FlatTrace`;
173/// - two `FlatTrace`s should be efficient to diff;
174/// - converting to and from a `Trace` should be relatively straightforward.
175///
176/// These goals are all somewhat in tension, and the space of possible representations is pretty
177/// large. See individual fields for comments on some of the tradeoffs.
178#[derive(Clone, Debug)]
179pub struct FlatTrace<T> {
180    pub(crate) since: Antichain<T>,
181    /// Hollow batches without an associated ID. If this flattened trace contains spine batches,
182    /// we can figure out which legacy batch belongs in which spine batch by comparing the `desc`s.
183    /// Previously, we serialized a trace as just this list of batches. Keeping this data around
184    /// helps ensure backwards compatibility. In the near future, we may still keep some batches
185    /// here to help minimize the size of diffs -- rewriting all the hollow batches in a shard
186    /// can be prohibitively expensive. Eventually, we'd like to remove this in favour of the
187    /// collection below.
188    pub(crate) legacy_batches: BTreeMap<Arc<HollowBatch<T>>, ()>,
189    /// Hollow batches _with_ an associated ID. Spine batches can reference these hollow batches
190    /// by id directly.
191    pub(crate) hollow_batches: BTreeMap<SpineId, Arc<HollowBatch<T>>>,
192    /// Spine batches stored by ID. We reference hollow batches by ID, instead of inlining them,
193    /// to make differential updates smaller when two batches merge together. We also store the
194    /// level on the batch, instead of mapping from level to a list of batches... the level of a
195    /// spine batch doesn't change over time, but the list of batches at a particular level does.
196    pub(crate) spine_batches: BTreeMap<SpineId, ThinSpineBatch<T>>,
197    /// In-progress merges. We store this by spine id instead of level to prepare for some possible
198    /// generalizations to spine (merging N of M batches at a level). This is also a natural place
199    /// to store incremental merge progress in the future.
200    pub(crate) merges: BTreeMap<SpineId, ThinMerge<T>>,
201}
202
203impl<T: Timestamp + Lattice> Trace<T> {
204    pub(crate) fn flatten(&self) -> FlatTrace<T> {
205        let since = self.spine.since.clone();
206        let mut legacy_batches = BTreeMap::new();
207        let mut hollow_batches = BTreeMap::new();
208        let mut spine_batches = BTreeMap::new();
209        let mut merges = BTreeMap::new();
210
211        let mut push_spine_batch = |level: usize, batch: &SpineBatch<T>| {
212            let id = batch.id();
213            let desc = batch.desc.clone();
214            let mut parts = Vec::with_capacity(batch.parts.len());
215            let mut descs = Vec::with_capacity(batch.parts.len());
216            for IdHollowBatch { id, batch } in &batch.parts {
217                parts.push(*id);
218                descs.push(batch.desc.clone());
219                // Ideally, we'd like to put all batches in the hollow_batches collection, since
220                // tracking the spine id reduces ambiguity and makes diffing cheaper. However,
221                // we currently keep most batches in the legacy collection for backwards
222                // compatibility.
223                // As an exception, we add batches with empty time ranges to hollow_batches:
224                // they're otherwise not guaranteed to be unique, and since we only started writing
225                // them down recently there's no backwards compatibility risk.
226                if batch.desc.lower() == batch.desc.upper() {
227                    hollow_batches.insert(*id, Arc::clone(batch));
228                } else {
229                    legacy_batches.insert(Arc::clone(batch), ());
230                }
231            }
232
233            let spine_batch = ThinSpineBatch {
234                level,
235                desc,
236                parts,
237                descs,
238            };
239            spine_batches.insert(id, spine_batch);
240        };
241
242        for (level, state) in self.spine.merging.iter().enumerate() {
243            for batch in &state.batches {
244                push_spine_batch(level, batch);
245                if let Some(c) = &batch.active_compaction {
246                    let previous = merges.insert(batch.id, ThinMerge::fueled(batch));
247                    assert!(
248                        previous.is_none(),
249                        "recording a compaction for a batch that already exists! (level={level}, id={:?}, compaction={c:?})",
250                        batch.id,
251                    )
252                }
253            }
254            if let Some(IdFuelingMerge { id, merge }) = state.merge.as_ref() {
255                let previous = merges.insert(*id, ThinMerge::fueling(merge));
256                assert!(
257                    previous.is_none(),
258                    "fueling a merge for a batch that already exists! (level={level}, id={id:?}, merge={merge:?})"
259                )
260            }
261        }
262
263        if !self.roundtrip_structure {
264            assert!(hollow_batches.is_empty());
265            spine_batches.clear();
266            merges.clear();
267        }
268
269        FlatTrace {
270            since,
271            legacy_batches,
272            hollow_batches,
273            spine_batches,
274            merges,
275        }
276    }
277    pub(crate) fn unflatten(value: FlatTrace<T>) -> Result<Self, String> {
278        let FlatTrace {
279            since,
280            legacy_batches,
281            mut hollow_batches,
282            spine_batches,
283            mut merges,
284        } = value;
285
286        // If the flattened representation has spine batches (or is empty)
287        // we know to preserve the structure for this trace.
288        let roundtrip_structure = !spine_batches.is_empty() || legacy_batches.is_empty();
289
290        // We need to look up legacy batches somehow, but we don't have a spine id for them.
291        // Instead, we rely on the fact that the spine must store them in antichain order.
292        // Our timestamp type may not be totally ordered, so we need to implement our own comparator
293        // here. Persist's invariants ensure that all the frontiers we're comparing are comparable,
294        // though.
295        let compare_chains = |left: &Antichain<T>, right: &Antichain<T>| {
296            if PartialOrder::less_than(left, right) {
297                Ordering::Less
298            } else if PartialOrder::less_than(right, left) {
299                Ordering::Greater
300            } else {
301                Ordering::Equal
302            }
303        };
304        let mut legacy_batches: Vec<_> = legacy_batches.into_iter().map(|(k, _)| k).collect();
305        legacy_batches.sort_by(|a, b| compare_chains(a.desc.lower(), b.desc.lower()).reverse());
306
307        let mut pop_batch =
308            |id: SpineId, expected_desc: Option<&Description<T>>| -> Result<_, String> {
309                if let Some(batch) = hollow_batches.remove(&id) {
310                    if let Some(desc) = expected_desc {
311                        // We don't expect the desc's upper and lower to change for a given spine id.
312                        assert_eq!(desc.lower(), batch.desc.lower());
313                        assert_eq!(desc.upper(), batch.desc.upper());
314                        // Due to the way thin spine batches are diffed, the sinces can be out of sync.
315                        // This should be rare, and hopefully impossible once we change how diffs work.
316                        if desc.since() != batch.desc.since() {
317                            warn!(
318                                "unexpected since out of sync for spine batch: {:?} != {:?}",
319                                desc.since().elements(),
320                                batch.desc.since().elements()
321                            );
322                        }
323                    }
324                    return Ok(IdHollowBatch { id, batch });
325                }
326                let mut batch = legacy_batches
327                    .pop()
328                    .ok_or_else(|| format!("missing referenced hollow batch {id:?}"))?;
329
330                let Some(expected_desc) = expected_desc else {
331                    return Ok(IdHollowBatch { id, batch });
332                };
333
334                if expected_desc.lower() != batch.desc.lower() {
335                    return Err(format!(
336                        "hollow batch lower {:?} did not match expected lower {:?}",
337                        batch.desc.lower().elements(),
338                        expected_desc.lower().elements()
339                    ));
340                }
341
342                // Empty legacy batches are not deterministic: different nodes may split them up
343                // in different ways. For now, we rearrange them such to match the spine data.
344                if batch.parts.is_empty() && batch.run_splits.is_empty() && batch.len == 0 {
345                    let mut new_upper = batch.desc.upper().clone();
346
347                    // While our current batch is too small, and there's another empty batch
348                    // in the list, roll it in.
349                    while PartialOrder::less_than(&new_upper, expected_desc.upper()) {
350                        let Some(next_batch) = legacy_batches.pop() else {
351                            break;
352                        };
353                        if next_batch.is_empty() {
354                            new_upper.clone_from(next_batch.desc.upper());
355                        } else {
356                            legacy_batches.push(next_batch);
357                            break;
358                        }
359                    }
360
361                    // If our current batch is too large, split it by the expected upper
362                    // and preserve the remainder.
363                    if PartialOrder::less_than(expected_desc.upper(), &new_upper) {
364                        legacy_batches.push(Arc::new(HollowBatch::empty(Description::new(
365                            expected_desc.upper().clone(),
366                            new_upper.clone(),
367                            batch.desc.since().clone(),
368                        ))));
369                        new_upper.clone_from(expected_desc.upper());
370                    }
371                    batch = Arc::new(HollowBatch::empty(Description::new(
372                        batch.desc.lower().clone(),
373                        new_upper,
374                        batch.desc.since().clone(),
375                    )))
376                }
377
378                if expected_desc.upper() != batch.desc.upper() {
379                    return Err(format!(
380                        "hollow batch upper {:?} did not match expected upper {:?}",
381                        batch.desc.upper().elements(),
382                        expected_desc.upper().elements()
383                    ));
384                }
385
386                Ok(IdHollowBatch { id, batch })
387            };
388
389        let (upper, next_id) = if let Some((id, batch)) = spine_batches.last_key_value() {
390            (batch.desc.upper().clone(), id.1)
391        } else {
392            (Antichain::from_elem(T::minimum()), 0)
393        };
394        let levels = spine_batches
395            .first_key_value()
396            .map(|(_, batch)| batch.level + 1)
397            .unwrap_or(0);
398        let mut merging = vec![MergeState::default(); levels];
399        for (id, batch) in spine_batches {
400            let level = batch.level;
401
402            let descs = batch.descs.iter().map(Some).chain(std::iter::repeat_n(
403                None,
404                batch.parts.len() - batch.descs.len(),
405            ));
406            let parts = batch
407                .parts
408                .into_iter()
409                .zip_eq(descs)
410                .map(|(id, desc)| pop_batch(id, desc))
411                .collect::<Result<Vec<_>, _>>()?;
412            let len = parts.iter().map(|p| (*p).batch.len).sum();
413            let active_compaction = merges.remove(&id).and_then(|m| m.active_compaction);
414            let batch = SpineBatch {
415                id,
416                desc: batch.desc,
417                parts,
418                active_compaction,
419                len,
420            };
421
422            let state = &mut merging[level];
423
424            state.push_batch(batch);
425            if let Some(id) = state.id() {
426                if let Some(merge) = merges.remove(&id) {
427                    state.merge = Some(IdFuelingMerge {
428                        id,
429                        merge: FuelingMerge {
430                            since: merge.since,
431                            remaining_work: merge.remaining_work,
432                        },
433                    })
434                }
435            }
436        }
437
438        let mut trace = Trace {
439            spine: Spine {
440                effort: 1,
441                next_id,
442                since,
443                upper,
444                merging,
445            },
446            roundtrip_structure,
447        };
448
449        fn check_empty(name: &str, len: usize) -> Result<(), String> {
450            if len != 0 {
451                Err(format!("{len} {name} left after reconstructing spine"))
452            } else {
453                Ok(())
454            }
455        }
456
457        if roundtrip_structure {
458            check_empty("legacy batches", legacy_batches.len())?;
459        } else {
460            // If the structure wasn't actually serialized, we may have legacy batches left over.
461            for batch in legacy_batches.into_iter().rev() {
462                trace.push_batch_no_merge_reqs(Arc::unwrap_or_clone(batch));
463            }
464        }
465        check_empty("hollow batches", hollow_batches.len())?;
466        check_empty("merges", merges.len())?;
467
468        debug_assert_eq!(trace.validate(), Ok(()), "{:?}", trace);
469
470        Ok(trace)
471    }
472}
473
474#[derive(Clone, Debug, Default)]
475pub(crate) struct SpineMetrics {
476    pub compact_batches: u64,
477    pub compacting_batches: u64,
478    pub noncompact_batches: u64,
479}
480
481impl<T> Trace<T> {
482    pub fn since(&self) -> &Antichain<T> {
483        &self.spine.since
484    }
485
486    pub fn upper(&self) -> &Antichain<T> {
487        &self.spine.upper
488    }
489
490    pub fn map_batches<'a, F: FnMut(&'a HollowBatch<T>)>(&'a self, mut f: F) {
491        for batch in self.batches() {
492            f(batch);
493        }
494    }
495
496    pub fn batches(&self) -> impl Iterator<Item = &HollowBatch<T>> {
497        self.spine
498            .spine_batches()
499            .flat_map(|b| b.parts.as_slice())
500            .map(|b| &*b.batch)
501    }
502
503    pub fn num_spine_batches(&self) -> usize {
504        self.spine.spine_batches().count()
505    }
506
507    #[cfg(test)]
508    pub fn num_hollow_batches(&self) -> usize {
509        self.batches().count()
510    }
511
512    #[cfg(test)]
513    pub fn num_updates(&self) -> usize {
514        self.batches().map(|b| b.len).sum()
515    }
516}
517
518impl<T: Timestamp + Lattice> Trace<T> {
519    pub fn downgrade_since(&mut self, since: &Antichain<T>) {
520        self.spine.since.clone_from(since);
521    }
522
523    #[must_use]
524    pub fn push_batch(&mut self, batch: HollowBatch<T>) -> Vec<FueledMergeReq<T>> {
525        let mut merge_reqs = Vec::new();
526        self.spine.insert(
527            batch,
528            &mut SpineLog::Enabled {
529                merge_reqs: &mut merge_reqs,
530            },
531        );
532        debug_assert_eq!(self.spine.validate(), Ok(()), "{:?}", self);
533        // Spine::roll_up (internally used by insert) clears all batches out of
534        // levels below a target by walking up from level 0 and merging each
535        // level into the next (providing the necessary fuel). In practice, this
536        // means we'll get a series of requests like `(a, b), (a, b, c), ...`.
537        // It's a waste to do all of these (we'll throw away the results), so we
538        // filter out any that are entirely covered by some other request.
539        Self::remove_redundant_merge_reqs(merge_reqs)
540    }
541
542    pub fn claim_compaction(&mut self, id: SpineId, compaction: ActiveCompaction) {
543        // TODO: we ought to be able to look up the id for a batch by binary searching the levels.
544        // In the meantime, search backwards, since most compactions are for recent batches.
545        for batch in self.spine.spine_batches_mut().rev() {
546            if batch.id == id {
547                batch.active_compaction = Some(compaction);
548                break;
549            }
550        }
551    }
552
553    /// The same as [Self::push_batch] but without the `FueledMergeReq`s, which
554    /// account for a surprising amount of cpu in prod. database-issues#5411
555    pub(crate) fn push_batch_no_merge_reqs(&mut self, batch: HollowBatch<T>) {
556        self.spine.insert(batch, &mut SpineLog::Disabled);
557    }
558
559    /// Apply some amount of effort to trace maintenance.
560    ///
561    /// The units of effort are updates, and the method should be thought of as
562    /// analogous to inserting as many empty updates, where the trace is
563    /// permitted to perform proportionate work.
564    ///
565    /// Returns true if this did work and false if it left the spine unchanged.
566    #[must_use]
567    pub fn exert(&mut self, fuel: usize) -> (Vec<FueledMergeReq<T>>, bool) {
568        let mut merge_reqs = Vec::new();
569        let did_work = self.spine.exert(
570            fuel,
571            &mut SpineLog::Enabled {
572                merge_reqs: &mut merge_reqs,
573            },
574        );
575        debug_assert_eq!(self.spine.validate(), Ok(()), "{:?}", self);
576        // See the comment in [Self::push_batch].
577        let merge_reqs = Self::remove_redundant_merge_reqs(merge_reqs);
578        (merge_reqs, did_work)
579    }
580
581    /// Validates invariants.
582    ///
583    /// See `Spine::validate` for details.
584    pub fn validate(&self) -> Result<(), String> {
585        self.spine.validate()
586    }
587
588    /// Obtain all fueled merge reqs that either have no active compaction, or the previous
589    /// compaction was started at or before the threshold time, in order from oldest to newest.
590    pub(crate) fn fueled_merge_reqs_before_ms(
591        &self,
592        threshold_ms: u64,
593        threshold_writer: Option<WriterKey>,
594    ) -> impl Iterator<Item = FueledMergeReq<T>> + '_ {
595        self.spine
596            .spine_batches()
597            .filter(move |b| {
598                let noncompact = !b.is_compact();
599                let old_writer = threshold_writer.as_ref().map_or(false, |min_writer| {
600                    b.parts.iter().any(|b| {
601                        b.batch
602                            .parts
603                            .iter()
604                            .any(|p| p.writer_key().map_or(false, |writer| writer < *min_writer))
605                    })
606                });
607                noncompact || old_writer
608            })
609            .filter(move |b| {
610                // Either there's no active compaction, or the last active compaction
611                // is not after the timeout timestamp.
612                b.active_compaction
613                    .as_ref()
614                    .map_or(true, move |c| c.start_ms <= threshold_ms)
615            })
616            .map(|b| FueledMergeReq {
617                id: b.id,
618                desc: b.desc.clone(),
619                inputs: b.parts.clone(),
620            })
621    }
622
623    // This is only called with the results of one `insert` and so the length of
624    // `merge_reqs` is bounded by the number of levels in the spine (or possibly
625    // some small constant multiple?). The number of levels is logarithmic in the
626    // number of updates in the spine, so this number should stay very small. As
627    // a result, we simply use the naive O(n^2) algorithm here instead of doing
628    // anything fancy with e.g. interval trees.
629    fn remove_redundant_merge_reqs(
630        mut merge_reqs: Vec<FueledMergeReq<T>>,
631    ) -> Vec<FueledMergeReq<T>> {
632        // Returns true if b0 covers b1, false otherwise.
633        fn covers<T: PartialOrder>(b0: &FueledMergeReq<T>, b1: &FueledMergeReq<T>) -> bool {
634            // TODO: can we relax or remove this since check?
635            b0.id.covers(b1.id) && b0.desc.since() == b1.desc.since()
636        }
637
638        let mut ret = Vec::<FueledMergeReq<T>>::with_capacity(merge_reqs.len());
639        // In practice, merge_reqs will come in sorted such that the "large"
640        // requests are later. Take advantage of this by processing back to
641        // front.
642        while let Some(merge_req) = merge_reqs.pop() {
643            let covered = ret.iter().any(|r| covers(r, &merge_req));
644            if !covered {
645                // Now check if anything we've already staged is covered by this
646                // new req. In practice, the merge_reqs come in sorted and so
647                // this `retain` is a no-op.
648                ret.retain(|r| !covers(&merge_req, r));
649                ret.push(merge_req);
650            }
651        }
652        ret
653    }
654
655    pub fn spine_metrics(&self) -> SpineMetrics {
656        let mut metrics = SpineMetrics::default();
657        for batch in self.spine.spine_batches() {
658            if batch.is_compact() {
659                metrics.compact_batches += 1;
660            } else if batch.is_merging() {
661                metrics.compacting_batches += 1;
662            } else {
663                metrics.noncompact_batches += 1;
664            }
665        }
666        metrics
667    }
668}
669
670impl<T: Timestamp + Lattice + Codec64> Trace<T> {
671    pub fn apply_merge_res_checked<D: Codec64 + Monoid + PartialEq>(
672        &mut self,
673        res: &FueledMergeRes<T>,
674        metrics: &ColumnarMetrics,
675    ) -> ApplyMergeResult {
676        for batch in self.spine.spine_batches_mut().rev() {
677            let result = batch.maybe_replace_checked::<D>(res, metrics);
678            if result.matched() {
679                return result;
680            }
681        }
682        ApplyMergeResult::NotAppliedNoMatch
683    }
684
685    pub fn apply_merge_res_unchecked(&mut self, res: &FueledMergeRes<T>) -> ApplyMergeResult {
686        for batch in self.spine.spine_batches_mut().rev() {
687            let result = batch.maybe_replace_unchecked(res);
688            if result.matched() {
689                return result;
690            }
691        }
692        ApplyMergeResult::NotAppliedNoMatch
693    }
694
695    pub fn apply_tombstone_merge(&mut self, desc: &Description<T>) -> ApplyMergeResult {
696        for batch in self.spine.spine_batches_mut().rev() {
697            let result = batch.maybe_replace_with_tombstone(desc);
698            if result.matched() {
699                return result;
700            }
701        }
702        ApplyMergeResult::NotAppliedNoMatch
703    }
704}
705
706/// A log of what transitively happened during a Spine operation: e.g.
707/// FueledMergeReqs were generated.
708enum SpineLog<'a, T> {
709    Enabled {
710        merge_reqs: &'a mut Vec<FueledMergeReq<T>>,
711    },
712    Disabled,
713}
714
715#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
716pub enum CompactionInput {
717    /// We don't know what our inputs were; this should only be used for
718    /// unchecked legacy replacements.
719    Legacy,
720    /// This compaction output is a total replacement for all batches in this id range.
721    IdRange(SpineId),
722    /// This compaction output replaces the specified runs in this id range.
723    PartialBatch(SpineId, BTreeSet<RunId>),
724}
725
726#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
727pub struct SpineId(pub usize, pub usize);
728
729impl Serialize for SpineId {
730    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
731    where
732        S: Serializer,
733    {
734        let SpineId(lo, hi) = self;
735        serializer.serialize_str(&format!("{lo}-{hi}"))
736    }
737}
738
739/// Creates a `SpineId` that covers the range of ids in the set.
740pub fn id_range(ids: BTreeSet<SpineId>) -> SpineId {
741    let mut id_iter = ids.iter().copied();
742    let Some(mut result) = id_iter.next() else {
743        panic!("at least one batch must be present")
744    };
745
746    for id in id_iter {
747        assert_eq!(
748            result.1, id.0,
749            "expected contiguous ids, but {result:?} is not adjacent to {id:?} in ids {ids:?}"
750        );
751        result.1 = id.1;
752    }
753    result
754}
755
756impl SpineId {
757    fn covers(self, other: SpineId) -> bool {
758        self.0 <= other.0 && other.1 <= self.1
759    }
760}
761
762#[derive(Debug, Clone, PartialEq)]
763pub struct IdHollowBatch<T> {
764    pub id: SpineId,
765    pub batch: Arc<HollowBatch<T>>,
766}
767
768#[derive(Debug, Clone, Eq, PartialEq, Serialize)]
769pub struct ActiveCompaction {
770    pub start_ms: u64,
771}
772
773#[derive(Debug, Clone, PartialEq)]
774struct SpineBatch<T> {
775    id: SpineId,
776    desc: Description<T>,
777    parts: Vec<IdHollowBatch<T>>,
778    active_compaction: Option<ActiveCompaction>,
779    // A cached version of parts.iter().map(|x| x.len).sum()
780    len: usize,
781}
782
783impl<T> SpineBatch<T> {
784    fn merged(batch: IdHollowBatch<T>) -> Self
785    where
786        T: Clone,
787    {
788        Self {
789            id: batch.id,
790            desc: batch.batch.desc.clone(),
791            len: batch.batch.len,
792            parts: vec![batch],
793            active_compaction: None,
794        }
795    }
796}
797
798#[derive(Debug, Copy, Clone)]
799pub enum ApplyMergeResult {
800    AppliedExact,
801    AppliedSubset,
802    NotAppliedNoMatch,
803    NotAppliedInvalidSince,
804    NotAppliedTooManyUpdates,
805}
806
807impl ApplyMergeResult {
808    pub fn applied(&self) -> bool {
809        match self {
810            ApplyMergeResult::AppliedExact | ApplyMergeResult::AppliedSubset => true,
811            _ => false,
812        }
813    }
814    pub fn matched(&self) -> bool {
815        match self {
816            ApplyMergeResult::AppliedExact
817            | ApplyMergeResult::AppliedSubset
818            | ApplyMergeResult::NotAppliedTooManyUpdates => true,
819            _ => false,
820        }
821    }
822}
823
824impl<T: Timestamp + Lattice> SpineBatch<T> {
825    pub fn lower(&self) -> &Antichain<T> {
826        self.desc().lower()
827    }
828
829    pub fn upper(&self) -> &Antichain<T> {
830        self.desc().upper()
831    }
832
833    fn id(&self) -> SpineId {
834        debug_assert_eq!(self.parts.first().map(|x| x.id.0), Some(self.id.0));
835        debug_assert_eq!(self.parts.last().map(|x| x.id.1), Some(self.id.1));
836        self.id
837    }
838
839    pub fn is_compact(&self) -> bool {
840        // A compact batch has at most one run.
841        // This check used to be if there was at most one hollow batch with at most one run,
842        // but that was a bit too strict since introducing incremental compaction.
843        // Incremental compaction can result in a batch with a single run, but multiple empty
844        // hollow batches, which we still consider compact. As levels are merged, we
845        // will eventually clean up the empty hollow batches.
846        self.parts
847            .iter()
848            .map(|p| p.batch.run_meta.len())
849            .sum::<usize>()
850            <= 1
851    }
852
853    pub fn is_merging(&self) -> bool {
854        self.active_compaction.is_some()
855    }
856
857    fn desc(&self) -> &Description<T> {
858        &self.desc
859    }
860
861    pub fn len(&self) -> usize {
862        // NB: This is an upper bound on len for a non-compact batch; we won't know for sure until
863        // we compact it.
864        debug_assert_eq!(
865            self.len,
866            self.parts.iter().map(|x| x.batch.len).sum::<usize>()
867        );
868        self.len
869    }
870
871    pub fn is_empty(&self) -> bool {
872        self.len() == 0
873    }
874
875    pub fn empty(
876        id: SpineId,
877        lower: Antichain<T>,
878        upper: Antichain<T>,
879        since: Antichain<T>,
880    ) -> Self {
881        SpineBatch::merged(IdHollowBatch {
882            id,
883            batch: Arc::new(HollowBatch::empty(Description::new(lower, upper, since))),
884        })
885    }
886
887    pub fn begin_merge(
888        bs: &[Self],
889        compaction_frontier: Option<AntichainRef<T>>,
890    ) -> Option<IdFuelingMerge<T>> {
891        let from = bs.first()?.id().0;
892        let until = bs.last()?.id().1;
893        let id = SpineId(from, until);
894        let mut sinces = bs.iter().map(|b| b.desc().since());
895        let mut since = sinces.next()?.clone();
896        for b in bs {
897            since.join_assign(b.desc().since())
898        }
899        if let Some(compaction_frontier) = compaction_frontier {
900            since.join_assign(&compaction_frontier.to_owned());
901        }
902        let remaining_work = bs.iter().map(|x| x.len()).sum();
903        Some(IdFuelingMerge {
904            id,
905            merge: FuelingMerge {
906                since,
907                remaining_work,
908            },
909        })
910    }
911
912    #[cfg(test)]
913    fn describe(&self, extended: bool) -> String {
914        let SpineBatch {
915            id,
916            parts,
917            desc,
918            active_compaction,
919            len,
920        } = self;
921        let compaction = match active_compaction {
922            None => "".to_owned(),
923            Some(c) => format!(" (c@{})", c.start_ms),
924        };
925        match extended {
926            false => format!(
927                "[{}-{}]{:?}{:?}{}/{}{compaction}",
928                id.0,
929                id.1,
930                desc.lower().elements(),
931                desc.upper().elements(),
932                parts.len(),
933                len
934            ),
935            true => {
936                format!(
937                    "[{}-{}]{:?}{:?}{:?} {}/{}{}{compaction}",
938                    id.0,
939                    id.1,
940                    desc.lower().elements(),
941                    desc.upper().elements(),
942                    desc.since().elements(),
943                    parts.len(),
944                    len,
945                    parts
946                        .iter()
947                        .flat_map(|x| x.batch.parts.iter())
948                        .map(|x| format!(" {}", x.printable_name()))
949                        .collect::<Vec<_>>()
950                        .join("")
951                )
952            }
953        }
954    }
955}
956
957impl<T: Timestamp + Lattice + Codec64> SpineBatch<T> {
958    fn diffs_sum<'a, D: Monoid + Codec64>(
959        parts: impl IntoIterator<Item = &'a RunPart<T>>,
960        metrics: &ColumnarMetrics,
961    ) -> Option<D> {
962        let mut sum = D::zero();
963        for part in parts {
964            sum.plus_equals(&part.diffs_sum::<D>(metrics)?);
965        }
966        Some(sum)
967    }
968
969    /// Get the diff sum from the given batch for the given runs.
970    /// Returns `None` if the runs aren't present or any parts don't have statistics.
971    fn diffs_sum_for_runs<D: Monoid + Codec64>(
972        batch: &HollowBatch<T>,
973        run_ids: &[RunId],
974        metrics: &ColumnarMetrics,
975    ) -> Option<D> {
976        let mut run_ids = BTreeSet::from_iter(run_ids.iter().copied());
977        let mut sum = D::zero();
978
979        for (meta, run) in batch.runs() {
980            let id = meta.id?;
981            if run_ids.remove(&id) {
982                sum.plus_equals(&Self::diffs_sum(run, metrics)?);
983            }
984        }
985
986        run_ids.is_empty().then_some(sum)
987    }
988
989    fn maybe_replace_with_tombstone(&mut self, desc: &Description<T>) -> ApplyMergeResult {
990        let exact_match =
991            desc.lower() == self.desc().lower() && desc.upper() == self.desc().upper();
992
993        let empty_batch = HollowBatch::empty(desc.clone());
994        if exact_match {
995            *self = SpineBatch::merged(IdHollowBatch {
996                id: self.id(),
997                batch: Arc::new(empty_batch),
998            });
999            return ApplyMergeResult::AppliedExact;
1000        }
1001
1002        if let Some((id, range)) = self.find_replacement_range(desc) {
1003            self.perform_subset_replacement(&empty_batch, id, range, None)
1004        } else {
1005            ApplyMergeResult::NotAppliedNoMatch
1006        }
1007    }
1008
1009    fn construct_batch_with_runs_replaced(
1010        original: &HollowBatch<T>,
1011        run_ids: &[RunId],
1012        replacement: &HollowBatch<T>,
1013    ) -> Result<HollowBatch<T>, ApplyMergeResult> {
1014        if run_ids.is_empty() {
1015            return Err(ApplyMergeResult::NotAppliedNoMatch);
1016        }
1017
1018        let orig_run_ids: BTreeSet<_> = original.runs().filter_map(|(meta, _)| meta.id).collect();
1019        let run_ids: BTreeSet<_> = run_ids.iter().cloned().collect();
1020        if !orig_run_ids.is_superset(&run_ids) {
1021            return Err(ApplyMergeResult::NotAppliedNoMatch);
1022        }
1023
1024        let runs: Vec<_> = original
1025            .runs()
1026            .filter(|(meta, _)| {
1027                !run_ids.contains(&meta.id.expect("id should be present at this point"))
1028            })
1029            .chain(replacement.runs())
1030            .collect();
1031
1032        let len = runs.iter().filter_map(|(meta, _)| meta.len).sum::<usize>();
1033
1034        let run_meta = runs
1035            .iter()
1036            .map(|(meta, _)| *meta)
1037            .cloned()
1038            .collect::<Vec<_>>();
1039
1040        let parts = runs
1041            .iter()
1042            .flat_map(|(_, parts)| *parts)
1043            .cloned()
1044            .collect::<Vec<_>>();
1045
1046        let run_splits = {
1047            let mut splits = Vec::with_capacity(run_meta.len().saturating_sub(1));
1048            let mut pointer = 0;
1049            for (i, (_, parts)) in runs.into_iter().enumerate() {
1050                if parts.is_empty() {
1051                    continue;
1052                }
1053                if i < run_meta.len() - 1 {
1054                    splits.push(pointer + parts.len());
1055                }
1056                pointer += parts.len();
1057            }
1058            splits
1059        };
1060
1061        Ok(HollowBatch::new(
1062            replacement.desc.clone(),
1063            parts,
1064            len,
1065            run_meta,
1066            run_splits,
1067        ))
1068    }
1069
1070    fn maybe_replace_checked<D>(
1071        &mut self,
1072        res: &FueledMergeRes<T>,
1073        metrics: &ColumnarMetrics,
1074    ) -> ApplyMergeResult
1075    where
1076        D: Monoid + Codec64 + PartialEq + Debug,
1077    {
1078        // The spine's and merge res's sinces don't need to match (which could occur if Spine
1079        // has been reloaded from state due to compare_and_set mismatch), but if so, the Spine
1080        // since must be in advance of the merge res since.
1081        if !PartialOrder::less_equal(res.output.desc.since(), self.desc().since()) {
1082            return ApplyMergeResult::NotAppliedInvalidSince;
1083        }
1084
1085        let new_diffs_sum = Self::diffs_sum(res.output.parts.iter(), metrics);
1086        let num_batches = self.parts.len();
1087
1088        let result = match &res.input {
1089            CompactionInput::IdRange(id) => {
1090                self.handle_id_range_replacement::<D>(res, id, new_diffs_sum, metrics)
1091            }
1092            CompactionInput::PartialBatch(id, runs) => {
1093                self.handle_partial_batch_replacement::<D>(res, *id, runs, new_diffs_sum, metrics)
1094            }
1095            CompactionInput::Legacy => self.maybe_replace_checked_classic::<D>(res, metrics),
1096        };
1097
1098        let num_batches_after = self.parts.len();
1099        assert!(
1100            num_batches_after <= num_batches,
1101            "replacing parts should not increase the number of batches"
1102        );
1103        result
1104    }
1105
1106    fn handle_id_range_replacement<D>(
1107        &mut self,
1108        res: &FueledMergeRes<T>,
1109        id: &SpineId,
1110        new_diffs_sum: Option<D>,
1111        metrics: &ColumnarMetrics,
1112    ) -> ApplyMergeResult
1113    where
1114        D: Monoid + Codec64 + PartialEq + Debug,
1115    {
1116        let range = self
1117            .parts
1118            .iter()
1119            .enumerate()
1120            .filter_map(|(i, p)| {
1121                if id.covers(p.id) {
1122                    Some((i, p.id))
1123                } else {
1124                    None
1125                }
1126            })
1127            .collect::<Vec<_>>();
1128
1129        let ids: BTreeSet<_> = range.iter().map(|(_, id)| *id).collect();
1130
1131        // If ids is empty, it means that we didn't find any parts that match the id range.
1132        // We also check that the id matches the range of ids we found.
1133        // At scale, sometimes regular compaction will race forced compaction,
1134        // for things like the catalog. In that case, we may have a
1135        // replacement that no longer lines up with the spine batches.
1136        // I think this is because forced compaction ignores the active_compaction
1137        // and just goes for it. This is slightly annoying but probably the right behavior
1138        // for a functions whose prefix is `force_`, so we just return
1139        // NotAppliedNoMatch here.
1140        if ids.is_empty() || id != &id_range(ids) {
1141            return ApplyMergeResult::NotAppliedNoMatch;
1142        }
1143
1144        let range: BTreeSet<_> = range.iter().map(|(i, _)| *i).collect();
1145
1146        // This is the range of hollow batches that we will replace.
1147        let min = *range.iter().min().unwrap();
1148        let max = *range.iter().max().unwrap();
1149        let replacement_range = min..max + 1;
1150
1151        // We need to replace a range of parts. Here we don't care about the run_indices
1152        // because we must be replacing the entire part(s)
1153        let old_diffs_sum = Self::diffs_sum::<D>(
1154            self.parts[replacement_range.clone()]
1155                .iter()
1156                .flat_map(|p| p.batch.parts.iter()),
1157            metrics,
1158        );
1159
1160        Self::validate_diffs_sum_match(old_diffs_sum, new_diffs_sum, "id range replacement");
1161
1162        self.perform_subset_replacement(
1163            &res.output,
1164            *id,
1165            replacement_range,
1166            res.new_active_compaction.clone(),
1167        )
1168    }
1169
1170    fn handle_partial_batch_replacement<D>(
1171        &mut self,
1172        res: &FueledMergeRes<T>,
1173        id: SpineId,
1174        runs: &BTreeSet<RunId>,
1175        new_diffs_sum: Option<D>,
1176        metrics: &ColumnarMetrics,
1177    ) -> ApplyMergeResult
1178    where
1179        D: Monoid + Codec64 + PartialEq + Debug,
1180    {
1181        if runs.is_empty() {
1182            return ApplyMergeResult::NotAppliedNoMatch;
1183        }
1184
1185        let part = self.parts.iter().enumerate().find(|(_, p)| p.id == id);
1186        let Some((i, batch)) = part else {
1187            return ApplyMergeResult::NotAppliedNoMatch;
1188        };
1189        let replacement_range = i..(i + 1);
1190
1191        let replacement_desc = &res.output.desc;
1192        let existing_desc = &batch.batch.desc;
1193        assert_eq!(
1194            replacement_desc.lower(),
1195            existing_desc.lower(),
1196            "batch lower should match, but {:?} != {:?}",
1197            replacement_desc.lower(),
1198            existing_desc.lower()
1199        );
1200        assert_eq!(
1201            replacement_desc.upper(),
1202            existing_desc.upper(),
1203            "batch upper should match, but {:?} != {:?}",
1204            replacement_desc.upper(),
1205            existing_desc.upper()
1206        );
1207
1208        let batch = &batch.batch;
1209        let run_ids = runs.iter().cloned().collect::<Vec<_>>();
1210
1211        match Self::construct_batch_with_runs_replaced(batch, &run_ids, &res.output) {
1212            Ok(new_batch) => {
1213                let old_diffs_sum = Self::diffs_sum_for_runs::<D>(batch, &run_ids, metrics);
1214                Self::validate_diffs_sum_match(
1215                    old_diffs_sum,
1216                    new_diffs_sum,
1217                    "partial batch replacement",
1218                );
1219                let old_batch_diff_sum = Self::diffs_sum::<D>(batch.parts.iter(), metrics);
1220                let new_batch_diff_sum = Self::diffs_sum::<D>(new_batch.parts.iter(), metrics);
1221                Self::validate_diffs_sum_match(
1222                    old_batch_diff_sum,
1223                    new_batch_diff_sum,
1224                    "sanity checking diffs sum for replaced runs",
1225                );
1226                self.perform_subset_replacement(
1227                    &new_batch,
1228                    id,
1229                    replacement_range,
1230                    res.new_active_compaction.clone(),
1231                )
1232            }
1233            Err(err) => err,
1234        }
1235    }
1236
1237    fn validate_diffs_sum_match<D>(
1238        old_diffs_sum: Option<D>,
1239        new_diffs_sum: Option<D>,
1240        context: &str,
1241    ) where
1242        D: Monoid + Codec64 + PartialEq + Debug,
1243    {
1244        let new_diffs_sum = new_diffs_sum.unwrap_or_else(D::zero);
1245        if let Some(old_diffs_sum) = old_diffs_sum {
1246            assert_eq!(
1247                old_diffs_sum, new_diffs_sum,
1248                "merge res diffs sum ({:?}) did not match spine batch diffs sum ({:?}) ({})",
1249                new_diffs_sum, old_diffs_sum, context
1250            )
1251        }
1252    }
1253
1254    /// This is the "legacy" way of replacing a spine batch with a merge result.
1255    /// It is used in moments when we don't have the full compaction input
1256    /// information.
1257    /// Eventually we should strive to roundtrip Spine IDs everywhere and
1258    /// deprecate this method.
1259    fn maybe_replace_checked_classic<D>(
1260        &mut self,
1261        res: &FueledMergeRes<T>,
1262        metrics: &ColumnarMetrics,
1263    ) -> ApplyMergeResult
1264    where
1265        D: Monoid + Codec64 + PartialEq + Debug,
1266    {
1267        // The spine's and merge res's sinces don't need to match (which could occur if Spine
1268        // has been reloaded from state due to compare_and_set mismatch), but if so, the Spine
1269        // since must be in advance of the merge res since.
1270        if !PartialOrder::less_equal(res.output.desc.since(), self.desc().since()) {
1271            return ApplyMergeResult::NotAppliedInvalidSince;
1272        }
1273
1274        let new_diffs_sum = Self::diffs_sum(res.output.parts.iter(), metrics);
1275
1276        // If our merge result exactly matches a spine batch, we can swap it in directly
1277        let exact_match = res.output.desc.lower() == self.desc().lower()
1278            && res.output.desc.upper() == self.desc().upper();
1279        if exact_match {
1280            let old_diffs_sum = Self::diffs_sum::<D>(
1281                self.parts.iter().flat_map(|p| p.batch.parts.iter()),
1282                metrics,
1283            );
1284
1285            if let (Some(old_diffs_sum), Some(new_diffs_sum)) = (old_diffs_sum, new_diffs_sum) {
1286                assert_eq!(
1287                    old_diffs_sum, new_diffs_sum,
1288                    "merge res diffs sum ({:?}) did not match spine batch diffs sum ({:?})",
1289                    new_diffs_sum, old_diffs_sum
1290                );
1291            }
1292
1293            // Spine internally has an invariant about a batch being at some level
1294            // or higher based on the len. We could end up violating this invariant
1295            // if we increased the length of the batch.
1296            //
1297            // A res output with length greater than the existing spine batch implies
1298            // a compaction has already been applied to this range, and with a higher
1299            // rate of consolidation than this one. This could happen as a result of
1300            // compaction's memory bound limiting the amount of consolidation possible.
1301            if res.output.len > self.len() {
1302                return ApplyMergeResult::NotAppliedTooManyUpdates;
1303            }
1304            *self = SpineBatch::merged(IdHollowBatch {
1305                id: self.id(),
1306                batch: Arc::new(res.output.clone()),
1307            });
1308            return ApplyMergeResult::AppliedExact;
1309        }
1310
1311        // Try subset replacement
1312        if let Some((id, range)) = self.find_replacement_range(&res.output.desc) {
1313            let old_diffs_sum = Self::diffs_sum::<D>(
1314                self.parts[range.clone()]
1315                    .iter()
1316                    .flat_map(|p| p.batch.parts.iter()),
1317                metrics,
1318            );
1319
1320            if let (Some(old_diffs_sum), Some(new_diffs_sum)) = (old_diffs_sum, new_diffs_sum) {
1321                assert_eq!(
1322                    old_diffs_sum, new_diffs_sum,
1323                    "merge res diffs sum ({:?}) did not match spine batch diffs sum ({:?})",
1324                    new_diffs_sum, old_diffs_sum
1325                );
1326            }
1327
1328            self.perform_subset_replacement(
1329                &res.output,
1330                id,
1331                range,
1332                res.new_active_compaction.clone(),
1333            )
1334        } else {
1335            ApplyMergeResult::NotAppliedNoMatch
1336        }
1337    }
1338
1339    /// This is the even more legacy way of replacing a spine batch with a merge result.
1340    /// It is used in moments when we don't have the full compaction input
1341    /// information, and we don't have the diffs sum.
1342    /// Eventually we should strive to roundtrip Spine IDs and diffs sums everywhere and
1343    /// deprecate this method.
1344    fn maybe_replace_unchecked(&mut self, res: &FueledMergeRes<T>) -> ApplyMergeResult {
1345        // The spine's and merge res's sinces don't need to match (which could occur if Spine
1346        // has been reloaded from state due to compare_and_set mismatch), but if so, the Spine
1347        // since must be in advance of the merge res since.
1348        if !PartialOrder::less_equal(res.output.desc.since(), self.desc().since()) {
1349            return ApplyMergeResult::NotAppliedInvalidSince;
1350        }
1351
1352        // If our merge result exactly matches a spine batch, we can swap it in directly
1353        let exact_match = res.output.desc.lower() == self.desc().lower()
1354            && res.output.desc.upper() == self.desc().upper();
1355        if exact_match {
1356            // Spine internally has an invariant about a batch being at some level
1357            // or higher based on the len. We could end up violating this invariant
1358            // if we increased the length of the batch.
1359            //
1360            // A res output with length greater than the existing spine batch implies
1361            // a compaction has already been applied to this range, and with a higher
1362            // rate of consolidation than this one. This could happen as a result of
1363            // compaction's memory bound limiting the amount of consolidation possible.
1364            if res.output.len > self.len() {
1365                return ApplyMergeResult::NotAppliedTooManyUpdates;
1366            }
1367
1368            *self = SpineBatch::merged(IdHollowBatch {
1369                id: self.id(),
1370                batch: Arc::new(res.output.clone()),
1371            });
1372            return ApplyMergeResult::AppliedExact;
1373        }
1374
1375        // Try subset replacement
1376        if let Some((id, range)) = self.find_replacement_range(&res.output.desc) {
1377            self.perform_subset_replacement(
1378                &res.output,
1379                id,
1380                range,
1381                res.new_active_compaction.clone(),
1382            )
1383        } else {
1384            ApplyMergeResult::NotAppliedNoMatch
1385        }
1386    }
1387
1388    /// Find the range of parts that can be replaced by the merge result
1389    fn find_replacement_range(&self, desc: &Description<T>) -> Option<(SpineId, Range<usize>)> {
1390        // It is possible the structure of the spine has changed since the merge res
1391        // was created, such that it no longer exactly matches the description of a
1392        // spine batch. This can happen if another merge has happened in the interim,
1393        // or if spine needed to be rebuilt from state.
1394        //
1395        // When this occurs, we can still attempt to slot the merge res in to replace
1396        // the parts of a fueled merge. e.g. if the res is for `[1,3)` and the parts
1397        // are `[0,1),[1,2),[2,3),[3,4)`, we can swap out the middle two parts for res.
1398
1399        let mut lower = None;
1400        let mut upper = None;
1401
1402        for (i, batch) in self.parts.iter().enumerate() {
1403            if batch.batch.desc.lower() == desc.lower() {
1404                lower = Some((i, batch.id.0));
1405            }
1406            if batch.batch.desc.upper() == desc.upper() {
1407                upper = Some((i, batch.id.1));
1408            }
1409            if lower.is_some() && upper.is_some() {
1410                break;
1411            }
1412        }
1413
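        // Continuing the example above with hypothetical ids: if parts[1]
        // (whose lower matches) has id (1, 2) and parts[2] (whose upper
        // matches) has id (2, 3), this returns SpineId(1, 3) and range 1..3.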
1414        match (lower, upper) {
1415            (Some((lower_idx, id_lower)), Some((upper_idx, id_upper))) => {
1416                Some((SpineId(id_lower, id_upper), lower_idx..(upper_idx + 1)))
1417            }
1418            _ => None,
1419        }
1420    }
1421
1422    /// Perform the actual subset replacement
1423    fn perform_subset_replacement(
1424        &mut self,
1425        res: &HollowBatch<T>,
1426        spine_id: SpineId,
1427        range: Range<usize>,
1428        new_active_compaction: Option<ActiveCompaction>,
1429    ) -> ApplyMergeResult {
1430        let SpineBatch {
1431            id,
1432            parts,
1433            desc,
1434            active_compaction: _,
1435            len: _,
1436        } = self;
1437
1438        let mut new_parts = vec![];
1439        new_parts.extend_from_slice(&parts[..range.start]);
1440        new_parts.push(IdHollowBatch {
1441            id: spine_id,
1442            batch: Arc::new(res.clone()),
1443        });
1444        new_parts.extend_from_slice(&parts[range.end..]);
1445
1446        let new_spine_batch = SpineBatch {
1447            id: *id,
1448            desc: desc.to_owned(),
1449            len: new_parts.iter().map(|x| x.batch.len).sum(),
1450            parts: new_parts,
1451            active_compaction: new_active_compaction,
1452        };
1453
1454        if new_spine_batch.len() > self.len() {
1455            return ApplyMergeResult::NotAppliedTooManyUpdates;
1456        }
1457
1458        *self = new_spine_batch;
1459        ApplyMergeResult::AppliedSubset
1460    }
1461}
1462
1463#[derive(Debug, Clone, PartialEq, Serialize)]
1464pub struct FuelingMerge<T> {
1465    pub(crate) since: Antichain<T>,
1466    pub(crate) remaining_work: usize,
1467}
1468
1469#[derive(Debug, Clone, PartialEq, Serialize)]
1470pub struct IdFuelingMerge<T> {
1471    id: SpineId,
1472    merge: FuelingMerge<T>,
1473}
1474
1475impl<T: Timestamp + Lattice> FuelingMerge<T> {
1476    /// Perform some amount of work, decrementing `fuel`.
1477    ///
1478    /// If `fuel` is non-zero after the call, the merging is complete and one
1479    /// should call `done` to extract the merged results.
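    ///
    /// A minimal sketch of the accounting (illustrative numbers only):
    ///
    /// ```text
    /// remaining_work = 10, fuel = 4  -> used = 4, remaining_work = 6, fuel = 0
    /// remaining_work = 6,  fuel = 16 -> used = 6, remaining_work = 0, fuel = 10
    /// // fuel is still positive, so the merge is complete and `done` may be called
    /// ```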
1480    // TODO(benesch): rewrite to avoid usage of `as`.
1481    #[allow(clippy::as_conversions)]
1482    fn work(&mut self, _: &[SpineBatch<T>], fuel: &mut isize) {
1483        let used = std::cmp::min(*fuel as usize, self.remaining_work);
1484        self.remaining_work = self.remaining_work.saturating_sub(used);
1485        *fuel -= used as isize;
1486    }
1487
1488    /// Extracts merged results.
1489    ///
1490    /// This method should only be called after `work` has been called and has
1491    /// not brought `fuel` to zero. Otherwise, the merge is still in progress.
1492    fn done(
1493        self,
1494        bs: ArrayVec<SpineBatch<T>, BATCHES_PER_LEVEL>,
1495        log: &mut SpineLog<'_, T>,
1496    ) -> Option<SpineBatch<T>> {
1497        let first = bs.first()?;
1498        let last = bs.last()?;
1499        let id = SpineId(first.id().0, last.id().1);
1500        assert!(id.0 < id.1);
1501        let lower = first.desc().lower().clone();
1502        let upper = last.desc().upper().clone();
1503        let since = self.since;
1504
1505        // Special case empty batches.
1506        if bs.iter().all(SpineBatch::is_empty) {
1507            return Some(SpineBatch::empty(id, lower, upper, since));
1508        }
1509
1510        let desc = Description::new(lower, upper, since);
1511        let len = bs.iter().map(SpineBatch::len).sum();
1512
1513        // Pre-size the merged_parts Vec. Benchmarking has shown that, at least
1514        // in the worst case, the double iteration is absolutely worth having
1515        // merged_parts pre-sized.
1516        let mut merged_parts_len = 0;
1517        for b in &bs {
1518            merged_parts_len += b.parts.len();
1519        }
1520        let mut merged_parts = Vec::with_capacity(merged_parts_len);
1521        for b in bs {
1522            merged_parts.extend(b.parts)
1523        }
1524        // Sanity check the pre-size code.
1525        debug_assert_eq!(merged_parts.len(), merged_parts_len);
1526
1527        if let SpineLog::Enabled { merge_reqs } = log {
1528            merge_reqs.push(FueledMergeReq {
1529                id,
1530                desc: desc.clone(),
1531                inputs: merged_parts.clone(),
1532            });
1533        }
1534
1535        Some(SpineBatch {
1536            id,
1537            desc,
1538            len,
1539            parts: merged_parts,
1540            active_compaction: None,
1541        })
1542    }
1543}
1544
1545/// The maximum number of batches per level in the spine.
1546/// In practice, we probably want a larger max and a configurable soft cap, but using a
1547/// stack-friendly data structure and keeping this number low makes this safer during the
1548/// initial rollout.
1549const BATCHES_PER_LEVEL: usize = 2;
1550
1551/// An append-only collection of update batches.
1552///
1553/// The `Spine` is a general-purpose trace implementation based on collecting
1554/// and merging immutable batches of updates. It is generic with respect to the
1555/// batch type, and can be instantiated for any implementor of `trace::Batch`.
1556///
1557/// ## Design
1558///
1559/// This spine is represented as a list of layers, where each element in the
1560/// list is a `MergeState` that is either
1561///
1562///   1. vacant: holding no batches
1563///   2. single: holding a single batch
1564///   3. full: holding `BATCHES_PER_LEVEL` batches and an in-progress `FuelingMerge`
1565///
1566/// A "batch" here may be logically empty yet still be introduced at a
1567/// non-trivial level, where it acts as a number of updates proportionate to
1568/// that level (for bookkeeping).
1569///
1570/// Each of the batches at layer i contains at most 2^i elements. The sequence
1571/// of batches should have the upper bound of one match the lower bound of the
1572/// next. Batches may be logically empty, with matching upper and lower bounds,
1573/// as a bookkeeping mechanism.
1574///
1575/// Each batch at layer i is treated as if it contains exactly 2^i elements,
1576/// even though it may actually contain fewer elements. This allows us to
1577/// decouple the physical representation from logical amounts of effort invested
1578/// in each batch. It allows us to begin compaction and to reduce the number of
1579/// updates, without compromising our ability to continue to move updates along
1580/// the spine. We are explicitly making the trade-off that while some batches
1581/// might compact at lower levels, we want to treat them as if they contained
1582/// their full set of updates for accounting reasons (to apply work to higher
1583/// levels).
1584///
1585/// We maintain the invariant that for any in-progress merge at level k there
1586/// should be fewer than 2^k records at levels lower than k. That is, even if we
1587/// were to apply an unbounded amount of effort to those records, we would not
1588/// have enough records to prompt a merge into the in-progress merge. Ideally,
1589/// we maintain the extended invariant that for any in-progress merge at level
1590/// k, the remaining effort required (number of records minus applied effort) is
1591/// less than the number of records that would need to be added to reach 2^k
1592/// records in layers below.
1593///
1594/// ## Mathematics
1595///
1596/// When a merge is initiated, there should be a non-negative *deficit* of
1597/// updates before the layers below could plausibly produce a new batch for the
1598/// currently merging layer. We must determine a factor of proportionality, so
1599/// that newly arrived updates provide at least that amount of "fuel" towards
1600/// the merging layer, so that the merge completes before lower levels invade.
1601///
1602/// ### Deficit:
1603///
1604/// A new merge is initiated only in response to the completion of a prior
1605/// merge, or the introduction of new records from outside. The latter case is
1606/// special, and will maintain our invariant trivially, so we will focus on the
1607/// former case.
1608///
1609/// When a merge at level k completes, assuming we have maintained our invariant
1610/// then there should be fewer than 2^k records at lower levels. The newly
1611/// created merge at level k+1 will require up to 2^(k+2) units of work, and
1612/// should not expect a new batch until strictly more than 2^k records are
1613/// added. This means that a factor of proportionality of four should be
1614/// sufficient to ensure that the merge completes before a new merge is
1615/// initiated.
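///
/// As a concrete sketch (illustrative numbers, not drawn from the code), take
/// k = 3:
///
/// ```text
/// work for the merge at level k+1 = 4:  2 batches * 2^4 records = 2^(3+2) = 32 units
/// records added before a new batch:     strictly more than 2^3 = 8
/// factor of proportionality:            32 / 8 = 4
/// ```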
1616///
1617/// When new records get introduced, we will need to roll up any batches at
1618/// lower levels, which we treat as the introduction of records. Each of these
1619/// virtual records should be accounted for in the fuel it contributes, since
1620/// it results in the promotion of batches closer to in-progress
1621/// merges.
1622///
1623/// ### Fuel sharing
1624///
1625/// We like the idea of applying fuel preferentially to merges at *lower*
1626/// levels, under the idea that they are easier to complete, and we benefit from
1627/// fewer total merges in progress. This does delay the completion of merges at
1628/// higher levels, and may not obviously be a total win. If we choose to do
1629/// this, we should make sure that we correctly account for completed merges at
1630/// low layers: they should still extract fuel from new updates even though they
1631/// have completed, at least until they have paid back any "debt" to higher
1632/// layers by continuing to provide fuel as updates arrive.
1633#[derive(Debug, Clone)]
1634struct Spine<T> {
1635    effort: usize,
1636    next_id: usize,
1637    since: Antichain<T>,
1638    upper: Antichain<T>,
1639    merging: Vec<MergeState<T>>,
1640}
1641
1642impl<T> Spine<T> {
1643    /// All batches in the spine, oldest to newest.
1644    pub fn spine_batches(&self) -> impl Iterator<Item = &SpineBatch<T>> {
1645        self.merging.iter().rev().flat_map(|m| &m.batches)
1646    }
1647
1648    /// All (mutable) batches in the spine, oldest to newest.
1649    pub fn spine_batches_mut(&mut self) -> impl DoubleEndedIterator<Item = &mut SpineBatch<T>> {
1650        self.merging.iter_mut().rev().flat_map(|m| &mut m.batches)
1651    }
1652}
1653
1654impl<T: Timestamp + Lattice> Spine<T> {
1655    /// Allocates a fueled `Spine`.
1656    ///
1657    /// This trace will merge batches progressively, with each inserted batch
1658    /// applying a multiple of the batch's length in effort to each merge. The
1659    /// `effort` field is that multiplier. This value should be at least one
1660    /// for the merging to happen; a value of zero is not helpful.
1661    pub fn new() -> Self {
1662        Spine {
1663            effort: 1,
1664            next_id: 0,
1665            since: Antichain::from_elem(T::minimum()),
1666            upper: Antichain::from_elem(T::minimum()),
1667            merging: Vec::new(),
1668        }
1669    }
1670
1671    /// Apply some amount of effort to trace maintenance.
1672    ///
1673    /// The units of effort are updates, and the method should be thought of as
1674    /// analogous to inserting as many empty updates, where the trace is
1675    /// permitted to perform proportionate work.
1676    ///
1677    /// Returns true if this did work and false if it left the spine unchanged.
1678    fn exert(&mut self, effort: usize, log: &mut SpineLog<'_, T>) -> bool {
1679        self.tidy_layers();
1680        if self.reduced() {
1681            return false;
1682        }
1683
1684        if self.merging.iter().any(|b| b.merge.is_some()) {
1685            let fuel = isize::try_from(effort).unwrap_or(isize::MAX);
1686            // If any merges exist, we can directly call `apply_fuel`.
1687            self.apply_fuel(&fuel, log);
1688        } else {
1689            // Otherwise, we'll need to introduce fake updates to move merges
1690            // along.
1691
1692            // Introduce an empty batch with roughly `effort` virtual updates.
1693            let level = usize::cast_from(effort.next_power_of_two().trailing_zeros());
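            // For illustration (hypothetical value): effort = 100 rounds up to
            // 128 = 2^7, so the empty batch is introduced at level 7.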
1694            let id = self.next_id();
1695            self.introduce_batch(
1696                SpineBatch::empty(
1697                    id,
1698                    self.upper.clone(),
1699                    self.upper.clone(),
1700                    self.since.clone(),
1701                ),
1702                level,
1703                log,
1704            );
1705        }
1706        true
1707    }
1708
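    /// Returns the `SpineId` for the next inserted batch: a half-open range
    /// covering exactly one new batch. Starting from a fresh `Spine`, successive
    /// calls yield `SpineId(0, 1)`, `SpineId(1, 2)`, `SpineId(2, 3)`, and so on;
    /// later merges union adjacent ranges (e.g. `SpineId(0, 2)`).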
1709    pub fn next_id(&mut self) -> SpineId {
1710        let id = self.next_id;
1711        self.next_id += 1;
1712        SpineId(id, self.next_id)
1713    }
1714
1715    // Ideally, this method acts as insertion of `batch`, even if we are not yet
1716    // able to begin merging the batch. This means it is a good time to perform
1717    // amortized work proportional to the size of batch.
1718    pub fn insert(&mut self, batch: HollowBatch<T>, log: &mut SpineLog<'_, T>) {
1719        assert!(batch.desc.lower() != batch.desc.upper());
1720        assert_eq!(batch.desc.lower(), &self.upper);
1721
1722        let id = self.next_id();
1723        let batch = SpineBatch::merged(IdHollowBatch {
1724            id,
1725            batch: Arc::new(batch),
1726        });
1727
1728        self.upper.clone_from(batch.upper());
1729
1730        // If `batch` and the most recently inserted batch are both empty,
1731        // we can just fuse them.
1732        if batch.is_empty() {
1733            if let Some(position) = self.merging.iter().position(|m| !m.is_vacant()) {
1734                if self.merging[position].is_single() && self.merging[position].is_empty() {
1735                    self.insert_at(batch, position);
1736                    // Since we just inserted a batch, we should always have work to complete...
1737                    // but otherwise we just leave this layer vacant.
1738                    if let Some(merged) = self.complete_at(position, log) {
1739                        self.merging[position] = MergeState::single(merged);
1740                    }
1741                    return;
1742                }
1743            }
1744        }
1745
1746        // Normal insertion for the batch.
1747        let index = batch.len().next_power_of_two();
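        // For illustration (hypothetical size): a batch of 1000 updates rounds
        // up to 1024 = 2^10 and is introduced at level 10.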
1748        self.introduce_batch(batch, usize::cast_from(index.trailing_zeros()), log);
1749    }
1750
1751    /// Returns true when the trace is considered *structurally reduced*.
1752    ///
1753    /// Reduced == the total number of runs (across every
1754    /// `SpineBatch` and all of their inner hollow batches) is < 2. In other
1755    /// words, there are either zero runs (fully empty) or exactly one logical
1756    /// run of data remaining.
1757    fn reduced(&self) -> bool {
1758        self.spine_batches()
1759            .map(|b| {
1760                b.parts
1761                    .iter()
1762                    .map(|p| p.batch.run_meta.len())
1763                    .sum::<usize>()
1764            })
1765            .sum::<usize>()
1766            < 2
1767    }
1768
1769    /// Describes the merge progress of layers in the trace.
1770    ///
1771    /// Intended for diagnostics rather than public consumption.
1772    #[allow(dead_code)]
1773    fn describe(&self) -> Vec<(usize, usize)> {
1774        self.merging
1775            .iter()
1776            .map(|b| (b.batches.len(), b.len()))
1777            .collect()
1778    }
1779
1780    /// Introduces a batch at an indicated level.
1781    ///
1782    /// The level indication is often related to the size of the batch, but it
1783    /// can also be used to artificially fuel the computation by supplying empty
1784    /// batches at non-trivial indices, to move merges along.
1785    fn introduce_batch(
1786        &mut self,
1787        batch: SpineBatch<T>,
1788        batch_index: usize,
1789        log: &mut SpineLog<'_, T>,
1790    ) {
1791        // Step 0.  Determine an amount of fuel to use for the computation.
1792        //
1793        //          Fuel is used to drive maintenance of the data structure,
1794        //          and in particular are used to make progress through merges
1795        //          that are in progress. The amount of fuel to use should be
1796        //          proportional to the number of records introduced, so that
1797        //          we are guaranteed to complete all merges before they are
1798        //          required as arguments to merges again.
1799        //
1800        //          The fuel use policy is negotiable, in that we might aim
1801        //          to use relatively less when we can, so that we return
1802        //          control promptly, or we might account more work to larger
1803        //          batches. Not clear to me which is best, or if there
1804        //          should be a configuration knob controlling this.
1805
1806        // The amount of fuel to use is proportional to 2^batch_index, scaled by
1807        // a factor of self.effort which determines how eager we are in
1808        // performing maintenance work. We need to ensure that each merge in
1809        // progress receives fuel for each introduced batch, and so multiply by
1810        // that as well.
1811        if batch_index > 32 {
1812            println!("Large batch index: {}", batch_index);
1813        }
1814
1815        // We believe that eight units of fuel is sufficient for each introduced
1816        // record, accounted as four for each record, and a potential four more
1817        // for each virtual record associated with promoting existing smaller
1818        // batches. We could try and make this be less, or be scaled to merges
1819        // based on their deficit at time of instantiation. For now, we remain
1820        // conservative.
1821        let mut fuel = 8 << batch_index;
1822        // Scale up by the effort parameter, which is calibrated to one as the
1823        // minimum amount of effort.
1824        fuel *= self.effort;
1825        // Convert to an `isize` so we can observe any fuel shortfall.
1826        // TODO(benesch): avoid dangerous usage of `as`.
1827        #[allow(clippy::as_conversions)]
1828        let fuel = fuel as isize;
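        // For illustration (hypothetical values): batch_index = 3 and effort = 1
        // yield fuel = (8 << 3) * 1 = 64 units for each in-progress merge below.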
1829
1830        // Step 1.  Apply fuel to each in-progress merge.
1831        //
1832        //          Before we can introduce new updates, we must apply any
1833        //          fuel to in-progress merges, as this fuel is what ensures
1834        //          that the merges will be complete by the time we insert
1835        //          the updates.
1836        self.apply_fuel(&fuel, log);
1837
1838        // Step 2.  We must ensure the invariant that adjacent layers do not
1839        //          contain two batches will be satisfied when we insert the
1840        //          batch. We forcibly complete all merges at layers lower
1841        //          than and including `batch_index`, so that the new batch is
1842        //          inserted into an empty layer.
1843        //
1844        //          We could relax this to "strictly less than `batch_index`"
1845        //          if the layer above has only a single batch in it, which
1846        //          seems not implausible if it has been the focus of effort.
1847        //
1848        //          This should be interpreted as the introduction of some
1849        //          volume of fake updates, and we will need to fuel merges
1850        //          by a proportional amount to ensure that they are not
1851        //          surprised later on. The number of fake updates should
1852        //          correspond to the deficit for the layer, which perhaps
1853        //          we should track explicitly.
1854        self.roll_up(batch_index, log);
1855
1856        // Step 3. This insertion should be into an empty layer. It is a logical
1857        //         error otherwise, as we may be violating our invariant, from
1858        //         which all wonderment derives.
1859        self.insert_at(batch, batch_index);
1860
1861        // Step 4. Tidy the largest layers.
1862        //
1863        //         It is important that we not tidy only smaller layers,
1864        //         as their ascension is what ensures the merging and
1865        //         eventual compaction of the largest layers.
1866        self.tidy_layers();
1867    }
1868
1869    /// Ensures that an insertion at layer `index` will succeed.
1870    ///
1871    /// This method is subject to the constraint that all existing batches
1872    /// should occur at higher levels, which requires it to "roll up" batches
1873    /// present at lower levels before the method is called. In doing this, we
1874    /// should not introduce more virtual records than 2^index, as that is the
1875    /// amount of excess fuel we have budgeted for completing merges.
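    ///
    /// For instance (a purely illustrative walk-through): with single batches at
    /// levels 0 and 1 and `index = 3`, level 0 is completed and its batch pushed
    /// into level 1; level 1 (now full) is completed into one batch, which passes
    /// through level 2 and is inserted at level 3. If that fills level 3, the
    /// resulting merge is immediately completed and promoted to level 4.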
1876    fn roll_up(&mut self, index: usize, log: &mut SpineLog<'_, T>) {
1877        // Ensure entries sufficient for `index`.
1878        while self.merging.len() <= index {
1879            self.merging.push(MergeState::default());
1880        }
1881
1882        // We only need to roll up if there are non-vacant layers.
1883        if self.merging[..index].iter().any(|m| !m.is_vacant()) {
1884            // Collect and merge all batches at layers up to but not including
1885            // `index`.
1886            let mut merged = None;
1887            for i in 0..index {
1888                if let Some(merged) = merged.take() {
1889                    self.insert_at(merged, i);
1890                }
1891                merged = self.complete_at(i, log);
1892            }
1893
1894            // The merged results should be introduced at level `index`, which
1895            // should be ready to absorb them (possibly creating a new merge at
1896            // the time).
1897            if let Some(merged) = merged {
1898                self.insert_at(merged, index);
1899            }
1900
1901            // If the insertion results in a merge, we should complete it to
1902            // ensure the upcoming insertion at `index` does not panic.
1903            if self.merging[index].is_full() {
1904                let merged = self.complete_at(index, log).expect("double batch");
1905                self.insert_at(merged, index + 1);
1906            }
1907        }
1908    }
1909
1910    /// Applies an amount of fuel to merges in progress.
1911    ///
1912    /// The supplied `fuel` is for each in-progress merge, and if we want to
1913    /// spend the fuel non-uniformly (e.g. prioritizing merges at low layers) we
1914    /// could do so in order to maintain fewer batches on average (at the risk
1915    /// of completing merges of large batches later, but tbh probably not much
1916    /// later).
1917    pub fn apply_fuel(&mut self, fuel: &isize, log: &mut SpineLog<'_, T>) {
1918        // For the moment our strategy is to apply fuel independently to each
1919        // merge in progress, rather than prioritizing small merges. This sounds
1920        // like a great idea, but we need better accounting in place to ensure
1921        // that merges that borrow against later layers but then complete still
1922        // "acquire" fuel to pay back their debts.
1923        for index in 0..self.merging.len() {
1924            // Give each level independent fuel, for now.
1925            let mut fuel = *fuel;
1926            // Pass along various logging stuffs, in case we need to report
1927            // success.
1928            self.merging[index].work(&mut fuel);
1929            // `fuel` could have a deficit at this point, meaning we over-spent
1930            // when we took a merge step. We could ignore this, or maintain the
1931            // deficit and account future fuel against it before spending again.
1932            // It isn't clear why that would be especially helpful to do; we
1933            // might want to avoid overspends at multiple layers in the same
1934            // invocation (to limit latencies), but there is probably a rich
1935            // policy space here.
1936
1937            // If a merge completes, we can immediately merge it in to the next
1938            // level, which is "guaranteed" to be complete at this point, by our
1939            // fueling discipline.
1940            if self.merging[index].is_complete() {
1941                let complete = self.complete_at(index, log).expect("complete batch");
1942                self.insert_at(complete, index + 1);
1943            }
1944        }
1945    }
1946
1947    /// Inserts a batch at a specific location.
1948    ///
1949    /// This is a non-public internal method that can panic if we try and insert
1950    /// into a layer which already contains two batches (and is still in the
1951    /// process of merging).
1952    fn insert_at(&mut self, batch: SpineBatch<T>, index: usize) {
1953        // Ensure the spine is large enough.
1954        while self.merging.len() <= index {
1955            self.merging.push(MergeState::default());
1956        }
1957
1958        // Insert the batch at the location.
1959        let merging = &mut self.merging[index];
1960        merging.push_batch(batch);
1961        if merging.batches.is_full() {
1962            let compaction_frontier = Some(self.since.borrow());
1963            merging.merge = SpineBatch::begin_merge(&merging.batches[..], compaction_frontier)
1964        }
1965    }
1966
1967    /// Completes and extracts whatever is at layer `index`, leaving this layer vacant.
1968    fn complete_at(&mut self, index: usize, log: &mut SpineLog<'_, T>) -> Option<SpineBatch<T>> {
1969        self.merging[index].complete(log)
1970    }
1971
1972    /// Attempts to draw down large layers to size appropriate layers.
1973    fn tidy_layers(&mut self) {
1974        // If the largest layer is complete (not merging), we can attempt to
1975        // draw it down to the next layer. This is permitted if we can maintain
1976        // our invariant that below each merge there are at most half the
1977        // records that would be required to invade the merge.
1978        if !self.merging.is_empty() {
1979            let mut length = self.merging.len();
1980            if self.merging[length - 1].is_single() {
1981                // To move a batch down, we require that it contain few enough
1982                // records that the lower level is appropriate, and that moving
1983                // the batch would not create a merge violating our invariant.
1984                let appropriate_level = usize::cast_from(
1985                    self.merging[length - 1]
1986                        .len()
1987                        .next_power_of_two()
1988                        .trailing_zeros(),
1989                );
1990
1991                // Continue only as far as is appropriate
1992                while appropriate_level < length - 1 {
1993                    let current = &mut self.merging[length - 2];
1994                    if current.is_vacant() {
1995                        // Vacant batches can be absorbed.
1996                        self.merging.remove(length - 2);
1997                        length = self.merging.len();
1998                    } else {
1999                        if !current.is_full() {
2000                            // Single batches may initiate a merge, if sizes are
2001                            // within bounds, but terminate the loop either way.
2002
2003                            // Determine the number of records that might lead
2004                            // to a merge. Importantly, this is not the number
2005                            // of actual records, but the sum of upper bounds
2006                            // based on indices.
2007                            let mut smaller = 0;
2008                            for (index, batch) in self.merging[..(length - 2)].iter().enumerate() {
2009                                smaller += batch.batches.len() << index;
2010                            }
2011
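                            // For illustration (hypothetical sizes): with
                            // length = 6 layers, the threshold is
                            // (1 << 6) / 8 = 8 "virtual" records below.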
2012                            if smaller <= (1 << length) / 8 {
2013                                // Remove the batch under consideration (shifting the deeper batches up a level),
2014                                // then merge in the single batch at the current level.
2015                                let state = self.merging.remove(length - 2);
2016                                assert_eq!(state.batches.len(), 1);
2017                                for batch in state.batches {
2018                                    self.insert_at(batch, length - 2);
2019                                }
2020                            }
2021                        }
2022                        break;
2023                    }
2024                }
2025            }
2026        }
2027    }
2028
2029    /// Checks invariants:
2030    /// - The lowers and uppers of all batches "line up".
2031    /// - The lower of the "minimum" batch is `antichain[T::minimum]`.
2032    /// - The upper of the "maximum" batch is `== self.upper`.
2033    /// - The since of each batch is `less_equal self.since`.
2034    /// - The `SpineIds` all "line up" and cover from `0` to `self.next_id`.
2035    /// - TODO: Verify fuel and level invariants.
2036    fn validate(&self) -> Result<(), String> {
2037        let mut id = SpineId(0, 0);
2038        let mut frontier = Antichain::from_elem(T::minimum());
2039        for x in self.merging.iter().rev() {
2040            if x.is_full() != x.merge.is_some() {
2041                return Err(format!(
2042                    "all (and only) full batches should have fueling merges (full={}, merge={:?})",
2043                    x.is_full(),
2044                    x.merge,
2045                ));
2046            }
2047
2048            if let Some(m) = &x.merge {
2049                if !x.is_full() {
2050                    return Err(format!(
2051                        "merge should only exist for full batches (len={:?}, merge={:?})",
2052                        x.batches.len(),
2053                        m.id,
2054                    ));
2055                }
2056                if x.id() != Some(m.id) {
2057                    return Err(format!(
2058                        "merge id should match the range of the batch ids (batch={:?}, merge={:?})",
2059                        x.id(),
2060                        m.id,
2061                    ));
2062                }
2063            }
2064
2065            // TODO: Anything we can validate about x.merge? It'd
2066            // be nice to assert that it's bigger than the len of the
2067            // two batches, but apply_merge_res might swap those lengths
2068            // out from under us.
2069            for batch in &x.batches {
2070                if batch.id().0 != id.1 {
2071                    return Err(format!(
2072                        "batch id {:?} does not match the previous id {:?}: {:?}",
2073                        batch.id(),
2074                        id,
2075                        self
2076                    ));
2077                }
2078                id = batch.id();
2079                if batch.desc().lower() != &frontier {
2080                    return Err(format!(
2081                        "batch lower {:?} does not match the previous upper {:?}: {:?}",
2082                        batch.desc().lower(),
2083                        frontier,
2084                        self
2085                    ));
2086                }
2087                frontier.clone_from(batch.desc().upper());
2088                if !PartialOrder::less_equal(batch.desc().since(), &self.since) {
2089                    return Err(format!(
2090                        "since of batch {:?} past the spine since {:?}: {:?}",
2091                        batch.desc().since(),
2092                        self.since,
2093                        self
2094                    ));
2095                }
2096            }
2097        }
2098        if self.next_id != id.1 {
2099            return Err(format!(
2100                "spine next_id {:?} does not match the last batch's id {:?}: {:?}",
2101                self.next_id, id, self
2102            ));
2103        }
2104        if self.upper != frontier {
2105            return Err(format!(
2106                "spine upper {:?} does not match the last batch's upper {:?}: {:?}",
2107                self.upper, frontier, self
2108            ));
2109        }
2110        Ok(())
2111    }
2112}
2113
2114/// Describes the state of a layer.
2115///
2116/// A layer can be empty, contain a single batch, or contain a pair of batches
2117/// that are in the process of merging into a batch for the next layer.
2118#[derive(Debug, Clone)]
2119struct MergeState<T> {
2120    batches: ArrayVec<SpineBatch<T>, BATCHES_PER_LEVEL>,
2121    merge: Option<IdFuelingMerge<T>>,
2122}
2123
2124impl<T> Default for MergeState<T> {
2125    fn default() -> Self {
2126        Self {
2127            batches: ArrayVec::new(),
2128            merge: None,
2129        }
2130    }
2131}
2132
2133impl<T: Timestamp + Lattice> MergeState<T> {
2134    /// An id that covers all the batches in the given merge state, assuming there are any.
2135    fn id(&self) -> Option<SpineId> {
2136        if let (Some(first), Some(last)) = (self.batches.first(), self.batches.last()) {
2137            Some(SpineId(first.id().0, last.id().1))
2138        } else {
2139            None
2140        }
2141    }
2142
2143    /// A new single-batch merge state.
2144    fn single(batch: SpineBatch<T>) -> Self {
2145        let mut state = Self::default();
2146        state.push_batch(batch);
2147        state
2148    }
2149
2150    /// Push a new batch at this level, checking invariants.
2151    fn push_batch(&mut self, batch: SpineBatch<T>) {
2152        if let Some(last) = self.batches.last() {
2153            assert_eq!(last.id().1, batch.id().0);
2154            assert_eq!(last.upper(), batch.lower());
2155        }
2156        assert!(
2157            self.merge.is_none(),
2158            "Attempted to insert batch into incomplete merge! (batch={:?}, batch_count={})",
2159            batch.id,
2160            self.batches.len(),
2161        );
2162        self.batches
2163            .try_push(batch)
2164            .expect("Attempted to insert batch into full layer!");
2165    }
2166
2167    /// The number of actual updates contained in the level.
2168    fn len(&self) -> usize {
2169        self.batches.iter().map(SpineBatch::len).sum()
2170    }
2171
2172    /// True if this merge state contains no updates.
2173    fn is_empty(&self) -> bool {
2174        self.batches.iter().all(SpineBatch::is_empty)
2175    }
2176
2177    /// True if this level contains no batches.
2178    fn is_vacant(&self) -> bool {
2179        self.batches.is_empty()
2180    }
2181
2182    /// True only for a single-batch state.
2183    fn is_single(&self) -> bool {
2184        self.batches.len() == 1
2185    }
2186
2187    /// True if this merge cannot hold any more batches.
2188    /// (i.e. for a binary merge tree, true if this layer holds two batches.)
2189    fn is_full(&self) -> bool {
2190        self.batches.is_full()
2191    }
2192
2193    /// Immediately complete any merge.
2194    ///
2195    /// The result is either a batch, if there is a non-trivial batch to return,
2196    /// or `None` if there is no meaningful batch to return.
2197    ///
2198    /// Merging multiple non-empty batches also records a `FueledMergeReq` via `log`.
2199    fn complete(&mut self, log: &mut SpineLog<'_, T>) -> Option<SpineBatch<T>> {
2200        let mut this = mem::take(self);
2201        if this.batches.len() <= 1 {
2202            this.batches.pop()
2203        } else {
2204            // Merge the remaining batches, regardless of whether we have a fully fueled merge.
2205            let id_merge = this
2206                .merge
2207                .or_else(|| SpineBatch::begin_merge(&this.batches[..], None))?;
2208            id_merge.merge.done(this.batches, log)
2209        }
2210    }
2211
2212    /// True iff the layer is a complete merge, ready for extraction.
2213    fn is_complete(&self) -> bool {
2214        match &self.merge {
2215            Some(IdFuelingMerge { merge, .. }) => merge.remaining_work == 0,
2216            None => false,
2217        }
2218    }
2219
2220    /// Performs a bounded amount of work towards a merge.
2221    fn work(&mut self, fuel: &mut isize) {
2222        // We only perform work for merges in progress.
2223        if let Some(IdFuelingMerge { merge, .. }) = &mut self.merge {
2224            merge.work(&self.batches[..], fuel)
2225        }
2226    }
2227}
2228
2229#[cfg(test)]
2230pub mod datadriven {
2231    use mz_ore::fmt::FormatBuffer;
2232
2233    use crate::internal::datadriven::DirectiveArgs;
2234
2235    use super::*;
2236
2237    /// Shared state for a single [crate::internal::trace] [datadriven::TestFile].
2238    #[derive(Debug, Default)]
2239    pub struct TraceState {
2240        pub trace: Trace<u64>,
2241        pub merge_reqs: Vec<FueledMergeReq<u64>>,
2242    }
2243
2244    pub fn since_upper(
2245        datadriven: &TraceState,
2246        _args: DirectiveArgs,
2247    ) -> Result<String, anyhow::Error> {
2248        Ok(format!(
2249            "{:?}{:?}\n",
2250            datadriven.trace.since().elements(),
2251            datadriven.trace.upper().elements()
2252        ))
2253    }
2254
2255    pub fn batches(datadriven: &TraceState, _args: DirectiveArgs) -> Result<String, anyhow::Error> {
2256        let mut s = String::new();
2257        for b in datadriven.trace.spine.spine_batches() {
2258            s.push_str(b.describe(true).as_str());
2259            s.push('\n');
2260        }
2261        Ok(s)
2262    }
2263
2264    pub fn insert(
2265        datadriven: &mut TraceState,
2266        args: DirectiveArgs,
2267    ) -> Result<String, anyhow::Error> {
2268        for x in args
2269            .input
2270            .trim()
2271            .split('\n')
2272            .map(DirectiveArgs::parse_hollow_batch)
2273        {
2274            datadriven
2275                .merge_reqs
2276                .append(&mut datadriven.trace.push_batch(x));
2277        }
2278        Ok("ok\n".to_owned())
2279    }
2280
2281    pub fn downgrade_since(
2282        datadriven: &mut TraceState,
2283        args: DirectiveArgs,
2284    ) -> Result<String, anyhow::Error> {
2285        let since = args.expect("since");
2286        datadriven
2287            .trace
2288            .downgrade_since(&Antichain::from_elem(since));
2289        Ok("ok\n".to_owned())
2290    }
2291
2292    pub fn take_merge_req(
2293        datadriven: &mut TraceState,
2294        _args: DirectiveArgs,
2295    ) -> Result<String, anyhow::Error> {
2296        let mut s = String::new();
2297        for merge_req in std::mem::take(&mut datadriven.merge_reqs) {
2298            write!(
2299                s,
2300                "{:?}{:?}{:?} {}\n",
2301                merge_req.desc.lower().elements(),
2302                merge_req.desc.upper().elements(),
2303                merge_req.desc.since().elements(),
2304                merge_req
2305                    .inputs
2306                    .iter()
2307                    .flat_map(|x| x.batch.parts.iter())
2308                    .map(|x| x.printable_name())
2309                    .collect::<Vec<_>>()
2310                    .join(" ")
2311            );
2312        }
2313        Ok(s)
2314    }
2315
2316    pub fn apply_merge_res(
2317        datadriven: &mut TraceState,
2318        args: DirectiveArgs,
2319    ) -> Result<String, anyhow::Error> {
2320        let res = FueledMergeRes {
2321            output: DirectiveArgs::parse_hollow_batch(args.input),
2322            input: CompactionInput::Legacy,
2323            new_active_compaction: None,
2324        };
2325        match datadriven.trace.apply_merge_res_unchecked(&res) {
2326            ApplyMergeResult::AppliedExact => Ok("applied exact\n".into()),
2327            ApplyMergeResult::AppliedSubset => Ok("applied subset\n".into()),
2328            ApplyMergeResult::NotAppliedNoMatch => Ok("no-op\n".into()),
2329            ApplyMergeResult::NotAppliedInvalidSince => Ok("no-op invalid since\n".into()),
2330            ApplyMergeResult::NotAppliedTooManyUpdates => Ok("no-op too many updates\n".into()),
2331        }
2332    }
2333}
2334
2335#[cfg(test)]
2336pub(crate) mod tests {
2337    use std::ops::Range;
2338
2339    use proptest::prelude::*;
2340    use semver::Version;
2341
2342    use crate::internal::state::tests::{any_hollow_batch, any_hollow_batch_with_exact_runs};
2343
2344    use super::*;
2345
2346    pub fn any_trace<T: Arbitrary + Timestamp + Lattice>(
2347        num_batches: Range<usize>,
2348    ) -> impl Strategy<Value = Trace<T>> {
2349        Strategy::prop_map(
2350            (
2351                any::<Option<T>>(),
2352                proptest::collection::vec(any_hollow_batch::<T>(), num_batches),
2353                any::<bool>(),
2354                any::<u64>(),
2355            ),
2356            |(since, mut batches, roundtrip_structure, timeout_ms)| {
2357                let mut trace = Trace::<T>::default();
2358                trace.downgrade_since(&since.map_or_else(Antichain::new, Antichain::from_elem));
2359
2360                // Fix up the arbitrary HollowBatches so the lowers and uppers
2361                // align.
2362                batches.sort_by(|x, y| x.desc.upper().elements().cmp(y.desc.upper().elements()));
2363                let mut lower = Antichain::from_elem(T::minimum());
2364                for mut batch in batches {
2365                    // Overall trace since has to be past each batch's since.
2366                    if PartialOrder::less_than(trace.since(), batch.desc.since()) {
2367                        trace.downgrade_since(batch.desc.since());
2368                    }
2369                    batch.desc = Description::new(
2370                        lower.clone(),
2371                        batch.desc.upper().clone(),
2372                        batch.desc.since().clone(),
2373                    );
2374                    lower.clone_from(batch.desc.upper());
2375                    let _merge_req = trace.push_batch(batch);
2376                }
2377                let reqs: Vec<_> = trace
2378                    .fueled_merge_reqs_before_ms(timeout_ms, None)
2379                    .collect();
2380                for req in reqs {
2381                    trace.claim_compaction(req.id, ActiveCompaction { start_ms: 0 })
2382                }
2383                trace.roundtrip_structure = roundtrip_structure;
2384                trace
2385            },
2386        )
2387    }
2388
2389    #[mz_ore::test]
2390    #[cfg_attr(miri, ignore)] // proptest is too heavy for miri!
2391    fn test_roundtrips() {
2392        fn check(trace: Trace<i64>) {
2393            trace.validate().unwrap();
2394            let flat = trace.flatten();
2395            let unflat = Trace::unflatten(flat).unwrap();
2396            assert_eq!(trace, unflat);
2397        }
2398
2399        proptest!(|(trace in any_trace::<i64>(1..10))| { check(trace) })
2400    }
2401
2402    #[mz_ore::test]
2403    fn fueled_merge_reqs() {
2404        let mut trace: Trace<u64> = Trace::default();
2405        let fueled_reqs = trace.push_batch(crate::internal::state::tests::hollow(
2406            0,
2407            10,
2408            &["n0011500/p3122e2a1-a0c7-429f-87aa-1019bf4f5f86"],
2409            1000,
2410        ));
2411
2412        assert!(fueled_reqs.is_empty());
2413        assert_eq!(
2414            trace.fueled_merge_reqs_before_ms(u64::MAX, None).count(),
2415            0,
2416            "no merge reqs when not filtering by version"
2417        );
2418        assert_eq!(
2419            trace
2420                .fueled_merge_reqs_before_ms(
2421                    u64::MAX,
2422                    Some(WriterKey::for_version(&Version::new(0, 50, 0)))
2423                )
2424                .count(),
2425            0,
2426            "zero batches are older than a past version"
2427        );
2428        assert_eq!(
2429            trace
2430                .fueled_merge_reqs_before_ms(
2431                    u64::MAX,
2432                    Some(WriterKey::for_version(&Version::new(99, 99, 0)))
2433                )
2434                .count(),
2435            1,
2436            "one batch is older than a future version"
2437        );
2438    }
2439
2440    #[mz_ore::test]
2441    fn remove_redundant_merge_reqs() {
2442        fn req(lower: u64, upper: u64) -> FueledMergeReq<u64> {
2443            FueledMergeReq {
2444                id: SpineId(usize::cast_from(lower), usize::cast_from(upper)),
2445                desc: Description::new(
2446                    Antichain::from_elem(lower),
2447                    Antichain::from_elem(upper),
2448                    Antichain::new(),
2449                ),
2450                inputs: vec![],
2451            }
2452        }
2453
2454        // Empty
2455        assert_eq!(Trace::<u64>::remove_redundant_merge_reqs(vec![]), vec![]);
2456
2457        // Single
2458        assert_eq!(
2459            Trace::remove_redundant_merge_reqs(vec![req(0, 1)]),
2460            vec![req(0, 1)]
2461        );
2462
2463        // Duplicate
2464        assert_eq!(
2465            Trace::remove_redundant_merge_reqs(vec![req(0, 1), req(0, 1)]),
2466            vec![req(0, 1)]
2467        );
2468
2469        // Nothing covered
2470        assert_eq!(
2471            Trace::remove_redundant_merge_reqs(vec![req(0, 1), req(1, 2)]),
2472            vec![req(1, 2), req(0, 1)]
2473        );
2474
2475        // Covered
2476        assert_eq!(
2477            Trace::remove_redundant_merge_reqs(vec![req(1, 2), req(0, 3)]),
2478            vec![req(0, 3)]
2479        );
2480
2481        // Covered, lower equal
2482        assert_eq!(
2483            Trace::remove_redundant_merge_reqs(vec![req(0, 2), req(0, 3)]),
2484            vec![req(0, 3)]
2485        );
2486
2487        // Covered, upper equal
2488        assert_eq!(
2489            Trace::remove_redundant_merge_reqs(vec![req(1, 3), req(0, 3)]),
2490            vec![req(0, 3)]
2491        );
2492
2493        // Covered, unexpected order (doesn't happen in practice)
2494        assert_eq!(
2495            Trace::remove_redundant_merge_reqs(vec![req(0, 3), req(1, 2)]),
2496            vec![req(0, 3)]
2497        );
2498
2499        // Partially overlapping
2500        assert_eq!(
2501            Trace::remove_redundant_merge_reqs(vec![req(0, 2), req(1, 3)]),
2502            vec![req(1, 3), req(0, 2)]
2503        );
2504
2505        // Partially overlapping, the other order
2506        assert_eq!(
2507            Trace::remove_redundant_merge_reqs(vec![req(1, 3), req(0, 2)]),
2508            vec![req(0, 2), req(1, 3)]
2509        );
2510
2511        // Different sinces (doesn't happen in practice)
2512        let req015 = FueledMergeReq {
2513            id: SpineId(0, 1),
2514            desc: Description::new(
2515                Antichain::from_elem(0),
2516                Antichain::from_elem(1),
2517                Antichain::from_elem(5),
2518            ),
2519            inputs: vec![],
2520        };
2521        assert_eq!(
2522            Trace::remove_redundant_merge_reqs(vec![req(0, 1), req015.clone()]),
2523            vec![req015, req(0, 1)]
2524        );
2525    }
2526
2527    #[mz_ore::test]
2528    #[cfg_attr(miri, ignore)] // proptest is too heavy for miri!
2529    fn construct_batch_with_runs_replaced_test() {
2530        let batch_strategy = any_hollow_batch::<u64>();
2531        let to_replace_strategy = any_hollow_batch_with_exact_runs::<u64>(1);
2532
2533        let combined_strategy = (batch_strategy, to_replace_strategy)
2534            .prop_filter("non-empty batch", |(batch, _)| batch.run_meta.len() >= 1);
2535
2536        let final_strategy = combined_strategy.prop_flat_map(|(batch, to_replace)| {
2537            let batch_len = batch.run_meta.len();
2538            let batch_clone = batch.clone();
2539            let to_replace_clone = to_replace.clone();
2540
2541            proptest::collection::vec(any::<bool>(), batch_len)
2542                .prop_filter("at least one run selected", |mask| mask.iter().any(|&x| x))
2543                .prop_map(move |mask| {
2544                    let indices: Vec<usize> = mask
2545                        .iter()
2546                        .enumerate()
2547                        .filter_map(|(i, &selected)| if selected { Some(i) } else { None })
2548                        .collect();
2549                    (batch_clone.clone(), to_replace_clone.clone(), indices)
2550                })
2551        });
2552
2553        proptest!(|(
2554            (batch, to_replace, runs) in final_strategy
2555        )| {
2556            let original_run_ids: Vec<_> = batch.run_meta.iter().map(|x|
2557                x.id.unwrap().clone()
2558            ).collect();
2559
2560            let run_ids = runs.iter().map(|&i| original_run_ids[i].clone()).collect::<Vec<_>>();
2561
2562            let new_batch = SpineBatch::construct_batch_with_runs_replaced(
2563                &batch,
2564                &run_ids,
2565                &to_replace,
2566            ).unwrap();
2567
2568            prop_assert!(new_batch.run_meta.len() == batch.run_meta.len() - runs.len() + to_replace.run_meta.len());
2569        });
2570    }
2571}