mz_persist_client/internal/trace.rs

// Copyright Materialize, Inc. and contributors. All rights reserved.
//
// Use of this software is governed by the Business Source License
// included in the LICENSE file.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0.

//! An append-only collection of compactable update batches. The Spine below is
//! a fork of Differential Dataflow's [Spine] with minimal modifications. The
//! original Spine code is designed for incremental (via "fuel"ing) synchronous
//! merge of in-memory batches. Persist doesn't want compaction to block
//! incoming writes and, in fact, may in the future elect to push the work of
//! compaction onto another machine entirely via RPC. As a result, we abuse the
//! Spine code as follows:
//!
//! [Spine]: differential_dataflow::trace::implementations::spine_fueled::Spine
//!
//! - The normal Spine works in terms of [Batch] impls. A `Batch` is added to
//!   the Spine. As progress is made, the Spine will merge two batches together
//!   by: constructing a [Batch::Merger], giving it bits of fuel to
//!   incrementally perform the merge (which spreads out the work, keeping
//!   latencies even), and then once it's done fueling extracting the new single
//!   output `Batch` and discarding the inputs.
//! - Persist instead represents a batch of blob data with a [HollowBatch]
//!   pointer which contains the normal `Batch` metadata plus the keys necessary
//!   to retrieve the updates.
//! - [SpineBatch] wraps `HollowBatch` and has a [FuelingMerge] companion
//!   (analogous to `Batch::Merger`) that allows us to represent a merge as it
//!   is fueling. Normally, this would represent real incremental compaction
//!   progress, but in persist, it's simply a bookkeeping mechanism. Once fully
//!   fueled, the `FuelingMerge` is turned into a fueled [SpineBatch],
//!   which to the Spine is indistinguishable from a merged batch. At this
//!   point, it is eligible for asynchronous compaction and a `FueledMergeReq`
//!   is generated.
//! - At any later point, this request may be answered via
//!   [Trace::apply_merge_res_checked] or [Trace::apply_merge_res_unchecked].
//!   This internally replaces the `SpineBatch`, which has no effect on the
//!   structure of `Spine` but updates the metadata in persist's state to
//!   point at the new batch.
//! - `SpineBatch` is explicitly allowed to accumulate a list of `HollowBatch`s.
//!   This decouples compaction from Spine progress and also allows us to reduce
//!   write amplification by merging `N` batches at once where `N` can be
//!   greater than 2.
//!
//! [Batch]: differential_dataflow::trace::Batch
//! [Batch::Merger]: differential_dataflow::trace::Batch::Merger
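//!
//! A rough sketch of the intended call pattern, added here purely as an
//! illustration (the `u64` timestamps and the empty batch are assumptions made
//! for this example, not part of the original docs):
//!
//! ```ignore
//! let mut trace = Trace::<u64>::default();
//! // Appending a batch may hand back merge requests for background compaction.
//! let reqs = trace.push_batch(HollowBatch::empty(Description::new(
//!     Antichain::from_elem(0),
//!     Antichain::from_elem(10),
//!     Antichain::from_elem(0),
//! )));
//! for _req in reqs {
//!     // Hand the request to the compactor; when it finishes, apply the
//!     // result via `trace.apply_merge_res_unchecked(&res)`.
//! }
//! ```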

use std::cmp::Ordering;
use std::collections::{BTreeMap, BTreeSet};
use std::fmt::{Debug, Display};
use std::mem;
use std::ops::Range;
use std::sync::Arc;

use arrayvec::ArrayVec;
use differential_dataflow::difference::Monoid;
use differential_dataflow::lattice::Lattice;
use differential_dataflow::trace::Description;
use itertools::Itertools;
use mz_ore::cast::CastFrom;
use mz_persist::metrics::ColumnarMetrics;
use mz_persist_types::Codec64;
use serde::{Serialize, Serializer};
use timely::PartialOrder;
use timely::progress::frontier::AntichainRef;
use timely::progress::{Antichain, Timestamp};
use tracing::warn;

use crate::internal::paths::WriterKey;
use crate::internal::state::{HollowBatch, RunId};

use super::state::RunPart;

#[derive(Debug, Clone, PartialEq)]
pub struct FueledMergeReq<T> {
    pub id: SpineId,
    pub desc: Description<T>,
    pub inputs: Vec<IdHollowBatch<T>>,
}

#[derive(Debug)]
pub struct FueledMergeRes<T> {
    pub output: HollowBatch<T>,
    pub input: CompactionInput,
    pub new_active_compaction: Option<ActiveCompaction>,
}

/// An append-only collection of compactable update batches.
///
/// In an effort to keep our fork of Spine as close as possible to the original,
/// we push as many changes as possible into this wrapper.
#[derive(Debug, Clone)]
pub struct Trace<T> {
    spine: Spine<T>,
    pub(crate) roundtrip_structure: bool,
}

#[cfg(any(test, debug_assertions))]
impl<T: PartialEq> PartialEq for Trace<T> {
    fn eq(&self, other: &Self) -> bool {
        // Deconstruct self and other so we get a compile failure if new fields
        // are added.
        let Trace {
            spine: _,
            roundtrip_structure: _,
        } = self;
        let Trace {
            spine: _,
            roundtrip_structure: _,
        } = other;

        // Intentionally use HollowBatches for this comparison so we ignore
        // differences in spine layers.
        self.batches().eq(other.batches())
    }
}

impl<T: Timestamp + Lattice> Default for Trace<T> {
    fn default() -> Self {
        Self {
            spine: Spine::new(),
            roundtrip_structure: true,
        }
    }
}

#[derive(Clone, Debug, Serialize)]
pub struct ThinSpineBatch<T> {
    pub(crate) level: usize,
    pub(crate) desc: Description<T>,
    pub(crate) parts: Vec<SpineId>,
    /// NB: this exists to validate legacy batch bounds during the migration;
    /// it can be deleted once the roundtrip_structure flag is permanently rolled out.
    pub(crate) descs: Vec<Description<T>>,
}

impl<T: PartialEq> PartialEq for ThinSpineBatch<T> {
    fn eq(&self, other: &Self) -> bool {
        // Ignore the temporary descs vector when comparing for equality.
        (self.level, &self.desc, &self.parts).eq(&(other.level, &other.desc, &other.parts))
    }
}

#[derive(Clone, Debug, Eq, PartialEq, Serialize)]
pub struct ThinMerge<T> {
    pub(crate) since: Antichain<T>,
    pub(crate) remaining_work: usize,
    pub(crate) active_compaction: Option<ActiveCompaction>,
}

impl<T: Clone> ThinMerge<T> {
    fn fueling(merge: &FuelingMerge<T>) -> Self {
        ThinMerge {
            since: merge.since.clone(),
            remaining_work: merge.remaining_work,
            active_compaction: None,
        }
    }

    fn fueled(batch: &SpineBatch<T>) -> Self {
        ThinMerge {
            since: batch.desc.since().clone(),
            remaining_work: 0,
            active_compaction: batch.active_compaction.clone(),
        }
    }
}

/// This is a "flattened" representation of a Trace. Goals:
/// - small updates to the trace should result in small differences in the `FlatTrace`;
/// - two `FlatTrace`s should be efficient to diff;
/// - converting to and from a `Trace` should be relatively straightforward.
///
/// These goals are all somewhat in tension, and the space of possible representations is pretty
/// large. See individual fields for comments on some of the tradeoffs.
#[derive(Clone, Debug)]
pub struct FlatTrace<T> {
    pub(crate) since: Antichain<T>,
    /// Hollow batches without an associated ID. If this flattened trace contains spine batches,
    /// we can figure out which legacy batch belongs in which spine batch by comparing the `desc`s.
    /// Previously, we serialized a trace as just this list of batches. Keeping this data around
    /// helps ensure backwards compatibility. In the near future, we may still keep some batches
    /// here to help minimize the size of diffs -- rewriting all the hollow batches in a shard
    /// can be prohibitively expensive. Eventually, we'd like to remove this in favour of the
    /// collection below.
    pub(crate) legacy_batches: BTreeMap<Arc<HollowBatch<T>>, ()>,
    /// Hollow batches _with_ an associated ID. Spine batches can reference these hollow batches
    /// by id directly.
    pub(crate) hollow_batches: BTreeMap<SpineId, Arc<HollowBatch<T>>>,
    /// Spine batches stored by ID. We reference hollow batches by ID, instead of inlining them,
    /// to make differential updates smaller when two batches merge together. We also store the
    /// level on the batch, instead of mapping from level to a list of batches... the level of a
    /// spine batch doesn't change over time, but the list of batches at a particular level does.
    pub(crate) spine_batches: BTreeMap<SpineId, ThinSpineBatch<T>>,
    /// In-progress merges. We store this by spine id instead of level to prepare for some possible
    /// generalizations to spine (merging N of M batches at a level). This is also a natural place
    /// to store incremental merge progress in the future.
    pub(crate) merges: BTreeMap<SpineId, ThinMerge<T>>,
}

impl<T: Timestamp + Lattice> Trace<T> {
    pub(crate) fn flatten(&self) -> FlatTrace<T> {
        let since = self.spine.since.clone();
        let mut legacy_batches = BTreeMap::new();
        let mut hollow_batches = BTreeMap::new();
        let mut spine_batches = BTreeMap::new();
        let mut merges = BTreeMap::new();

        let mut push_spine_batch = |level: usize, batch: &SpineBatch<T>| {
            let id = batch.id();
            let desc = batch.desc.clone();
            let mut parts = Vec::with_capacity(batch.parts.len());
            let mut descs = Vec::with_capacity(batch.parts.len());
            for IdHollowBatch { id, batch } in &batch.parts {
                parts.push(*id);
                descs.push(batch.desc.clone());
                // Ideally, we'd like to put all batches in the hollow_batches collection, since
                // tracking the spine id reduces ambiguity and makes diffing cheaper. However,
                // we currently keep most batches in the legacy collection for backwards
                // compatibility.
                // As an exception, we add batches with empty time ranges to hollow_batches:
                // they're otherwise not guaranteed to be unique, and since we only started writing
                // them down recently there's no backwards compatibility risk.
                if batch.desc.lower() == batch.desc.upper() {
                    hollow_batches.insert(*id, Arc::clone(batch));
                } else {
                    legacy_batches.insert(Arc::clone(batch), ());
                }
            }

            let spine_batch = ThinSpineBatch {
                level,
                desc,
                parts,
                descs,
            };
            spine_batches.insert(id, spine_batch);
        };

        for (level, state) in self.spine.merging.iter().enumerate() {
            for batch in &state.batches {
                push_spine_batch(level, batch);
                if let Some(c) = &batch.active_compaction {
                    let previous = merges.insert(batch.id, ThinMerge::fueled(batch));
                    assert!(
                        previous.is_none(),
                        "recording a compaction for a batch that already exists! (level={level}, id={:?}, compaction={c:?})",
                        batch.id,
                    )
                }
            }
            if let Some(IdFuelingMerge { id, merge }) = state.merge.as_ref() {
                let previous = merges.insert(*id, ThinMerge::fueling(merge));
                assert!(
                    previous.is_none(),
                    "fueling a merge for a batch that already exists! (level={level}, id={id:?}, merge={merge:?})"
                )
            }
        }

        if !self.roundtrip_structure {
            assert!(hollow_batches.is_empty());
            spine_batches.clear();
            merges.clear();
        }

        FlatTrace {
            since,
            legacy_batches,
            hollow_batches,
            spine_batches,
            merges,
        }
    }
    pub(crate) fn unflatten(value: FlatTrace<T>) -> Result<Self, String> {
        let FlatTrace {
            since,
            legacy_batches,
            mut hollow_batches,
            spine_batches,
            mut merges,
        } = value;

        // If the flattened representation has spine batches (or is empty)
        // we know to preserve the structure for this trace.
        let roundtrip_structure = !spine_batches.is_empty() || legacy_batches.is_empty();

        // We need to look up legacy batches somehow, but we don't have a spine id for them.
        // Instead, we rely on the fact that the spine must store them in antichain order.
        // Our timestamp type may not be totally ordered, so we need to implement our own comparator
        // here. Persist's invariants ensure that all the frontiers we're comparing are comparable,
        // though.
        let compare_chains = |left: &Antichain<T>, right: &Antichain<T>| {
            if PartialOrder::less_than(left, right) {
                Ordering::Less
            } else if PartialOrder::less_than(right, left) {
                Ordering::Greater
            } else {
                Ordering::Equal
            }
        };
        let mut legacy_batches: Vec<_> = legacy_batches.into_iter().map(|(k, _)| k).collect();
        legacy_batches.sort_by(|a, b| compare_chains(a.desc.lower(), b.desc.lower()).reverse());

        let mut pop_batch =
            |id: SpineId, expected_desc: Option<&Description<T>>| -> Result<_, String> {
                if let Some(batch) = hollow_batches.remove(&id) {
                    if let Some(desc) = expected_desc {
                        // We don't expect the desc's upper and lower to change for a given spine id.
                        assert_eq!(desc.lower(), batch.desc.lower());
                        assert_eq!(desc.upper(), batch.desc.upper());
                        // Due to the way thin spine batches are diffed, the sinces can be out of sync.
                        // This should be rare, and hopefully impossible once we change how diffs work.
                        if desc.since() != batch.desc.since() {
                            warn!(
                                "unexpected since out of sync for spine batch: {:?} != {:?}",
                                desc.since().elements(),
                                batch.desc.since().elements()
                            );
                        }
                    }
                    return Ok(IdHollowBatch { id, batch });
                }
                let mut batch = legacy_batches
                    .pop()
                    .ok_or_else(|| format!("missing referenced hollow batch {id:?}"))?;

                let Some(expected_desc) = expected_desc else {
                    return Ok(IdHollowBatch { id, batch });
                };

                if expected_desc.lower() != batch.desc.lower() {
                    return Err(format!(
                        "hollow batch lower {:?} did not match expected lower {:?}",
                        batch.desc.lower().elements(),
                        expected_desc.lower().elements()
                    ));
                }

                // Empty legacy batches are not deterministic: different nodes may split them up
                // in different ways. For now, we rearrange them to match the spine data.
                if batch.parts.is_empty() && batch.run_splits.is_empty() && batch.len == 0 {
                    let mut new_upper = batch.desc.upper().clone();

                    // While our current batch is too small, and there's another empty batch
                    // in the list, roll it in.
                    while PartialOrder::less_than(&new_upper, expected_desc.upper()) {
                        let Some(next_batch) = legacy_batches.pop() else {
                            break;
                        };
                        if next_batch.is_empty() {
                            new_upper.clone_from(next_batch.desc.upper());
                        } else {
                            legacy_batches.push(next_batch);
                            break;
                        }
                    }

                    // If our current batch is too large, split it by the expected upper
                    // and preserve the remainder.
                    if PartialOrder::less_than(expected_desc.upper(), &new_upper) {
                        legacy_batches.push(Arc::new(HollowBatch::empty(Description::new(
                            expected_desc.upper().clone(),
                            new_upper.clone(),
                            batch.desc.since().clone(),
                        ))));
                        new_upper.clone_from(expected_desc.upper());
                    }
                    batch = Arc::new(HollowBatch::empty(Description::new(
                        batch.desc.lower().clone(),
                        new_upper,
                        batch.desc.since().clone(),
                    )))
                }

                if expected_desc.upper() != batch.desc.upper() {
                    return Err(format!(
                        "hollow batch upper {:?} did not match expected upper {:?}",
                        batch.desc.upper().elements(),
                        expected_desc.upper().elements()
                    ));
                }

                Ok(IdHollowBatch { id, batch })
            };

        let (upper, next_id) = if let Some((id, batch)) = spine_batches.last_key_value() {
            (batch.desc.upper().clone(), id.1)
        } else {
            (Antichain::from_elem(T::minimum()), 0)
        };
        let levels = spine_batches
            .first_key_value()
            .map(|(_, batch)| batch.level + 1)
            .unwrap_or(0);
        let mut merging = vec![MergeState::default(); levels];
        for (id, batch) in spine_batches {
            let level = batch.level;

            let descs = batch.descs.iter().map(Some).chain(std::iter::repeat_n(
                None,
                batch.parts.len() - batch.descs.len(),
            ));
            let parts = batch
                .parts
                .into_iter()
                .zip_eq(descs)
                .map(|(id, desc)| pop_batch(id, desc))
                .collect::<Result<Vec<_>, _>>()?;
            let len = parts.iter().map(|p| (*p).batch.len).sum();
            let active_compaction = merges.remove(&id).and_then(|m| m.active_compaction);
            let batch = SpineBatch {
                id,
                desc: batch.desc,
                parts,
                active_compaction,
                len,
            };

            let state = &mut merging[level];

            state.push_batch(batch);
            if let Some(id) = state.id() {
                if let Some(merge) = merges.remove(&id) {
                    state.merge = Some(IdFuelingMerge {
                        id,
                        merge: FuelingMerge {
                            since: merge.since,
                            remaining_work: merge.remaining_work,
                        },
                    })
                }
            }
        }

        let mut trace = Trace {
            spine: Spine {
                effort: 1,
                next_id,
                since,
                upper,
                merging,
            },
            roundtrip_structure,
        };

        fn check_empty(name: &str, len: usize) -> Result<(), String> {
            if len != 0 {
                Err(format!("{len} {name} left after reconstructing spine"))
            } else {
                Ok(())
            }
        }

        if roundtrip_structure {
            check_empty("legacy batches", legacy_batches.len())?;
        } else {
            // If the structure wasn't actually serialized, we may have legacy batches left over.
            for batch in legacy_batches.into_iter().rev() {
                trace.push_batch_no_merge_reqs(Arc::unwrap_or_clone(batch));
            }
        }
        check_empty("hollow batches", hollow_batches.len())?;
        check_empty("merges", merges.len())?;

        debug_assert_eq!(trace.validate(), Ok(()), "{:?}", trace);

        Ok(trace)
    }
}
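
// A quick sketch (not part of the original file) of the `flatten`/`unflatten`
// round trip described above; an empty `u64` trace is an assumption made to
// keep the example small.
#[cfg(test)]
mod flat_trace_roundtrip_sketch {
    use super::*;

    #[test]
    fn roundtrips_an_empty_trace() {
        let trace = Trace::<u64>::default();
        let flat = trace.flatten();
        let roundtripped = Trace::unflatten(flat).expect("flattened trace should unflatten");
        // The cfg(test) PartialEq impl compares the hollow batches of the two traces.
        assert_eq!(roundtripped, trace);
    }
}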

#[derive(Clone, Debug, Default)]
pub(crate) struct SpineMetrics {
    pub compact_batches: u64,
    pub compacting_batches: u64,
    pub noncompact_batches: u64,
}

impl<T> Trace<T> {
    pub fn since(&self) -> &Antichain<T> {
        &self.spine.since
    }

    pub fn upper(&self) -> &Antichain<T> {
        &self.spine.upper
    }

    pub fn map_batches<'a, F: FnMut(&'a HollowBatch<T>)>(&'a self, mut f: F) {
        for batch in self.batches() {
            f(batch);
        }
    }

    pub fn batches(&self) -> impl Iterator<Item = &HollowBatch<T>> {
        self.spine
            .spine_batches()
            .flat_map(|b| b.parts.as_slice())
            .map(|b| &*b.batch)
    }

    pub fn num_spine_batches(&self) -> usize {
        self.spine.spine_batches().count()
    }

    #[cfg(test)]
    pub fn num_hollow_batches(&self) -> usize {
        self.batches().count()
    }

    #[cfg(test)]
    pub fn num_updates(&self) -> usize {
        self.batches().map(|b| b.len).sum()
    }
}

impl<T: Timestamp + Lattice> Trace<T> {
    pub fn downgrade_since(&mut self, since: &Antichain<T>) {
        self.spine.since.clone_from(since);
    }

    #[must_use]
    pub fn push_batch(&mut self, batch: HollowBatch<T>) -> Vec<FueledMergeReq<T>> {
        let mut merge_reqs = Vec::new();
        self.spine.insert(
            batch,
            &mut SpineLog::Enabled {
                merge_reqs: &mut merge_reqs,
            },
        );
        debug_assert_eq!(self.spine.validate(), Ok(()), "{:?}", self);
        // Spine::roll_up (internally used by insert) clears all batches out of
        // levels below a target by walking up from level 0 and merging each
        // level into the next (providing the necessary fuel). In practice, this
        // means we'll get a series of requests like `(a, b), (a, b, c), ...`.
        // It's a waste to do all of these (we'll throw away the results), so we
        // filter out any that are entirely covered by some other request.
        Self::remove_redundant_merge_reqs(merge_reqs)
    }

    pub fn claim_compaction(&mut self, id: SpineId, compaction: ActiveCompaction) {
        // TODO: we ought to be able to look up the id for a batch by binary searching the levels.
        // In the meantime, search backwards, since most compactions are for recent batches.
        for batch in self.spine.spine_batches_mut().rev() {
            if batch.id == id {
                batch.active_compaction = Some(compaction);
                break;
            }
        }
    }

    /// The same as [Self::push_batch] but without the `FueledMergeReq`s, which
    /// account for a surprising amount of cpu in prod. database-issues#5411
    pub(crate) fn push_batch_no_merge_reqs(&mut self, batch: HollowBatch<T>) {
        self.spine.insert(batch, &mut SpineLog::Disabled);
    }

    /// Apply some amount of effort to trace maintenance.
    ///
    /// The units of effort are updates, and the method should be thought of as
    /// analogous to inserting as many empty updates, where the trace is
    /// permitted to perform proportionate work.
    ///
    /// Returns true if this did work and false if it left the spine unchanged.
    #[must_use]
    pub fn exert(&mut self, fuel: usize) -> (Vec<FueledMergeReq<T>>, bool) {
        let mut merge_reqs = Vec::new();
        let did_work = self.spine.exert(
            fuel,
            &mut SpineLog::Enabled {
                merge_reqs: &mut merge_reqs,
            },
        );
        debug_assert_eq!(self.spine.validate(), Ok(()), "{:?}", self);
        // See the comment in [Self::push_batch].
        let merge_reqs = Self::remove_redundant_merge_reqs(merge_reqs);
        (merge_reqs, did_work)
    }

    /// Validates invariants.
    ///
    /// See `Spine::validate` for details.
    pub fn validate(&self) -> Result<(), String> {
        self.spine.validate()
    }

    /// Obtain all fueled merge reqs that either have no active compaction, or the previous
    /// compaction was started at or before the threshold time, in order from oldest to newest.
    pub(crate) fn fueled_merge_reqs_before_ms(
        &self,
        threshold_ms: u64,
        threshold_writer: Option<WriterKey>,
    ) -> impl Iterator<Item = FueledMergeReq<T>> + '_ {
        self.spine
            .spine_batches()
            .filter(move |b| {
                let noncompact = !b.is_compact();
                let old_writer = threshold_writer.as_ref().map_or(false, |min_writer| {
                    b.parts.iter().any(|b| {
                        b.batch
                            .parts
                            .iter()
                            .any(|p| p.writer_key().map_or(false, |writer| writer < *min_writer))
                    })
                });
                noncompact || old_writer
            })
            .filter(move |b| {
                // Either there's no active compaction, or the last active compaction
                // is not after the timeout timestamp.
                b.active_compaction
                    .as_ref()
                    .map_or(true, move |c| c.start_ms <= threshold_ms)
            })
            .map(|b| FueledMergeReq {
                id: b.id,
                desc: b.desc.clone(),
                inputs: b.parts.clone(),
            })
    }

    // This is only called with the results of one `insert` and so the length of
    // `merge_reqs` is bounded by the number of levels in the spine (or possibly
    // some small constant multiple?). The number of levels is logarithmic in the
    // number of updates in the spine, so this number should stay very small. As
    // a result, we simply use the naive O(n^2) algorithm here instead of doing
    // anything fancy with e.g. interval trees.
    fn remove_redundant_merge_reqs(
        mut merge_reqs: Vec<FueledMergeReq<T>>,
    ) -> Vec<FueledMergeReq<T>> {
        // Returns true if b0 covers b1, false otherwise.
        fn covers<T: PartialOrder>(b0: &FueledMergeReq<T>, b1: &FueledMergeReq<T>) -> bool {
            // TODO: can we relax or remove this since check?
            b0.id.covers(b1.id) && b0.desc.since() == b1.desc.since()
        }

        let mut ret = Vec::<FueledMergeReq<T>>::with_capacity(merge_reqs.len());
        // In practice, merge_reqs will come in sorted such that the "large"
        // requests are later. Take advantage of this by processing back to
        // front.
        while let Some(merge_req) = merge_reqs.pop() {
            let covered = ret.iter().any(|r| covers(r, &merge_req));
            if !covered {
                // Now check if anything we've already staged is covered by this
                // new req. In practice, the merge_reqs come in sorted and so
                // this `retain` is a no-op.
                ret.retain(|r| !covers(&merge_req, r));
                ret.push(merge_req);
            }
        }
        ret
    }

    pub fn spine_metrics(&self) -> SpineMetrics {
        let mut metrics = SpineMetrics::default();
        for batch in self.spine.spine_batches() {
            if batch.is_compact() {
                metrics.compact_batches += 1;
            } else if batch.is_merging() {
                metrics.compacting_batches += 1;
            } else {
                metrics.noncompact_batches += 1;
            }
        }
        metrics
    }
}
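
// A minimal sketch (not part of the original file) of how
// `remove_redundant_merge_reqs` drops a request that is entirely covered by a
// wider one; the `u64` timestamps and empty `inputs` are assumptions made for
// brevity.
#[cfg(test)]
mod remove_redundant_merge_reqs_sketch {
    use super::*;

    #[test]
    fn covered_reqs_are_dropped() {
        let desc = |lower: u64, upper: u64| {
            Description::new(
                Antichain::from_elem(lower),
                Antichain::from_elem(upper),
                Antichain::from_elem(0),
            )
        };
        // The second request covers the first (same since, wider id range), so
        // only the covering request survives.
        let reqs = vec![
            FueledMergeReq {
                id: SpineId(0, 2),
                desc: desc(0, 5),
                inputs: vec![],
            },
            FueledMergeReq {
                id: SpineId(0, 4),
                desc: desc(0, 10),
                inputs: vec![],
            },
        ];
        let remaining = Trace::<u64>::remove_redundant_merge_reqs(reqs);
        assert_eq!(remaining.len(), 1);
        assert_eq!(remaining[0].id, SpineId(0, 4));
    }
}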

impl<T: Timestamp + Lattice + Codec64> Trace<T> {
    pub fn apply_merge_res_checked<D: Codec64 + Monoid + PartialEq>(
        &mut self,
        res: &FueledMergeRes<T>,
        metrics: &ColumnarMetrics,
    ) -> ApplyMergeResult {
        for batch in self.spine.spine_batches_mut().rev() {
            let result = batch.maybe_replace_checked::<D>(res, metrics);
            if result.matched() {
                return result;
            }
        }
        ApplyMergeResult::NotAppliedNoMatch
    }

    pub fn apply_merge_res_unchecked(&mut self, res: &FueledMergeRes<T>) -> ApplyMergeResult {
        for batch in self.spine.spine_batches_mut().rev() {
            let result = batch.maybe_replace_unchecked(res);
            if result.matched() {
                return result;
            }
        }
        ApplyMergeResult::NotAppliedNoMatch
    }

    pub fn apply_tombstone_merge(&mut self, desc: &Description<T>) -> ApplyMergeResult {
        for batch in self.spine.spine_batches_mut().rev() {
            let result = batch.maybe_replace_with_tombstone(desc);
            if result.matched() {
                return result;
            }
        }
        ApplyMergeResult::NotAppliedNoMatch
    }
}

/// A log of what transitively happened during a Spine operation: e.g.
/// FueledMergeReqs were generated.
enum SpineLog<'a, T> {
    Enabled {
        merge_reqs: &'a mut Vec<FueledMergeReq<T>>,
    },
    Disabled,
}

#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub enum CompactionInput {
    /// We don't know what our inputs were; this should only be used for
    /// unchecked legacy replacements.
    Legacy,
    /// This compaction output is a total replacement for all batches in this id range.
    IdRange(SpineId),
    /// This compaction output replaces the specified runs in this id range.
    PartialBatch(SpineId, BTreeSet<RunId>),
}

#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct SpineId(pub usize, pub usize);

impl Display for SpineId {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "[{}, {})", self.0, self.1)
    }
}

impl Serialize for SpineId {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        let SpineId(lo, hi) = self;
        serializer.serialize_str(&format!("{lo}-{hi}"))
    }
}

/// Creates a `SpineId` that covers the range of ids in the set.
pub fn id_range(ids: BTreeSet<SpineId>) -> SpineId {
    let mut id_iter = ids.iter().copied();
    let Some(mut result) = id_iter.next() else {
        panic!("at least one batch must be present")
    };

    for id in id_iter {
        assert_eq!(
            result.1, id.0,
            "expected contiguous ids, but {result:?} is not adjacent to {id:?} in ids {ids:?}"
        );
        result.1 = id.1;
    }
    result
}

impl SpineId {
    fn covers(self, other: SpineId) -> bool {
        self.0 <= other.0 && other.1 <= self.1
    }
}
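
// A small illustrative test (not in the original file) for the `SpineId` range
// semantics above: `covers` is half-open interval containment and `id_range`
// unions a contiguous set of ids into one covering id.
#[cfg(test)]
mod spine_id_sketch {
    use super::*;

    #[test]
    fn covers_and_id_range() {
        let left = SpineId(0, 2);
        let right = SpineId(2, 5);
        assert!(SpineId(0, 5).covers(left));
        assert!(SpineId(0, 5).covers(right));
        assert!(!left.covers(right));
        // Contiguous ids union into a single covering id.
        assert_eq!(id_range(BTreeSet::from([left, right])), SpineId(0, 5));
    }
}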

#[derive(Debug, Clone, PartialEq)]
pub struct IdHollowBatch<T> {
    pub id: SpineId,
    pub batch: Arc<HollowBatch<T>>,
}

#[derive(Debug, Clone, Eq, PartialEq, Serialize)]
pub struct ActiveCompaction {
    pub start_ms: u64,
}

#[derive(Debug, Clone, PartialEq)]
struct SpineBatch<T> {
    id: SpineId,
    desc: Description<T>,
    parts: Vec<IdHollowBatch<T>>,
    active_compaction: Option<ActiveCompaction>,
    // A cached version of parts.iter().map(|x| x.len).sum()
    len: usize,
}

impl<T> SpineBatch<T> {
    fn merged(batch: IdHollowBatch<T>) -> Self
    where
        T: Clone,
    {
        Self {
            id: batch.id,
            desc: batch.batch.desc.clone(),
            len: batch.batch.len,
            parts: vec![batch],
            active_compaction: None,
        }
    }
}

#[derive(Debug, Copy, Clone)]
pub enum ApplyMergeResult {
    AppliedExact,
    AppliedSubset,
    NotAppliedNoMatch,
    NotAppliedInvalidSince,
    NotAppliedTooManyUpdates,
}

impl ApplyMergeResult {
    pub fn applied(&self) -> bool {
        match self {
            ApplyMergeResult::AppliedExact | ApplyMergeResult::AppliedSubset => true,
            _ => false,
        }
    }
    pub fn matched(&self) -> bool {
        match self {
            ApplyMergeResult::AppliedExact
            | ApplyMergeResult::AppliedSubset
            | ApplyMergeResult::NotAppliedTooManyUpdates => true,
            _ => false,
        }
    }
}
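
// A minimal sketch (not in the original file) of the `applied` vs. `matched`
// distinction: a "too many updates" result matched a batch but was not applied.
#[cfg(test)]
mod apply_merge_result_sketch {
    use super::*;

    #[test]
    fn matched_vs_applied() {
        assert!(ApplyMergeResult::AppliedExact.applied());
        assert!(ApplyMergeResult::AppliedExact.matched());
        assert!(!ApplyMergeResult::NotAppliedTooManyUpdates.applied());
        assert!(ApplyMergeResult::NotAppliedTooManyUpdates.matched());
        assert!(!ApplyMergeResult::NotAppliedNoMatch.matched());
    }
}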

impl<T: Timestamp + Lattice> SpineBatch<T> {
    pub fn lower(&self) -> &Antichain<T> {
        self.desc().lower()
    }

    pub fn upper(&self) -> &Antichain<T> {
        self.desc().upper()
    }

    fn id(&self) -> SpineId {
        debug_assert_eq!(self.parts.first().map(|x| x.id.0), Some(self.id.0));
        debug_assert_eq!(self.parts.last().map(|x| x.id.1), Some(self.id.1));
        self.id
    }

    pub fn is_compact(&self) -> bool {
        // A compact batch has at most one run.
        // This check used to be if there was at most one hollow batch with at most one run,
        // but that was a bit too strict since introducing incremental compaction.
        // Incremental compaction can result in a batch with a single run, but multiple empty
        // hollow batches, which we still consider compact. As levels are merged, we
        // will eventually clean up the empty hollow batches.
        self.parts
            .iter()
            .map(|p| p.batch.run_meta.len())
            .sum::<usize>()
            <= 1
    }

    pub fn is_merging(&self) -> bool {
        self.active_compaction.is_some()
    }

    fn desc(&self) -> &Description<T> {
        &self.desc
    }

    pub fn len(&self) -> usize {
        // NB: This is an upper bound on len for a non-compact batch; we won't know for sure until
        // we compact it.
        debug_assert_eq!(
            self.len,
            self.parts.iter().map(|x| x.batch.len).sum::<usize>()
        );
        self.len
    }

    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }

    pub fn empty(
        id: SpineId,
        lower: Antichain<T>,
        upper: Antichain<T>,
        since: Antichain<T>,
    ) -> Self {
        SpineBatch::merged(IdHollowBatch {
            id,
            batch: Arc::new(HollowBatch::empty(Description::new(lower, upper, since))),
        })
    }

    pub fn begin_merge(
        bs: &[Self],
        compaction_frontier: Option<AntichainRef<T>>,
    ) -> Option<IdFuelingMerge<T>> {
        let from = bs.first()?.id().0;
        let until = bs.last()?.id().1;
        let id = SpineId(from, until);
        let mut sinces = bs.iter().map(|b| b.desc().since());
        let mut since = sinces.next()?.clone();
        for b in bs {
            since.join_assign(b.desc().since())
        }
        if let Some(compaction_frontier) = compaction_frontier {
            since.join_assign(&compaction_frontier.to_owned());
        }
        let remaining_work = bs.iter().map(|x| x.len()).sum();
        Some(IdFuelingMerge {
            id,
            merge: FuelingMerge {
                since,
                remaining_work,
            },
        })
    }

    #[cfg(test)]
    fn describe(&self, extended: bool) -> String {
        let SpineBatch {
            id,
            parts,
            desc,
            active_compaction,
            len,
        } = self;
        let compaction = match active_compaction {
            None => "".to_owned(),
            Some(c) => format!(" (c@{})", c.start_ms),
        };
        match extended {
            false => format!(
                "[{}-{}]{:?}{:?}{}/{}{compaction}",
                id.0,
                id.1,
                desc.lower().elements(),
                desc.upper().elements(),
                parts.len(),
                len
            ),
            true => {
                format!(
                    "[{}-{}]{:?}{:?}{:?} {}/{}{}{compaction}",
                    id.0,
                    id.1,
                    desc.lower().elements(),
                    desc.upper().elements(),
                    desc.since().elements(),
                    parts.len(),
                    len,
                    parts
                        .iter()
                        .flat_map(|x| x.batch.parts.iter())
                        .map(|x| format!(" {}", x.printable_name()))
                        .collect::<Vec<_>>()
                        .join("")
                )
            }
        }
    }
}
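
// An illustrative sketch (not in the original file) of how `begin_merge`
// derives the covering `SpineId` and joined since for a run of batches; the
// `u64` timestamps and empty batches are assumptions made for brevity.
#[cfg(test)]
mod begin_merge_sketch {
    use super::*;

    #[test]
    fn covering_id_and_joined_since() {
        let batch = |id: SpineId, lower: u64, upper: u64, since: u64| {
            SpineBatch::empty(
                id,
                Antichain::from_elem(lower),
                Antichain::from_elem(upper),
                Antichain::from_elem(since),
            )
        };
        let batches = vec![batch(SpineId(0, 1), 0, 5, 1), batch(SpineId(1, 2), 5, 10, 2)];
        let merge = SpineBatch::begin_merge(&batches, None).expect("non-empty input");
        assert_eq!(merge.id, SpineId(0, 2));
        // The merge's since is the join of the input sinces.
        assert_eq!(merge.merge.since, Antichain::from_elem(2));
        assert_eq!(merge.merge.remaining_work, 0);
    }
}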

impl<T: Timestamp + Lattice + Codec64> SpineBatch<T> {
    fn diffs_sum<'a, D: Monoid + Codec64>(
        parts: impl IntoIterator<Item = &'a RunPart<T>>,
        metrics: &ColumnarMetrics,
    ) -> Option<D> {
        let mut sum = D::zero();
        for part in parts {
            sum.plus_equals(&part.diffs_sum::<D>(metrics)?);
        }
        Some(sum)
    }

    /// Get the diff sum from the given batch for the given runs.
    /// Returns `None` if the runs aren't present or any parts don't have statistics.
    fn diffs_sum_for_runs<D: Monoid + Codec64>(
        batch: &HollowBatch<T>,
        run_ids: &[RunId],
        metrics: &ColumnarMetrics,
    ) -> Option<D> {
        let mut run_ids = BTreeSet::from_iter(run_ids.iter().copied());
        let mut sum = D::zero();

        for (meta, run) in batch.runs() {
            let id = meta.id?;
            if run_ids.remove(&id) {
                sum.plus_equals(&Self::diffs_sum(run, metrics)?);
            }
        }

        run_ids.is_empty().then_some(sum)
    }

    fn maybe_replace_with_tombstone(&mut self, desc: &Description<T>) -> ApplyMergeResult {
        let exact_match =
            desc.lower() == self.desc().lower() && desc.upper() == self.desc().upper();

        let empty_batch = HollowBatch::empty(desc.clone());
        if exact_match {
            *self = SpineBatch::merged(IdHollowBatch {
                id: self.id(),
                batch: Arc::new(empty_batch),
            });
            return ApplyMergeResult::AppliedExact;
        }

        if let Some((id, range)) = self.find_replacement_range(desc) {
            self.perform_subset_replacement(&empty_batch, id, range, None)
        } else {
            ApplyMergeResult::NotAppliedNoMatch
        }
    }

    fn construct_batch_with_runs_replaced(
        original: &HollowBatch<T>,
        run_ids: &[RunId],
        replacement: &HollowBatch<T>,
    ) -> Result<HollowBatch<T>, ApplyMergeResult> {
        if run_ids.is_empty() {
            return Err(ApplyMergeResult::NotAppliedNoMatch);
        }

        let orig_run_ids: BTreeSet<_> = original.runs().filter_map(|(meta, _)| meta.id).collect();
        let run_ids: BTreeSet<_> = run_ids.iter().cloned().collect();
        if !orig_run_ids.is_superset(&run_ids) {
            return Err(ApplyMergeResult::NotAppliedNoMatch);
        }

        let runs: Vec<_> = original
            .runs()
            .filter(|(meta, _)| {
                !run_ids.contains(&meta.id.expect("id should be present at this point"))
            })
            .chain(replacement.runs())
            .collect();

        let len = runs.iter().filter_map(|(meta, _)| meta.len).sum::<usize>();

        let run_meta = runs
            .iter()
            .map(|(meta, _)| *meta)
            .cloned()
            .collect::<Vec<_>>();

        let parts = runs
            .iter()
            .flat_map(|(_, parts)| *parts)
            .cloned()
            .collect::<Vec<_>>();

        let run_splits = {
            let mut splits = Vec::with_capacity(run_meta.len().saturating_sub(1));
            let mut pointer = 0;
            for (i, (_, parts)) in runs.into_iter().enumerate() {
                if parts.is_empty() {
                    continue;
                }
                if i < run_meta.len() - 1 {
                    splits.push(pointer + parts.len());
                }
                pointer += parts.len();
            }
            splits
        };

        Ok(HollowBatch::new(
            replacement.desc.clone(),
            parts,
            len,
            run_meta,
            run_splits,
        ))
    }

    fn maybe_replace_checked<D>(
        &mut self,
        res: &FueledMergeRes<T>,
        metrics: &ColumnarMetrics,
    ) -> ApplyMergeResult
    where
        D: Monoid + Codec64 + PartialEq + Debug,
    {
        // The spine's and merge res's sinces don't need to match (which could occur if Spine
        // has been reloaded from state due to compare_and_set mismatch), but if so, the Spine
        // since must be in advance of the merge res since.
        if !PartialOrder::less_equal(res.output.desc.since(), self.desc().since()) {
            return ApplyMergeResult::NotAppliedInvalidSince;
        }

        let new_diffs_sum = Self::diffs_sum(res.output.parts.iter(), metrics);
        let num_batches = self.parts.len();

        let result = match &res.input {
            CompactionInput::IdRange(id) => {
                self.handle_id_range_replacement::<D>(res, id, new_diffs_sum, metrics)
            }
            CompactionInput::PartialBatch(id, runs) => {
                self.handle_partial_batch_replacement::<D>(res, *id, runs, new_diffs_sum, metrics)
            }
            CompactionInput::Legacy => self.maybe_replace_checked_classic::<D>(res, metrics),
        };

        let num_batches_after = self.parts.len();
        assert!(
            num_batches_after <= num_batches,
            "replacing parts should not increase the number of batches"
        );
        result
    }

    fn handle_id_range_replacement<D>(
        &mut self,
        res: &FueledMergeRes<T>,
        id: &SpineId,
        new_diffs_sum: Option<D>,
        metrics: &ColumnarMetrics,
    ) -> ApplyMergeResult
    where
        D: Monoid + Codec64 + PartialEq + Debug,
    {
        let range = self
            .parts
            .iter()
            .enumerate()
            .filter_map(|(i, p)| {
                if id.covers(p.id) {
                    Some((i, p.id))
                } else {
                    None
                }
            })
            .collect::<Vec<_>>();

        let ids: BTreeSet<_> = range.iter().map(|(_, id)| *id).collect();

        // If ids is empty, it means that we didn't find any parts that match the id range.
        // We also check that the id matches the range of ids we found.
        // At scale, sometimes regular compaction will race forced compaction,
        // for things like the catalog. In that case, we may have a
        // replacement that no longer lines up with the spine batches.
        // I think this is because forced compaction ignores the active_compaction
        // and just goes for it. This is slightly annoying but probably the right behavior
        // for a function whose prefix is `force_`, so we just return
        // NotAppliedNoMatch here.
        if ids.is_empty() || id != &id_range(ids) {
            return ApplyMergeResult::NotAppliedNoMatch;
        }

        let range: BTreeSet<_> = range.iter().map(|(i, _)| *i).collect();

        // This is the range of hollow batches that we will replace.
        let min = *range.iter().min().unwrap();
        let max = *range.iter().max().unwrap();
        let replacement_range = min..max + 1;

        // We need to replace a range of parts. Here we don't care about the run_indices
        // because we must be replacing the entire part(s)
        let old_diffs_sum = Self::diffs_sum::<D>(
            self.parts[replacement_range.clone()]
                .iter()
                .flat_map(|p| p.batch.parts.iter()),
            metrics,
        );

        Self::validate_diffs_sum_match(old_diffs_sum, new_diffs_sum, "id range replacement");

        self.perform_subset_replacement(
            &res.output,
            *id,
            replacement_range,
            res.new_active_compaction.clone(),
        )
    }

    fn handle_partial_batch_replacement<D>(
        &mut self,
        res: &FueledMergeRes<T>,
        id: SpineId,
        runs: &BTreeSet<RunId>,
        new_diffs_sum: Option<D>,
        metrics: &ColumnarMetrics,
    ) -> ApplyMergeResult
    where
        D: Monoid + Codec64 + PartialEq + Debug,
    {
        if runs.is_empty() {
            return ApplyMergeResult::NotAppliedNoMatch;
        }

        let part = self.parts.iter().enumerate().find(|(_, p)| p.id == id);
        let Some((i, batch)) = part else {
            return ApplyMergeResult::NotAppliedNoMatch;
        };
        let replacement_range = i..(i + 1);

        let replacement_desc = &res.output.desc;
        let existing_desc = &batch.batch.desc;
        assert_eq!(
            replacement_desc.lower(),
            existing_desc.lower(),
            "batch lower should match, but {:?} != {:?}",
            replacement_desc.lower(),
            existing_desc.lower()
        );
        assert_eq!(
            replacement_desc.upper(),
            existing_desc.upper(),
            "batch upper should match, but {:?} != {:?}",
            replacement_desc.upper(),
            existing_desc.upper()
        );

        let batch = &batch.batch;
        let run_ids = runs.iter().cloned().collect::<Vec<_>>();

        match Self::construct_batch_with_runs_replaced(batch, &run_ids, &res.output) {
            Ok(new_batch) => {
                let old_diffs_sum = Self::diffs_sum_for_runs::<D>(batch, &run_ids, metrics);
                Self::validate_diffs_sum_match(
                    old_diffs_sum,
                    new_diffs_sum,
                    "partial batch replacement",
                );
                let old_batch_diff_sum = Self::diffs_sum::<D>(batch.parts.iter(), metrics);
                let new_batch_diff_sum = Self::diffs_sum::<D>(new_batch.parts.iter(), metrics);
                Self::validate_diffs_sum_match(
                    old_batch_diff_sum,
                    new_batch_diff_sum,
                    "sanity checking diffs sum for replaced runs",
                );
                self.perform_subset_replacement(
                    &new_batch,
                    id,
                    replacement_range,
                    res.new_active_compaction.clone(),
                )
            }
            Err(err) => err,
        }
    }

    fn validate_diffs_sum_match<D>(
        old_diffs_sum: Option<D>,
        new_diffs_sum: Option<D>,
        context: &str,
    ) where
        D: Monoid + Codec64 + PartialEq + Debug,
    {
        let new_diffs_sum = new_diffs_sum.unwrap_or_else(D::zero);
        if let Some(old_diffs_sum) = old_diffs_sum {
            assert_eq!(
                old_diffs_sum, new_diffs_sum,
                "merge res diffs sum ({:?}) did not match spine batch diffs sum ({:?}) ({})",
                new_diffs_sum, old_diffs_sum, context
            )
        }
    }

    /// This is the "legacy" way of replacing a spine batch with a merge result.
    /// It is used in cases where we don't have the full compaction input
    /// information.
    /// Eventually we should strive to roundtrip Spine IDs everywhere and
    /// deprecate this method.
    fn maybe_replace_checked_classic<D>(
        &mut self,
        res: &FueledMergeRes<T>,
        metrics: &ColumnarMetrics,
    ) -> ApplyMergeResult
    where
        D: Monoid + Codec64 + PartialEq + Debug,
    {
        // The spine's and merge res's sinces don't need to match (which could occur if Spine
        // has been reloaded from state due to compare_and_set mismatch), but if so, the Spine
        // since must be in advance of the merge res since.
        if !PartialOrder::less_equal(res.output.desc.since(), self.desc().since()) {
            return ApplyMergeResult::NotAppliedInvalidSince;
        }

        let new_diffs_sum = Self::diffs_sum(res.output.parts.iter(), metrics);

        // If our merge result exactly matches a spine batch, we can swap it in directly
        let exact_match = res.output.desc.lower() == self.desc().lower()
            && res.output.desc.upper() == self.desc().upper();
        if exact_match {
            let old_diffs_sum = Self::diffs_sum::<D>(
                self.parts.iter().flat_map(|p| p.batch.parts.iter()),
                metrics,
            );

            if let (Some(old_diffs_sum), Some(new_diffs_sum)) = (old_diffs_sum, new_diffs_sum) {
                assert_eq!(
                    old_diffs_sum, new_diffs_sum,
                    "merge res diffs sum ({:?}) did not match spine batch diffs sum ({:?})",
                    new_diffs_sum, old_diffs_sum
                );
            }

            // Spine internally has an invariant about a batch being at some level
            // or higher based on the len. We could end up violating this invariant
            // if we increased the length of the batch.
            //
            // A res output with length greater than the existing spine batch implies
            // a compaction has already been applied to this range, and with a higher
            // rate of consolidation than this one. This could happen as a result of
            // compaction's memory bound limiting the amount of consolidation possible.
            if res.output.len > self.len() {
                return ApplyMergeResult::NotAppliedTooManyUpdates;
            }
            *self = SpineBatch::merged(IdHollowBatch {
                id: self.id(),
                batch: Arc::new(res.output.clone()),
            });
            return ApplyMergeResult::AppliedExact;
        }

        // Try subset replacement
        if let Some((id, range)) = self.find_replacement_range(&res.output.desc) {
            let old_diffs_sum = Self::diffs_sum::<D>(
                self.parts[range.clone()]
                    .iter()
                    .flat_map(|p| p.batch.parts.iter()),
                metrics,
            );

            if let (Some(old_diffs_sum), Some(new_diffs_sum)) = (old_diffs_sum, new_diffs_sum) {
                assert_eq!(
                    old_diffs_sum, new_diffs_sum,
                    "merge res diffs sum ({:?}) did not match spine batch diffs sum ({:?})",
                    new_diffs_sum, old_diffs_sum
                );
            }

            self.perform_subset_replacement(
                &res.output,
                id,
                range,
                res.new_active_compaction.clone(),
            )
        } else {
            ApplyMergeResult::NotAppliedNoMatch
        }
    }

    /// This is the even more legacy way of replacing a spine batch with a merge result.
    /// It is used in cases where we don't have the full compaction input
1347    /// information, and we don't have the diffs sum.
1348    /// Eventually we should strive to roundtrip Spine IDs and diffs sums everywhere and
1349    /// deprecate this method.
1350    fn maybe_replace_unchecked(&mut self, res: &FueledMergeRes<T>) -> ApplyMergeResult {
1351        // The spine's and merge res's sinces don't need to match (which could occur if Spine
1352        // has been reloaded from state due to compare_and_set mismatch), but if so, the Spine
1353        // since must be in advance of the merge res since.
1354        if !PartialOrder::less_equal(res.output.desc.since(), self.desc().since()) {
1355            return ApplyMergeResult::NotAppliedInvalidSince;
1356        }
1357
1358        // If our merge result exactly matches a spine batch, we can swap it in directly
1359        let exact_match = res.output.desc.lower() == self.desc().lower()
1360            && res.output.desc.upper() == self.desc().upper();
1361        if exact_match {
1362            // Spine internally has an invariant about a batch being at some level
1363            // or higher based on the len. We could end up violating this invariant
1364            // if we increased the length of the batch.
1365            //
1366            // A res output with length greater than the existing spine batch implies
1367            // a compaction has already been applied to this range, and with a higher
1368            // rate of consolidation than this one. This could happen as a result of
1369            // compaction's memory bound limiting the amount of consolidation possible.
1370            if res.output.len > self.len() {
1371                return ApplyMergeResult::NotAppliedTooManyUpdates;
1372            }
1373
1374            *self = SpineBatch::merged(IdHollowBatch {
1375                id: self.id(),
1376                batch: Arc::new(res.output.clone()),
1377            });
1378            return ApplyMergeResult::AppliedExact;
1379        }
1380
1381        // Try subset replacement
1382        if let Some((id, range)) = self.find_replacement_range(&res.output.desc) {
1383            self.perform_subset_replacement(
1384                &res.output,
1385                id,
1386                range,
1387                res.new_active_compaction.clone(),
1388            )
1389        } else {
1390            ApplyMergeResult::NotAppliedNoMatch
1391        }
1392    }
1393
1394    /// Find the range of parts that can be replaced by the merge result
1395    fn find_replacement_range(&self, desc: &Description<T>) -> Option<(SpineId, Range<usize>)> {
1396        // It is possible the structure of the spine has changed since the merge res
1397        // was created, such that it no longer exactly matches the description of a
1398        // spine batch. This can happen if another merge has happened in the interim,
1399        // or if spine needed to be rebuilt from state.
1400        //
1401        // When this occurs, we can still attempt to slot the merge res in to replace
1402        // the parts of a fueled merge. e.g. if the res is for `[1,3)` and the parts
1403        // are `[0,1),[1,2),[2,3),[3,4)`, we can swap out the middle two parts for res.
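        //
        // Illustratively, in that example the part at index 1 has a lower of 1
        // and the part at index 2 has an upper of 3, so the replacement covers
        // indices 1..3 and the new id spans those two parts' ids.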
1404
1405        let mut lower = None;
1406        let mut upper = None;
1407
1408        for (i, batch) in self.parts.iter().enumerate() {
1409            if batch.batch.desc.lower() == desc.lower() {
1410                lower = Some((i, batch.id.0));
1411            }
1412            if batch.batch.desc.upper() == desc.upper() {
1413                upper = Some((i, batch.id.1));
1414            }
1415            if lower.is_some() && upper.is_some() {
1416                break;
1417            }
1418        }
1419
1420        match (lower, upper) {
1421            (Some((lower_idx, id_lower)), Some((upper_idx, id_upper))) => {
1422                Some((SpineId(id_lower, id_upper), lower_idx..(upper_idx + 1)))
1423            }
1424            _ => None,
1425        }
1426    }
1427
1428    /// Perform the actual subset replacement
1429    fn perform_subset_replacement(
1430        &mut self,
1431        res: &HollowBatch<T>,
1432        spine_id: SpineId,
1433        range: Range<usize>,
1434        new_active_compaction: Option<ActiveCompaction>,
1435    ) -> ApplyMergeResult {
1436        let SpineBatch {
1437            id,
1438            parts,
1439            desc,
1440            active_compaction: _,
1441            len: _,
1442        } = self;
1443
1444        let mut new_parts = vec![];
1445        new_parts.extend_from_slice(&parts[..range.start]);
1446        new_parts.push(IdHollowBatch {
1447            id: spine_id,
1448            batch: Arc::new(res.clone()),
1449        });
1450        new_parts.extend_from_slice(&parts[range.end..]);
1451
1452        let res = if range.len() == parts.len() {
1453            ApplyMergeResult::AppliedExact
1454        } else {
1455            ApplyMergeResult::AppliedSubset
1456        };
1457
1458        let new_spine_batch = SpineBatch {
1459            id: *id,
1460            desc: desc.to_owned(),
1461            len: new_parts.iter().map(|x| x.batch.len).sum(),
1462            parts: new_parts,
1463            active_compaction: new_active_compaction,
1464        };
1465
1466        if new_spine_batch.len() > self.len() {
1467            return ApplyMergeResult::NotAppliedTooManyUpdates;
1468        }
1469
1470        *self = new_spine_batch;
1471        res
1472    }
1473}
1474
1475#[derive(Debug, Clone, PartialEq, Serialize)]
1476pub struct FuelingMerge<T> {
1477    pub(crate) since: Antichain<T>,
1478    pub(crate) remaining_work: usize,
1479}
1480
1481#[derive(Debug, Clone, PartialEq, Serialize)]
1482pub struct IdFuelingMerge<T> {
1483    id: SpineId,
1484    merge: FuelingMerge<T>,
1485}
1486
1487impl<T: Timestamp + Lattice> FuelingMerge<T> {
1488    /// Perform some amount of work, decrementing `fuel`.
1489    ///
1490    /// If `fuel` is non-zero after the call, the merging is complete and one
1491    /// should call `done` to extract the merged results.
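    ///
    /// Illustrative sketch (marked `ignore`; it constructs the struct directly
    /// rather than going through `Spine`):
    ///
    /// ```ignore
    /// let mut merge = FuelingMerge { since: Antichain::from_elem(0u64), remaining_work: 10 };
    /// let mut fuel: isize = 100;
    /// merge.work(&[], &mut fuel);
    /// assert_eq!(merge.remaining_work, 0);
    /// assert_eq!(fuel, 90); // leftover fuel => the merge is complete; call `done`
    /// ```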
1492    // TODO(benesch): rewrite to avoid usage of `as`.
1493    #[allow(clippy::as_conversions)]
1494    fn work(&mut self, _: &[SpineBatch<T>], fuel: &mut isize) {
1495        let used = std::cmp::min(*fuel as usize, self.remaining_work);
1496        self.remaining_work = self.remaining_work.saturating_sub(used);
1497        *fuel -= used as isize;
1498    }
1499
1500    /// Extracts merged results.
1501    ///
1502    /// This method should only be called after `work` has been called and has
1503    /// not brought `fuel` to zero. Otherwise, the merge is still in progress.
1504    fn done(
1505        self,
1506        bs: ArrayVec<SpineBatch<T>, BATCHES_PER_LEVEL>,
1507        log: &mut SpineLog<'_, T>,
1508    ) -> Option<SpineBatch<T>> {
1509        let first = bs.first()?;
1510        let last = bs.last()?;
1511        let id = SpineId(first.id().0, last.id().1);
1512        assert!(id.0 < id.1);
1513        let lower = first.desc().lower().clone();
1514        let upper = last.desc().upper().clone();
1515        let since = self.since;
1516
1517        // Special case empty batches.
1518        if bs.iter().all(SpineBatch::is_empty) {
1519            return Some(SpineBatch::empty(id, lower, upper, since));
1520        }
1521
1522        let desc = Description::new(lower, upper, since);
1523        let len = bs.iter().map(SpineBatch::len).sum();
1524
1525        // Pre-size the merged_parts Vec. Benchmarking has shown that, at least
1526        // in the worst case, the double iteration is absolutely worth having
1527        // merged_parts pre-sized.
1528        let mut merged_parts_len = 0;
1529        for b in &bs {
1530            merged_parts_len += b.parts.len();
1531        }
1532        let mut merged_parts = Vec::with_capacity(merged_parts_len);
1533        for b in bs {
1534            merged_parts.extend(b.parts)
1535        }
1536        // Sanity check the pre-size code.
1537        debug_assert_eq!(merged_parts.len(), merged_parts_len);
1538
1539        if let SpineLog::Enabled { merge_reqs } = log {
1540            merge_reqs.push(FueledMergeReq {
1541                id,
1542                desc: desc.clone(),
1543                inputs: merged_parts.clone(),
1544            });
1545        }
1546
1547        Some(SpineBatch {
1548            id,
1549            desc,
1550            len,
1551            parts: merged_parts,
1552            active_compaction: None,
1553        })
1554    }
1555}
1556
1557/// The maximum number of batches per level in the spine.
1558/// In practice, we probably want a larger max and a configurable soft cap, but using a
1559/// stack-friendly data structure and keeping this number low makes this safer during the
1560/// initial rollout.
1561const BATCHES_PER_LEVEL: usize = 2;
1562
1563/// An append-only collection of update batches.
1564///
1565/// The `Spine` is a general-purpose trace implementation based on collection
1566/// and merging immutable batches of updates. It is generic with respect to the
1567/// batch type, and can be instantiated for any implementor of `trace::Batch`.
1568///
1569/// ## Design
1570///
1571/// This spine is represented as a list of layers, where each element in the
1572/// list is either
1573///
///   1. vacant: no batches
///   2. a single batch
///   3. a pair of batches that are in the process of merging into a batch for
///      the next layer
///
/// (In this fork these cases are represented by `MergeState`, which holds up
/// to `BATCHES_PER_LEVEL` batches in an `ArrayVec` rather than using an enum.)
/// A logically empty batch nonetheless acts as a number of updates
/// proportionate to the level at which it exists (for bookkeeping).
1581///
1582/// Each of the batches at layer i contains at most 2^i elements. The sequence
1583/// of batches should have the upper bound of one match the lower bound of the
1584/// next. Batches may be logically empty, with matching upper and lower bounds,
1585/// as a bookkeeping mechanism.
1586///
1587/// Each batch at layer i is treated as if it contains exactly 2^i elements,
1588/// even though it may actually contain fewer elements. This allows us to
1589/// decouple the physical representation from logical amounts of effort invested
1590/// in each batch. It allows us to begin compaction and to reduce the number of
1591/// updates, without compromising our ability to continue to move updates along
1592/// the spine. We are explicitly making the trade-off that while some batches
1593/// might compact at lower levels, we want to treat them as if they contained
1594/// their full set of updates for accounting reasons (to apply work to higher
1595/// levels).
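///
/// For example, a level-4 batch that has physically compacted down to a
/// handful of updates is still accounted as 2^4 = 16 updates when fueling the
/// layers above it.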
1596///
1597/// We maintain the invariant that for any in-progress merge at level k there
1598/// should be fewer than 2^k records at levels lower than k. That is, even if we
1599/// were to apply an unbounded amount of effort to those records, we would not
1600/// have enough records to prompt a merge into the in-progress merge. Ideally,
1601/// we maintain the extended invariant that for any in-progress merge at level
1602/// k, the remaining effort required (number of records minus applied effort) is
1603/// less than the number of records that would need to be added to reach 2^k
1604/// records in layers below.
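///
/// For intuition only, an illustrative sketch of what the `merging` vector
/// might look like (level i accounts its batches as up to 2^i updates, and a
/// full level has a fueling merge in progress):
///
/// ```text
/// level 0: [batch]          // single
/// level 1: []               // vacant
/// level 2: [batch, batch]   // full, fueling merge in progress
/// level 3: [batch]          // single
/// ```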
1605///
1606/// ## Mathematics
1607///
1608/// When a merge is initiated, there should be a non-negative *deficit* of
1609/// updates before the layers below could plausibly produce a new batch for the
1610/// currently merging layer. We must determine a factor of proportionality, so
1611/// that newly arrived updates provide at least that amount of "fuel" towards
1612/// the merging layer, so that the merge completes before lower levels invade.
1613///
1614/// ### Deficit:
1615///
1616/// A new merge is initiated only in response to the completion of a prior
1617/// merge, or the introduction of new records from outside. The latter case is
1618/// special, and will maintain our invariant trivially, so we will focus on the
1619/// former case.
1620///
/// When a merge at level k completes, assuming we have maintained our
/// invariant, there should be fewer than 2^k records at lower levels. The
/// newly created merge at level k+1 will require up to 2^(k+2) units of work,
/// and should not expect a new batch until strictly more than 2^k records are
/// added. This means that a factor of proportionality of four should be
/// sufficient to ensure that the merge completes before a new merge is
/// initiated.
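///
/// As a concrete instance of that arithmetic (illustrative numbers): with
/// k = 3, the merge created at level 4 needs at most 2^(3+2) = 32 units of
/// work, and strictly more than 2^3 = 8 new records (i.e. at least 9) must
/// arrive before the layers below can produce another level-3 batch, so four
/// units of fuel per new record (4 * 9 = 36 >= 32) is enough for the merge to
/// finish first.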
1628///
/// When new records get introduced, we will need to roll up any batches at
/// lower levels, which we treat as the introduction of records. Each of these
/// virtual records should be accounted for the fuel it contributes, as it
/// results in the promotion of batches closer to in-progress merges.
1634///
1635/// ### Fuel sharing
1636///
1637/// We like the idea of applying fuel preferentially to merges at *lower*
1638/// levels, under the idea that they are easier to complete, and we benefit from
1639/// fewer total merges in progress. This does delay the completion of merges at
1640/// higher levels, and may not obviously be a total win. If we choose to do
1641/// this, we should make sure that we correctly account for completed merges at
1642/// low layers: they should still extract fuel from new updates even though they
1643/// have completed, at least until they have paid back any "debt" to higher
1644/// layers by continuing to provide fuel as updates arrive.
1645#[derive(Debug, Clone)]
1646struct Spine<T> {
1647    effort: usize,
1648    next_id: usize,
1649    since: Antichain<T>,
1650    upper: Antichain<T>,
1651    merging: Vec<MergeState<T>>,
1652}
1653
1654impl<T> Spine<T> {
1655    /// All batches in the spine, oldest to newest.
1656    pub fn spine_batches(&self) -> impl Iterator<Item = &SpineBatch<T>> {
1657        self.merging.iter().rev().flat_map(|m| &m.batches)
1658    }
1659
1660    /// All (mutable) batches in the spine, oldest to newest.
1661    pub fn spine_batches_mut(&mut self) -> impl DoubleEndedIterator<Item = &mut SpineBatch<T>> {
1662        self.merging.iter_mut().rev().flat_map(|m| &mut m.batches)
1663    }
1664}
1665
1666impl<T: Timestamp + Lattice> Spine<T> {
1667    /// Allocates a fueled `Spine`.
1668    ///
1669    /// This trace will merge batches progressively, with each inserted batch
    /// applying a multiple of the batch's length in effort to each merge. The
    /// `effort` field is that multiplier; it is fixed at one here, the minimum
    /// value at which merging makes progress (a value of zero would not be
    /// helpful).
1673    pub fn new() -> Self {
1674        Spine {
1675            effort: 1,
1676            next_id: 0,
1677            since: Antichain::from_elem(T::minimum()),
1678            upper: Antichain::from_elem(T::minimum()),
1679            merging: Vec::new(),
1680        }
1681    }
1682
1683    /// Apply some amount of effort to trace maintenance.
1684    ///
1685    /// The units of effort are updates, and the method should be thought of as
1686    /// analogous to inserting as many empty updates, where the trace is
1687    /// permitted to perform proportionate work.
1688    ///
1689    /// Returns true if this did work and false if it left the spine unchanged.
1690    fn exert(&mut self, effort: usize, log: &mut SpineLog<'_, T>) -> bool {
1691        self.tidy_layers();
1692        if self.reduced() {
1693            return false;
1694        }
1695
1696        if self.merging.iter().any(|b| b.merge.is_some()) {
1697            let fuel = isize::try_from(effort).unwrap_or(isize::MAX);
1698            // If any merges exist, we can directly call `apply_fuel`.
1699            self.apply_fuel(&fuel, log);
1700        } else {
1701            // Otherwise, we'll need to introduce fake updates to move merges
1702            // along.
1703
            // Introduce an empty batch with roughly `effort` virtual updates.
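            // (Illustratively, an effort of 100 rounds up to 128 = 2^7, so the
            // empty batch is introduced at level 7.)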
1705            let level = usize::cast_from(effort.next_power_of_two().trailing_zeros());
1706            let id = self.next_id();
1707            self.introduce_batch(
1708                SpineBatch::empty(
1709                    id,
1710                    self.upper.clone(),
1711                    self.upper.clone(),
1712                    self.since.clone(),
1713                ),
1714                level,
1715                log,
1716            );
1717        }
1718        true
1719    }
1720
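    /// Allocates the next `SpineId`: ids are handed out as adjacent half-open
    /// ranges, e.g. `SpineId(0, 1)`, then `SpineId(1, 2)`, and so on.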
1721    pub fn next_id(&mut self) -> SpineId {
1722        let id = self.next_id;
1723        self.next_id += 1;
1724        SpineId(id, self.next_id)
1725    }
1726
1727    // Ideally, this method acts as insertion of `batch`, even if we are not yet
1728    // able to begin merging the batch. This means it is a good time to perform
1729    // amortized work proportional to the size of batch.
1730    pub fn insert(&mut self, batch: HollowBatch<T>, log: &mut SpineLog<'_, T>) {
1731        assert!(batch.desc.lower() != batch.desc.upper());
1732        assert_eq!(batch.desc.lower(), &self.upper);
1733
1734        let id = self.next_id();
1735        let batch = SpineBatch::merged(IdHollowBatch {
1736            id,
1737            batch: Arc::new(batch),
1738        });
1739
1740        self.upper.clone_from(batch.upper());
1741
1742        // If `batch` and the most recently inserted batch are both empty,
1743        // we can just fuse them.
1744        if batch.is_empty() {
1745            if let Some(position) = self.merging.iter().position(|m| !m.is_vacant()) {
1746                if self.merging[position].is_single() && self.merging[position].is_empty() {
1747                    self.insert_at(batch, position);
1748                    // Since we just inserted a batch, we should always have work to complete...
1749                    // but otherwise we just leave this layer vacant.
1750                    if let Some(merged) = self.complete_at(position, log) {
1751                        self.merging[position] = MergeState::single(merged);
1752                    }
1753                    return;
1754                }
1755            }
1756        }
1757
1758        // Normal insertion for the batch.
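        // (For instance, a batch with len 10 rounds up to 16 = 2^4 and so is
        // introduced at level 4.)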
1759        let index = batch.len().next_power_of_two();
1760        self.introduce_batch(batch, usize::cast_from(index.trailing_zeros()), log);
1761    }
1762
1763    /// Returns true when the trace is considered *structurally reduced*.
1764    ///
    /// A trace is reduced when the total number of runs (across every
    /// `SpineBatch` and all of their inner hollow batches) is less than two.
    /// In other words, there are either zero runs (fully empty) or exactly one
    /// logical run of data remaining.
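    ///
    /// (For example, a spine whose only `SpineBatch` has a single hollow batch
    /// containing one run is reduced; a spine with two runs anywhere is not.)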
1769    fn reduced(&self) -> bool {
1770        self.spine_batches()
1771            .map(|b| {
1772                b.parts
1773                    .iter()
1774                    .map(|p| p.batch.run_meta.len())
1775                    .sum::<usize>()
1776            })
1777            .sum::<usize>()
1778            < 2
1779    }
1780
1781    /// Describes the merge progress of layers in the trace.
1782    ///
1783    /// Intended for diagnostics rather than public consumption.
1784    #[allow(dead_code)]
1785    fn describe(&self) -> Vec<(usize, usize)> {
1786        self.merging
1787            .iter()
1788            .map(|b| (b.batches.len(), b.len()))
1789            .collect()
1790    }
1791
1792    /// Introduces a batch at an indicated level.
1793    ///
1794    /// The level indication is often related to the size of the batch, but it
1795    /// can also be used to artificially fuel the computation by supplying empty
1796    /// batches at non-trivial indices, to move merges along.
1797    fn introduce_batch(
1798        &mut self,
1799        batch: SpineBatch<T>,
1800        batch_index: usize,
1801        log: &mut SpineLog<'_, T>,
1802    ) {
1803        // Step 0.  Determine an amount of fuel to use for the computation.
1804        //
1805        //          Fuel is used to drive maintenance of the data structure,
1806        //          and in particular are used to make progress through merges
1807        //          that are in progress. The amount of fuel to use should be
1808        //          proportional to the number of records introduced, so that
1809        //          we are guaranteed to complete all merges before they are
1810        //          required as arguments to merges again.
1811        //
1812        //          The fuel use policy is negotiable, in that we might aim
1813        //          to use relatively less when we can, so that we return
1814        //          control promptly, or we might account more work to larger
        //          batches. It is not clear which is best, or if there
        //          should be a configuration knob controlling this.
1817
1818        // The amount of fuel to use is proportional to 2^batch_index, scaled by
1819        // a factor of self.effort which determines how eager we are in
1820        // performing maintenance work. We need to ensure that each merge in
1821        // progress receives fuel for each introduced batch, and so multiply by
1822        // that as well.
1823        if batch_index > 32 {
1824            println!("Large batch index: {}", batch_index);
1825        }
1826
1827        // We believe that eight units of fuel is sufficient for each introduced
1828        // record, accounted as four for each record, and a potential four more
1829        // for each virtual record associated with promoting existing smaller
1830        // batches. We could try and make this be less, or be scaled to merges
1831        // based on their deficit at time of instantiation. For now, we remain
1832        // conservative.
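        //
        // (Illustratively, a batch introduced at index 3 with the default
        // effort of 1 receives 8 << 3 = 64 units of fuel, i.e. eight units for
        // each of the up-to-2^3 records accounted at that level.)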
1833        let mut fuel = 8 << batch_index;
1834        // Scale up by the effort parameter, which is calibrated to one as the
1835        // minimum amount of effort.
1836        fuel *= self.effort;
1837        // Convert to an `isize` so we can observe any fuel shortfall.
1838        // TODO(benesch): avoid dangerous usage of `as`.
1839        #[allow(clippy::as_conversions)]
1840        let fuel = fuel as isize;
1841
1842        // Step 1.  Apply fuel to each in-progress merge.
1843        //
1844        //          Before we can introduce new updates, we must apply any
1845        //          fuel to in-progress merges, as this fuel is what ensures
1846        //          that the merges will be complete by the time we insert
1847        //          the updates.
1848        self.apply_fuel(&fuel, log);
1849
1850        // Step 2.  We must ensure the invariant that adjacent layers do not
1851        //          contain two batches will be satisfied when we insert the
        //          batch. We forcibly complete all merges at layers lower
1853        //          than and including `batch_index`, so that the new batch is
1854        //          inserted into an empty layer.
1855        //
1856        //          We could relax this to "strictly less than `batch_index`"
1857        //          if the layer above has only a single batch in it, which
1858        //          seems not implausible if it has been the focus of effort.
1859        //
1860        //          This should be interpreted as the introduction of some
1861        //          volume of fake updates, and we will need to fuel merges
1862        //          by a proportional amount to ensure that they are not
1863        //          surprised later on. The number of fake updates should
1864        //          correspond to the deficit for the layer, which perhaps
1865        //          we should track explicitly.
1866        self.roll_up(batch_index, log);
1867
1868        // Step 3. This insertion should be into an empty layer. It is a logical
1869        //         error otherwise, as we may be violating our invariant, from
1870        //         which all wonderment derives.
1871        self.insert_at(batch, batch_index);
1872
1873        // Step 4. Tidy the largest layers.
1874        //
1875        //         It is important that we not tidy only smaller layers,
1876        //         as their ascension is what ensures the merging and
1877        //         eventual compaction of the largest layers.
1878        self.tidy_layers();
1879    }
1880
1881    /// Ensures that an insertion at layer `index` will succeed.
1882    ///
1883    /// This method is subject to the constraint that all existing batches
1884    /// should occur at higher levels, which requires it to "roll up" batches
1885    /// present at lower levels before the method is called. In doing this, we
1886    /// should not introduce more virtual records than 2^index, as that is the
1887    /// amount of excess fuel we have budgeted for completing merges.
1888    fn roll_up(&mut self, index: usize, log: &mut SpineLog<'_, T>) {
1889        // Ensure entries sufficient for `index`.
1890        while self.merging.len() <= index {
1891            self.merging.push(MergeState::default());
1892        }
1893
1894        // We only need to roll up if there are non-vacant layers.
1895        if self.merging[..index].iter().any(|m| !m.is_vacant()) {
1896            // Collect and merge all batches at layers up to but not including
1897            // `index`.
1898            let mut merged = None;
1899            for i in 0..index {
1900                if let Some(merged) = merged.take() {
1901                    self.insert_at(merged, i);
1902                }
1903                merged = self.complete_at(i, log);
1904            }
1905
1906            // The merged results should be introduced at level `index`, which
1907            // should be ready to absorb them (possibly creating a new merge at
1908            // the time).
1909            if let Some(merged) = merged {
1910                self.insert_at(merged, index);
1911            }
1912
1913            // If the insertion results in a merge, we should complete it to
1914            // ensure the upcoming insertion at `index` does not panic.
1915            if self.merging[index].is_full() {
1916                let merged = self.complete_at(index, log).expect("double batch");
1917                self.insert_at(merged, index + 1);
1918            }
1919        }
1920    }
1921
1922    /// Applies an amount of fuel to merges in progress.
1923    ///
1924    /// The supplied `fuel` is for each in progress merge, and if we want to
1925    /// spend the fuel non-uniformly (e.g. prioritizing merges at low layers) we
1926    /// could do so in order to maintain fewer batches on average (at the risk
1927    /// of completing merges of large batches later, but tbh probably not much
1928    /// later).
1929    pub fn apply_fuel(&mut self, fuel: &isize, log: &mut SpineLog<'_, T>) {
1930        // For the moment our strategy is to apply fuel independently to each
1931        // merge in progress, rather than prioritizing small merges. This sounds
1932        // like a great idea, but we need better accounting in place to ensure
1933        // that merges that borrow against later layers but then complete still
1934        // "acquire" fuel to pay back their debts.
1935        for index in 0..self.merging.len() {
1936            // Give each level independent fuel, for now.
1937            let mut fuel = *fuel;
1938            // Pass along various logging stuffs, in case we need to report
1939            // success.
1940            self.merging[index].work(&mut fuel);
1941            // `fuel` could have a deficit at this point, meaning we over-spent
1942            // when we took a merge step. We could ignore this, or maintain the
1943            // deficit and account future fuel against it before spending again.
1944            // It isn't clear why that would be especially helpful to do; we
1945            // might want to avoid overspends at multiple layers in the same
1946            // invocation (to limit latencies), but there is probably a rich
1947            // policy space here.
1948
1949            // If a merge completes, we can immediately merge it in to the next
1950            // level, which is "guaranteed" to be complete at this point, by our
1951            // fueling discipline.
1952            if self.merging[index].is_complete() {
1953                let complete = self.complete_at(index, log).expect("complete batch");
1954                self.insert_at(complete, index + 1);
1955            }
1956        }
1957    }
1958
1959    /// Inserts a batch at a specific location.
1960    ///
1961    /// This is a non-public internal method that can panic if we try and insert
1962    /// into a layer which already contains two batches (and is still in the
1963    /// process of merging).
1964    fn insert_at(&mut self, batch: SpineBatch<T>, index: usize) {
1965        // Ensure the spine is large enough.
1966        while self.merging.len() <= index {
1967            self.merging.push(MergeState::default());
1968        }
1969
1970        // Insert the batch at the location.
1971        let merging = &mut self.merging[index];
1972        merging.push_batch(batch);
1973        if merging.batches.is_full() {
1974            let compaction_frontier = Some(self.since.borrow());
1975            merging.merge = SpineBatch::begin_merge(&merging.batches[..], compaction_frontier)
1976        }
1977    }
1978
    /// Completes and extracts whatever is at layer `index`, leaving this layer vacant.
1980    fn complete_at(&mut self, index: usize, log: &mut SpineLog<'_, T>) -> Option<SpineBatch<T>> {
1981        self.merging[index].complete(log)
1982    }
1983
1984    /// Attempts to draw down large layers to size appropriate layers.
1985    fn tidy_layers(&mut self) {
1986        // If the largest layer is complete (not merging), we can attempt to
1987        // draw it down to the next layer. This is permitted if we can maintain
1988        // our invariant that below each merge there are at most half the
1989        // records that would be required to invade the merge.
1990        if !self.merging.is_empty() {
1991            let mut length = self.merging.len();
1992            if self.merging[length - 1].is_single() {
1993                // To move a batch down, we require that it contain few enough
1994                // records that the lower level is appropriate, and that moving
1995                // the batch would not create a merge violating our invariant.
1996                let appropriate_level = usize::cast_from(
1997                    self.merging[length - 1]
1998                        .len()
1999                        .next_power_of_two()
2000                        .trailing_zeros(),
2001                );
2002
2003                // Continue only as far as is appropriate
2004                while appropriate_level < length - 1 {
2005                    let current = &mut self.merging[length - 2];
2006                    if current.is_vacant() {
2007                        // Vacant batches can be absorbed.
2008                        self.merging.remove(length - 2);
2009                        length = self.merging.len();
2010                    } else {
2011                        if !current.is_full() {
2012                            // Single batches may initiate a merge, if sizes are
2013                            // within bounds, but terminate the loop either way.
2014
2015                            // Determine the number of records that might lead
2016                            // to a merge. Importantly, this is not the number
2017                            // of actual records, but the sum of upper bounds
2018                            // based on indices.
2019                            let mut smaller = 0;
2020                            for (index, batch) in self.merging[..(length - 2)].iter().enumerate() {
2021                                smaller += batch.batches.len() << index;
2022                            }
2023
2024                            if smaller <= (1 << length) / 8 {
2025                                // Remove the batch under consideration (shifting the deeper batches up a level),
2026                                // then merge in the single batch at the current level.
2027                                let state = self.merging.remove(length - 2);
2028                                assert_eq!(state.batches.len(), 1);
2029                                for batch in state.batches {
2030                                    self.insert_at(batch, length - 2);
2031                                }
2032                            }
2033                        }
2034                        break;
2035                    }
2036                }
2037            }
2038        }
2039    }
2040
2041    /// Checks invariants:
2042    /// - The lowers and uppers of all batches "line up".
2043    /// - The lower of the "minimum" batch is `antichain[T::minimum]`.
2044    /// - The upper of the "maximum" batch is `== self.upper`.
2045    /// - The since of each batch is `less_equal self.since`.
2046    /// - The `SpineIds` all "line up" and cover from `0` to `self.next_id`.
2047    /// - TODO: Verify fuel and level invariants.
2048    fn validate(&self) -> Result<(), String> {
2049        let mut id = SpineId(0, 0);
2050        let mut frontier = Antichain::from_elem(T::minimum());
2051        for x in self.merging.iter().rev() {
2052            if x.is_full() != x.merge.is_some() {
2053                return Err(format!(
2054                    "all (and only) full batches should have fueling merges (full={}, merge={:?})",
2055                    x.is_full(),
2056                    x.merge,
2057                ));
2058            }
2059
2060            if let Some(m) = &x.merge {
2061                if !x.is_full() {
2062                    return Err(format!(
2063                        "merge should only exist for full batches (len={:?}, merge={:?})",
2064                        x.batches.len(),
2065                        m.id,
2066                    ));
2067                }
2068                if x.id() != Some(m.id) {
2069                    return Err(format!(
2070                        "merge id should match the range of the batch ids (batch={:?}, merge={:?})",
2071                        x.id(),
2072                        m.id,
2073                    ));
2074                }
2075            }
2076
2077            // TODO: Anything we can validate about x.merge? It'd
2078            // be nice to assert that it's bigger than the len of the
2079            // two batches, but apply_merge_res might swap those lengths
2080            // out from under us.
2081            for batch in &x.batches {
2082                if batch.id().0 != id.1 {
2083                    return Err(format!(
2084                        "batch id {:?} does not match the previous id {:?}: {:?}",
2085                        batch.id(),
2086                        id,
2087                        self
2088                    ));
2089                }
2090                id = batch.id();
2091                if batch.desc().lower() != &frontier {
2092                    return Err(format!(
2093                        "batch lower {:?} does not match the previous upper {:?}: {:?}",
2094                        batch.desc().lower(),
2095                        frontier,
2096                        self
2097                    ));
2098                }
2099                frontier.clone_from(batch.desc().upper());
2100                if !PartialOrder::less_equal(batch.desc().since(), &self.since) {
2101                    return Err(format!(
2102                        "since of batch {:?} past the spine since {:?}: {:?}",
2103                        batch.desc().since(),
2104                        self.since,
2105                        self
2106                    ));
2107                }
2108            }
2109        }
2110        if self.next_id != id.1 {
2111            return Err(format!(
2112                "spine next_id {:?} does not match the last batch's id {:?}: {:?}",
2113                self.next_id, id, self
2114            ));
2115        }
2116        if self.upper != frontier {
2117            return Err(format!(
2118                "spine upper {:?} does not match the last batch's upper {:?}: {:?}",
2119                self.upper, frontier, self
2120            ));
2121        }
2122        Ok(())
2123    }
2124}
2125
2126/// Describes the state of a layer.
2127///
2128/// A layer can be empty, contain a single batch, or contain a pair of batches
2129/// that are in the process of merging into a batch for the next layer.
2130#[derive(Debug, Clone)]
2131struct MergeState<T> {
2132    batches: ArrayVec<SpineBatch<T>, BATCHES_PER_LEVEL>,
2133    merge: Option<IdFuelingMerge<T>>,
2134}
2135
2136impl<T> Default for MergeState<T> {
2137    fn default() -> Self {
2138        Self {
2139            batches: ArrayVec::new(),
2140            merge: None,
2141        }
2142    }
2143}
2144
2145impl<T: Timestamp + Lattice> MergeState<T> {
2146    /// An id that covers all the batches in the given merge state, assuming there are any.
2147    fn id(&self) -> Option<SpineId> {
2148        if let (Some(first), Some(last)) = (self.batches.first(), self.batches.last()) {
2149            Some(SpineId(first.id().0, last.id().1))
2150        } else {
2151            None
2152        }
2153    }
2154
2155    /// A new single-batch merge state.
2156    fn single(batch: SpineBatch<T>) -> Self {
2157        let mut state = Self::default();
2158        state.push_batch(batch);
2159        state
2160    }
2161
2162    /// Push a new batch at this level, checking invariants.
2163    fn push_batch(&mut self, batch: SpineBatch<T>) {
2164        if let Some(last) = self.batches.last() {
2165            assert_eq!(last.id().1, batch.id().0);
2166            assert_eq!(last.upper(), batch.lower());
2167        }
2168        assert!(
2169            self.merge.is_none(),
2170            "Attempted to insert batch into incomplete merge! (batch={:?}, batch_count={})",
2171            batch.id,
2172            self.batches.len(),
2173        );
2174        self.batches
2175            .try_push(batch)
2176            .expect("Attempted to insert batch into full layer!");
2177    }
2178
2179    /// The number of actual updates contained in the level.
2180    fn len(&self) -> usize {
2181        self.batches.iter().map(SpineBatch::len).sum()
2182    }
2183
2184    /// True if this merge state contains no updates.
2185    fn is_empty(&self) -> bool {
2186        self.batches.iter().all(SpineBatch::is_empty)
2187    }
2188
2189    /// True if this level contains no batches.
2190    fn is_vacant(&self) -> bool {
2191        self.batches.is_empty()
2192    }
2193
2194    /// True only for a single-batch state.
2195    fn is_single(&self) -> bool {
2196        self.batches.len() == 1
2197    }
2198
2199    /// True if this merge cannot hold any more batches.
2200    /// (i.e. for a binary merge tree, true if this layer holds two batches.)
2201    fn is_full(&self) -> bool {
2202        self.batches.is_full()
2203    }
2204
    /// Immediately completes any merge.
    ///
    /// The result is either a batch, if there is a non-trivial batch to
    /// return, or `None` if there is no meaningful batch to return.
2211    fn complete(&mut self, log: &mut SpineLog<'_, T>) -> Option<SpineBatch<T>> {
2212        let mut this = mem::take(self);
2213        if this.batches.len() <= 1 {
2214            this.batches.pop()
2215        } else {
            // Merge the remaining batches, regardless of whether we have a fully
            // fueled merge. Note that `self` was emptied by the `mem::take` above,
            // so the fallback merge must be started from `this.batches`.
            let id_merge = this
                .merge
                .or_else(|| SpineBatch::begin_merge(&this.batches[..], None))?;
2220            id_merge.merge.done(this.batches, log)
2221        }
2222    }
2223
2224    /// True iff the layer is a complete merge, ready for extraction.
2225    fn is_complete(&self) -> bool {
2226        match &self.merge {
2227            Some(IdFuelingMerge { merge, .. }) => merge.remaining_work == 0,
2228            None => false,
2229        }
2230    }
2231
2232    /// Performs a bounded amount of work towards a merge.
2233    fn work(&mut self, fuel: &mut isize) {
2234        // We only perform work for merges in progress.
2235        if let Some(IdFuelingMerge { merge, .. }) = &mut self.merge {
2236            merge.work(&self.batches[..], fuel)
2237        }
2238    }
2239}
2240
2241#[cfg(test)]
2242pub mod datadriven {
2243    use mz_ore::fmt::FormatBuffer;
2244
2245    use crate::internal::datadriven::DirectiveArgs;
2246
2247    use super::*;
2248
2249    /// Shared state for a single [crate::internal::trace] [datadriven::TestFile].
2250    #[derive(Debug, Default)]
2251    pub struct TraceState {
2252        pub trace: Trace<u64>,
2253        pub merge_reqs: Vec<FueledMergeReq<u64>>,
2254    }
2255
2256    pub fn since_upper(
2257        datadriven: &TraceState,
2258        _args: DirectiveArgs,
2259    ) -> Result<String, anyhow::Error> {
2260        Ok(format!(
2261            "{:?}{:?}\n",
2262            datadriven.trace.since().elements(),
2263            datadriven.trace.upper().elements()
2264        ))
2265    }
2266
2267    pub fn batches(datadriven: &TraceState, _args: DirectiveArgs) -> Result<String, anyhow::Error> {
2268        let mut s = String::new();
2269        for b in datadriven.trace.spine.spine_batches() {
2270            s.push_str(b.describe(true).as_str());
2271            s.push('\n');
2272        }
2273        Ok(s)
2274    }
2275
2276    pub fn insert(
2277        datadriven: &mut TraceState,
2278        args: DirectiveArgs,
2279    ) -> Result<String, anyhow::Error> {
2280        for x in args
2281            .input
2282            .trim()
2283            .split('\n')
2284            .map(DirectiveArgs::parse_hollow_batch)
2285        {
2286            datadriven
2287                .merge_reqs
2288                .append(&mut datadriven.trace.push_batch(x));
2289        }
2290        Ok("ok\n".to_owned())
2291    }
2292
2293    pub fn downgrade_since(
2294        datadriven: &mut TraceState,
2295        args: DirectiveArgs,
2296    ) -> Result<String, anyhow::Error> {
2297        let since = args.expect("since");
2298        datadriven
2299            .trace
2300            .downgrade_since(&Antichain::from_elem(since));
2301        Ok("ok\n".to_owned())
2302    }
2303
2304    pub fn take_merge_req(
2305        datadriven: &mut TraceState,
2306        _args: DirectiveArgs,
2307    ) -> Result<String, anyhow::Error> {
2308        let mut s = String::new();
2309        for merge_req in std::mem::take(&mut datadriven.merge_reqs) {
2310            write!(
2311                s,
2312                "{:?}{:?}{:?} {}\n",
2313                merge_req.desc.lower().elements(),
2314                merge_req.desc.upper().elements(),
2315                merge_req.desc.since().elements(),
2316                merge_req
2317                    .inputs
2318                    .iter()
2319                    .flat_map(|x| x.batch.parts.iter())
2320                    .map(|x| x.printable_name())
2321                    .collect::<Vec<_>>()
2322                    .join(" ")
2323            );
2324        }
2325        Ok(s)
2326    }
2327
2328    pub fn apply_merge_res(
2329        datadriven: &mut TraceState,
2330        args: DirectiveArgs,
2331    ) -> Result<String, anyhow::Error> {
2332        let res = FueledMergeRes {
2333            output: DirectiveArgs::parse_hollow_batch(args.input),
2334            input: CompactionInput::Legacy,
2335            new_active_compaction: None,
2336        };
2337        match datadriven.trace.apply_merge_res_unchecked(&res) {
2338            ApplyMergeResult::AppliedExact => Ok("applied exact\n".into()),
2339            ApplyMergeResult::AppliedSubset => Ok("applied subset\n".into()),
2340            ApplyMergeResult::NotAppliedNoMatch => Ok("no-op\n".into()),
2341            ApplyMergeResult::NotAppliedInvalidSince => Ok("no-op invalid since\n".into()),
2342            ApplyMergeResult::NotAppliedTooManyUpdates => Ok("no-op too many updates\n".into()),
2343        }
2344    }
2345}
2346
2347#[cfg(test)]
2348pub(crate) mod tests {
2349    use std::ops::Range;
2350
2351    use proptest::prelude::*;
2352    use semver::Version;
2353
2354    use crate::internal::state::tests::{any_hollow_batch, any_hollow_batch_with_exact_runs};
2355
2356    use super::*;
2357
2358    pub fn any_trace<T: Arbitrary + Timestamp + Lattice>(
2359        num_batches: Range<usize>,
2360    ) -> impl Strategy<Value = Trace<T>> {
2361        Strategy::prop_map(
2362            (
2363                any::<Option<T>>(),
2364                proptest::collection::vec(any_hollow_batch::<T>(), num_batches),
2365                any::<bool>(),
2366                any::<u64>(),
2367            ),
2368            |(since, mut batches, roundtrip_structure, timeout_ms)| {
2369                let mut trace = Trace::<T>::default();
2370                trace.downgrade_since(&since.map_or_else(Antichain::new, Antichain::from_elem));
2371
2372                // Fix up the arbitrary HollowBatches so the lowers and uppers
2373                // align.
2374                batches.sort_by(|x, y| x.desc.upper().elements().cmp(y.desc.upper().elements()));
2375                let mut lower = Antichain::from_elem(T::minimum());
2376                for mut batch in batches {
2377                    // Overall trace since has to be past each batch's since.
2378                    if PartialOrder::less_than(trace.since(), batch.desc.since()) {
2379                        trace.downgrade_since(batch.desc.since());
2380                    }
2381                    batch.desc = Description::new(
2382                        lower.clone(),
2383                        batch.desc.upper().clone(),
2384                        batch.desc.since().clone(),
2385                    );
2386                    lower.clone_from(batch.desc.upper());
2387                    let _merge_req = trace.push_batch(batch);
2388                }
2389                let reqs: Vec<_> = trace
2390                    .fueled_merge_reqs_before_ms(timeout_ms, None)
2391                    .collect();
2392                for req in reqs {
2393                    trace.claim_compaction(req.id, ActiveCompaction { start_ms: 0 })
2394                }
2395                trace.roundtrip_structure = roundtrip_structure;
2396                trace
2397            },
2398        )
2399    }
2400
2401    #[mz_ore::test]
2402    #[cfg_attr(miri, ignore)] // proptest is too heavy for miri!
2403    fn test_roundtrips() {
2404        fn check(trace: Trace<i64>) {
2405            trace.validate().unwrap();
2406            let flat = trace.flatten();
2407            let unflat = Trace::unflatten(flat).unwrap();
2408            assert_eq!(trace, unflat);
2409        }
2410
2411        proptest!(|(trace in any_trace::<i64>(1..10))| { check(trace) })
2412    }
2413
2414    #[mz_ore::test]
2415    fn fueled_merge_reqs() {
2416        let mut trace: Trace<u64> = Trace::default();
2417        let fueled_reqs = trace.push_batch(crate::internal::state::tests::hollow(
2418            0,
2419            10,
2420            &["n0011500/p3122e2a1-a0c7-429f-87aa-1019bf4f5f86"],
2421            1000,
2422        ));
2423
2424        assert!(fueled_reqs.is_empty());
2425        assert_eq!(
2426            trace.fueled_merge_reqs_before_ms(u64::MAX, None).count(),
2427            0,
2428            "no merge reqs when not filtering by version"
2429        );
2430        assert_eq!(
2431            trace
2432                .fueled_merge_reqs_before_ms(
2433                    u64::MAX,
2434                    Some(WriterKey::for_version(&Version::new(0, 50, 0)))
2435                )
2436                .count(),
2437            0,
2438            "zero batches are older than a past version"
2439        );
2440        assert_eq!(
2441            trace
2442                .fueled_merge_reqs_before_ms(
2443                    u64::MAX,
2444                    Some(WriterKey::for_version(&Version::new(99, 99, 0)))
2445                )
2446                .count(),
2447            1,
2448            "one batch is older than a future version"
2449        );
2450    }
2451
2452    #[mz_ore::test]
2453    fn remove_redundant_merge_reqs() {
2454        fn req(lower: u64, upper: u64) -> FueledMergeReq<u64> {
2455            FueledMergeReq {
2456                id: SpineId(usize::cast_from(lower), usize::cast_from(upper)),
2457                desc: Description::new(
2458                    Antichain::from_elem(lower),
2459                    Antichain::from_elem(upper),
2460                    Antichain::new(),
2461                ),
2462                inputs: vec![],
2463            }
2464        }
2465
2466        // Empty
2467        assert_eq!(Trace::<u64>::remove_redundant_merge_reqs(vec![]), vec![]);
2468
2469        // Single
2470        assert_eq!(
2471            Trace::remove_redundant_merge_reqs(vec![req(0, 1)]),
2472            vec![req(0, 1)]
2473        );
2474
2475        // Duplicate
2476        assert_eq!(
2477            Trace::remove_redundant_merge_reqs(vec![req(0, 1), req(0, 1)]),
2478            vec![req(0, 1)]
2479        );
2480
2481        // Nothing covered
2482        assert_eq!(
2483            Trace::remove_redundant_merge_reqs(vec![req(0, 1), req(1, 2)]),
2484            vec![req(1, 2), req(0, 1)]
2485        );
2486
2487        // Covered
2488        assert_eq!(
2489            Trace::remove_redundant_merge_reqs(vec![req(1, 2), req(0, 3)]),
2490            vec![req(0, 3)]
2491        );
2492
2493        // Covered, lower equal
2494        assert_eq!(
2495            Trace::remove_redundant_merge_reqs(vec![req(0, 2), req(0, 3)]),
2496            vec![req(0, 3)]
2497        );
2498
2499        // Covered, upper equal
2500        assert_eq!(
2501            Trace::remove_redundant_merge_reqs(vec![req(1, 3), req(0, 3)]),
2502            vec![req(0, 3)]
2503        );
2504
2505        // Covered, unexpected order (doesn't happen in practice)
2506        assert_eq!(
2507            Trace::remove_redundant_merge_reqs(vec![req(0, 3), req(1, 2)]),
2508            vec![req(0, 3)]
2509        );
2510
2511        // Partially overlapping
2512        assert_eq!(
2513            Trace::remove_redundant_merge_reqs(vec![req(0, 2), req(1, 3)]),
2514            vec![req(1, 3), req(0, 2)]
2515        );
2516
2517        // Partially overlapping, the other order
2518        assert_eq!(
2519            Trace::remove_redundant_merge_reqs(vec![req(1, 3), req(0, 2)]),
2520            vec![req(0, 2), req(1, 3)]
2521        );
2522
2523        // Different sinces (doesn't happen in practice)
2524        let req015 = FueledMergeReq {
2525            id: SpineId(0, 1),
2526            desc: Description::new(
2527                Antichain::from_elem(0),
2528                Antichain::from_elem(1),
2529                Antichain::from_elem(5),
2530            ),
2531            inputs: vec![],
2532        };
2533        assert_eq!(
2534            Trace::remove_redundant_merge_reqs(vec![req(0, 1), req015.clone()]),
2535            vec![req015, req(0, 1)]
2536        );
2537    }
2538
2539    #[mz_ore::test]
2540    #[cfg_attr(miri, ignore)] // proptest is too heavy for miri!
2541    fn construct_batch_with_runs_replaced_test() {
2542        let batch_strategy = any_hollow_batch::<u64>();
2543        let to_replace_strategy = any_hollow_batch_with_exact_runs::<u64>(1);
2544
2545        let combined_strategy = (batch_strategy, to_replace_strategy)
2546            .prop_filter("non-empty batch", |(batch, _)| batch.run_meta.len() >= 1);
2547
2548        let final_strategy = combined_strategy.prop_flat_map(|(batch, to_replace)| {
2549            let batch_len = batch.run_meta.len();
2550            let batch_clone = batch.clone();
2551            let to_replace_clone = to_replace.clone();
2552
2553            proptest::collection::vec(any::<bool>(), batch_len)
2554                .prop_filter("at least one run selected", |mask| mask.iter().any(|&x| x))
2555                .prop_map(move |mask| {
2556                    let indices: Vec<usize> = mask
2557                        .iter()
2558                        .enumerate()
2559                        .filter_map(|(i, &selected)| if selected { Some(i) } else { None })
2560                        .collect();
2561                    (batch_clone.clone(), to_replace_clone.clone(), indices)
2562                })
2563        });
2564
2565        proptest!(|(
2566            (batch, to_replace, runs) in final_strategy
2567        )| {
2568            let original_run_ids: Vec<_> = batch.run_meta.iter().map(|x|
2569                x.id.unwrap().clone()
2570            ).collect();
2571
2572            let run_ids = runs.iter().map(|&i| original_run_ids[i].clone()).collect::<Vec<_>>();
2573
2574            let new_batch = SpineBatch::construct_batch_with_runs_replaced(
2575                &batch,
2576                &run_ids,
2577                &to_replace,
2578            ).unwrap();
2579
2580            prop_assert!(new_batch.run_meta.len() == batch.run_meta.len() - runs.len() + to_replace.run_meta.len());
2581        });
2582    }
2583
2584    #[mz_ore::test]
2585    fn test_perform_subset_replacement() {
2586        let batch1 = crate::internal::state::tests::hollow::<u64>(0, 10, &["a"], 10);
2587        let batch2 = crate::internal::state::tests::hollow::<u64>(10, 20, &["b"], 10);
2588        let batch3 = crate::internal::state::tests::hollow::<u64>(20, 30, &["c"], 10);
2589
2590        let id_batch1 = IdHollowBatch {
2591            id: SpineId(0, 1),
2592            batch: Arc::new(batch1.clone()),
2593        };
2594        let id_batch2 = IdHollowBatch {
2595            id: SpineId(1, 2),
2596            batch: Arc::new(batch2.clone()),
2597        };
2598        let id_batch3 = IdHollowBatch {
2599            id: SpineId(2, 3),
2600            batch: Arc::new(batch3.clone()),
2601        };
2602
2603        let spine_batch = SpineBatch {
2604            id: SpineId(0, 3),
2605            desc: Description::new(
2606                Antichain::from_elem(0),
2607                Antichain::from_elem(30),
2608                Antichain::from_elem(0),
2609            ),
2610            parts: vec![id_batch1, id_batch2, id_batch3],
2611            active_compaction: None,
2612            len: 30,
2613        };
2614
2615        let res_exact = crate::internal::state::tests::hollow::<u64>(0, 30, &["d"], 30);
2616        let mut sb_exact = spine_batch.clone();
2617        let result = sb_exact.perform_subset_replacement(&res_exact, SpineId(0, 3), 0..3, None);
2618        assert!(matches!(result, ApplyMergeResult::AppliedExact));
2619        assert_eq!(sb_exact.parts.len(), 1);
2620        assert_eq!(sb_exact.len(), 30);
2621
2622        let res_subset = crate::internal::state::tests::hollow::<u64>(0, 20, &["e"], 20);
2623        let mut sb_subset = spine_batch.clone();
2624        let result = sb_subset.perform_subset_replacement(&res_subset, SpineId(0, 2), 0..2, None);
2625        assert!(matches!(result, ApplyMergeResult::AppliedSubset));
2626        assert_eq!(sb_subset.parts.len(), 2); // One new part + one old part
2627        assert_eq!(sb_subset.len(), 30);
2628
2629        let res_too_big = crate::internal::state::tests::hollow::<u64>(0, 30, &["f"], 31);
2630        let mut sb_too_big = spine_batch.clone();
2631        let result = sb_too_big.perform_subset_replacement(&res_too_big, SpineId(0, 3), 0..3, None);
2632        assert!(matches!(result, ApplyMergeResult::NotAppliedTooManyUpdates));
2633        assert_eq!(sb_too_big.parts.len(), 3);
2634        assert_eq!(sb_too_big.len(), 30);
2635    }
2636}