1use std::cmp::Ordering;
51use std::collections::{BTreeMap, BTreeSet};
52use std::fmt::{Debug, Display};
53use std::mem;
54use std::ops::Range;
55use std::sync::Arc;
56
57use arrayvec::ArrayVec;
58use differential_dataflow::difference::Monoid;
59use differential_dataflow::lattice::Lattice;
60use differential_dataflow::trace::Description;
61use itertools::Itertools;
62use mz_ore::cast::CastFrom;
63use mz_persist::metrics::ColumnarMetrics;
64use mz_persist_types::Codec64;
65use serde::{Serialize, Serializer};
66use timely::PartialOrder;
67use timely::progress::frontier::AntichainRef;
68use timely::progress::{Antichain, Timestamp};
69use tracing::{error, warn};
70
71use crate::internal::paths::WriterKey;
72use crate::internal::state::{HollowBatch, RunId};
73
74use super::state::RunPart;
75
/// A request to merge the given batches of a spine into a single batch.
#[derive(Debug, Clone, PartialEq)]
pub struct FueledMergeReq<T> {
    /// The id of the spine batch the merged output would replace.
    pub id: SpineId,
    /// The bounds (lower/upper/since) of the requested merge output.
    pub desc: Description<T>,
    /// The batches to merge together.
    pub inputs: Vec<IdHollowBatch<T>>,
}
82
/// The result of a fueled merge: a compacted output batch plus a description
/// of which spine data it replaces.
#[derive(Debug)]
pub struct FueledMergeRes<T> {
    /// The compacted batch produced by the merge.
    pub output: HollowBatch<T>,
    /// Identifies what input data `output` is a replacement for.
    pub input: CompactionInput,
    /// If set, recorded as the replaced batch's in-progress compaction.
    pub new_active_compaction: Option<ActiveCompaction>,
}
89
/// An append-only collection of batches, maintained as a [Spine].
#[derive(Debug, Clone)]
pub struct Trace<T> {
    // The in-memory merge structure holding the batches.
    spine: Spine<T>,
    // When true, the spine's structure (levels, ids, merges) is preserved when
    // flattening/unflattening; when false only the batches themselves survive.
    pub(crate) roundtrip_structure: bool,
}
99
#[cfg(any(test, debug_assertions))]
impl<T: PartialEq> PartialEq for Trace<T> {
    fn eq(&self, other: &Self) -> bool {
        // Exhaustively destructure both sides so that adding a field to Trace
        // forces a compile error here, prompting reconsideration of equality.
        let Trace {
            spine: _,
            roundtrip_structure: _,
        } = self;
        let Trace {
            spine: _,
            roundtrip_structure: _,
        } = other;

        // Two traces are equal when they contain the same hollow batches in
        // the same order; the in-memory spine shape is deliberately ignored.
        self.batches().eq(other.batches())
    }
}
119
120impl<T: Timestamp + Lattice> Default for Trace<T> {
121 fn default() -> Self {
122 Self {
123 spine: Spine::new(),
124 roundtrip_structure: true,
125 }
126 }
127}
128
/// A serializable representation of one spine batch: its level and bounds,
/// plus the ids of the hollow batches that make it up.
#[derive(Clone, Debug, Serialize)]
pub struct ThinSpineBatch<T> {
    /// The spine level this batch lives at.
    pub(crate) level: usize,
    /// The overall bounds of the batch.
    pub(crate) desc: Description<T>,
    /// Ids of the constituent hollow batches, in order.
    pub(crate) parts: Vec<SpineId>,
    /// Per-part descriptions; may be shorter than `parts` (missing entries are
    /// treated as unknown on unflatten).
    pub(crate) descs: Vec<Description<T>>,
}
138
impl<T: PartialEq> PartialEq for ThinSpineBatch<T> {
    fn eq(&self, other: &Self) -> bool {
        // `descs` is deliberately left out of equality: only level, bounds,
        // and the part ids determine batch identity here.
        (self.level, &self.desc, &self.parts).eq(&(other.level, &other.desc, &other.parts))
    }
}
145
/// A serializable representation of merge state for a spine batch: either an
/// in-progress fueling merge or a claimed (fueled) compaction.
#[derive(Clone, Debug, Eq, PartialEq, Serialize)]
pub struct ThinMerge<T> {
    /// The since frontier of the merge output.
    pub(crate) since: Antichain<T>,
    /// Fuel still needed to complete the merge; zero for a fueled batch.
    pub(crate) remaining_work: usize,
    /// The claimed compaction for a fueled batch, if any.
    pub(crate) active_compaction: Option<ActiveCompaction>,
}
152
153impl<T: Clone> ThinMerge<T> {
154 fn fueling(merge: &FuelingMerge<T>) -> Self {
155 ThinMerge {
156 since: merge.since.clone(),
157 remaining_work: merge.remaining_work,
158 active_compaction: None,
159 }
160 }
161
162 fn fueled(batch: &SpineBatch<T>) -> Self {
163 ThinMerge {
164 since: batch.desc.since().clone(),
165 remaining_work: 0,
166 active_compaction: batch.active_compaction.clone(),
167 }
168 }
169}
170
/// A flattened, serialization-friendly view of a [Trace].
#[derive(Clone, Debug)]
pub struct FlatTrace<T> {
    /// The since frontier of the trace.
    pub(crate) since: Antichain<T>,
    /// Batches not tied to a specific spine id (map used as an ordered set).
    pub(crate) legacy_batches: BTreeMap<Arc<HollowBatch<T>>, ()>,
    /// Batches addressed by spine id; on flatten, only batches whose
    /// lower == upper land here (see `flatten`).
    pub(crate) hollow_batches: BTreeMap<SpineId, Arc<HollowBatch<T>>>,
    /// The spine's batch structure, keyed by id.
    pub(crate) spine_batches: BTreeMap<SpineId, ThinSpineBatch<T>>,
    /// Merge state per spine batch id.
    pub(crate) merges: BTreeMap<SpineId, ThinMerge<T>>,
}
202
impl<T: Timestamp + Lattice> Trace<T> {
    /// Converts this trace into its serializable [FlatTrace] form: each spine
    /// batch becomes a [ThinSpineBatch], its hollow batches are routed either
    /// to `hollow_batches` (keyed by id) or `legacy_batches`, and per-batch
    /// merge state is captured in `merges`.
    pub(crate) fn flatten(&self) -> FlatTrace<T> {
        let since = self.spine.since.clone();
        let mut legacy_batches = BTreeMap::new();
        let mut hollow_batches = BTreeMap::new();
        let mut spine_batches = BTreeMap::new();
        let mut merges = BTreeMap::new();

        let mut push_spine_batch = |level: usize, batch: &SpineBatch<T>| {
            let id = batch.id();
            let desc = batch.desc.clone();
            let mut parts = Vec::with_capacity(batch.parts.len());
            let mut descs = Vec::with_capacity(batch.parts.len());
            for IdHollowBatch { id, batch } in &batch.parts {
                parts.push(*id);
                descs.push(batch.desc.clone());
                // Batches with lower == upper can't be uniquely keyed by their
                // time bounds, so they go in the id-keyed map; everything else
                // is stored in the legacy (bounds-ordered) set.
                if batch.desc.lower() == batch.desc.upper() {
                    hollow_batches.insert(*id, Arc::clone(batch));
                } else {
                    legacy_batches.insert(Arc::clone(batch), ());
                }
            }

            let spine_batch = ThinSpineBatch {
                level,
                desc,
                parts,
                descs,
            };
            spine_batches.insert(id, spine_batch);
        };

        for (level, state) in self.spine.merging.iter().enumerate() {
            for batch in &state.batches {
                push_spine_batch(level, batch);
                // A batch with a claimed compaction is recorded as a "fueled"
                // merge; each id may carry at most one merge record.
                if let Some(c) = &batch.active_compaction {
                    let previous = merges.insert(batch.id, ThinMerge::fueled(batch));
                    assert!(
                        previous.is_none(),
                        "recording a compaction for a batch that already exists! (level={level}, id={:?}, compaction={c:?})",
                        batch.id,
                    )
                }
            }
            // An in-progress fueling merge at this level is recorded under the
            // id of its (eventual) output batch.
            if let Some(IdFuelingMerge { id, merge }) = state.merge.as_ref() {
                let previous = merges.insert(*id, ThinMerge::fueling(merge));
                assert!(
                    previous.is_none(),
                    "fueling a merge for a batch that already exists! (level={level}, id={id:?}, merge={merge:?})"
                )
            }
        }

        // When not round-tripping structure, only the legacy batches survive.
        if !self.roundtrip_structure {
            assert!(hollow_batches.is_empty());
            spine_batches.clear();
            merges.clear();
        }

        FlatTrace {
            since,
            legacy_batches,
            hollow_batches,
            spine_batches,
            merges,
        }
    }

    /// Reconstructs a [Trace] from its flattened form, validating that every
    /// referenced batch is present and that bounds line up. The inverse of
    /// [Trace::flatten].
    pub(crate) fn unflatten(value: FlatTrace<T>) -> Result<Self, String> {
        let FlatTrace {
            since,
            legacy_batches,
            mut hollow_batches,
            spine_batches,
            mut merges,
        } = value;

        // Structure is considered present if there are spine batches, or if
        // the trace is entirely empty.
        let roundtrip_structure = !spine_batches.is_empty() || legacy_batches.is_empty();

        // Total order on antichains: incomparable chains compare Equal.
        let compare_chains = |left: &Antichain<T>, right: &Antichain<T>| {
            if PartialOrder::less_than(left, right) {
                Ordering::Less
            } else if PartialOrder::less_than(right, left) {
                Ordering::Greater
            } else {
                Ordering::Equal
            }
        };
        // Sort descending by lower bound so we can `pop` batches in ascending
        // time order below.
        let mut legacy_batches: Vec<_> = legacy_batches.into_iter().map(|(k, _)| k).collect();
        legacy_batches.sort_by(|a, b| compare_chains(a.desc.lower(), b.desc.lower()).reverse());

        // Fetches the hollow batch for `id`: from the id-keyed map if present,
        // otherwise the next legacy batch, stitching/splitting empty batches
        // as needed so its bounds match `expected_desc`.
        let mut pop_batch =
            |id: SpineId, expected_desc: Option<&Description<T>>| -> Result<_, String> {
                if let Some(batch) = hollow_batches.remove(&id) {
                    if let Some(desc) = expected_desc {
                        assert_eq!(desc.lower(), batch.desc.lower());
                        assert_eq!(desc.upper(), batch.desc.upper());
                        // A mismatched since is tolerated but surprising.
                        if desc.since() != batch.desc.since() {
                            warn!(
                                "unexpected since out of sync for spine batch: {:?} != {:?}",
                                desc.since().elements(),
                                batch.desc.since().elements()
                            );
                        }
                    }
                    return Ok(IdHollowBatch { id, batch });
                }
                let mut batch = legacy_batches
                    .pop()
                    .ok_or_else(|| format!("missing referenced hollow batch {id:?}"))?;

                let Some(expected_desc) = expected_desc else {
                    return Ok(IdHollowBatch { id, batch });
                };

                if expected_desc.lower() != batch.desc.lower() {
                    return Err(format!(
                        "hollow batch lower {:?} did not match expected lower {:?}",
                        batch.desc.lower().elements(),
                        expected_desc.lower().elements()
                    ));
                }

                // Empty batches may have been merged/split differently than
                // the spine expects; coalesce adjacent empty legacy batches
                // (and push back any overshoot) to match the expected upper.
                if batch.parts.is_empty() && batch.run_splits.is_empty() && batch.len == 0 {
                    let mut new_upper = batch.desc.upper().clone();

                    // Absorb subsequent empty batches until we reach or pass
                    // the expected upper.
                    while PartialOrder::less_than(&new_upper, expected_desc.upper()) {
                        let Some(next_batch) = legacy_batches.pop() else {
                            break;
                        };
                        if next_batch.is_empty() {
                            new_upper.clone_from(next_batch.desc.upper());
                        } else {
                            legacy_batches.push(next_batch);
                            break;
                        }
                    }

                    // If we overshot, return the excess range as a fresh empty
                    // legacy batch and trim back to the expected upper.
                    if PartialOrder::less_than(expected_desc.upper(), &new_upper) {
                        legacy_batches.push(Arc::new(HollowBatch::empty(Description::new(
                            expected_desc.upper().clone(),
                            new_upper.clone(),
                            batch.desc.since().clone(),
                        ))));
                        new_upper.clone_from(expected_desc.upper());
                    }
                    batch = Arc::new(HollowBatch::empty(Description::new(
                        batch.desc.lower().clone(),
                        new_upper,
                        batch.desc.since().clone(),
                    )))
                }

                if expected_desc.upper() != batch.desc.upper() {
                    return Err(format!(
                        "hollow batch upper {:?} did not match expected upper {:?}",
                        batch.desc.upper().elements(),
                        expected_desc.upper().elements()
                    ));
                }

                Ok(IdHollowBatch { id, batch })
            };

        // The trace's upper and the next spine id come from the last (newest)
        // spine batch, if any.
        let (upper, next_id) = if let Some((id, batch)) = spine_batches.last_key_value() {
            (batch.desc.upper().clone(), id.1)
        } else {
            (Antichain::from_elem(T::minimum()), 0)
        };
        // The first (smallest-id) batch sits at the highest level.
        let levels = spine_batches
            .first_key_value()
            .map(|(_, batch)| batch.level + 1)
            .unwrap_or(0);
        let mut merging = vec![MergeState::default(); levels];
        for (id, batch) in spine_batches {
            let level = batch.level;

            // Pad missing per-part descs with None (descs may be shorter than
            // parts for older serialized forms).
            let descs = batch.descs.iter().map(Some).chain(std::iter::repeat_n(
                None,
                batch.parts.len() - batch.descs.len(),
            ));
            let parts = batch
                .parts
                .into_iter()
                .zip_eq(descs)
                .map(|(id, desc)| pop_batch(id, desc))
                .collect::<Result<Vec<_>, _>>()?;
            let len = parts.iter().map(|p| (*p).batch.len).sum();
            let active_compaction = merges.remove(&id).and_then(|m| m.active_compaction);
            let batch = SpineBatch {
                id,
                desc: batch.desc,
                parts,
                active_compaction,
                len,
            };

            let state = &mut merging[level];

            state.push_batch(batch);
            // If there's a fueling merge recorded for this level's combined
            // id, reattach it.
            if let Some(id) = state.id() {
                if let Some(merge) = merges.remove(&id) {
                    state.merge = Some(IdFuelingMerge {
                        id,
                        merge: FuelingMerge {
                            since: merge.since,
                            remaining_work: merge.remaining_work,
                        },
                    })
                }
            }
        }

        let mut trace = Trace {
            spine: Spine {
                effort: 1,
                next_id,
                since,
                upper,
                merging,
            },
            roundtrip_structure,
        };

        fn check_empty(name: &str, len: usize) -> Result<(), String> {
            if len != 0 {
                Err(format!("{len} {name} left after reconstructing spine"))
            } else {
                Ok(())
            }
        }

        if roundtrip_structure {
            check_empty("legacy batches", legacy_batches.len())?;
        } else {
            // No structure to restore: push the legacy batches oldest-first
            // (the vec is sorted newest-first).
            for batch in legacy_batches.into_iter().rev() {
                trace.push_batch_no_merge_reqs(Arc::unwrap_or_clone(batch));
            }
        }
        check_empty("hollow batches", hollow_batches.len())?;
        check_empty("merges", merges.len())?;

        debug_assert_eq!(trace.validate(), Ok(()), "{:?}", trace);

        Ok(trace)
    }
}
473
/// Counts of spine batches by compaction state, for metrics reporting.
#[derive(Clone, Debug, Default)]
pub(crate) struct SpineMetrics {
    /// Batches that are already fully compact (at most one run).
    pub compact_batches: u64,
    /// Batches with a claimed, in-progress compaction.
    pub compacting_batches: u64,
    /// Batches that are neither compact nor being compacted.
    pub noncompact_batches: u64,
}
480
481impl<T> Trace<T> {
482 pub fn since(&self) -> &Antichain<T> {
483 &self.spine.since
484 }
485
486 pub fn upper(&self) -> &Antichain<T> {
487 &self.spine.upper
488 }
489
490 pub fn map_batches<'a, F: FnMut(&'a HollowBatch<T>)>(&'a self, mut f: F) {
491 for batch in self.batches() {
492 f(batch);
493 }
494 }
495
496 pub fn batches(&self) -> impl Iterator<Item = &HollowBatch<T>> {
497 self.spine
498 .spine_batches()
499 .flat_map(|b| b.parts.as_slice())
500 .map(|b| &*b.batch)
501 }
502
503 pub fn num_spine_batches(&self) -> usize {
504 self.spine.spine_batches().count()
505 }
506
507 #[cfg(test)]
508 pub fn num_hollow_batches(&self) -> usize {
509 self.batches().count()
510 }
511
512 #[cfg(test)]
513 pub fn num_updates(&self) -> usize {
514 self.batches().map(|b| b.len).sum()
515 }
516}
517
impl<T: Timestamp + Lattice> Trace<T> {
    /// Advances the trace's since frontier.
    pub fn downgrade_since(&mut self, since: &Antichain<T>) {
        self.spine.since.clone_from(since);
    }

    /// Pushes a batch onto the spine, returning the (deduplicated) merge
    /// requests generated by any merges this insert kicked off.
    #[must_use]
    pub fn push_batch(&mut self, batch: HollowBatch<T>) -> Vec<FueledMergeReq<T>> {
        let mut merge_reqs = Vec::new();
        self.spine.insert(
            batch,
            &mut SpineLog::Enabled {
                merge_reqs: &mut merge_reqs,
            },
        );
        debug_assert_eq!(self.spine.validate(), Ok(()), "{:?}", self);
        Self::remove_redundant_merge_reqs(merge_reqs)
    }

    /// Records that a compaction has been claimed for the batch with the
    /// given id, if it exists (searched newest-first).
    pub fn claim_compaction(&mut self, id: SpineId, compaction: ActiveCompaction) {
        for batch in self.spine.spine_batches_mut().rev() {
            if batch.id == id {
                batch.active_compaction = Some(compaction);
                break;
            }
        }
    }

    /// Like [Self::push_batch], but discards any generated merge requests.
    pub(crate) fn push_batch_no_merge_reqs(&mut self, batch: HollowBatch<T>) {
        self.spine.insert(batch, &mut SpineLog::Disabled);
    }

    /// Applies `fuel` units of effort to in-progress merges, returning the
    /// resulting merge requests and whether any work was done.
    #[must_use]
    pub fn exert(&mut self, fuel: usize) -> (Vec<FueledMergeReq<T>>, bool) {
        let mut merge_reqs = Vec::new();
        let did_work = self.spine.exert(
            fuel,
            &mut SpineLog::Enabled {
                merge_reqs: &mut merge_reqs,
            },
        );
        debug_assert_eq!(self.spine.validate(), Ok(()), "{:?}", self);
        let merge_reqs = Self::remove_redundant_merge_reqs(merge_reqs);
        (merge_reqs, did_work)
    }

    /// Checks internal spine invariants.
    pub fn validate(&self) -> Result<(), String> {
        self.spine.validate()
    }

    /// Returns merge requests for batches that need compaction: batches that
    /// aren't compact, or contain parts written by a writer older than
    /// `threshold_writer` — excluding batches whose claimed compaction
    /// started after `threshold_ms`.
    pub(crate) fn fueled_merge_reqs_before_ms(
        &self,
        threshold_ms: u64,
        threshold_writer: Option<WriterKey>,
    ) -> impl Iterator<Item = FueledMergeReq<T>> + '_ {
        self.spine
            .spine_batches()
            .filter(move |b| {
                let noncompact = !b.is_compact();
                let old_writer = threshold_writer.as_ref().map_or(false, |min_writer| {
                    b.parts.iter().any(|b| {
                        b.batch
                            .parts
                            .iter()
                            .any(|p| p.writer_key().map_or(false, |writer| writer < *min_writer))
                    })
                });
                noncompact || old_writer
            })
            .filter(move |b| {
                // Skip batches already being compacted, unless that compaction
                // started at or before the threshold (i.e. looks stalled).
                b.active_compaction
                    .as_ref()
                    .map_or(true, move |c| c.start_ms <= threshold_ms)
            })
            .map(|b| FueledMergeReq {
                id: b.id,
                desc: b.desc.clone(),
                inputs: b.parts.clone(),
            })
    }

    /// Drops merge requests that are subsumed by another request: one req
    /// covers another when its id range contains the other's and their sinces
    /// match. Later (newer) reqs win ties because they're processed first.
    fn remove_redundant_merge_reqs(
        mut merge_reqs: Vec<FueledMergeReq<T>>,
    ) -> Vec<FueledMergeReq<T>> {
        // Returns true if b0 covers b1: b1's id range is within b0's and they
        // have the same since.
        fn covers<T: PartialOrder>(b0: &FueledMergeReq<T>, b1: &FueledMergeReq<T>) -> bool {
            b0.id.covers(b1.id) && b0.desc.since() == b1.desc.since()
        }

        let mut ret = Vec::<FueledMergeReq<T>>::with_capacity(merge_reqs.len());
        // Process newest-first; keep a req only if nothing kept so far covers
        // it, and evict anything it covers.
        while let Some(merge_req) = merge_reqs.pop() {
            let covered = ret.iter().any(|r| covers(r, &merge_req));
            if !covered {
                ret.retain(|r| !covers(&merge_req, r));
                ret.push(merge_req);
            }
        }
        ret
    }

    /// Tallies batches by compaction state for metrics.
    pub fn spine_metrics(&self) -> SpineMetrics {
        let mut metrics = SpineMetrics::default();
        for batch in self.spine.spine_batches() {
            if batch.is_compact() {
                metrics.compact_batches += 1;
            } else if batch.is_merging() {
                metrics.compacting_batches += 1;
            } else {
                metrics.noncompact_batches += 1;
            }
        }
        metrics
    }
}
669
670impl<T: Timestamp + Lattice + Codec64> Trace<T> {
671 pub fn apply_merge_res_checked<D: Codec64 + Monoid + PartialEq>(
672 &mut self,
673 res: &FueledMergeRes<T>,
674 metrics: &ColumnarMetrics,
675 ) -> ApplyMergeResult {
676 for batch in self.spine.spine_batches_mut().rev() {
677 let result = batch.maybe_replace_checked::<D>(res, metrics);
678 if result.matched() {
679 return result;
680 }
681 }
682 ApplyMergeResult::NotAppliedNoMatch
683 }
684
685 pub fn apply_merge_res_unchecked(&mut self, res: &FueledMergeRes<T>) -> ApplyMergeResult {
686 for batch in self.spine.spine_batches_mut().rev() {
687 let result = batch.maybe_replace_unchecked(res);
688 if result.matched() {
689 return result;
690 }
691 }
692 ApplyMergeResult::NotAppliedNoMatch
693 }
694
695 pub fn apply_tombstone_merge(&mut self, desc: &Description<T>) -> ApplyMergeResult {
696 for batch in self.spine.spine_batches_mut().rev() {
697 let result = batch.maybe_replace_with_tombstone(desc);
698 if result.matched() {
699 return result;
700 }
701 }
702 ApplyMergeResult::NotAppliedNoMatch
703 }
704}
705
/// Controls whether spine operations record the merge requests they generate.
enum SpineLog<'a, T> {
    /// Collect generated merge requests into the given vec.
    Enabled {
        merge_reqs: &'a mut Vec<FueledMergeReq<T>>,
    },
    /// Discard generated merge requests.
    Disabled,
}
714
/// Identifies which input data a compaction output replaces.
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub enum CompactionInput {
    /// Pre-structured-compaction result: match by the output's bounds.
    Legacy,
    /// The output replaces the contiguous id range of hollow batches.
    IdRange(SpineId),
    /// The output replaces the given runs within a single hollow batch.
    PartialBatch(SpineId, BTreeSet<RunId>),
}
725
/// A half-open range `[lo, hi)` of spine insertion indices identifying a
/// (possibly merged) spine batch.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct SpineId(pub usize, pub usize);
728
729impl Display for SpineId {
730 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
731 write!(f, "[{}, {})", self.0, self.1)
732 }
733}
734
735impl Serialize for SpineId {
736 fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
737 where
738 S: Serializer,
739 {
740 let SpineId(lo, hi) = self;
741 serializer.serialize_str(&format!("{lo}-{hi}"))
742 }
743}
744
745pub fn id_range(ids: BTreeSet<SpineId>) -> SpineId {
747 let mut id_iter = ids.iter().copied();
748 let Some(mut result) = id_iter.next() else {
749 panic!("at least one batch must be present")
750 };
751
752 for id in id_iter {
753 assert_eq!(
754 result.1, id.0,
755 "expected contiguous ids, but {result:?} is not adjacent to {id:?} in ids {ids:?}"
756 );
757 result.1 = id.1;
758 }
759 result
760}
761
762impl SpineId {
763 fn covers(self, other: SpineId) -> bool {
764 self.0 <= other.0 && other.1 <= self.1
765 }
766}
767
/// A [HollowBatch] together with the [SpineId] it occupies in the spine.
#[derive(Debug, Clone, PartialEq)]
pub struct IdHollowBatch<T> {
    /// The spine id of this batch.
    pub id: SpineId,
    /// The batch itself, shared to make clones cheap.
    pub batch: Arc<HollowBatch<T>>,
}
773
/// A marker that a compaction has been claimed for a batch.
#[derive(Debug, Clone, Eq, PartialEq, Serialize)]
pub struct ActiveCompaction {
    /// Wall-clock milliseconds at which the compaction was claimed.
    pub start_ms: u64,
}
778
/// A batch in the spine: one or more hollow batches covering a contiguous
/// range of spine ids and times.
#[derive(Debug, Clone, PartialEq)]
struct SpineBatch<T> {
    // Spans the ids of all batches in `parts`.
    id: SpineId,
    // The overall bounds of this batch.
    desc: Description<T>,
    // The constituent hollow batches, in order.
    parts: Vec<IdHollowBatch<T>>,
    // A claimed in-progress compaction, if any.
    active_compaction: Option<ActiveCompaction>,
    // Cached total update count; must equal the sum of the parts' lens.
    len: usize,
}
788
impl<T> SpineBatch<T> {
    /// A spine batch consisting of exactly one (already merged) hollow batch;
    /// a freshly merged batch has no active compaction.
    fn merged(batch: IdHollowBatch<T>) -> Self
    where
        T: Clone,
    {
        Self {
            id: batch.id,
            desc: batch.batch.desc.clone(),
            len: batch.batch.len,
            parts: vec![batch],
            active_compaction: None,
        }
    }
}
803
/// The outcome of attempting to apply a merge result to a spine batch.
#[derive(Debug, Copy, Clone)]
pub enum ApplyMergeResult {
    /// The whole batch was replaced by the merge output.
    AppliedExact,
    /// A subset of the batch's parts was replaced.
    AppliedSubset,
    /// No batch matched the merge output's bounds/ids.
    NotAppliedNoMatch,
    /// The merge output's since was ahead of the batch's since.
    NotAppliedInvalidSince,
    /// Applying would have increased the batch's update count.
    NotAppliedTooManyUpdates,
}
812
813impl ApplyMergeResult {
814 pub fn applied(&self) -> bool {
815 match self {
816 ApplyMergeResult::AppliedExact | ApplyMergeResult::AppliedSubset => true,
817 _ => false,
818 }
819 }
820 pub fn matched(&self) -> bool {
821 match self {
822 ApplyMergeResult::AppliedExact
823 | ApplyMergeResult::AppliedSubset
824 | ApplyMergeResult::NotAppliedTooManyUpdates => true,
825 _ => false,
826 }
827 }
828}
829
830impl<T: Timestamp + Lattice> SpineBatch<T> {
831 pub fn lower(&self) -> &Antichain<T> {
832 self.desc().lower()
833 }
834
835 pub fn upper(&self) -> &Antichain<T> {
836 self.desc().upper()
837 }
838
839 fn id(&self) -> SpineId {
840 debug_assert_eq!(self.parts.first().map(|x| x.id.0), Some(self.id.0));
841 debug_assert_eq!(self.parts.last().map(|x| x.id.1), Some(self.id.1));
842 self.id
843 }
844
845 pub fn is_compact(&self) -> bool {
846 self.parts
853 .iter()
854 .map(|p| p.batch.run_meta.len())
855 .sum::<usize>()
856 <= 1
857 }
858
859 pub fn is_merging(&self) -> bool {
860 self.active_compaction.is_some()
861 }
862
863 fn desc(&self) -> &Description<T> {
864 &self.desc
865 }
866
867 pub fn len(&self) -> usize {
868 debug_assert_eq!(
871 self.len,
872 self.parts.iter().map(|x| x.batch.len).sum::<usize>()
873 );
874 self.len
875 }
876
877 pub fn is_empty(&self) -> bool {
878 self.len() == 0
879 }
880
881 pub fn empty(
882 id: SpineId,
883 lower: Antichain<T>,
884 upper: Antichain<T>,
885 since: Antichain<T>,
886 ) -> Self {
887 SpineBatch::merged(IdHollowBatch {
888 id,
889 batch: Arc::new(HollowBatch::empty(Description::new(lower, upper, since))),
890 })
891 }
892
893 pub fn begin_merge(
894 bs: &[Self],
895 compaction_frontier: Option<AntichainRef<T>>,
896 ) -> Option<IdFuelingMerge<T>> {
897 let from = bs.first()?.id().0;
898 let until = bs.last()?.id().1;
899 let id = SpineId(from, until);
900 let mut sinces = bs.iter().map(|b| b.desc().since());
901 let mut since = sinces.next()?.clone();
902 for b in bs {
903 since.join_assign(b.desc().since())
904 }
905 if let Some(compaction_frontier) = compaction_frontier {
906 since.join_assign(&compaction_frontier.to_owned());
907 }
908 let remaining_work = bs.iter().map(|x| x.len()).sum();
909 Some(IdFuelingMerge {
910 id,
911 merge: FuelingMerge {
912 since,
913 remaining_work,
914 },
915 })
916 }
917
918 #[cfg(test)]
919 fn describe(&self, extended: bool) -> String {
920 let SpineBatch {
921 id,
922 parts,
923 desc,
924 active_compaction,
925 len,
926 } = self;
927 let compaction = match active_compaction {
928 None => "".to_owned(),
929 Some(c) => format!(" (c@{})", c.start_ms),
930 };
931 match extended {
932 false => format!(
933 "[{}-{}]{:?}{:?}{}/{}{compaction}",
934 id.0,
935 id.1,
936 desc.lower().elements(),
937 desc.upper().elements(),
938 parts.len(),
939 len
940 ),
941 true => {
942 format!(
943 "[{}-{}]{:?}{:?}{:?} {}/{}{}{compaction}",
944 id.0,
945 id.1,
946 desc.lower().elements(),
947 desc.upper().elements(),
948 desc.since().elements(),
949 parts.len(),
950 len,
951 parts
952 .iter()
953 .flat_map(|x| x.batch.parts.iter())
954 .map(|x| format!(" {}", x.printable_name()))
955 .collect::<Vec<_>>()
956 .join("")
957 )
958 }
959 }
960 }
961}
962
impl<T: Timestamp + Lattice + Codec64> SpineBatch<T> {
    /// Sums the diffs across the given run parts, returning `None` if any
    /// part's diffs sum is unavailable.
    fn diffs_sum<'a, D: Monoid + Codec64>(
        parts: impl IntoIterator<Item = &'a RunPart<T>>,
        metrics: &ColumnarMetrics,
    ) -> Option<D> {
        let mut sum = D::zero();
        for part in parts {
            sum.plus_equals(&part.diffs_sum::<D>(metrics)?);
        }
        Some(sum)
    }

    /// Sums the diffs for just the given runs of `batch`. Returns `None` if a
    /// run's sum is unavailable, if a run has no id, or if any requested run
    /// id was not found in the batch.
    fn diffs_sum_for_runs<D: Monoid + Codec64>(
        batch: &HollowBatch<T>,
        run_ids: &[RunId],
        metrics: &ColumnarMetrics,
    ) -> Option<D> {
        let mut run_ids = BTreeSet::from_iter(run_ids.iter().copied());
        let mut sum = D::zero();

        for (meta, run) in batch.runs() {
            let id = meta.id?;
            if run_ids.remove(&id) {
                sum.plus_equals(&Self::diffs_sum(run, metrics)?);
            }
        }

        // Only a sum covering every requested run is meaningful.
        run_ids.is_empty().then_some(sum)
    }

    /// Replaces this batch (or a subset of its parts) matching `desc` with an
    /// empty batch.
    fn maybe_replace_with_tombstone(&mut self, desc: &Description<T>) -> ApplyMergeResult {
        let exact_match =
            desc.lower() == self.desc().lower() && desc.upper() == self.desc().upper();

        let empty_batch = HollowBatch::empty(desc.clone());
        if exact_match {
            *self = SpineBatch::merged(IdHollowBatch {
                id: self.id(),
                batch: Arc::new(empty_batch),
            });
            return ApplyMergeResult::AppliedExact;
        }

        if let Some((id, range)) = self.find_replacement_range(desc) {
            self.perform_subset_replacement(&empty_batch, id, range, None)
        } else {
            ApplyMergeResult::NotAppliedNoMatch
        }
    }

    /// Builds a new hollow batch equal to `original` but with the given runs
    /// removed and `replacement`'s runs appended, taking `replacement`'s desc.
    /// Errs with `NotAppliedNoMatch` if `run_ids` is empty or not a subset of
    /// `original`'s run ids.
    fn construct_batch_with_runs_replaced(
        original: &HollowBatch<T>,
        run_ids: &[RunId],
        replacement: &HollowBatch<T>,
    ) -> Result<HollowBatch<T>, ApplyMergeResult> {
        if run_ids.is_empty() {
            return Err(ApplyMergeResult::NotAppliedNoMatch);
        }

        let orig_run_ids: BTreeSet<_> = original.runs().filter_map(|(meta, _)| meta.id).collect();
        let run_ids: BTreeSet<_> = run_ids.iter().cloned().collect();
        if !orig_run_ids.is_superset(&run_ids) {
            return Err(ApplyMergeResult::NotAppliedNoMatch);
        }

        // Keep every run not being replaced, then append the replacement's
        // runs. NOTE(review): the `expect` assumes every run in `original`
        // has an id — runs without ids would panic here; confirm upstream
        // guarantees this for batches reaching this path.
        let runs: Vec<_> = original
            .runs()
            .filter(|(meta, _)| {
                !run_ids.contains(&meta.id.expect("id should be present at this point"))
            })
            .chain(replacement.runs())
            .collect();

        let len = runs.iter().filter_map(|(meta, _)| meta.len).sum::<usize>();

        let run_meta = runs
            .iter()
            .map(|(meta, _)| *meta)
            .cloned()
            .collect::<Vec<_>>();

        let parts = runs
            .iter()
            .flat_map(|(_, parts)| *parts)
            .cloned()
            .collect::<Vec<_>>();

        // Recompute the run boundaries: a split after each non-empty run
        // except the last.
        let run_splits = {
            let mut splits = Vec::with_capacity(run_meta.len().saturating_sub(1));
            let mut pointer = 0;
            for (i, (_, parts)) in runs.into_iter().enumerate() {
                if parts.is_empty() {
                    continue;
                }
                if i < run_meta.len() - 1 {
                    splits.push(pointer + parts.len());
                }
                pointer += parts.len();
            }
            splits
        };

        Ok(HollowBatch::new(
            replacement.desc.clone(),
            parts,
            len,
            run_meta,
            run_splits,
        ))
    }

    /// Attempts to apply a merge result to this batch, validating diff sums,
    /// and dispatching on how the input was described (id range, partial
    /// batch, or legacy bounds matching).
    fn maybe_replace_checked<D>(
        &mut self,
        res: &FueledMergeRes<T>,
        metrics: &ColumnarMetrics,
    ) -> ApplyMergeResult
    where
        D: Monoid + Codec64 + PartialEq + Debug,
    {
        // The output's since must not be ahead of this batch's since.
        if !PartialOrder::less_equal(res.output.desc.since(), self.desc().since()) {
            return ApplyMergeResult::NotAppliedInvalidSince;
        }

        let new_diffs_sum = Self::diffs_sum(res.output.parts.iter(), metrics);
        let num_batches = self.parts.len();

        let result = match &res.input {
            CompactionInput::IdRange(id) => {
                self.handle_id_range_replacement::<D>(res, id, new_diffs_sum, metrics)
            }
            CompactionInput::PartialBatch(id, runs) => {
                self.handle_partial_batch_replacement::<D>(res, *id, runs, new_diffs_sum, metrics)
            }
            CompactionInput::Legacy => self.maybe_replace_checked_classic::<D>(res, metrics),
        };

        let num_batches_after = self.parts.len();
        assert!(
            num_batches_after <= num_batches,
            "replacing parts should not increase the number of batches"
        );
        result
    }

    /// Applies a merge result whose input is a contiguous id range: finds the
    /// parts covered by `id`, checks they exactly reconstitute `id`, validates
    /// diff sums, and splices in the output.
    fn handle_id_range_replacement<D>(
        &mut self,
        res: &FueledMergeRes<T>,
        id: &SpineId,
        new_diffs_sum: Option<D>,
        metrics: &ColumnarMetrics,
    ) -> ApplyMergeResult
    where
        D: Monoid + Codec64 + PartialEq + Debug,
    {
        // Indices and ids of the parts covered by the replacement id.
        let range = self
            .parts
            .iter()
            .enumerate()
            .filter_map(|(i, p)| {
                if id.covers(p.id) {
                    Some((i, p.id))
                } else {
                    None
                }
            })
            .collect::<Vec<_>>();

        let ids: BTreeSet<_> = range.iter().map(|(_, id)| *id).collect();

        // The covered ids must be contiguous and span exactly `id`.
        if ids.is_empty() || id != &id_range(ids) {
            return ApplyMergeResult::NotAppliedNoMatch;
        }

        let range: BTreeSet<_> = range.iter().map(|(i, _)| *i).collect();

        let min = *range.iter().min().unwrap();
        let max = *range.iter().max().unwrap();
        let replacement_range = min..max + 1;

        let old_diffs_sum = Self::diffs_sum::<D>(
            self.parts[replacement_range.clone()]
                .iter()
                .flat_map(|p| p.batch.parts.iter()),
            metrics,
        );

        Self::validate_diffs_sum_match(old_diffs_sum, new_diffs_sum, "id range replacement");

        self.perform_subset_replacement(
            &res.output,
            *id,
            replacement_range,
            res.new_active_compaction.clone(),
        )
    }

    /// Applies a merge result that replaces a subset of runs within a single
    /// hollow batch (identified by `id`), validating bounds and diff sums.
    fn handle_partial_batch_replacement<D>(
        &mut self,
        res: &FueledMergeRes<T>,
        id: SpineId,
        runs: &BTreeSet<RunId>,
        new_diffs_sum: Option<D>,
        metrics: &ColumnarMetrics,
    ) -> ApplyMergeResult
    where
        D: Monoid + Codec64 + PartialEq + Debug,
    {
        if runs.is_empty() {
            return ApplyMergeResult::NotAppliedNoMatch;
        }

        let part = self.parts.iter().enumerate().find(|(_, p)| p.id == id);
        let Some((i, batch)) = part else {
            return ApplyMergeResult::NotAppliedNoMatch;
        };
        let replacement_range = i..(i + 1);

        // The replacement must cover exactly the same time range as the
        // existing batch, and its since must not regress.
        let replacement_desc = &res.output.desc;
        let existing_desc = &batch.batch.desc;
        assert_eq!(
            replacement_desc.lower(),
            existing_desc.lower(),
            "batch lower should match, but {:?} != {:?}",
            replacement_desc.lower(),
            existing_desc.lower()
        );
        assert_eq!(
            replacement_desc.upper(),
            existing_desc.upper(),
            "batch upper should match, but {:?} != {:?}",
            replacement_desc.upper(),
            existing_desc.upper()
        );
        if !PartialOrder::less_equal(existing_desc.since(), replacement_desc.since()) {
            error!(
                "batch since should advance, but {:?} !<= {:?}",
                existing_desc.since(),
                replacement_desc.since()
            );
            return ApplyMergeResult::NotAppliedInvalidSince;
        }

        let batch = &batch.batch;
        let run_ids = runs.iter().cloned().collect::<Vec<_>>();

        match Self::construct_batch_with_runs_replaced(batch, &run_ids, &res.output) {
            Ok(new_batch) => {
                // The replaced runs must sum to the same diffs as the output.
                let old_diffs_sum = Self::diffs_sum_for_runs::<D>(batch, &run_ids, metrics);
                Self::validate_diffs_sum_match(
                    old_diffs_sum,
                    new_diffs_sum,
                    "partial batch replacement",
                );
                // And the whole batch's sum must be preserved end-to-end.
                let old_batch_diff_sum = Self::diffs_sum::<D>(batch.parts.iter(), metrics);
                let new_batch_diff_sum = Self::diffs_sum::<D>(new_batch.parts.iter(), metrics);
                Self::validate_diffs_sum_match(
                    old_batch_diff_sum,
                    new_batch_diff_sum,
                    "sanity checking diffs sum for replaced runs",
                );
                self.perform_subset_replacement(
                    &new_batch,
                    id,
                    replacement_range,
                    res.new_active_compaction.clone(),
                )
            }
            Err(err) => err,
        }
    }

    /// Asserts that old and new diff sums agree; a missing old sum is treated
    /// as unverifiable and skipped, a missing new sum is treated as zero.
    fn validate_diffs_sum_match<D>(
        old_diffs_sum: Option<D>,
        new_diffs_sum: Option<D>,
        context: &str,
    ) where
        D: Monoid + Codec64 + PartialEq + Debug,
    {
        let new_diffs_sum = new_diffs_sum.unwrap_or_else(D::zero);
        if let Some(old_diffs_sum) = old_diffs_sum {
            assert_eq!(
                old_diffs_sum, new_diffs_sum,
                "merge res diffs sum ({:?}) did not match spine batch diffs sum ({:?}) ({})",
                new_diffs_sum, old_diffs_sum, context
            )
        }
    }

    /// Legacy (bounds-matching) variant of checked replacement: matches the
    /// output against this batch's bounds, or a subrange of its parts.
    fn maybe_replace_checked_classic<D>(
        &mut self,
        res: &FueledMergeRes<T>,
        metrics: &ColumnarMetrics,
    ) -> ApplyMergeResult
    where
        D: Monoid + Codec64 + PartialEq + Debug,
    {
        if !PartialOrder::less_equal(res.output.desc.since(), self.desc().since()) {
            return ApplyMergeResult::NotAppliedInvalidSince;
        }

        let new_diffs_sum = Self::diffs_sum(res.output.parts.iter(), metrics);

        // Whole-batch replacement: bounds match exactly.
        let exact_match = res.output.desc.lower() == self.desc().lower()
            && res.output.desc.upper() == self.desc().upper();
        if exact_match {
            let old_diffs_sum = Self::diffs_sum::<D>(
                self.parts.iter().flat_map(|p| p.batch.parts.iter()),
                metrics,
            );

            if let (Some(old_diffs_sum), Some(new_diffs_sum)) = (old_diffs_sum, new_diffs_sum) {
                assert_eq!(
                    old_diffs_sum, new_diffs_sum,
                    "merge res diffs sum ({:?}) did not match spine batch diffs sum ({:?})",
                    new_diffs_sum, old_diffs_sum
                );
            }

            // A result with more updates than the current batch would be a
            // regression; refuse it.
            if res.output.len > self.len() {
                return ApplyMergeResult::NotAppliedTooManyUpdates;
            }
            *self = SpineBatch::merged(IdHollowBatch {
                id: self.id(),
                batch: Arc::new(res.output.clone()),
            });
            return ApplyMergeResult::AppliedExact;
        }

        // Otherwise try to find a subrange of parts whose bounds match.
        if let Some((id, range)) = self.find_replacement_range(&res.output.desc) {
            let old_diffs_sum = Self::diffs_sum::<D>(
                self.parts[range.clone()]
                    .iter()
                    .flat_map(|p| p.batch.parts.iter()),
                metrics,
            );

            if let (Some(old_diffs_sum), Some(new_diffs_sum)) = (old_diffs_sum, new_diffs_sum) {
                assert_eq!(
                    old_diffs_sum, new_diffs_sum,
                    "merge res diffs sum ({:?}) did not match spine batch diffs sum ({:?})",
                    new_diffs_sum, old_diffs_sum
                );
            }

            self.perform_subset_replacement(
                &res.output,
                id,
                range,
                res.new_active_compaction.clone(),
            )
        } else {
            ApplyMergeResult::NotAppliedNoMatch
        }
    }

    /// Like [Self::maybe_replace_checked_classic], but without any diff-sum
    /// validation.
    fn maybe_replace_unchecked(&mut self, res: &FueledMergeRes<T>) -> ApplyMergeResult {
        if !PartialOrder::less_equal(res.output.desc.since(), self.desc().since()) {
            return ApplyMergeResult::NotAppliedInvalidSince;
        }

        let exact_match = res.output.desc.lower() == self.desc().lower()
            && res.output.desc.upper() == self.desc().upper();
        if exact_match {
            if res.output.len > self.len() {
                return ApplyMergeResult::NotAppliedTooManyUpdates;
            }

            *self = SpineBatch::merged(IdHollowBatch {
                id: self.id(),
                batch: Arc::new(res.output.clone()),
            });
            return ApplyMergeResult::AppliedExact;
        }

        if let Some((id, range)) = self.find_replacement_range(&res.output.desc) {
            self.perform_subset_replacement(
                &res.output,
                id,
                range,
                res.new_active_compaction.clone(),
            )
        } else {
            ApplyMergeResult::NotAppliedNoMatch
        }
    }

    /// Finds the contiguous range of parts whose outer bounds match `desc`:
    /// a part whose lower equals `desc.lower()` through a part whose upper
    /// equals `desc.upper()`. Returns the combined [SpineId] and the index
    /// range, or `None` if either endpoint is missing.
    fn find_replacement_range(&self, desc: &Description<T>) -> Option<(SpineId, Range<usize>)> {
        let mut lower = None;
        let mut upper = None;

        for (i, batch) in self.parts.iter().enumerate() {
            if batch.batch.desc.lower() == desc.lower() {
                lower = Some((i, batch.id.0));
            }
            if batch.batch.desc.upper() == desc.upper() {
                upper = Some((i, batch.id.1));
            }
            if lower.is_some() && upper.is_some() {
                break;
            }
        }

        match (lower, upper) {
            (Some((lower_idx, id_lower)), Some((upper_idx, id_upper))) => {
                Some((SpineId(id_lower, id_upper), lower_idx..(upper_idx + 1)))
            }
            _ => None,
        }
    }

    /// Splices `res` in place of the parts at `range`, keeping everything
    /// before and after. Refuses (without mutating) if the result would have
    /// more updates than the current batch.
    fn perform_subset_replacement(
        &mut self,
        res: &HollowBatch<T>,
        spine_id: SpineId,
        range: Range<usize>,
        new_active_compaction: Option<ActiveCompaction>,
    ) -> ApplyMergeResult {
        let SpineBatch {
            id,
            parts,
            desc,
            active_compaction: _,
            len: _,
        } = self;

        let mut new_parts = vec![];
        new_parts.extend_from_slice(&parts[..range.start]);
        new_parts.push(IdHollowBatch {
            id: spine_id,
            batch: Arc::new(res.clone()),
        });
        new_parts.extend_from_slice(&parts[range.end..]);

        // Replacing every part is effectively an exact application.
        let res = if range.len() == parts.len() {
            ApplyMergeResult::AppliedExact
        } else {
            ApplyMergeResult::AppliedSubset
        };

        let new_spine_batch = SpineBatch {
            id: *id,
            desc: desc.to_owned(),
            len: new_parts.iter().map(|x| x.batch.len).sum(),
            parts: new_parts,
            active_compaction: new_active_compaction,
        };

        if new_spine_batch.len() > self.len() {
            return ApplyMergeResult::NotAppliedTooManyUpdates;
        }

        *self = new_spine_batch;
        res
    }
}
1482
/// A merge of spine batches that completes incrementally as fuel is supplied
/// via [FuelingMerge::work], producing its output in [FuelingMerge::done].
#[derive(Debug, Clone, PartialEq, Serialize)]
pub struct FuelingMerge<T> {
    /// The since frontier used for the merged output's description.
    pub(crate) since: Antichain<T>,
    /// Fuel still required before this merge is considered complete.
    pub(crate) remaining_work: usize,
}
1488
/// A [FuelingMerge] paired with the [SpineId] of the batch it will produce.
#[derive(Debug, Clone, PartialEq, Serialize)]
pub struct IdFuelingMerge<T> {
    id: SpineId,
    merge: FuelingMerge<T>,
}
1494
1495impl<T: Timestamp + Lattice> FuelingMerge<T> {
1496 #[allow(clippy::as_conversions)]
1502 fn work(&mut self, _: &[SpineBatch<T>], fuel: &mut isize) {
1503 let used = std::cmp::min(*fuel as usize, self.remaining_work);
1504 self.remaining_work = self.remaining_work.saturating_sub(used);
1505 *fuel -= used as isize;
1506 }
1507
1508 fn done(
1513 self,
1514 bs: ArrayVec<SpineBatch<T>, BATCHES_PER_LEVEL>,
1515 log: &mut SpineLog<'_, T>,
1516 ) -> Option<SpineBatch<T>> {
1517 let first = bs.first()?;
1518 let last = bs.last()?;
1519 let id = SpineId(first.id().0, last.id().1);
1520 assert!(id.0 < id.1);
1521 let lower = first.desc().lower().clone();
1522 let upper = last.desc().upper().clone();
1523 let since = self.since;
1524
1525 if bs.iter().all(SpineBatch::is_empty) {
1527 return Some(SpineBatch::empty(id, lower, upper, since));
1528 }
1529
1530 let desc = Description::new(lower, upper, since);
1531 let len = bs.iter().map(SpineBatch::len).sum();
1532
1533 let mut merged_parts_len = 0;
1537 for b in &bs {
1538 merged_parts_len += b.parts.len();
1539 }
1540 let mut merged_parts = Vec::with_capacity(merged_parts_len);
1541 for b in bs {
1542 merged_parts.extend(b.parts)
1543 }
1544 debug_assert_eq!(merged_parts.len(), merged_parts_len);
1546
1547 if let SpineLog::Enabled { merge_reqs } = log {
1548 merge_reqs.push(FueledMergeReq {
1549 id,
1550 desc: desc.clone(),
1551 inputs: merged_parts.clone(),
1552 });
1553 }
1554
1555 Some(SpineBatch {
1556 id,
1557 desc,
1558 len,
1559 parts: merged_parts,
1560 active_compaction: None,
1561 })
1562 }
1563}
1564
/// Capacity of each spine level: once a level holds this many batches, a
/// fueling merge of the level is begun (see `Spine::insert_at`).
const BATCHES_PER_LEVEL: usize = 2;
/// An append-only collection of batches, organized into levels whose merges
/// are advanced incrementally with "fuel".
#[derive(Debug, Clone)]
struct Spine<T> {
    // Multiplier applied to the fuel budget when introducing a batch.
    effort: usize,
    // The next unused id; batch ids form a contiguous sequence ending here
    // (checked by `validate`).
    next_id: usize,
    // Compaction frontier; every batch's since must be <= this (checked by
    // `validate`).
    since: Antichain<T>,
    // Upper frontier of inserted data; matches the last batch's upper
    // (checked by `validate`).
    upper: Antichain<T>,
    // Merge levels; a batch is inserted at a level derived from the
    // next-power-of-two of its length (see `insert`).
    merging: Vec<MergeState<T>>,
}
1661
1662impl<T> Spine<T> {
1663 pub fn spine_batches(&self) -> impl Iterator<Item = &SpineBatch<T>> {
1665 self.merging.iter().rev().flat_map(|m| &m.batches)
1666 }
1667
1668 pub fn spine_batches_mut(&mut self) -> impl DoubleEndedIterator<Item = &mut SpineBatch<T>> {
1670 self.merging.iter_mut().rev().flat_map(|m| &mut m.batches)
1671 }
1672}
1673
1674impl<T: Timestamp + Lattice> Spine<T> {
1675 pub fn new() -> Self {
1682 Spine {
1683 effort: 1,
1684 next_id: 0,
1685 since: Antichain::from_elem(T::minimum()),
1686 upper: Antichain::from_elem(T::minimum()),
1687 merging: Vec::new(),
1688 }
1689 }
1690
1691 fn exert(&mut self, effort: usize, log: &mut SpineLog<'_, T>) -> bool {
1699 self.tidy_layers();
1700 if self.reduced() {
1701 return false;
1702 }
1703
1704 if self.merging.iter().any(|b| b.merge.is_some()) {
1705 let fuel = isize::try_from(effort).unwrap_or(isize::MAX);
1706 self.apply_fuel(&fuel, log);
1708 } else {
1709 let level = usize::cast_from(effort.next_power_of_two().trailing_zeros());
1714 let id = self.next_id();
1715 self.introduce_batch(
1716 SpineBatch::empty(
1717 id,
1718 self.upper.clone(),
1719 self.upper.clone(),
1720 self.since.clone(),
1721 ),
1722 level,
1723 log,
1724 );
1725 }
1726 true
1727 }
1728
1729 pub fn next_id(&mut self) -> SpineId {
1730 let id = self.next_id;
1731 self.next_id += 1;
1732 SpineId(id, self.next_id)
1733 }
1734
1735 pub fn insert(&mut self, batch: HollowBatch<T>, log: &mut SpineLog<'_, T>) {
1739 assert!(batch.desc.lower() != batch.desc.upper());
1740 assert_eq!(batch.desc.lower(), &self.upper);
1741
1742 let id = self.next_id();
1743 let batch = SpineBatch::merged(IdHollowBatch {
1744 id,
1745 batch: Arc::new(batch),
1746 });
1747
1748 self.upper.clone_from(batch.upper());
1749
1750 if batch.is_empty() {
1753 if let Some(position) = self.merging.iter().position(|m| !m.is_vacant()) {
1754 if self.merging[position].is_single() && self.merging[position].is_empty() {
1755 self.insert_at(batch, position);
1756 if let Some(merged) = self.complete_at(position, log) {
1759 self.merging[position] = MergeState::single(merged);
1760 }
1761 return;
1762 }
1763 }
1764 }
1765
1766 let index = batch.len().next_power_of_two();
1768 self.introduce_batch(batch, usize::cast_from(index.trailing_zeros()), log);
1769 }
1770
1771 fn reduced(&self) -> bool {
1778 self.spine_batches()
1779 .map(|b| {
1780 b.parts
1781 .iter()
1782 .map(|p| p.batch.run_meta.len())
1783 .sum::<usize>()
1784 })
1785 .sum::<usize>()
1786 < 2
1787 }
1788
1789 #[allow(dead_code)]
1793 fn describe(&self) -> Vec<(usize, usize)> {
1794 self.merging
1795 .iter()
1796 .map(|b| (b.batches.len(), b.len()))
1797 .collect()
1798 }
1799
1800 fn introduce_batch(
1806 &mut self,
1807 batch: SpineBatch<T>,
1808 batch_index: usize,
1809 log: &mut SpineLog<'_, T>,
1810 ) {
1811 if batch_index > 32 {
1832 println!("Large batch index: {}", batch_index);
1833 }
1834
1835 let mut fuel = 8 << batch_index;
1842 fuel *= self.effort;
1845 #[allow(clippy::as_conversions)]
1848 let fuel = fuel as isize;
1849
1850 self.apply_fuel(&fuel, log);
1857
1858 self.roll_up(batch_index, log);
1875
1876 self.insert_at(batch, batch_index);
1880
1881 self.tidy_layers();
1887 }
1888
1889 fn roll_up(&mut self, index: usize, log: &mut SpineLog<'_, T>) {
1897 while self.merging.len() <= index {
1899 self.merging.push(MergeState::default());
1900 }
1901
1902 if self.merging[..index].iter().any(|m| !m.is_vacant()) {
1904 let mut merged = None;
1907 for i in 0..index {
1908 if let Some(merged) = merged.take() {
1909 self.insert_at(merged, i);
1910 }
1911 merged = self.complete_at(i, log);
1912 }
1913
1914 if let Some(merged) = merged {
1918 self.insert_at(merged, index);
1919 }
1920
1921 if self.merging[index].is_full() {
1924 let merged = self.complete_at(index, log).expect("double batch");
1925 self.insert_at(merged, index + 1);
1926 }
1927 }
1928 }
1929
1930 pub fn apply_fuel(&mut self, fuel: &isize, log: &mut SpineLog<'_, T>) {
1938 for index in 0..self.merging.len() {
1944 let mut fuel = *fuel;
1946 self.merging[index].work(&mut fuel);
1949 if self.merging[index].is_complete() {
1961 let complete = self.complete_at(index, log).expect("complete batch");
1962 self.insert_at(complete, index + 1);
1963 }
1964 }
1965 }
1966
1967 fn insert_at(&mut self, batch: SpineBatch<T>, index: usize) {
1973 while self.merging.len() <= index {
1975 self.merging.push(MergeState::default());
1976 }
1977
1978 let merging = &mut self.merging[index];
1980 merging.push_batch(batch);
1981 if merging.batches.is_full() {
1982 let compaction_frontier = Some(self.since.borrow());
1983 merging.merge = SpineBatch::begin_merge(&merging.batches[..], compaction_frontier)
1984 }
1985 }
1986
1987 fn complete_at(&mut self, index: usize, log: &mut SpineLog<'_, T>) -> Option<SpineBatch<T>> {
1989 self.merging[index].complete(log)
1990 }
1991
1992 fn tidy_layers(&mut self) {
1994 if !self.merging.is_empty() {
1999 let mut length = self.merging.len();
2000 if self.merging[length - 1].is_single() {
2001 let appropriate_level = usize::cast_from(
2005 self.merging[length - 1]
2006 .len()
2007 .next_power_of_two()
2008 .trailing_zeros(),
2009 );
2010
2011 while appropriate_level < length - 1 {
2013 let current = &mut self.merging[length - 2];
2014 if current.is_vacant() {
2015 self.merging.remove(length - 2);
2017 length = self.merging.len();
2018 } else {
2019 if !current.is_full() {
2020 let mut smaller = 0;
2028 for (index, batch) in self.merging[..(length - 2)].iter().enumerate() {
2029 smaller += batch.batches.len() << index;
2030 }
2031
2032 if smaller <= (1 << length) / 8 {
2033 let state = self.merging.remove(length - 2);
2036 assert_eq!(state.batches.len(), 1);
2037 for batch in state.batches {
2038 self.insert_at(batch, length - 2);
2039 }
2040 }
2041 }
2042 break;
2043 }
2044 }
2045 }
2046 }
2047 }
2048
2049 fn validate(&self) -> Result<(), String> {
2057 let mut id = SpineId(0, 0);
2058 let mut frontier = Antichain::from_elem(T::minimum());
2059 for x in self.merging.iter().rev() {
2060 if x.is_full() != x.merge.is_some() {
2061 return Err(format!(
2062 "all (and only) full batches should have fueling merges (full={}, merge={:?})",
2063 x.is_full(),
2064 x.merge,
2065 ));
2066 }
2067
2068 if let Some(m) = &x.merge {
2069 if !x.is_full() {
2070 return Err(format!(
2071 "merge should only exist for full batches (len={:?}, merge={:?})",
2072 x.batches.len(),
2073 m.id,
2074 ));
2075 }
2076 if x.id() != Some(m.id) {
2077 return Err(format!(
2078 "merge id should match the range of the batch ids (batch={:?}, merge={:?})",
2079 x.id(),
2080 m.id,
2081 ));
2082 }
2083 }
2084
2085 for batch in &x.batches {
2090 if batch.id().0 != id.1 {
2091 return Err(format!(
2092 "batch id {:?} does not match the previous id {:?}: {:?}",
2093 batch.id(),
2094 id,
2095 self
2096 ));
2097 }
2098 id = batch.id();
2099 if batch.desc().lower() != &frontier {
2100 return Err(format!(
2101 "batch lower {:?} does not match the previous upper {:?}: {:?}",
2102 batch.desc().lower(),
2103 frontier,
2104 self
2105 ));
2106 }
2107 frontier.clone_from(batch.desc().upper());
2108 if !PartialOrder::less_equal(batch.desc().since(), &self.since) {
2109 return Err(format!(
2110 "since of batch {:?} past the spine since {:?}: {:?}",
2111 batch.desc().since(),
2112 self.since,
2113 self
2114 ));
2115 }
2116 }
2117 }
2118 if self.next_id != id.1 {
2119 return Err(format!(
2120 "spine next_id {:?} does not match the last batch's id {:?}: {:?}",
2121 self.next_id, id, self
2122 ));
2123 }
2124 if self.upper != frontier {
2125 return Err(format!(
2126 "spine upper {:?} does not match the last batch's upper {:?}: {:?}",
2127 self.upper, frontier, self
2128 ));
2129 }
2130 Ok(())
2131 }
2132}
2133
/// One level of the spine: up to [BATCHES_PER_LEVEL] batches, plus the
/// in-progress fueling merge of those batches, if one has begun.
#[derive(Debug, Clone)]
struct MergeState<T> {
    // Batches at this level, in id order (checked by `push_batch`).
    batches: ArrayVec<SpineBatch<T>, BATCHES_PER_LEVEL>,
    // Set iff the level is full (checked by `Spine::validate`).
    merge: Option<IdFuelingMerge<T>>,
}
2143
2144impl<T> Default for MergeState<T> {
2145 fn default() -> Self {
2146 Self {
2147 batches: ArrayVec::new(),
2148 merge: None,
2149 }
2150 }
2151}
2152
2153impl<T: Timestamp + Lattice> MergeState<T> {
2154 fn id(&self) -> Option<SpineId> {
2156 if let (Some(first), Some(last)) = (self.batches.first(), self.batches.last()) {
2157 Some(SpineId(first.id().0, last.id().1))
2158 } else {
2159 None
2160 }
2161 }
2162
2163 fn single(batch: SpineBatch<T>) -> Self {
2165 let mut state = Self::default();
2166 state.push_batch(batch);
2167 state
2168 }
2169
2170 fn push_batch(&mut self, batch: SpineBatch<T>) {
2172 if let Some(last) = self.batches.last() {
2173 assert_eq!(last.id().1, batch.id().0);
2174 assert_eq!(last.upper(), batch.lower());
2175 }
2176 assert!(
2177 self.merge.is_none(),
2178 "Attempted to insert batch into incomplete merge! (batch={:?}, batch_count={})",
2179 batch.id,
2180 self.batches.len(),
2181 );
2182 self.batches
2183 .try_push(batch)
2184 .expect("Attempted to insert batch into full layer!");
2185 }
2186
2187 fn len(&self) -> usize {
2189 self.batches.iter().map(SpineBatch::len).sum()
2190 }
2191
2192 fn is_empty(&self) -> bool {
2194 self.batches.iter().all(SpineBatch::is_empty)
2195 }
2196
2197 fn is_vacant(&self) -> bool {
2199 self.batches.is_empty()
2200 }
2201
2202 fn is_single(&self) -> bool {
2204 self.batches.len() == 1
2205 }
2206
2207 fn is_full(&self) -> bool {
2210 self.batches.is_full()
2211 }
2212
2213 fn complete(&mut self, log: &mut SpineLog<'_, T>) -> Option<SpineBatch<T>> {
2220 let mut this = mem::take(self);
2221 if this.batches.len() <= 1 {
2222 this.batches.pop()
2223 } else {
2224 let id_merge = this
2226 .merge
2227 .or_else(|| SpineBatch::begin_merge(&self.batches[..], None))?;
2228 id_merge.merge.done(this.batches, log)
2229 }
2230 }
2231
2232 fn is_complete(&self) -> bool {
2234 match &self.merge {
2235 Some(IdFuelingMerge { merge, .. }) => merge.remaining_work == 0,
2236 None => false,
2237 }
2238 }
2239
2240 fn work(&mut self, fuel: &mut isize) {
2242 if let Some(IdFuelingMerge { merge, .. }) = &mut self.merge {
2244 merge.work(&self.batches[..], fuel)
2245 }
2246 }
2247}
2248
#[cfg(test)]
pub mod datadriven {
    use mz_ore::fmt::FormatBuffer;

    use crate::internal::datadriven::DirectiveArgs;

    use super::*;

    /// Shared state threaded through the trace datadriven directives below.
    #[derive(Debug, Default)]
    pub struct TraceState {
        pub trace: Trace<u64>,
        // Merge reqs produced by `insert` directives, consumed by
        // `take_merge_req`.
        pub merge_reqs: Vec<FueledMergeReq<u64>>,
    }

    /// Renders the trace's since and upper frontiers.
    pub fn since_upper(
        datadriven: &TraceState,
        _args: DirectiveArgs,
    ) -> Result<String, anyhow::Error> {
        Ok(format!(
            "{:?}{:?}\n",
            datadriven.trace.since().elements(),
            datadriven.trace.upper().elements()
        ))
    }

    /// Renders one line per spine batch.
    pub fn batches(datadriven: &TraceState, _args: DirectiveArgs) -> Result<String, anyhow::Error> {
        let mut s = String::new();
        for b in datadriven.trace.spine.spine_batches() {
            s.push_str(b.describe(true).as_str());
            s.push('\n');
        }
        Ok(s)
    }

    /// Pushes one hollow batch per input line, accumulating any merge reqs.
    pub fn insert(
        datadriven: &mut TraceState,
        args: DirectiveArgs,
    ) -> Result<String, anyhow::Error> {
        for x in args
            .input
            .trim()
            .split('\n')
            .map(DirectiveArgs::parse_hollow_batch)
        {
            datadriven
                .merge_reqs
                .append(&mut datadriven.trace.push_batch(x));
        }
        Ok("ok\n".to_owned())
    }

    /// Downgrades the trace's since to the given element.
    pub fn downgrade_since(
        datadriven: &mut TraceState,
        args: DirectiveArgs,
    ) -> Result<String, anyhow::Error> {
        let since = args.expect("since");
        datadriven
            .trace
            .downgrade_since(&Antichain::from_elem(since));
        Ok("ok\n".to_owned())
    }

    /// Drains and renders the accumulated merge reqs.
    pub fn take_merge_req(
        datadriven: &mut TraceState,
        _args: DirectiveArgs,
    ) -> Result<String, anyhow::Error> {
        let mut s = String::new();
        for merge_req in std::mem::take(&mut datadriven.merge_reqs) {
            write!(
                s,
                "{:?}{:?}{:?} {}\n",
                merge_req.desc.lower().elements(),
                merge_req.desc.upper().elements(),
                merge_req.desc.since().elements(),
                merge_req
                    .inputs
                    .iter()
                    .flat_map(|x| x.batch.parts.iter())
                    .map(|x| x.printable_name())
                    .collect::<Vec<_>>()
                    .join(" ")
            );
        }
        Ok(s)
    }

    /// Parses the input as a merge result and applies it (unchecked),
    /// rendering which ApplyMergeResult came back.
    pub fn apply_merge_res(
        datadriven: &mut TraceState,
        args: DirectiveArgs,
    ) -> Result<String, anyhow::Error> {
        let res = FueledMergeRes {
            output: DirectiveArgs::parse_hollow_batch(args.input),
            input: CompactionInput::Legacy,
            new_active_compaction: None,
        };
        match datadriven.trace.apply_merge_res_unchecked(&res) {
            ApplyMergeResult::AppliedExact => Ok("applied exact\n".into()),
            ApplyMergeResult::AppliedSubset => Ok("applied subset\n".into()),
            ApplyMergeResult::NotAppliedNoMatch => Ok("no-op\n".into()),
            ApplyMergeResult::NotAppliedInvalidSince => Ok("no-op invalid since\n".into()),
            ApplyMergeResult::NotAppliedTooManyUpdates => Ok("no-op too many updates\n".into()),
        }
    }
}
2354
#[cfg(test)]
pub(crate) mod tests {
    use std::ops::Range;

    use proptest::prelude::*;
    use semver::Version;

    use crate::internal::state::tests::{any_hollow_batch, any_hollow_batch_with_exact_runs};

    use super::*;

    /// Proptest strategy producing an arbitrary valid [Trace]: batches are
    /// rewritten to be contiguous in `[lower, upper)` before being pushed,
    /// and any merge reqs due before `timeout_ms` get claimed compactions.
    pub fn any_trace<T: Arbitrary + Timestamp + Lattice>(
        num_batches: Range<usize>,
    ) -> impl Strategy<Value = Trace<T>> {
        Strategy::prop_map(
            (
                any::<Option<T>>(),
                proptest::collection::vec(any_hollow_batch::<T>(), num_batches),
                any::<bool>(),
                any::<u64>(),
            ),
            |(since, mut batches, roundtrip_structure, timeout_ms)| {
                let mut trace = Trace::<T>::default();
                trace.downgrade_since(&since.map_or_else(Antichain::new, Antichain::from_elem));

                // Sort by upper so the rewritten descriptions stay ordered.
                batches.sort_by(|x, y| x.desc.upper().elements().cmp(y.desc.upper().elements()));
                let mut lower = Antichain::from_elem(T::minimum());
                for mut batch in batches {
                    if PartialOrder::less_than(trace.since(), batch.desc.since()) {
                        trace.downgrade_since(batch.desc.since());
                    }
                    // Force each batch's lower to abut the previous upper.
                    batch.desc = Description::new(
                        lower.clone(),
                        batch.desc.upper().clone(),
                        batch.desc.since().clone(),
                    );
                    lower.clone_from(batch.desc.upper());
                    let _merge_req = trace.push_batch(batch);
                }
                let reqs: Vec<_> = trace
                    .fueled_merge_reqs_before_ms(timeout_ms, None)
                    .collect();
                for req in reqs {
                    trace.claim_compaction(req.id, ActiveCompaction { start_ms: 0 })
                }
                trace.roundtrip_structure = roundtrip_structure;
                trace
            },
        )
    }

    /// Property test: flatten/unflatten roundtrips any valid trace.
    #[mz_ore::test]
    #[cfg_attr(miri, ignore)] fn test_roundtrips() {
        fn check(trace: Trace<i64>) {
            trace.validate().unwrap();
            let flat = trace.flatten();
            let unflat = Trace::unflatten(flat).unwrap();
            assert_eq!(trace, unflat);
        }

        proptest!(|(trace in any_trace::<i64>(1..10))| { check(trace) })
    }

    /// Checks `fueled_merge_reqs_before_ms` filtering by writer-key version.
    #[mz_ore::test]
    fn fueled_merge_reqs() {
        let mut trace: Trace<u64> = Trace::default();
        let fueled_reqs = trace.push_batch(crate::internal::state::tests::hollow(
            0,
            10,
            &["n0011500/p3122e2a1-a0c7-429f-87aa-1019bf4f5f86"],
            1000,
        ));

        assert!(fueled_reqs.is_empty());
        assert_eq!(
            trace.fueled_merge_reqs_before_ms(u64::MAX, None).count(),
            0,
            "no merge reqs when not filtering by version"
        );
        assert_eq!(
            trace
                .fueled_merge_reqs_before_ms(
                    u64::MAX,
                    Some(WriterKey::for_version(&Version::new(0, 50, 0)))
                )
                .count(),
            0,
            "zero batches are older than a past version"
        );
        assert_eq!(
            trace
                .fueled_merge_reqs_before_ms(
                    u64::MAX,
                    Some(WriterKey::for_version(&Version::new(99, 99, 0)))
                )
                .count(),
            1,
            "one batch is older than a future version"
        );
    }

    /// Checks that `remove_redundant_merge_reqs` drops reqs covered by
    /// another req (same id span and compatible since).
    #[mz_ore::test]
    fn remove_redundant_merge_reqs() {
        // Shorthand for a req spanning [lower, upper) with an empty since.
        fn req(lower: u64, upper: u64) -> FueledMergeReq<u64> {
            FueledMergeReq {
                id: SpineId(usize::cast_from(lower), usize::cast_from(upper)),
                desc: Description::new(
                    Antichain::from_elem(lower),
                    Antichain::from_elem(upper),
                    Antichain::new(),
                ),
                inputs: vec![],
            }
        }

        assert_eq!(Trace::<u64>::remove_redundant_merge_reqs(vec![]), vec![]);

        assert_eq!(
            Trace::remove_redundant_merge_reqs(vec![req(0, 1)]),
            vec![req(0, 1)]
        );

        assert_eq!(
            Trace::remove_redundant_merge_reqs(vec![req(0, 1), req(0, 1)]),
            vec![req(0, 1)]
        );

        assert_eq!(
            Trace::remove_redundant_merge_reqs(vec![req(0, 1), req(1, 2)]),
            vec![req(1, 2), req(0, 1)]
        );

        assert_eq!(
            Trace::remove_redundant_merge_reqs(vec![req(1, 2), req(0, 3)]),
            vec![req(0, 3)]
        );

        assert_eq!(
            Trace::remove_redundant_merge_reqs(vec![req(0, 2), req(0, 3)]),
            vec![req(0, 3)]
        );

        assert_eq!(
            Trace::remove_redundant_merge_reqs(vec![req(1, 3), req(0, 3)]),
            vec![req(0, 3)]
        );

        assert_eq!(
            Trace::remove_redundant_merge_reqs(vec![req(0, 3), req(1, 2)]),
            vec![req(0, 3)]
        );

        assert_eq!(
            Trace::remove_redundant_merge_reqs(vec![req(0, 2), req(1, 3)]),
            vec![req(1, 3), req(0, 2)]
        );

        assert_eq!(
            Trace::remove_redundant_merge_reqs(vec![req(1, 3), req(0, 2)]),
            vec![req(0, 2), req(1, 3)]
        );

        // A req with a different (non-empty) since is not redundant with the
        // same id span.
        let req015 = FueledMergeReq {
            id: SpineId(0, 1),
            desc: Description::new(
                Antichain::from_elem(0),
                Antichain::from_elem(1),
                Antichain::from_elem(5),
            ),
            inputs: vec![],
        };
        assert_eq!(
            Trace::remove_redundant_merge_reqs(vec![req(0, 1), req015.clone()]),
            vec![req015, req(0, 1)]
        );
    }

    /// Property test: replacing a subset of runs yields a batch whose run
    /// count is (original - replaced + replacement's runs).
    #[mz_ore::test]
    #[cfg_attr(miri, ignore)] fn construct_batch_with_runs_replaced_test() {
        let batch_strategy = any_hollow_batch::<u64>();
        let to_replace_strategy = any_hollow_batch_with_exact_runs::<u64>(1);

        let combined_strategy = (batch_strategy, to_replace_strategy)
            .prop_filter("non-empty batch", |(batch, _)| batch.run_meta.len() >= 1);

        // From (batch, replacement), also pick a non-empty subset of the
        // batch's run indices via a boolean mask.
        let final_strategy = combined_strategy.prop_flat_map(|(batch, to_replace)| {
            let batch_len = batch.run_meta.len();
            let batch_clone = batch.clone();
            let to_replace_clone = to_replace.clone();

            proptest::collection::vec(any::<bool>(), batch_len)
                .prop_filter("at least one run selected", |mask| mask.iter().any(|&x| x))
                .prop_map(move |mask| {
                    let indices: Vec<usize> = mask
                        .iter()
                        .enumerate()
                        .filter_map(|(i, &selected)| if selected { Some(i) } else { None })
                        .collect();
                    (batch_clone.clone(), to_replace_clone.clone(), indices)
                })
        });

        proptest!(|(
            (batch, to_replace, runs) in final_strategy
        )| {
            let original_run_ids: Vec<_> = batch.run_meta.iter().map(|x|
                x.id.unwrap().clone()
            ).collect();

            let run_ids = runs.iter().map(|&i| original_run_ids[i].clone()).collect::<Vec<_>>();

            let new_batch = SpineBatch::construct_batch_with_runs_replaced(
                &batch,
                &run_ids,
                &to_replace,
            ).unwrap();

            let expected_len = batch.run_meta.len() - runs.len()
                + to_replace.run_meta.len();
            prop_assert!(new_batch.run_meta.len() == expected_len);
        });
    }

    /// Unit test for `perform_subset_replacement`: exact, subset, and
    /// too-many-updates outcomes against a three-part spine batch.
    #[mz_ore::test]
    fn test_perform_subset_replacement() {
        let batch1 = crate::internal::state::tests::hollow::<u64>(0, 10, &["a"], 10);
        let batch2 = crate::internal::state::tests::hollow::<u64>(10, 20, &["b"], 10);
        let batch3 = crate::internal::state::tests::hollow::<u64>(20, 30, &["c"], 10);

        let id_batch1 = IdHollowBatch {
            id: SpineId(0, 1),
            batch: Arc::new(batch1.clone()),
        };
        let id_batch2 = IdHollowBatch {
            id: SpineId(1, 2),
            batch: Arc::new(batch2.clone()),
        };
        let id_batch3 = IdHollowBatch {
            id: SpineId(2, 3),
            batch: Arc::new(batch3.clone()),
        };

        let spine_batch = SpineBatch {
            id: SpineId(0, 3),
            desc: Description::new(
                Antichain::from_elem(0),
                Antichain::from_elem(30),
                Antichain::from_elem(0),
            ),
            parts: vec![id_batch1, id_batch2, id_batch3],
            active_compaction: None,
            len: 30,
        };

        // Replacing all three parts with one batch of equal len.
        let res_exact = crate::internal::state::tests::hollow::<u64>(0, 30, &["d"], 30);
        let mut sb_exact = spine_batch.clone();
        let result = sb_exact.perform_subset_replacement(&res_exact, SpineId(0, 3), 0..3, None);
        assert!(matches!(result, ApplyMergeResult::AppliedExact));
        assert_eq!(sb_exact.parts.len(), 1);
        assert_eq!(sb_exact.len(), 30);

        // Replacing only the first two parts.
        let res_subset = crate::internal::state::tests::hollow::<u64>(0, 20, &["e"], 20);
        let mut sb_subset = spine_batch.clone();
        let result = sb_subset.perform_subset_replacement(&res_subset, SpineId(0, 2), 0..2, None);
        assert!(matches!(result, ApplyMergeResult::AppliedSubset));
        assert_eq!(sb_subset.parts.len(), 2); assert_eq!(sb_subset.len(), 30);

        // A replacement with more updates than the original is rejected and
        // leaves the batch untouched.
        let res_too_big = crate::internal::state::tests::hollow::<u64>(0, 30, &["f"], 31);
        let mut sb_too_big = spine_batch.clone();
        let result = sb_too_big.perform_subset_replacement(&res_too_big, SpineId(0, 3), 0..3, None);
        assert!(matches!(result, ApplyMergeResult::NotAppliedTooManyUpdates));
        assert_eq!(sb_too_big.parts.len(), 3);
        assert_eq!(sb_too_big.len(), 30);
    }
}
2646}