// mz_storage/upsert_continual_feedback.rs

// Copyright Materialize, Inc. and contributors. All rights reserved.
//
// Use of this software is governed by the Business Source License
// included in the LICENSE file.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0.

//! Implementation of the feedback UPSERT operator and associated helpers. See
//! [`upsert_inner`] for a description of how the operator works and why.

use std::cmp::Reverse;
use std::convert::Infallible;
use std::fmt::Debug;
use std::sync::Arc;

use differential_dataflow::hashable::Hashable;
use differential_dataflow::{AsCollection, VecCollection};
use indexmap::map::Entry;
use itertools::Itertools;
use mz_repr::{Diff, GlobalId, Row};
use mz_storage_types::errors::{DataflowError, EnvelopeError};
use mz_timely_util::builder_async::{
    Event as AsyncEvent, OperatorBuilder as AsyncOperatorBuilder, PressOnDropButton,
};
use timely::container::CapacityContainerBuilder;
use timely::dataflow::channels::pact::Exchange;
use timely::dataflow::operators::{Capability, CapabilitySet};
use timely::dataflow::{Scope, StreamVec};
use timely::order::{PartialOrder, TotalOrder};
use timely::progress::timestamp::Refines;
use timely::progress::{Antichain, Timestamp};

use crate::healthcheck::HealthStatusUpdate;
use crate::metrics::upsert::UpsertMetrics;
use crate::upsert::UpsertConfig;
use crate::upsert::UpsertErrorEmitter;
use crate::upsert::UpsertKey;
use crate::upsert::UpsertValue;
use crate::upsert::types::UpsertValueAndSize;
use crate::upsert::types::{self as upsert_types, ValueMetadata};
use crate::upsert::types::{StateValue, UpsertState, UpsertStateBackend};

/// An operator that transforms an input stream of upserts (updates to key-value
/// pairs), which represents an imaginary key-value state, into a differential
/// collection. It maintains an internal map-like state that holds the latest
/// value for each key, so that it can emit the retractions and additions
/// implied by a new update for a given key.
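///
/// For example (an illustrative sketch of the upsert semantics): given the
/// input updates
///
/// ```text
/// (k1, Some(v1)) @ t1
/// (k1, Some(v2)) @ t2
/// (k1, None)     @ t3   // a deletion of k1
/// ```
///
/// the operator emits the differential updates
///
/// ```text
/// (v1, t1, +1)
/// (v1, t2, -1), (v2, t2, +1)
/// (v2, t3, -1)
/// ```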
///
/// This operator is intended to be used in an ingestion pipeline that reads
/// from an external source, and the output of this operator is eventually
/// written to persist.
///
/// The operator has two inputs: a) the source input of upserts, and b) a
/// persist input that feeds back the upsert state to the operator. Below,
/// there is a section for each input that describes how and why we process
/// updates from it.
///
/// An important property of this operator is that it does _not_ update the
/// map-like state that it keeps for translating the stream of upserts into a
/// differential collection when it processes source input. It _only_ updates
/// the map-like state based on updates from the persist (feedback) input. We
/// do this because the operator is expected to be used in cases where there
/// are multiple concurrent instances of the same ingestion pipeline, and the
/// different instances might see different input because of concurrency and
/// non-determinism. All instances of the upsert operator must produce output
/// that is consistent with the current state of the output (which all
/// instances produce "collaboratively"). This global state is what the
/// operator continually learns about via updates from the persist input.
///
/// ## Processing the Source Input
///
/// Updates on the source input are stashed/staged until they can be processed.
/// Whether or not an update can be processed depends both on the upper frontier
/// of the source input and on the upper frontier of the persist input:
///
///  - Input updates are only processed once their timestamp is "done", that
///    is, the input upper is no longer `less_equal` their timestamp.
///
///  - Input updates are only processed once they are at the persist upper,
///    that is, we have emitted and written down updates for all previous times
///    and we have updated our map-like state to the latest global state of the
///    output of the ingestion pipeline. We know this is the case when the
///    persist upper is no longer `less_than` their timestamp.
///
/// As an optimization, we allow processing input updates when they are right at
/// the input frontier. This is called _partial emission_ because we are
/// emitting updates that might be retracted when processing more updates from
/// the same timestamp. In order to be able to process these updates we keep
/// _provisional values_ in our upsert state. These will be overwritten when we
/// get the final upsert values on the persist input.
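///
/// For example (illustrative): if the input upper is exactly `[3]` and we
/// receive an update for key `k` at time 3, we may emit it provisionally and
/// record a provisional value for `k`. If another update for `k` at time 3
/// arrives later, we retract the provisionally emitted value and emit the new
/// one.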
///
/// ## Processing the Persist Input
///
/// We continually ingest updates from the persist input into our state using
/// `UpsertState::consolidate_chunk`. We might be ingesting updates from the
/// initial snapshot (when starting the operator) that are not consolidated or
/// we might be ingesting updates from a partial emission (see above). In either
/// case, our input might not be consolidated and `consolidate_chunk` is able to
/// handle that.
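///
/// For example (illustrative): a chunk may contain both `(k, v, +1)` and
/// `(k, v, -1)` for the same key, say a provisional value followed by its
/// retraction from a partial emission; `consolidate_chunk` sums up the diffs
/// while ingesting, so the state ends up holding the consolidated value per
/// key.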
pub fn upsert_inner<G: Scope, FromTime, F, Fut, US>(
    input: VecCollection<G, (UpsertKey, Option<UpsertValue>, FromTime), Diff>,
    key_indices: Vec<usize>,
    resume_upper: Antichain<G::Timestamp>,
    persist_input: VecCollection<G, Result<Row, DataflowError>, Diff>,
    mut persist_token: Option<Vec<PressOnDropButton>>,
    upsert_metrics: UpsertMetrics,
    source_config: crate::source::SourceExportCreationConfig,
    state_fn: F,
    upsert_config: UpsertConfig,
    prevent_snapshot_buffering: bool,
    snapshot_buffering_max: Option<usize>,
) -> (
    VecCollection<G, Result<Row, DataflowError>, Diff>,
    StreamVec<G, (Option<GlobalId>, HealthStatusUpdate)>,
    StreamVec<G, Infallible>,
    PressOnDropButton,
)
where
    G::Timestamp: Refines<mz_repr::Timestamp> + TotalOrder + Sync,
    F: FnOnce() -> Fut + 'static,
    Fut: std::future::Future<Output = US>,
    US: UpsertStateBackend<G::Timestamp, FromTime>,
    FromTime: Debug + timely::ExchangeData + Clone + Ord + Sync,
{
    let mut builder = AsyncOperatorBuilder::new("Upsert".to_string(), input.scope());

    // We only care about UpsertValueError since this is the only error that we can retract
    let persist_input = persist_input.flat_map(move |result| {
        let value = match result {
            Ok(ok) => Ok(ok),
            Err(DataflowError::EnvelopeError(err)) => match *err {
                EnvelopeError::Upsert(err) => Err(Box::new(err)),
                _ => return None,
            },
            Err(_) => return None,
        };
        let value_ref = match value {
            Ok(ref row) => Ok(row),
            Err(ref err) => Err(&**err),
        };
        Some((UpsertKey::from_value(value_ref, &key_indices), value))
    });
    let (output_handle, output) = builder.new_output::<CapacityContainerBuilder<_>>();

    // An output that just reports progress of the snapshot consolidation process upstream to the
    // persist source to ensure that backpressure is applied
    let (_snapshot_handle, snapshot_stream) =
        builder.new_output::<CapacityContainerBuilder<Vec<Infallible>>>();

    let (mut health_output, health_stream) = builder.new_output();
    let mut input = builder.new_input_for(
        input.inner,
        Exchange::new(move |((key, _, _), _, _)| UpsertKey::hashed(key)),
        &output_handle,
    );

    let mut persist_input = builder.new_disconnected_input(
        persist_input.inner,
        Exchange::new(|((key, _), _, _)| UpsertKey::hashed(key)),
    );

    let upsert_shared_metrics = Arc::clone(&upsert_metrics.shared);

    let shutdown_button = builder.build(move |caps| async move {
        let [output_cap, snapshot_cap, health_cap]: [_; 3] = caps.try_into().unwrap();
        drop(output_cap);
        let mut snapshot_cap = CapabilitySet::from_elem(snapshot_cap);

        let mut state = UpsertState::<_, G::Timestamp, FromTime>::new(
            state_fn().await,
            upsert_shared_metrics,
            &upsert_metrics,
            source_config.source_statistics.clone(),
            upsert_config.shrink_upsert_unused_buffers_by_ratio,
        );

        // True while we're still reading the initial "snapshot" (a whole bunch
        // of updates, all at the same initial timestamp) from our persist
        // input or while we're reading the initial snapshot from the upstream
        // source.
        let mut hydrating = true;

        // A re-usable buffer of changes, per key. This is an `IndexMap`
        // because it has to be `drain`-able and have a consistent iteration
        // order.
        let mut commands_state: indexmap::IndexMap<
            _,
            upsert_types::UpsertValueAndSize<G::Timestamp, FromTime>,
        > = indexmap::IndexMap::new();
        let mut multi_get_scratch = Vec::new();

        // For stashing source input while it's not eligible for processing.
        let mut stash = vec![];
        // A capability suitable for emitting any updates based on stash. No capability is held
        // when the stash is empty.
        let mut stash_cap: Option<Capability<G::Timestamp>> = None;
        let mut input_upper = Antichain::from_elem(Timestamp::minimum());
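        // The time at which we may partially drain the stash, if data arrived
        // right at the input frontier; see the `prevent_snapshot_buffering`
        // handling below.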
        let mut partial_drain_time = None;

        // The stash and upper frontier for our persist/feedback input.
        let mut persist_stash = vec![];
        let mut persist_upper = Antichain::from_elem(Timestamp::minimum());

        // We keep track of the largest timestamp seen on the persist input so
        // that we can block processing source input while that timestamp is
        // beyond the persist frontier. While ingesting updates of a timestamp,
        // our upsert state is in a consolidating state, and trying to read it
        // at that time would yield a panic.
        //
        // NOTE(aljoscha): You would think that it cannot happen that we even
        // attempt to process source updates while the state is in a
        // consolidating state, because we always wait until the persist
        // frontier "catches up" with the timestamp of the source input. If
        // there were only this one UPSERT operator and no concurrent
        // instances, that would be true. But with concurrent instances it can
        // happen that an operator that is faster than us gets updates written
        // to persist, and we would then be ingesting them.
        let mut largest_seen_persist_ts: Option<G::Timestamp> = None;

        // A buffer for our output.
        let mut output_updates = vec![];

        let mut error_emitter = (&mut health_output, &health_cap);

        loop {
            tokio::select! {
                _ = persist_input.ready() => {
                    // Read away as much input as we can.
                    while let Some(persist_event) = persist_input.next_sync() {
                        match persist_event {
                            AsyncEvent::Data(time, data) => {
                                tracing::trace!(
                                    worker_id = %source_config.worker_id,
                                    source_id = %source_config.id,
                                    time=?time,
                                    updates=%data.len(),
                                    "received persist data");

                                persist_stash.extend(data.into_iter().map(
                                    |((key, value), ts, diff)| {
                                        largest_seen_persist_ts =
                                            std::cmp::max(
                                                largest_seen_persist_ts
                                                    .clone(),
                                                Some(ts.clone()),
                                            );
                                        (key, value, ts, diff)
                                    },
                                ));
                            }
                            AsyncEvent::Progress(upper) => {
                                tracing::trace!(
                                    worker_id = %source_config.worker_id,
                                    source_id = %source_config.id,
                                    ?upper,
                                    "received persist progress");
                                persist_upper = upper;
                            }
                        }
                    }

                    let last_rehydration_chunk =
                        hydrating && PartialOrder::less_equal(&resume_upper, &persist_upper);

                    tracing::debug!(
                        worker_id = %source_config.worker_id,
                        source_id = %source_config.id,
                        persist_stash = %persist_stash.len(),
                        %hydrating,
                        %last_rehydration_chunk,
                        ?resume_upper,
                        ?persist_upper,
                        "ingesting persist snapshot chunk");

                    // Log any keys in this batch that have a suspicious net diff,
                    // to help diagnose how diff_sum corruption enters the system.
                    // We project to (key, diff) and consolidate to get the net
                    // diff per key.
                    {
                        let mut key_diffs: Vec<(UpsertKey, mz_repr::Diff)> = persist_stash
                            .iter()
                            .map(|(key, _val, _ts, diff)| (*key, *diff))
                            .collect();
                        differential_dataflow::consolidation::consolidate(&mut key_diffs);
                        for (key, net_diff) in &key_diffs {
                            if net_diff.into_inner() > 1 || net_diff.into_inner() < -1 {
                                tracing::warn!(
                                    worker_id = %source_config.worker_id,
                                    source_id = %source_config.id,
                                    ?key,
                                    net_diff = net_diff.into_inner(),
                                    %hydrating,
                                    ?persist_upper,
                                    "persist feedback batch has key with suspicious net diff \
                                    (expected -1, 0, or 1)"
                                );
                            }
                        }
                    }

                    let persist_stash_iter = persist_stash
                        .drain(..)
                        .map(|(key, val, _ts, diff)| (key, val, diff));

                    match state
                        .consolidate_chunk(
                            persist_stash_iter,
                            last_rehydration_chunk,
                        )
                        .await
                    {
                        Ok(_) => {}
                        Err(e) => {
                            // Make sure our persist source can shut down.
                            persist_token.take();
                            snapshot_cap.downgrade(&[]);
                            UpsertErrorEmitter::<G>::emit(
                                &mut error_emitter,
                                "Failed to rehydrate state".to_string(),
                                e,
                            )
                            .await;
                        }
                    }

                    tracing::debug!(
                        worker_id = %source_config.worker_id,
                        source_id = %source_config.id,
                        ?resume_upper,
                        ?persist_upper,
                        "downgrading snapshot cap",
                    );

                    // Only downgrade this _after_ ingesting the data, because
                    // that can actually take quite some time, and we don't want
                    // to announce that we're done ingesting the initial
                    // snapshot too early.
                    //
                    // When we finish ingesting our initial persist snapshot,
                    // during "re-hydration", we downgrade this to the empty
                    // frontier, so we need to be lenient to this failing from
                    // then on.
                    let _ = snapshot_cap.try_downgrade(persist_upper.iter());

                    if last_rehydration_chunk {
                        hydrating = false;

                        tracing::info!(
                            worker_id = %source_config.worker_id,
                            source_id = %source_config.id,
                            "upsert source finished rehydration",
                        );

                        snapshot_cap.downgrade(&[]);
                    }
                }
                _ = input.ready() => {
                    let mut events_processed = 0;
                    while let Some(event) = input.next_sync() {
                        match event {
                            AsyncEvent::Data(cap, mut data) => {
                                tracing::trace!(
                                    worker_id = %source_config.worker_id,
                                    source_id = %source_config.id,
                                    time=?cap.time(),
                                    updates=%data.len(),
                                    "received data");

                                let event_time = cap.time().clone();

                                stage_input(
                                    &mut stash,
                                    &mut data,
                                    &input_upper,
                                    &resume_upper,
                                );
                                if !stash.is_empty() {
                                    // Update the stashed capability to the minimum
                                    stash_cap = match stash_cap {
                                        Some(stash_cap) => {
                                            if cap.time() < stash_cap.time() {
                                                Some(cap)
                                            } else {
                                                Some(stash_cap)
                                            }
                                        }
                                        None => Some(cap)
                                    };
                                }

                                if prevent_snapshot_buffering
                                    && input_upper.as_option()
                                        == Some(&event_time)
                                {
                                    tracing::debug!(
                                        worker_id = %source_config.worker_id,
                                        source_id = %source_config.id,
                                        ?event_time,
                                        ?resume_upper,
                                        ?input_upper,
                                        "allowing partial drain");
                                    partial_drain_time = Some(event_time.clone());
                                } else {
                                    tracing::debug!(
                                        worker_id = %source_config.worker_id,
                                        source_id = %source_config.id,
                                        %prevent_snapshot_buffering,
                                        ?event_time,
                                        ?resume_upper,
                                        ?input_upper,
                                        "not allowing partial drain");
                                }
                            }
                            AsyncEvent::Progress(upper) => {
                                tracing::trace!(
                                    worker_id = %source_config.worker_id,
                                    source_id = %source_config.id,
                                    ?upper,
                                    "received progress");

                                // Ignore progress updates before the `resume_upper`, which is our initial
                                // capability post-snapshotting.
                                if PartialOrder::less_than(&upper, &resume_upper) {
                                    tracing::trace!(
                                        worker_id = %source_config.worker_id,
                                        source_id = %source_config.id,
                                        ?upper,
                                        ?resume_upper,
                                        "ignoring progress updates before resume_upper");
                                    continue;
                                }

                                // Disable partial drain, because this progress
                                // update has moved the frontier. We might allow
                                // it again once we receive data right at the
                                // frontier again.
                                partial_drain_time = None;
                                input_upper = upper;
                            }
                        }

                        events_processed += 1;
                        if let Some(max) = snapshot_buffering_max {
                            if events_processed >= max {
                                break;
                            }
                        }
                    }
                }
            };

            // While we have partially ingested updates of a timestamp, our
            // state is in an inconsistent/consolidating state and accessing it
            // would panic.
            if let Some(largest_seen_persist_ts) = largest_seen_persist_ts.as_ref() {
                let largest_seen_outer_persist_ts = largest_seen_persist_ts.clone().to_outer();
                let outer_persist_upper = persist_upper.iter().map(|ts| ts.clone().to_outer());
                let outer_persist_upper = Antichain::from_iter(outer_persist_upper);
                if outer_persist_upper.less_equal(&largest_seen_outer_persist_ts) {
                    continue;
                }
            }

            // We try and drain from our stash every time we go through the
            // loop. More of our stash can become eligible for draining both
            // when the source-input frontier advances and when the persist
            // frontier advances.
            if !stash.is_empty() {
                let cap = stash_cap
                    .as_mut()
                    .expect("missing capability for non-empty stash");

                tracing::trace!(
                    worker_id = %source_config.worker_id,
                    source_id = %source_config.id,
                    ?cap,
                    ?stash,
                    "stashed updates");

                let mut min_remaining_time = drain_staged_input::<_, G, _, _, _>(
                    &mut stash,
                    &mut commands_state,
                    &mut output_updates,
                    &mut multi_get_scratch,
                    DrainStyle::ToUpper {
                        input_upper: &input_upper,
                        persist_upper: &persist_upper,
                    },
                    &mut error_emitter,
                    &mut state,
                    &source_config,
                )
                .await;

                tracing::trace!(
                    worker_id = %source_config.worker_id,
                    source_id = %source_config.id,
                    output_updates = %output_updates.len(),
                    "output updates for complete timestamp");

                for (update, ts, diff) in output_updates.drain(..) {
                    output_handle.give(cap, (update, ts, diff));
                }

                if !stash.is_empty() {
                    let min_remaining_time = min_remaining_time
                        .take()
                        .expect("we still have updates left");
                    cap.downgrade(&min_remaining_time);
                } else {
                    stash_cap = None;
                }
            }

            if input_upper.is_empty() {
                tracing::debug!(
                    worker_id = %source_config.worker_id,
                    source_id = %source_config.id,
                    "input exhausted, shutting down");
                break;
            };

            // If there were staged events that occurred at the capability time, drain
            // them. This is safe because out-of-order updates to the same key that are
            // drained in separate calls to `drain_staged_input` are correctly ordered by
            // their `FromTime` in `drain_staged_input`.
            //
            // Note also that this may result in more updates in the output collection than
            // the minimum. However, because the frontier only advances on `Progress` updates,
            // the collection always accumulates correctly for all keys.
            if let Some(partial_drain_time) = &partial_drain_time {
                if !stash.is_empty() {
                    let cap = stash_cap
                        .as_mut()
                        .expect("missing capability for non-empty stash");

                    tracing::trace!(
                        worker_id = %source_config.worker_id,
                        source_id = %source_config.id,
                        ?cap,
                        ?stash,
                        "stashed updates");

                    let mut min_remaining_time = drain_staged_input::<_, G, _, _, _>(
                        &mut stash,
                        &mut commands_state,
                        &mut output_updates,
                        &mut multi_get_scratch,
                        DrainStyle::AtTime {
                            time: partial_drain_time.clone(),
                            persist_upper: &persist_upper,
                        },
                        &mut error_emitter,
                        &mut state,
                        &source_config,
                    )
                    .await;

                    tracing::trace!(
                        worker_id = %source_config.worker_id,
                        source_id = %source_config.id,
                        output_updates = %output_updates.len(),
                        "output updates for partial timestamp");

                    for (update, ts, diff) in output_updates.drain(..) {
                        output_handle.give(cap, (update, ts, diff));
                    }

                    if !stash.is_empty() {
                        let min_remaining_time = min_remaining_time
                            .take()
                            .expect("we still have updates left");
                        cap.downgrade(&min_remaining_time);
                    } else {
                        stash_cap = None;
                    }
                }
            }
        }
    });

    (
        output
            .as_collection()
            .map(|result: UpsertValue| match result {
                Ok(ok) => Ok(ok),
                Err(err) => Err(DataflowError::from(EnvelopeError::Upsert(*err))),
            }),
        health_stream,
        snapshot_stream,
        shutdown_button.press_on_drop(),
    )
}

/// Helper method for [`upsert_inner`] used to stage `data` updates
/// from the input/source timely edge.
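///
/// For example (an illustrative sketch): while the input upper has not yet
/// passed the `resume_upper`, updates whose timestamps are not beyond
/// `resume_upper` are dropped, because they are already reflected in the state
/// we resumed from; the remaining updates are pushed into `stash` with their
/// `FromTime` wrapped in `Reverse`, so that sorting in `drain_staged_input`
/// places the latest change for a key first.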
#[allow(clippy::disallowed_types)]
fn stage_input<T, FromTime>(
    stash: &mut Vec<(T, UpsertKey, Reverse<FromTime>, Option<UpsertValue>)>,
    data: &mut Vec<((UpsertKey, Option<UpsertValue>, FromTime), T, Diff)>,
    input_upper: &Antichain<T>,
    resume_upper: &Antichain<T>,
) where
    T: PartialOrder + timely::progress::Timestamp,
    FromTime: Ord,
{
    if PartialOrder::less_equal(input_upper, resume_upper) {
        data.retain(|(_, ts, _)| resume_upper.less_equal(ts));
    }

    stash.extend(data.drain(..).map(|((key, value, order), time, diff)| {
        assert!(diff.is_positive(), "invalid upsert input");
        (time, key, Reverse(order), value)
    }));
}

/// The style of drain we are performing on the stash. `AtTime`-drains cannot
/// assume that all values have been seen, and must leave tombstones behind for
/// deleted values.
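///
/// For example (illustrative): deleting a key during an `AtTime` drain leaves
/// a provisional tombstone for that key in the upsert state, so a subsequent
/// drain at the same time sees that the key was deleted, rather than mistaking
/// it for never having been present.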
#[derive(Debug)]
enum DrainStyle<'a, T> {
    ToUpper {
        input_upper: &'a Antichain<T>,
        persist_upper: &'a Antichain<T>,
    },
    // For partial draining when taking the source snapshot.
    AtTime {
        time: T,
        persist_upper: &'a Antichain<T>,
    },
}

/// Helper method for [`upsert_inner`] used to drain stashed updates from the
/// input timely edge and turn them into differential output updates.
///
/// Returns the minimum observed time across the updates that remain in the
/// stash or `None` if none are left.
///
/// ## Correctness
///
/// It is safe to call this function multiple times with the same `persist_upper` provided that the
/// drain style is `AtTime`, which updates the state such that past actions are remembered and can
/// be undone in subsequent calls.
///
/// It is *not* safe to call this function more than once with the same `persist_upper` and a
/// `ToUpper` drain style. Doing so causes all calls except the first one to base their work on
/// stale state, since in this drain style no modifications to the state are made.
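///
/// For example (an illustrative sketch of the eligibility rules): with an
/// input upper of `[5]` and a persist upper of `[3]`, a `ToUpper`-style drain
/// processes a stashed update at time 3 (its timestamp is complete and all
/// previous times are reflected in the persist input), leaves an update at
/// time 4 stashed until the persist frontier passes its time, and drops an
/// update at time 2 as irrelevant, because the output shard already reflects
/// that time.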
async fn drain_staged_input<S, G, T, FromTime, E>(
    stash: &mut Vec<(T, UpsertKey, Reverse<FromTime>, Option<UpsertValue>)>,
    commands_state: &mut indexmap::IndexMap<UpsertKey, UpsertValueAndSize<T, FromTime>>,
    output_updates: &mut Vec<(UpsertValue, T, Diff)>,
    multi_get_scratch: &mut Vec<UpsertKey>,
    drain_style: DrainStyle<'_, T>,
    error_emitter: &mut E,
    state: &mut UpsertState<'_, S, T, FromTime>,
    source_config: &crate::source::SourceExportCreationConfig,
) -> Option<T>
where
    S: UpsertStateBackend<T, FromTime>,
    G: Scope,
    T: TotalOrder + timely::ExchangeData + Clone + Debug + Ord + Sync,
    FromTime: timely::ExchangeData + Clone + Ord + Sync,
    E: UpsertErrorEmitter<G>,
{
    let mut min_remaining_time = Antichain::new();

    let mut eligible_updates = stash
        .extract_if(.., |(ts, _, _, _)| {
            let eligible = match &drain_style {
                DrainStyle::ToUpper {
                    input_upper,
                    persist_upper,
                } => {
                    // We make sure that a) we only process updates when we know their
                    // timestamp is complete, that is there will be no more updates for
                    // that timestamp, and b) that "previous" times in the persist
                    // input are complete. The latter makes sure that we emit updates
                    // for the next timestamp that are consistent with the global state
                    // in the output persist shard, which also serves as a persistent
                    // copy of our in-memory/on-disk upsert state.
                    !input_upper.less_equal(ts) && !persist_upper.less_than(ts)
                }
                DrainStyle::AtTime {
                    time,
                    persist_upper,
                } => {
                    // Even when emitting partial updates, we still need to wait
                    // until "previous" times in the persist input are complete.
                    *ts <= *time && !persist_upper.less_than(ts)
                }
            };

            if !eligible {
                min_remaining_time.insert(ts.clone());
            }

            eligible
        })
        .filter(|(ts, _, _, _)| {
            let persist_upper = match &drain_style {
                DrainStyle::ToUpper {
                    input_upper: _,
                    persist_upper,
                } => persist_upper,
                DrainStyle::AtTime {
                    time: _,
                    persist_upper,
                } => persist_upper,
            };

            // Any update that is "in the past" of the persist upper is not
            // relevant anymore. We _can_ emit changes for it, but the
            // downstream persist_sink would filter these updates out because
            // the shard upper is already further ahead.
            //
            // Plus, our upsert state is up-to-date to the persist_upper, so we
            // wouldn't be able to emit correct retractions for incoming
            // commands whose `ts` is in the past of that.
            let relevant = persist_upper.less_equal(ts);
            relevant
        })
        .collect_vec();

    tracing::debug!(
        worker_id = %source_config.worker_id,
        source_id = %source_config.id,
        ?drain_style,
        remaining = %stash.len(),
        eligible = eligible_updates.len(),
        "draining stash");

    // Sort the eligible updates by (time, key, Reverse(from_time)) so that
    // deduping by (time, key) gives the latest change for that key.
    eligible_updates.sort_unstable_by(|a, b| {
        let (ts1, key1, from_ts1, val1) = a;
        let (ts2, key2, from_ts2, val2) = b;
        Ord::cmp(&(ts1, key1, from_ts1, val1), &(ts2, key2, from_ts2, val2))
    });

    // Read the previous values _per key_ out of `state`, recording each
    // along with the value with the _latest timestamp for that key_.
    commands_state.clear();
    for (_, key, _, _) in eligible_updates.iter() {
        commands_state.entry(*key).or_default();
    }

    // These iterators iterate in the same order because `commands_state`
    // is an `IndexMap`.
    multi_get_scratch.clear();
    multi_get_scratch.extend(commands_state.iter().map(|(k, _)| *k));
    match state
        .multi_get(multi_get_scratch.drain(..), commands_state.values_mut())
        .await
    {
        Ok(_) => {}
        Err(e) => {
            error_emitter
                .emit("Failed to fetch records from state".to_string(), e)
                .await;
        }
    }

    // From the eligible prefix we can deduplicate based on (ts, key), in order
    // to only process the command with the maximum order within each (ts, key)
    // group. This is achieved by wrapping the order in `Reverse(FromTime)`
    // above.
    let mut commands = eligible_updates.into_iter().dedup_by(|a, b| {
        let ((a_ts, a_key, _, _), (b_ts, b_key, _, _)) = (a, b);
        a_ts == b_ts && a_key == b_key
    });
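    // For example (illustrative): if eligible updates for key `k` at time `t`
    // arrived with `FromTime`s 4 and 2, the sort above places `Reverse(4)`
    // before `Reverse(2)`, so the dedup keeps the command with `FromTime` 4,
    // i.e. the latest change for `(t, k)`.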

    let bincode_opts = upsert_types::upsert_bincode_opts();
    // Upsert the values into `commands_state`, by recording the latest
    // value (or deletion). These will be synced at the end to the `state`.
    //
    // Note that we are effectively doing "mini-upsert" here, using
    // `command_state`. This "mini-upsert" is seeded with data from `state`, using
    // a single `multi_get` above, and the final state is written out into
    // `state` using a single `multi_put`. This simplifies `UpsertStateBackend`
    // implementations, and reduces the number of reads and writes we need to do.
    //
    // This "mini-upsert" technique is actually useful in `UpsertState`'s
    // `consolidate_snapshot_read_write_inner` implementation, minimizing gets and puts on
    // the `UpsertStateBackend` implementations. In some sense, it's "upsert all the way down".
    while let Some((ts, key, from_time, value)) = commands.next() {
        let mut command_state = if let Entry::Occupied(command_state) = commands_state.entry(key) {
            command_state
        } else {
            panic!("key missing from commands_state");
        };

        let existing_state_cell = &mut command_state.get_mut().value;

        if let Some(cs) = existing_state_cell.as_mut() {
            cs.ensure_decoded(bincode_opts, source_config.id, Some(&key));
        }

        // Skip this command if its order key is below the one in the upsert state.
        // Note that the existing order key may be `None` if the existing value
        // is from snapshotting, which always sorts below new values/deletes.
        let existing_order = existing_state_cell
            .as_ref()
            .and_then(|cs| cs.provisional_order(&ts));
        if existing_order >= Some(&from_time.0) {
            // Skip this update. If no later updates adjust this key, then we just
            // end up writing the same value back to state. If there
            // is nothing in the state, `existing_order` is `None`, and this
            // does not occur.
            continue;
        }

        match value {
            Some(value) => {
                if let Some(old_value) = existing_state_cell.as_ref() {
                    if let Some(old_value) = old_value.provisional_value_ref(&ts) {
                        output_updates.push((old_value.clone(), ts.clone(), Diff::MINUS_ONE));
                    }
                }

                match &drain_style {
                    DrainStyle::AtTime { .. } => {
                        let existing_value = existing_state_cell.take();

                        let new_value = match existing_value {
                            Some(existing_value) => existing_value.clone().into_provisional_value(
                                value.clone(),
                                ts.clone(),
                                from_time.0.clone(),
                            ),
                            None => StateValue::new_provisional_value(
                                value.clone(),
                                ts.clone(),
                                from_time.0.clone(),
                            ),
                        };

                        existing_state_cell.replace(new_value);
                    }
                    DrainStyle::ToUpper { .. } => {
                        // Not writing down provisional values, or anything.
                    }
                };

                output_updates.push((value, ts, Diff::ONE));
            }
            None => {
                if let Some(old_value) = existing_state_cell.as_ref() {
                    if let Some(old_value) = old_value.provisional_value_ref(&ts) {
                        output_updates.push((old_value.clone(), ts.clone(), Diff::MINUS_ONE));
                    }
                }

                match &drain_style {
                    DrainStyle::AtTime { .. } => {
                        let existing_value = existing_state_cell.take();

                        let new_value = match existing_value {
                            Some(existing_value) => existing_value
                                .into_provisional_tombstone(ts.clone(), from_time.0.clone()),
                            None => StateValue::new_provisional_tombstone(
                                ts.clone(),
                                from_time.0.clone(),
                            ),
                        };

                        existing_state_cell.replace(new_value);
                    }
                    DrainStyle::ToUpper { .. } => {
                        // Not writing down provisional values, or anything.
                    }
                }
            }
        }
    }

    match &drain_style {
        DrainStyle::AtTime { .. } => {
            match state
                .multi_put(
                    // We don't want to update per-record stats, like size of
                    // records indexed or count of records indexed.
                    //
                    // We only add provisional values and these will be
                    // overwritten once we receive updates for state from the
                    // persist input. And the merge functionality cannot know
                    // what was in state before merging, so it cannot correctly
                    // retract/update stats added here.
                    //
                    // Mostly, the merge functionality can't update those stats
                    // because merging happens in a function that we pass to
                    // rocksdb which doesn't have access to any external
                    // context. And in general, with rocksdb we do blind writes
                    // rather than inspect what was there before when
                    // updating/inserting.
                    false,
                    commands_state.drain(..).map(|(k, cv)| {
                        (
                            k,
                            upsert_types::PutValue {
                                value: cv.value.map(|cv| cv.into_decoded()),
                                previous_value_metadata: cv.metadata.map(|v| ValueMetadata {
                                    size: v.size.try_into().expect("less than i64 size"),
                                    is_tombstone: v.is_tombstone,
                                }),
                            },
                        )
                    }),
                )
                .await
            {
                Ok(_) => {}
                Err(e) => {
                    error_emitter
                        .emit("Failed to update records in state".to_string(), e)
                        .await;
                }
            }
        }
        style => {
            tracing::trace!(
                worker_id = %source_config.worker_id,
                source_id = %source_config.id,
                "not doing state update for drain style {:?}", style);
        }
    }

    min_remaining_time.into_option()
}

#[cfg(test)]
mod test {
    use std::sync::mpsc;

    use mz_ore::metrics::MetricsRegistry;
    use mz_persist_types::ShardId;
    use mz_repr::{Datum, Timestamp as MzTimestamp};
    use mz_rocksdb::{RocksDBConfig, ValueIterator};
    use mz_storage_operators::persist_source::Subtime;
    use mz_storage_types::sources::SourceEnvelope;
    use mz_storage_types::sources::envelope::{KeyEnvelope, UpsertEnvelope, UpsertStyle};
    use rocksdb::Env;
    use timely::dataflow::operators::capture::Extract;
    use timely::dataflow::operators::{Capture, Input, Probe};
    use timely::progress::Timestamp;

    use crate::metrics::StorageMetrics;
    use crate::metrics::upsert::UpsertMetricDefs;
    use crate::source::SourceExportCreationConfig;
    use crate::statistics::{SourceStatistics, SourceStatisticsMetricDefs};
    use crate::upsert::memory::InMemoryHashMap;
    use crate::upsert::types::{BincodeOpts, consolidating_merge_function, upsert_bincode_opts};

    use super::*;
    #[mz_ore::test]
    #[cfg_attr(miri, ignore)]
    fn gh_9160_repro() {
        // Helper to wrap timestamps in the appropriate types
        let new_ts = |ts| (MzTimestamp::new(ts), Subtime::minimum());

        let output_handle = timely::execute_directly(move |worker| {
            let (mut input_handle, mut persist_handle, output_handle) = worker
                .dataflow::<MzTimestamp, _, _>(|scope| {
                    // Enter a subscope since the upsert operator expects to work in a
                    // backpressure-enabled scope.
                    scope.scoped::<(MzTimestamp, Subtime), _, _>("upsert", |scope| {
                        let (input_handle, input) = scope.new_input();
                        let (persist_handle, persist_input) = scope.new_input();
                        let upsert_config = UpsertConfig {
                            shrink_upsert_unused_buffers_by_ratio: 0,
                        };
                        let source_id = GlobalId::User(0);
                        let metrics_registry = MetricsRegistry::new();
                        let upsert_metrics_defs =
                            UpsertMetricDefs::register_with(&metrics_registry);
                        let upsert_metrics =
                            UpsertMetrics::new(&upsert_metrics_defs, source_id, 0, None);

                        let metrics_registry = MetricsRegistry::new();
                        let storage_metrics = StorageMetrics::register_with(&metrics_registry);

                        let metrics_registry = MetricsRegistry::new();
                        let source_statistics_defs =
                            SourceStatisticsMetricDefs::register_with(&metrics_registry);
                        let envelope = SourceEnvelope::Upsert(UpsertEnvelope {
                            source_arity: 2,
                            style: UpsertStyle::Default(KeyEnvelope::Flattened),
                            key_indices: vec![0],
                        });
                        let source_statistics = SourceStatistics::new(
                            source_id,
                            0,
                            &source_statistics_defs,
                            source_id,
                            &ShardId::new(),
                            envelope,
                            Antichain::from_elem(Timestamp::minimum()),
                        );

                        let source_config = SourceExportCreationConfig {
                            id: GlobalId::User(0),
                            worker_id: 0,
                            metrics: storage_metrics,
                            source_statistics,
                        };
                        let (output, _, _, button) = upsert_inner(
                            input.as_collection(),
                            vec![0],
                            Antichain::from_elem(Timestamp::minimum()),
                            persist_input.as_collection(),
                            None,
                            upsert_metrics,
                            source_config,
                            || async { InMemoryHashMap::default() },
                            upsert_config,
                            true,
                            None,
                        );
                        std::mem::forget(button);

                        (input_handle, persist_handle, output.inner.capture())
                    })
                });

            // We work with a hypothetical schema of (key int, value int).

            // The input will contain records for two keys, 0 and 1.
            let key0 = UpsertKey::from_key(Ok(&Row::pack_slice(&[Datum::Int64(0)])));
            let key1 = UpsertKey::from_key(Ok(&Row::pack_slice(&[Datum::Int64(1)])));
            // We will assume that the kafka topic contains the following messages with their
            // associated reclocked timestamp:
            //  1. {offset=1, key=0, value=0}    @ mz_time = 0
            //  2. {offset=2, key=1, value=NULL} @ mz_time = 2  // <- deletion of unrelated key. Causes the operator
            //                                                  //    to hold the associated cap at time 2
            //  3. {offset=3, key=0, value=1}    @ mz_time = 3
            //  4. {offset=4, key=0, value=2}    @ mz_time = 3  // <- messages 3 and 4 are reclocked to time 3
            let value1 = Row::pack_slice(&[Datum::Int64(0), Datum::Int64(0)]);
            let value3 = Row::pack_slice(&[Datum::Int64(0), Datum::Int64(1)]);
            let value4 = Row::pack_slice(&[Datum::Int64(0), Datum::Int64(2)]);
            let msg1 = (key0, Some(Ok(value1.clone())), 1);
            let msg2 = (key1, None, 2);
            let msg3 = (key0, Some(Ok(value3)), 3);
            let msg4 = (key0, Some(Ok(value4)), 4);

            // The first message will initialize the upsert state such that key 0 has value 0 and
            // produce an output update to that effect.
            input_handle.send((msg1, new_ts(0), Diff::ONE));
            input_handle.advance_to(new_ts(2));
            worker.step();

            // We assume this worker successfully CAAs the update to the shard, so we send it back
            // through the persist_input.
            persist_handle.send((Ok(value1), new_ts(0), Diff::ONE));
            persist_handle.advance_to(new_ts(1));
            worker.step();

            // Then, messages 2 and 3 are sent as one batch with capability = 2
            input_handle.send_batch(&mut vec![
                (msg2, new_ts(2), Diff::ONE),
                (msg3, new_ts(3), Diff::ONE),
            ]);
            // Advance our capability to 3
            input_handle.advance_to(new_ts(3));
            // Message 4 is sent with capability 3
            input_handle.send_batch(&mut vec![(msg4, new_ts(3), Diff::ONE)]);
            // Advance our capability to 4
            input_handle.advance_to(new_ts(4));
            // We now step the worker so that the pending data is received. This causes the
            // operator to store internally the following map from capabilities to updates:
            // cap=2 => [ msg2, msg3 ]
            // cap=3 => [ msg4 ]
            worker.step();

            // We now assume that another replica raced us and processed msg1 at time 2, which in
            // this test is a no-op, so the persist frontier advances to time 3 without new data.
            persist_handle.advance_to(new_ts(3));
            // We now step this worker again, which will notice that the persist upper is {3} and
            // will attempt to process msg3 and msg4 *separately*, causing it to produce a double
            // retraction.
            worker.step();

            output_handle
        });

        let mut actual_output = output_handle
            .extract()
            .into_iter()
            .flat_map(|(_cap, container)| container)
            .collect();
        differential_dataflow::consolidation::consolidate_updates(&mut actual_output);

        // The expected consolidated output contains only updates for key 0, which has the value 0
        // at timestamp 0 and the value 2 at timestamp 3.
        let value1 = Row::pack_slice(&[Datum::Int64(0), Datum::Int64(0)]);
        let value4 = Row::pack_slice(&[Datum::Int64(0), Datum::Int64(2)]);
        let expected_output: Vec<(Result<Row, DataflowError>, _, _)> = vec![
            (Ok(value1.clone()), new_ts(0), Diff::ONE),
            (Ok(value1), new_ts(3), Diff::MINUS_ONE),
            (Ok(value4), new_ts(3), Diff::ONE),
        ];
        assert_eq!(actual_output, expected_output);
    }

    #[mz_ore::test]
    #[cfg_attr(miri, ignore)]
    fn gh_9540_repro() {
        // Helper to wrap timestamps in the appropriate types
        let mz_ts = |ts| (MzTimestamp::new(ts), Subtime::minimum());
        let (tx, rx) = mpsc::channel::<std::thread::JoinHandle<()>>();

        let rocksdb_dir = tempfile::tempdir().unwrap();
        let output_handle = timely::execute_directly(move |worker| {
            let tx = tx.clone();
            let (mut input_handle, mut persist_handle, output_probe, output_handle) =
                worker.dataflow::<MzTimestamp, _, _>(|scope| {
                    // Enter a subscope since the upsert operator expects to work in a
                    // backpressure-enabled scope.
                    scope.scoped::<(MzTimestamp, Subtime), _, _>("upsert", |scope| {
                        let (input_handle, input) = scope.new_input();
                        let (persist_handle, persist_input) = scope.new_input();
1126                        let upsert_config = UpsertConfig {
1127                            shrink_upsert_unused_buffers_by_ratio: 0,
1128                        };
1129                        let source_id = GlobalId::User(0);
1130                        let metrics_registry = MetricsRegistry::new();
1131                        let upsert_metrics_defs =
1132                            UpsertMetricDefs::register_with(&metrics_registry);
1133                        let upsert_metrics =
1134                            UpsertMetrics::new(&upsert_metrics_defs, source_id, 0, None);
1135                        let rocksdb_shared_metrics = Arc::clone(&upsert_metrics.rocksdb_shared);
1136                        let rocksdb_instance_metrics =
1137                            Arc::clone(&upsert_metrics.rocksdb_instance_metrics);
1138
1139                        let metrics_registry = MetricsRegistry::new();
1140                        let storage_metrics = StorageMetrics::register_with(&metrics_registry);
1141
1142                        let metrics_registry = MetricsRegistry::new();
1143                        let source_statistics_defs =
1144                            SourceStatisticsMetricDefs::register_with(&metrics_registry);
1145                        let envelope = SourceEnvelope::Upsert(UpsertEnvelope {
1146                            source_arity: 2,
1147                            style: UpsertStyle::Default(KeyEnvelope::Flattened),
1148                            key_indices: vec![0],
1149                        });
1150                        let source_statistics = SourceStatistics::new(
1151                            source_id,
1152                            0,
1153                            &source_statistics_defs,
1154                            source_id,
1155                            &ShardId::new(),
1156                            envelope,
1157                            Antichain::from_elem(Timestamp::minimum()),
1158                        );
1159
1160                        let source_config = SourceExportCreationConfig {
1161                            id: GlobalId::User(0),
1162                            worker_id: 0,
1163                            metrics: storage_metrics,
1164                            source_statistics,
1165                        };
1166
1167                        // A closure that will initialize and return a configured RocksDB instance
1168                        let rocksdb_init_fn = move || async move {
1169                            let merge_operator = Some((
1170                                "upsert_state_snapshot_merge_v1".to_string(),
1171                                |a: &[u8],
1172                                 b: ValueIterator<
1173                                    BincodeOpts,
1174                                    StateValue<(MzTimestamp, Subtime), u64>,
1175                                >| {
1176                                    consolidating_merge_function::<(MzTimestamp, Subtime), u64>(
1177                                        a.into(),
1178                                        b,
1179                                    )
1180                                },
1181                            ));
1182                            let rocksdb_cleanup_tries = 5;
1183                            let tuning = RocksDBConfig::new(Default::default(), None);
1184                            let mut rocksdb_inst = mz_rocksdb::RocksDBInstance::new(
1185                                rocksdb_dir.path(),
1186                                mz_rocksdb::InstanceOptions::new(
1187                                    Env::mem_env().unwrap(),
1188                                    rocksdb_cleanup_tries,
1189                                    merge_operator,
1190                                    // For now, just use the same config as the one used for
1191                                    // merging snapshots.
1192                                    upsert_bincode_opts(),
1193                                ),
1194                                tuning,
1195                                rocksdb_shared_metrics,
1196                                rocksdb_instance_metrics,
1197                            )
1198                            .unwrap();
1199
1200                            let handle = rocksdb_inst.take_core_loop_handle().expect("join handle");
1201                            tx.send(handle).expect("sent joinhandle");
1202                            crate::upsert::rocksdb::RocksDB::new(rocksdb_inst)
1203                        };
1204
1205                        let (output, _, _, button) = upsert_inner(
1206                            input.as_collection(),
1207                            vec![0],
1208                            Antichain::from_elem(Timestamp::minimum()),
1209                            persist_input.as_collection(),
1210                            None,
1211                            upsert_metrics,
1212                            source_config,
1213                            rocksdb_init_fn,
1214                            upsert_config,
1215                            true,
1216                            None,
1217                        );
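                        // `button` is a drop guard that shuts the operator down when
                        // dropped; leak it so the dataflow keeps running for the whole
                        // test.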
                        std::mem::forget(button);

                        let (probe, stream) = output.inner.probe();
                        (input_handle, persist_handle, probe, stream.capture())
                    })
                });

            // We work with a hypothetical schema of (key int, value int).

            // The input will contain records for a single key, 0.
            let key0 = UpsertKey::from_key(Ok(&Row::pack_slice(&[Datum::Int64(0)])));

            // We will assume that the kafka topic contains the following messages with their
            // associated reclocked timestamp:
            //  1. {offset=1, key=0, value=0}    @ mz_time = 0
            //  2. {offset=2, key=0, value=NULL} @ mz_time = 1
            //  3. {offset=3, key=0, value=0}    @ mz_time = 2
            //  4. {offset=4, key=0, value=NULL} @ mz_time = 2  // <- messages 3 and 4 are *BOTH* reclocked to time 2
            let value1 = Row::pack_slice(&[Datum::Int64(0), Datum::Int64(0)]);
            let msg1 = ((key0, Some(Ok(value1.clone())), 1), mz_ts(0), Diff::ONE);
            let msg2 = ((key0, None, 2), mz_ts(1), Diff::ONE);
            let msg3 = ((key0, Some(Ok(value1.clone())), 3), mz_ts(2), Diff::ONE);
            let msg4 = ((key0, None, 4), mz_ts(2), Diff::ONE);
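
            // Because messages 3 and 4 are both reclocked to time 2 they can be
            // ingested in either order within that timestamp. We feed msg4 first and
            // msg3 last to exercise the out-of-order case the issue describes.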

            // The first message will initialize the upsert state such that key 0 has value 0 and
            // produce an output update to that effect.
            input_handle.send(msg1);
            input_handle.advance_to(mz_ts(1));
            while output_probe.less_than(&mz_ts(1)) {
                worker.step_or_park(None);
            }
            // Feed back the produced output..
            persist_handle.send((Ok(value1.clone()), mz_ts(0), Diff::ONE));
            persist_handle.advance_to(mz_ts(1));
            // ..and send the next upsert command that deletes the key.
            input_handle.send(msg2);
            input_handle.advance_to(mz_ts(2));
            while output_probe.less_than(&mz_ts(2)) {
                worker.step_or_park(None);
            }

            // Feed back the produced output..
            persist_handle.send((Ok(value1), mz_ts(1), Diff::MINUS_ONE));
            persist_handle.advance_to(mz_ts(2));
            // ..and send the next *out of order* upsert command that deletes the key. Here msg4
            // happens at offset 4 and the operator should remember that.
            input_handle.send(msg4);
            input_handle.flush();
            // Run the worker for enough steps to process these events. We can't guide the
            // execution with the probe here since the frontier does not advance; only provisional
            // updates are produced.
            for _ in 0..5 {
                worker.step();
            }

            // Send the missing message, which will confuse the operator if it has lost track
            // that it has already seen a command for key 0 at offset 4; msg3 must therefore
            // be skipped.
            input_handle.send(msg3);
            input_handle.flush();
            input_handle.advance_to(mz_ts(3));
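
            // Advancing the input to time 3 closes time 2, allowing the operator to
            // finalize the provisional updates produced above.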

            output_handle
        });

        let mut actual_output = output_handle
            .extract()
            .into_iter()
            .flat_map(|(_cap, container)| container)
            .collect();
        differential_dataflow::consolidation::consolidate_updates(&mut actual_output);

        // The expected consolidated output contains only updates for key 0, which has the value 0
        // at timestamp 0 and is deleted at timestamp 1. Crucially, there must be no updates at
        // timestamp 2: the late msg3 was skipped because msg4 had already been processed.
        let value1 = Row::pack_slice(&[Datum::Int64(0), Datum::Int64(0)]);
        let expected_output: Vec<(Result<Row, DataflowError>, _, _)> = vec![
            (Ok(value1.clone()), mz_ts(0), Diff::ONE),
            (Ok(value1), mz_ts(1), Diff::MINUS_ONE),
        ];
        assert_eq!(actual_output, expected_output);

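        // Join the RocksDB core loop handle that the init closure sent over the
        // channel, so that a panic on that background thread fails the test.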
        while let Ok(handle) = rx.recv() {
            handle.join().expect("threads completed successfully");
        }
    }
}