// mz_storage_controller/collection_mgmt.rs

1// Copyright Materialize, Inc. and contributors. All rights reserved.
2//
3// Use of this software is governed by the Business Source License
4// included in the LICENSE file.
5//
6// As of the Change Date specified in that file, in accordance with
7// the Business Source License, use of this software will be governed
8// by the Apache License, Version 2.0.
9
10//! Tokio tasks (and support machinery) for maintaining storage-managed
11//! collections.
12//!
13//! We differentiate between append-only collections and differential
14//! collections. The intent is that knowing the type allows being more
15//! intentional about what state we keep in memory and how we work when in
16//! read-only mode / during zero-downtime upgrades.
17//!
18//! ## Append-only collections
19//!
20//! Writers only append blind writes. Those writes never fail. It does not
21//! matter at what timestamp they happen (to a degree, but ...).
22//!
23//! While in read-only mode, the append-only write task can immediately write
24//! updates out as batches, but only append them when going out of read-only
25//! mode.
26//!
27//! ## Differential collections
28//!
29//! These are very similar to the self-correcting persist_sink. We have an
30//! in-memory desired state and continually make it so that persist matches
31//! desired. As described below (in the task implementation), we could do this
32//! in a memory efficient way by keeping open a persist read handle and
33//! continually updating/consolidating our desired collection. This way, we
34//! would be memory-efficient even in read-only mode.
35//!
36//! This is an evolution of the current design where, on startup, we bring the
37//! persist collection into a known state (mostly by retracting everything) and
38//! then assume that this `envd` is the only writer. We panic when that is ever
39//! not the case, which we notice when the upper of a collection changes
40//! unexpectedly. With this new design we can instead continually update our
41//! view of the persist shard and emit updates when needed, when desired
42//! changed.
43//!
44//! NOTE: As it is, we always keep all of desired in memory. Only when told to
45//! go out of read-only mode would we start attempting to write.
46//!
47//! ## Read-only mode
48//!
49//! When [`CollectionManager`] is in read-only mode it cannot write out to
50//! persist. It will, however, maintain the `desired` state of differential
51//! collections so that we can immediately start writing out updates when going
52//! out of read-only mode.
53//!
54//! For append-only collections we either panic, in the case of
55//! [`CollectionManager::blind_write`], or report back a
56//! [`StorageError::ReadOnly`] when trying to append through a
57//! [`MonotonicAppender`] returned from
58//! [`CollectionManager::monotonic_appender`].
59
60use std::any::Any;
61use std::cmp::Reverse;
62use std::collections::{BTreeMap, BinaryHeap};
63use std::fmt::Debug;
64use std::ops::ControlFlow;
65use std::pin::Pin;
66use std::str::FromStr;
67use std::sync::atomic::{AtomicU64, Ordering};
68use std::sync::{Arc, Mutex};
69
70use anyhow::{anyhow, bail};
71use chrono::{DateTime, Utc};
72use differential_dataflow::consolidation;
73use futures::future::BoxFuture;
74use futures::stream::StreamExt;
75use futures::{Future, FutureExt};
76use mz_cluster_client::ReplicaId;
77use mz_dyncfg::ConfigSet;
78use mz_ore::now::NowFn;
79use mz_ore::retry::Retry;
80use mz_ore::soft_panic_or_log;
81use mz_ore::task::AbortOnDropHandle;
82use mz_persist_client::batch::Added;
83use mz_persist_client::read::ReadHandle;
84use mz_persist_client::write::WriteHandle;
85use mz_repr::adt::timestamp::CheckedTimestamp;
86use mz_repr::{ColumnName, Diff, GlobalId, Row, Timestamp};
87use mz_storage_client::client::{AppendOnlyUpdate, Status, TimestamplessUpdate};
88use mz_storage_client::controller::{IntrospectionType, MonotonicAppender, StorageWriteOp};
89use mz_storage_client::healthcheck::{
90    MZ_SINK_STATUS_HISTORY_DESC, MZ_SOURCE_STATUS_HISTORY_DESC, REPLICA_METRICS_HISTORY_DESC,
91    WALLCLOCK_GLOBAL_LAG_HISTOGRAM_RAW_DESC, WALLCLOCK_LAG_HISTORY_DESC,
92};
93use mz_storage_client::metrics::StorageControllerMetrics;
94use mz_storage_client::statistics::ControllerSinkStatistics;
95use mz_storage_client::storage_collections::StorageCollections;
96use mz_storage_types::StorageDiff;
97use mz_storage_types::controller::InvalidUpper;
98use mz_storage_types::dyncfgs::{
99    REPLICA_METRICS_HISTORY_RETENTION_INTERVAL, WALLCLOCK_GLOBAL_LAG_HISTOGRAM_RETENTION_INTERVAL,
100    WALLCLOCK_LAG_HISTORY_RETENTION_INTERVAL,
101};
102use mz_storage_types::parameters::{
103    STORAGE_MANAGED_COLLECTIONS_BATCH_DURATION_DEFAULT, StorageParameters,
104};
105use mz_storage_types::sources::SourceData;
106use timely::progress::Antichain;
107use tokio::sync::{mpsc, oneshot, watch};
108use tokio::time::{Duration, Instant};
109use tracing::{debug, error, info};
110
111use crate::{
112    StatusHistoryDesc, StatusHistoryRetentionPolicy, StorageError, collection_mgmt,
113    privatelink_status_history_desc, replica_status_history_desc, sink_status_history_desc,
114    snapshot_statistics, source_status_history_desc, statistics,
115};
116
/// Default rate (in milliseconds) at which we advance the uppers of managed
/// collections, so that they remain readable at recent timestamps.
const DEFAULT_TICK_MS: u64 = 1_000;

/// A channel for sending writes to a differential collection.
///
/// Each message carries a write op plus a oneshot sender on which the write
/// task reports the outcome of applying that op.
type DifferentialWriteChannel =
    mpsc::UnboundedSender<(StorageWriteOp, oneshot::Sender<Result<(), StorageError>>)>;

/// A channel for sending writes to an append-only collection.
///
/// Each message carries a batch of updates plus a oneshot sender on which the
/// write task reports the outcome of appending that batch.
type AppendOnlyWriteChannel = mpsc::UnboundedSender<(
    Vec<AppendOnlyUpdate>,
    oneshot::Sender<Result<(), StorageError>>,
)>;

/// Handle to a per-collection write task; the task is aborted when this is dropped.
type WriteTask = AbortOnDropHandle<()>;
/// Sender used to ask a write task to shut down gracefully.
type ShutdownSender = oneshot::Sender<()>;
132
/// Types of storage-managed/introspection collections:
///
/// Append-only: Only accepts blind writes, writes that can be applied at any
/// timestamp and don’t depend on current collection contents.
///
/// Pseudo append-only: We treat them largely as append-only collections but
/// periodically (currently on bootstrap) retract old updates from them.
///
/// Differential: at any given time `t` , collection contents mirrors some
/// (small cardinality) state. The cardinality of the collection stays constant
/// if the thing that is mirrored doesn’t change in cardinality. At steady
/// state, updates always come in pairs of retractions/additions.
pub enum CollectionManagerKind {
    /// Append-only (and pseudo append-only) collections: blind writes only,
    /// never reconciled against current collection contents.
    AppendOnly,
    /// Differential collections: contents are continually reconciled against
    /// an in-memory `desired` state.
    Differential,
}
149
#[derive(Debug, Clone)]
pub struct CollectionManager {
    /// When a [`CollectionManager`] is in read-only mode it must not affect any
    /// changes to external state.
    read_only: bool,

    // WIP: Name TBD! I thought about `managed_collections`, `ivm_collections`,
    // `self_correcting_collections`.
    /// These are collections that we write to by adding/removing updates to an
    /// internal _desired_ collection. The `CollectionManager` continually makes
    /// sure that collection contents (in persist) match the desired state.
    differential_collections:
        Arc<Mutex<BTreeMap<GlobalId, (DifferentialWriteChannel, WriteTask, ShutdownSender)>>>,

    /// Collections that we only append to using blind-writes.
    ///
    /// Every write succeeds at _some_ timestamp, and we never check what the
    /// actual contents of the collection (in persist) are.
    append_only_collections:
        Arc<Mutex<BTreeMap<GlobalId, (AppendOnlyWriteChannel, WriteTask, ShutdownSender)>>>,

    /// Amount of time we'll wait before sending a batch of inserts to Persist, for user
    /// collections.
    user_batch_duration_ms: Arc<AtomicU64>,
    /// Source of wall-clock time, used by write tasks when bumping uppers.
    now: NowFn,
}
176
/// The `CollectionManager` provides two complementary functions:
/// - Providing an API to append values to a registered set of collections.
///   For this use case:
///     - The `CollectionManager` expects to be the only writer.
///     - Appending to a closed collection panics
/// - Automatically advancing the timestamp of managed collections every
///   second. For this use case:
///     - The `CollectionManager` handles contention by permitting and ignoring errors.
///     - Closed collections will not panic if they continue receiving these requests.
impl CollectionManager {
    /// Creates a new [`CollectionManager`] with no registered collections.
    ///
    /// When `read_only` is true, the manager (and the write tasks it spawns)
    /// must not write to external state.
    pub(super) fn new(read_only: bool, now: NowFn) -> CollectionManager {
        let batch_duration_ms: u64 = STORAGE_MANAGED_COLLECTIONS_BATCH_DURATION_DEFAULT
            .as_millis()
            .try_into()
            .expect("known to fit");

        CollectionManager {
            read_only,
            differential_collections: Arc::new(Mutex::new(BTreeMap::new())),
            append_only_collections: Arc::new(Mutex::new(BTreeMap::new())),
            user_batch_duration_ms: Arc::new(AtomicU64::new(batch_duration_ms)),
            now,
        }
    }

    /// Updates the duration we'll wait to batch events for user owned collections.
    pub fn update_user_batch_duration(&self, duration: Duration) {
        tracing::info!(?duration, "updating user batch duration");
        // Saturate at u64::MAX rather than panicking on very large durations.
        let millis: u64 = duration.as_millis().try_into().unwrap_or(u64::MAX);
        self.user_batch_duration_ms.store(millis, Ordering::Relaxed);
    }

    /// Registers a new _differential collection_.
    ///
    /// The [CollectionManager] will automatically advance the upper of every
    /// registered collection every second.
    ///
    /// Update the `desired` state of a differential collection using
    /// [Self::differential_write].
    pub(super) fn register_differential_collection<R>(
        &self,
        id: GlobalId,
        write_handle: WriteHandle<SourceData, (), Timestamp, StorageDiff>,
        read_handle_fn: R,
        force_writable: bool,
        introspection_config: DifferentialIntrospectionConfig,
    ) where
        R: FnMut() -> Pin<
                Box<dyn Future<Output = ReadHandle<SourceData, (), Timestamp, StorageDiff>> + Send>,
            > + Send
            + Sync
            + 'static,
    {
        let mut guard = self
            .differential_collections
            .lock()
            .expect("collection_mgmt panicked");

        // Check if this collection is already registered.
        if let Some((_writer, task, _shutdown_tx)) = guard.get(&id) {
            // The collection is already registered and the task is still running so nothing to do.
            if !task.is_finished() {
                // TODO(parkmycar): Panic here if we never see this error in production.
                tracing::error!("Registered a collection twice! {id:?}");
                return;
            }
        }

        let read_only = self.get_read_only(id, force_writable);

        // Spawns a new task so we can write to this collection.
        let writer_and_handle = DifferentialWriteTask::spawn(
            id,
            write_handle,
            read_handle_fn,
            read_only,
            self.now.clone(),
            introspection_config,
        );
        let prev = guard.insert(id, writer_and_handle);

        // Double check the previous task was actually finished.
        if let Some((_, prev_task, _)) = prev {
            assert!(
                prev_task.is_finished(),
                "should only spawn a new task if the previous is finished"
            );
        }
    }

    /// Registers a new _append-only collection_.
    ///
    /// The [CollectionManager] will automatically advance the upper of every
    /// registered collection every second.
    pub(super) fn register_append_only_collection(
        &self,
        id: GlobalId,
        write_handle: WriteHandle<SourceData, (), Timestamp, StorageDiff>,
        force_writable: bool,
        introspection_config: Option<AppendOnlyIntrospectionConfig>,
    ) {
        let mut guard = self
            .append_only_collections
            .lock()
            .expect("collection_mgmt panicked");

        // Check if this collection is already registered.
        if let Some((_writer, task, _shutdown_tx)) = guard.get(&id) {
            // The collection is already registered and the task is still running so nothing to do.
            if !task.is_finished() {
                // TODO(parkmycar): Panic here if we never see this error in production.
                tracing::error!("Registered a collection twice! {id:?}");
                return;
            }
        }

        let read_only = self.get_read_only(id, force_writable);

        // Spawns a new task so we can write to this collection.
        let writer_and_handle = AppendOnlyWriteTask::spawn(
            id,
            write_handle,
            read_only,
            self.now.clone(),
            Arc::clone(&self.user_batch_duration_ms),
            introspection_config,
        );
        let prev = guard.insert(id, writer_and_handle);

        // Double check the previous task was actually finished.
        if let Some((_, prev_task, _)) = prev {
            assert!(
                prev_task.is_finished(),
                "should only spawn a new task if the previous is finished"
            );
        }
    }

    /// Unregisters the given collection.
    ///
    /// Also waits until the `CollectionManager` has completed all outstanding work to ensure that
    /// it has stopped referencing the provided `id`.
    #[mz_ore::instrument(level = "debug")]
    pub(super) fn unregister_collection(&self, id: GlobalId) -> BoxFuture<'static, ()> {
        // A collection is registered in at most one of the two maps; try the
        // differential collections first.
        let prev = self
            .differential_collections
            .lock()
            .expect("CollectionManager panicked")
            .remove(&id);

        // Wait for the task to complete before reporting as unregistered.
        if let Some((_prev_writer, prev_task, shutdown_tx)) = prev {
            // Notify the task it needs to shutdown.
            //
            // We can ignore errors here because they indicate the task is already done.
            let _ = shutdown_tx.send(());
            return Box::pin(prev_task.map(|_| ()));
        }

        let prev = self
            .append_only_collections
            .lock()
            .expect("CollectionManager panicked")
            .remove(&id);

        // Wait for the task to complete before reporting as unregistered.
        if let Some((_prev_writer, prev_task, shutdown_tx)) = prev {
            // Notify the task it needs to shutdown.
            //
            // We can ignore errors here because they indicate the task is already done.
            let _ = shutdown_tx.send(());
            return Box::pin(prev_task.map(|_| ()));
        }

        // The collection was not registered at all; nothing to wait for.
        Box::pin(futures::future::ready(()))
    }

    /// Returns a sender for writes to the given append-only collection.
    ///
    /// # Panics
    /// - If `id` does not belong to an append-only collections.
    pub(super) fn append_only_write_sender(&self, id: GlobalId) -> AppendOnlyWriteChannel {
        let collections = self.append_only_collections.lock().expect("poisoned");
        match collections.get(&id) {
            Some((tx, _, _)) => tx.clone(),
            None => panic!("missing append-only collection: {id}"),
        }
    }

    /// Returns a sender for writes to the given differential collection.
    ///
    /// # Panics
    /// - If `id` does not belong to a differential collections.
    pub(super) fn differential_write_sender(&self, id: GlobalId) -> DifferentialWriteChannel {
        let collections = self.differential_collections.lock().expect("poisoned");
        match collections.get(&id) {
            Some((tx, _, _)) => tx.clone(),
            None => panic!("missing differential collection: {id}"),
        }
    }

    /// Appends `updates` to the append-only collection identified by `id`, at
    /// _some_ timestamp. Does not wait for the append to complete.
    ///
    /// # Panics
    /// - If `id` does not belong to an append-only collections.
    /// - If this [`CollectionManager`] is in read-only mode.
    /// - If the collection closed.
    pub(super) fn blind_write(&self, id: GlobalId, updates: Vec<AppendOnlyUpdate>) {
        if self.read_only {
            panic!("attempting blind write to {} while in read-only mode", id);
        }

        if updates.is_empty() {
            return;
        }

        let collections = self.append_only_collections.lock().expect("poisoned");
        match collections.get(&id) {
            Some((update_tx, _, _)) => {
                // Fire-and-forget: we drop the receiver, so the write result is
                // never observed by the caller.
                let (tx, _rx) = oneshot::channel();
                update_tx.send((updates, tx)).expect("rx hung up");
            }
            None => panic!("missing append-only collection: {id}"),
        }
    }

    /// Updates the desired collection state of the differential collection identified by
    /// `id`. The underlying persist shard will reflect this change at
    /// _some_ point. Does not wait for the change to complete.
    ///
    /// # Panics
    /// - If `id` does not belong to a differential collection.
    /// - If the collection closed.
    pub(super) fn differential_write(&self, id: GlobalId, op: StorageWriteOp) {
        // An empty append is a no-op; skip the channel round-trip entirely.
        if op.is_empty_append() {
            return;
        }

        let collections = self.differential_collections.lock().expect("poisoned");
        match collections.get(&id) {
            Some((update_tx, _, _)) => {
                // Fire-and-forget: we drop the receiver, so the write result is
                // never observed by the caller.
                let (tx, _rx) = oneshot::channel();
                update_tx.send((op, tx)).expect("rx hung up");
            }
            None => panic!("missing differential collection: {id}"),
        }
    }

    /// Appends the given `updates` to the differential collection identified by `id`.
    ///
    /// # Panics
    /// - If `id` does not belong to a differential collection.
    /// - If the collection closed.
    pub(super) fn differential_append(&self, id: GlobalId, updates: Vec<(Row, Diff)>) {
        self.differential_write(id, StorageWriteOp::Append { updates })
    }

    /// Returns a [`MonotonicAppender`] that can be used to monotonically append updates to the
    /// collection correlated with `id`.
    ///
    /// # Errors
    /// - [`StorageError::IdentifierMissing`] if `id` is not a registered
    ///   append-only collection.
    pub(super) fn monotonic_appender(
        &self,
        id: GlobalId,
    ) -> Result<MonotonicAppender, StorageError> {
        let guard = self
            .append_only_collections
            .lock()
            .expect("CollectionManager panicked");
        let tx = guard
            .get(&id)
            .map(|(tx, _, _)| tx.clone())
            .ok_or(StorageError::IdentifierMissing(id))?;

        Ok(MonotonicAppender::new(tx))
    }

    /// Returns the effective read-only flag for the collection `id`.
    ///
    /// `force_writable` overrides the manager-wide read-only mode, but is only
    /// permitted for system collections.
    fn get_read_only(&self, id: GlobalId, force_writable: bool) -> bool {
        if force_writable {
            assert!(id.is_system(), "unexpected non-system global id: {id:?}");
            false
        } else {
            self.read_only
        }
    }
}
462
/// State and handles that a [`DifferentialWriteTask`] needs to prepare a
/// differential introspection collection for writing, e.g. seeding statistics
/// scrapers with current collection contents.
pub(crate) struct DifferentialIntrospectionConfig {
    /// A recent upper of the collection, used when snapshotting statistics.
    pub(crate) recent_upper: Antichain<Timestamp>,
    /// Which introspection collection this is; determines what preparation
    /// work is required.
    pub(crate) introspection_type: IntrospectionType,
    /// Handle for reading current collection contents.
    pub(crate) storage_collections: Arc<dyn StorageCollections + Send + Sync>,
    /// Handed to spawned statistics scrapers so they can write updates.
    pub(crate) collection_manager: collection_mgmt::CollectionManager,
    /// Shared in-memory source statistics, consumed by the scrapers.
    pub(crate) source_statistics: Arc<Mutex<statistics::SourceStatistics>>,
    /// Shared in-memory sink statistics, keyed by (collection, replica).
    pub(crate) sink_statistics:
        Arc<Mutex<BTreeMap<(GlobalId, Option<ReplicaId>), ControllerSinkStatistics>>>,
    /// How often the statistics scrapers run.
    pub(crate) statistics_interval: Duration,
    /// Receiver for dynamic updates to the scraper interval.
    pub(crate) statistics_interval_receiver: watch::Receiver<Duration>,
    /// How long statistics are retained before being removed.
    pub(crate) statistics_retention_duration: Duration,
    pub(crate) metrics: StorageControllerMetrics,
    /// Tokens whose drop stops the spawned scraper tasks; scoped to the
    /// controller's lifetime.
    pub(crate) introspection_tokens: Arc<Mutex<BTreeMap<GlobalId, Box<dyn Any + Send + Sync>>>>,
}
477
/// A task that will make it so that the state in persist matches the desired
/// state and continuously bump the upper for the specified collection.
///
/// NOTE: This implementation is a bit clunky, and could be optimized by not keeping
/// all of desired in memory (see comment below). It is meant to showcase the
/// general approach.
struct DifferentialWriteTask<R>
where
    R: FnMut() -> Pin<
            Box<dyn Future<Output = ReadHandle<SourceData, (), Timestamp, StorageDiff>> + Send>,
        > + Send
        + 'static,
{
    /// The collection that we are writing to.
    id: GlobalId,

    /// Write handle for the collection's persist shard.
    write_handle: WriteHandle<SourceData, (), Timestamp, StorageDiff>,

    /// For getting a [`ReadHandle`] to sync our state to persist contents.
    read_handle_fn: R,

    /// When true, we must not write to persist (see module docs on read-only
    /// mode).
    read_only: bool,

    /// Source of wall-clock time for bumping the upper.
    now: NowFn,

    /// In the absence of updates, we regularly bump the upper to "now", on this
    /// interval. This makes it so the collection remains readable at recent
    /// timestamps.
    upper_tick_interval: tokio::time::Interval,

    /// Receiver for write commands. These change our desired state.
    cmd_rx: mpsc::UnboundedReceiver<(StorageWriteOp, oneshot::Sender<Result<(), StorageError>>)>,

    /// We have to shut down when receiving from this.
    shutdown_rx: oneshot::Receiver<()>,

    /// The contents of the collection as it should be according to whoever is
    /// driving us around.
    // This is memory inefficient: we always keep a full copy of
    // desired, so that we can re-derive a to_write if/when someone else
    // writes to persist and we notice because of an upper conflict.
    // This is optimized for the case where we rarely have more than one
    // writer.
    //
    // We can optimize for a multi-writer case by keeping an open
    // ReadHandle and continually reading updates from persist, updating
    // a desired in place. Similar to the self-correcting persist_sink.
    desired: Vec<(Row, Diff)>,

    /// Updates that we have to write when next writing to persist. This is
    /// determined by looking at what is desired and what is in persist.
    to_write: Vec<(Row, Diff)>,

    /// Current upper of the persist shard. We keep track of this so that we
    /// realize when someone else writes to the shard, in which case we have to
    /// update our state of the world, that is update our `to_write` based on
    /// `desired` and the contents of the persist shard.
    current_upper: Timestamp,
}
537
538impl<R> DifferentialWriteTask<R>
539where
540    R: FnMut() -> Pin<
541            Box<dyn Future<Output = ReadHandle<SourceData, (), Timestamp, StorageDiff>> + Send>,
542        > + Send
543        + Sync
544        + 'static,
545{
546    /// Spawns a [`DifferentialWriteTask`] in an [`mz_ore::task`] and returns
547    /// handles for interacting with it.
548    fn spawn(
549        id: GlobalId,
550        write_handle: WriteHandle<SourceData, (), Timestamp, StorageDiff>,
551        read_handle_fn: R,
552        read_only: bool,
553        now: NowFn,
554        introspection_config: DifferentialIntrospectionConfig,
555    ) -> (DifferentialWriteChannel, WriteTask, ShutdownSender) {
556        let (tx, rx) = mpsc::unbounded_channel();
557        let (shutdown_tx, shutdown_rx) = oneshot::channel();
558
559        let upper_tick_interval = tokio::time::interval(Duration::from_millis(DEFAULT_TICK_MS));
560
561        let task = Self {
562            id,
563            write_handle,
564            read_handle_fn,
565            read_only,
566            now,
567            upper_tick_interval,
568            cmd_rx: rx,
569            shutdown_rx,
570            desired: Vec::new(),
571            to_write: Vec::new(),
572            current_upper: Timestamp::MIN,
573        };
574
575        let handle = mz_ore::task::spawn(
576            || format!("CollectionManager-differential_write_task-{id}"),
577            async move {
578                if !task.read_only {
579                    task.prepare(introspection_config).await;
580                }
581                let res = task.run().await;
582
583                match res {
584                    ControlFlow::Break(reason) => {
585                        info!("write_task-{} ending: {}", id, reason);
586                    }
587                    c @ ControlFlow::Continue(_) => {
588                        unreachable!(
589                            "cannot break out of the loop with a Continue, but got: {:?}",
590                            c
591                        );
592                    }
593                }
594            },
595        );
596
597        (tx, handle.abort_on_drop(), shutdown_tx)
598    }
599
600    /// Does any work that is required before this background task starts
601    /// writing to the given introspection collection.
602    ///
603    /// This might include consolidation, deleting older entries or seeding
604    /// in-memory state of, say, scrapers, with current collection contents.
605    async fn prepare(&self, introspection_config: DifferentialIntrospectionConfig) {
606        tracing::info!(%self.id, ?introspection_config.introspection_type, "preparing differential introspection collection for writes");
607
608        match introspection_config.introspection_type {
609            IntrospectionType::ShardMapping => {
610                // Done by the `append_shard_mappings` call.
611            }
612            IntrospectionType::Frontiers | IntrospectionType::ReplicaFrontiers => {
613                // Differential collections start with an empty
614                // desired state. No need to manually reset.
615            }
616            IntrospectionType::StorageSourceStatistics => {
617                let prev = snapshot_statistics(
618                    self.id,
619                    introspection_config.recent_upper,
620                    &introspection_config.storage_collections,
621                )
622                .await;
623
624                let scraper_token = statistics::spawn_statistics_scraper(
625                    self.id.clone(),
626                    // These do a shallow copy.
627                    introspection_config.collection_manager,
628                    Arc::clone(&introspection_config.source_statistics),
629                    prev,
630                    introspection_config.statistics_interval.clone(),
631                    introspection_config.statistics_interval_receiver.clone(),
632                    introspection_config.statistics_retention_duration,
633                    introspection_config.metrics,
634                );
635                let web_token = statistics::spawn_webhook_statistics_scraper(
636                    introspection_config.source_statistics,
637                    introspection_config.statistics_interval,
638                    introspection_config.statistics_interval_receiver,
639                );
640
641                // Make sure these are dropped when the controller is
642                // dropped, so that the internal task will stop.
643                introspection_config
644                    .introspection_tokens
645                    .lock()
646                    .expect("poisoned")
647                    .insert(self.id, Box::new((scraper_token, web_token)));
648            }
649            IntrospectionType::StorageSinkStatistics => {
650                let prev = snapshot_statistics(
651                    self.id,
652                    introspection_config.recent_upper,
653                    &introspection_config.storage_collections,
654                )
655                .await;
656
657                let scraper_token = statistics::spawn_statistics_scraper(
658                    self.id.clone(),
659                    introspection_config.collection_manager,
660                    Arc::clone(&introspection_config.sink_statistics),
661                    prev,
662                    introspection_config.statistics_interval,
663                    introspection_config.statistics_interval_receiver,
664                    introspection_config.statistics_retention_duration,
665                    introspection_config.metrics,
666                );
667
668                // Make sure this is dropped when the controller is
669                // dropped, so that the internal task will stop.
670                introspection_config
671                    .introspection_tokens
672                    .lock()
673                    .expect("poisoned")
674                    .insert(self.id, scraper_token);
675            }
676
677            IntrospectionType::ComputeDependencies
678            | IntrospectionType::ComputeOperatorHydrationStatus
679            | IntrospectionType::ComputeMaterializedViewRefreshes
680            | IntrospectionType::ComputeErrorCounts
681            | IntrospectionType::ComputeHydrationTimes => {
682                // Differential collections start with an empty
683                // desired state. No need to manually reset.
684            }
685
686            introspection_type @ IntrospectionType::ReplicaMetricsHistory
687            | introspection_type @ IntrospectionType::WallclockLagHistory
688            | introspection_type @ IntrospectionType::WallclockLagHistogram
689            | introspection_type @ IntrospectionType::PreparedStatementHistory
690            | introspection_type @ IntrospectionType::StatementExecutionHistory
691            | introspection_type @ IntrospectionType::SessionHistory
692            | introspection_type @ IntrospectionType::StatementLifecycleHistory
693            | introspection_type @ IntrospectionType::SqlText
694            | introspection_type @ IntrospectionType::SourceStatusHistory
695            | introspection_type @ IntrospectionType::SinkStatusHistory
696            | introspection_type @ IntrospectionType::PrivatelinkConnectionStatusHistory
697            | introspection_type @ IntrospectionType::ReplicaStatusHistory => {
698                unreachable!("not differential collection: {introspection_type:?}")
699            }
700        }
701    }
702
    /// Main loop of the write task: applies queued write commands and
    /// periodically bumps the collection's upper, until shut down.
    ///
    /// Only ever returns `ControlFlow::Break` (with a human-readable reason);
    /// `Continue` is used internally by the helpers to keep looping.
    async fn run(mut self) -> ControlFlow<String> {
        // Buffer reused across iterations for draining queued commands.
        let mut updates = Vec::new();
        loop {
            tokio::select! {
                // Prefer sending actual updates over just bumping the upper,
                // because sending updates also bump the upper.
                biased;

                // Listen for a shutdown signal so we can gracefully cleanup.
                _ = &mut self.shutdown_rx => {
                    self.handle_shutdown();

                    return ControlFlow::Break("graceful shutdown".to_string());
                }

                // Pull a chunk of queued updates off the channel.
                () = recv_all_commands(&mut self.cmd_rx, &mut updates) => {
                    if updates.is_empty() {
                        // Sender has been dropped, which means the collection
                        // should have been unregistered, break out of the run
                        // loop if we weren't already aborted.
                        return ControlFlow::Break("sender has been dropped".to_string());
                    }
                    self.handle_updates(&mut updates).await?;
                }

                // If we haven't received any updates, then we'll move the upper forward.
                _ = self.upper_tick_interval.tick() => {
                    if self.read_only {
                        // Not bumping uppers while in read-only mode.
                        continue;
                    }
                    self.tick_upper().await?;
                },
            }
        }
    }
740
741    async fn tick_upper(&mut self) -> ControlFlow<String> {
742        let now = Timestamp::from((self.now)());
743
744        if now <= self.current_upper {
745            // Upper is already further along than current wall-clock time, no
746            // need to bump it.
747            return ControlFlow::Continue(());
748        }
749
750        assert!(!self.read_only);
751        let res = self
752            .write_handle
753            .compare_and_append_batch(
754                &mut [],
755                Antichain::from_elem(self.current_upper),
756                Antichain::from_elem(now),
757                true,
758            )
759            .await
760            .expect("valid usage");
761        match res {
762            // All good!
763            Ok(()) => {
764                tracing::debug!(%self.id, "bumped upper of differential collection");
765                self.current_upper = now;
766            }
767            Err(err) => {
768                // Someone else wrote to the collection or bumped the upper. We
769                // need to sync to latest persist state and potentially patch up
770                // our `to_write`, based on what we learn and `desired`.
771
772                let actual_upper = if let Some(ts) = err.current.as_option() {
773                    *ts
774                } else {
775                    return ControlFlow::Break("upper is the empty antichain".to_string());
776                };
777
778                tracing::info!(%self.id, ?actual_upper, expected_upper = ?self.current_upper, "upper mismatch while bumping upper, syncing to persist state");
779
780                self.current_upper = actual_upper;
781
782                self.sync_to_persist().await;
783            }
784        }
785
786        ControlFlow::Continue(())
787    }
788
789    fn handle_shutdown(&mut self) {
790        let mut senders = Vec::new();
791
792        // Prevent new messages from being sent.
793        self.cmd_rx.close();
794
795        // Get as many waiting senders as possible.
796        while let Ok((_batch, sender)) = self.cmd_rx.try_recv() {
797            senders.push(sender);
798        }
799
800        // Notify them that this collection is closed.
801        //
802        // Note: if a task is shutting down, that indicates the source has been
803        // dropped, at which point the identifier is invalid. Returning this
804        // error provides a better user experience.
805        notify_listeners(senders, || Err(StorageError::IdentifierInvalid(self.id)));
806    }
807
808    async fn handle_updates(
809        &mut self,
810        batch: &mut Vec<(StorageWriteOp, oneshot::Sender<Result<(), StorageError>>)>,
811    ) -> ControlFlow<String> {
812        // Put in place _some_ rate limiting.
813        let batch_duration_ms = STORAGE_MANAGED_COLLECTIONS_BATCH_DURATION_DEFAULT;
814
815        let use_batch_now = Instant::now();
816        let min_time_to_complete = use_batch_now + batch_duration_ms;
817
818        tracing::debug!(
819            ?use_batch_now,
820            ?batch_duration_ms,
821            ?min_time_to_complete,
822            "batch duration",
823        );
824
825        let mut responders = Vec::with_capacity(batch.len());
826        for (op, tx) in batch.drain(..) {
827            self.apply_write_op(op);
828            responders.push(tx);
829        }
830
831        // TODO: Maybe don't do it every time?
832        consolidation::consolidate(&mut self.desired);
833        consolidation::consolidate(&mut self.to_write);
834
835        // Reset the interval which is used to periodically bump the uppers
836        // because the uppers will get bumped with the following update.
837        // This makes it such that we will write at most once every
838        // `interval`.
839        //
840        // For example, let's say our `DEFAULT_TICK` interval is 10, so at
841        // `t + 10`, `t + 20`, ... we'll bump the uppers. If we receive an
842        // update at `t + 3` we want to shift this window so we bump the
843        // uppers at `t + 13`, `t + 23`, ... which resetting the interval
844        // accomplishes.
845        self.upper_tick_interval.reset();
846
847        self.write_to_persist(responders).await?;
848
849        // Wait until our artificial latency has completed.
850        //
851        // Note: if writing to persist took longer than `DEFAULT_TICK` this
852        // await will resolve immediately.
853        tokio::time::sleep_until(min_time_to_complete).await;
854
855        ControlFlow::Continue(())
856    }
857
858    /// Apply the given write operation to the `desired`/`to_write` state.
859    fn apply_write_op(&mut self, op: StorageWriteOp) {
860        match op {
861            StorageWriteOp::Append { updates } => {
862                self.desired.extend_from_slice(&updates);
863                self.to_write.extend(updates);
864            }
865            StorageWriteOp::Delete { filter } => {
866                let to_delete = self.desired.extract_if(.., |(row, _)| filter(row));
867                let retractions = to_delete.map(|(row, diff)| (row, -diff));
868                self.to_write.extend(retractions);
869            }
870        }
871    }
872
    /// Attempt to write what is currently in [Self::to_write] to persist,
    /// retrying and re-syncing to persist when necessary, that is when the
    /// upper was not what we expected.
    ///
    /// Notifies all `responders` of the outcome: `Ok(())` once the write
    /// lands, or an `InvalidUppers` error if all retries are exhausted.
    /// Returns `ControlFlow::Break` when the collection's upper is the empty
    /// antichain.
    async fn write_to_persist(
        &mut self,
        responders: Vec<oneshot::Sender<Result<(), StorageError>>>,
    ) -> ControlFlow<String> {
        if self.read_only {
            tracing::debug!(%self.id, "not writing to differential collection: read-only");
            // Not attempting to write while in read-only mode.
            return ControlFlow::Continue(());
        }

        // We'll try really hard to succeed, but eventually stop.
        //
        // Note: it's very rare we should ever need to retry, and if we need to
        // retry it should only take 1 or 2 attempts. We set `max_tries` to be
        // high though because if we hit some edge case we want to try hard to
        // commit the data.
        let retries = Retry::default()
            .initial_backoff(Duration::from_secs(1))
            .clamp_backoff(Duration::from_secs(3))
            .factor(1.25)
            .max_tries(20)
            .into_retry_stream();
        let mut retries = Box::pin(retries);

        loop {
            // Append updates to persist!
            //
            // The new upper is at least one step past our current upper, and
            // further along if wall-clock time has moved past it.
            let now = Timestamp::from((self.now)());
            let new_upper = std::cmp::max(now, self.current_upper.step_forward());

            // All pending updates are written at the current upper, i.e. the
            // earliest time not yet present in the collection.
            let updates_to_write = self
                .to_write
                .iter()
                .map(|(row, diff)| {
                    (
                        (SourceData(Ok(row.clone())), ()),
                        self.current_upper,
                        diff.into_inner(),
                    )
                })
                .collect::<Vec<_>>();

            assert!(!self.read_only);
            let res = self
                .write_handle
                .compare_and_append(
                    updates_to_write,
                    Antichain::from_elem(self.current_upper),
                    Antichain::from_elem(new_upper),
                )
                .await
                .expect("valid usage");
            match res {
                // Everything was successful!
                Ok(()) => {
                    // Notify all of our listeners.
                    notify_listeners(responders, || Ok(()));

                    self.current_upper = new_upper;

                    // Very important! This is empty at steady state, while
                    // desired keeps an in-memory copy of desired state.
                    self.to_write.clear();

                    tracing::debug!(%self.id, "appended to differential collection");

                    // Break out of the retry loop so we can wait for more data.
                    break;
                }
                // Failed to write to some collections,
                Err(err) => {
                    // Someone else wrote to the collection. We need to read
                    // from persist and update to_write based on that and the
                    // desired state.
                    let actual_upper = if let Some(ts) = err.current.as_option() {
                        *ts
                    } else {
                        return ControlFlow::Break("upper is the empty antichain".to_string());
                    };

                    tracing::info!(%self.id, ?actual_upper, expected_upper = ?self.current_upper, "retrying append for differential collection");

                    // We've exhausted all of our retries, notify listeners and
                    // break out of the retry loop so we can wait for more data.
                    if retries.next().await.is_none() {
                        let invalid_upper = InvalidUpper {
                            id: self.id,
                            current_upper: err.current,
                        };
                        notify_listeners(responders, || {
                            Err(StorageError::InvalidUppers(vec![invalid_upper.clone()]))
                        });
                        error!(
                            "exhausted retries when appending to managed collection {}",
                            self.id
                        );
                        break;
                    }

                    // Adopt the actual upper and recompute `to_write` from
                    // `desired` and the collection's contents before retrying.
                    self.current_upper = actual_upper;

                    self.sync_to_persist().await;

                    debug!(
                        "Retrying invalid-uppers error while appending to differential collection {}",
                        self.id
                    );
                }
            }
        }

        ControlFlow::Continue(())
    }
988
989    /// Re-derives [Self::to_write] by looking at [Self::desired] and the
990    /// current state in persist. We want to insert everything in desired and
991    /// retract everything in persist. But ideally most of that cancels out in
992    /// consolidation.
993    ///
994    /// To be called when a `compare_and_append` failed because the upper didn't
995    /// match what we expected.
996    async fn sync_to_persist(&mut self) {
997        let mut read_handle = (self.read_handle_fn)().await;
998        let as_of = self.current_upper.step_back().unwrap_or(Timestamp::MIN);
999        let as_of = Antichain::from_elem(as_of);
1000        let snapshot = read_handle.snapshot_and_fetch(as_of).await;
1001
1002        let mut negated_oks = match snapshot {
1003            Ok(contents) => {
1004                let mut snapshot = Vec::with_capacity(contents.len());
1005                for ((data, _), _, diff) in contents {
1006                    let row = data.0.unwrap();
1007                    snapshot.push((row, -Diff::from(diff)));
1008                }
1009                snapshot
1010            }
1011            Err(e) => panic!("read before since: {e:?}"),
1012        };
1013
1014        self.to_write.clear();
1015        self.to_write.extend(self.desired.iter().cloned());
1016        self.to_write.append(&mut negated_oks);
1017        consolidation::consolidate(&mut self.to_write);
1018    }
1019}
1020
/// Configuration needed to prepare an append-only introspection collection
/// before its write task starts.
pub(crate) struct AppendOnlyIntrospectionConfig {
    /// Which introspection collection is being written to.
    pub(crate) introspection_type: IntrospectionType,
    /// Dynamic configuration; used e.g. to look up metrics-history retention
    /// intervals.
    pub(crate) config_set: Arc<ConfigSet>,
    /// Storage parameters; used e.g. to build status-history descriptions.
    pub(crate) parameters: StorageParameters,
    /// Handle to storage collections, passed to the truncation helpers during
    /// preparation.
    pub(crate) storage_collections: Arc<dyn StorageCollections + Send + Sync>,
}
1027
/// A task that writes to an append only collection and continuously bumps the upper for the specified
/// collection.
///
/// For status history collections, this task can deduplicate redundant [`Statuses`](Status).
struct AppendOnlyWriteTask {
    /// The collection that we are writing to.
    id: GlobalId,
    /// Write handle for appending to the collection's persist shard.
    write_handle: WriteHandle<SourceData, (), Timestamp, StorageDiff>,
    /// While `true`, the task neither appends updates nor bumps the upper.
    read_only: bool,
    /// Source of wall-clock timestamps for appends and upper bumps.
    now: NowFn,
    /// Artificial append latency (in milliseconds) applied to user
    /// collections; non-user collections use the default batch duration.
    user_batch_duration_ms: Arc<AtomicU64>,
    /// Receiver for write commands.
    rx: mpsc::UnboundedReceiver<(
        Vec<AppendOnlyUpdate>,
        oneshot::Sender<Result<(), StorageError>>,
    )>,

    /// We have to shut down when receiving from this.
    shutdown_rx: oneshot::Receiver<()>,
    /// If this collection deduplicates statuses, this map is used to track the previous status.
    previous_statuses: Option<BTreeMap<(GlobalId, Option<ReplicaId>), Status>>,
}
1050
1051impl AppendOnlyWriteTask {
1052    /// Spawns an [`AppendOnlyWriteTask`] in an [`mz_ore::task`] that will continuously bump the
1053    /// upper for the specified collection,
1054    /// and append data that is sent via the provided [`mpsc::UnboundedSender`].
1055    ///
1056    /// TODO(parkmycar): One day if we want to customize the tick interval for each collection, that
1057    /// should be done here.
1058    /// TODO(parkmycar): Maybe add prometheus metrics for each collection?
1059    fn spawn(
1060        id: GlobalId,
1061        write_handle: WriteHandle<SourceData, (), Timestamp, StorageDiff>,
1062        read_only: bool,
1063        now: NowFn,
1064        user_batch_duration_ms: Arc<AtomicU64>,
1065        introspection_config: Option<AppendOnlyIntrospectionConfig>,
1066    ) -> (AppendOnlyWriteChannel, WriteTask, ShutdownSender) {
1067        let (tx, rx) = mpsc::unbounded_channel();
1068        let (shutdown_tx, shutdown_rx) = oneshot::channel();
1069
1070        let previous_statuses: Option<BTreeMap<(GlobalId, Option<ReplicaId>), Status>> =
1071            match introspection_config
1072                .as_ref()
1073                .map(|config| config.introspection_type)
1074            {
1075                Some(IntrospectionType::SourceStatusHistory)
1076                | Some(IntrospectionType::SinkStatusHistory) => Some(BTreeMap::new()),
1077
1078                Some(IntrospectionType::ReplicaMetricsHistory)
1079                | Some(IntrospectionType::WallclockLagHistory)
1080                | Some(IntrospectionType::WallclockLagHistogram)
1081                | Some(IntrospectionType::PrivatelinkConnectionStatusHistory)
1082                | Some(IntrospectionType::ReplicaStatusHistory)
1083                | Some(IntrospectionType::PreparedStatementHistory)
1084                | Some(IntrospectionType::StatementExecutionHistory)
1085                | Some(IntrospectionType::SessionHistory)
1086                | Some(IntrospectionType::StatementLifecycleHistory)
1087                | Some(IntrospectionType::SqlText)
1088                | None => None,
1089
1090                Some(introspection_type @ IntrospectionType::ShardMapping)
1091                | Some(introspection_type @ IntrospectionType::Frontiers)
1092                | Some(introspection_type @ IntrospectionType::ReplicaFrontiers)
1093                | Some(introspection_type @ IntrospectionType::StorageSourceStatistics)
1094                | Some(introspection_type @ IntrospectionType::StorageSinkStatistics)
1095                | Some(introspection_type @ IntrospectionType::ComputeDependencies)
1096                | Some(introspection_type @ IntrospectionType::ComputeOperatorHydrationStatus)
1097                | Some(introspection_type @ IntrospectionType::ComputeMaterializedViewRefreshes)
1098                | Some(introspection_type @ IntrospectionType::ComputeErrorCounts)
1099                | Some(introspection_type @ IntrospectionType::ComputeHydrationTimes) => {
1100                    unreachable!("not append-only collection: {introspection_type:?}")
1101                }
1102            };
1103
1104        let mut task = Self {
1105            id,
1106            write_handle,
1107            rx,
1108            shutdown_rx,
1109            read_only,
1110            now,
1111            user_batch_duration_ms,
1112            previous_statuses,
1113        };
1114
1115        let handle = mz_ore::task::spawn(
1116            || format!("CollectionManager-append_only_write_task-{id}"),
1117            async move {
1118                if !task.read_only {
1119                    task.prepare(introspection_config).await;
1120                }
1121                task.run().await;
1122            },
1123        );
1124
1125        (tx, handle.abort_on_drop(), shutdown_tx)
1126    }
1127
    /// Does any work that is required before the background task starts
    /// writing to the given append only introspection collection.
    ///
    /// This might include consolidation or deleting older entries.
    ///
    /// For status-history collections this also reads back the most recent
    /// status per object and seeds `previous_statuses` with it, so status
    /// deduplication has a starting point.
    async fn prepare(&mut self, introspection_config: Option<AppendOnlyIntrospectionConfig>) {
        // Collections without introspection config need no preparation.
        let Some(AppendOnlyIntrospectionConfig {
            introspection_type,
            config_set,
            parameters,
            storage_collections,
        }) = introspection_config
        else {
            return;
        };
        let initial_statuses = match introspection_type {
            // Metrics histories: truncate entries older than the configured
            // retention interval.
            IntrospectionType::ReplicaMetricsHistory
            | IntrospectionType::WallclockLagHistory
            | IntrospectionType::WallclockLagHistogram => {
                let result = partially_truncate_metrics_history(
                    self.id,
                    introspection_type,
                    &mut self.write_handle,
                    config_set,
                    self.now.clone(),
                    storage_collections,
                )
                .await;
                // Truncation failure is logged (or soft-panics in dev builds)
                // but does not prevent the task from starting.
                if let Err(error) = result {
                    soft_panic_or_log!(
                        "error truncating metrics history: {error} (type={introspection_type:?})"
                    );
                }
                Vec::new()
            }

            IntrospectionType::PrivatelinkConnectionStatusHistory => {
                partially_truncate_status_history(
                    self.id,
                    IntrospectionType::PrivatelinkConnectionStatusHistory,
                    &mut self.write_handle,
                    privatelink_status_history_desc(&parameters),
                    self.now.clone(),
                    &storage_collections,
                )
                .await;
                Vec::new()
            }
            IntrospectionType::ReplicaStatusHistory => {
                partially_truncate_status_history(
                    self.id,
                    IntrospectionType::ReplicaStatusHistory,
                    &mut self.write_handle,
                    replica_status_history_desc(&parameters),
                    self.now.clone(),
                    &storage_collections,
                )
                .await;
                Vec::new()
            }

            // Note [btv] - we don't truncate these, because that uses
            // a huge amount of memory on environmentd startup.
            IntrospectionType::PreparedStatementHistory
            | IntrospectionType::StatementExecutionHistory
            | IntrospectionType::SessionHistory
            | IntrospectionType::StatementLifecycleHistory
            | IntrospectionType::SqlText => {
                // NOTE(aljoscha): We never remove from these
                // collections. Someone, at some point needs to
                // think about that! Issue:
                // https://github.com/MaterializeInc/database-issues/issues/7666
                Vec::new()
            }

            IntrospectionType::SourceStatusHistory => {
                // Truncate old history; the helper returns the latest
                // retained row per source, which seeds deduplication below.
                let last_status_per_id = partially_truncate_status_history(
                    self.id,
                    IntrospectionType::SourceStatusHistory,
                    &mut self.write_handle,
                    source_status_history_desc(&parameters),
                    self.now.clone(),
                    &storage_collections,
                )
                .await;

                // Position of the "status" column in the history's relation
                // description.
                let status_col = MZ_SOURCE_STATUS_HISTORY_DESC
                    .get_by_name(&ColumnName::from("status"))
                    .expect("schema has not changed")
                    .0;

                // Parse the status string out of each retained row.
                last_status_per_id
                    .into_iter()
                    .map(|(id, row)| {
                        (
                            id,
                            Status::from_str(
                                row.iter()
                                    .nth(status_col)
                                    .expect("schema has not changed")
                                    .unwrap_str(),
                            )
                            .expect("statuses must be uncorrupted"),
                        )
                    })
                    .collect()
            }
            IntrospectionType::SinkStatusHistory => {
                // Same as the source case above, but for sinks.
                let last_status_per_id = partially_truncate_status_history(
                    self.id,
                    IntrospectionType::SinkStatusHistory,
                    &mut self.write_handle,
                    sink_status_history_desc(&parameters),
                    self.now.clone(),
                    &storage_collections,
                )
                .await;

                let status_col = MZ_SINK_STATUS_HISTORY_DESC
                    .get_by_name(&ColumnName::from("status"))
                    .expect("schema has not changed")
                    .0;

                last_status_per_id
                    .into_iter()
                    .map(|(id, row)| {
                        (
                            id,
                            Status::from_str(
                                row.iter()
                                    .nth(status_col)
                                    .expect("schema has not changed")
                                    .unwrap_str(),
                            )
                            .expect("statuses must be uncorrupted"),
                        )
                    })
                    .collect()
            }

            // These are not append-only collections; reaching this arm is a
            // bug in the caller.
            introspection_type @ IntrospectionType::ShardMapping
            | introspection_type @ IntrospectionType::Frontiers
            | introspection_type @ IntrospectionType::ReplicaFrontiers
            | introspection_type @ IntrospectionType::StorageSourceStatistics
            | introspection_type @ IntrospectionType::StorageSinkStatistics
            | introspection_type @ IntrospectionType::ComputeDependencies
            | introspection_type @ IntrospectionType::ComputeOperatorHydrationStatus
            | introspection_type @ IntrospectionType::ComputeMaterializedViewRefreshes
            | introspection_type @ IntrospectionType::ComputeErrorCounts
            | introspection_type @ IntrospectionType::ComputeHydrationTimes => {
                unreachable!("not append-only collection: {introspection_type:?}")
            }
        };
        if let Some(previous_statuses) = &mut self.previous_statuses {
            previous_statuses.extend(initial_statuses);
        }
    }
1284
    /// Drives the append-only write task: appends queued updates, bumps the
    /// collection's upper on a timer, and cleans up on shutdown.
    async fn run(mut self) {
        let mut interval = tokio::time::interval(Duration::from_millis(DEFAULT_TICK_MS));

        // Buffer reused across loop iterations for draining queued commands.
        let mut batch: Vec<(Vec<_>, _)> = Vec::new();

        'run: loop {
            tokio::select! {
                // Prefer sending actual updates over just bumping the upper, because sending
                // updates also bumps the upper.
                biased;

                // Listen for a shutdown signal so we can gracefully cleanup.
                _ = &mut self.shutdown_rx => {
                    let mut senders = Vec::new();

                    // Prevent new messages from being sent.
                    self.rx.close();

                    // Get as many waiting senders as possible.
                    while let Ok((_batch, sender)) = self.rx.try_recv() {
                        senders.push(sender);
                    }

                    // Notify them that this collection is closed.
                    //
                    // Note: if a task is shutting down, that indicates the source has been
                    // dropped, at which point the identifier is invalid. Returning this
                    // error provides a better user experience.
                    notify_listeners(senders, || Err(StorageError::IdentifierInvalid(self.id)));

                    break 'run;
                }

                // Pull a chunk of queued updates off the channel.
                () = recv_all_commands(&mut self.rx, &mut batch) => {
                    if batch.is_empty() {
                        // Sender has been dropped, which means the collection should have been
                        // unregistered, break out of the run loop if we weren't already
                        // aborted.
                        break 'run;
                    }

                    // To rate limit appends to persist we add artificial latency, and will
                    // finish no sooner than this instant.
                    let batch_duration_ms = match self.id {
                        GlobalId::User(_) => Duration::from_millis(
                            self.user_batch_duration_ms.load(Ordering::Relaxed),
                        ),
                        // For non-user collections, always just use the default.
                        _ => STORAGE_MANAGED_COLLECTIONS_BATCH_DURATION_DEFAULT,
                    };
                    let use_batch_now = Instant::now();
                    let min_time_to_complete = use_batch_now + batch_duration_ms;

                    tracing::debug!(
                        ?use_batch_now,
                        ?batch_duration_ms,
                        ?min_time_to_complete,
                        "batch duration",
                    );

                    // Reset the interval which is used to periodically bump the uppers
                    // because the uppers will get bumped with the following update. This
                    // makes it such that we will write at most once every `interval`.
                    //
                    // For example, let's say our `DEFAULT_TICK` interval is 10, so at
                    // `t + 10`, `t + 20`, ... we'll bump the uppers. If we receive an
                    // update at `t + 3` we want to shift this window so we bump the uppers
                    // at `t + 13`, `t + 23`, ... which resetting the interval accomplishes.
                    interval.reset();

                    // Pre-size the row buffer from the total number of queued rows.
                    let capacity: usize = batch
                        .iter()
                        .map(|(rows, _)| rows.len())
                        .sum();
                    let mut all_rows = Vec::with_capacity(capacity);
                    let mut responders = Vec::with_capacity(batch.len());

                    // Deduplicate statuses and convert the updates to rows/diffs.
                    for (updates, responder) in batch.drain(..) {
                        let rows = self.process_updates(updates);

                        all_rows.extend(
                            rows.map(|(row, diff)| TimestamplessUpdate { row, diff }),
                        );
                        responders.push(responder);
                    }

                    if self.read_only {
                        // We must not write in read-only mode; fail the append so
                        // callers learn about it.
                        tracing::warn!(%self.id, ?all_rows, "append while in read-only mode");
                        notify_listeners(responders, || Err(StorageError::ReadOnly));
                        continue;
                    }

                    // Append updates to persist!
                    let at_least = Timestamp::from((self.now)());

                    if !all_rows.is_empty() {
                        monotonic_append(&mut self.write_handle, all_rows, at_least).await;
                    }
                    // Notify all of our listeners.
                    notify_listeners(responders, || Ok(()));

                    // Wait until our artificial latency has completed.
                    //
                    // Note: if writing to persist took longer than `DEFAULT_TICK` this
                    // await will resolve immediately.
                    tokio::time::sleep_until(min_time_to_complete).await;
                }

                // If we haven't received any updates, then we'll move the upper forward.
                _ = interval.tick() => {
                    if self.read_only {
                        // Not bumping uppers while in read-only mode.
                        continue;
                    }

                    // Update our collection.
                    let now = Timestamp::from((self.now)());
                    let updates = vec![];
                    let at_least = now;

                    // Failures don't matter when advancing collections' uppers. This might
                    // fail when a clusterd happens to be writing to this concurrently.
                    // Advancing uppers here is best-effort and only needs to succeed if no
                    // one else is advancing it; contention proves otherwise.
                    monotonic_append(&mut self.write_handle, updates, at_least).await;
                },
            }
        }

        info!("write_task-{} ending", self.id);
    }
1417
1418    /// Deduplicate any [`mz_storage_client::client::StatusUpdate`] within `updates` and converts
1419    /// `updates` to rows and diffs.
1420    fn process_updates(
1421        &mut self,
1422        updates: Vec<AppendOnlyUpdate>,
1423    ) -> impl Iterator<Item = (Row, Diff)> {
1424        let updates = if let Some(previous_statuses) = &mut self.previous_statuses {
1425            let new: Vec<_> = updates
1426                .into_iter()
1427                .filter(|r| match r {
1428                    AppendOnlyUpdate::Row(_) => true,
1429                    AppendOnlyUpdate::Status(update) => {
1430                        match (
1431                            previous_statuses
1432                                .get(&(update.id, update.replica_id))
1433                                .as_deref(),
1434                            &update.status,
1435                        ) {
1436                            (None, _) => true,
1437                            (Some(old), new) => old.superseded_by(*new),
1438                        }
1439                    }
1440                })
1441                .collect();
1442            previous_statuses.extend(new.iter().filter_map(|update| match update {
1443                AppendOnlyUpdate::Row(_) => None,
1444                AppendOnlyUpdate::Status(update) => {
1445                    Some(((update.id, update.replica_id), update.status))
1446                }
1447            }));
1448            new
1449        } else {
1450            updates
1451        };
1452
1453        updates.into_iter().map(AppendOnlyUpdate::into_row)
1454    }
1455}
1456
/// Truncates the given metrics history by removing all entries older than that history's
/// configured retention interval.
///
/// Retractions are appended at the timestamp directly after the snapshot
/// timestamp via `compare_and_append_batch`, so a concurrent writer causes
/// this attempt to fail with an error instead of silently racing.
///
/// # Errors
///
/// Returns an error if the collection is sealed, if the snapshot cannot be
/// read, or if another writer concurrently advanced the upper.
///
/// # Panics
///
/// Panics if `introspection_type` is not a metrics history.
async fn partially_truncate_metrics_history(
    id: GlobalId,
    introspection_type: IntrospectionType,
    write_handle: &mut WriteHandle<SourceData, (), Timestamp, StorageDiff>,
    config_set: Arc<ConfigSet>,
    now: NowFn,
    storage_collections: Arc<dyn StorageCollections + Send + Sync>,
) -> Result<(), anyhow::Error> {
    // Per history type: how long entries are retained, and which column holds
    // the event time that the retention window is applied to.
    let (keep_duration, occurred_at_col) = match introspection_type {
        IntrospectionType::ReplicaMetricsHistory => (
            REPLICA_METRICS_HISTORY_RETENTION_INTERVAL.get(&config_set),
            REPLICA_METRICS_HISTORY_DESC
                .get_by_name(&ColumnName::from("occurred_at"))
                .expect("schema has not changed")
                .0,
        ),
        IntrospectionType::WallclockLagHistory => (
            WALLCLOCK_LAG_HISTORY_RETENTION_INTERVAL.get(&config_set),
            WALLCLOCK_LAG_HISTORY_DESC
                .get_by_name(&ColumnName::from("occurred_at"))
                .expect("schema has not changed")
                .0,
        ),
        IntrospectionType::WallclockLagHistogram => (
            WALLCLOCK_GLOBAL_LAG_HISTOGRAM_RETENTION_INTERVAL.get(&config_set),
            WALLCLOCK_GLOBAL_LAG_HISTOGRAM_RAW_DESC
                .get_by_name(&ColumnName::from("period_start"))
                .expect("schema has not changed")
                .0,
        ),
        _ => panic!("not a metrics history: {introspection_type:?}"),
    };

    let upper = write_handle.fetch_recent_upper().await;
    let Some(upper_ts) = upper.as_option() else {
        // An empty upper means the collection can never be written to again.
        bail!("collection is sealed");
    };
    // Read at the time just before the upper, i.e. the most recently written
    // readable time. If the upper cannot be stepped back, nothing has been
    // written yet.
    let Some(as_of_ts) = upper_ts.step_back() else {
        return Ok(()); // nothing to truncate
    };

    let mut rows = storage_collections
        .snapshot_cursor(id, as_of_ts)
        .await
        .map_err(|e| anyhow!("reading snapshot: {e:?}"))?;

    let now = mz_ore::now::to_datetime(now());
    let keep_since = now - keep_duration;

    // It is very important that we append our retractions at the timestamp
    // right after the timestamp at which we got our snapshot. Otherwise,
    // it's possible for someone else to sneak in retractions or other
    // unexpected changes.
    let old_upper_ts = *upper_ts;
    let new_upper_ts = old_upper_ts.step_forward();

    // Produce retractions by inverting diffs of rows we want to delete.
    let mut builder = write_handle.builder(Antichain::from_elem(old_upper_ts));
    while let Some(chunk) = rows.next().await {
        for (data, _t, diff) in chunk {
            // Skip error rows; we only retract well-formed data.
            let Ok(row) = &data.0 else { continue };
            let datums = row.unpack();
            let occurred_at = datums[occurred_at_col].unwrap_timestamptz();
            if *occurred_at >= keep_since {
                // Still inside the retention window; keep it.
                continue;
            }
            let diff = -diff;
            match builder.add(&data, &(), &old_upper_ts, &diff).await? {
                Added::Record => {}
                Added::RecordAndParts => {
                    debug!(?id, "added part to builder");
                }
            }
        }
    }

    let mut updates = builder.finish(Antichain::from_elem(new_upper_ts)).await?;
    let mut batches = vec![&mut updates];

    // Append from exactly `old_upper_ts` to `new_upper_ts`; any concurrent
    // write invalidates the expected upper and surfaces as an error here.
    write_handle
        .compare_and_append_batch(
            batches.as_mut_slice(),
            Antichain::from_elem(old_upper_ts),
            Antichain::from_elem(new_upper_ts),
            true,
        )
        .await
        .expect("valid usage")
        .map_err(|e| anyhow!("appending retractions: {e:?}"))
}
1553
/// Effectively truncates the status history shard based on its retention policy.
///
/// NOTE: The history collections are really append-only collections, but
/// every-now-and-then we want to retract old updates so that the collection
/// does not grow unboundedly. Crucially, these are _not_ incremental
/// collections, they are not derived from a state at some time `t` and we
/// cannot maintain a desired state for them.
///
/// Unlike the metrics-history truncation, a failed append here is tolerated
/// and merely logged: it means someone else concurrently advanced the upper.
///
/// Returns a map with latest unpacked row per key.
pub(crate) async fn partially_truncate_status_history<K>(
    id: GlobalId,
    introspection_type: IntrospectionType,
    write_handle: &mut WriteHandle<SourceData, (), Timestamp, StorageDiff>,
    status_history_desc: StatusHistoryDesc<K>,
    now: NowFn,
    storage_collections: &Arc<dyn StorageCollections + Send + Sync>,
) -> BTreeMap<K, Row>
where
    K: Clone + Debug + Ord + Send + Sync,
{
    let upper = write_handle.fetch_recent_upper().await.clone();

    let mut rows = match upper.as_option() {
        Some(f) if f > &Timestamp::MIN => {
            // Read at the time just before the upper, i.e. the most recently
            // written readable time.
            let as_of = f.step_back().unwrap();

            storage_collections
                .snapshot_cursor(id, as_of)
                .await
                .expect("snapshot succeeds")
        }
        // If collection is closed or the frontier is the minimum, we cannot
        // or don't need to truncate (respectively).
        _ => return BTreeMap::new(),
    };

    // BTreeMap to keep track of the row with the latest timestamp for each key.
    let mut latest_row_per_key: BTreeMap<K, (CheckedTimestamp<DateTime<Utc>>, Row)> =
        BTreeMap::new();

    // It is very important that we append our retractions at the timestamp
    // right after the timestamp at which we got our snapshot. Otherwise,
    // it's possible for someone else to sneak in retractions or other
    // unexpected changes.
    let expected_upper = upper.into_option().expect("checked above");
    let new_upper = expected_upper.step_forward();

    // Batch builder that collects the retractions we want to append.
    let mut deletions = write_handle.builder(Antichain::from_elem(expected_upper));

    // Shared per-row bookkeeping for both retention policies: extracts the
    // key and event time from a row and records the latest row per key.
    let mut handle_row = {
        let latest_row_per_key = &mut latest_row_per_key;
        move |row: &Row, diff| {
            let datums = row.unpack();
            let key = (status_history_desc.extract_key)(&datums);
            let timestamp = (status_history_desc.extract_time)(&datums);

            // The snapshot is expected to be consolidated, so negative diffs
            // would indicate a bug somewhere upstream.
            assert!(
                diff > 0,
                "only know how to operate over consolidated data with diffs > 0, \
                    found diff {diff} for object {key:?} in {introspection_type:?}",
            );

            // Keep track of the timestamp of the latest row per key.
            match latest_row_per_key.get(&key) {
                Some(existing) if &existing.0 > &timestamp => {}
                _ => {
                    latest_row_per_key.insert(key.clone(), (timestamp, row.clone()));
                }
            };
            (key, timestamp)
        }
    };

    match status_history_desc.retention_policy {
        // Keep only the `n` most recent events per key; retract the rest.
        StatusHistoryRetentionPolicy::LastN(n) => {
            // BTreeMap to track the earliest events for each key.
            let mut last_n_entries_per_key: BTreeMap<
                K,
                BinaryHeap<Reverse<(CheckedTimestamp<DateTime<Utc>>, Row)>>,
            > = BTreeMap::new();

            while let Some(chunk) = rows.next().await {
                for (data, _t, diff) in chunk {
                    // Skip error rows; we only retract well-formed data.
                    let Ok(row) = &data.0 else { continue };
                    let (key, timestamp) = handle_row(row, diff);

                    // Duplicate rows ARE possible if many status changes happen in VERY quick succession,
                    // so we handle duplicated rows separately.
                    let entries = last_n_entries_per_key.entry(key).or_default();
                    for _ in 0..diff {
                        // We CAN have multiple statuses (most likely Starting and Running) at the exact same
                        // millisecond, depending on how the `health_operator` is scheduled.
                        //
                        // Note that these will be arbitrarily ordered, so a Starting event might
                        // survive and a Running one won't. The next restart will remove the other,
                        // so we don't bother being careful about it.
                        //
                        // TODO(guswynn): unpack these into health-status objects and use
                        // their `Ord` impl.
                        entries.push(Reverse((timestamp, row.clone())));

                        // Retain some number of entries, using pop to mark the oldest entries for
                        // deletion.
                        while entries.len() > n {
                            if let Some(Reverse((_, r))) = entries.pop() {
                                deletions
                                    .add(&SourceData(Ok(r)), &(), &expected_upper, &-1)
                                    .await
                                    .expect("usage should be valid");
                            }
                        }
                    }
                }
            }
        }
        // Keep only events within the configured time window; retract the rest.
        StatusHistoryRetentionPolicy::TimeWindow(time_window) => {
            // Get the lower bound of our retention window
            let now = mz_ore::now::to_datetime(now());
            let keep_since = now - time_window;

            // Mark any row outside the retention window for deletion
            while let Some(chunk) = rows.next().await {
                for (data, _t, diff) in chunk {
                    // Skip error rows; we only retract well-formed data.
                    let Ok(row) = &data.0 else { continue };
                    let (_, timestamp) = handle_row(row, diff);

                    if *timestamp < keep_since {
                        deletions
                            .add(&data, &(), &expected_upper, &-1)
                            .await
                            .expect("usage should be valid");
                    }
                }
            }
        }
    }

    let mut updates = deletions
        .finish(Antichain::from_elem(new_upper))
        .await
        .expect("expected valid usage");
    let mut batches = vec![&mut updates];

    // Updates are only deletes because everything else is already in the shard.
    let res = write_handle
        .compare_and_append_batch(
            batches.as_mut_slice(),
            Antichain::from_elem(expected_upper),
            Antichain::from_elem(new_upper),
            true,
        )
        .await
        .expect("usage was valid");

    match res {
        Ok(_) => {
            // All good, yay!
        }
        Err(err) => {
            // This is fine, it just means the upper moved because
            // of continual upper advancement or because someone
            // already appended some more retractions/updates.
            //
            // NOTE: We might want to attempt these partial
            // retractions on an interval, instead of only when
            // starting up!
            info!(
                %id, ?expected_upper, current_upper = ?err.current,
                "failed to append partial truncation",
            );
        }
    }

    // Strip the bookkeeping timestamps; callers only want the rows.
    latest_row_per_key
        .into_iter()
        .map(|(key, (_, row))| (key, row))
        .collect()
}
1732
1733async fn monotonic_append(
1734    write_handle: &mut WriteHandle<SourceData, (), Timestamp, StorageDiff>,
1735    updates: Vec<TimestamplessUpdate>,
1736    at_least: Timestamp,
1737) {
1738    let mut expected_upper = write_handle.shared_upper();
1739    loop {
1740        if updates.is_empty() && expected_upper.is_empty() {
1741            // Ignore timestamp advancement for
1742            // closed collections. TODO? Make this a
1743            // correctable error
1744            return;
1745        }
1746
1747        let upper = expected_upper
1748            .into_option()
1749            .expect("cannot append data to closed collection");
1750
1751        let lower = std::cmp::max(upper, at_least);
1752        let new_upper = lower.step_forward();
1753        let updates = updates
1754            .iter()
1755            .map(|TimestamplessUpdate { row, diff }| {
1756                ((SourceData(Ok(row.clone())), ()), lower, diff.into_inner())
1757            })
1758            .collect::<Vec<_>>();
1759        let res = write_handle
1760            .compare_and_append(
1761                updates,
1762                Antichain::from_elem(upper),
1763                Antichain::from_elem(new_upper),
1764            )
1765            .await
1766            .expect("valid usage");
1767        match res {
1768            Ok(()) => return,
1769            Err(err) => {
1770                expected_upper = err.current;
1771                continue;
1772            }
1773        }
1774    }
1775}
1776
1777// Helper method for notifying listeners.
1778fn notify_listeners<T>(
1779    responders: impl IntoIterator<Item = oneshot::Sender<T>>,
1780    result: impl Fn() -> T,
1781) {
1782    for r in responders {
1783        // We don't care if the listener disappeared.
1784        let _ = r.send(result());
1785    }
1786}
1787
1788/// Receive all currently enqueued messages from a task command channel.
1789///
1790/// If no messages are initially enqueued, this function blocks until messages become available or
1791/// the channel is closed.
1792///
1793/// If the `out` buffer has a large amount of free capacity at the end of this operation, it is
1794/// shrunk to reclaim some of its memory.
1795///
1796/// # Cancel safety
1797///
1798/// This function is cancel safe. It only awaits `UnboundedReceiver::recv`, which is itself cancel
1799/// safe.
1800async fn recv_all_commands<T>(rx: &mut mpsc::UnboundedReceiver<T>, out: &mut Vec<T>) {
1801    if let Some(msg) = rx.recv().await {
1802        out.push(msg);
1803    } else {
1804        return; // channel closed
1805    };
1806
1807    out.reserve(rx.len());
1808    while let Ok(msg) = rx.try_recv() {
1809        out.push(msg);
1810    }
1811
1812    // We may have the opportunity to reclaim allocated memory.
1813    // Given that `push` will at most double the capacity when the vector is more than half full,
1814    // and we want to avoid entering into a resizing cycle, we choose to only shrink if the
1815    // vector's length is less than one fourth of its capacity.
1816    if out.capacity() > out.len() * 4 {
1817        out.shrink_to_fit();
1818    }
1819}
1820
#[cfg(test)]
mod tests {
    use std::collections::BTreeSet;

    use super::*;
    use itertools::Itertools;
    use mz_repr::{Datum, Row};
    use mz_storage_client::client::StatusUpdate;
    use mz_storage_client::healthcheck::{
        MZ_SINK_STATUS_HISTORY_DESC, MZ_SOURCE_STATUS_HISTORY_DESC,
    };

    /// Asserts that every datum in `row` is a valid instance of the
    /// corresponding column type in both the sink and the source status
    /// history relation descriptions.
    fn assert_conforms_to_status_descs(row: &Row) {
        for (datum, column_type) in row.iter().zip_eq(MZ_SINK_STATUS_HISTORY_DESC.iter_types()) {
            assert!(datum.is_instance_of_sql(column_type));
        }

        for (datum, column_type) in row
            .iter()
            .zip_eq(MZ_SOURCE_STATUS_HISTORY_DESC.iter_types())
        {
            assert!(datum.is_instance_of_sql(column_type));
        }
    }

    /// Asserts the id (column 1), status (column 2), and error (column 3)
    /// columns of `row`. `error` of `None` asserts a SQL NULL error column.
    fn assert_common_columns(row: &Row, id: &GlobalId, status: Status, error: Option<&str>) {
        assert_eq!(row.iter().nth(1).unwrap(), Datum::String(&id.to_string()));
        assert_eq!(row.iter().nth(2).unwrap(), Datum::String(status.to_str()));
        match error {
            Some(e) => assert_eq!(row.iter().nth(3).unwrap(), Datum::String(e)),
            None => assert_eq!(row.iter().nth(3).unwrap(), Datum::Null),
        }
    }

    /// Unpacks the `details` map column (column 4) of `row` into (key, value)
    /// pairs.
    fn unpack_details<'a>(row: &'a Row) -> Vec<(&'a str, Datum<'a>)> {
        row.iter()
            .nth(4)
            .unwrap()
            .unwrap_map()
            .iter()
            .collect::<Vec<_>>()
    }

    #[mz_ore::test]
    fn test_row() {
        let error_message = "error message";
        let hint = "hint message";
        let id = GlobalId::User(1);
        let status = Status::Dropped;
        let row = Row::from(StatusUpdate {
            id,
            timestamp: chrono::offset::Utc::now(),
            status,
            error: Some(error_message.to_string()),
            hints: BTreeSet::from([hint.to_string()]),
            namespaced_errors: Default::default(),
            replica_id: None,
        });

        assert_conforms_to_status_descs(&row);
        assert_common_columns(&row, &id, status, Some(error_message));

        let details = unpack_details(&row);
        assert_eq!(details.len(), 1);
        let hint_datum = &details[0];

        assert_eq!(hint_datum.0, "hints");
        assert_eq!(
            hint_datum.1.unwrap_list().iter().next().unwrap(),
            Datum::String(hint)
        );
    }

    #[mz_ore::test]
    fn test_row_without_hint() {
        let error_message = "error message";
        let id = GlobalId::User(1);
        let status = Status::Dropped;
        let row = Row::from(StatusUpdate {
            id,
            timestamp: chrono::offset::Utc::now(),
            status,
            error: Some(error_message.to_string()),
            hints: Default::default(),
            namespaced_errors: Default::default(),
            replica_id: None,
        });

        assert_conforms_to_status_descs(&row);
        assert_common_columns(&row, &id, status, Some(error_message));

        // With neither hints nor namespaced errors, details is NULL.
        assert_eq!(row.iter().nth(4).unwrap(), Datum::Null);
    }

    #[mz_ore::test]
    fn test_row_without_error() {
        let id = GlobalId::User(1);
        let status = Status::Dropped;
        let hint = "hint message";
        let row = Row::from(StatusUpdate {
            id,
            timestamp: chrono::offset::Utc::now(),
            status,
            error: None,
            hints: BTreeSet::from([hint.to_string()]),
            namespaced_errors: Default::default(),
            replica_id: None,
        });

        assert_conforms_to_status_descs(&row);
        assert_common_columns(&row, &id, status, None);

        let details = unpack_details(&row);
        assert_eq!(details.len(), 1);
        let hint_datum = &details[0];

        assert_eq!(hint_datum.0, "hints");
        assert_eq!(
            hint_datum.1.unwrap_list().iter().next().unwrap(),
            Datum::String(hint)
        );
    }

    #[mz_ore::test]
    fn test_row_with_namespaced() {
        let error_message = "error message";
        let id = GlobalId::User(1);
        let status = Status::Dropped;
        let row = Row::from(StatusUpdate {
            id,
            timestamp: chrono::offset::Utc::now(),
            status,
            error: Some(error_message.to_string()),
            hints: Default::default(),
            namespaced_errors: BTreeMap::from([("thing".to_string(), "error".to_string())]),
            replica_id: None,
        });

        assert_conforms_to_status_descs(&row);
        assert_common_columns(&row, &id, status, Some(error_message));

        let details = unpack_details(&row);
        assert_eq!(details.len(), 1);
        let ns_datum = &details[0];

        assert_eq!(ns_datum.0, "namespaced");
        assert_eq!(
            ns_datum.1.unwrap_map().iter().next().unwrap(),
            ("thing", Datum::String("error"))
        );
    }

    #[mz_ore::test]
    fn test_row_with_everything() {
        let error_message = "error message";
        let hint = "hint message";
        let id = GlobalId::User(1);
        let status = Status::Dropped;
        let row = Row::from(StatusUpdate {
            id,
            timestamp: chrono::offset::Utc::now(),
            status,
            error: Some(error_message.to_string()),
            hints: BTreeSet::from([hint.to_string()]),
            namespaced_errors: BTreeMap::from([("thing".to_string(), "error".to_string())]),
            replica_id: None,
        });

        assert_conforms_to_status_descs(&row);
        assert_common_columns(&row, &id, status, Some(error_message));

        let details = unpack_details(&row);
        assert_eq!(details.len(), 2);
        // These are always sorted
        let hint_datum = &details[0];
        let ns_datum = &details[1];

        assert_eq!(hint_datum.0, "hints");
        assert_eq!(
            hint_datum.1.unwrap_list().iter().next().unwrap(),
            Datum::String(hint)
        );

        assert_eq!(ns_datum.0, "namespaced");
        assert_eq!(
            ns_datum.1.unwrap_map().iter().next().unwrap(),
            ("thing", Datum::String("error"))
        );
    }
}