1use std::any::Any;
61use std::cmp::Reverse;
62use std::collections::{BTreeMap, BinaryHeap};
63use std::fmt::Debug;
64use std::ops::ControlFlow;
65use std::pin::Pin;
66use std::str::FromStr;
67use std::sync::atomic::{AtomicU64, Ordering};
68use std::sync::{Arc, Mutex};
69
70use anyhow::{anyhow, bail};
71use chrono::{DateTime, Utc};
72use differential_dataflow::consolidation;
73use futures::future::BoxFuture;
74use futures::stream::StreamExt;
75use futures::{Future, FutureExt};
76use mz_cluster_client::ReplicaId;
77use mz_dyncfg::ConfigSet;
78use mz_ore::now::NowFn;
79use mz_ore::retry::Retry;
80use mz_ore::soft_panic_or_log;
81use mz_ore::task::AbortOnDropHandle;
82use mz_persist_client::batch::Added;
83use mz_persist_client::read::ReadHandle;
84use mz_persist_client::write::WriteHandle;
85use mz_repr::adt::timestamp::CheckedTimestamp;
86use mz_repr::{ColumnName, Diff, GlobalId, Row, Timestamp};
87use mz_storage_client::client::{AppendOnlyUpdate, Status, TimestamplessUpdate};
88use mz_storage_client::controller::{IntrospectionType, MonotonicAppender, StorageWriteOp};
89use mz_storage_client::healthcheck::{
90 MZ_SINK_STATUS_HISTORY_DESC, MZ_SOURCE_STATUS_HISTORY_DESC, REPLICA_METRICS_HISTORY_DESC,
91 WALLCLOCK_GLOBAL_LAG_HISTOGRAM_RAW_DESC, WALLCLOCK_LAG_HISTORY_DESC,
92};
93use mz_storage_client::metrics::StorageControllerMetrics;
94use mz_storage_client::statistics::ControllerSinkStatistics;
95use mz_storage_client::storage_collections::StorageCollections;
96use mz_storage_types::StorageDiff;
97use mz_storage_types::controller::InvalidUpper;
98use mz_storage_types::dyncfgs::{
99 REPLICA_METRICS_HISTORY_RETENTION_INTERVAL, WALLCLOCK_GLOBAL_LAG_HISTOGRAM_RETENTION_INTERVAL,
100 WALLCLOCK_LAG_HISTORY_RETENTION_INTERVAL,
101};
102use mz_storage_types::parameters::{
103 STORAGE_MANAGED_COLLECTIONS_BATCH_DURATION_DEFAULT, StorageParameters,
104};
105use mz_storage_types::sources::SourceData;
106use timely::progress::Antichain;
107use tokio::sync::{mpsc, oneshot, watch};
108use tokio::time::{Duration, Instant};
109use tracing::{debug, error, info};
110
111use crate::{
112 StatusHistoryDesc, StatusHistoryRetentionPolicy, StorageError, collection_mgmt,
113 privatelink_status_history_desc, replica_status_history_desc, sink_status_history_desc,
114 snapshot_statistics, source_status_history_desc, statistics,
115};
116
/// Interval (in milliseconds) at which write tasks tick, absent other writes, to keep the
/// upper of their collection advancing.
const DEFAULT_TICK_MS: u64 = 1_000;

/// Channel on which [`StorageWriteOp`]s are sent to a differential write task, paired with a
/// oneshot sender through which the result of the write is reported back.
type DifferentialWriteChannel =
    mpsc::UnboundedSender<(StorageWriteOp, oneshot::Sender<Result<(), StorageError>>)>;

/// Channel on which batches of append-only updates are sent to an append-only write task,
/// paired with a oneshot sender through which the result of the write is reported back.
type AppendOnlyWriteChannel = mpsc::UnboundedSender<(
    Vec<AppendOnlyUpdate>,
    oneshot::Sender<Result<(), StorageError>>,
)>;

/// Handle to a spawned write task; the task is aborted when the handle is dropped.
type WriteTask = AbortOnDropHandle<()>;
/// Oneshot sender used to request graceful shutdown of a write task.
type ShutdownSender = oneshot::Sender<()>;
132
/// The kinds of managed collections the [`CollectionManager`] knows how to maintain.
pub enum CollectionManagerKind {
    /// A collection that only ever receives appends of new updates.
    AppendOnly,
    /// A collection whose contents are maintained differentially against a desired state.
    Differential,
}
149
/// Spawns and tracks the per-collection background tasks that service writes to
/// storage-managed collections.
#[derive(Debug, Clone)]
pub struct CollectionManager {
    /// Whether this manager was created in read-only mode. Read-only write tasks track
    /// state but do not write to persist, unless made writable at registration time.
    read_only: bool,

    /// Write channel, task handle, and shutdown sender for each registered
    /// differentially-maintained collection.
    differential_collections:
        Arc<Mutex<BTreeMap<GlobalId, (DifferentialWriteChannel, WriteTask, ShutdownSender)>>>,

    /// Write channel, task handle, and shutdown sender for each registered append-only
    /// collection.
    append_only_collections:
        Arc<Mutex<BTreeMap<GlobalId, (AppendOnlyWriteChannel, WriteTask, ShutdownSender)>>>,

    /// Duration (in milliseconds) over which writes to user collections are batched;
    /// updatable at runtime via [`CollectionManager::update_user_batch_duration`].
    user_batch_duration_ms: Arc<AtomicU64>,
    /// Source of wall-clock timestamps used when appending to collections.
    now: NowFn,
}
176
impl CollectionManager {
    /// Creates a new [`CollectionManager`].
    ///
    /// `read_only` controls whether spawned write tasks may write to persist; `now`
    /// supplies the wall-clock timestamps used for appends.
    pub(super) fn new(read_only: bool, now: NowFn) -> CollectionManager {
        let batch_duration_ms: u64 = STORAGE_MANAGED_COLLECTIONS_BATCH_DURATION_DEFAULT
            .as_millis()
            .try_into()
            .expect("known to fit");

        CollectionManager {
            read_only,
            differential_collections: Arc::new(Mutex::new(BTreeMap::new())),
            append_only_collections: Arc::new(Mutex::new(BTreeMap::new())),
            user_batch_duration_ms: Arc::new(AtomicU64::new(batch_duration_ms)),
            now,
        }
    }

    /// Updates the duration over which writes to user collections are batched.
    ///
    /// Durations longer than `u64::MAX` milliseconds saturate to `u64::MAX`.
    pub fn update_user_batch_duration(&self, duration: Duration) {
        tracing::info!(?duration, "updating user batch duration");
        let millis: u64 = duration.as_millis().try_into().unwrap_or(u64::MAX);
        self.user_batch_duration_ms.store(millis, Ordering::Relaxed);
    }

    /// Registers a differential collection and spawns a [`DifferentialWriteTask`] that
    /// services writes to it.
    ///
    /// `read_handle_fn` lets the task acquire fresh read handles when it needs to re-sync
    /// its in-memory state from persist. `force_writable` makes the task writable even when
    /// the manager is in read-only mode; only permitted for system collections.
    ///
    /// If the collection is already registered with a still-running task, logs an error and
    /// returns without spawning a new task.
    pub(super) fn register_differential_collection<R>(
        &self,
        id: GlobalId,
        write_handle: WriteHandle<SourceData, (), Timestamp, StorageDiff>,
        read_handle_fn: R,
        force_writable: bool,
        introspection_config: DifferentialIntrospectionConfig,
    ) where
        R: FnMut() -> Pin<
                Box<dyn Future<Output = ReadHandle<SourceData, (), Timestamp, StorageDiff>> + Send>,
            > + Send
            + Sync
            + 'static,
    {
        let mut guard = self
            .differential_collections
            .lock()
            .expect("collection_mgmt panicked");

        // Refuse to replace a task that is still running; this indicates a double
        // registration by the caller.
        if let Some((_writer, task, _shutdown_tx)) = guard.get(&id) {
            if !task.is_finished() {
                tracing::error!("Registered a collection twice! {id:?}");
                return;
            }
        }

        let read_only = self.get_read_only(id, force_writable);

        let writer_and_handle = DifferentialWriteTask::spawn(
            id,
            write_handle,
            read_handle_fn,
            read_only,
            self.now.clone(),
            introspection_config,
        );
        let prev = guard.insert(id, writer_and_handle);

        // We only get here if any previously registered task has finished.
        if let Some((_, prev_task, _)) = prev {
            assert!(
                prev_task.is_finished(),
                "should only spawn a new task if the previous is finished"
            );
        }
    }

    /// Registers an append-only collection and spawns an [`AppendOnlyWriteTask`] that
    /// services writes to it.
    ///
    /// `force_writable` makes the task writable even when the manager is in read-only mode;
    /// only permitted for system collections.
    ///
    /// If the collection is already registered with a still-running task, logs an error and
    /// returns without spawning a new task.
    pub(super) fn register_append_only_collection(
        &self,
        id: GlobalId,
        write_handle: WriteHandle<SourceData, (), Timestamp, StorageDiff>,
        force_writable: bool,
        introspection_config: Option<AppendOnlyIntrospectionConfig>,
    ) {
        let mut guard = self
            .append_only_collections
            .lock()
            .expect("collection_mgmt panicked");

        // Refuse to replace a task that is still running; this indicates a double
        // registration by the caller.
        if let Some((_writer, task, _shutdown_tx)) = guard.get(&id) {
            if !task.is_finished() {
                tracing::error!("Registered a collection twice! {id:?}");
                return;
            }
        }

        let read_only = self.get_read_only(id, force_writable);

        let writer_and_handle = AppendOnlyWriteTask::spawn(
            id,
            write_handle,
            read_only,
            self.now.clone(),
            Arc::clone(&self.user_batch_duration_ms),
            introspection_config,
        );
        let prev = guard.insert(id, writer_and_handle);

        // We only get here if any previously registered task has finished.
        if let Some((_, prev_task, _)) = prev {
            assert!(
                prev_task.is_finished(),
                "should only spawn a new task if the previous is finished"
            );
        }
    }

    /// Unregisters the collection, requesting shutdown of its write task if one exists.
    ///
    /// Returns a future that resolves once the write task has ended. If the collection was
    /// not registered, the returned future resolves immediately.
    #[mz_ore::instrument(level = "debug")]
    pub(super) fn unregister_collection(&self, id: GlobalId) -> BoxFuture<'static, ()> {
        // The collection is registered as either differential or append-only; check the
        // differential map first.
        let prev = self
            .differential_collections
            .lock()
            .expect("CollectionManager panicked")
            .remove(&id);

        if let Some((_prev_writer, prev_task, shutdown_tx)) = prev {
            // Best effort: the task may already have shut down on its own.
            let _ = shutdown_tx.send(());
            return Box::pin(prev_task.map(|_| ()));
        }

        let prev = self
            .append_only_collections
            .lock()
            .expect("CollectionManager panicked")
            .remove(&id);

        if let Some((_prev_writer, prev_task, shutdown_tx)) = prev {
            // Best effort: the task may already have shut down on its own.
            let _ = shutdown_tx.send(());
            return Box::pin(prev_task.map(|_| ()));
        }

        Box::pin(futures::future::ready(()))
    }

    /// Returns a clone of the write channel for the given append-only collection.
    ///
    /// Panics if the collection is not registered as append-only.
    pub(super) fn append_only_write_sender(&self, id: GlobalId) -> AppendOnlyWriteChannel {
        let collections = self.append_only_collections.lock().expect("poisoned");
        match collections.get(&id) {
            Some((tx, _, _)) => tx.clone(),
            None => panic!("missing append-only collection: {id}"),
        }
    }

    /// Returns a clone of the write channel for the given differential collection.
    ///
    /// Panics if the collection is not registered as differential.
    pub(super) fn differential_write_sender(&self, id: GlobalId) -> DifferentialWriteChannel {
        let collections = self.differential_collections.lock().expect("poisoned");
        match collections.get(&id) {
            Some((tx, _, _)) => tx.clone(),
            None => panic!("missing differential collection: {id}"),
        }
    }

    /// Sends updates to the given append-only collection without waiting for the result.
    ///
    /// Panics if the manager is in read-only mode or if the collection is not registered as
    /// append-only. Empty update batches are silently dropped.
    pub(super) fn blind_write(&self, id: GlobalId, updates: Vec<AppendOnlyUpdate>) {
        if self.read_only {
            panic!("attempting blind write to {} while in read-only mode", id);
        }

        if updates.is_empty() {
            return;
        }

        let collections = self.append_only_collections.lock().expect("poisoned");
        match collections.get(&id) {
            Some((update_tx, _, _)) => {
                // The result receiver is dropped immediately: this is a fire-and-forget
                // write.
                let (tx, _rx) = oneshot::channel();
                update_tx.send((updates, tx)).expect("rx hung up");
            }
            None => panic!("missing append-only collection: {id}"),
        }
    }

    /// Sends a write operation to the given differential collection without waiting for the
    /// result.
    ///
    /// Panics if the collection is not registered as differential. Empty append operations
    /// are silently dropped.
    pub(super) fn differential_write(&self, id: GlobalId, op: StorageWriteOp) {
        if op.is_empty_append() {
            return;
        }

        let collections = self.differential_collections.lock().expect("poisoned");
        match collections.get(&id) {
            Some((update_tx, _, _)) => {
                // The result receiver is dropped immediately: this is a fire-and-forget
                // write.
                let (tx, _rx) = oneshot::channel();
                update_tx.send((op, tx)).expect("rx hung up");
            }
            None => panic!("missing differential collection: {id}"),
        }
    }

    /// Appends the given updates to the given differential collection; convenience wrapper
    /// around [`CollectionManager::differential_write`].
    pub(super) fn differential_append(&self, id: GlobalId, updates: Vec<(Row, Diff)>) {
        self.differential_write(id, StorageWriteOp::Append { updates })
    }

    /// Returns a [`MonotonicAppender`] for the given append-only collection, or
    /// `StorageError::IdentifierMissing` if it is not registered.
    pub(super) fn monotonic_appender(
        &self,
        id: GlobalId,
    ) -> Result<MonotonicAppender, StorageError> {
        let guard = self
            .append_only_collections
            .lock()
            .expect("CollectionManager panicked");
        let tx = guard
            .get(&id)
            .map(|(tx, _, _)| tx.clone())
            .ok_or(StorageError::IdentifierMissing(id))?;

        Ok(MonotonicAppender::new(tx))
    }

    /// Determines the effective read-only setting for a collection's write task.
    ///
    /// `force_writable` overrides the manager-wide read-only mode; it is asserted to be
    /// used only with system collections.
    fn get_read_only(&self, id: GlobalId, force_writable: bool) -> bool {
        if force_writable {
            assert!(id.is_system(), "unexpected non-system global id: {id:?}");
            false
        } else {
            self.read_only
        }
    }
}
462
/// Configuration used to prepare a differential introspection collection for writes.
pub(crate) struct DifferentialIntrospectionConfig {
    /// A recent upper of the collection, used when snapshotting current statistics.
    pub(crate) recent_upper: Antichain<Timestamp>,
    /// The introspection collection being prepared.
    pub(crate) introspection_type: IntrospectionType,
    /// Handle to storage collections, used for reading current collection contents.
    pub(crate) storage_collections: Arc<dyn StorageCollections + Send + Sync>,
    /// The collection manager, handed to spawned statistics scrapers.
    pub(crate) collection_manager: collection_mgmt::CollectionManager,
    /// Shared source statistics, scraped into the collection.
    pub(crate) source_statistics: Arc<Mutex<statistics::SourceStatistics>>,
    /// Shared sink statistics, keyed by (collection, replica), scraped into the collection.
    pub(crate) sink_statistics:
        Arc<Mutex<BTreeMap<(GlobalId, Option<ReplicaId>), ControllerSinkStatistics>>>,
    /// Interval at which statistics are scraped.
    pub(crate) statistics_interval: Duration,
    /// Receiver for dynamic updates to the statistics scrape interval.
    pub(crate) statistics_interval_receiver: watch::Receiver<Duration>,
    /// How long scraped statistics are retained.
    pub(crate) statistics_retention_duration: Duration,
    /// Metrics registry for the storage controller.
    pub(crate) metrics: StorageControllerMetrics,
    /// Tokens that keep statistics scrapers alive; entries are removed to shut scrapers
    /// down.
    pub(crate) introspection_tokens: Arc<Mutex<BTreeMap<GlobalId, Box<dyn Any + Send + Sync>>>>,
}
477
/// A task that maintains a differential storage-managed collection: it tracks a desired
/// state, writes changes to persist, and periodically advances the collection's upper.
struct DifferentialWriteTask<R>
where
    R: FnMut() -> Pin<
            Box<dyn Future<Output = ReadHandle<SourceData, (), Timestamp, StorageDiff>> + Send>,
        > + Send
        + 'static,
{
    /// The collection this task maintains.
    id: GlobalId,

    /// Write handle for appending to the collection.
    write_handle: WriteHandle<SourceData, (), Timestamp, StorageDiff>,

    /// Produces fresh read handles, used to re-sync state from persist after upper
    /// mismatches.
    read_handle_fn: R,

    /// Whether this task is in read-only mode and must not write to persist.
    read_only: bool,

    /// Source of wall-clock timestamps used for appends.
    now: NowFn,

    /// Interval at which the upper is advanced, absent other writes.
    upper_tick_interval: tokio::time::Interval,

    /// Receiver for incoming write operations and their result senders.
    cmd_rx: mpsc::UnboundedReceiver<(StorageWriteOp, oneshot::Sender<Result<(), StorageError>>)>,

    /// Receiver for graceful-shutdown requests.
    shutdown_rx: oneshot::Receiver<()>,

    /// The desired contents of the collection, as applied write operations accumulate.
    desired: Vec<(Row, Diff)>,

    /// Pending updates that still have to be written to persist to bring the collection's
    /// contents in line with `desired`.
    to_write: Vec<(Row, Diff)>,

    /// Our current idea of the collection's upper.
    current_upper: Timestamp,
}
537
538impl<R> DifferentialWriteTask<R>
539where
540 R: FnMut() -> Pin<
541 Box<dyn Future<Output = ReadHandle<SourceData, (), Timestamp, StorageDiff>> + Send>,
542 > + Send
543 + Sync
544 + 'static,
545{
546 fn spawn(
549 id: GlobalId,
550 write_handle: WriteHandle<SourceData, (), Timestamp, StorageDiff>,
551 read_handle_fn: R,
552 read_only: bool,
553 now: NowFn,
554 introspection_config: DifferentialIntrospectionConfig,
555 ) -> (DifferentialWriteChannel, WriteTask, ShutdownSender) {
556 let (tx, rx) = mpsc::unbounded_channel();
557 let (shutdown_tx, shutdown_rx) = oneshot::channel();
558
559 let upper_tick_interval = tokio::time::interval(Duration::from_millis(DEFAULT_TICK_MS));
560
561 let task = Self {
562 id,
563 write_handle,
564 read_handle_fn,
565 read_only,
566 now,
567 upper_tick_interval,
568 cmd_rx: rx,
569 shutdown_rx,
570 desired: Vec::new(),
571 to_write: Vec::new(),
572 current_upper: Timestamp::MIN,
573 };
574
575 let handle = mz_ore::task::spawn(
576 || format!("CollectionManager-differential_write_task-{id}"),
577 async move {
578 if !task.read_only {
579 task.prepare(introspection_config).await;
580 }
581 let res = task.run().await;
582
583 match res {
584 ControlFlow::Break(reason) => {
585 info!("write_task-{} ending: {}", id, reason);
586 }
587 c @ ControlFlow::Continue(_) => {
588 unreachable!(
589 "cannot break out of the loop with a Continue, but got: {:?}",
590 c
591 );
592 }
593 }
594 },
595 );
596
597 (tx, handle.abort_on_drop(), shutdown_tx)
598 }
599
600 async fn prepare(&self, introspection_config: DifferentialIntrospectionConfig) {
606 tracing::info!(%self.id, ?introspection_config.introspection_type, "preparing differential introspection collection for writes");
607
608 match introspection_config.introspection_type {
609 IntrospectionType::ShardMapping => {
610 }
612 IntrospectionType::Frontiers | IntrospectionType::ReplicaFrontiers => {
613 }
616 IntrospectionType::StorageSourceStatistics => {
617 let prev = snapshot_statistics(
618 self.id,
619 introspection_config.recent_upper,
620 &introspection_config.storage_collections,
621 )
622 .await;
623
624 let scraper_token = statistics::spawn_statistics_scraper(
625 self.id.clone(),
626 introspection_config.collection_manager,
628 Arc::clone(&introspection_config.source_statistics),
629 prev,
630 introspection_config.statistics_interval.clone(),
631 introspection_config.statistics_interval_receiver.clone(),
632 introspection_config.statistics_retention_duration,
633 introspection_config.metrics,
634 );
635 let web_token = statistics::spawn_webhook_statistics_scraper(
636 introspection_config.source_statistics,
637 introspection_config.statistics_interval,
638 introspection_config.statistics_interval_receiver,
639 );
640
641 introspection_config
644 .introspection_tokens
645 .lock()
646 .expect("poisoned")
647 .insert(self.id, Box::new((scraper_token, web_token)));
648 }
649 IntrospectionType::StorageSinkStatistics => {
650 let prev = snapshot_statistics(
651 self.id,
652 introspection_config.recent_upper,
653 &introspection_config.storage_collections,
654 )
655 .await;
656
657 let scraper_token = statistics::spawn_statistics_scraper(
658 self.id.clone(),
659 introspection_config.collection_manager,
660 Arc::clone(&introspection_config.sink_statistics),
661 prev,
662 introspection_config.statistics_interval,
663 introspection_config.statistics_interval_receiver,
664 introspection_config.statistics_retention_duration,
665 introspection_config.metrics,
666 );
667
668 introspection_config
671 .introspection_tokens
672 .lock()
673 .expect("poisoned")
674 .insert(self.id, scraper_token);
675 }
676
677 IntrospectionType::ComputeDependencies
678 | IntrospectionType::ComputeOperatorHydrationStatus
679 | IntrospectionType::ComputeMaterializedViewRefreshes
680 | IntrospectionType::ComputeErrorCounts
681 | IntrospectionType::ComputeHydrationTimes => {
682 }
685
686 introspection_type @ IntrospectionType::ReplicaMetricsHistory
687 | introspection_type @ IntrospectionType::WallclockLagHistory
688 | introspection_type @ IntrospectionType::WallclockLagHistogram
689 | introspection_type @ IntrospectionType::PreparedStatementHistory
690 | introspection_type @ IntrospectionType::StatementExecutionHistory
691 | introspection_type @ IntrospectionType::SessionHistory
692 | introspection_type @ IntrospectionType::StatementLifecycleHistory
693 | introspection_type @ IntrospectionType::SqlText
694 | introspection_type @ IntrospectionType::SourceStatusHistory
695 | introspection_type @ IntrospectionType::SinkStatusHistory
696 | introspection_type @ IntrospectionType::PrivatelinkConnectionStatusHistory
697 | introspection_type @ IntrospectionType::ReplicaStatusHistory => {
698 unreachable!("not differential collection: {introspection_type:?}")
699 }
700 }
701 }
702
703 async fn run(mut self) -> ControlFlow<String> {
704 let mut updates = Vec::new();
705 loop {
706 tokio::select! {
707 biased;
710
711 _ = &mut self.shutdown_rx => {
713 self.handle_shutdown();
714
715 return ControlFlow::Break("graceful shutdown".to_string());
716 }
717
718 () = recv_all_commands(&mut self.cmd_rx, &mut updates) => {
720 if updates.is_empty() {
721 return ControlFlow::Break("sender has been dropped".to_string());
725 }
726 self.handle_updates(&mut updates).await?;
727 }
728
729 _ = self.upper_tick_interval.tick() => {
731 if self.read_only {
732 continue;
734 }
735 self.tick_upper().await?;
736 },
737 }
738 }
739 }
740
741 async fn tick_upper(&mut self) -> ControlFlow<String> {
742 let now = Timestamp::from((self.now)());
743
744 if now <= self.current_upper {
745 return ControlFlow::Continue(());
748 }
749
750 assert!(!self.read_only);
751 let res = self
752 .write_handle
753 .compare_and_append_batch(
754 &mut [],
755 Antichain::from_elem(self.current_upper),
756 Antichain::from_elem(now),
757 true,
758 )
759 .await
760 .expect("valid usage");
761 match res {
762 Ok(()) => {
764 tracing::debug!(%self.id, "bumped upper of differential collection");
765 self.current_upper = now;
766 }
767 Err(err) => {
768 let actual_upper = if let Some(ts) = err.current.as_option() {
773 *ts
774 } else {
775 return ControlFlow::Break("upper is the empty antichain".to_string());
776 };
777
778 tracing::info!(%self.id, ?actual_upper, expected_upper = ?self.current_upper, "upper mismatch while bumping upper, syncing to persist state");
779
780 self.current_upper = actual_upper;
781
782 self.sync_to_persist().await;
783 }
784 }
785
786 ControlFlow::Continue(())
787 }
788
789 fn handle_shutdown(&mut self) {
790 let mut senders = Vec::new();
791
792 self.cmd_rx.close();
794
795 while let Ok((_batch, sender)) = self.cmd_rx.try_recv() {
797 senders.push(sender);
798 }
799
800 notify_listeners(senders, || Err(StorageError::IdentifierInvalid(self.id)));
806 }
807
808 async fn handle_updates(
809 &mut self,
810 batch: &mut Vec<(StorageWriteOp, oneshot::Sender<Result<(), StorageError>>)>,
811 ) -> ControlFlow<String> {
812 let batch_duration_ms = STORAGE_MANAGED_COLLECTIONS_BATCH_DURATION_DEFAULT;
814
815 let use_batch_now = Instant::now();
816 let min_time_to_complete = use_batch_now + batch_duration_ms;
817
818 tracing::debug!(
819 ?use_batch_now,
820 ?batch_duration_ms,
821 ?min_time_to_complete,
822 "batch duration",
823 );
824
825 let mut responders = Vec::with_capacity(batch.len());
826 for (op, tx) in batch.drain(..) {
827 self.apply_write_op(op);
828 responders.push(tx);
829 }
830
831 consolidation::consolidate(&mut self.desired);
833 consolidation::consolidate(&mut self.to_write);
834
835 self.upper_tick_interval.reset();
846
847 self.write_to_persist(responders).await?;
848
849 tokio::time::sleep_until(min_time_to_complete).await;
854
855 ControlFlow::Continue(())
856 }
857
858 fn apply_write_op(&mut self, op: StorageWriteOp) {
860 match op {
861 StorageWriteOp::Append { updates } => {
862 self.desired.extend_from_slice(&updates);
863 self.to_write.extend(updates);
864 }
865 StorageWriteOp::Delete { filter } => {
866 let to_delete = self.desired.extract_if(.., |(row, _)| filter(row));
867 let retractions = to_delete.map(|(row, diff)| (row, -diff));
868 self.to_write.extend(retractions);
869 }
870 }
871 }
872
873 async fn write_to_persist(
877 &mut self,
878 responders: Vec<oneshot::Sender<Result<(), StorageError>>>,
879 ) -> ControlFlow<String> {
880 if self.read_only {
881 tracing::debug!(%self.id, "not writing to differential collection: read-only");
882 return ControlFlow::Continue(());
884 }
885
886 let retries = Retry::default()
893 .initial_backoff(Duration::from_secs(1))
894 .clamp_backoff(Duration::from_secs(3))
895 .factor(1.25)
896 .max_tries(20)
897 .into_retry_stream();
898 let mut retries = Box::pin(retries);
899
900 loop {
901 let now = Timestamp::from((self.now)());
903 let new_upper = std::cmp::max(now, self.current_upper.step_forward());
904
905 let updates_to_write = self
906 .to_write
907 .iter()
908 .map(|(row, diff)| {
909 (
910 (SourceData(Ok(row.clone())), ()),
911 self.current_upper,
912 diff.into_inner(),
913 )
914 })
915 .collect::<Vec<_>>();
916
917 assert!(!self.read_only);
918 let res = self
919 .write_handle
920 .compare_and_append(
921 updates_to_write,
922 Antichain::from_elem(self.current_upper),
923 Antichain::from_elem(new_upper),
924 )
925 .await
926 .expect("valid usage");
927 match res {
928 Ok(()) => {
930 notify_listeners(responders, || Ok(()));
932
933 self.current_upper = new_upper;
934
935 self.to_write.clear();
938
939 tracing::debug!(%self.id, "appended to differential collection");
940
941 break;
943 }
944 Err(err) => {
946 let actual_upper = if let Some(ts) = err.current.as_option() {
950 *ts
951 } else {
952 return ControlFlow::Break("upper is the empty antichain".to_string());
953 };
954
955 tracing::info!(%self.id, ?actual_upper, expected_upper = ?self.current_upper, "retrying append for differential collection");
956
957 if retries.next().await.is_none() {
960 let invalid_upper = InvalidUpper {
961 id: self.id,
962 current_upper: err.current,
963 };
964 notify_listeners(responders, || {
965 Err(StorageError::InvalidUppers(vec![invalid_upper.clone()]))
966 });
967 error!(
968 "exhausted retries when appending to managed collection {}",
969 self.id
970 );
971 break;
972 }
973
974 self.current_upper = actual_upper;
975
976 self.sync_to_persist().await;
977
978 debug!(
979 "Retrying invalid-uppers error while appending to differential collection {}",
980 self.id
981 );
982 }
983 }
984 }
985
986 ControlFlow::Continue(())
987 }
988
989 async fn sync_to_persist(&mut self) {
997 let mut read_handle = (self.read_handle_fn)().await;
998 let as_of = self.current_upper.step_back().unwrap_or(Timestamp::MIN);
999 let as_of = Antichain::from_elem(as_of);
1000 let snapshot = read_handle.snapshot_and_fetch(as_of).await;
1001
1002 let mut negated_oks = match snapshot {
1003 Ok(contents) => {
1004 let mut snapshot = Vec::with_capacity(contents.len());
1005 for ((data, _), _, diff) in contents {
1006 let row = data.0.unwrap();
1007 snapshot.push((row, -Diff::from(diff)));
1008 }
1009 snapshot
1010 }
1011 Err(e) => panic!("read before since: {e:?}"),
1012 };
1013
1014 self.to_write.clear();
1015 self.to_write.extend(self.desired.iter().cloned());
1016 self.to_write.append(&mut negated_oks);
1017 consolidation::consolidate(&mut self.to_write);
1018 }
1019}
1020
/// Configuration used to prepare an append-only introspection collection for writes.
pub(crate) struct AppendOnlyIntrospectionConfig {
    /// The introspection collection being prepared.
    pub(crate) introspection_type: IntrospectionType,
    /// Dynamic configuration, consulted for history retention intervals.
    pub(crate) config_set: Arc<ConfigSet>,
    /// Storage parameters, consulted for status-history retention policies.
    pub(crate) parameters: StorageParameters,
    /// Handle to storage collections, used for reading current collection contents.
    pub(crate) storage_collections: Arc<dyn StorageCollections + Send + Sync>,
}
1027
/// A task that services writes to an append-only storage-managed collection and keeps its
/// upper ticking forward.
struct AppendOnlyWriteTask {
    /// The collection this task maintains.
    id: GlobalId,
    /// Write handle for appending to the collection.
    write_handle: WriteHandle<SourceData, (), Timestamp, StorageDiff>,
    /// Whether this task is in read-only mode and must not write to persist.
    read_only: bool,
    /// Source of wall-clock timestamps used for appends.
    now: NowFn,
    /// Duration (in milliseconds) over which writes to user collections are batched;
    /// shared with the owning [`CollectionManager`].
    user_batch_duration_ms: Arc<AtomicU64>,
    /// Receiver for incoming update batches and their result senders.
    rx: mpsc::UnboundedReceiver<(
        Vec<AppendOnlyUpdate>,
        oneshot::Sender<Result<(), StorageError>>,
    )>,

    /// Receiver for graceful-shutdown requests.
    shutdown_rx: oneshot::Receiver<()>,
    /// Last recorded status per (collection, replica), used to filter out stale status
    /// updates. `Some` only for source/sink status-history collections.
    previous_statuses: Option<BTreeMap<(GlobalId, Option<ReplicaId>), Status>>,
}
1050
impl AppendOnlyWriteTask {
    /// Spawns an [`AppendOnlyWriteTask`] in an [`mz_ore::task`] and returns the write
    /// channel, task handle, and shutdown sender used to interact with it.
    ///
    /// Panics (via `unreachable!`) when invoked for an introspection type that is not
    /// append-only.
    fn spawn(
        id: GlobalId,
        write_handle: WriteHandle<SourceData, (), Timestamp, StorageDiff>,
        read_only: bool,
        now: NowFn,
        user_batch_duration_ms: Arc<AtomicU64>,
        introspection_config: Option<AppendOnlyIntrospectionConfig>,
    ) -> (AppendOnlyWriteChannel, WriteTask, ShutdownSender) {
        let (tx, rx) = mpsc::unbounded_channel();
        let (shutdown_tx, shutdown_rx) = oneshot::channel();

        // Only source/sink status histories need status-deduplication state.
        let previous_statuses: Option<BTreeMap<(GlobalId, Option<ReplicaId>), Status>> =
            match introspection_config
                .as_ref()
                .map(|config| config.introspection_type)
            {
                Some(IntrospectionType::SourceStatusHistory)
                | Some(IntrospectionType::SinkStatusHistory) => Some(BTreeMap::new()),

                Some(IntrospectionType::ReplicaMetricsHistory)
                | Some(IntrospectionType::WallclockLagHistory)
                | Some(IntrospectionType::WallclockLagHistogram)
                | Some(IntrospectionType::PrivatelinkConnectionStatusHistory)
                | Some(IntrospectionType::ReplicaStatusHistory)
                | Some(IntrospectionType::PreparedStatementHistory)
                | Some(IntrospectionType::StatementExecutionHistory)
                | Some(IntrospectionType::SessionHistory)
                | Some(IntrospectionType::StatementLifecycleHistory)
                | Some(IntrospectionType::SqlText)
                | None => None,

                Some(introspection_type @ IntrospectionType::ShardMapping)
                | Some(introspection_type @ IntrospectionType::Frontiers)
                | Some(introspection_type @ IntrospectionType::ReplicaFrontiers)
                | Some(introspection_type @ IntrospectionType::StorageSourceStatistics)
                | Some(introspection_type @ IntrospectionType::StorageSinkStatistics)
                | Some(introspection_type @ IntrospectionType::ComputeDependencies)
                | Some(introspection_type @ IntrospectionType::ComputeOperatorHydrationStatus)
                | Some(introspection_type @ IntrospectionType::ComputeMaterializedViewRefreshes)
                | Some(introspection_type @ IntrospectionType::ComputeErrorCounts)
                | Some(introspection_type @ IntrospectionType::ComputeHydrationTimes) => {
                    unreachable!("not append-only collection: {introspection_type:?}")
                }
            };

        let mut task = Self {
            id,
            write_handle,
            rx,
            shutdown_rx,
            read_only,
            now,
            user_batch_duration_ms,
            previous_statuses,
        };

        let handle = mz_ore::task::spawn(
            || format!("CollectionManager-append_only_write_task-{id}"),
            async move {
                // Preparation writes to the collection, so it must be skipped in
                // read-only mode.
                if !task.read_only {
                    task.prepare(introspection_config).await;
                }
                task.run().await;
            },
        );

        (tx, handle.abort_on_drop(), shutdown_tx)
    }

    /// Prepares the introspection collection backing this task for writes: truncates
    /// retention-expired history entries and, for status histories, seeds
    /// `previous_statuses` with the last recorded status per object so later updates can
    /// be de-duplicated.
    ///
    /// No-op when `introspection_config` is `None` (i.e. this is not an introspection
    /// collection).
    async fn prepare(&mut self, introspection_config: Option<AppendOnlyIntrospectionConfig>) {
        let Some(AppendOnlyIntrospectionConfig {
            introspection_type,
            config_set,
            parameters,
            storage_collections,
        }) = introspection_config
        else {
            return;
        };
        let initial_statuses = match introspection_type {
            IntrospectionType::ReplicaMetricsHistory
            | IntrospectionType::WallclockLagHistory
            | IntrospectionType::WallclockLagHistogram => {
                let result = partially_truncate_metrics_history(
                    self.id,
                    introspection_type,
                    &mut self.write_handle,
                    config_set,
                    self.now.clone(),
                    storage_collections,
                )
                .await;
                // Truncation failure is not fatal; log (or soft-panic in CI) and move on.
                if let Err(error) = result {
                    soft_panic_or_log!(
                        "error truncating metrics history: {error} (type={introspection_type:?})"
                    );
                }
                Vec::new()
            }

            IntrospectionType::PrivatelinkConnectionStatusHistory => {
                partially_truncate_status_history(
                    self.id,
                    IntrospectionType::PrivatelinkConnectionStatusHistory,
                    &mut self.write_handle,
                    privatelink_status_history_desc(&parameters),
                    self.now.clone(),
                    &storage_collections,
                )
                .await;
                Vec::new()
            }
            IntrospectionType::ReplicaStatusHistory => {
                partially_truncate_status_history(
                    self.id,
                    IntrospectionType::ReplicaStatusHistory,
                    &mut self.write_handle,
                    replica_status_history_desc(&parameters),
                    self.now.clone(),
                    &storage_collections,
                )
                .await;
                Vec::new()
            }

            // No startup preparation for these collections here.
            IntrospectionType::PreparedStatementHistory
            | IntrospectionType::StatementExecutionHistory
            | IntrospectionType::SessionHistory
            | IntrospectionType::StatementLifecycleHistory
            | IntrospectionType::SqlText => {
                Vec::new()
            }

            IntrospectionType::SourceStatusHistory => {
                let last_status_per_id = partially_truncate_status_history(
                    self.id,
                    IntrospectionType::SourceStatusHistory,
                    &mut self.write_handle,
                    source_status_history_desc(&parameters),
                    self.now.clone(),
                    &storage_collections,
                )
                .await;

                let status_col = MZ_SOURCE_STATUS_HISTORY_DESC
                    .get_by_name(&ColumnName::from("status"))
                    .expect("schema has not changed")
                    .0;

                // Parse the last recorded status string for each source.
                last_status_per_id
                    .into_iter()
                    .map(|(id, row)| {
                        (
                            id,
                            Status::from_str(
                                row.iter()
                                    .nth(status_col)
                                    .expect("schema has not changed")
                                    .unwrap_str(),
                            )
                            .expect("statuses must be uncorrupted"),
                        )
                    })
                    .collect()
            }
            IntrospectionType::SinkStatusHistory => {
                let last_status_per_id = partially_truncate_status_history(
                    self.id,
                    IntrospectionType::SinkStatusHistory,
                    &mut self.write_handle,
                    sink_status_history_desc(&parameters),
                    self.now.clone(),
                    &storage_collections,
                )
                .await;

                let status_col = MZ_SINK_STATUS_HISTORY_DESC
                    .get_by_name(&ColumnName::from("status"))
                    .expect("schema has not changed")
                    .0;

                // Parse the last recorded status string for each sink.
                last_status_per_id
                    .into_iter()
                    .map(|(id, row)| {
                        (
                            id,
                            Status::from_str(
                                row.iter()
                                    .nth(status_col)
                                    .expect("schema has not changed")
                                    .unwrap_str(),
                            )
                            .expect("statuses must be uncorrupted"),
                        )
                    })
                    .collect()
            }

            introspection_type @ IntrospectionType::ShardMapping
            | introspection_type @ IntrospectionType::Frontiers
            | introspection_type @ IntrospectionType::ReplicaFrontiers
            | introspection_type @ IntrospectionType::StorageSourceStatistics
            | introspection_type @ IntrospectionType::StorageSinkStatistics
            | introspection_type @ IntrospectionType::ComputeDependencies
            | introspection_type @ IntrospectionType::ComputeOperatorHydrationStatus
            | introspection_type @ IntrospectionType::ComputeMaterializedViewRefreshes
            | introspection_type @ IntrospectionType::ComputeErrorCounts
            | introspection_type @ IntrospectionType::ComputeHydrationTimes => {
                unreachable!("not append-only collection: {introspection_type:?}")
            }
        };
        if let Some(previous_statuses) = &mut self.previous_statuses {
            previous_statuses.extend(initial_statuses);
        }
    }

    /// Runs the task's main loop, servicing (in priority order) shutdown requests,
    /// incoming update batches, and periodic upper ticks.
    async fn run(mut self) {
        let mut interval = tokio::time::interval(Duration::from_millis(DEFAULT_TICK_MS));

        let mut batch: Vec<(Vec<_>, _)> = Vec::new();

        'run: loop {
            tokio::select! {
                // `biased` gives shutdown priority over pending work.
                biased;

                _ = &mut self.shutdown_rx => {
                    let mut senders = Vec::new();

                    self.rx.close();

                    // Drain commands that raced with shutdown so we can notify their
                    // senders of the failure.
                    while let Ok((_batch, sender)) = self.rx.try_recv() {
                        senders.push(sender);
                    }

                    notify_listeners(senders, || Err(StorageError::IdentifierInvalid(self.id)));

                    break 'run;
                }

                () = recv_all_commands(&mut self.rx, &mut batch) => {
                    // An empty batch means all senders have been dropped.
                    if batch.is_empty() {
                        break 'run;
                    }

                    // User collections use the dynamically configurable batch duration;
                    // all others use the static default.
                    let batch_duration_ms = match self.id {
                        GlobalId::User(_) => Duration::from_millis(
                            self.user_batch_duration_ms.load(Ordering::Relaxed),
                        ),
                        _ => STORAGE_MANAGED_COLLECTIONS_BATCH_DURATION_DEFAULT,
                    };
                    let use_batch_now = Instant::now();
                    let min_time_to_complete = use_batch_now + batch_duration_ms;

                    tracing::debug!(
                        ?use_batch_now,
                        ?batch_duration_ms,
                        ?min_time_to_complete,
                        "batch duration",
                    );

                    // The write below advances the upper already, so push back the next
                    // tick.
                    interval.reset();

                    let capacity: usize = batch
                        .iter()
                        .map(|(rows, _)| rows.len())
                        .sum();
                    let mut all_rows = Vec::with_capacity(capacity);
                    let mut responders = Vec::with_capacity(batch.len());

                    for (updates, responder) in batch.drain(..) {
                        // Filters stale status updates and converts to rows.
                        let rows = self.process_updates(updates);

                        all_rows.extend(
                            rows.map(|(row, diff)| TimestamplessUpdate { row, diff }),
                        );
                        responders.push(responder);
                    }

                    if self.read_only {
                        tracing::warn!(%self.id, ?all_rows, "append while in read-only mode");
                        notify_listeners(responders, || Err(StorageError::ReadOnly));
                        continue;
                    }

                    // Append the updates at a timestamp of at least `now`.
                    let at_least = Timestamp::from((self.now)());

                    if !all_rows.is_empty() {
                        monotonic_append(&mut self.write_handle, all_rows, at_least).await;
                    }
                    notify_listeners(responders, || Ok(()));

                    // Throttle to the batch duration, so writers that submit in quick
                    // succession get batched into one append.
                    tokio::time::sleep_until(min_time_to_complete).await;
                }

                _ = interval.tick() => {
                    // In read-only mode we must not write, so the upper stays put.
                    if self.read_only {
                        continue;
                    }

                    // Bump the upper via an empty append so readers observe progress.
                    let now = Timestamp::from((self.now)());
                    let updates = vec![];
                    let at_least = now;

                    monotonic_append(&mut self.write_handle, updates, at_least).await;
                },
            }
        }

        info!("write_task-{} ending", self.id);
    }

    /// Filters the given updates against previously recorded statuses (when tracked) and
    /// converts them into rows.
    ///
    /// Status updates that do not supersede the last recorded status for their
    /// (collection, replica) pair are dropped; the statuses of surviving updates are
    /// recorded as the new "previous" statuses. Plain row updates always pass through.
    fn process_updates(
        &mut self,
        updates: Vec<AppendOnlyUpdate>,
    ) -> impl Iterator<Item = (Row, Diff)> {
        let updates = if let Some(previous_statuses) = &mut self.previous_statuses {
            let new: Vec<_> = updates
                .into_iter()
                .filter(|r| match r {
                    AppendOnlyUpdate::Row(_) => true,
                    AppendOnlyUpdate::Status(update) => {
                        match (
                            previous_statuses
                                .get(&(update.id, update.replica_id))
                                .as_deref(),
                            &update.status,
                        ) {
                            // No previous status recorded: always keep.
                            (None, _) => true,
                            (Some(old), new) => old.superseded_by(*new),
                        }
                    }
                })
                .collect();
            // Record the statuses that survived filtering as the new baseline.
            previous_statuses.extend(new.iter().filter_map(|update| match update {
                AppendOnlyUpdate::Row(_) => None,
                AppendOnlyUpdate::Status(update) => {
                    Some(((update.id, update.replica_id), update.status))
                }
            }));
            new
        } else {
            updates
        };

        updates.into_iter().map(AppendOnlyUpdate::into_row)
    }
}
1456
/// Truncates a metrics history collection by retracting all rows whose
/// timestamp column is older than the configured retention interval.
///
/// Reads a snapshot of the collection at the predecessor of its current
/// upper, stages a retraction (negated diff) for every expired row at the
/// old upper, and appends the retractions with `compare_and_append_batch`,
/// advancing the upper by one step.
///
/// # Errors
///
/// Returns an error if the collection is sealed (empty upper), if reading
/// the snapshot fails, or if the append is rejected (e.g. on a concurrent
/// upper change).
///
/// # Panics
///
/// Panics if `introspection_type` is not one of the supported metrics
/// histories.
async fn partially_truncate_metrics_history(
    id: GlobalId,
    introspection_type: IntrospectionType,
    write_handle: &mut WriteHandle<SourceData, (), Timestamp, StorageDiff>,
    config_set: Arc<ConfigSet>,
    now: NowFn,
    storage_collections: Arc<dyn StorageCollections + Send + Sync>,
) -> Result<(), anyhow::Error> {
    // Per history type: the retention interval (from dyncfg) and the column
    // index that holds the row's event time.
    let (keep_duration, occurred_at_col) = match introspection_type {
        IntrospectionType::ReplicaMetricsHistory => (
            REPLICA_METRICS_HISTORY_RETENTION_INTERVAL.get(&config_set),
            REPLICA_METRICS_HISTORY_DESC
                .get_by_name(&ColumnName::from("occurred_at"))
                .expect("schema has not changed")
                .0,
        ),
        IntrospectionType::WallclockLagHistory => (
            WALLCLOCK_LAG_HISTORY_RETENTION_INTERVAL.get(&config_set),
            WALLCLOCK_LAG_HISTORY_DESC
                .get_by_name(&ColumnName::from("occurred_at"))
                .expect("schema has not changed")
                .0,
        ),
        IntrospectionType::WallclockLagHistogram => (
            WALLCLOCK_GLOBAL_LAG_HISTOGRAM_RETENTION_INTERVAL.get(&config_set),
            WALLCLOCK_GLOBAL_LAG_HISTOGRAM_RAW_DESC
                .get_by_name(&ColumnName::from("period_start"))
                .expect("schema has not changed")
                .0,
        ),
        _ => panic!("not a metrics history: {introspection_type:?}"),
    };

    let upper = write_handle.fetch_recent_upper().await;
    let Some(upper_ts) = upper.as_option() else {
        bail!("collection is sealed");
    };
    // An upper with no predecessor means the collection is still empty, so
    // there is nothing to truncate.
    let Some(as_of_ts) = upper_ts.step_back() else {
        return Ok(()); };

    let mut rows = storage_collections
        .snapshot_cursor(id, as_of_ts)
        .await
        .map_err(|e| anyhow!("reading snapshot: {e:?}"))?;

    let now = mz_ore::now::to_datetime(now());
    let keep_since = now - keep_duration;

    // Retractions are written at the old upper; the append advances the
    // upper by exactly one step.
    let old_upper_ts = *upper_ts;
    let new_upper_ts = old_upper_ts.step_forward();

    let mut builder = write_handle.builder(Antichain::from_elem(old_upper_ts));
    while let Some(chunk) = rows.next().await {
        for (data, _t, diff) in chunk {
            // Skip error rows; only well-formed rows carry an event time.
            let Ok(row) = &data.0 else { continue };
            let datums = row.unpack();
            let occurred_at = datums[occurred_at_col].unwrap_timestamptz();
            if *occurred_at >= keep_since {
                continue;
            }
            // Negate the diff to retract the expired row.
            let diff = -diff;
            match builder.add(&data, &(), &old_upper_ts, &diff).await? {
                Added::Record => {}
                Added::RecordAndParts => {
                    debug!(?id, "added part to builder");
                }
            }
        }
    }

    let mut updates = builder.finish(Antichain::from_elem(new_upper_ts)).await?;
    let mut batches = vec![&mut updates];

    write_handle
        .compare_and_append_batch(
            batches.as_mut_slice(),
            Antichain::from_elem(old_upper_ts),
            Antichain::from_elem(new_upper_ts),
            true,
        )
        .await
        .expect("valid usage")
        .map_err(|e| anyhow!("appending retractions: {e:?}"))
}
1553
1554pub(crate) async fn partially_truncate_status_history<K>(
1564 id: GlobalId,
1565 introspection_type: IntrospectionType,
1566 write_handle: &mut WriteHandle<SourceData, (), Timestamp, StorageDiff>,
1567 status_history_desc: StatusHistoryDesc<K>,
1568 now: NowFn,
1569 storage_collections: &Arc<dyn StorageCollections + Send + Sync>,
1570) -> BTreeMap<K, Row>
1571where
1572 K: Clone + Debug + Ord + Send + Sync,
1573{
1574 let upper = write_handle.fetch_recent_upper().await.clone();
1575
1576 let mut rows = match upper.as_option() {
1577 Some(f) if f > &Timestamp::MIN => {
1578 let as_of = f.step_back().unwrap();
1579
1580 storage_collections
1581 .snapshot_cursor(id, as_of)
1582 .await
1583 .expect("snapshot succeeds")
1584 }
1585 _ => return BTreeMap::new(),
1588 };
1589
1590 let mut latest_row_per_key: BTreeMap<K, (CheckedTimestamp<DateTime<Utc>>, Row)> =
1592 BTreeMap::new();
1593
1594 let expected_upper = upper.into_option().expect("checked above");
1599 let new_upper = expected_upper.step_forward();
1600
1601 let mut deletions = write_handle.builder(Antichain::from_elem(expected_upper));
1602
1603 let mut handle_row = {
1604 let latest_row_per_key = &mut latest_row_per_key;
1605 move |row: &Row, diff| {
1606 let datums = row.unpack();
1607 let key = (status_history_desc.extract_key)(&datums);
1608 let timestamp = (status_history_desc.extract_time)(&datums);
1609
1610 assert!(
1611 diff > 0,
1612 "only know how to operate over consolidated data with diffs > 0, \
1613 found diff {diff} for object {key:?} in {introspection_type:?}",
1614 );
1615
1616 match latest_row_per_key.get(&key) {
1618 Some(existing) if &existing.0 > ×tamp => {}
1619 _ => {
1620 latest_row_per_key.insert(key.clone(), (timestamp, row.clone()));
1621 }
1622 };
1623 (key, timestamp)
1624 }
1625 };
1626
1627 match status_history_desc.retention_policy {
1628 StatusHistoryRetentionPolicy::LastN(n) => {
1629 let mut last_n_entries_per_key: BTreeMap<
1631 K,
1632 BinaryHeap<Reverse<(CheckedTimestamp<DateTime<Utc>>, Row)>>,
1633 > = BTreeMap::new();
1634
1635 while let Some(chunk) = rows.next().await {
1636 for (data, _t, diff) in chunk {
1637 let Ok(row) = &data.0 else { continue };
1638 let (key, timestamp) = handle_row(row, diff);
1639
1640 let entries = last_n_entries_per_key.entry(key).or_default();
1643 for _ in 0..diff {
1644 entries.push(Reverse((timestamp, row.clone())));
1654
1655 while entries.len() > n {
1658 if let Some(Reverse((_, r))) = entries.pop() {
1659 deletions
1660 .add(&SourceData(Ok(r)), &(), &expected_upper, &-1)
1661 .await
1662 .expect("usage should be valid");
1663 }
1664 }
1665 }
1666 }
1667 }
1668 }
1669 StatusHistoryRetentionPolicy::TimeWindow(time_window) => {
1670 let now = mz_ore::now::to_datetime(now());
1672 let keep_since = now - time_window;
1673
1674 while let Some(chunk) = rows.next().await {
1676 for (data, _t, diff) in chunk {
1677 let Ok(row) = &data.0 else { continue };
1678 let (_, timestamp) = handle_row(row, diff);
1679
1680 if *timestamp < keep_since {
1681 deletions
1682 .add(&data, &(), &expected_upper, &-1)
1683 .await
1684 .expect("usage should be valid");
1685 }
1686 }
1687 }
1688 }
1689 }
1690
1691 let mut updates = deletions
1692 .finish(Antichain::from_elem(new_upper))
1693 .await
1694 .expect("expected valid usage");
1695 let mut batches = vec![&mut updates];
1696
1697 let res = write_handle
1699 .compare_and_append_batch(
1700 batches.as_mut_slice(),
1701 Antichain::from_elem(expected_upper),
1702 Antichain::from_elem(new_upper),
1703 true,
1704 )
1705 .await
1706 .expect("usage was valid");
1707
1708 match res {
1709 Ok(_) => {
1710 }
1712 Err(err) => {
1713 info!(
1721 %id, ?expected_upper, current_upper = ?err.current,
1722 "failed to append partial truncation",
1723 );
1724 }
1725 }
1726
1727 latest_row_per_key
1728 .into_iter()
1729 .map(|(key, (_, row))| (key, row))
1730 .collect()
1731}
1732
1733async fn monotonic_append(
1734 write_handle: &mut WriteHandle<SourceData, (), Timestamp, StorageDiff>,
1735 updates: Vec<TimestamplessUpdate>,
1736 at_least: Timestamp,
1737) {
1738 let mut expected_upper = write_handle.shared_upper();
1739 loop {
1740 if updates.is_empty() && expected_upper.is_empty() {
1741 return;
1745 }
1746
1747 let upper = expected_upper
1748 .into_option()
1749 .expect("cannot append data to closed collection");
1750
1751 let lower = std::cmp::max(upper, at_least);
1752 let new_upper = lower.step_forward();
1753 let updates = updates
1754 .iter()
1755 .map(|TimestamplessUpdate { row, diff }| {
1756 ((SourceData(Ok(row.clone())), ()), lower, diff.into_inner())
1757 })
1758 .collect::<Vec<_>>();
1759 let res = write_handle
1760 .compare_and_append(
1761 updates,
1762 Antichain::from_elem(upper),
1763 Antichain::from_elem(new_upper),
1764 )
1765 .await
1766 .expect("valid usage");
1767 match res {
1768 Ok(()) => return,
1769 Err(err) => {
1770 expected_upper = err.current;
1771 continue;
1772 }
1773 }
1774 }
1775}
1776
1777fn notify_listeners<T>(
1779 responders: impl IntoIterator<Item = oneshot::Sender<T>>,
1780 result: impl Fn() -> T,
1781) {
1782 for r in responders {
1783 let _ = r.send(result());
1785 }
1786}
1787
1788async fn recv_all_commands<T>(rx: &mut mpsc::UnboundedReceiver<T>, out: &mut Vec<T>) {
1801 if let Some(msg) = rx.recv().await {
1802 out.push(msg);
1803 } else {
1804 return; };
1806
1807 out.reserve(rx.len());
1808 while let Ok(msg) = rx.try_recv() {
1809 out.push(msg);
1810 }
1811
1812 if out.capacity() > out.len() * 4 {
1817 out.shrink_to_fit();
1818 }
1819}
1820
#[cfg(test)]
mod tests {
    use std::collections::BTreeSet;

    use super::*;
    use itertools::Itertools;
    use mz_repr::{Datum, Row};
    use mz_storage_client::client::StatusUpdate;
    use mz_storage_client::healthcheck::{
        MZ_SINK_STATUS_HISTORY_DESC, MZ_SOURCE_STATUS_HISTORY_DESC,
    };

    /// Asserts that every datum in `row` is a valid instance of the
    /// corresponding column type in both the sink and the source status
    /// history relation descriptions.
    fn assert_conforms_to_status_history_descs(row: &Row) {
        for (datum, column_type) in row.iter().zip_eq(MZ_SINK_STATUS_HISTORY_DESC.iter_types()) {
            assert!(datum.is_instance_of_sql(column_type));
        }

        for (datum, column_type) in row
            .iter()
            .zip_eq(MZ_SOURCE_STATUS_HISTORY_DESC.iter_types())
        {
            assert!(datum.is_instance_of_sql(column_type));
        }
    }

    /// Extracts the entries of the `details` map (5th column) of a status
    /// history row. Panics if that column is not a map.
    fn details_of(row: &Row) -> Vec<(&str, Datum<'_>)> {
        row.iter()
            .nth(4)
            .unwrap()
            .unwrap_map()
            .iter()
            .collect::<Vec<_>>()
    }

    #[mz_ore::test]
    fn test_row() {
        let error_message = "error message";
        let hint = "hint message";
        let id = GlobalId::User(1);
        let status = Status::Dropped;
        let row = Row::from(StatusUpdate {
            id,
            timestamp: chrono::offset::Utc::now(),
            status,
            error: Some(error_message.to_string()),
            hints: BTreeSet::from([hint.to_string()]),
            namespaced_errors: Default::default(),
            replica_id: None,
        });

        assert_conforms_to_status_history_descs(&row);

        assert_eq!(row.iter().nth(1).unwrap(), Datum::String(&id.to_string()));
        assert_eq!(row.iter().nth(2).unwrap(), Datum::String(status.to_str()));
        assert_eq!(row.iter().nth(3).unwrap(), Datum::String(error_message));

        let details = details_of(&row);
        assert_eq!(details.len(), 1);
        let hint_datum = &details[0];

        assert_eq!(hint_datum.0, "hints");
        assert_eq!(
            hint_datum.1.unwrap_list().iter().next().unwrap(),
            Datum::String(hint)
        );
    }

    #[mz_ore::test]
    fn test_row_without_hint() {
        let error_message = "error message";
        let id = GlobalId::User(1);
        let status = Status::Dropped;
        let row = Row::from(StatusUpdate {
            id,
            timestamp: chrono::offset::Utc::now(),
            status,
            error: Some(error_message.to_string()),
            hints: Default::default(),
            namespaced_errors: Default::default(),
            replica_id: None,
        });

        assert_conforms_to_status_history_descs(&row);

        assert_eq!(row.iter().nth(1).unwrap(), Datum::String(&id.to_string()));
        assert_eq!(row.iter().nth(2).unwrap(), Datum::String(status.to_str()));
        assert_eq!(row.iter().nth(3).unwrap(), Datum::String(error_message));
        // No hints and no namespaced errors: the details column is NULL.
        assert_eq!(row.iter().nth(4).unwrap(), Datum::Null);
    }

    #[mz_ore::test]
    fn test_row_without_error() {
        let id = GlobalId::User(1);
        let status = Status::Dropped;
        let hint = "hint message";
        let row = Row::from(StatusUpdate {
            id,
            timestamp: chrono::offset::Utc::now(),
            status,
            error: None,
            hints: BTreeSet::from([hint.to_string()]),
            namespaced_errors: Default::default(),
            replica_id: None,
        });

        assert_conforms_to_status_history_descs(&row);

        assert_eq!(row.iter().nth(1).unwrap(), Datum::String(&id.to_string()));
        assert_eq!(row.iter().nth(2).unwrap(), Datum::String(status.to_str()));
        // No error supplied: the error column is NULL.
        assert_eq!(row.iter().nth(3).unwrap(), Datum::Null);

        let details = details_of(&row);
        assert_eq!(details.len(), 1);
        let hint_datum = &details[0];

        assert_eq!(hint_datum.0, "hints");
        assert_eq!(
            hint_datum.1.unwrap_list().iter().next().unwrap(),
            Datum::String(hint)
        );
    }

    #[mz_ore::test]
    fn test_row_with_namespaced() {
        let error_message = "error message";
        let id = GlobalId::User(1);
        let status = Status::Dropped;
        let row = Row::from(StatusUpdate {
            id,
            timestamp: chrono::offset::Utc::now(),
            status,
            error: Some(error_message.to_string()),
            hints: Default::default(),
            namespaced_errors: BTreeMap::from([("thing".to_string(), "error".to_string())]),
            replica_id: None,
        });

        assert_conforms_to_status_history_descs(&row);

        assert_eq!(row.iter().nth(1).unwrap(), Datum::String(&id.to_string()));
        assert_eq!(row.iter().nth(2).unwrap(), Datum::String(status.to_str()));
        assert_eq!(row.iter().nth(3).unwrap(), Datum::String(error_message));

        let details = details_of(&row);
        assert_eq!(details.len(), 1);
        let ns_datum = &details[0];

        assert_eq!(ns_datum.0, "namespaced");
        assert_eq!(
            ns_datum.1.unwrap_map().iter().next().unwrap(),
            ("thing", Datum::String("error"))
        );
    }

    #[mz_ore::test]
    fn test_row_with_everything() {
        let error_message = "error message";
        let hint = "hint message";
        let id = GlobalId::User(1);
        let status = Status::Dropped;
        let row = Row::from(StatusUpdate {
            id,
            timestamp: chrono::offset::Utc::now(),
            status,
            error: Some(error_message.to_string()),
            hints: BTreeSet::from([hint.to_string()]),
            namespaced_errors: BTreeMap::from([("thing".to_string(), "error".to_string())]),
            replica_id: None,
        });

        assert_conforms_to_status_history_descs(&row);

        assert_eq!(row.iter().nth(1).unwrap(), Datum::String(&id.to_string()));
        assert_eq!(row.iter().nth(2).unwrap(), Datum::String(status.to_str()));
        assert_eq!(row.iter().nth(3).unwrap(), Datum::String(error_message));

        let details = details_of(&row);
        assert_eq!(details.len(), 2);
        // Map entries are ordered by key: "hints" before "namespaced".
        let hint_datum = &details[0];
        let ns_datum = &details[1];

        assert_eq!(hint_datum.0, "hints");
        assert_eq!(
            hint_datum.1.unwrap_list().iter().next().unwrap(),
            Datum::String(hint)
        );

        assert_eq!(ns_datum.0, "namespaced");
        assert_eq!(
            ns_datum.1.unwrap_map().iter().next().unwrap(),
            ("thing", Datum::String("error"))
        );
    }
}