// Copyright Materialize, Inc. and contributors. All rights reserved.
//
// Use of this software is governed by the Business Source License
// included in the LICENSE file.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0.

//! Translation of SQL commands into timestamped `Controller` commands.
//!
//! The various SQL commands instruct the system to take actions that are not
//! yet explicitly timestamped. On the other hand, the underlying data continually
//! change as time moves forward. On the third hand, we greatly benefit from the
//! information that some times are no longer of interest, so that we may
//! compact the representation of the continually changing collections.
//!
//! The [`Coordinator`] curates these interactions by observing the progress
//! collections make through time, choosing timestamps for its own commands,
//! and eventually communicating that certain times have irretrievably "passed".
//!
//! ## Frontiers another way
//!
//! If the above description of frontiers left you with questions, this
//! repackaged explanation might help.
//!
//! - `since` is the least recent time (i.e. oldest time) that you can read
//!   from sources and be guaranteed that the returned data is accurate as of
//!   that time.
//!
//!   Reads at times less than `since` may return values that were not actually
//!   seen at the specified time, but arrived later (i.e. the results are
//!   compacted).
//!
//!   For correctness' sake, the coordinator never chooses to read at a time
//!   less than an arrangement's `since`.
//!
//! - `upper` is the first time after the most recent time that you can read
//!   from sources and receive an immediate response. Alternately, it is the
//!   least time at which the data may still change (that is the reason we may
//!   not be able to respond immediately).
//!
//!   Reads at times >= `upper` may not immediately return because the answer
//!   isn't known yet. However, once the `upper` is > the specified read time,
//!   the read can return.
//!
//!   For the sake of returned values' freshness, the coordinator prefers
//!   performing reads at an arrangement's `upper`. However, because we more
//!   strongly prefer correctness, the coordinator will choose timestamps
//!   greater than an object's `upper` if it is also being accessed alongside
//!   objects whose `since` times are >= its `upper`.
//!
//! This illustration attempts to show, with time moving left to right, the
//! relationship between `since` and `upper`.
//!
//! - `#`: possibly inaccurate results
//! - `-`: immediate, correct response
//! - `?`: not yet known
//! - `s`: since
//! - `u`: upper
//! - `|`: eligible for coordinator to select
//!
//! ```nofmt
//! ####s----u?????
//!     |||||||||||
//! ```
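//!
//! A minimal sketch (hypothetical; using timely's `Antichain` API) of the
//! correctness half of this rule: a read time `t` yields accurate results
//! exactly when the `since` frontier is less than or equal to `t`, which is
//! the region marked `|` above. Freshness (preferring `t` near `upper`) is a
//! separate, softer concern.
//!
//! ```ignore
//! use timely::progress::Antichain;
//!
//! /// Whether a read at `t` is eligible, i.e., returns uncompacted results.
//! fn eligible_read_time(t: u64, since: &Antichain<u64>) -> bool {
//!     // `less_equal` asks whether some element of the frontier is <= t.
//!     since.less_equal(&t)
//! }
//! ```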
//!

use std::borrow::Cow;
use std::collections::{BTreeMap, BTreeSet, VecDeque};
use std::net::IpAddr;
use std::num::NonZeroI64;
use std::ops::Neg;
use std::str::FromStr;
use std::sync::LazyLock;
use std::sync::{Arc, Mutex};
use std::thread;
use std::time::{Duration, Instant};
use std::{fmt, mem};

use anyhow::Context;
use chrono::{DateTime, Utc};
use derivative::Derivative;
use differential_dataflow::lattice::Lattice;
use fail::fail_point;
use futures::StreamExt;
use futures::future::{BoxFuture, FutureExt, LocalBoxFuture};
use http::Uri;
use ipnet::IpNet;
use itertools::{Either, Itertools};
use mz_adapter_types::bootstrap_builtin_cluster_config::BootstrapBuiltinClusterConfig;
use mz_adapter_types::compaction::CompactionWindow;
use mz_adapter_types::connection::ConnectionId;
use mz_adapter_types::dyncfgs::{
    USER_ID_POOL_BATCH_SIZE, WITH_0DT_DEPLOYMENT_CAUGHT_UP_CHECK_INTERVAL,
};
use mz_auth::password::Password;
use mz_build_info::BuildInfo;
use mz_catalog::builtin::{BUILTINS, BUILTINS_STATIC, MZ_AUDIT_EVENTS, MZ_STORAGE_USAGE_BY_SHARD};
use mz_catalog::config::{AwsPrincipalContext, BuiltinItemMigrationConfig, ClusterReplicaSizeMap};
use mz_catalog::durable::{AuditLogIterator, OpenableDurableCatalogState};
use mz_catalog::expr_cache::{GlobalExpressions, LocalExpressions};
use mz_catalog::memory::objects::{
    CatalogEntry, CatalogItem, ClusterReplicaProcessStatus, ClusterVariantManaged, Connection,
    DataSourceDesc, StateDiff, StateUpdate, StateUpdateKind, Table, TableDataSource,
};
use mz_cloud_resources::{CloudResourceController, VpcEndpointConfig, VpcEndpointEvent};
use mz_compute_client::as_of_selection;
use mz_compute_client::controller::error::{
    CollectionLookupError, CollectionMissing, DataflowCreationError, InstanceMissing,
};
use mz_compute_types::ComputeInstanceId;
use mz_compute_types::dataflows::DataflowDescription;
use mz_compute_types::plan::Plan;
use mz_controller::clusters::{
    ClusterConfig, ClusterEvent, ClusterStatus, ProcessId, ReplicaLocation,
};
use mz_controller::{ControllerConfig, Readiness};
use mz_controller_types::{ClusterId, ReplicaId, WatchSetId};
use mz_expr::{MapFilterProject, MirRelationExpr, OptimizedMirRelationExpr, RowSetFinishing};
use mz_license_keys::{ExpirationBehavior, ValidatedLicenseKey};
use mz_orchestrator::OfflineReason;
use mz_ore::cast::{CastFrom, CastInto, CastLossy};
use mz_ore::channel::trigger::Trigger;
use mz_ore::future::TimeoutError;
use mz_ore::metrics::MetricsRegistry;
use mz_ore::now::{EpochMillis, NowFn};
use mz_ore::task::{JoinHandle, spawn};
use mz_ore::thread::JoinHandleExt;
use mz_ore::tracing::{OpenTelemetryContext, TracingHandle};
use mz_ore::url::SensitiveUrl;
use mz_ore::{
    assert_none, instrument, soft_assert_eq_or_log, soft_assert_or_log, soft_panic_or_log, stack,
};
use mz_persist_client::PersistClient;
use mz_persist_client::batch::ProtoBatch;
use mz_persist_client::usage::{ShardsUsageReferenced, StorageUsageClient};
use mz_repr::adt::numeric::Numeric;
use mz_repr::explain::{ExplainConfig, ExplainFormat};
use mz_repr::global_id::TransientIdGen;
use mz_repr::optimize::{OptimizerFeatures, OverrideFrom};
use mz_repr::role_id::RoleId;
use mz_repr::{CatalogItemId, Diff, GlobalId, RelationDesc, SqlRelationType, Timestamp};
use mz_secrets::cache::CachingSecretsReader;
use mz_secrets::{SecretsController, SecretsReader};
use mz_sql::ast::{Raw, Statement};
use mz_sql::catalog::{CatalogCluster, EnvironmentId};
use mz_sql::names::{QualifiedItemName, ResolvedIds, SchemaSpecifier};
use mz_sql::optimizer_metrics::OptimizerMetrics;
use mz_sql::plan::{
    self, AlterSinkPlan, ConnectionDetails, CreateConnectionPlan, HirRelationExpr,
    NetworkPolicyRule, OnTimeoutAction, Params, QueryWhen,
};
use mz_sql::session::user::User;
use mz_sql::session::vars::{MAX_CREDIT_CONSUMPTION_RATE, SystemVars, Var};
use mz_sql_parser::ast::ExplainStage;
use mz_sql_parser::ast::display::AstDisplay;
use mz_storage_client::client::TableData;
use mz_storage_client::controller::{CollectionDescription, DataSource, ExportDescription};
use mz_storage_types::connections::Connection as StorageConnection;
use mz_storage_types::connections::ConnectionContext;
use mz_storage_types::connections::inline::{IntoInlineConnection, ReferencedConnection};
use mz_storage_types::read_holds::ReadHold;
use mz_storage_types::sinks::{S3SinkFormat, StorageSinkDesc};
use mz_storage_types::sources::kafka::KAFKA_PROGRESS_DESC;
use mz_storage_types::sources::{IngestionDescription, SourceExport, Timeline};
use mz_timestamp_oracle::{TimestampOracleConfig, WriteTimestamp};
use mz_transform::dataflow::DataflowMetainfo;
use opentelemetry::trace::TraceContextExt;
use serde::Serialize;
use thiserror::Error;
use timely::progress::{Antichain, Timestamp as _};
use tokio::runtime::Handle as TokioHandle;
use tokio::select;
use tokio::sync::{OwnedMutexGuard, mpsc, oneshot, watch};
use tokio::time::{Interval, MissedTickBehavior};
use tracing::{Instrument, Level, Span, debug, info, info_span, span, warn};
use tracing_opentelemetry::OpenTelemetrySpanExt;
use uuid::Uuid;

use crate::active_compute_sink::{ActiveComputeSink, ActiveCopyFrom};
use crate::catalog::{BuiltinTableUpdate, Catalog, OpenCatalogResult};
use crate::client::{Client, Handle};
use crate::command::{Command, ExecuteResponse};
use crate::config::{SynchronizedParameters, SystemParameterFrontend, SystemParameterSyncConfig};
use crate::coord::appends::{
    BuiltinTableAppendNotify, DeferredOp, GroupCommitPermit, PendingWriteTxn,
};
use crate::coord::caught_up::CaughtUpCheckContext;
use crate::coord::cluster_scheduling::SchedulingDecision;
use crate::coord::id_bundle::CollectionIdBundle;
use crate::coord::introspection::IntrospectionSubscribe;
use crate::coord::peek::PendingPeek;
use crate::coord::statement_logging::StatementLogging;
use crate::coord::timeline::{TimelineContext, TimelineState};
use crate::coord::timestamp_selection::{TimestampContext, TimestampDetermination};
use crate::coord::validity::PlanValidity;
use crate::error::AdapterError;
use crate::explain::insights::PlanInsightsContext;
use crate::explain::optimizer_trace::{DispatchGuard, OptimizerTrace};
use crate::metrics::Metrics;
use crate::optimize::dataflows::{ComputeInstanceSnapshot, DataflowBuilder};
use crate::optimize::{self, Optimize, OptimizerConfig};
use crate::session::{EndTransactionAction, Session};
use crate::statement_logging::{
    StatementEndedExecutionReason, StatementLifecycleEvent, StatementLoggingId,
};
use crate::util::{ClientTransmitter, ResultExt, sort_topological};
use crate::webhook::{WebhookAppenderInvalidator, WebhookConcurrencyLimiter};
use crate::{AdapterNotice, ReadHolds, flags};

pub(crate) mod appends;
pub(crate) mod catalog_serving;
pub(crate) mod cluster_scheduling;
pub(crate) mod consistency;
pub(crate) mod id_bundle;
pub(crate) mod in_memory_oracle;
pub(crate) mod peek;
pub(crate) mod read_policy;
pub(crate) mod read_then_write;
pub(crate) mod sequencer;
pub(crate) mod statement_logging;
pub(crate) mod timeline;
pub(crate) mod timestamp_selection;

pub mod catalog_implications;
mod caught_up;
mod command_handler;
mod ddl;
pub(crate) mod group_sync;
mod indexes;
mod introspection;
mod message_handler;
mod privatelink_status;
mod sql;
mod validity;

/// A pool of pre-allocated user IDs to avoid per-DDL persist writes.
///
/// IDs in the range `[next, upper)` are available for allocation.
/// When exhausted, the pool must be refilled via the catalog.
///
/// # Correctness
///
/// The pool is owned by [`Coordinator`], which processes all requests
/// on a single-threaded event loop. Because every access requires
/// `&mut self` on the coordinator, there is no concurrent access to the
/// pool — no additional synchronization is needed.
///
/// Global ID uniqueness is guaranteed because each refill calls
/// [`Catalog::allocate_user_ids`], which performs a durable persist
/// write that atomically reserves the entire batch before any IDs from
/// it are handed out. If the process crashes after a refill but before
/// all pre-allocated IDs are consumed, the unused IDs form harmless
/// gaps in the sequence — user IDs are not required to be contiguous.
///
/// This guarantee holds even if multiple `environmentd` processes run
/// concurrently. Each process has its own independent pool, but every
/// refill goes through the shared persist-backed catalog, which
/// serializes allocations across all callers. Two processes will
/// therefore never receive overlapping ID ranges, for the same reason
/// they could not before this pool existed.
#[derive(Debug)]
pub(crate) struct IdPool {
    next: u64,
    upper: u64,
}

impl IdPool {
    /// Creates an empty pool.
    pub fn empty() -> Self {
        IdPool { next: 0, upper: 0 }
    }

    /// Allocates a single ID from the pool, returning `None` if exhausted.
    pub fn allocate(&mut self) -> Option<u64> {
        if self.next < self.upper {
            let id = self.next;
            self.next += 1;
            Some(id)
        } else {
            None
        }
    }

    /// Allocates `n` consecutive IDs from the pool, returning `None` if
    /// insufficient IDs remain.
    pub fn allocate_many(&mut self, n: u64) -> Option<Vec<u64>> {
        if self.remaining() >= n {
            let ids = (self.next..self.next + n).collect();
            self.next += n;
            Some(ids)
        } else {
            None
        }
    }

    /// Returns the number of IDs remaining in the pool.
    pub fn remaining(&self) -> u64 {
        self.upper - self.next
    }

    /// Refills the pool with the given range `[next, upper)`.
    pub fn refill(&mut self, next: u64, upper: u64) {
        assert!(next <= upper, "invalid pool range: {next}..{upper}");
        self.next = next;
        self.upper = upper;
    }
}
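
// A minimal usage sketch (hypothetical test, not part of the production
// allocation flow): a freshly refilled pool hands out the half-open range
// `[next, upper)` exactly once, then reports exhaustion until refilled.
#[cfg(test)]
mod id_pool_tests {
    use super::IdPool;

    #[test]
    fn allocate_until_exhausted() {
        let mut pool = IdPool::empty();
        // An empty pool has nothing to hand out.
        assert_eq!(pool.allocate(), None);

        pool.refill(10, 13);
        assert_eq!(pool.remaining(), 3);
        assert_eq!(pool.allocate(), Some(10));
        assert_eq!(pool.allocate_many(2), Some(vec![11, 12]));
        // The range is exhausted; both allocation paths now report `None`.
        assert_eq!(pool.allocate(), None);
        assert_eq!(pool.allocate_many(1), None);
    }
}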

#[derive(Debug)]
pub enum Message {
    Command(OpenTelemetryContext, Command),
    ControllerReady {
        controller: ControllerReadiness,
    },
    PurifiedStatementReady(PurifiedStatementReady),
    CreateConnectionValidationReady(CreateConnectionValidationReady),
    AlterConnectionValidationReady(AlterConnectionValidationReady),
    TryDeferred {
        /// The connection that created this op.
        conn_id: ConnectionId,
        /// The write lock that notified us our deferred op might be able to run.
        ///
        /// Note: While we never want to hold a partial set of locks, it can be important to hold
        /// onto the _one_ that notified us our op might be ready. If there are multiple operations
        /// waiting on a single collection, and we don't hold this lock through retrying the op,
        /// then everything waiting on this collection will get retried, causing traffic in the
        /// Coordinator's message queue.
        ///
        /// See [`DeferredOp::can_be_optimistically_retried`] for more detail.
        acquired_lock: Option<(CatalogItemId, tokio::sync::OwnedMutexGuard<()>)>,
    },
    /// Initiates a group commit.
    GroupCommitInitiate(Span, Option<GroupCommitPermit>),
    DeferredStatementReady,
    AdvanceTimelines,
    ClusterEvent(ClusterEvent),
    CancelPendingPeeks {
        conn_id: ConnectionId,
    },
    LinearizeReads,
    StagedBatches {
        conn_id: ConnectionId,
        table_id: CatalogItemId,
        batches: Vec<Result<ProtoBatch, String>>,
    },
    StorageUsageSchedule,
    StorageUsageFetch,
    StorageUsageUpdate(ShardsUsageReferenced),
    StorageUsagePrune(Vec<BuiltinTableUpdate>),
    ArrangementSizesSchedule,
    ArrangementSizesSnapshot,
    ArrangementSizesPrune(Vec<BuiltinTableUpdate>),
    /// Performs any cleanup and logging actions necessary for
    /// finalizing a statement execution.
    RetireExecute {
        data: ExecuteContextExtra,
        otel_ctx: OpenTelemetryContext,
        reason: StatementEndedExecutionReason,
    },
    ExecuteSingleStatementTransaction {
        ctx: ExecuteContext,
        otel_ctx: OpenTelemetryContext,
        stmt: Arc<Statement<Raw>>,
        params: mz_sql::plan::Params,
    },
    PeekStageReady {
        ctx: ExecuteContext,
        span: Span,
        stage: PeekStage,
    },
    CreateIndexStageReady {
        ctx: ExecuteContext,
        span: Span,
        stage: CreateIndexStage,
    },
    CreateViewStageReady {
        ctx: ExecuteContext,
        span: Span,
        stage: CreateViewStage,
    },
    CreateMaterializedViewStageReady {
        ctx: ExecuteContext,
        span: Span,
        stage: CreateMaterializedViewStage,
    },
    SubscribeStageReady {
        ctx: ExecuteContext,
        span: Span,
        stage: SubscribeStage,
    },
    IntrospectionSubscribeStageReady {
        span: Span,
        stage: IntrospectionSubscribeStage,
    },
    SecretStageReady {
        ctx: ExecuteContext,
        span: Span,
        stage: SecretStage,
    },
    ClusterStageReady {
        ctx: ExecuteContext,
        span: Span,
        stage: ClusterStage,
    },
    ExplainTimestampStageReady {
        ctx: ExecuteContext,
        span: Span,
        stage: ExplainTimestampStage,
    },
    DrainStatementLog,
    PrivateLinkVpcEndpointEvents(Vec<VpcEndpointEvent>),
    CheckSchedulingPolicies,

    /// Scheduling policy decisions about turning clusters On/Off.
    /// `Vec<(policy name, Vec of decisions by the policy)>`
    /// A cluster will be On if and only if there is at least one On decision for it.
    /// Scheduling decisions for clusters that have `SCHEDULE = MANUAL` are ignored.
    SchedulingDecisions(Vec<(&'static str, Vec<(ClusterId, SchedulingDecision)>)>),
}

impl Message {
    /// Returns a string to identify the kind of [`Message`], useful for logging.
    pub const fn kind(&self) -> &'static str {
        match self {
            Message::Command(_, msg) => match msg {
                Command::CatalogSnapshot { .. } => "command-catalog_snapshot",
                Command::Startup { .. } => "command-startup",
                Command::Execute { .. } => "command-execute",
                Command::Commit { .. } => "command-commit",
                Command::CancelRequest { .. } => "command-cancel_request",
                Command::PrivilegedCancelRequest { .. } => "command-privileged_cancel_request",
                Command::GetWebhook { .. } => "command-get_webhook",
                Command::GetSystemVars { .. } => "command-get_system_vars",
                Command::SetSystemVars { .. } => "command-set_system_vars",
                Command::Terminate { .. } => "command-terminate",
                Command::RetireExecute { .. } => "command-retire_execute",
                Command::CheckConsistency { .. } => "command-check_consistency",
                Command::Dump { .. } => "command-dump",
                Command::AuthenticatePassword { .. } => "command-auth_check",
                Command::AuthenticateGetSASLChallenge { .. } => "command-auth_get_sasl_challenge",
                Command::AuthenticateVerifySASLProof { .. } => "command-auth_verify_sasl_proof",
                Command::CheckRoleCanLogin { .. } => "command-check_role_can_login",
                Command::GetComputeInstanceClient { .. } => "get-compute-instance-client",
                Command::GetOracle { .. } => "get-oracle",
                Command::DetermineRealTimeRecentTimestamp { .. } => {
                    "determine-real-time-recent-timestamp"
                }
                Command::GetTransactionReadHoldsBundle { .. } => {
                    "get-transaction-read-holds-bundle"
                }
                Command::StoreTransactionReadHolds { .. } => "store-transaction-read-holds",
                Command::ExecuteSlowPathPeek { .. } => "execute-slow-path-peek",
                Command::ExecuteSubscribe { .. } => "execute-subscribe",
                Command::CopyToPreflight { .. } => "copy-to-preflight",
                Command::ExecuteCopyTo { .. } => "execute-copy-to",
                Command::ExecuteSideEffectingFunc { .. } => "execute-side-effecting-func",
                Command::RegisterFrontendPeek { .. } => "register-frontend-peek",
                Command::UnregisterFrontendPeek { .. } => "unregister-frontend-peek",
                Command::ExplainTimestamp { .. } => "explain-timestamp",
                Command::FrontendStatementLogging(..) => "frontend-statement-logging",
                Command::StartCopyFromStdin { .. } => "start-copy-from-stdin",
                Command::InjectAuditEvents { .. } => "inject-audit-events",
            },
            Message::ControllerReady {
                controller: ControllerReadiness::Compute,
            } => "controller_ready(compute)",
            Message::ControllerReady {
                controller: ControllerReadiness::Storage,
            } => "controller_ready(storage)",
            Message::ControllerReady {
                controller: ControllerReadiness::Metrics,
            } => "controller_ready(metrics)",
            Message::ControllerReady {
                controller: ControllerReadiness::Internal,
            } => "controller_ready(internal)",
            Message::PurifiedStatementReady(_) => "purified_statement_ready",
            Message::CreateConnectionValidationReady(_) => "create_connection_validation_ready",
            Message::TryDeferred { .. } => "try_deferred",
            Message::GroupCommitInitiate(..) => "group_commit_initiate",
            Message::AdvanceTimelines => "advance_timelines",
            Message::ClusterEvent(_) => "cluster_event",
            Message::CancelPendingPeeks { .. } => "cancel_pending_peeks",
            Message::LinearizeReads => "linearize_reads",
            Message::StagedBatches { .. } => "staged_batches",
            Message::StorageUsageSchedule => "storage_usage_schedule",
            Message::StorageUsageFetch => "storage_usage_fetch",
            Message::StorageUsageUpdate(_) => "storage_usage_update",
            Message::StorageUsagePrune(_) => "storage_usage_prune",
            Message::ArrangementSizesSchedule => "arrangement_sizes_schedule",
            Message::ArrangementSizesSnapshot => "arrangement_sizes_snapshot",
            Message::ArrangementSizesPrune(_) => "arrangement_sizes_prune",
            Message::RetireExecute { .. } => "retire_execute",
            Message::ExecuteSingleStatementTransaction { .. } => {
                "execute_single_statement_transaction"
            }
            Message::PeekStageReady { .. } => "peek_stage_ready",
            Message::ExplainTimestampStageReady { .. } => "explain_timestamp_stage_ready",
            Message::CreateIndexStageReady { .. } => "create_index_stage_ready",
            Message::CreateViewStageReady { .. } => "create_view_stage_ready",
            Message::CreateMaterializedViewStageReady { .. } => {
                "create_materialized_view_stage_ready"
            }
            Message::SubscribeStageReady { .. } => "subscribe_stage_ready",
            Message::IntrospectionSubscribeStageReady { .. } => {
                "introspection_subscribe_stage_ready"
            }
            Message::SecretStageReady { .. } => "secret_stage_ready",
            Message::ClusterStageReady { .. } => "cluster_stage_ready",
            Message::DrainStatementLog => "drain_statement_log",
            Message::AlterConnectionValidationReady(..) => "alter_connection_validation_ready",
            Message::PrivateLinkVpcEndpointEvents(_) => "private_link_vpc_endpoint_events",
            Message::CheckSchedulingPolicies => "check_scheduling_policies",
            Message::SchedulingDecisions { .. } => "scheduling_decision",
            Message::DeferredStatementReady => "deferred_statement_ready",
        }
    }
}

/// The reason why a controller needs processing on the main loop.
#[derive(Debug)]
pub enum ControllerReadiness {
    /// The storage controller is ready.
    Storage,
    /// The compute controller is ready.
    Compute,
    /// A batch of metric data is ready.
    Metrics,
    /// An internally-generated message is ready to be returned.
    Internal,
}

#[derive(Derivative)]
#[derivative(Debug)]
pub struct BackgroundWorkResult<T> {
    #[derivative(Debug = "ignore")]
    pub ctx: ExecuteContext,
    pub result: Result<T, AdapterError>,
    pub params: Params,
    pub plan_validity: PlanValidity,
    pub original_stmt: Arc<Statement<Raw>>,
    pub otel_ctx: OpenTelemetryContext,
}

pub type PurifiedStatementReady = BackgroundWorkResult<mz_sql::pure::PurifiedStatement>;

#[derive(Derivative)]
#[derivative(Debug)]
pub struct ValidationReady<T> {
    #[derivative(Debug = "ignore")]
    pub ctx: ExecuteContext,
    pub result: Result<T, AdapterError>,
    pub resolved_ids: ResolvedIds,
    pub connection_id: CatalogItemId,
    pub connection_gid: GlobalId,
    pub plan_validity: PlanValidity,
    pub otel_ctx: OpenTelemetryContext,
}

pub type CreateConnectionValidationReady = ValidationReady<CreateConnectionPlan>;
pub type AlterConnectionValidationReady = ValidationReady<Connection>;

#[derive(Debug)]
pub enum PeekStage {
    /// Common stages across SELECT, EXPLAIN and COPY TO queries.
    LinearizeTimestamp(PeekStageLinearizeTimestamp),
    RealTimeRecency(PeekStageRealTimeRecency),
    TimestampReadHold(PeekStageTimestampReadHold),
    Optimize(PeekStageOptimize),
    /// Final stage for a peek.
    Finish(PeekStageFinish),
    /// Final stage for an explain.
    ExplainPlan(PeekStageExplainPlan),
    ExplainPushdown(PeekStageExplainPushdown),
    /// Preflight checks for a copy to operation.
    CopyToPreflight(PeekStageCopyTo),
    /// Final stage for a copy to which involves shipping the dataflow.
    CopyToDataflow(PeekStageCopyTo),
}

#[derive(Debug)]
pub struct CopyToContext {
    /// The `RelationDesc` of the data to be copied.
    pub desc: RelationDesc,
    /// The destination URI of the external service where the data will be copied.
    pub uri: Uri,
    /// Connection information required to connect to the external service to copy the data.
    pub connection: StorageConnection<ReferencedConnection>,
    /// The ID of the CONNECTION object to be used for copying the data.
    pub connection_id: CatalogItemId,
    /// Format params to format the data.
    pub format: S3SinkFormat,
    /// Approximate max file size of each uploaded file.
    pub max_file_size: u64,
    /// Number of batches the output of the COPY TO will be partitioned into
    /// to distribute the load across workers deterministically.
    /// This is an `Option` only because it is not set when `CopyToContext` is
    /// instantiated, but immediately afterward, in the `PeekStageValidate` stage.
    pub output_batch_count: Option<u64>,
}

#[derive(Debug)]
pub struct PeekStageLinearizeTimestamp {
    validity: PlanValidity,
    plan: mz_sql::plan::SelectPlan,
    max_query_result_size: Option<u64>,
    source_ids: BTreeSet<GlobalId>,
    target_replica: Option<ReplicaId>,
    timeline_context: TimelineContext,
    optimizer: Either<optimize::peek::Optimizer, optimize::copy_to::Optimizer>,
    /// An optional context set iff the state machine is initiated from
    /// sequencing an EXPLAIN for this statement.
    explain_ctx: ExplainContext,
}

#[derive(Debug)]
pub struct PeekStageRealTimeRecency {
    validity: PlanValidity,
    plan: mz_sql::plan::SelectPlan,
    max_query_result_size: Option<u64>,
    source_ids: BTreeSet<GlobalId>,
    target_replica: Option<ReplicaId>,
    timeline_context: TimelineContext,
    oracle_read_ts: Option<Timestamp>,
    optimizer: Either<optimize::peek::Optimizer, optimize::copy_to::Optimizer>,
    /// An optional context set iff the state machine is initiated from
    /// sequencing an EXPLAIN for this statement.
    explain_ctx: ExplainContext,
}

#[derive(Debug)]
pub struct PeekStageTimestampReadHold {
    validity: PlanValidity,
    plan: mz_sql::plan::SelectPlan,
    max_query_result_size: Option<u64>,
    source_ids: BTreeSet<GlobalId>,
    target_replica: Option<ReplicaId>,
    timeline_context: TimelineContext,
    oracle_read_ts: Option<Timestamp>,
    real_time_recency_ts: Option<mz_repr::Timestamp>,
    optimizer: Either<optimize::peek::Optimizer, optimize::copy_to::Optimizer>,
    /// An optional context set iff the state machine is initiated from
    /// sequencing an EXPLAIN for this statement.
    explain_ctx: ExplainContext,
}

#[derive(Debug)]
pub struct PeekStageOptimize {
    validity: PlanValidity,
    plan: mz_sql::plan::SelectPlan,
    max_query_result_size: Option<u64>,
    source_ids: BTreeSet<GlobalId>,
    id_bundle: CollectionIdBundle,
    target_replica: Option<ReplicaId>,
    determination: TimestampDetermination,
    optimizer: Either<optimize::peek::Optimizer, optimize::copy_to::Optimizer>,
    /// An optional context set iff the state machine is initiated from
    /// sequencing an EXPLAIN for this statement.
    explain_ctx: ExplainContext,
}

#[derive(Debug)]
pub struct PeekStageFinish {
    validity: PlanValidity,
    plan: mz_sql::plan::SelectPlan,
    max_query_result_size: Option<u64>,
    id_bundle: CollectionIdBundle,
    target_replica: Option<ReplicaId>,
    source_ids: BTreeSet<GlobalId>,
    determination: TimestampDetermination,
    cluster_id: ComputeInstanceId,
    finishing: RowSetFinishing,
    /// When present, an optimizer trace to be used for emitting a plan insights
    /// notice.
    plan_insights_optimizer_trace: Option<OptimizerTrace>,
    insights_ctx: Option<Box<PlanInsightsContext>>,
    global_lir_plan: optimize::peek::GlobalLirPlan,
    optimization_finished_at: EpochMillis,
}

#[derive(Debug)]
pub struct PeekStageCopyTo {
    validity: PlanValidity,
    optimizer: optimize::copy_to::Optimizer,
    global_lir_plan: optimize::copy_to::GlobalLirPlan,
    optimization_finished_at: EpochMillis,
    source_ids: BTreeSet<GlobalId>,
}

#[derive(Debug)]
pub struct PeekStageExplainPlan {
    validity: PlanValidity,
    optimizer: optimize::peek::Optimizer,
    df_meta: DataflowMetainfo,
    explain_ctx: ExplainPlanContext,
    insights_ctx: Option<Box<PlanInsightsContext>>,
}

#[derive(Debug)]
pub struct PeekStageExplainPushdown {
    validity: PlanValidity,
    determination: TimestampDetermination,
    imports: BTreeMap<GlobalId, MapFilterProject>,
}

#[derive(Debug)]
pub enum CreateIndexStage {
    Optimize(CreateIndexOptimize),
    Finish(CreateIndexFinish),
    Explain(CreateIndexExplain),
}

#[derive(Debug)]
pub struct CreateIndexOptimize {
    validity: PlanValidity,
    plan: plan::CreateIndexPlan,
    resolved_ids: ResolvedIds,
    /// An optional context set iff the state machine is initiated from
    /// sequencing an EXPLAIN for this statement.
    explain_ctx: ExplainContext,
}

#[derive(Debug)]
pub struct CreateIndexFinish {
    validity: PlanValidity,
    item_id: CatalogItemId,
    global_id: GlobalId,
    plan: plan::CreateIndexPlan,
    resolved_ids: ResolvedIds,
    global_mir_plan: optimize::index::GlobalMirPlan,
    global_lir_plan: optimize::index::GlobalLirPlan,
    optimizer_features: OptimizerFeatures,
}

#[derive(Debug)]
pub struct CreateIndexExplain {
    validity: PlanValidity,
    exported_index_id: GlobalId,
    plan: plan::CreateIndexPlan,
    df_meta: DataflowMetainfo,
    explain_ctx: ExplainPlanContext,
}

#[derive(Debug)]
pub enum CreateViewStage {
    Optimize(CreateViewOptimize),
    Finish(CreateViewFinish),
    Explain(CreateViewExplain),
}

#[derive(Debug)]
pub struct CreateViewOptimize {
    validity: PlanValidity,
    plan: plan::CreateViewPlan,
    resolved_ids: ResolvedIds,
    /// An optional context set iff the state machine is initiated from
    /// sequencing an EXPLAIN for this statement.
    explain_ctx: ExplainContext,
}

#[derive(Debug)]
pub struct CreateViewFinish {
    validity: PlanValidity,
    /// ID of this item in the Catalog.
    item_id: CatalogItemId,
    /// ID by which Compute will reference this View.
    global_id: GlobalId,
    plan: plan::CreateViewPlan,
    /// IDs of objects resolved during name resolution.
    resolved_ids: ResolvedIds,
    optimized_expr: OptimizedMirRelationExpr,
}

#[derive(Debug)]
pub struct CreateViewExplain {
    validity: PlanValidity,
    id: GlobalId,
    plan: plan::CreateViewPlan,
    explain_ctx: ExplainPlanContext,
}

#[derive(Debug)]
pub enum ExplainTimestampStage {
    Optimize(ExplainTimestampOptimize),
    RealTimeRecency(ExplainTimestampRealTimeRecency),
    Finish(ExplainTimestampFinish),
}

#[derive(Debug)]
pub struct ExplainTimestampOptimize {
    validity: PlanValidity,
    plan: plan::ExplainTimestampPlan,
    cluster_id: ClusterId,
}

#[derive(Debug)]
pub struct ExplainTimestampRealTimeRecency {
    validity: PlanValidity,
    format: ExplainFormat,
    optimized_plan: OptimizedMirRelationExpr,
    cluster_id: ClusterId,
    when: QueryWhen,
}

#[derive(Debug)]
pub struct ExplainTimestampFinish {
    validity: PlanValidity,
    format: ExplainFormat,
    optimized_plan: OptimizedMirRelationExpr,
    cluster_id: ClusterId,
    source_ids: BTreeSet<GlobalId>,
    when: QueryWhen,
    real_time_recency_ts: Option<Timestamp>,
}

#[derive(Debug)]
pub enum ClusterStage {
    Alter(AlterCluster),
    WaitForHydrated(AlterClusterWaitForHydrated),
    Finalize(AlterClusterFinalize),
}

#[derive(Debug)]
pub struct AlterCluster {
    validity: PlanValidity,
    plan: plan::AlterClusterPlan,
}

#[derive(Debug)]
pub struct AlterClusterWaitForHydrated {
    validity: PlanValidity,
    plan: plan::AlterClusterPlan,
    new_config: ClusterVariantManaged,
    workload_class: Option<String>,
    timeout_time: Instant,
    on_timeout: OnTimeoutAction,
}

#[derive(Debug)]
pub struct AlterClusterFinalize {
    validity: PlanValidity,
    plan: plan::AlterClusterPlan,
    new_config: ClusterVariantManaged,
    workload_class: Option<String>,
}

#[derive(Debug)]
pub enum ExplainContext {
    /// The ordinary, non-explain variant of the statement.
    None,
    /// The `EXPLAIN <level> PLAN FOR <explainee>` version of the statement.
    Plan(ExplainPlanContext),
    /// Generate a notice containing the `EXPLAIN PLAN INSIGHTS` output
    /// alongside the query's normal output.
    PlanInsightsNotice(OptimizerTrace),
    /// `EXPLAIN FILTER PUSHDOWN`
    Pushdown,
}

impl ExplainContext {
    /// If available for this context, wrap the [`OptimizerTrace`] into a
    /// [`tracing::Dispatch`] and set it as default, returning the resulting
    /// guard in a `Some(guard)` option.
    pub(crate) fn dispatch_guard(&self) -> Option<DispatchGuard<'_>> {
        let optimizer_trace = match self {
            ExplainContext::Plan(explain_ctx) => Some(&explain_ctx.optimizer_trace),
            ExplainContext::PlanInsightsNotice(optimizer_trace) => Some(optimizer_trace),
            _ => None,
        };
        optimizer_trace.map(|optimizer_trace| optimizer_trace.as_guard())
    }

    pub(crate) fn needs_cluster(&self) -> bool {
        match self {
            ExplainContext::None => true,
            ExplainContext::Plan(..) => false,
            ExplainContext::PlanInsightsNotice(..) => true,
            ExplainContext::Pushdown => false,
        }
    }

    pub(crate) fn needs_plan_insights(&self) -> bool {
        matches!(
            self,
            ExplainContext::Plan(ExplainPlanContext {
                stage: ExplainStage::PlanInsights,
                ..
            }) | ExplainContext::PlanInsightsNotice(_)
        )
    }
}

#[derive(Debug)]
pub struct ExplainPlanContext {
    /// EXPLAIN BROKEN is internal syntax for showing EXPLAIN output despite an internal error in
    /// the optimizer: we don't immediately bail out from peek sequencing when an internal optimizer
    /// error happens, but go on with trying to show the requested EXPLAIN stage. This can still
    /// succeed if the requested EXPLAIN stage is before the point where the error happened.
    pub broken: bool,
    pub config: ExplainConfig,
    pub format: ExplainFormat,
    pub stage: ExplainStage,
    pub replan: Option<GlobalId>,
    pub desc: Option<RelationDesc>,
    pub optimizer_trace: OptimizerTrace,
}

#[derive(Debug)]
pub enum CreateMaterializedViewStage {
    Optimize(CreateMaterializedViewOptimize),
    Finish(CreateMaterializedViewFinish),
    Explain(CreateMaterializedViewExplain),
}

#[derive(Debug)]
pub struct CreateMaterializedViewOptimize {
    validity: PlanValidity,
    plan: plan::CreateMaterializedViewPlan,
    resolved_ids: ResolvedIds,
    /// An optional context set iff the state machine is initiated from
    /// sequencing an EXPLAIN for this statement.
    explain_ctx: ExplainContext,
}

#[derive(Debug)]
pub struct CreateMaterializedViewFinish {
    /// The ID of this Materialized View in the Catalog.
    item_id: CatalogItemId,
    /// The ID of the durable pTVC backing this Materialized View.
    global_id: GlobalId,
    validity: PlanValidity,
    plan: plan::CreateMaterializedViewPlan,
    resolved_ids: ResolvedIds,
    local_mir_plan: optimize::materialized_view::LocalMirPlan,
    global_mir_plan: optimize::materialized_view::GlobalMirPlan,
    global_lir_plan: optimize::materialized_view::GlobalLirPlan,
    optimizer_features: OptimizerFeatures,
}

#[derive(Debug)]
pub struct CreateMaterializedViewExplain {
    global_id: GlobalId,
    validity: PlanValidity,
    plan: plan::CreateMaterializedViewPlan,
    df_meta: DataflowMetainfo,
    explain_ctx: ExplainPlanContext,
}

#[derive(Debug)]
pub enum SubscribeStage {
    OptimizeMir(SubscribeOptimizeMir),
    TimestampOptimizeLir(SubscribeTimestampOptimizeLir),
    Finish(SubscribeFinish),
    Explain(SubscribeExplain),
}

#[derive(Debug)]
pub struct SubscribeOptimizeMir {
    validity: PlanValidity,
    plan: plan::SubscribePlan,
    timeline: TimelineContext,
    dependency_ids: BTreeSet<GlobalId>,
    cluster_id: ComputeInstanceId,
    replica_id: Option<ReplicaId>,
    /// An optional context set iff the state machine is initiated from
    /// sequencing an EXPLAIN for this statement.
    explain_ctx: ExplainContext,
}

#[derive(Debug)]
pub struct SubscribeTimestampOptimizeLir {
    validity: PlanValidity,
    plan: plan::SubscribePlan,
    timeline: TimelineContext,
    optimizer: optimize::subscribe::Optimizer,
    global_mir_plan: optimize::subscribe::GlobalMirPlan<optimize::subscribe::Unresolved>,
    dependency_ids: BTreeSet<GlobalId>,
    replica_id: Option<ReplicaId>,
    /// An optional context set iff the state machine is initiated from
    /// sequencing an EXPLAIN for this statement.
    explain_ctx: ExplainContext,
}

#[derive(Debug)]
pub struct SubscribeFinish {
    validity: PlanValidity,
    cluster_id: ComputeInstanceId,
    replica_id: Option<ReplicaId>,
    plan: plan::SubscribePlan,
    global_lir_plan: optimize::subscribe::GlobalLirPlan,
    dependency_ids: BTreeSet<GlobalId>,
}

#[derive(Debug)]
pub struct SubscribeExplain {
    validity: PlanValidity,
    optimizer: optimize::subscribe::Optimizer,
    df_meta: DataflowMetainfo,
    cluster_id: ComputeInstanceId,
    explain_ctx: ExplainPlanContext,
}

#[derive(Debug)]
pub enum IntrospectionSubscribeStage {
    OptimizeMir(IntrospectionSubscribeOptimizeMir),
    TimestampOptimizeLir(IntrospectionSubscribeTimestampOptimizeLir),
    Finish(IntrospectionSubscribeFinish),
}

#[derive(Debug)]
pub struct IntrospectionSubscribeOptimizeMir {
    validity: PlanValidity,
    plan: plan::SubscribePlan,
    subscribe_id: GlobalId,
    cluster_id: ComputeInstanceId,
    replica_id: ReplicaId,
}

#[derive(Debug)]
pub struct IntrospectionSubscribeTimestampOptimizeLir {
    validity: PlanValidity,
    optimizer: optimize::subscribe::Optimizer,
    global_mir_plan: optimize::subscribe::GlobalMirPlan<optimize::subscribe::Unresolved>,
    cluster_id: ComputeInstanceId,
    replica_id: ReplicaId,
}

#[derive(Debug)]
pub struct IntrospectionSubscribeFinish {
    validity: PlanValidity,
    global_lir_plan: optimize::subscribe::GlobalLirPlan,
    read_holds: ReadHolds,
    cluster_id: ComputeInstanceId,
    replica_id: ReplicaId,
}

#[derive(Debug)]
pub enum SecretStage {
    CreateEnsure(CreateSecretEnsure),
    CreateFinish(CreateSecretFinish),
    RotateKeysEnsure(RotateKeysSecretEnsure),
    RotateKeysFinish(RotateKeysSecretFinish),
    Alter(AlterSecret),
}

#[derive(Debug)]
pub struct CreateSecretEnsure {
    validity: PlanValidity,
    plan: plan::CreateSecretPlan,
}

#[derive(Debug)]
pub struct CreateSecretFinish {
    validity: PlanValidity,
    item_id: CatalogItemId,
    global_id: GlobalId,
    plan: plan::CreateSecretPlan,
}

#[derive(Debug)]
pub struct RotateKeysSecretEnsure {
    validity: PlanValidity,
    id: CatalogItemId,
}

#[derive(Debug)]
pub struct RotateKeysSecretFinish {
    validity: PlanValidity,
    ops: Vec<crate::catalog::Op>,
}

#[derive(Debug)]
pub struct AlterSecret {
    validity: PlanValidity,
    plan: plan::AlterSecretPlan,
}

/// An enum describing which cluster to run a statement on.
///
/// One example usage would be that if a query depends only on system tables, we might
/// automatically run it on the catalog server cluster to benefit from indexes that exist there.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum TargetCluster {
    /// The catalog server cluster.
    CatalogServer,
    /// The current user's active cluster.
    Active,
    /// The cluster selected at the start of a transaction.
    Transaction(ClusterId),
}

/// Result types for each stage of a sequence.
pub(crate) enum StageResult<T> {
    /// A task was spawned that will return the next stage.
    Handle(JoinHandle<Result<T, AdapterError>>),
    /// A task was spawned that will return a response for the client.
    HandleRetire(JoinHandle<Result<ExecuteResponse, AdapterError>>),
    /// The next stage is immediately ready and will execute.
    Immediate(T),
    /// The final stage was executed and is ready to respond to the client.
    Response(ExecuteResponse),
}

/// Common functionality for [`Coordinator::sequence_staged`].
pub(crate) trait Staged: Send {
    type Ctx: StagedContext;

    fn validity(&mut self) -> &mut PlanValidity;

    /// Returns the next stage or final result.
    async fn stage(
        self,
        coord: &mut Coordinator,
        ctx: &mut Self::Ctx,
    ) -> Result<StageResult<Box<Self>>, AdapterError>;

    /// Prepares a message for the Coordinator.
    fn message(self, ctx: Self::Ctx, span: Span) -> Message;

    /// Whether it is safe to SQL cancel this stage.
    fn cancel_enabled(&self) -> bool;
}
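
// A minimal sketch (hypothetical; the real driver is
// `Coordinator::sequence_staged`) of how a `Staged` implementation is
// advanced: each call to `stage` either yields the next stage inline, hands
// back a spawned task whose output re-enters the coordinator's message queue
// (wrapped via `Staged::message`), or produces a terminal outcome that
// retires the context.
//
// async fn drive<S: Staged>(coord: &mut Coordinator, mut ctx: S::Ctx, mut stage: S) {
//     loop {
//         match stage.stage(coord, &mut ctx).await {
//             // The next stage is ready; run it on the next loop iteration.
//             Ok(StageResult::Immediate(next)) => stage = *next,
//             // A task was spawned; its result comes back as a `Message`,
//             // so stop driving here and let the event loop resume later.
//             Ok(StageResult::Handle(_)) | Ok(StageResult::HandleRetire(_)) => return,
//             // Terminal outcomes retire the context with a response/error.
//             Ok(StageResult::Response(resp)) => return ctx.retire(Ok(resp)),
//             Err(e) => return ctx.retire(Err(e)),
//         }
//     }
// }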

pub trait StagedContext {
    fn retire(self, result: Result<ExecuteResponse, AdapterError>);
    fn session(&self) -> Option<&Session>;
}

impl StagedContext for ExecuteContext {
    fn retire(self, result: Result<ExecuteResponse, AdapterError>) {
        self.retire(result);
    }

    fn session(&self) -> Option<&Session> {
        Some(self.session())
    }
}

impl StagedContext for () {
    fn retire(self, _result: Result<ExecuteResponse, AdapterError>) {}

    fn session(&self) -> Option<&Session> {
        None
    }
}

/// Configures a coordinator.
pub struct Config {
    pub controller_config: ControllerConfig,
    pub controller_envd_epoch: NonZeroI64,
    pub storage: Box<dyn mz_catalog::durable::DurableCatalogState>,
    pub audit_logs_iterator: AuditLogIterator,
    pub timestamp_oracle_url: Option<SensitiveUrl>,
    pub unsafe_mode: bool,
    pub all_features: bool,
    pub build_info: &'static BuildInfo,
    pub environment_id: EnvironmentId,
    pub metrics_registry: MetricsRegistry,
    pub now: NowFn,
    pub secrets_controller: Arc<dyn SecretsController>,
    pub cloud_resource_controller: Option<Arc<dyn CloudResourceController>>,
    pub availability_zones: Vec<String>,
    pub cluster_replica_sizes: ClusterReplicaSizeMap,
    pub builtin_system_cluster_config: BootstrapBuiltinClusterConfig,
    pub builtin_catalog_server_cluster_config: BootstrapBuiltinClusterConfig,
    pub builtin_probe_cluster_config: BootstrapBuiltinClusterConfig,
    pub builtin_support_cluster_config: BootstrapBuiltinClusterConfig,
    pub builtin_analytics_cluster_config: BootstrapBuiltinClusterConfig,
    pub system_parameter_defaults: BTreeMap<String, String>,
    pub storage_usage_client: StorageUsageClient,
    pub storage_usage_collection_interval: Duration,
    pub storage_usage_retention_period: Option<Duration>,
    pub segment_client: Option<mz_segment::Client>,
    pub egress_addresses: Vec<IpNet>,
    pub remote_system_parameters: Option<BTreeMap<String, String>>,
    pub aws_account_id: Option<String>,
    pub aws_privatelink_availability_zones: Option<Vec<String>>,
    pub connection_context: ConnectionContext,
    pub connection_limit_callback: Box<dyn Fn(u64, u64) -> () + Send + Sync + 'static>,
    pub webhook_concurrency_limit: WebhookConcurrencyLimiter,
    pub http_host_name: Option<String>,
    pub tracing_handle: TracingHandle,
    /// Whether or not to start controllers in read-only mode. This is only
    /// meant for use during development of read-only clusters and 0dt upgrades
    /// and should go away once we have proper orchestration during upgrades.
    pub read_only_controllers: bool,

    /// A trigger that signals that the current deployment has caught up with a
    /// previous deployment. Only used during 0dt deployment, while in read-only
    /// mode.
    pub caught_up_trigger: Option<Trigger>,

    pub helm_chart_version: Option<String>,
    pub license_key: ValidatedLicenseKey,
    pub external_login_password_mz_system: Option<Password>,
    pub force_builtin_schema_migration: Option<String>,
}

/// Metadata about an active connection.
#[derive(Debug, Serialize)]
pub struct ConnMeta {
    /// Pgwire specifies that every connection have a 32-bit secret associated
    /// with it, that is known to both the client and the server. Cancellation
    /// requests are required to authenticate with the secret of the connection
    /// that they are targeting.
    secret_key: u32,
    /// The time when the session's connection was initiated.
    connected_at: EpochMillis,
    user: User,
    application_name: String,
    uuid: Uuid,
    conn_id: ConnectionId,
    client_ip: Option<IpAddr>,

    /// Sinks that will need to be dropped when the current transaction, if
    /// any, is cleared.
    drop_sinks: BTreeSet<GlobalId>,

    /// Lock for the Coordinator's deferred statements that is dropped on transaction clear.
    #[serde(skip)]
    deferred_lock: Option<OwnedMutexGuard<()>>,

    /// Cluster reconfigurations that will need to be
    /// cleaned up when the current transaction is cleared.
    pending_cluster_alters: BTreeSet<ClusterId>,

    /// Channel on which to send notices to a session.
    #[serde(skip)]
    notice_tx: mpsc::UnboundedSender<AdapterNotice>,

    /// The role that initiated the database context. Fixed for the duration of the connection.
    /// WARNING: This role reference is not updated when the role is dropped.
    /// Consumers should not assume that this role exists.
    authenticated_role: RoleId,
}

impl ConnMeta {
    pub fn conn_id(&self) -> &ConnectionId {
        &self.conn_id
    }

    pub fn user(&self) -> &User {
        &self.user
    }

    pub fn application_name(&self) -> &str {
        &self.application_name
    }

    pub fn authenticated_role_id(&self) -> &RoleId {
        &self.authenticated_role
    }

    pub fn uuid(&self) -> Uuid {
        self.uuid
    }

    pub fn client_ip(&self) -> Option<IpAddr> {
        self.client_ip
    }

    pub fn connected_at(&self) -> EpochMillis {
        self.connected_at
    }
}

#[derive(Debug)]
/// A pending transaction waiting to be committed.
pub struct PendingTxn {
    /// Context used to send a response back to the client.
    ctx: ExecuteContext,
    /// Client response for transaction.
    response: Result<PendingTxnResponse, AdapterError>,
    /// The action to take at the end of the transaction.
    action: EndTransactionAction,
}

#[derive(Debug)]
/// The response we'll send for a [`PendingTxn`].
pub enum PendingTxnResponse {
    /// The transaction will be committed.
    Committed {
        /// Parameters that will change, and their values, once this transaction is complete.
        params: BTreeMap<&'static str, String>,
    },
    /// The transaction will be rolled back.
    Rolledback {
        /// Parameters that will change, and their values, once this transaction is complete.
        params: BTreeMap<&'static str, String>,
    },
}

impl PendingTxnResponse {
    pub fn extend_params(&mut self, p: impl IntoIterator<Item = (&'static str, String)>) {
        match self {
            PendingTxnResponse::Committed { params }
            | PendingTxnResponse::Rolledback { params } => params.extend(p),
        }
    }
}

impl From<PendingTxnResponse> for ExecuteResponse {
    fn from(value: PendingTxnResponse) -> Self {
        match value {
            PendingTxnResponse::Committed { params } => {
                ExecuteResponse::TransactionCommitted { params }
            }
            PendingTxnResponse::Rolledback { params } => {
                ExecuteResponse::TransactionRolledBack { params }
            }
        }
    }
}

#[derive(Debug)]
/// A pending read transaction waiting to be linearized, along with metadata about its state.
pub struct PendingReadTxn {
    /// The transaction type.
    txn: PendingRead,
    /// The timestamp context of the transaction.
    timestamp_context: TimestampContext,
    /// When we created this pending txn; used to report, once the transaction
    /// ends, how long it was pending. Only used for metrics.
    created: Instant,
    /// Number of times we requeued the processing of this pending read txn.
    /// Requeueing is necessary if the time we executed the query is after the current oracle time;
    /// see [`Coordinator::message_linearize_reads`] for more details.
    num_requeues: u64,
    /// Telemetry context.
    otel_ctx: OpenTelemetryContext,
}

impl PendingReadTxn {
    /// Return the timestamp context of the pending read transaction.
    pub fn timestamp_context(&self) -> &TimestampContext {
        &self.timestamp_context
    }

    pub(crate) fn take_context(self) -> ExecuteContext {
        self.txn.take_context()
    }
}

#[derive(Debug)]
/// A pending read transaction waiting to be linearized.
enum PendingRead {
    Read {
        /// The inner transaction.
        txn: PendingTxn,
    },
    ReadThenWrite {
        /// Context used to send a response back to the client.
        ctx: ExecuteContext,
        /// Channel used to alert the transaction that the read has been linearized and send back
        /// `ctx`.
        tx: oneshot::Sender<Option<ExecuteContext>>,
    },
}

impl PendingRead {
    /// Alert the client that the read has been linearized.
    ///
    /// If it is necessary to finalize an execute, return the state necessary to do so
    /// (execution context and result).
    #[instrument(level = "debug")]
    pub fn finish(self) -> Option<(ExecuteContext, Result<ExecuteResponse, AdapterError>)> {
        match self {
            PendingRead::Read {
                txn:
                    PendingTxn {
                        mut ctx,
                        response,
                        action,
                    },
                ..
            } => {
                let changed = ctx.session_mut().vars_mut().end_transaction(action);
                // Append any parameters that changed to the response.
                let response = response.map(|mut r| {
                    r.extend_params(changed);
                    ExecuteResponse::from(r)
                });

                Some((ctx, response))
            }
            PendingRead::ReadThenWrite { ctx, tx, .. } => {
                // Ignore errors if the caller has hung up.
                let _ = tx.send(Some(ctx));
                None
            }
        }
    }

    fn label(&self) -> &'static str {
        match self {
            PendingRead::Read { .. } => "read",
            PendingRead::ReadThenWrite { .. } => "read_then_write",
        }
    }

    pub(crate) fn take_context(self) -> ExecuteContext {
        match self {
            PendingRead::Read { txn, .. } => txn.ctx,
            PendingRead::ReadThenWrite { ctx, tx, .. } => {
                // Inform the transaction that we've taken their context.
                // Ignore errors if the caller has hung up.
                let _ = tx.send(None);
                ctx
            }
        }
    }
}

/// State that the coordinator must process as part of retiring
/// command execution. `ExecuteContextExtra::Default` is guaranteed
/// to produce a value that will cause the coordinator to do nothing, and
/// is intended for use by code that invokes the execution processing flow
/// (i.e., `sequence_plan`) without actually being a statement execution.
///
/// This is a pure data struct containing only the statement logging ID.
/// For auto-retire-on-drop behavior, use `ExecuteContextGuard`, which wraps
/// this struct and owns the channel for sending retirement messages.
#[derive(Debug, Default)]
#[must_use]
pub struct ExecuteContextExtra {
    statement_uuid: Option<StatementLoggingId>,
}

impl ExecuteContextExtra {
    pub(crate) fn new(statement_uuid: Option<StatementLoggingId>) -> Self {
        Self { statement_uuid }
    }

    pub fn is_trivial(&self) -> bool {
        self.statement_uuid.is_none()
    }

    pub fn contents(&self) -> Option<StatementLoggingId> {
        self.statement_uuid
    }

    /// Consume this extra and return the statement UUID for retirement.
    /// This should only be called from code that knows what to do to finish
    /// up logging based on the inner value.
    #[must_use]
    pub(crate) fn retire(self) -> Option<StatementLoggingId> {
        self.statement_uuid
    }
}
1447
1448/// A guard that wraps `ExecuteContextExtra` and owns a channel for sending
1449/// retirement messages to the coordinator.
1450///
1451/// If this guard is dropped with a `Some` `statement_uuid` in its inner
1452/// `ExecuteContextExtra`, the `Drop` implementation will automatically send a
1453/// `Message::RetireExecute` to log the statement ending.
1454/// This handles cases like connection drops where the context cannot be
1455/// explicitly retired.
1456/// See <https://github.com/MaterializeInc/database-issues/issues/7304>
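///
/// A rough sketch of the two ways a guard can end, assuming a coordinator
/// channel `coordinator_tx` is at hand (names illustrative):
///
/// ```ignore
/// let guard = ExecuteContextGuard::new(Some(statement_uuid), coordinator_tx);
/// // Explicit path: take the inner `ExecuteContextExtra`; dropping the
/// // defused guard then sends nothing.
/// let extra = guard.defuse();
/// // Implicit path: dropping a guard that was *not* defused sends
/// // `Message::RetireExecute` with `StatementEndedExecutionReason::Aborted`.
/// ```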
1457#[derive(Debug)]
1458#[must_use]
1459pub struct ExecuteContextGuard {
1460    extra: ExecuteContextExtra,
1461    /// Channel for sending messages to the coordinator. Used for auto-retiring on drop.
1462    /// For `Default` instances, this is a dummy sender (the receiver is already dropped),
1463    /// so sends fail silently, which is the desired behavior since `Default` instances
1464    /// should only be used for non-logged statements.
1465    coordinator_tx: mpsc::UnboundedSender<Message>,
1466}
1467
1468impl Default for ExecuteContextGuard {
1469    fn default() -> Self {
1470        // Create a dummy sender by immediately dropping the receiver.
1471        // Any send on this channel will fail silently, which is the desired
1472        // behavior for Default instances (non-logged statements).
1473        let (tx, _rx) = mpsc::unbounded_channel();
1474        Self {
1475            extra: ExecuteContextExtra::default(),
1476            coordinator_tx: tx,
1477        }
1478    }
1479}
1480
1481impl ExecuteContextGuard {
1482    pub(crate) fn new(
1483        statement_uuid: Option<StatementLoggingId>,
1484        coordinator_tx: mpsc::UnboundedSender<Message>,
1485    ) -> Self {
1486        Self {
1487            extra: ExecuteContextExtra::new(statement_uuid),
1488            coordinator_tx,
1489        }
1490    }
1491    pub fn is_trivial(&self) -> bool {
1492        self.extra.is_trivial()
1493    }
1494    pub fn contents(&self) -> Option<StatementLoggingId> {
1495        self.extra.contents()
1496    }
1497    /// Take responsibility for the contents.  This should only be
1498    /// called from code that knows what to do to finish up logging
1499    /// based on the inner value.
1500    ///
1501    /// Returns the inner `ExecuteContextExtra`, consuming the guard without
1502    /// triggering the auto-retire behavior.
1503    pub(crate) fn defuse(mut self) -> ExecuteContextExtra {
1504        // Taking statement_uuid prevents the Drop impl from sending a retire message
1505        std::mem::take(&mut self.extra)
1506    }
1507}
1508
1509impl Drop for ExecuteContextGuard {
1510    fn drop(&mut self) {
1511        if let Some(statement_uuid) = self.extra.statement_uuid.take() {
1512            // Auto-retire since the guard was dropped without explicit retirement (likely due
1513            // to connection drop).
1514            let msg = Message::RetireExecute {
1515                data: ExecuteContextExtra {
1516                    statement_uuid: Some(statement_uuid),
1517                },
1518                otel_ctx: OpenTelemetryContext::obtain(),
1519                reason: StatementEndedExecutionReason::Aborted,
1520            };
1521            // Send may fail for Default instances (dummy sender), which is fine since
1522            // Default instances should only be used for non-logged statements.
1523            let _ = self.coordinator_tx.send(msg);
1524        }
1525    }
1526}
1527
1528/// Bundle of state related to statement execution.
1529///
1530/// This struct collects a bundle of state that needs to be threaded
1531/// through various functions as part of statement execution.
1532    /// It is used to finalize execution by calling `retire`. Finalizing execution
1533/// involves sending the session back to the pgwire layer so that it
1534/// may be used to process further commands. It also involves
1535/// performing some work on the main coordinator thread
1536/// (e.g., recording the time at which the statement finished
1537/// executing). The state necessary to perform this work is bundled in
1538/// the `ExecuteContextGuard` object.
1539#[derive(Debug)]
1540pub struct ExecuteContext {
1541    inner: Box<ExecuteContextInner>,
1542}
1543
1544impl std::ops::Deref for ExecuteContext {
1545    type Target = ExecuteContextInner;
1546    fn deref(&self) -> &Self::Target {
1547        &*self.inner
1548    }
1549}
1550
1551impl std::ops::DerefMut for ExecuteContext {
1552    fn deref_mut(&mut self) -> &mut Self::Target {
1553        &mut *self.inner
1554    }
1555}
1556
1557#[derive(Debug)]
1558pub struct ExecuteContextInner {
1559    tx: ClientTransmitter<ExecuteResponse>,
1560    internal_cmd_tx: mpsc::UnboundedSender<Message>,
1561    session: Session,
1562    extra: ExecuteContextGuard,
1563}
1564
1565impl ExecuteContext {
1566    pub fn session(&self) -> &Session {
1567        &self.session
1568    }
1569
1570    pub fn session_mut(&mut self) -> &mut Session {
1571        &mut self.session
1572    }
1573
1574    pub fn tx(&self) -> &ClientTransmitter<ExecuteResponse> {
1575        &self.tx
1576    }
1577
1578    pub fn tx_mut(&mut self) -> &mut ClientTransmitter<ExecuteResponse> {
1579        &mut self.tx
1580    }
1581
1582    pub fn from_parts(
1583        tx: ClientTransmitter<ExecuteResponse>,
1584        internal_cmd_tx: mpsc::UnboundedSender<Message>,
1585        session: Session,
1586        extra: ExecuteContextGuard,
1587    ) -> Self {
1588        Self {
1589            inner: ExecuteContextInner {
1590                tx,
1591                session,
1592                extra,
1593                internal_cmd_tx,
1594            }
1595            .into(),
1596        }
1597    }
1598
1599    /// By calling this function, the caller takes responsibility for
1600    /// dealing with the instance of `ExecuteContextGuard`. This is
1601    /// intended to support protocols (like `COPY FROM`) that involve
1602    /// multiple passes of sending the session back and forth between
1603    /// the coordinator and the pgwire layer. As part of any such
1604    /// protocol, we must ensure that the `ExecuteContextGuard`
1605    /// (possibly wrapped in a new `ExecuteContext`) is passed back to the coordinator for
1606    /// eventual retirement.
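    ///
    /// A rough sketch of such a round trip (names illustrative):
    ///
    /// ```ignore
    /// let (tx, internal_cmd_tx, session, extra) = ctx.into_parts();
    /// // ... hand `session` back to pgwire for another protocol pass ...
    /// let ctx = ExecuteContext::from_parts(tx, internal_cmd_tx, session, extra);
    /// ctx.retire(result);
    /// ```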
1607    pub fn into_parts(
1608        self,
1609    ) -> (
1610        ClientTransmitter<ExecuteResponse>,
1611        mpsc::UnboundedSender<Message>,
1612        Session,
1613        ExecuteContextGuard,
1614    ) {
1615        let ExecuteContextInner {
1616            tx,
1617            internal_cmd_tx,
1618            session,
1619            extra,
1620        } = *self.inner;
1621        (tx, internal_cmd_tx, session, extra)
1622    }
1623
1624    /// Retire the execution by sending the result to the client and, if the statement was logged, a retirement message to the coordinator.
1625    #[instrument(level = "debug")]
1626    pub fn retire(self, result: Result<ExecuteResponse, AdapterError>) {
1627        let ExecuteContextInner {
1628            tx,
1629            internal_cmd_tx,
1630            session,
1631            extra,
1632        } = *self.inner;
1633        let reason = if extra.is_trivial() {
1634            None
1635        } else {
1636            Some((&result).into())
1637        };
1638        tx.send(result, session);
1639        if let Some(reason) = reason {
1640            // Retire the guard to get the inner ExecuteContextExtra without triggering auto-retire
1641            let extra = extra.defuse();
1642            if let Err(e) = internal_cmd_tx.send(Message::RetireExecute {
1643                otel_ctx: OpenTelemetryContext::obtain(),
1644                data: extra,
1645                reason,
1646            }) {
1647                warn!("internal_cmd_rx dropped before we could send: {:?}", e);
1648            }
1649        }
1650    }
1651
1652    pub fn extra(&self) -> &ExecuteContextGuard {
1653        &self.extra
1654    }
1655
1656    pub fn extra_mut(&mut self) -> &mut ExecuteContextGuard {
1657        &mut self.extra
1658    }
1659}
1660
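/// The last known status of every process of every cluster replica, keyed by
/// cluster, then replica, then process.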
1661#[derive(Debug)]
1662struct ClusterReplicaStatuses(
1663    BTreeMap<ClusterId, BTreeMap<ReplicaId, BTreeMap<ProcessId, ClusterReplicaProcessStatus>>>,
1664);
1665
1666impl ClusterReplicaStatuses {
1667    pub(crate) fn new() -> ClusterReplicaStatuses {
1668        ClusterReplicaStatuses(BTreeMap::new())
1669    }
1670
1671    /// Initializes the statuses of the specified cluster.
1672    ///
1673    /// Panics if the cluster statuses are already initialized.
1674    pub(crate) fn initialize_cluster_statuses(&mut self, cluster_id: ClusterId) {
1675        let prev = self.0.insert(cluster_id, BTreeMap::new());
1676        assert_eq!(
1677            prev, None,
1678            "cluster {cluster_id} statuses already initialized"
1679        );
1680    }
1681
1682    /// Initializes the statuses of the specified cluster replica.
1683    ///
1684    /// Panics if the cluster replica statuses are already initialized.
1685    pub(crate) fn initialize_cluster_replica_statuses(
1686        &mut self,
1687        cluster_id: ClusterId,
1688        replica_id: ReplicaId,
1689        num_processes: usize,
1690        time: DateTime<Utc>,
1691    ) {
1692        tracing::info!(
1693            ?cluster_id,
1694            ?replica_id,
1695            ?time,
1696            "initializing cluster replica status"
1697        );
1698        let replica_statuses = self.0.entry(cluster_id).or_default();
1699        let process_statuses = (0..num_processes)
1700            .map(|process_id| {
1701                let status = ClusterReplicaProcessStatus {
1702                    status: ClusterStatus::Offline(Some(OfflineReason::Initializing)),
1703                    time: time.clone(),
1704                };
1705                (u64::cast_from(process_id), status)
1706            })
1707            .collect();
1708        let prev = replica_statuses.insert(replica_id, process_statuses);
1709        assert_none!(
1710            prev,
1711            "cluster replica {cluster_id}.{replica_id} statuses already initialized"
1712        );
1713    }
1714
1715    /// Removes the statuses of the specified cluster.
1716    ///
1717    /// Panics if the cluster does not exist.
1718    pub(crate) fn remove_cluster_statuses(
1719        &mut self,
1720        cluster_id: &ClusterId,
1721    ) -> BTreeMap<ReplicaId, BTreeMap<ProcessId, ClusterReplicaProcessStatus>> {
1722        let prev = self.0.remove(cluster_id);
1723        prev.unwrap_or_else(|| panic!("unknown cluster: {cluster_id}"))
1724    }
1725
1726    /// Removes the statuses of the specified cluster replica.
1727    ///
1728    /// Panics if the cluster or replica does not exist.
1729    pub(crate) fn remove_cluster_replica_statuses(
1730        &mut self,
1731        cluster_id: &ClusterId,
1732        replica_id: &ReplicaId,
1733    ) -> BTreeMap<ProcessId, ClusterReplicaProcessStatus> {
1734        let replica_statuses = self
1735            .0
1736            .get_mut(cluster_id)
1737            .unwrap_or_else(|| panic!("unknown cluster: {cluster_id}"));
1738        let prev = replica_statuses.remove(replica_id);
1739        prev.unwrap_or_else(|| panic!("unknown cluster replica: {cluster_id}.{replica_id}"))
1740    }
1741
1742    /// Inserts or updates the status of the specified cluster replica process.
1743    ///
1744    /// Panics if the cluster or replica does not exist.
1745    pub(crate) fn ensure_cluster_status(
1746        &mut self,
1747        cluster_id: ClusterId,
1748        replica_id: ReplicaId,
1749        process_id: ProcessId,
1750        status: ClusterReplicaProcessStatus,
1751    ) {
1752        let replica_statuses = self
1753            .0
1754            .get_mut(&cluster_id)
1755            .unwrap_or_else(|| panic!("unknown cluster: {cluster_id}"))
1756            .get_mut(&replica_id)
1757            .unwrap_or_else(|| panic!("unknown cluster replica: {cluster_id}.{replica_id}"));
1758        replica_statuses.insert(process_id, status);
1759    }
1760
1761    /// Computes the status of the cluster replica as a whole.
1762    ///
1763    /// Panics if `cluster_id` or `replica_id` don't exist.
1764    pub fn get_cluster_replica_status(
1765        &self,
1766        cluster_id: ClusterId,
1767        replica_id: ReplicaId,
1768    ) -> ClusterStatus {
1769        let process_status = self.get_cluster_replica_statuses(cluster_id, replica_id);
1770        Self::cluster_replica_status(process_status)
1771    }
1772
1773    /// Computes the status of the cluster replica as a whole.
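    ///
    /// A replica is `Online` only if all of its processes are `Online`;
    /// otherwise it is `Offline` with the first known not-ready reason. A
    /// rough sketch, where `time` is an assumed `DateTime<Utc>` value:
    ///
    /// ```ignore
    /// let statuses = BTreeMap::from([
    ///     (0, ClusterReplicaProcessStatus { status: ClusterStatus::Online, time }),
    ///     (1, ClusterReplicaProcessStatus {
    ///         status: ClusterStatus::Offline(Some(OfflineReason::Initializing)),
    ///         time,
    ///     }),
    /// ]);
    /// assert_eq!(
    ///     ClusterReplicaStatuses::cluster_replica_status(&statuses),
    ///     ClusterStatus::Offline(Some(OfflineReason::Initializing)),
    /// );
    /// ```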
1774    pub fn cluster_replica_status(
1775        process_status: &BTreeMap<ProcessId, ClusterReplicaProcessStatus>,
1776    ) -> ClusterStatus {
1777        process_status
1778            .values()
1779            .fold(ClusterStatus::Online, |s, p| match (s, p.status) {
1780                (ClusterStatus::Online, ClusterStatus::Online) => ClusterStatus::Online,
1781                (x, y) => {
1782                    let reason_x = match x {
1783                        ClusterStatus::Offline(reason) => reason,
1784                        ClusterStatus::Online => None,
1785                    };
1786                    let reason_y = match y {
1787                        ClusterStatus::Offline(reason) => reason,
1788                        ClusterStatus::Online => None,
1789                    };
1790                    // Arbitrarily pick the first known not-ready reason.
1791                    ClusterStatus::Offline(reason_x.or(reason_y))
1792                }
1793            })
1794    }
1795
1796    /// Gets the statuses of the given cluster replica.
1797    ///
1798    /// Panics if the cluster or replica does not exist.
1799    pub(crate) fn get_cluster_replica_statuses(
1800        &self,
1801        cluster_id: ClusterId,
1802        replica_id: ReplicaId,
1803    ) -> &BTreeMap<ProcessId, ClusterReplicaProcessStatus> {
1804        self.try_get_cluster_replica_statuses(cluster_id, replica_id)
1805            .unwrap_or_else(|| panic!("unknown cluster replica: {cluster_id}.{replica_id}"))
1806    }
1807
1808    /// Gets the statuses of the given cluster replica.
1809    pub(crate) fn try_get_cluster_replica_statuses(
1810        &self,
1811        cluster_id: ClusterId,
1812        replica_id: ReplicaId,
1813    ) -> Option<&BTreeMap<ProcessId, ClusterReplicaProcessStatus>> {
1814        self.try_get_cluster_statuses(cluster_id)
1815            .and_then(|statuses| statuses.get(&replica_id))
1816    }
1817
1818    /// Gets the statuses of the given cluster.
1819    pub(crate) fn try_get_cluster_statuses(
1820        &self,
1821        cluster_id: ClusterId,
1822    ) -> Option<&BTreeMap<ReplicaId, BTreeMap<ProcessId, ClusterReplicaProcessStatus>>> {
1823        self.0.get(&cluster_id)
1824    }
1825}
1826
1827/// Glues the external world to the Timely workers.
1828#[derive(Derivative)]
1829#[derivative(Debug)]
1830pub struct Coordinator {
1831    /// The controller for the storage and compute layers.
1832    #[derivative(Debug = "ignore")]
1833    controller: mz_controller::Controller,
1834    /// The catalog in an Arc suitable for readonly references. The Arc allows
1835    /// us to hand out cheap copies of the catalog to functions that can use it
1836    /// off of the main coordinator thread. If the coordinator needs to mutate
1837    /// the catalog, call [`Self::catalog_mut`], which will clone this struct member,
1838    /// allowing it to be mutated here while the other off-thread references can
1839    /// read their catalog as long as needed. In the future we would like this
1840    /// to be a pTVC, but for now this is sufficient.
1841    catalog: Arc<Catalog>,
1842
1843    /// A client for persist. Initially, this is only used for reading stashed
1844    /// peek responses out of batches.
1845    persist_client: PersistClient,
1846
1847    /// Channel to manage internal commands from the coordinator to itself.
1848    internal_cmd_tx: mpsc::UnboundedSender<Message>,
1849    /// Notification that triggers a group commit.
1850    group_commit_tx: appends::GroupCommitNotifier,
1851
1852    /// Channel for strict serializable reads ready to commit.
1853    strict_serializable_reads_tx: mpsc::UnboundedSender<(ConnectionId, PendingReadTxn)>,
1854
1855    /// Mechanism for totally ordering write and read timestamps, so that all reads
1856    /// reflect exactly the set of writes that precede them, and no writes that follow.
1857    global_timelines: BTreeMap<Timeline, TimelineState>,
1858
1859    /// A generator for transient [`GlobalId`]s, shareable with other threads.
1860    transient_id_gen: Arc<TransientIdGen>,
1861    /// A map from connection ID to metadata about that connection for all
1862    /// active connections.
1863    active_conns: BTreeMap<ConnectionId, ConnMeta>,
1864
1865    /// For each transaction, the read holds taken to support any performed reads.
1866    ///
1867    /// Upon completing a transaction, these read holds should be dropped.
1868    txn_read_holds: BTreeMap<ConnectionId, read_policy::ReadHolds>,
1869
1870    /// Access to the peek fields should be restricted to methods in the [`peek`] API.
1871    /// A map from pending peek ids to the queue into which responses are sent, and
1872    /// the connection id of the client that initiated the peek.
1873    pending_peeks: BTreeMap<Uuid, PendingPeek>,
1874    /// A map from client connection ids to a set of all pending peeks for that client.
1875    client_pending_peeks: BTreeMap<ConnectionId, BTreeMap<Uuid, ClusterId>>,
1876
1877    /// A map from client connection ids to pending linearize read transaction.
1878    pending_linearize_read_txns: BTreeMap<ConnectionId, PendingReadTxn>,
1879
1880    /// A map from the compute sink ID to its state description.
1881    active_compute_sinks: BTreeMap<GlobalId, ActiveComputeSink>,
1882    /// A map from active webhooks to their invalidation handle.
1883    active_webhooks: BTreeMap<CatalogItemId, WebhookAppenderInvalidator>,
1884    /// A map of active `COPY FROM` statements. The Coordinator waits for `clusterd`
1885    /// to stage Batches in Persist that we will then link into the shard.
1886    active_copies: BTreeMap<ConnectionId, ActiveCopyFrom>,
1887
1888    /// A map from connection ids to a watch channel that is set to `true` if the connection
1889    /// received a cancel request.
1890    staged_cancellation: BTreeMap<ConnectionId, (watch::Sender<bool>, watch::Receiver<bool>)>,
1891    /// Active introspection subscribes.
1892    introspection_subscribes: BTreeMap<GlobalId, IntrospectionSubscribe>,
1893
1894    /// Locks that grant access to a specific object, populated lazily as objects are written to.
1895    write_locks: BTreeMap<CatalogItemId, Arc<tokio::sync::Mutex<()>>>,
1896    /// Plans that are currently deferred and waiting on a write lock.
1897    deferred_write_ops: BTreeMap<ConnectionId, DeferredOp>,
1898
1899    /// Pending writes waiting for a group commit.
1900    pending_writes: Vec<PendingWriteTxn>,
1901
1902    /// For the realtime timeline, an explicit SELECT or INSERT on a table will bump the
1903    /// table's timestamps, but there are cases where timestamps are not bumped but
1904    /// we expect the closed timestamps to advance (`AS OF X`, `SUBSCRIBE`ing views over
1905    /// RT sources and tables). To address these, spawn a task that forces table
1906    /// timestamps to close on a regular interval. This roughly tracks the behavior
1907    /// of realtime sources that close off timestamps on an interval.
1908    ///
1909    /// For non-realtime timelines, nothing pushes the timestamps forward, so we must do
1910    /// it manually.
1911    advance_timelines_interval: Interval,
1912
1913    /// Serialized DDL. DDL must be serialized because:
1914    /// - Many of them do off-thread work and need to verify the catalog is in a valid state, but
1915    ///   [`PlanValidity`] does not currently support tracking all changes. Doing that correctly
1916    ///   seems to be more difficult than it's worth, so we would instead re-plan and re-sequence
1917    ///   the statements.
1918    /// - Re-planning a statement is hard because Coordinator and Session state is mutated at
1919    ///   various points, and we would need to correctly reset those changes before re-planning and
1920    ///   re-sequencing.
1921    serialized_ddl: LockedVecDeque<DeferredPlanStatement>,
1922
1923    /// Handle to secret manager that can create and delete secrets from
1924    /// an arbitrary secret storage engine.
1925    secrets_controller: Arc<dyn SecretsController>,
1926    /// A secrets reader that maintains an in-memory cache, where values have a set TTL.
1927    caching_secrets_reader: CachingSecretsReader,
1928
1929    /// Handle to a manager that can create and delete kubernetes resources
1930    /// (i.e., VpcEndpoint objects).
1931    cloud_resource_controller: Option<Arc<dyn CloudResourceController>>,
1932
1933    /// Persist client for fetching storage metadata such as size metrics.
1934    storage_usage_client: StorageUsageClient,
1935    /// The interval at which to collect storage usage information.
1936    storage_usage_collection_interval: Duration,
1937
1938    /// Segment analytics client.
1939    #[derivative(Debug = "ignore")]
1940    segment_client: Option<mz_segment::Client>,
1941
1942    /// Coordinator metrics.
1943    metrics: Metrics,
1944    /// Optimizer metrics.
1945    optimizer_metrics: OptimizerMetrics,
1946
1947    /// Tracing handle.
1948    tracing_handle: TracingHandle,
1949
1950    /// Data used by the statement logging feature.
1951    statement_logging: StatementLogging,
1952
1953    /// Limit for how many concurrent webhook requests we allow.
1954    webhook_concurrency_limit: WebhookConcurrencyLimiter,
1955
1956    /// Optional config for the timestamp oracle. This is _required_ when
1957    /// a timestamp oracle backend is configured.
1958    timestamp_oracle_config: Option<TimestampOracleConfig>,
1959
1960    /// Periodically asks cluster scheduling policies to make their decisions.
1961    check_cluster_scheduling_policies_interval: Interval,
1962
1963    /// This keeps the last On/Off decision for each cluster and each scheduling policy.
1964    /// (Clusters that have been dropped or are otherwise out of scope for automatic scheduling are
1965    /// periodically cleaned up from this Map.)
1966    cluster_scheduling_decisions: BTreeMap<ClusterId, BTreeMap<&'static str, SchedulingDecision>>,
1967
1968    /// When doing 0dt upgrades/in read-only mode, periodically ask all known
1969    /// clusters/collections whether they are caught up.
1970    caught_up_check_interval: Interval,
1971
1972    /// Context needed to check whether all clusters/collections have caught up.
1973    /// Only used during 0dt deployment, while in read-only mode.
1974    caught_up_check: Option<CaughtUpCheckContext>,
1975
1976    /// Tracks the state associated with the currently installed watchsets.
1977    installed_watch_sets: BTreeMap<WatchSetId, (ConnectionId, WatchSetResponse)>,
1978
1979    /// Tracks the currently installed watchsets for each connection.
1980    connection_watch_sets: BTreeMap<ConnectionId, BTreeSet<WatchSetId>>,
1981
1982    /// Tracks the statuses of all cluster replicas.
1983    cluster_replica_statuses: ClusterReplicaStatuses,
1984
1985    /// Whether or not to start controllers in read-only mode. This is only
1986    /// meant for use during development of read-only clusters and 0dt upgrades
1987    /// and should go away once we have proper orchestration during upgrades.
1988    read_only_controllers: bool,
1989
1990    /// Updates to builtin tables that are being buffered while we are in
1991    /// read-only mode. We apply these all at once when coming out of read-only
1992    /// mode.
1993    ///
1994    /// This is a `Some` while in read-only mode and will be replaced by a
1995    /// `None` when we transition out of read-only mode and write out any
1996    /// buffered updates.
1997    buffered_builtin_table_updates: Option<Vec<BuiltinTableUpdate>>,
1998
1999    license_key: ValidatedLicenseKey,
2000
2001    /// Pre-allocated pool of user IDs to amortize persist writes across DDL operations.
2002    user_id_pool: IdPool,
2003}
2004
2005impl Coordinator {
2006    /// Initializes coordinator state based on the contained catalog. Must be
2007    /// called after creating the coordinator and before calling the
2008    /// `Coordinator::serve` method.
2009    #[instrument(name = "coord::bootstrap")]
2010    pub(crate) async fn bootstrap(
2011        &mut self,
2012        boot_ts: Timestamp,
2013        migrated_storage_collections_0dt: BTreeSet<CatalogItemId>,
2014        mut builtin_table_updates: Vec<BuiltinTableUpdate>,
2015        cached_global_exprs: BTreeMap<GlobalId, GlobalExpressions>,
2016        uncached_local_exprs: BTreeMap<GlobalId, LocalExpressions>,
2017        audit_logs_iterator: AuditLogIterator,
2018    ) -> Result<(), AdapterError> {
2019        let bootstrap_start = Instant::now();
2020        info!("startup: coordinator init: bootstrap beginning");
2021        info!("startup: coordinator init: bootstrap: preamble beginning");
2022
2023        // Initialize cluster replica statuses.
2024        // Gross iterator is to avoid partial borrow issues.
2025        let cluster_statuses: Vec<(_, Vec<_>)> = self
2026            .catalog()
2027            .clusters()
2028            .map(|cluster| {
2029                (
2030                    cluster.id(),
2031                    cluster
2032                        .replicas()
2033                        .map(|replica| {
2034                            (replica.replica_id, replica.config.location.num_processes())
2035                        })
2036                        .collect(),
2037                )
2038            })
2039            .collect();
2040        let now = self.now_datetime();
2041        for (cluster_id, replica_statuses) in cluster_statuses {
2042            self.cluster_replica_statuses
2043                .initialize_cluster_statuses(cluster_id);
2044            for (replica_id, num_processes) in replica_statuses {
2045                self.cluster_replica_statuses
2046                    .initialize_cluster_replica_statuses(
2047                        cluster_id,
2048                        replica_id,
2049                        num_processes,
2050                        now,
2051                    );
2052            }
2053        }
2054
2055        let system_config = self.catalog().system_config();
2056
2057        // Inform metrics about the initial system configuration.
2058        mz_metrics::update_dyncfg(&system_config.dyncfg_updates());
2059
2060        // Inform the controllers about their initial configuration.
2061        let compute_config = flags::compute_config(system_config);
2062        let storage_config = flags::storage_config(system_config);
2063        let scheduling_config = flags::orchestrator_scheduling_config(system_config);
2064        let dyncfg_updates = system_config.dyncfg_updates();
2065        self.controller.compute.update_configuration(compute_config);
2066        self.controller.storage.update_parameters(storage_config);
2067        self.controller
2068            .update_orchestrator_scheduling_config(scheduling_config);
2069        self.controller.update_configuration(dyncfg_updates);
2070
2071        // Skip the credit consumption check at bootstrap under DisableClusterCreation behavior:
2072        // this codepath validates existing replicas at startup, not cluster creation, so it
2073        // must not block startup. New cluster creation is still gated by the DDL-time check.
2074        // The Disable case is already handled by a bail! in main.rs before we reach here.
2075        let enforce_credit_limit_at_bootstrap = !matches!(
2076            self.license_key.expiration_behavior,
2077            ExpirationBehavior::DisableClusterCreation,
2078        );
2079        if enforce_credit_limit_at_bootstrap {
2080            self.validate_resource_limit_numeric(
2081                Numeric::zero(),
2082                self.current_credit_consumption_rate(),
2083                |system_vars| {
2084                    self.license_key
2085                        .max_credit_consumption_rate()
2086                        .map_or_else(|| system_vars.max_credit_consumption_rate(), Numeric::from)
2087                },
2088                "cluster replica",
2089                MAX_CREDIT_CONSUMPTION_RATE.name(),
2090            )?;
2091        }
2092
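        // Collections that need a read policy installed, grouped by compaction
        // window. This is filled in while iterating over the catalog entries
        // below and applied via `initialize_read_policies` near the end of
        // bootstrap.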
2093        let mut policies_to_set: BTreeMap<CompactionWindow, CollectionIdBundle> =
2094            Default::default();
2095
2096        let enable_worker_core_affinity =
2097            self.catalog().system_config().enable_worker_core_affinity();
2098        let enable_storage_introspection_logs = self
2099            .catalog()
2100            .system_config()
2101            .enable_storage_introspection_logs();
2102        for instance in self.catalog.clusters() {
2103            self.controller.create_cluster(
2104                instance.id,
2105                ClusterConfig {
2106                    arranged_logs: instance.log_indexes.clone(),
2107                    workload_class: instance.config.workload_class.clone(),
2108                },
2109            )?;
2110            for replica in instance.replicas() {
2111                let role = instance.role();
2112                self.controller.create_replica(
2113                    instance.id,
2114                    replica.replica_id,
2115                    instance.name.clone(),
2116                    replica.name.clone(),
2117                    role,
2118                    replica.config.clone(),
2119                    enable_worker_core_affinity,
2120                    enable_storage_introspection_logs,
2121                )?;
2122            }
2123        }
2124
2125        info!(
2126            "startup: coordinator init: bootstrap: preamble complete in {:?}",
2127            bootstrap_start.elapsed()
2128        );
2129
2130        let init_storage_collections_start = Instant::now();
2131        info!("startup: coordinator init: bootstrap: storage collections init beginning");
2132        self.bootstrap_storage_collections(&migrated_storage_collections_0dt)
2133            .await;
2134        info!(
2135            "startup: coordinator init: bootstrap: storage collections init complete in {:?}",
2136            init_storage_collections_start.elapsed()
2137        );
2138
2139        // The storage controller knows about the introspection collections now, so we can start
2140        // sinking introspection updates in the compute controller. It makes sense to do that as
2141        // soon as possible, to avoid updates piling up in the compute controller's internal
2142        // buffers.
2143        self.controller.start_compute_introspection_sink();
2144
2145        let sorting_start = Instant::now();
2146        info!("startup: coordinator init: bootstrap: sorting catalog entries");
2147        let entries = self.bootstrap_sort_catalog_entries();
2148        info!(
2149            "startup: coordinator init: bootstrap: sorting catalog entries complete in {:?}",
2150            sorting_start.elapsed()
2151        );
2152
2153        let optimize_dataflows_start = Instant::now();
2154        info!("startup: coordinator init: bootstrap: optimize dataflow plans beginning");
2155        let uncached_global_exps = self.bootstrap_dataflow_plans(&entries, cached_global_exprs)?;
2156        info!(
2157            "startup: coordinator init: bootstrap: optimize dataflow plans complete in {:?}",
2158            optimize_dataflows_start.elapsed()
2159        );
2160
2161        // We don't need to wait for the cache to update.
2162        let _fut = self.catalog().update_expression_cache(
2163            uncached_local_exprs.into_iter().collect(),
2164            uncached_global_exps.into_iter().collect(),
2165            Default::default(),
2166        );
2167
2168        // Select dataflow as-ofs. This step relies on the storage collections created by
2169        // `bootstrap_storage_collections` and the dataflow plans created by
2170        // `bootstrap_dataflow_plans`.
2171        let bootstrap_as_ofs_start = Instant::now();
2172        info!("startup: coordinator init: bootstrap: dataflow as-of bootstrapping beginning");
2173        let dataflow_read_holds = self.bootstrap_dataflow_as_ofs().await;
2174        info!(
2175            "startup: coordinator init: bootstrap: dataflow as-of bootstrapping complete in {:?}",
2176            bootstrap_as_ofs_start.elapsed()
2177        );
2178
2179        let postamble_start = Instant::now();
2180        info!("startup: coordinator init: bootstrap: postamble beginning");
2181
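        // Global IDs of all builtin logs. The installation loop below only
        // records read policies for indexes on these; it does not ship
        // dataflows for them.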
2182        let logs: BTreeSet<_> = BUILTINS::logs()
2183            .map(|log| self.catalog().resolve_builtin_log(log))
2184            .flat_map(|item_id| self.catalog().get_global_ids(&item_id))
2185            .collect();
2186
2187        let mut privatelink_connections = BTreeMap::new();
2188
2189        for entry in &entries {
2190            debug!(
2191                "coordinator init: installing {} {}",
2192                entry.item().typ(),
2193                entry.id()
2194            );
2195            let mut policy = entry.item().initial_logical_compaction_window();
2196            match entry.item() {
2197                // Currently catalog item rebuild assumes that sinks and
2198                // indexes are always built individually and does not store information
2199                // about how they were built. If we start building multiple sinks and/or indexes
2200                // using a single dataflow, we have to make sure the rebuild process re-runs
2201                // the same multiple-build dataflow.
2202                CatalogItem::Source(source) => {
2203                    // Propagate source compaction windows to subsources if needed.
2204                    if source.custom_logical_compaction_window.is_none() {
2205                        if let DataSourceDesc::IngestionExport { ingestion_id, .. } =
2206                            source.data_source
2207                        {
2208                            policy = Some(
2209                                self.catalog()
2210                                    .get_entry(&ingestion_id)
2211                                    .source()
2212                                    .expect("must be source")
2213                                    .custom_logical_compaction_window
2214                                    .unwrap_or_default(),
2215                            );
2216                        }
2217                    }
2218                    policies_to_set
2219                        .entry(policy.expect("sources have a compaction window"))
2220                        .or_insert_with(Default::default)
2221                        .storage_ids
2222                        .insert(source.global_id());
2223                }
2224                CatalogItem::Table(table) => {
2225                    policies_to_set
2226                        .entry(policy.expect("tables have a compaction window"))
2227                        .or_insert_with(Default::default)
2228                        .storage_ids
2229                        .extend(table.global_ids());
2230                }
2231                CatalogItem::Index(idx) => {
2232                    let policy_entry = policies_to_set
2233                        .entry(policy.expect("indexes have a compaction window"))
2234                        .or_insert_with(Default::default);
2235
2236                    if logs.contains(&idx.on) {
2237                        policy_entry
2238                            .compute_ids
2239                            .entry(idx.cluster_id)
2240                            .or_insert_with(BTreeSet::new)
2241                            .insert(idx.global_id());
2242                    } else {
2243                        let df_desc = self
2244                            .catalog()
2245                            .try_get_physical_plan(&idx.global_id())
2246                            .expect("added in `bootstrap_dataflow_plans`")
2247                            .clone();
2248
2249                        let df_meta = self
2250                            .catalog()
2251                            .try_get_dataflow_metainfo(&idx.global_id())
2252                            .expect("added in `bootstrap_dataflow_plans`");
2253
2254                        if self.catalog().state().system_config().enable_mz_notices() {
2255                            // Collect optimization hint updates.
2256                            self.catalog().state().pack_optimizer_notices(
2257                                &mut builtin_table_updates,
2258                                df_meta.optimizer_notices.iter(),
2259                                Diff::ONE,
2260                            );
2261                        }
2262
2263                        // What follows is morally equivalent to `self.ship_dataflow(df, idx.cluster_id)`,
2264                        // but we cannot call that as it will also downgrade the read hold on the index.
2265                        policy_entry
2266                            .compute_ids
2267                            .entry(idx.cluster_id)
2268                            .or_insert_with(Default::default)
2269                            .extend(df_desc.export_ids());
2270
2271                        self.controller
2272                            .compute
2273                            .create_dataflow(idx.cluster_id, df_desc, None)
2274                            .unwrap_or_terminate("cannot fail to create dataflows");
2275                    }
2276                }
2277                CatalogItem::View(_) => (),
2278                CatalogItem::MaterializedView(mview) => {
2279                    policies_to_set
2280                        .entry(policy.expect("materialized views have a compaction window"))
2281                        .or_insert_with(Default::default)
2282                        .storage_ids
2283                        .insert(mview.global_id_writes());
2284
2285                    let mut df_desc = self
2286                        .catalog()
2287                        .try_get_physical_plan(&mview.global_id_writes())
2288                        .expect("added in `bootstrap_dataflow_plans`")
2289                        .clone();
2290
2291                    if let Some(initial_as_of) = mview.initial_as_of.clone() {
2292                        df_desc.set_initial_as_of(initial_as_of);
2293                    }
2294
2295                    // If we have a refresh schedule that has a last refresh, then set the `until` to the last refresh.
2296                    let until = mview
2297                        .refresh_schedule
2298                        .as_ref()
2299                        .and_then(|s| s.last_refresh())
2300                        .and_then(|r| r.try_step_forward());
2301                    if let Some(until) = until {
2302                        df_desc.until.meet_assign(&Antichain::from_elem(until));
2303                    }
2304
2305                    let df_meta = self
2306                        .catalog()
2307                        .try_get_dataflow_metainfo(&mview.global_id_writes())
2308                        .expect("added in `bootstrap_dataflow_plans`");
2309
2310                    if self.catalog().state().system_config().enable_mz_notices() {
2311                        // Collect optimization hint updates.
2312                        self.catalog().state().pack_optimizer_notices(
2313                            &mut builtin_table_updates,
2314                            df_meta.optimizer_notices.iter(),
2315                            Diff::ONE,
2316                        );
2317                    }
2318
2319                    self.ship_dataflow(df_desc, mview.cluster_id, mview.target_replica)
2320                        .await;
2321
2322                    // If this is a replacement MV, it must remain read-only until the replacement
2323                    // gets applied.
2324                    if mview.replacement_target.is_none() {
2325                        self.allow_writes(mview.cluster_id, mview.global_id_writes());
2326                    }
2327                }
2328                CatalogItem::Sink(sink) => {
2329                    policies_to_set
2330                        .entry(CompactionWindow::Default)
2331                        .or_insert_with(Default::default)
2332                        .storage_ids
2333                        .insert(sink.global_id());
2334                }
2335                CatalogItem::Connection(catalog_connection) => {
2336                    if let ConnectionDetails::AwsPrivatelink(conn) = &catalog_connection.details {
2337                        privatelink_connections.insert(
2338                            entry.id(),
2339                            VpcEndpointConfig {
2340                                aws_service_name: conn.service_name.clone(),
2341                                availability_zone_ids: conn.availability_zones.clone(),
2342                            },
2343                        );
2344                    }
2345                }
2346                // Nothing to do for these cases
2347                CatalogItem::Log(_)
2348                | CatalogItem::Type(_)
2349                | CatalogItem::Func(_)
2350                | CatalogItem::Secret(_) => {}
2351            }
2352        }
2353
2354        if let Some(cloud_resource_controller) = &self.cloud_resource_controller {
2355            // Clean up any extraneous VpcEndpoints that shouldn't exist.
2356            let existing_vpc_endpoints = cloud_resource_controller
2357                .list_vpc_endpoints()
2358                .await
2359                .context("list vpc endpoints")?;
2360            let existing_vpc_endpoints = BTreeSet::from_iter(existing_vpc_endpoints.into_keys());
2361            let desired_vpc_endpoints = privatelink_connections.keys().cloned().collect();
2362            let vpc_endpoints_to_remove = existing_vpc_endpoints.difference(&desired_vpc_endpoints);
2363            for id in vpc_endpoints_to_remove {
2364                cloud_resource_controller
2365                    .delete_vpc_endpoint(*id)
2366                    .await
2367                    .context("deleting extraneous vpc endpoint")?;
2368            }
2369
2370            // Ensure desired VpcEndpoints are up to date.
2371            for (id, spec) in privatelink_connections {
2372                cloud_resource_controller
2373                    .ensure_vpc_endpoint(id, spec)
2374                    .await
2375                    .context("ensuring vpc endpoint")?;
2376            }
2377        }
2378
2379        // Having installed all entries and created all constraints, we can now drop
2380        // read holds and relax read policies.
2381        drop(dataflow_read_holds);
2382        // TODO -- Improve `initialize_read_policies` API so we can avoid calling this in a loop.
2383        for (cw, policies) in policies_to_set {
2384            self.initialize_read_policies(&policies, cw).await;
2385        }
2386
2387        // Expose mapping from T-shirt sizes to actual sizes
2388        builtin_table_updates.extend(
2389            self.catalog().state().resolve_builtin_table_updates(
2390                self.catalog().state().pack_all_replica_size_updates(),
2391            ),
2392        );
2393
2394        debug!("startup: coordinator init: bootstrap: initializing migrated builtin tables");
2395        // When 0dt is enabled, we create new shards for any migrated builtin storage collections.
2396        // In read-only mode, the migrated builtin tables (which are a subset of migrated builtin
2397        // storage collections) need to be back-filled so that any dependent dataflow can be
2398        // hydrated. Additionally, these shards are not registered with the txn-shard, and cannot
2399        // be registered while in read-only mode, so they are written to directly.
2400        let migrated_updates_fut = if self.controller.read_only() {
2401            let min_timestamp = Timestamp::minimum();
2402            let migrated_builtin_table_updates: Vec<_> = builtin_table_updates
2403                .extract_if(.., |update| {
2404                    let gid = self.catalog().get_entry(&update.id).latest_global_id();
2405                    migrated_storage_collections_0dt.contains(&update.id)
2406                        && self
2407                            .controller
2408                            .storage_collections
2409                            .collection_frontiers(gid)
2410                            .expect("all tables are registered")
2411                            .write_frontier
2412                            .elements()
2413                            == &[min_timestamp]
2414                })
2415                .collect();
2416            if migrated_builtin_table_updates.is_empty() {
2417                futures::future::ready(()).boxed()
2418            } else {
2419                // Group all updates per-table.
2420                let mut grouped_appends: BTreeMap<GlobalId, Vec<TableData>> = BTreeMap::new();
2421                for update in migrated_builtin_table_updates {
2422                    let gid = self.catalog().get_entry(&update.id).latest_global_id();
2423                    grouped_appends.entry(gid).or_default().push(update.data);
2424                }
2425                info!(
2426                    "coordinator init: rehydrating migrated builtin tables in read-only mode: {:?}",
2427                    grouped_appends.keys().collect::<Vec<_>>()
2428                );
2429
2430                // Consolidate Row data; staged batches must already be consolidated.
2431                let mut all_appends = Vec::with_capacity(grouped_appends.len());
2432                for (item_id, table_data) in grouped_appends.into_iter() {
2433                    let mut all_rows = Vec::new();
2434                    let mut all_data = Vec::new();
2435                    for data in table_data {
2436                        match data {
2437                            TableData::Rows(rows) => all_rows.extend(rows),
2438                            TableData::Batches(_) => all_data.push(data),
2439                        }
2440                    }
2441                    differential_dataflow::consolidation::consolidate(&mut all_rows);
2442                    all_data.push(TableData::Rows(all_rows));
2443
2444                    // TODO(parkmycar): Use SmallVec throughout.
2445                    all_appends.push((item_id, all_data));
2446                }
2447
2448                let fut = self
2449                    .controller
2450                    .storage
2451                    .append_table(min_timestamp, boot_ts.step_forward(), all_appends)
2452                    .expect("cannot fail to append");
2453                async {
2454                    fut.await
2455                        .expect("One-shot shouldn't be dropped during bootstrap")
2456                        .unwrap_or_terminate("cannot fail to append")
2457                }
2458                .boxed()
2459            }
2460        } else {
2461            futures::future::ready(()).boxed()
2462        };
2463
2464        info!(
2465            "startup: coordinator init: bootstrap: postamble complete in {:?}",
2466            postamble_start.elapsed()
2467        );
2468
2469        let builtin_update_start = Instant::now();
2470        info!("startup: coordinator init: bootstrap: generate builtin updates beginning");
2471
2472        if self.controller.read_only() {
2473            info!(
2474                "coordinator init: bootstrap: stashing builtin table updates while in read-only mode"
2475            );
2476
2477            // TODO(jkosh44) Optimize deserializing the audit log in read-only mode.
2478            let audit_join_start = Instant::now();
2479            info!("startup: coordinator init: bootstrap: audit log deserialization beginning");
2480            let audit_log_updates: Vec<_> = audit_logs_iterator
2481                .map(|(audit_log, ts)| StateUpdate {
2482                    kind: StateUpdateKind::AuditLog(audit_log),
2483                    ts,
2484                    diff: StateDiff::Addition,
2485                })
2486                .collect();
2487            let audit_log_builtin_table_updates = self
2488                .catalog()
2489                .state()
2490                .generate_builtin_table_updates(audit_log_updates);
2491            builtin_table_updates.extend(audit_log_builtin_table_updates);
2492            info!(
2493                "startup: coordinator init: bootstrap: audit log deserialization complete in {:?}",
2494                audit_join_start.elapsed()
2495            );
2496            self.buffered_builtin_table_updates
2497                .as_mut()
2498                .expect("in read-only mode")
2499                .append(&mut builtin_table_updates);
2500        } else {
2501            self.bootstrap_tables(&entries, builtin_table_updates, audit_logs_iterator)
2502                .await;
2503        };
2504        info!(
2505            "startup: coordinator init: bootstrap: generate builtin updates complete in {:?}",
2506            builtin_update_start.elapsed()
2507        );
2508
2509        let cleanup_secrets_start = Instant::now();
2510        info!("startup: coordinator init: bootstrap: generate secret cleanup beginning");
2511        // Cleanup orphaned secrets. Errors during list() or delete() do not
2512        // need to prevent bootstrap from succeeding; we will retry next
2513        // startup.
2514        {
2515            // Destructure Self so we can selectively move fields into the async
2516            // task.
2517            let Self {
2518                secrets_controller,
2519                catalog,
2520                ..
2521            } = self;
2522
2523            let next_user_item_id = catalog.get_next_user_item_id().await?;
2524            let next_system_item_id = catalog.get_next_system_item_id().await?;
2525            let read_only = self.controller.read_only();
2526            // Fetch all IDs from the catalog to future-proof against other
2527            // things using secrets. Today, SECRET and CONNECTION objects use
2528            // secrets_controller.ensure, but more things could use secrets in the
2529            // future, and those would be easy to miss adding here.
2530            let catalog_ids: BTreeSet<CatalogItemId> =
2531                catalog.entries().map(|entry| entry.id()).collect();
2532            let secrets_controller = Arc::clone(secrets_controller);
2533
2534            spawn(|| "cleanup-orphaned-secrets", async move {
2535                if read_only {
2536                    info!(
2537                        "coordinator init: not cleaning up orphaned secrets while in read-only mode"
2538                    );
2539                    return;
2540                }
2541                info!("coordinator init: cleaning up orphaned secrets");
2542
2543                match secrets_controller.list().await {
2544                    Ok(controller_secrets) => {
2545                        let controller_secrets: BTreeSet<CatalogItemId> =
2546                            controller_secrets.into_iter().collect();
2547                        let orphaned = controller_secrets.difference(&catalog_ids);
2548                        for id in orphaned {
2549                            let id_too_large = match id {
2550                                CatalogItemId::System(id) => *id >= next_system_item_id,
2551                                CatalogItemId::User(id) => *id >= next_user_item_id,
2552                                CatalogItemId::IntrospectionSourceIndex(_)
2553                                | CatalogItemId::Transient(_) => false,
2554                            };
2555                            if id_too_large {
2556                                info!(
2557                                    %next_user_item_id, %next_system_item_id,
2558                                    "coordinator init: not deleting orphaned secret {id} that was likely created by a newer deploy generation"
2559                                );
2560                            } else {
2561                                info!("coordinator init: deleting orphaned secret {id}");
2562                                fail_point!("orphan_secrets");
2563                                if let Err(e) = secrets_controller.delete(*id).await {
2564                                    warn!(
2565                                        "Dropping orphaned secret has encountered an error: {}",
2566                                        e
2567                                    );
2568                                }
2569                            }
2570                        }
2571                    }
2572                    Err(e) => warn!("Failed to list secrets during orphan cleanup: {:?}", e),
2573                }
2574            });
2575        }
2576        info!(
2577            "startup: coordinator init: bootstrap: generate secret cleanup complete in {:?}",
2578            cleanup_secrets_start.elapsed()
2579        );
2580
2581        // Run all of our final steps concurrently.
2582        let final_steps_start = Instant::now();
2583        info!(
2584            "startup: coordinator init: bootstrap: migrate builtin tables in read-only mode beginning"
2585        );
2586        migrated_updates_fut
2587            .instrument(info_span!("coord::bootstrap::final"))
2588            .await;
2589
2590        debug!(
2591            "startup: coordinator init: bootstrap: announcing completion of initialization to controller"
2592        );
2593        // Announce the completion of initialization.
2594        self.controller.initialization_complete();
2595
2596        // Initialize unified introspection.
2597        self.bootstrap_introspection_subscribes().await;
2598
2599        info!(
2600            "startup: coordinator init: bootstrap: migrate builtin tables in read-only mode complete in {:?}",
2601            final_steps_start.elapsed()
2602        );
2603
2604        info!(
2605            "startup: coordinator init: bootstrap complete in {:?}",
2606            bootstrap_start.elapsed()
2607        );
2608        Ok(())
2609    }
2610
2611    /// Prepares tables for writing by resetting them to a known state and
2612    /// appending the given builtin table updates. The timestamp oracle
2613    /// will be advanced to the write timestamp of the append when this
2614    /// method returns.
2615    #[allow(clippy::async_yields_async)]
2616    #[instrument]
2617    async fn bootstrap_tables(
2618        &mut self,
2619        entries: &[CatalogEntry],
2620        mut builtin_table_updates: Vec<BuiltinTableUpdate>,
2621        audit_logs_iterator: AuditLogIterator,
2622    ) {
2623        /// Smaller helper struct of metadata for bootstrapping tables.
2624        struct TableMetadata<'a> {
2625            id: CatalogItemId,
2626            name: &'a QualifiedItemName,
2627            table: &'a Table,
2628        }
2629
2630        // Filter our entries down to just tables.
2631        let table_metas: Vec<_> = entries
2632            .into_iter()
2633            .filter_map(|entry| {
2634                entry.table().map(|table| TableMetadata {
2635                    id: entry.id(),
2636                    name: entry.name(),
2637                    table,
2638                })
2639            })
2640            .collect();
2641
2642        // Append empty batches to advance the timestamp of all tables.
2643        debug!("coordinator init: advancing all tables to current timestamp");
2644        let WriteTimestamp {
2645            timestamp: write_ts,
2646            advance_to,
2647        } = self.get_local_write_ts().await;
2648        let appends = table_metas
2649            .iter()
2650            .map(|meta| (meta.table.global_id_writes(), Vec::new()))
2651            .collect();
2652        // Append the tables in the background. We apply the write timestamp before getting a read
2653        // timestamp and reading a snapshot of each table, so each snapshot will block until the
2654        // appends are complete.
2655        let table_fence_rx = self
2656            .controller
2657            .storage
2658            .append_table(write_ts.clone(), advance_to, appends)
2659            .expect("invalid updates");
2660
2661        self.apply_local_write(write_ts).await;
2662
2663        // Add builtin table updates that clear the contents of all system tables.
2664        debug!("coordinator init: resetting system tables");
2665        let read_ts = self.get_local_read_ts().await;
2666
2667        // Filter out the 'mz_storage_usage_by_shard' table since we need to retain that info for
2668        // billing purposes.
2669        let mz_storage_usage_by_shard_schema: SchemaSpecifier = self
2670            .catalog()
2671            .resolve_system_schema(MZ_STORAGE_USAGE_BY_SHARD.schema)
2672            .into();
2673        let is_storage_usage_by_shard = |meta: &TableMetadata| -> bool {
2674            meta.name.item == MZ_STORAGE_USAGE_BY_SHARD.name
2675                && meta.name.qualifiers.schema_spec == mz_storage_usage_by_shard_schema
2676        };
2677
2678        let mut retraction_tasks = Vec::new();
2679        let mut system_tables: Vec<_> = table_metas
2680            .iter()
2681            .filter(|meta| meta.id.is_system() && !is_storage_usage_by_shard(meta))
2682            .collect();
2683
2684        // Special-case audit events because that table is append-only.
2685        let (audit_events_idx, _) = system_tables
2686            .iter()
2687            .find_position(|table| {
2688                table.id == self.catalog().resolve_builtin_table(&MZ_AUDIT_EVENTS)
2689            })
2690            .expect("mz_audit_events must exist");
2691        let audit_events = system_tables.remove(audit_events_idx);
2692        let audit_log_task = self.bootstrap_audit_log_table(
2693            audit_events.id,
2694            audit_events.name,
2695            audit_events.table,
2696            audit_logs_iterator,
2697            read_ts,
2698        );
2699
2700        for system_table in system_tables {
2701            let table_id = system_table.id;
2702            let full_name = self.catalog().resolve_full_name(system_table.name, None);
2703            debug!("coordinator init: resetting system table {full_name} ({table_id})");
2704
2705            // Fetch the current contents of the table for retraction.
2706            let snapshot_fut = self
2707                .controller
2708                .storage_collections
2709                .snapshot_cursor(system_table.table.global_id_writes(), read_ts);
2710            let batch_fut = self
2711                .controller
2712                .storage_collections
2713                .create_update_builder(system_table.table.global_id_writes());
2714
2715            let task = spawn(|| format!("snapshot-{table_id}"), async move {
2716                // Create a TimestamplessUpdateBuilder.
2717                let mut batch = batch_fut
2718                    .await
2719                    .unwrap_or_terminate("cannot fail to create a batch for a BuiltinTable");
2720                tracing::info!(?table_id, "starting snapshot");
2721                // Get a cursor which will emit a consolidated snapshot.
2722                let mut snapshot_cursor = snapshot_fut
2723                    .await
2724                    .unwrap_or_terminate("cannot fail to snapshot");
2725
2726                // Retract the current contents, spilling into our builder.
2727                while let Some(values) = snapshot_cursor.next().await {
2728                    for (key, _t, d) in values {
2729                        let d_invert = d.neg();
2730                        batch.add(&key, &(), &d_invert).await;
2731                    }
2732                }
2733                tracing::info!(?table_id, "finished snapshot");
2734
2735                let batch = batch.finish().await;
2736                BuiltinTableUpdate::batch(table_id, batch)
2737            });
2738            retraction_tasks.push(task);
2739        }
2740
2741        let retractions_res = futures::future::join_all(retraction_tasks).await;
2742        for retractions in retractions_res {
2743            builtin_table_updates.push(retractions);
2744        }
2745
2746        let audit_join_start = Instant::now();
2747        info!("startup: coordinator init: bootstrap: join audit log deserialization beginning");
2748        let audit_log_updates = audit_log_task.await;
2749        let audit_log_builtin_table_updates = self
2750            .catalog()
2751            .state()
2752            .generate_builtin_table_updates(audit_log_updates);
2753        builtin_table_updates.extend(audit_log_builtin_table_updates);
2754        info!(
2755            "startup: coordinator init: bootstrap: join audit log deserialization complete in {:?}",
2756            audit_join_start.elapsed()
2757        );
2758
2759        // Now that the snapshots are complete, the appends must also be complete.
2760        table_fence_rx
2761            .await
2762            .expect("One-shot shouldn't be dropped during bootstrap")
2763            .unwrap_or_terminate("cannot fail to append");
2764
2765        info!("coordinator init: sending builtin table updates");
2766        let (_builtin_updates_fut, write_ts) = self
2767            .builtin_table_update()
2768            .execute(builtin_table_updates)
2769            .await;
2770        info!(?write_ts, "builtin table updates write ts");
2771        if let Some(write_ts) = write_ts {
2772            self.apply_local_write(write_ts).await;
2773        }
2774    }
2775
2776    /// Prepare updates to the audit log table. The audit log table is append-only and very large, so
2777    /// we only need to find the events present in `audit_logs_iterator` but not in the audit log
2778    /// table.
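    ///
    /// A minimal sketch of the reconciliation, using plain integers for
    /// event IDs rather than the real catalog types (illustrative only):
    /// catalog events are iterated newest-first, so `take_while` keeps
    /// exactly the events newer than anything already in the table.
    ///
    /// ```ignore
    /// let max_table_id = vec![1u64, 2, 3].into_iter().max();
    /// let missing: Vec<u64> = vec![5, 4, 3, 2, 1]
    ///     .into_iter()
    ///     .take_while(|id| max_table_id.map_or(true, |max| *id > max))
    ///     .collect();
    /// assert_eq!(missing, vec![5, 4]);
    /// ```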
2779    #[instrument]
2780    fn bootstrap_audit_log_table<'a>(
2781        &self,
2782        table_id: CatalogItemId,
2783        name: &'a QualifiedItemName,
2784        table: &'a Table,
2785        audit_logs_iterator: AuditLogIterator,
2786        read_ts: Timestamp,
2787    ) -> JoinHandle<Vec<StateUpdate>> {
2788        let full_name = self.catalog().resolve_full_name(name, None);
2789        debug!("coordinator init: reconciling audit log: {full_name} ({table_id})");
2790        let current_contents_fut = self
2791            .controller
2792            .storage_collections
2793            .snapshot(table.global_id_writes(), read_ts);
2794        spawn(|| format!("snapshot-audit-log-{table_id}"), async move {
2795            let current_contents = current_contents_fut
2796                .await
2797                .unwrap_or_terminate("cannot fail to fetch snapshot");
2798            let contents_len = current_contents.len();
2799            debug!("coordinator init: audit log table ({table_id}) size {contents_len}");
2800
2801            // Fetch the largest audit log event ID that has been written to the table.
2802            let max_table_id = current_contents
2803                .into_iter()
2804                .filter(|(_, diff)| *diff == 1)
2805                .map(|(row, _diff)| row.unpack_first().unwrap_uint64())
2806                .max();
2809
2810            // Filter audit log catalog updates to those that are not present in the table.
2811            audit_logs_iterator
2812                .take_while(|(audit_log, _)| match max_table_id {
2813                    Some(id) => audit_log.event.sortable_id() > id,
2814                    None => true,
2815                })
2816                .map(|(audit_log, ts)| StateUpdate {
2817                    kind: StateUpdateKind::AuditLog(audit_log),
2818                    ts,
2819                    diff: StateDiff::Addition,
2820                })
2821                .collect::<Vec<_>>()
2822        })
2823    }
2824
2825    /// Initializes all storage collections required by catalog objects in the storage controller.
2826    ///
2827    /// This method takes care of collection creation, as well as migration of existing
2828    /// collections.
2829    ///
2830    /// Creating all storage collections in a single `create_collections` call, rather than on
2831    /// demand, is more efficient as it reduces the number of writes to durable storage. It also
2832    /// allows subsequent bootstrap logic to fetch metadata (such as frontiers) of arbitrary
2833    /// storage collections, without needing to worry about dependency order.
2834    ///
2835    /// `migrated_storage_collections` is a set of builtin storage collections that have been
2836    /// migrated and should be handled specially.
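    ///
    /// A sketch of the layered registration performed below, with a toy
    /// dependency map instead of real catalog state (illustrative only):
    /// each round registers the collections whose dependencies have all
    /// been registered in earlier rounds.
    ///
    /// ```ignore
    /// use std::collections::{BTreeMap, BTreeSet};
    ///
    /// let mut pending = BTreeMap::from([
    ///     ("source", BTreeSet::new()),
    ///     ("mv1", BTreeSet::from(["source"])),
    ///     ("mv2", BTreeSet::from(["mv1"])),
    /// ]);
    /// let mut registered = Vec::new();
    /// while !pending.is_empty() {
    ///     let ready: Vec<_> = pending
    ///         .iter()
    ///         .filter(|(_, deps)| deps.iter().all(|d| registered.contains(d)))
    ///         .map(|(id, _)| *id)
    ///         .collect();
    ///     for id in &ready {
    ///         pending.remove(id);
    ///     }
    ///     registered.extend(ready);
    /// }
    /// assert_eq!(registered, vec!["source", "mv1", "mv2"]);
    /// ```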
2837    #[instrument]
2838    async fn bootstrap_storage_collections(
2839        &mut self,
2840        migrated_storage_collections: &BTreeSet<CatalogItemId>,
2841    ) {
2842        let catalog = self.catalog();
2843
2844        let source_desc = |object_id: GlobalId,
2845                           data_source: &DataSourceDesc,
2846                           desc: &RelationDesc,
2847                           timeline: &Timeline| {
2848            let data_source = match data_source.clone() {
2849                // Re-announce the source description.
2850                DataSourceDesc::Ingestion { desc, cluster_id } => {
2851                    let desc = desc.into_inline_connection(catalog.state());
2852                    let ingestion = IngestionDescription::new(desc, cluster_id, object_id);
2853                    DataSource::Ingestion(ingestion)
2854                }
2855                DataSourceDesc::OldSyntaxIngestion {
2856                    desc,
2857                    progress_subsource,
2858                    data_config,
2859                    details,
2860                    cluster_id,
2861                } => {
2862                    let desc = desc.into_inline_connection(catalog.state());
2863                    let data_config = data_config.into_inline_connection(catalog.state());
2864                    // TODO(parkmycar): We should probably check the type here, but I'm not sure if
2865                    // this will always be a Source or a Table.
2866                    let progress_subsource =
2867                        catalog.get_entry(&progress_subsource).latest_global_id();
2868                    let mut ingestion =
2869                        IngestionDescription::new(desc, cluster_id, progress_subsource);
2870                    let legacy_export = SourceExport {
2871                        storage_metadata: (),
2872                        data_config,
2873                        details,
2874                    };
2875                    ingestion.source_exports.insert(object_id, legacy_export);
2876
2877                    DataSource::Ingestion(ingestion)
2878                }
2879                DataSourceDesc::IngestionExport {
2880                    ingestion_id,
2881                    external_reference: _,
2882                    details,
2883                    data_config,
2884                } => {
2885                    // TODO(parkmycar): We should probably check the type here, but I'm not sure if
2886                    // this will always be a Source or a Table.
2887                    let ingestion_id = catalog.get_entry(&ingestion_id).latest_global_id();
2888
2889                    DataSource::IngestionExport {
2890                        ingestion_id,
2891                        details,
2892                        data_config: data_config.into_inline_connection(catalog.state()),
2893                    }
2894                }
2895                DataSourceDesc::Webhook { .. } => DataSource::Webhook,
2896                DataSourceDesc::Progress => DataSource::Progress,
2897                DataSourceDesc::Introspection(introspection) => {
2898                    DataSource::Introspection(introspection)
2899                }
2900                DataSourceDesc::Catalog => DataSource::Other,
2901            };
2902            CollectionDescription {
2903                desc: desc.clone(),
2904                data_source,
2905                since: None,
2906                timeline: Some(timeline.clone()),
2907                primary: None,
2908            }
2909        };
2910
2911        let mut compute_collections = vec![];
2912        let mut collections = vec![];
2913        for entry in catalog.entries() {
2914            match entry.item() {
2915                CatalogItem::Source(source) => {
2916                    collections.push((
2917                        source.global_id(),
2918                        source_desc(
2919                            source.global_id(),
2920                            &source.data_source,
2921                            &source.desc,
2922                            &source.timeline,
2923                        ),
2924                    ));
2925                }
2926                CatalogItem::Table(table) => {
2927                    match &table.data_source {
2928                        TableDataSource::TableWrites { defaults: _ } => {
2929                            let versions: BTreeMap<_, _> = table
2930                                .collection_descs()
2931                                .map(|(gid, version, desc)| (version, (gid, desc)))
2932                                .collect();
2933                            let collection_descs = versions.iter().map(|(version, (gid, desc))| {
2934                                let next_version = version.bump();
2935                                let primary_collection =
2936                                    versions.get(&next_version).map(|(gid, _desc)| gid).copied();
2937                                let mut collection_desc =
2938                                    CollectionDescription::for_table(desc.clone());
2939                                collection_desc.primary = primary_collection;
2940
2941                                (*gid, collection_desc)
2942                            });
2943                            collections.extend(collection_descs);
2944                        }
2945                        TableDataSource::DataSource {
2946                            desc: data_source_desc,
2947                            timeline,
2948                        } => {
2949                            // TODO(alter_table): Support versioning tables that read from sources.
2950                            soft_assert_eq_or_log!(table.collections.len(), 1);
2951                            let collection_descs =
2952                                table.collection_descs().map(|(gid, _version, desc)| {
2953                                    (
2954                                        gid,
2955                                        source_desc(
2956                                            entry.latest_global_id(),
2957                                            data_source_desc,
2958                                            &desc,
2959                                            timeline,
2960                                        ),
2961                                    )
2962                                });
2963                            collections.extend(collection_descs);
2964                        }
2965                    };
2966                }
2967                CatalogItem::MaterializedView(mv) => {
2968                    let collection_descs = mv.collection_descs().map(|(gid, _version, desc)| {
2969                        let collection_desc =
2970                            CollectionDescription::for_other(desc, mv.initial_as_of.clone());
2971                        (gid, collection_desc)
2972                    });
2973
2974                    collections.extend(collection_descs);
2975                    compute_collections.push((mv.global_id_writes(), mv.desc.latest()));
2976                }
2977                CatalogItem::Sink(sink) => {
2978                    let storage_sink_from_entry = self.catalog().get_entry_by_global_id(&sink.from);
2979                    let from_desc = storage_sink_from_entry
2980                        .relation_desc()
2981                        .expect("sinks can only be built on items with descs")
2982                        .into_owned();
2983                    let collection_desc = CollectionDescription {
2984                        // TODO(sinks): make generic once we have more than one sink type.
2985                        desc: KAFKA_PROGRESS_DESC.clone(),
2986                        data_source: DataSource::Sink {
2987                            desc: ExportDescription {
2988                                sink: StorageSinkDesc {
2989                                    from: sink.from,
2990                                    from_desc,
2991                                    connection: sink
2992                                        .connection
2993                                        .clone()
2994                                        .into_inline_connection(self.catalog().state()),
2995                                    envelope: sink.envelope,
2996                                    as_of: Antichain::from_elem(Timestamp::minimum()),
2997                                    with_snapshot: sink.with_snapshot,
2998                                    version: sink.version,
2999                                    from_storage_metadata: (),
3000                                    to_storage_metadata: (),
3001                                    commit_interval: sink.commit_interval,
3002                                },
3003                                instance_id: sink.cluster_id,
3004                            },
3005                        },
3006                        since: None,
3007                        timeline: None,
3008                        primary: None,
3009                    };
3010                    collections.push((sink.global_id, collection_desc));
3011                }
3012                CatalogItem::Log(_)
3013                | CatalogItem::View(_)
3014                | CatalogItem::Index(_)
3015                | CatalogItem::Type(_)
3016                | CatalogItem::Func(_)
3017                | CatalogItem::Secret(_)
3018                | CatalogItem::Connection(_) => (),
3019            }
3020        }
3021
3022        let register_ts = if self.controller.read_only() {
3023            self.get_local_read_ts().await
3024        } else {
3025            // Getting a write timestamp bumps the write timestamp in the
3026            // oracle, which we're not allowed to do in read-only mode.
3027            self.get_local_write_ts().await.timestamp
3028        };
3029
3030        let storage_metadata = self.catalog.state().storage_metadata();
3031        let migrated_storage_collections = migrated_storage_collections
3032            .into_iter()
3033            .flat_map(|item_id| self.catalog.get_entry(item_id).global_ids())
3034            .collect();
3035
3036        // Before possibly creating collections, make sure their schemas are correct.
3037        //
3038        // Across different versions of Materialize the nullability of columns can change based on
3039        // updates to our optimizer.
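        //
        // For example, a column that an older version's optimizer inferred
        // as non-nullable may be inferred as nullable by a newer version,
        // or vice versa.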
3040        self.controller
3041            .storage
3042            .evolve_nullability_for_bootstrap(storage_metadata, compute_collections)
3043            .await
3044            .unwrap_or_terminate("cannot fail to evolve collections");
3045
3046        // New builtin storage collections are by default created with [0] since/upper frontiers.
3047        // For collections that have dependencies on other collections (MVs, CTs), this can violate
3048        // the frontier invariants assumed by as-of selection. For example, as-of selection expects
3049        // to be able to pick up computing a materialized view from its most recent upper, but if
3050        // that upper is [0] it's likely that the required times are not available anymore in the
3051        // MV inputs.
3052        //
3053        // To avoid violating frontier invariants, we need to bump their sinces to times greater
3054        // than all of their upstream storage inputs. To know the since of a storage input, it has
3055        // to be registered with the storage controller first. Thus we register collections in
3056        // layers: Each iteration registers the collections whose dependencies are all already
3057        // registered.
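        //
        // For example (hypothetical frontiers): a materialized view reading
        // from two sources whose sinces are [3] and [5] is registered after
        // both sources, with its since bumped to their join, [5].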
3058        let mut pending: BTreeMap<_, _> = collections.into_iter().collect();
3059
3060        // Precompute storage-collection dependencies for each collection.
3061        let transitive_dep_gids: BTreeMap<_, _> = pending
3062            .keys()
3063            .map(|gid| {
3064                let entry = self.catalog.get_entry_by_global_id(gid);
3065                let item_id = entry.id();
3066                let deps = self.catalog.state().transitive_uses(item_id);
3067                let dep_gids: BTreeSet<_> = deps
3068                    // Ignore self-dependencies. For example, `transitive_uses` includes the input ID,
3069                    // and CTs can depend on themselves.
3070                    .filter(|dep_id| *dep_id != item_id)
3071                    .map(|dep_id| self.catalog.get_entry(&dep_id).latest_global_id())
3072                    // Ignore dependencies on objects that are not storage collections.
3073                    .filter(|dep_gid| pending.contains_key(dep_gid))
3074                    .collect();
3075                (*gid, dep_gids)
3076            })
3077            .collect();
3078
3079        while !pending.is_empty() {
3080            // Drain collections whose dependencies have all been registered already
3081            // (i.e., are not in `pending`).
3082            let ready_gids: BTreeSet<_> = pending
3083                .keys()
3084                .filter(|gid| {
3085                    let deps = &transitive_dep_gids[gid];
3086                    !deps.iter().any(|dep_gid| pending.contains_key(dep_gid))
3087                })
3088                .copied()
3089                .collect();
3090            let mut ready: Vec<_> = pending
3091                .extract_if(.., |gid, _| ready_gids.contains(gid))
3092                .collect();
3093
3094            // Bump sinces of builtin collections.
3095            for (gid, collection) in &mut ready {
3096                // Don't silently overwrite an explicitly specified `since`.
3097                if !gid.is_system() || collection.since.is_some() {
3098                    continue;
3099                }
3100
3101                let mut derived_since = Antichain::from_elem(Timestamp::MIN);
3102                for dep_gid in &transitive_dep_gids[gid] {
3103                    let (since, _) = self
3104                        .controller
3105                        .storage
3106                        .collection_frontiers(*dep_gid)
3107                        .expect("previously registered");
3108                    derived_since.join_assign(&since);
3109                }
3110                collection.since = Some(derived_since);
3111            }
3112
3113            if ready.is_empty() {
3114                soft_panic_or_log!(
3115                    "cycle in storage collections: {:?}",
3116                    pending.keys().collect::<Vec<_>>(),
3117                );
3118                // We get here only due to a bug. Rather than crash-looping, we try our best to
3119                // reach a sane state by attempting to register all the remaining collections at
3120                // once.
3121                ready = mem::take(&mut pending).into_iter().collect();
3122            }
3123
3124            self.controller
3125                .storage
3126                .create_collections_for_bootstrap(
3127                    storage_metadata,
3128                    Some(register_ts),
3129                    ready,
3130                    &migrated_storage_collections,
3131                )
3132                .await
3133                .unwrap_or_terminate("cannot fail to create collections");
3134        }
3135
3136        if !self.controller.read_only() {
3137            self.apply_local_write(register_ts).await;
3138        }
3139    }
3140
3141    /// Returns the current list of catalog entries, sorted into an appropriate order for
3142    /// bootstrapping.
3143    ///
3144    /// The returned entries are in dependency order. Indexes are sorted immediately after the
3145    /// objects they index, to ensure that all dependants of these indexed objects can make use of
3146    /// the respective indexes.
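    ///
    /// For example (hypothetical objects): for a view `v` reading from a
    /// table `t` that has an index `i`, the returned order is `[t, i, v]`,
    /// so that `v` can make use of `i`.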
3147    fn bootstrap_sort_catalog_entries(&self) -> Vec<CatalogEntry> {
3148        let mut indexes_on = BTreeMap::<_, Vec<_>>::new();
3149        let mut non_indexes = Vec::new();
3150        for entry in self.catalog().entries().cloned() {
3151            if let Some(index) = entry.index() {
3152                let on = self.catalog().get_entry_by_global_id(&index.on);
3153                indexes_on.entry(on.id()).or_default().push(entry);
3154            } else {
3155                non_indexes.push(entry);
3156            }
3157        }
3158
3159        let key_fn = |entry: &CatalogEntry| entry.id;
3160        let dependencies_fn = |entry: &CatalogEntry| entry.uses();
3161        sort_topological(&mut non_indexes, key_fn, dependencies_fn);
3162
3163        let mut result = Vec::new();
3164        for entry in non_indexes {
3165            let id = entry.id();
3166            result.push(entry);
3167            if let Some(mut indexes) = indexes_on.remove(&id) {
3168                result.append(&mut indexes);
3169            }
3170        }
3171
3172        soft_assert_or_log!(
3173            indexes_on.is_empty(),
3174            "indexes with missing dependencies: {indexes_on:?}",
3175        );
3176
3177        result
3178    }
3179
3180    /// Invokes the optimizer on all indexes and materialized views in the catalog and inserts the
3181    /// resulting dataflow plans into the catalog state.
3182    ///
3183    /// `ordered_catalog_entries` must be sorted in dependency order, with dependencies ordered
3184    /// before their dependants.
3185    ///
3186    /// This method does not perform timestamp selection for the dataflows, nor does it create them
3187    /// in the compute controller. Both of these steps happen later during bootstrapping.
3188    ///
3189    /// Returns a map of expressions that were not cached.
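    ///
    /// A sketch of the cache discipline applied to each index and
    /// materialized view, with a simplified cache entry instead of the real
    /// `GlobalExpressions` (illustrative only): a cached plan is reused only
    /// if it was produced with the same optimizer features.
    ///
    /// ```ignore
    /// use std::collections::BTreeMap;
    ///
    /// struct Cached {
    ///     features: u32,
    ///     plan: &'static str,
    /// }
    ///
    /// let mut cache = BTreeMap::from([(1u64, Cached { features: 7, plan: "cached" })]);
    /// let current_features = 8;
    /// let plan = match cache.remove(&1) {
    ///     Some(c) if c.features == current_features => c.plan,
    ///     // A feature mismatch (or a cache miss) falls back to the optimizer.
    ///     _ => "reoptimized",
    /// };
    /// assert_eq!(plan, "reoptimized");
    /// ```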
3190    #[instrument]
3191    fn bootstrap_dataflow_plans(
3192        &mut self,
3193        ordered_catalog_entries: &[CatalogEntry],
3194        mut cached_global_exprs: BTreeMap<GlobalId, GlobalExpressions>,
3195    ) -> Result<BTreeMap<GlobalId, GlobalExpressions>, AdapterError> {
3196        // The optimizer expects to be able to query its `ComputeInstanceSnapshot` for
3197        // collections the current dataflow can depend on. But since we don't yet install anything
3198        // on compute instances, the snapshot information is incomplete. We fix that by manually
3199        // updating `ComputeInstanceSnapshot` objects to ensure they contain collections previously
3200        // optimized.
3201        let mut instance_snapshots = BTreeMap::new();
3202        let mut uncached_expressions = BTreeMap::new();
3203
3204        let optimizer_config = |catalog: &Catalog, cluster_id| {
3205            let system_config = catalog.system_config();
3206            let overrides = catalog.get_cluster(cluster_id).config.features();
3207            OptimizerConfig::from(system_config).override_from(&overrides)
3208        };
3209
3210        for entry in ordered_catalog_entries {
3211            match entry.item() {
3212                CatalogItem::Index(idx) => {
3213                    // Collect optimizer parameters.
3214                    let compute_instance =
3215                        instance_snapshots.entry(idx.cluster_id).or_insert_with(|| {
3216                            self.instance_snapshot(idx.cluster_id)
3217                                .expect("compute instance exists")
3218                        });
3219                    let global_id = idx.global_id();
3220
3221                    // The index may already be installed on the compute instance. For example,
3222                    // this is the case for introspection indexes.
3223                    if compute_instance.contains_collection(&global_id) {
3224                        continue;
3225                    }
3226
3227                    let optimizer_config = optimizer_config(&self.catalog, idx.cluster_id);
3228
3229                    let (optimized_plan, physical_plan, metainfo) =
3230                        match cached_global_exprs.remove(&global_id) {
3231                            Some(global_expressions)
3232                                if global_expressions.optimizer_features
3233                                    == optimizer_config.features =>
3234                            {
3235                                debug!("global expression cache hit for {global_id:?}");
3236                                (
3237                                    global_expressions.global_mir,
3238                                    global_expressions.physical_plan,
3239                                    global_expressions.dataflow_metainfos,
3240                                )
3241                            }
3242                            Some(_) | None => {
3243                                let (optimized_plan, global_lir_plan) = {
3244                                    // Build an optimizer for this INDEX.
3245                                    let mut optimizer = optimize::index::Optimizer::new(
3246                                        self.owned_catalog(),
3247                                        compute_instance.clone(),
3248                                        global_id,
3249                                        optimizer_config.clone(),
3250                                        self.optimizer_metrics(),
3251                                    );
3252
3253                                    // MIR ⇒ MIR optimization (global)
3254                                    let index_plan = optimize::index::Index::new(
3255                                        entry.name().clone(),
3256                                        idx.on,
3257                                        idx.keys.to_vec(),
3258                                    );
3259                                    let global_mir_plan = optimizer.optimize(index_plan)?;
3260                                    let optimized_plan = global_mir_plan.df_desc().clone();
3261
3262                                    // MIR ⇒ LIR lowering and LIR ⇒ LIR optimization (global)
3263                                    let global_lir_plan = optimizer.optimize(global_mir_plan)?;
3264
3265                                    (optimized_plan, global_lir_plan)
3266                                };
3267
3268                                let (physical_plan, metainfo) = global_lir_plan.unapply();
3269                                let metainfo = {
3270                                    // Pre-allocate a vector of transient GlobalIds for each notice.
3271                                    let notice_ids =
3272                                        std::iter::repeat_with(|| self.allocate_transient_id())
3273                                            .map(|(_item_id, gid)| gid)
3274                                            .take(metainfo.optimizer_notices.len())
3275                                            .collect::<Vec<_>>();
3276                                    // Return a metainfo with rendered notices.
3277                                    self.catalog().render_notices(
3278                                        metainfo,
3279                                        notice_ids,
3280                                        Some(idx.global_id()),
3281                                    )
3282                                };
3283                                uncached_expressions.insert(
3284                                    global_id,
3285                                    GlobalExpressions {
3286                                        global_mir: optimized_plan.clone(),
3287                                        physical_plan: physical_plan.clone(),
3288                                        dataflow_metainfos: metainfo.clone(),
3289                                        optimizer_features: optimizer_config.features.clone(),
3290                                    },
3291                                );
3292                                (optimized_plan, physical_plan, metainfo)
3293                            }
3294                        };
3295
3296                    let catalog = self.catalog_mut();
3297                    catalog.set_optimized_plan(idx.global_id(), optimized_plan);
3298                    catalog.set_physical_plan(idx.global_id(), physical_plan);
3299                    catalog.set_dataflow_metainfo(idx.global_id(), metainfo);
3300
3301                    compute_instance.insert_collection(idx.global_id());
3302                }
3303                CatalogItem::MaterializedView(mv) => {
3304                    // Collect optimizer parameters.
3305                    let compute_instance =
3306                        instance_snapshots.entry(mv.cluster_id).or_insert_with(|| {
3307                            self.instance_snapshot(mv.cluster_id)
3308                                .expect("compute instance exists")
3309                        });
3310                    let global_id = mv.global_id_writes();
3311
3312                    let optimizer_config = optimizer_config(&self.catalog, mv.cluster_id);
3313
3314                    let (optimized_plan, physical_plan, metainfo) = match cached_global_exprs
3315                        .remove(&global_id)
3316                    {
3317                        Some(global_expressions)
3318                            if global_expressions.optimizer_features
3319                                == optimizer_config.features =>
3320                        {
3321                            debug!("global expression cache hit for {global_id:?}");
3322                            (
3323                                global_expressions.global_mir,
3324                                global_expressions.physical_plan,
3325                                global_expressions.dataflow_metainfos,
3326                            )
3327                        }
3328                        Some(_) | None => {
3329                            let (_, internal_view_id) = self.allocate_transient_id();
3330                            let debug_name = self
3331                                .catalog()
3332                                .resolve_full_name(entry.name(), None)
3333                                .to_string();
3334
3335                            let (optimized_plan, global_lir_plan) = {
3336                                // Build an optimizer for this MATERIALIZED VIEW.
3337                                let mut optimizer = optimize::materialized_view::Optimizer::new(
3338                                    self.owned_catalog().as_optimizer_catalog(),
3339                                    compute_instance.clone(),
3340                                    global_id,
3341                                    internal_view_id,
3342                                    mv.desc.latest().iter_names().cloned().collect(),
3343                                    mv.non_null_assertions.clone(),
3344                                    mv.refresh_schedule.clone(),
3345                                    debug_name,
3346                                    optimizer_config.clone(),
3347                                    self.optimizer_metrics(),
3348                                );
3349
3350                                // MIR ⇒ MIR optimization (global)
3351                                // We make sure to use the HIR SQL type (since MIR SQL types may not be coherent).
3352                                let typ = infer_sql_type_for_catalog(
3353                                    &mv.raw_expr,
3354                                    &mv.locally_optimized_expr.as_ref().clone(),
3355                                );
3356                                let global_mir_plan = optimizer
3357                                    .optimize((mv.locally_optimized_expr.as_ref().clone(), typ))?;
3358                                let optimized_plan = global_mir_plan.df_desc().clone();
3359
3360                                // MIR ⇒ LIR lowering and LIR ⇒ LIR optimization (global)
3361                                let global_lir_plan = optimizer.optimize(global_mir_plan)?;
3362
3363                                (optimized_plan, global_lir_plan)
3364                            };
3365
3366                            let (physical_plan, metainfo) = global_lir_plan.unapply();
3367                            let metainfo = {
3368                                // Pre-allocate a vector of transient GlobalIds for each notice.
3369                                let notice_ids =
3370                                    std::iter::repeat_with(|| self.allocate_transient_id())
3371                                        .map(|(_item_id, global_id)| global_id)
3372                                        .take(metainfo.optimizer_notices.len())
3373                                        .collect::<Vec<_>>();
3374                                // Return a metainfo with rendered notices.
3375                                self.catalog().render_notices(
3376                                    metainfo,
3377                                    notice_ids,
3378                                    Some(mv.global_id_writes()),
3379                                )
3380                            };
3381                            uncached_expressions.insert(
3382                                global_id,
3383                                GlobalExpressions {
3384                                    global_mir: optimized_plan.clone(),
3385                                    physical_plan: physical_plan.clone(),
3386                                    dataflow_metainfos: metainfo.clone(),
3387                                    optimizer_features: optimizer_config.features.clone(),
3388                                },
3389                            );
3390                            (optimized_plan, physical_plan, metainfo)
3391                        }
3392                    };
3393
3394                    let catalog = self.catalog_mut();
3395                    catalog.set_optimized_plan(mv.global_id_writes(), optimized_plan);
3396                    catalog.set_physical_plan(mv.global_id_writes(), physical_plan);
3397                    catalog.set_dataflow_metainfo(mv.global_id_writes(), metainfo);
3398
3399                    compute_instance.insert_collection(mv.global_id_writes());
3400                }
3401                CatalogItem::Table(_)
3402                | CatalogItem::Source(_)
3403                | CatalogItem::Log(_)
3404                | CatalogItem::View(_)
3405                | CatalogItem::Sink(_)
3406                | CatalogItem::Type(_)
3407                | CatalogItem::Func(_)
3408                | CatalogItem::Secret(_)
3409                | CatalogItem::Connection(_) => (),
3410            }
3411        }
3412
3413        Ok(uncached_expressions)
3414    }
3415
3416    /// Selects for each compute dataflow an as-of suitable for bootstrapping it.
3417    ///
3418    /// Returns a set of [`ReadHold`]s that ensures the read frontiers of involved collections stay
3419    /// in place and that must not be dropped before all compute dataflows have been created with
3420    /// the compute controller.
3421    ///
3422    /// This method expects all storage collections and dataflow plans to be available, so it must
3423    /// run after [`Coordinator::bootstrap_storage_collections`] and
3424    /// [`Coordinator::bootstrap_dataflow_plans`].
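    ///
    /// A usage sketch (hypothetical caller, not real code): the returned
    /// holds must outlive dataflow creation.
    ///
    /// ```ignore
    /// let read_holds = coord.bootstrap_dataflow_as_ofs().await;
    /// create_all_compute_dataflows(&mut coord).await;
    /// // Only now may the holds be dropped, allowing compaction to resume.
    /// drop(read_holds);
    /// ```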
3425    async fn bootstrap_dataflow_as_ofs(&mut self) -> BTreeMap<GlobalId, ReadHold> {
3426        let mut catalog_ids = Vec::new();
3427        let mut dataflows = Vec::new();
3428        let mut read_policies = BTreeMap::new();
3429        for entry in self.catalog.entries() {
3430            let gid = match entry.item() {
3431                CatalogItem::Index(idx) => idx.global_id(),
3432                CatalogItem::MaterializedView(mv) => mv.global_id_writes(),
3433                CatalogItem::Table(_)
3434                | CatalogItem::Source(_)
3435                | CatalogItem::Log(_)
3436                | CatalogItem::View(_)
3437                | CatalogItem::Sink(_)
3438                | CatalogItem::Type(_)
3439                | CatalogItem::Func(_)
3440                | CatalogItem::Secret(_)
3441                | CatalogItem::Connection(_) => continue,
3442            };
3443            if let Some(plan) = self.catalog.try_get_physical_plan(&gid) {
3444                catalog_ids.push(gid);
3445                dataflows.push(plan.clone());
3446
3447                if let Some(compaction_window) = entry.item().initial_logical_compaction_window() {
3448                    read_policies.insert(gid, compaction_window.into());
3449                }
3450            }
3451        }
3452
3453        let read_ts = self.get_local_read_ts().await;
3454        let read_holds = as_of_selection::run(
3455            &mut dataflows,
3456            &read_policies,
3457            &*self.controller.storage_collections,
3458            read_ts,
3459            self.controller.read_only(),
3460        );
3461
3462        let catalog = self.catalog_mut();
3463        for (id, plan) in catalog_ids.into_iter().zip_eq(dataflows) {
3464            catalog.set_physical_plan(id, plan);
3465        }
3466
3467        read_holds
3468    }
3469
3470    /// Serves the coordinator, receiving commands from users over `cmd_rx`
3471    /// and feedback from dataflow workers over `feedback_rx`.
3472    ///
3473    /// You must call `bootstrap` before calling this method.
3474    ///
3475    /// BOXED FUTURE: As of Nov 2023 the Future returned by this function was 92KB. It would
3476    /// get stored on the stack, which is bad for runtime performance and blows up our stack
3477    /// usage. Because of that we purposefully move this Future onto the heap (i.e. Box it).
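    ///
    /// A minimal sketch of the boxing pattern (illustrative only):
    ///
    /// ```ignore
    /// use futures::future::{FutureExt, LocalBoxFuture};
    ///
    /// fn big_task() -> LocalBoxFuture<'static, ()> {
    ///     async move {
    ///         // ... a large state machine that would otherwise live on the stack ...
    ///     }
    ///     .boxed_local()
    /// }
    /// ```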
3478    fn serve(
3479        mut self,
3480        mut internal_cmd_rx: mpsc::UnboundedReceiver<Message>,
3481        mut strict_serializable_reads_rx: mpsc::UnboundedReceiver<(ConnectionId, PendingReadTxn)>,
3482        mut cmd_rx: mpsc::UnboundedReceiver<(OpenTelemetryContext, Command)>,
3483        group_commit_rx: appends::GroupCommitWaiter,
3484    ) -> LocalBoxFuture<'static, ()> {
3485        async move {
3486            // Watcher that listens for and reports cluster service status changes.
3487            let mut cluster_events = self.controller.events_stream();
3488            let last_message = Arc::new(Mutex::new(LastMessage {
3489                kind: "none",
3490                stmt: None,
3491            }));
3492
3493            let (idle_tx, mut idle_rx) = tokio::sync::mpsc::channel(1);
3494            let idle_metric = self.metrics.queue_busy_seconds.clone();
3495            let last_message_watchdog = Arc::clone(&last_message);
3496
3497            spawn(|| "coord watchdog", async move {
3498                // Every 5 seconds, attempt to measure how long it takes for the
3499                // coord select loop to drain, because this message is processed
3500                // at the lowest priority. If the loop is idle, the measurement
3501                // will only be a few microseconds.
3502                let mut interval = tokio::time::interval(Duration::from_secs(5));
3503                // If we end up having to wait more than 5 seconds for the coord to respond, then the
3504                // behavior of Delay results in the interval "restarting" from whenever we yield
3505                // instead of trying to catch up.
3506                interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Delay);
3507
3508                // Track if we become stuck to de-dupe error reporting.
3509                let mut coord_stuck = false;
3510
3511                loop {
3512                    interval.tick().await;
3513
3514                    // Wait for space in the channel; if we time out, the coordinator is stuck!
3515                    let duration = tokio::time::Duration::from_secs(30);
3516                    let timeout = tokio::time::timeout(duration, idle_tx.reserve()).await;
3517                    let Ok(maybe_permit) = timeout else {
3518                        // Only log if we're newly stuck, to prevent logging repeatedly.
3519                        if !coord_stuck {
3520                            let last_message = last_message_watchdog.lock().expect("poisoned");
3521                            tracing::warn!(
3522                                last_message_kind = %last_message.kind,
3523                                last_message_sql = %last_message.stmt_to_string(),
3524                                "coordinator stuck for {duration:?}",
3525                            );
3526                        }
3527                        coord_stuck = true;
3528
3529                        continue;
3530                    };
3531
3532                    // We got a permit, we're not stuck!
3533                    if coord_stuck {
3534                        tracing::info!("Coordinator became unstuck");
3535                    }
3536                    coord_stuck = false;
3537
3538                    // If we failed to acquire a permit it's because we're shutting down.
3539                    let Ok(permit) = maybe_permit else {
3540                        break;
3541                    };
3542
3543                    permit.send(idle_metric.start_timer());
3544                }
3545            });
3546
3547            self.schedule_storage_usage_collection().await;
3548            self.schedule_arrangement_sizes_collection().await;
3549            self.spawn_privatelink_vpc_endpoints_watch_task();
3550            self.spawn_statement_logging_task();
3551            flags::tracing_config(self.catalog.system_config()).apply(&self.tracing_handle);
3552
3553            // Report if the handling of a single message takes longer than this threshold.
3554            let warn_threshold = self
3555                .catalog()
3556                .system_config()
3557                .coord_slow_message_warn_threshold();
3558
3559            // How many messages we'd like to batch up before processing them. Must be > 0.
3560            const MESSAGE_BATCH: usize = 64;
3561            let mut messages = Vec::with_capacity(MESSAGE_BATCH);
3562            let mut cmd_messages = Vec::with_capacity(MESSAGE_BATCH);
3563
3564            let message_batch = self.metrics.message_batch.clone();
3565
3566            loop {
3567                // Before adding a branch to this select loop, please ensure that the branch is
3568                // cancellation safe and add a comment explaining why. You can refer here for more
3569                // info: https://docs.rs/tokio/latest/tokio/macro.select.html#cancellation-safety
3570                select! {
3571                    // We prioritize internal commands over other commands. However, we work through
3572                    // batches of commands in some branches of this select, which means that even if
3573                    // a command generates internal commands, we will work through the current batch
3574                    // before receiving a new batch of commands.
3575                    biased;
3576
3577                    // `recv_many()` on `UnboundedReceiver` is cancellation safe:
3578                    // https://docs.rs/tokio/1.38.0/tokio/sync/mpsc/struct.UnboundedReceiver.html#cancel-safety-1
3579                    // Receive a batch of commands.
3580                    _ = internal_cmd_rx.recv_many(&mut messages, MESSAGE_BATCH) => {},
3581                    // `next()` on any stream is cancel-safe:
3582                    // https://docs.rs/tokio-stream/0.1.9/tokio_stream/trait.StreamExt.html#cancel-safety
3583                    // Receive a single command.
3584                    Some(event) = cluster_events.next() => {
3585                        messages.push(Message::ClusterEvent(event))
3586                    },
3587                    // See [`mz_controller::Controller::ready`] for notes
3588                    // on why this is cancel-safe.
3589                    // Receive a single command.
3590                    () = self.controller.ready() => {
3591                        // NOTE: We don't get a `Readiness` back from `ready()`
3592                        // because the controller wants to keep it and it's not
3593                        // trivially `Clone` or `Copy`. Hence this accessor.
3594                        let controller = match self.controller.get_readiness() {
3595                            Readiness::Storage => ControllerReadiness::Storage,
3596                            Readiness::Compute => ControllerReadiness::Compute,
3597                            Readiness::Metrics(_) => ControllerReadiness::Metrics,
3598                            Readiness::Internal(_) => ControllerReadiness::Internal,
3599                            Readiness::NotReady => unreachable!("just signaled as ready"),
3600                        };
3601                        messages.push(Message::ControllerReady { controller });
3602                    }
3603                    // See [`appends::GroupCommitWaiter`] for notes on why this is cancel safe.
3604                    // Receive a single command.
3605                    permit = group_commit_rx.ready() => {
3606                        // If we happen to have batched exactly one user write, use
3607                        // that span so that `emit_trace_id_notice` hooks up.
3608                        // Otherwise, the best we can do is invent a new root span
3609                        // and make it follow from all the Spans in the pending
3610                        // writes.
3611                        let user_write_spans = self.pending_writes.iter().flat_map(|x| match x {
3612                            PendingWriteTxn::User{span, ..} => Some(span),
3613                            PendingWriteTxn::System{..} => None,
3614                        });
3615                        let span = match user_write_spans.exactly_one() {
3616                            Ok(span) => span.clone(),
3617                            Err(user_write_spans) => {
3618                                let span = info_span!(parent: None, "group_commit_notify");
3619                                for s in user_write_spans {
3620                                    span.follows_from(s);
3621                                }
3622                                span
3623                            }
3624                        };
3625                        messages.push(Message::GroupCommitInitiate(span, Some(permit)));
3626                    },
3627                    // `recv_many()` on `UnboundedReceiver` is cancellation safe:
3628                    // https://docs.rs/tokio/1.38.0/tokio/sync/mpsc/struct.UnboundedReceiver.html#cancel-safety-1
3629                    // Receive a batch of commands.
3630                    count = cmd_rx.recv_many(&mut cmd_messages, MESSAGE_BATCH) => {
3631                        if count == 0 {
3632                            break;
3633                        } else {
3634                            messages.extend(cmd_messages.drain(..).map(
3635                                |(otel_ctx, cmd)| Message::Command(otel_ctx, cmd),
3636                            ));
3637                        }
3638                    },
3639                    // `recv()` on `UnboundedReceiver` is cancellation safe:
3640                    // https://docs.rs/tokio/1.38.0/tokio/sync/mpsc/struct.UnboundedReceiver.html#cancel-safety
3641                    // Receive a single command.
3642                    Some(pending_read_txn) = strict_serializable_reads_rx.recv() => {
3643                        let mut pending_read_txns = vec![pending_read_txn];
3644                        while let Ok(pending_read_txn) = strict_serializable_reads_rx.try_recv() {
3645                            pending_read_txns.push(pending_read_txn);
3646                        }
3647                        for (conn_id, pending_read_txn) in pending_read_txns {
3648                            let prev = self
3649                                .pending_linearize_read_txns
3650                                .insert(conn_id, pending_read_txn);
3651                            soft_assert_or_log!(
3652                                prev.is_none(),
3653                                "connections cannot have multiple concurrent reads, prev: {prev:?}"
3654                            )
3655                        }
3656                        messages.push(Message::LinearizeReads);
3657                    }
3658                    // `tick()` on `Interval` is cancel-safe:
3659                    // https://docs.rs/tokio/1.19.2/tokio/time/struct.Interval.html#cancel-safety
3660                    // Receive a single command.
3661                    _ = self.advance_timelines_interval.tick() => {
3662                        let span = info_span!(parent: None, "coord::advance_timelines_interval");
3663                        span.follows_from(Span::current());
3664
3665                        // Group commit sends an `AdvanceTimelines` message when
3666                        // done, which is what downgrades read holds. In
3667                        // read-only mode we send this message directly because
3668                        // we're not doing group commits.
3669                        if self.controller.read_only() {
3670                            messages.push(Message::AdvanceTimelines);
3671                        } else {
3672                            messages.push(Message::GroupCommitInitiate(span, None));
3673                        }
3674                    },
3675                    // `tick()` on `Interval` is cancel-safe:
3676                    // https://docs.rs/tokio/1.19.2/tokio/time/struct.Interval.html#cancel-safety
3677                    // Receive a single command.
3678                    _ = self.check_cluster_scheduling_policies_interval.tick() => {
3679                        messages.push(Message::CheckSchedulingPolicies);
3680                    },
3681
3682                    // `tick()` on `Interval` is cancel-safe:
3683                    // https://docs.rs/tokio/1.19.2/tokio/time/struct.Interval.html#cancel-safety
3684                    // Receive a single command.
3685                    _ = self.caught_up_check_interval.tick() => {
3686                        // We do this directly on the main loop instead of
3687                        // firing off a message. We are still in read-only
3688                        // mode, so optimizing for latency by not blocking
3689                        // the main loop is not that important.
3690                        self.maybe_check_caught_up().await;
3691
3692                        continue;
3693                    },
3694
3695                    // Process the idle metric at the lowest priority to sample queue non-idle time.
3696                    // `recv()` on `Receiver` is cancellation safe:
3697                    // https://docs.rs/tokio/1.8.0/tokio/sync/mpsc/struct.Receiver.html#cancel-safety
3698                    // Receive a single command.
3699                    timer = idle_rx.recv() => {
3700                        timer.expect("does not drop").observe_duration();
3701                        self.metrics
3702                            .message_handling
3703                            .with_label_values(&["watchdog"])
3704                            .observe(0.0);
3705                        continue;
3706                    }
3707                };
3708
3709                // Observe the number of messages we're processing at once.
3710                message_batch.observe(f64::cast_lossy(messages.len()));
3711
3712                for msg in messages.drain(..) {
3713                    // All message processing functions trace. Start a parent span
3714                    // for them to make it easy to find slow messages.
3715                    let msg_kind = msg.kind();
3716                    let span = span!(
3717                        target: "mz_adapter::coord::handle_message_loop",
3718                        Level::INFO,
3719                        "coord::handle_message",
3720                        kind = msg_kind
3721                    );
3722                    let otel_context = span.context().span().span_context().clone();
3723
3724                    // Record the last kind of message in case we get stuck. For
3725                    // execute commands, we additionally stash the user's SQL
3726                    // statement, so we can log it too.
3727                    *last_message.lock().expect("poisoned") = LastMessage {
3728                        kind: msg_kind,
3729                        stmt: match &msg {
3730                            Message::Command(
3731                                _,
3732                                Command::Execute {
3733                                    portal_name,
3734                                    session,
3735                                    ..
3736                                },
3737                            ) => session
3738                                .get_portal_unverified(portal_name)
3739                                .and_then(|p| p.stmt.as_ref().map(Arc::clone)),
3740                            _ => None,
3741                        },
3742                    };
3743
3744                    let start = Instant::now();
3745                    self.handle_message(msg).instrument(span).await;
3746                    let duration = start.elapsed();
3747
3748                    self.metrics
3749                        .message_handling
3750                        .with_label_values(&[msg_kind])
3751                        .observe(duration.as_secs_f64());
3752
3753                    // If something is _really_ slow, print a trace id for debugging, if OTEL is enabled.
3754                    if duration > warn_threshold {
3755                        let trace_id = otel_context.is_valid().then(|| otel_context.trace_id());
3756                        tracing::error!(
3757                            ?msg_kind,
3758                            ?trace_id,
3759                            ?duration,
3760                            "very slow coordinator message"
3761                        );
3762                    }
3763                }
3764            }
3765            // Try to clean up as a best effort. There may be some async tasks out there holding a
3766            // reference that prevents us from cleaning up.
3767            if let Some(catalog) = Arc::into_inner(self.catalog) {
3768                catalog.expire().await;
3769            }
3770        }
3771        .boxed_local()
3772    }
3773
3774    /// Obtain a read-only Catalog reference.
3775    fn catalog(&self) -> &Catalog {
3776        &self.catalog
3777    }
3778
3779    /// Obtain a read-only Catalog snapshot, suitable for giving out to
3780    /// non-Coordinator thread tasks.
3781    fn owned_catalog(&self) -> Arc<Catalog> {
3782        Arc::clone(&self.catalog)
3783    }
3784
3785    /// Obtain a handle to the optimizer metrics, suitable for giving
3786    /// out to non-Coordinator thread tasks.
3787    fn optimizer_metrics(&self) -> OptimizerMetrics {
3788        self.optimizer_metrics.clone()
3789    }
3790
3791    /// Obtain a writeable Catalog reference.
3792    fn catalog_mut(&mut self) -> &mut Catalog {
3793        // make_mut will cause any other Arc references (from owned_catalog) to
3794        // continue to be valid by cloning the catalog, putting it in a new Arc,
3795        // which lives at self.catalog. If there are no other Arc references,
3796        // then no clone is made, and it returns a reference to the existing
3797        // object. This makes this method and owned_catalog both very cheap: at
3798        // most one clone per catalog mutation, but only if there's a read-only
3799        // reference to it.
3800        Arc::make_mut(&mut self.catalog)
3801    }
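
    // A minimal sketch (illustrative only, not coordinator code) of the
    // copy-on-write behavior that `catalog_mut` relies on: `Arc::make_mut`
    // clones the inner value only while another `Arc` reference, like the
    // snapshots handed out by `owned_catalog`, is alive.
    //
    // ```
    // use std::sync::Arc;
    //
    // let mut catalog = Arc::new(vec!["table_a"]);
    // let snapshot = Arc::clone(&catalog); // read-only snapshot, as from `owned_catalog`
    //
    // // `snapshot` is still alive, so `make_mut` clones before mutating.
    // Arc::make_mut(&mut catalog).push("table_b");
    // assert_eq!(*snapshot, vec!["table_a"]); // the snapshot is unaffected
    //
    // // With no other references, `make_mut` mutates in place without cloning.
    // drop(snapshot);
    // Arc::make_mut(&mut catalog).push("table_c");
    // assert_eq!(*catalog, vec!["table_a", "table_b", "table_c"]);
    // ```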
3802
3803    /// Refills the user ID pool by allocating IDs from the catalog.
3804    ///
3805    /// Requests `max(min_count, batch_size)` IDs so the pool is never
3806    /// under-filled relative to the configured batch size.
3807    async fn refill_user_id_pool(&mut self, min_count: u64) -> Result<(), AdapterError> {
3808        let batch_size = USER_ID_POOL_BATCH_SIZE.get(self.catalog().system_config().dyncfgs());
3809        let to_allocate = min_count.max(u64::from(batch_size));
3810        let id_ts = self.get_catalog_write_ts().await;
3811        let ids = self.catalog().allocate_user_ids(to_allocate, id_ts).await?;
3812        if let (Some((first_id, _)), Some((last_id, _))) = (ids.first(), ids.last()) {
3813            let start = match first_id {
3814                CatalogItemId::User(id) => *id,
3815                other => {
3816                    return Err(AdapterError::Internal(format!(
3817                        "expected User CatalogItemId, got {other:?}"
3818                    )));
3819                }
3820            };
3821            let end = match last_id {
3822                CatalogItemId::User(id) => *id + 1, // exclusive upper bound
3823                other => {
3824                    return Err(AdapterError::Internal(format!(
3825                        "expected User CatalogItemId, got {other:?}"
3826                    )));
3827                }
3828            };
3829            self.user_id_pool.refill(start, end);
3830        } else {
3831            return Err(AdapterError::Internal(
3832                "catalog returned no user IDs".into(),
3833            ));
3834        }
3835        Ok(())
3836    }
3837
3838    /// Allocates a single user ID, refilling the pool from the catalog if needed.
3839    async fn allocate_user_id(&mut self) -> Result<(CatalogItemId, GlobalId), AdapterError> {
3840        if let Some(id) = self.user_id_pool.allocate() {
3841            return Ok((CatalogItemId::User(id), GlobalId::User(id)));
3842        }
3843        self.refill_user_id_pool(1).await?;
3844        let id = self.user_id_pool.allocate().expect("ID pool just refilled");
3845        Ok((CatalogItemId::User(id), GlobalId::User(id)))
3846    }
3847
3848    /// Allocates `count` user IDs, refilling the pool from the catalog if needed.
3849    async fn allocate_user_ids(
3850        &mut self,
3851        count: u64,
3852    ) -> Result<Vec<(CatalogItemId, GlobalId)>, AdapterError> {
3853        if self.user_id_pool.remaining() < count {
3854            self.refill_user_id_pool(count).await?;
3855        }
3856        let raw_ids = self
3857            .user_id_pool
3858            .allocate_many(count)
3859            .expect("pool has enough IDs after refill");
3860        Ok(raw_ids
3861            .into_iter()
3862            .map(|id| (CatalogItemId::User(id), GlobalId::User(id)))
3863            .collect())
3864    }
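
    // The three methods above assume an `IdPool` with half-open-range
    // semantics: `refill(start, end)` makes ids `start..end` (exclusive upper
    // bound) available, and allocation drains from the front. A hypothetical
    // minimal pool with that contract, for illustration only (the real
    // `IdPool` may differ):
    //
    // ```
    // struct SketchIdPool {
    //     next: u64,
    //     end: u64, // exclusive
    // }
    //
    // impl SketchIdPool {
    //     fn remaining(&self) -> u64 {
    //         self.end - self.next
    //     }
    //     fn refill(&mut self, start: u64, end: u64) {
    //         self.next = start;
    //         self.end = end;
    //     }
    //     fn allocate(&mut self) -> Option<u64> {
    //         (self.next < self.end).then(|| {
    //             let id = self.next;
    //             self.next += 1;
    //             id
    //         })
    //     }
    // }
    //
    // let mut pool = SketchIdPool { next: 0, end: 0 };
    // pool.refill(10, 13); // ids 10, 11, 12
    // assert_eq!(pool.allocate(), Some(10));
    // assert_eq!(pool.remaining(), 2);
    // ```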
3865
3866    /// Obtain a reference to the coordinator's connection context.
3867    fn connection_context(&self) -> &ConnectionContext {
3868        self.controller.connection_context()
3869    }
3870
3871    /// Obtain a reference to the coordinator's secret reader, in an `Arc`.
3872    fn secrets_reader(&self) -> &Arc<dyn SecretsReader> {
3873        &self.connection_context().secrets_reader
3874    }
3875
3876    /// Publishes a notice message to all sessions.
3877    ///
3878    /// TODO(parkmycar): This code is dead, but is a nice parallel to [`Coordinator::broadcast_notice_tx`]
3879    /// so we keep it around.
3880    #[allow(dead_code)]
3881    pub(crate) fn broadcast_notice(&self, notice: AdapterNotice) {
3882        for meta in self.active_conns.values() {
3883            let _ = meta.notice_tx.send(notice.clone());
3884        }
3885    }
3886
3887    /// Returns a closure that will publish a notice to all sessions that were active at the time
3888    /// this method was called.
3889    pub(crate) fn broadcast_notice_tx(
3890        &self,
3891    ) -> Box<dyn FnOnce(AdapterNotice) + Send + 'static> {
3892        let senders: Vec<_> = self
3893            .active_conns
3894            .values()
3895            .map(|meta| meta.notice_tx.clone())
3896            .collect();
3897        Box::new(move |notice| {
3898            for tx in senders {
3899                let _ = tx.send(notice.clone());
3900            }
3901        })
3902    }
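
    // The returned closure captures a snapshot of the notice senders taken at
    // call time, so sessions that connect afterwards never receive the notice.
    // A minimal sketch of that snapshot semantics using plain std channels
    // (illustrative types, not the real `notice_tx`):
    //
    // ```
    // use std::sync::mpsc;
    //
    // let (tx_a, rx_a) = mpsc::channel::<String>();
    // let senders = vec![tx_a]; // snapshot of the currently-active sessions
    // let broadcast = move |notice: String| {
    //     for tx in senders {
    //         let _ = tx.send(notice.clone()); // ignore disconnected sessions
    //     }
    // };
    //
    // let (_tx_b, rx_b) = mpsc::channel::<String>(); // "connects" after the snapshot
    // broadcast("cluster dropped".to_string());
    // assert_eq!(rx_a.recv().unwrap(), "cluster dropped");
    // assert!(rx_b.try_recv().is_err()); // never registered, receives nothing
    // ```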
3903
3904    pub(crate) fn active_conns(&self) -> &BTreeMap<ConnectionId, ConnMeta> {
3905        &self.active_conns
3906    }
3907
3908    #[instrument(level = "debug")]
3909    pub(crate) fn retire_execution(
3910        &mut self,
3911        reason: StatementEndedExecutionReason,
3912        ctx_extra: ExecuteContextExtra,
3913    ) {
3914        if let Some(uuid) = ctx_extra.retire() {
3915            self.end_statement_execution(uuid, reason);
3916        }
3917    }
3918
3919    /// Creates a new dataflow builder from the catalog and indexes in `self`.
3920    #[instrument(level = "debug")]
3921    pub fn dataflow_builder(&self, instance: ComputeInstanceId) -> DataflowBuilder<'_> {
3922        let compute = self
3923            .instance_snapshot(instance)
3924            .expect("compute instance does not exist");
3925        DataflowBuilder::new(self.catalog().state(), compute)
3926    }
3927
3928    /// Returns a reference-less snapshot of the indicated compute instance.
3929    pub fn instance_snapshot(
3930        &self,
3931        id: ComputeInstanceId,
3932    ) -> Result<ComputeInstanceSnapshot, InstanceMissing> {
3933        ComputeInstanceSnapshot::new(&self.controller, id)
3934    }
3935
3936    /// Call into the compute controller to install a finalized dataflow, and
3937    /// initialize the read policies for its exported readable objects.
3938    ///
3939    /// # Panics
3940    ///
3941    /// Panics if dataflow creation fails.
3942    pub(crate) async fn ship_dataflow(
3943        &mut self,
3944        dataflow: DataflowDescription<Plan>,
3945        instance: ComputeInstanceId,
3946        target_replica: Option<ReplicaId>,
3947    ) {
3948        self.try_ship_dataflow(dataflow, instance, target_replica)
3949            .await
3950            .unwrap_or_terminate("dataflow creation cannot fail");
3951    }
3952
3953    /// Call into the compute controller to install a finalized dataflow, and
3954    /// initialize the read policies for its exported readable objects.
3955    pub(crate) async fn try_ship_dataflow(
3956        &mut self,
3957        dataflow: DataflowDescription<Plan>,
3958        instance: ComputeInstanceId,
3959        target_replica: Option<ReplicaId>,
3960    ) -> Result<(), DataflowCreationError> {
3961        // We must only install read policies for indexes, not for sinks.
3962        // Sinks are write-only compute collections that don't have read policies.
3963        let export_ids = dataflow.exported_index_ids().collect();
3964
3965        self.controller
3966            .compute
3967            .create_dataflow(instance, dataflow, target_replica)?;
3968
3969        self.initialize_compute_read_policies(export_ids, instance, CompactionWindow::Default)
3970            .await;
3971
3972        Ok(())
3973    }
3974
3975    /// Call into the compute controller to allow writes to the specified ID
3976    /// on the specified instance. Calling this function multiple times or
3977    /// calling it on a read-only instance has no effect.
3978    pub(crate) fn allow_writes(&mut self, instance: ComputeInstanceId, id: GlobalId) {
3979        self.controller
3980            .compute
3981            .allow_writes(instance, id)
3982            .unwrap_or_terminate("allow_writes cannot fail");
3983    }
3984
3985    /// Like `ship_dataflow`, but also await on builtin table updates.
3986    pub(crate) async fn ship_dataflow_and_notice_builtin_table_updates(
3987        &mut self,
3988        dataflow: DataflowDescription<Plan>,
3989        instance: ComputeInstanceId,
3990        notice_builtin_updates_fut: Option<BuiltinTableAppendNotify>,
3991        target_replica: Option<ReplicaId>,
3992    ) {
3993        if let Some(notice_builtin_updates_fut) = notice_builtin_updates_fut {
3994            let ship_dataflow_fut = self.ship_dataflow(dataflow, instance, target_replica);
3995            let ((), ()) =
3996                futures::future::join(notice_builtin_updates_fut, ship_dataflow_fut).await;
3997        } else {
3998            self.ship_dataflow(dataflow, instance, target_replica).await;
3999        }
4000    }
4001
4002    /// Install a _watch set_ in the controller that is automatically associated with the given
4003    /// connection id. The watchset will be automatically cleared if the connection terminates
4004    /// before the watchset completes.
4005    pub fn install_compute_watch_set(
4006        &mut self,
4007        conn_id: ConnectionId,
4008        objects: BTreeSet<GlobalId>,
4009        t: Timestamp,
4010        state: WatchSetResponse,
4011    ) -> Result<(), CollectionLookupError> {
4012        let ws_id = self.controller.install_compute_watch_set(objects, t)?;
4013        self.connection_watch_sets
4014            .entry(conn_id.clone())
4015            .or_default()
4016            .insert(ws_id);
4017        self.installed_watch_sets.insert(ws_id, (conn_id, state));
4018        Ok(())
4019    }
4020
4021    /// Install a _watch set_ in the controller that is automatically associated with the given
4022    /// connection id. The watchset will be automatically cleared if the connection terminates
4023    /// before the watchset completes.
4024    pub fn install_storage_watch_set(
4025        &mut self,
4026        conn_id: ConnectionId,
4027        objects: BTreeSet<GlobalId>,
4028        t: Timestamp,
4029        state: WatchSetResponse,
4030    ) -> Result<(), CollectionMissing> {
4031        let ws_id = self.controller.install_storage_watch_set(objects, t)?;
4032        self.connection_watch_sets
4033            .entry(conn_id.clone())
4034            .or_default()
4035            .insert(ws_id);
4036        self.installed_watch_sets.insert(ws_id, (conn_id, state));
4037        Ok(())
4038    }
4039
4040    /// Cancels pending watchsets associated with the provided connection id.
4041    pub fn cancel_pending_watchsets(&mut self, conn_id: &ConnectionId) {
4042        if let Some(ws_ids) = self.connection_watch_sets.remove(conn_id) {
4043            for ws_id in ws_ids {
4044                self.installed_watch_sets.remove(&ws_id);
4045            }
4046        }
4047    }
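
    // Watch-set bookkeeping spans two maps that must stay in sync:
    // `connection_watch_sets` (connection id -> watch-set ids) enables cleanup
    // on connection termination, while `installed_watch_sets` (watch-set id ->
    // (connection id, state)) drives completion. A minimal sketch of that
    // invariant with toy id types (illustrative, not the real types):
    //
    // ```
    // use std::collections::{BTreeMap, BTreeSet};
    //
    // let mut by_conn: BTreeMap<u32, BTreeSet<u64>> = BTreeMap::new();
    // let mut by_ws: BTreeMap<u64, u32> = BTreeMap::new();
    //
    // // Install: record the watch set in both maps.
    // let (conn_id, ws_id) = (7u32, 42u64);
    // by_conn.entry(conn_id).or_default().insert(ws_id);
    // by_ws.insert(ws_id, conn_id);
    //
    // // Cancel: drop the connection's entry and every watch set it owned.
    // if let Some(ws_ids) = by_conn.remove(&conn_id) {
    //     for ws_id in ws_ids {
    //         by_ws.remove(&ws_id);
    //     }
    // }
    // assert!(by_conn.is_empty() && by_ws.is_empty());
    // ```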
4048
4049    /// Returns the state of the [`Coordinator`] formatted as JSON.
4050    ///
4051    /// The returned value is not guaranteed to be stable and may change at any point in time.
4052    pub async fn dump(&self) -> Result<serde_json::Value, anyhow::Error> {
4053        // Note: We purposefully use the `Debug` formatting for the value of all fields in the
4054        // returned object as a tradeoff between usability and stability. `serde_json` will fail
4055        // to serialize an object if the keys aren't strings, so `Debug` formatting the values
4056        // prevents a future unrelated change from silently breaking this method.
4057
4058        let global_timelines: BTreeMap<_, _> = self
4059            .global_timelines
4060            .iter()
4061            .map(|(timeline, state)| (timeline.to_string(), format!("{state:?}")))
4062            .collect();
4063        let active_conns: BTreeMap<_, _> = self
4064            .active_conns
4065            .iter()
4066            .map(|(id, meta)| (id.unhandled().to_string(), format!("{meta:?}")))
4067            .collect();
4068        let txn_read_holds: BTreeMap<_, _> = self
4069            .txn_read_holds
4070            .iter()
4071            .map(|(id, capability)| (id.unhandled().to_string(), format!("{capability:?}")))
4072            .collect();
4073        let pending_peeks: BTreeMap<_, _> = self
4074            .pending_peeks
4075            .iter()
4076            .map(|(id, peek)| (id.to_string(), format!("{peek:?}")))
4077            .collect();
4078        let client_pending_peeks: BTreeMap<_, _> = self
4079            .client_pending_peeks
4080            .iter()
4081            .map(|(id, peek)| {
4082                let peek: BTreeMap<_, _> = peek
4083                    .iter()
4084                    .map(|(uuid, storage_id)| (uuid.to_string(), storage_id))
4085                    .collect();
4086                (id.to_string(), peek)
4087            })
4088            .collect();
4089        let pending_linearize_read_txns: BTreeMap<_, _> = self
4090            .pending_linearize_read_txns
4091            .iter()
4092            .map(|(id, read_txn)| (id.unhandled().to_string(), format!("{read_txn:?}")))
4093            .collect();
4094
4095        Ok(serde_json::json!({
4096            "global_timelines": global_timelines,
4097            "active_conns": active_conns,
4098            "txn_read_holds": txn_read_holds,
4099            "pending_peeks": pending_peeks,
4100            "client_pending_peeks": client_pending_peeks,
4101            "pending_linearize_read_txns": pending_linearize_read_txns,
4102            "controller": self.controller.dump().await?,
4103        }))
4104    }
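
    // The Debug-format-the-values trick above keeps `serde_json` happy even if
    // a field's value type later changes, because only the keys must be
    // strings. A minimal sketch of the pattern (toy types, not real
    // coordinator state):
    //
    // ```
    // use std::collections::BTreeMap;
    //
    // let peeks: BTreeMap<u64, (u64, &str)> = BTreeMap::from([(1, (10, "pending"))]);
    // let dumped: BTreeMap<String, String> = peeks
    //     .iter()
    //     .map(|(id, peek)| (id.to_string(), format!("{peek:?}")))
    //     .collect();
    // let json = serde_json::json!({ "pending_peeks": dumped });
    // assert_eq!(json["pending_peeks"]["1"], "(10, \"pending\")");
    // ```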
4105
4106    /// Prune all storage usage events from the [`MZ_STORAGE_USAGE_BY_SHARD`] table that are older
4107    /// than `retention_period`.
4108    ///
4109    /// This method will read the entire contents of [`MZ_STORAGE_USAGE_BY_SHARD`] into memory
4110    /// which can be expensive.
4111    ///
4112    /// DO NOT call this method outside of startup. The safety of reading at the current oracle read
4113    /// timestamp and then writing at whatever the current write timestamp is (instead of
4114    /// `read_ts + 1`) relies on the fact that there are no outstanding writes during startup.
4115    ///
4116    /// Group commit, which this method uses to write the retractions, has builtin fencing, and we
4117    /// never commit retractions to [`MZ_STORAGE_USAGE_BY_SHARD`] outside of this method, which is
4118    /// only called once during startup. So we don't have to worry about double/invalid retractions.
4119    async fn prune_storage_usage_events_on_startup(&self, retention_period: Duration) {
4120        let item_id = self
4121            .catalog()
4122            .resolve_builtin_table(&MZ_STORAGE_USAGE_BY_SHARD);
4123        let global_id = self.catalog.get_entry(&item_id).latest_global_id();
4124        let read_ts = self.get_local_read_ts().await;
4125        let current_contents_fut = self
4126            .controller
4127            .storage_collections
4128            .snapshot(global_id, read_ts);
4129        let internal_cmd_tx = self.internal_cmd_tx.clone();
4130        spawn(|| "storage_usage_prune", async move {
4131            let mut current_contents = current_contents_fut
4132                .await
4133                .unwrap_or_terminate("cannot fail to fetch snapshot");
4134            differential_dataflow::consolidation::consolidate(&mut current_contents);
4135
4136            let cutoff_ts = u128::from(read_ts).saturating_sub(retention_period.as_millis());
4137            let mut expired = Vec::new();
4138            for (row, diff) in current_contents {
4139                assert_eq!(
4140                    diff, 1,
4141                    "consolidated contents should not contain retractions: ({row:#?}, {diff:#?})"
4142                );
4143                // This logic relies on the definition of `mz_storage_usage_by_shard` not changing.
4144                let collection_timestamp = row
4145                    .unpack()
4146                    .get(3)
4147                    .expect("definition of mz_storage_usage_by_shard changed")
4148                    .unwrap_timestamptz();
4149                let collection_timestamp = collection_timestamp.timestamp_millis();
4150                let collection_timestamp: u128 = collection_timestamp
4151                    .try_into()
4152                    .expect("all collections happen after Jan 1 1970");
4153                if collection_timestamp < cutoff_ts {
4154                    debug!("pruning storage event {row:?}");
4155                    let builtin_update = BuiltinTableUpdate::row(item_id, row, Diff::MINUS_ONE);
4156                    expired.push(builtin_update);
4157                }
4158            }
4159
4160            // If this send fails, the main thread has shut down.
4161            let _ = internal_cmd_tx.send(Message::StorageUsagePrune(expired));
4162        });
4163    }
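
    // The retention cutoff above is plain millisecond arithmetic, widened to
    // `u128` so that subtracting a large retention period saturates at zero
    // instead of panicking. A minimal sketch with assumed example values:
    //
    // ```
    // use std::time::Duration;
    //
    // let read_ts: u64 = 1_700_000_000_000; // ms since the Unix epoch
    // let retention_period = Duration::from_secs(30 * 24 * 60 * 60); // 30 days
    // let cutoff_ts = u128::from(read_ts).saturating_sub(retention_period.as_millis());
    // assert_eq!(cutoff_ts, 1_697_408_000_000);
    //
    // // Events collected strictly before the cutoff are retracted.
    // let collection_timestamp: u128 = 1_690_000_000_000;
    // assert!(collection_timestamp < cutoff_ts);
    // ```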
4164
4165    /// Retracts `mz_object_arrangement_size_history` rows older than the
4166    /// `arrangement_size_history_retention_period` dyncfg.
4167    ///
4168    /// Must only run at startup: it reads at the oracle read timestamp and
4169    /// writes retractions at the current write timestamp, which is only safe
4170    /// when no other writes are in flight. See [the equivalent storage-usage
4171    /// pruner](Self::prune_storage_usage_events_on_startup) for the same
4172    /// reasoning.
4173    async fn prune_arrangement_sizes_history_on_startup(&self) {
4174        // The catalog server is not writable in read-only mode.
4175        if self.controller.read_only() {
4176            return;
4177        }
4178
4179        let retention_period = mz_adapter_types::dyncfgs::ARRANGEMENT_SIZE_HISTORY_RETENTION_PERIOD
4180            .get(self.catalog().system_config().dyncfgs());
4181        let item_id = self
4182            .catalog()
4183            .resolve_builtin_table(&mz_catalog::builtin::MZ_OBJECT_ARRANGEMENT_SIZE_HISTORY);
4184        let global_id = self.catalog.get_entry(&item_id).latest_global_id();
4185        let read_ts = self.get_local_read_ts().await;
4186        let current_contents_fut = self
4187            .controller
4188            .storage_collections
4189            .snapshot(global_id, read_ts);
4190        let internal_cmd_tx = self.internal_cmd_tx.clone();
4191        spawn(|| "arrangement_sizes_history_prune", async move {
4192            let mut current_contents = current_contents_fut
4193                .await
4194                .unwrap_or_terminate("cannot fail to fetch snapshot");
4195            differential_dataflow::consolidation::consolidate(&mut current_contents);
4196
4197            let cutoff_ts = u128::from(read_ts).saturating_sub(retention_period.as_millis());
4198            let expired =
4199                arrangement_sizes_expired_retractions(current_contents, cutoff_ts, item_id);
4200
4201            // TODO(arrangement-sizes): when the writeable-catalog-server
4202            // plumbing in https://github.com/MaterializeInc/materialize/pull/35436
4203            // lands, retract directly on `mz_catalog_server`.
4204            let _ = internal_cmd_tx.send(Message::ArrangementSizesPrune(expired));
4205        });
4206    }
4207
4208    fn current_credit_consumption_rate(&self) -> Numeric {
4209        self.catalog()
4210            .user_cluster_replicas()
4211            .filter_map(|replica| match &replica.config.location {
4212                ReplicaLocation::Managed(location) => Some(location.size_for_billing()),
4213                ReplicaLocation::Unmanaged(_) => None,
4214            })
4215            .map(|size| {
4216                self.catalog()
4217                    .cluster_replica_sizes()
4218                    .0
4219                    .get(size)
4220                    .expect("location size is validated against the cluster replica sizes")
4221                    .credits_per_hour
4222            })
4223            .sum()
4224    }
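
    // A minimal sketch of the billing-rate computation above, with toy types:
    // only managed replicas bill, each size maps through a rate table, and the
    // per-replica rates sum (here with `f64` standing in for `Numeric`):
    //
    // ```
    // use std::collections::BTreeMap;
    //
    // enum Location {
    //     Managed { size: &'static str },
    //     Unmanaged,
    // }
    //
    // let credits_per_hour: BTreeMap<&str, f64> =
    //     BTreeMap::from([("small", 0.25), ("large", 1.0)]);
    // let replicas = [
    //     Location::Managed { size: "small" },
    //     Location::Unmanaged, // self-managed: not billed
    //     Location::Managed { size: "large" },
    // ];
    // let rate: f64 = replicas
    //     .iter()
    //     .filter_map(|replica| match replica {
    //         Location::Managed { size } => Some(credits_per_hour[size]),
    //         Location::Unmanaged => None,
    //     })
    //     .sum();
    // assert_eq!(rate, 1.25);
    // ```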
4225}
4226
4227/// Returns retraction updates for rows in a consolidated
4228/// `mz_object_arrangement_size_history` snapshot whose `collection_timestamp`
4229/// (column 3) is strictly before `cutoff_ts`.
4230///
4231/// Panics if any input row has `diff != 1`: the caller must consolidate first,
4232/// and a consolidated history table should never contain retractions because
4233/// the only source of retractions is this function itself.
4234fn arrangement_sizes_expired_retractions(
4235    rows: impl IntoIterator<Item = (mz_repr::Row, i64)>,
4236    cutoff_ts: u128,
4237    item_id: CatalogItemId,
4238) -> Vec<BuiltinTableUpdate> {
4239    let mut expired = Vec::new();
4240    for (row, diff) in rows {
4241        assert_eq!(
4242            diff, 1,
4243            "consolidated contents should not contain retractions: ({row:#?}, {diff:#?})"
4244        );
4245        let collection_timestamp = row
4246            .unpack()
4247            .get(3)
4248            .expect("definition of mz_object_arrangement_size_history changed")
4249            .unwrap_timestamptz()
4250            .timestamp_millis();
4251        let collection_timestamp: u128 = collection_timestamp
4252            .try_into()
4253            .expect("all collections happen after Jan 1 1970");
4254        if collection_timestamp < cutoff_ts {
4255            expired.push(BuiltinTableUpdate::row(item_id, row, Diff::MINUS_ONE));
4256        }
4257    }
4258    expired
4259}
4260
4261#[cfg(test)]
4262impl Coordinator {
4263    #[allow(dead_code)]
4264    async fn verify_ship_dataflow_no_error(&mut self, dataflow: DataflowDescription<Plan>) {
4265        // `ship_dataflow` is not allowed to have a `Result` return because this function is
4266        // called after `catalog_transact`, after which no errors are allowed. This test exists to
4267        // prevent us from incorrectly teaching those functions how to return errors (which has
4268        // happened twice, motivating this test).
4269
4270        // An arbitrary compute instance ID to satisfy the function calls below. Note that
4271        // this only works because this function will never run.
4272        let compute_instance = ComputeInstanceId::user(1).expect("1 is a valid ID");
4273
4274        let _: () = self.ship_dataflow(dataflow, compute_instance, None).await;
4275    }
4276}
4277
4278/// Contains information about the last message the [`Coordinator`] processed.
4279struct LastMessage {
4280    kind: &'static str,
4281    stmt: Option<Arc<Statement<Raw>>>,
4282}
4283
4284impl LastMessage {
4285    /// Returns a redacted version of the statement that is safe for logs.
4286    fn stmt_to_string(&self) -> Cow<'static, str> {
4287        self.stmt
4288            .as_ref()
4289            .map(|stmt| stmt.to_ast_string_redacted().into())
4290            .unwrap_or(Cow::Borrowed("<none>"))
4291    }
4292}
4293
4294impl fmt::Debug for LastMessage {
4295    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
4296        f.debug_struct("LastMessage")
4297            .field("kind", &self.kind)
4298            .field("stmt", &self.stmt_to_string())
4299            .finish()
4300    }
4301}
4302
4303impl Drop for LastMessage {
4304    fn drop(&mut self) {
4305        // Only print the last message if we're currently panicking, otherwise we'd spam our logs.
4306        if std::thread::panicking() {
4307            // If we're panicking there's no guarantee `tracing` still works, so print to stderr.
4308            eprintln!("Coordinator panicking, dumping last message\n{self:?}");
4309        }
4310    }
4311}
4312
4313/// Serves the coordinator based on the provided configuration.
4314///
4315/// For a high-level description of the coordinator, see the [crate
4316/// documentation](crate).
4317///
4318/// Returns a handle to the coordinator and a client to communicate with the
4319/// coordinator.
4320///
4321/// BOXED FUTURE: As of Nov 2023 the returned Future from this function was 42KB. This would
4322/// get stored on the stack, which is bad for runtime performance and blows up our stack usage.
4323/// Because of that we purposefully move this Future onto the heap (i.e. Box it).
4324pub fn serve(
4325    Config {
4326        controller_config,
4327        controller_envd_epoch,
4328        mut storage,
4329        audit_logs_iterator,
4330        timestamp_oracle_url,
4331        unsafe_mode,
4332        all_features,
4333        build_info,
4334        environment_id,
4335        metrics_registry,
4336        now,
4337        secrets_controller,
4338        cloud_resource_controller,
4339        cluster_replica_sizes,
4340        builtin_system_cluster_config,
4341        builtin_catalog_server_cluster_config,
4342        builtin_probe_cluster_config,
4343        builtin_support_cluster_config,
4344        builtin_analytics_cluster_config,
4345        system_parameter_defaults,
4346        availability_zones,
4347        storage_usage_client,
4348        storage_usage_collection_interval,
4349        storage_usage_retention_period,
4350        segment_client,
4351        egress_addresses,
4352        aws_account_id,
4353        aws_privatelink_availability_zones,
4354        connection_context,
4355        connection_limit_callback,
4356        remote_system_parameters,
4357        webhook_concurrency_limit,
4358        http_host_name,
4359        tracing_handle,
4360        read_only_controllers,
4361        caught_up_trigger: clusters_caught_up_trigger,
4362        helm_chart_version,
4363        license_key,
4364        external_login_password_mz_system,
4365        force_builtin_schema_migration,
4366    }: Config,
4367) -> BoxFuture<'static, Result<(Handle, Client), AdapterError>> {
4368    async move {
4369        let coord_start = Instant::now();
4370        info!("startup: coordinator init: beginning");
4371        info!("startup: coordinator init: preamble beginning");
4372
4373        // Initializing the builtins can be an expensive process and consume a lot of memory. We
4374        // forcibly initialize it early while the stack is relatively empty to avoid stack
4375        // overflows later.
4376        let _builtins = LazyLock::force(&BUILTINS_STATIC);
4377
4378        let (cmd_tx, cmd_rx) = mpsc::unbounded_channel();
4379        let (internal_cmd_tx, internal_cmd_rx) = mpsc::unbounded_channel();
4380        let (strict_serializable_reads_tx, strict_serializable_reads_rx) =
4381            mpsc::unbounded_channel();
4382
4383        // Validate and process availability zones.
4384        if !availability_zones.iter().all_unique() {
4385            coord_bail!("availability zones must be unique");
4386        }
4387
4388        let aws_principal_context = match (
4389            aws_account_id,
4390            connection_context.aws_external_id_prefix.clone(),
4391        ) {
4392            (Some(aws_account_id), Some(aws_external_id_prefix)) => Some(AwsPrincipalContext {
4393                aws_account_id,
4394                aws_external_id_prefix,
4395            }),
4396            _ => None,
4397        };
4398
4399        let aws_privatelink_availability_zones = aws_privatelink_availability_zones
4400            .map(|azs_vec| BTreeSet::from_iter(azs_vec.iter().cloned()));
4401
4402        info!(
4403            "startup: coordinator init: preamble complete in {:?}",
4404            coord_start.elapsed()
4405        );
4406        let oracle_init_start = Instant::now();
4407        info!("startup: coordinator init: timestamp oracle init beginning");
4408
4409        let timestamp_oracle_config = timestamp_oracle_url
4410            .map(|url| TimestampOracleConfig::from_url(&url, &metrics_registry))
4411            .transpose()?;
4412        let mut initial_timestamps =
4413            get_initial_oracle_timestamps(&timestamp_oracle_config).await?;
4414
4415        // Insert an entry for the `EpochMilliseconds` timeline if one doesn't exist,
4416        // which will ensure that the timeline is initialized since it's required
4417        // by the system.
4418        initial_timestamps
4419            .entry(Timeline::EpochMilliseconds)
4420            .or_insert_with(mz_repr::Timestamp::minimum);
4421        let mut timestamp_oracles = BTreeMap::new();
4422        for (timeline, initial_timestamp) in initial_timestamps {
4423            Coordinator::ensure_timeline_state_with_initial_time(
4424                &timeline,
4425                initial_timestamp,
4426                now.clone(),
4427                timestamp_oracle_config.clone(),
4428                &mut timestamp_oracles,
4429                read_only_controllers,
4430            )
4431            .await;
4432        }
4433
4434        // Opening the durable catalog uses one or more timestamps without communicating with
4435        // the timestamp oracle. Here we make sure to apply the catalog upper with the timestamp
4436        // oracle to linearize future operations with opening the catalog.
4437        let catalog_upper = storage.current_upper().await;
4438        // Choose a time at which to boot. This is used, for example, to prune
4439        // old storage usage data or migrate audit log entries.
4440        //
4441        // This time is usually the current system time, but with protection
4442        // against backwards time jumps, even across restarts.
4443        let epoch_millis_oracle = &timestamp_oracles
4444            .get(&Timeline::EpochMilliseconds)
4445            .expect("inserted above")
4446            .oracle;
4447
4448        let mut boot_ts = if read_only_controllers {
4449            let read_ts = epoch_millis_oracle.read_ts().await;
4450            std::cmp::max(read_ts, catalog_upper)
4451        } else {
4452            // Getting/applying a write timestamp bumps the write timestamp in the
4453            // oracle, which we're not allowed to do in read-only mode.
4454            epoch_millis_oracle.apply_write(catalog_upper).await;
4455            epoch_millis_oracle.write_ts().await.timestamp
4456        };
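
        // A minimal sketch of the timestamp "ratchet" used throughout boot:
        // each step that may consume timestamps (oracle init, catalog open,
        // controller init) pushes `boot_ts` forward via `max`, so the boot
        // timestamp never regresses even if a later observation is stale.
        //
        // ```
        // let mut boot_ts: u64 = 0;
        // // Illustrative uppers from oracle, catalog open, controller init.
        // for observed_upper in [100u64, 130, 120] {
        //     boot_ts = std::cmp::max(boot_ts, observed_upper);
        // }
        // assert_eq!(boot_ts, 130); // the stale 120 did not pull it back
        // ```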
4457
4458        info!(
4459            "startup: coordinator init: timestamp oracle init complete in {:?}",
4460            oracle_init_start.elapsed()
4461        );
4462
4463        let catalog_open_start = Instant::now();
4464        info!("startup: coordinator init: catalog open beginning");
4465        let persist_client = controller_config
4466            .persist_clients
4467            .open(controller_config.persist_location.clone())
4468            .await
4469            .context("opening persist client")?;
4470        let builtin_item_migration_config = BuiltinItemMigrationConfig {
4471            persist_client: persist_client.clone(),
4472            read_only: read_only_controllers,
4473            force_migration: force_builtin_schema_migration,
4474        };
4477        let OpenCatalogResult {
4478            mut catalog,
4479            migrated_storage_collections_0dt,
4480            new_builtin_collections,
4481            builtin_table_updates,
4482            cached_global_exprs,
4483            uncached_local_exprs,
4484        } = Catalog::open(mz_catalog::config::Config {
4485            storage,
4486            metrics_registry: &metrics_registry,
4487            state: mz_catalog::config::StateConfig {
4488                unsafe_mode,
4489                all_features,
4490                build_info,
4491                environment_id: environment_id.clone(),
4492                read_only: read_only_controllers,
4493                now: now.clone(),
4494                boot_ts: boot_ts.clone(),
4495                skip_migrations: false,
4496                cluster_replica_sizes,
4497                builtin_system_cluster_config,
4498                builtin_catalog_server_cluster_config,
4499                builtin_probe_cluster_config,
4500                builtin_support_cluster_config,
4501                builtin_analytics_cluster_config,
4502                system_parameter_defaults,
4503                remote_system_parameters,
4504                availability_zones,
4505                egress_addresses,
4506                aws_principal_context,
4507                aws_privatelink_availability_zones,
4508                connection_context,
4509                http_host_name,
4510                builtin_item_migration_config,
4511                persist_client: persist_client.clone(),
4512                enable_expression_cache_override: None,
4513                helm_chart_version,
4514                external_login_password_mz_system,
4515                license_key: license_key.clone(),
4516            },
4517        })
4518        .await?;
4519
4520        // Opening the catalog uses one or more timestamps, so push the boot timestamp up to the
4521        // current catalog upper.
4522        let catalog_upper = catalog.current_upper().await;
4523        boot_ts = std::cmp::max(boot_ts, catalog_upper);
4524
4525        if !read_only_controllers {
4526            epoch_millis_oracle.apply_write(boot_ts).await;
4527        }
4528
4529        info!(
4530            "startup: coordinator init: catalog open complete in {:?}",
4531            catalog_open_start.elapsed()
4532        );
4533
4534        let coord_thread_start = Instant::now();
4535        info!("startup: coordinator init: coordinator thread start beginning");
4536
4537        let session_id = catalog.config().session_id;
4538        let start_instant = catalog.config().start_instant;
4539
4540        // In order for the coordinator to support Rc and RefCell types, it cannot be
4541        // sent across threads. Spawn it in a thread and have this parent thread wait
4542        // for bootstrap completion before proceeding.
4543        let (bootstrap_tx, bootstrap_rx) = oneshot::channel();
4544        let handle = TokioHandle::current();
4545
4546        let metrics = Metrics::register_into(&metrics_registry);
4547        let metrics_clone = metrics.clone();
4548        let optimizer_metrics = OptimizerMetrics::register_into(
4549            &metrics_registry,
4550            catalog.system_config().optimizer_e2e_latency_warning_threshold(),
4551        );
4552        let segment_client_clone = segment_client.clone();
4553        let coord_now = now.clone();
4554        let advance_timelines_interval =
4555            tokio::time::interval(catalog.system_config().default_timestamp_interval());
4556        let mut check_scheduling_policies_interval = tokio::time::interval(
4557            catalog
4558                .system_config()
4559                .cluster_check_scheduling_policies_interval(),
4560        );
4561        check_scheduling_policies_interval.set_missed_tick_behavior(MissedTickBehavior::Delay);
4562
4563        let clusters_caught_up_check_interval = if read_only_controllers {
4564            let dyncfgs = catalog.system_config().dyncfgs();
4565            let interval = WITH_0DT_DEPLOYMENT_CAUGHT_UP_CHECK_INTERVAL.get(dyncfgs);
4566
4567            let mut interval = tokio::time::interval(interval);
4568            interval.set_missed_tick_behavior(MissedTickBehavior::Skip);
4569            interval
4570        } else {
4571            // When not in read-only mode, we don't do hydration checks. But we
4572            // still have to provide _some_ interval. This is large enough that
4573            // it doesn't matter.
4574            //
4575            // TODO(aljoscha): We cannot use Duration::MAX right now because of
4576            // https://github.com/tokio-rs/tokio/issues/6634. Use that once it's
4577            // fixed for good.
4578            let mut interval = tokio::time::interval(Duration::from_secs(60 * 60));
4579            interval.set_missed_tick_behavior(MissedTickBehavior::Skip);
4580            interval
4581        };
4582
4583        let clusters_caught_up_check =
4584            clusters_caught_up_trigger.map(|trigger| {
4585                let mut exclude_collections: BTreeSet<GlobalId> =
4586                    new_builtin_collections.into_iter().collect();
4587
4588                // Migrated MVs can't make progress in read-only mode. Exclude them and all their
4589                // transitive dependents.
4590                //
4591                // TODO: Consider sending `allow_writes` for the dataflows of migrated MVs, which
4592                //       would allow them to make progress even in read-only mode. This doesn't
4593                //       work for MVs based on `mz_catalog_raw`, if the leader's version is less
4594                //       than v26.17, since before that version the catalog shard's frontier wasn't
4595                //       kept up-to-date with the current time. So this workaround has to remain in
4596                //       place until upgrades from a version less than v26.17 are no longer supported.
4597                let mut todo: Vec<_> = migrated_storage_collections_0dt
4598                    .iter()
4599                    .filter(|id| {
4600                        catalog.state().get_entry(id).is_materialized_view()
4601                    })
4602                    .copied()
4603                    .collect();
4604                while let Some(item_id) = todo.pop() {
4605                    let entry = catalog.state().get_entry(&item_id);
4606                    exclude_collections.extend(entry.global_ids());
4607                    todo.extend_from_slice(entry.used_by());
4608                }
4609
4610                CaughtUpCheckContext {
4611                    trigger,
4612                    exclude_collections,
4613                }
4614            });
4615
4616        if let Some(TimestampOracleConfig::Postgres(pg_config)) =
4617            timestamp_oracle_config.as_ref()
4618        {
4619            // Apply settings from system vars as early as possible because some
4620            // of them are locked in right when an oracle is first opened!
4621            let pg_timestamp_oracle_params =
4622                flags::timestamp_oracle_config(catalog.system_config());
4623            pg_timestamp_oracle_params.apply(pg_config);
4624        }
4625
4626        // Register a callback so whenever the MAX_CONNECTIONS or SUPERUSER_RESERVED_CONNECTIONS
4627        // system variables change, we update our connection limits.
4628        let connection_limit_callback: Arc<dyn Fn(&SystemVars) + Send + Sync> =
4629            Arc::new(move |system_vars: &SystemVars| {
4630                let limit: u64 = system_vars.max_connections().cast_into();
4631                let superuser_reserved: u64 =
4632                    system_vars.superuser_reserved_connections().cast_into();
4633
4634                // If superuser_reserved > max_connections, prefer max_connections.
4635                //
4636                // In this scenario all normal users would be locked out because all
4637                // connections would be reserved for superusers, so we complain if that happens.
4638                let superuser_reserved = if superuser_reserved >= limit {
4639                    tracing::warn!(
4640                        "superuser_reserved ({superuser_reserved}) is greater than max connections ({limit})!"
4641                    );
4642                    limit
4643                } else {
4644                    superuser_reserved
4645                };
4646
4647                (connection_limit_callback)(limit, superuser_reserved);
4648            });
4649        catalog.system_config_mut().register_callback(
4650            &mz_sql::session::vars::MAX_CONNECTIONS,
4651            Arc::clone(&connection_limit_callback),
4652        );
4653        catalog.system_config_mut().register_callback(
4654            &mz_sql::session::vars::SUPERUSER_RESERVED_CONNECTIONS,
4655            connection_limit_callback,
4656        );
4657
4658        let (group_commit_tx, group_commit_rx) = appends::notifier();
4659
4660        let parent_span = tracing::Span::current();
4661        let thread = thread::Builder::new()
4662            // The Coordinator thread tends to keep a lot of data on its stack. To
4663            // prevent a stack overflow we allocate a stack three times as big as the default
4664            // stack.
4665            .stack_size(3 * stack::STACK_SIZE)
4666            .name("coordinator".to_string())
4667            .spawn(move || {
4668                let span = info_span!(parent: parent_span, "coord::coordinator").entered();
4669
4670                let controller = handle
4671                    .block_on({
4672                        catalog.initialize_controller(
4673                            controller_config,
4674                            controller_envd_epoch,
4675                            read_only_controllers,
4676                        )
4677                    })
4678                    .unwrap_or_terminate("failed to initialize storage_controller");
4679                // Initializing the controller uses one or more timestamps, so push the boot timestamp up to the
4680                // current catalog upper.
4681                let catalog_upper = handle.block_on(catalog.current_upper());
4682                boot_ts = std::cmp::max(boot_ts, catalog_upper);
4683                if !read_only_controllers {
4684                    let epoch_millis_oracle = &timestamp_oracles
4685                        .get(&Timeline::EpochMilliseconds)
4686                        .expect("inserted above")
4687                        .oracle;
4688                    handle.block_on(epoch_millis_oracle.apply_write(boot_ts));
4689                }
4690
4691                let catalog = Arc::new(catalog);
4692
4693                let caching_secrets_reader = CachingSecretsReader::new(secrets_controller.reader());
4694                let mut coord = Coordinator {
4695                    controller,
4696                    catalog,
4697                    internal_cmd_tx,
4698                    group_commit_tx,
4699                    strict_serializable_reads_tx,
4700                    global_timelines: timestamp_oracles,
4701                    transient_id_gen: Arc::new(TransientIdGen::new()),
4702                    active_conns: BTreeMap::new(),
4703                    txn_read_holds: Default::default(),
4704                    pending_peeks: BTreeMap::new(),
4705                    client_pending_peeks: BTreeMap::new(),
4706                    pending_linearize_read_txns: BTreeMap::new(),
4707                    serialized_ddl: LockedVecDeque::new(),
4708                    active_compute_sinks: BTreeMap::new(),
4709                    active_webhooks: BTreeMap::new(),
4710                    active_copies: BTreeMap::new(),
4711                    staged_cancellation: BTreeMap::new(),
4712                    introspection_subscribes: BTreeMap::new(),
4713                    write_locks: BTreeMap::new(),
4714                    deferred_write_ops: BTreeMap::new(),
4715                    pending_writes: Vec::new(),
4716                    advance_timelines_interval,
4717                    secrets_controller,
4718                    caching_secrets_reader,
4719                    cloud_resource_controller,
4720                    storage_usage_client,
4721                    storage_usage_collection_interval,
4722                    segment_client,
4723                    metrics,
4724                    optimizer_metrics,
4725                    tracing_handle,
4726                    statement_logging: StatementLogging::new(coord_now.clone()),
4727                    webhook_concurrency_limit,
4728                    timestamp_oracle_config,
4729                    check_cluster_scheduling_policies_interval: check_scheduling_policies_interval,
4730                    cluster_scheduling_decisions: BTreeMap::new(),
4731                    caught_up_check_interval: clusters_caught_up_check_interval,
4732                    caught_up_check: clusters_caught_up_check,
4733                    installed_watch_sets: BTreeMap::new(),
4734                    connection_watch_sets: BTreeMap::new(),
4735                    cluster_replica_statuses: ClusterReplicaStatuses::new(),
4736                    read_only_controllers,
4737                    buffered_builtin_table_updates: Some(Vec::new()),
4738                    license_key,
4739                    user_id_pool: IdPool::empty(),
4740                    persist_client,
4741                };
4742                let bootstrap = handle.block_on(async {
4743                    coord
4744                        .bootstrap(
4745                            boot_ts,
4746                            migrated_storage_collections_0dt,
4747                            builtin_table_updates,
4748                            cached_global_exprs,
4749                            uncached_local_exprs,
4750                            audit_logs_iterator,
4751                        )
4752                        .await?;
4753                    coord
4754                        .controller
4755                        .remove_orphaned_replicas(
4756                            coord.catalog().get_next_user_replica_id().await?,
4757                            coord.catalog().get_next_system_replica_id().await?,
4758                        )
4759                        .await
4760                        .map_err(AdapterError::Orchestrator)?;
4761
4762                    if let Some(retention_period) = storage_usage_retention_period {
4763                        coord
4764                            .prune_storage_usage_events_on_startup(retention_period)
4765                            .await;
4766                    }
4767
4768                    coord.prune_arrangement_sizes_history_on_startup().await;
4769
4770                    Ok(())
4771                });
4772                let ok = bootstrap.is_ok();
4773                drop(span);
4774                bootstrap_tx
4775                    .send(bootstrap)
4776                    .expect("bootstrap_rx is not dropped until it receives this message");
4777                if ok {
4778                    handle.block_on(coord.serve(
4779                        internal_cmd_rx,
4780                        strict_serializable_reads_rx,
4781                        cmd_rx,
4782                        group_commit_rx,
4783                    ));
4784                }
4785            })
4786            .expect("failed to create coordinator thread");
4787        match bootstrap_rx
4788            .await
4789            .expect("bootstrap_tx always sends a message or panics/halts")
4790        {
4791            Ok(()) => {
4792                info!(
4793                    "startup: coordinator init: coordinator thread start complete in {:?}",
4794                    coord_thread_start.elapsed()
4795                );
4796                info!(
4797                    "startup: coordinator init: complete in {:?}",
4798                    coord_start.elapsed()
4799                );
4800                let handle = Handle {
4801                    session_id,
4802                    start_instant,
4803                    _thread: thread.join_on_drop(),
4804                };
4805                let client = Client::new(
4806                    build_info,
4807                    cmd_tx,
4808                    metrics_clone,
4809                    now,
4810                    environment_id,
4811                    segment_client_clone,
4812                );
4813                Ok((handle, client))
4814            }
4815            Err(e) => Err(e),
4816        }
4817    }
4818    .boxed()
4819}
4820
4821// Determines and returns the highest timestamp for each timeline, for all known
4822// timestamp oracle implementations.
4823//
4824// Initially, we did this so that we can switch between implementations of
4825// timestamp oracle, but now we also do this to determine a monotonic boot
4826// timestamp, a timestamp that does not regress across reboots.
4827//
4828// This mostly works, but there can be linearizability violations, because there
4829// is no central moment where we do distributed coordination for all oracle
4830// types. Working around this seems prohibitively hard, maybe even impossible, so
4831// we have to live with this window of potential violations during the upgrade
4832// window (which is the only point where we should switch oracle
4833// implementations).
4834async fn get_initial_oracle_timestamps(
4835    timestamp_oracle_config: &Option<TimestampOracleConfig>,
4836) -> Result<BTreeMap<Timeline, Timestamp>, AdapterError> {
4837    let mut initial_timestamps = BTreeMap::new();
4838
4839    if let Some(config) = timestamp_oracle_config {
4840        let oracle_timestamps = config.get_all_timelines().await?;
4841
4842        let debug_msg = || {
4843            oracle_timestamps
4844                .iter()
4845                .map(|(timeline, ts)| format!("{:?} -> {}", timeline, ts))
4846                .join(", ")
4847        };
4848        info!(
4849            "current timestamps from the timestamp oracle: {}",
4850            debug_msg()
4851        );
4852
4853        for (timeline, ts) in oracle_timestamps {
4854            let entry = initial_timestamps
4855                .entry(Timeline::from_str(&timeline).expect("could not parse timeline"));
4856
4857            entry
4858                .and_modify(|current_ts| *current_ts = std::cmp::max(*current_ts, ts))
4859                .or_insert(ts);
4860        }
4861    } else {
4862        info!("no timestamp oracle configured!");
4863    };
4864
4865    let debug_msg = || {
4866        initial_timestamps
4867            .iter()
4868            .map(|(timeline, ts)| format!("{:?}: {}", timeline, ts))
4869            .join(", ")
4870    };
4871    info!("initial oracle timestamps: {}", debug_msg());
4872
4873    Ok(initial_timestamps)
4874}
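
// A minimal sketch of the per-timeline max-merge performed above: when several
// oracle backends report a timestamp for the same timeline, the highest one
// wins, which is what keeps the initial timestamps monotonic across
// implementation switches.
//
// ```
// use std::collections::BTreeMap;
//
// let reported = [("EpochMilliseconds", 100u64), ("EpochMilliseconds", 90), ("user_tl", 7)];
// let mut initial: BTreeMap<&str, u64> = BTreeMap::new();
// for (timeline, ts) in reported {
//     initial
//         .entry(timeline)
//         .and_modify(|current| *current = std::cmp::max(*current, ts))
//         .or_insert(ts);
// }
// assert_eq!(initial["EpochMilliseconds"], 100); // the lower report (90) lost
// assert_eq!(initial["user_tl"], 7);
// ```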
4875
4876#[instrument]
4877pub async fn load_remote_system_parameters(
4878    storage: &mut Box<dyn OpenableDurableCatalogState>,
4879    system_parameter_sync_config: Option<SystemParameterSyncConfig>,
4880    system_parameter_sync_timeout: Duration,
4881) -> Result<Option<BTreeMap<String, String>>, AdapterError> {
4882    if let Some(system_parameter_sync_config) = system_parameter_sync_config {
4883        tracing::info!("parameter sync on boot: start sync");
4884
4885        // We intentionally block initial startup, potentially forever,
4886        // on initializing LaunchDarkly. This may seem scary, but the
4887        // alternative is even scarier. Over time, we expect that the
4888        // compiled-in default values for the system parameters will
4889        // drift substantially from the defaults configured in
4890        // LaunchDarkly, to the point that starting an environment
4891        // without loading the latest values from LaunchDarkly will
4892        // result in running an untested configuration.
4893        //
4894        // Note this only applies during initial startup. Restarting
4895        // after we've synced once only blocks for a maximum of
4896        // `FRONTEND_SYNC_TIMEOUT` on LaunchDarkly, as it seems
4897        // reasonable to assume that the last-synced configuration was
4898        // valid enough.
4899        //
4900        // This philosophy appears to provide a good balance between not
4901        // running untested configurations in production while also not
4902        // making LaunchDarkly a "tier 1" dependency for existing
4903        // environments.
4904        //
4905        // If this proves to be an issue, we could seek to address the
4906        // configuration drift in a different way--for example, by
4907        // writing a script that runs in CI nightly and checks for
4908        // deviation between the compiled Rust code and LaunchDarkly.
4909        //
4910        // If it is absolutely necessary to bring up a new environment
4911        // while LaunchDarkly is down, the following manual mitigation
4912        // can be performed:
4913        //
4914        //    1. Edit the environmentd startup parameters to omit the
4915        //       LaunchDarkly configuration.
4916        //    2. Boot environmentd.
4917        //    3. Use the catalog-debug tool to run `edit config "{\"key\":\"system_config_synced\"}" "{\"value\": 1}"`.
4918        //    4. Adjust any other parameters as necessary to avoid
4919        //       running a nonstandard configuration in production.
4920        //    5. Edit the environmentd startup parameters to restore the
4921        //       LaunchDarkly configuration, for when LaunchDarkly comes
4922        //       back online.
4923        //    6. Reboot environmentd.
4924        let mut params = SynchronizedParameters::new(SystemVars::default());
4925        let frontend_sync = async {
4926            let frontend = SystemParameterFrontend::from(&system_parameter_sync_config).await?;
4927            frontend.pull(&mut params);
4928            let ops = params
4929                .modified()
4930                .into_iter()
4931                .map(|param| {
4932                    let name = param.name;
4933                    let value = param.value;
4934                    tracing::info!(name, value, initial = true, "sync parameter");
4935                    (name, value)
4936                })
4937                .collect();
4938            tracing::info!("parameter sync on boot: end sync");
4939            Ok(Some(ops))
4940        };
4941        if !storage.has_system_config_synced_once().await? {
4942            frontend_sync.await
4943        } else {
4944            match mz_ore::future::timeout(system_parameter_sync_timeout, frontend_sync).await {
4945                Ok(ops) => Ok(ops),
4946                Err(TimeoutError::Inner(e)) => Err(e),
4947                Err(TimeoutError::DeadlineElapsed) => {
4948                    tracing::info!("parameter sync on boot: sync has timed out");
4949                    Ok(None)
4950                }
4951            }
4952        }
4953    } else {
4954        Ok(None)
4955    }
4956}
4957
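/// Responses delivered when a watch set installed by the coordinator
/// completes, allowing the corresponding operation to proceed.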
4958#[derive(Debug)]
4959pub enum WatchSetResponse {
4960    StatementDependenciesReady(StatementLoggingId, StatementLifecycleEvent),
4961    AlterSinkReady(AlterSinkReadyContext),
4962    AlterMaterializedViewReady(AlterMaterializedViewReadyContext),
4963}
4964
4965#[derive(Debug)]
4966pub struct AlterSinkReadyContext {
4967    ctx: Option<ExecuteContext>,
4968    otel_ctx: OpenTelemetryContext,
4969    plan: AlterSinkPlan,
4970    plan_validity: PlanValidity,
4971    read_hold: ReadHolds,
4972}
4973
4974impl AlterSinkReadyContext {
4975    fn ctx(&mut self) -> &mut ExecuteContext {
4976        self.ctx.as_mut().expect("only cleared on drop")
4977    }
4978
4979    fn retire(mut self, result: Result<ExecuteResponse, AdapterError>) {
4980        self.ctx
4981            .take()
4982            .expect("only cleared on drop")
4983            .retire(result);
4984    }
4985}
4986
4987impl Drop for AlterSinkReadyContext {
4988    fn drop(&mut self) {
4989        if let Some(ctx) = self.ctx.take() {
4990            ctx.retire(Err(AdapterError::Canceled));
4991        }
4992    }
4993}
4994
4995#[derive(Debug)]
4996pub struct AlterMaterializedViewReadyContext {
4997    ctx: Option<ExecuteContext>,
4998    otel_ctx: OpenTelemetryContext,
4999    plan: plan::AlterMaterializedViewApplyReplacementPlan,
5000    plan_validity: PlanValidity,
5001}
5002
5003impl AlterMaterializedViewReadyContext {
5004    fn ctx(&mut self) -> &mut ExecuteContext {
5005        self.ctx.as_mut().expect("only cleared on drop")
5006    }
5007
5008    fn retire(mut self, result: Result<ExecuteResponse, AdapterError>) {
5009        self.ctx
5010            .take()
5011            .expect("only cleared on drop")
5012            .retire(result);
5013    }
5014}
5015
5016impl Drop for AlterMaterializedViewReadyContext {
5017    fn drop(&mut self) {
5018        if let Some(ctx) = self.ctx.take() {
5019            ctx.retire(Err(AdapterError::Canceled));
5020        }
5021    }
5022}
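
// Note on the two `*ReadyContext` types above: each holds its `ExecuteContext`
// in an `Option` so that `retire` can move it out exactly once, and the `Drop`
// impls retire any context that was never explicitly retired with
// `AdapterError::Canceled`, ensuring a statement is never silently dropped.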
5023
5024/// Tracks ownership of a lock, together with a `VecDeque` of to-be-done work
5025/// to run after the lock is freed.
5026#[derive(Debug)]
5027struct LockedVecDeque<T> {
5028    items: VecDeque<T>,
5029    lock: Arc<tokio::sync::Mutex<()>>,
5030}
5031
5032impl<T> LockedVecDeque<T> {
5033    pub fn new() -> Self {
5034        Self {
5035            items: VecDeque::new(),
5036            lock: Arc::new(tokio::sync::Mutex::new(())),
5037        }
5038    }
5039
5040    pub fn try_lock_owned(&self) -> Result<OwnedMutexGuard<()>, tokio::sync::TryLockError> {
5041        Arc::clone(&self.lock).try_lock_owned()
5042    }
5043
5044    pub fn is_empty(&self) -> bool {
5045        self.items.is_empty()
5046    }
5047
5048    pub fn push_back(&mut self, value: T) {
5049        self.items.push_back(value)
5050    }
5051
5052    pub fn pop_front(&mut self) -> Option<T> {
5053        self.items.pop_front()
5054    }
5055
5056    pub fn remove(&mut self, index: usize) -> Option<T> {
5057        self.items.remove(index)
5058    }
5059
5060    pub fn iter(&self) -> std::collections::vec_deque::Iter<'_, T> {
5061        self.items.iter()
5062    }
5063}
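
#[cfg(test)]
mod locked_vec_deque_tests {
    // A minimal sketch (added for illustration; not from the original source)
    // of the intended `LockedVecDeque` protocol: work is queued while the lock
    // is held and drained once the guard is released.
    use super::LockedVecDeque;

    #[mz_ore::test]
    fn queue_while_locked_then_drain() {
        let mut deferred: LockedVecDeque<&str> = LockedVecDeque::new();

        // The first caller takes the lock; while it is held, a second attempt
        // fails, so the would-be second caller queues its work instead.
        let guard = deferred.try_lock_owned().expect("lock is free");
        assert!(deferred.try_lock_owned().is_err());
        deferred.push_back("deferred work");
        assert!(!deferred.is_empty());

        // Once the guard drops, the lock can be re-acquired and the queued
        // work drained in FIFO order.
        drop(guard);
        let _guard = deferred.try_lock_owned().expect("lock is free again");
        assert_eq!(deferred.pop_front(), Some("deferred work"));
        assert!(deferred.is_empty());
    }
}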
5064
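/// A to-be-executed statement, either raw or already planned, together with
/// the execution context needed to run it later.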
5065#[derive(Debug)]
5066struct DeferredPlanStatement {
5067    ctx: ExecuteContext,
5068    ps: PlanStatement,
5069}
5070
5071#[derive(Debug)]
5072enum PlanStatement {
5073    Statement {
5074        stmt: Arc<Statement<Raw>>,
5075        params: Params,
5076    },
5077    Plan {
5078        plan: mz_sql::plan::Plan,
5079        resolved_ids: ResolvedIds,
5080    },
5081}
5082
5083#[derive(Debug, Error)]
5084pub enum NetworkPolicyError {
5085    #[error("Access denied for address {0}")]
5086    AddressDenied(IpAddr),
5087    #[error("Access denied missing IP address")]
5088    MissingIp,
5089}
5090
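/// Validates an IP address against a network policy's rules, treated as a pure
/// allowlist: the address is accepted iff some rule's address range contains it.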
5091pub(crate) fn validate_ip_with_policy_rules(
5092    ip: &IpAddr,
5093    rules: &[NetworkPolicyRule],
5094) -> Result<(), NetworkPolicyError> {
5095    // At the moment we don't handle a rule's action or direction, as they can
5096    // only be "allow" and "ingress", respectively.
5097    if rules.iter().any(|r| r.address.0.contains(ip)) {
5098        Ok(())
5099    } else {
5100        Err(NetworkPolicyError::AddressDenied(*ip))
5101    }
5102}
5103
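/// Computes the SQL-level relation type to record in the catalog: the HIR
/// expression supplies the SQL-facing column types, and the tighter
/// nullability and unique-key information derived for the optimized MIR
/// expression is backported onto it.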
5104pub(crate) fn infer_sql_type_for_catalog(
5105    hir_expr: &HirRelationExpr,
5106    mir_expr: &MirRelationExpr,
5107) -> SqlRelationType {
5108    let mut typ = hir_expr.top_level_typ();
5109    typ.backport_nullability_and_keys(&mir_expr.typ());
5110    typ
5111}
5112
5113#[cfg(test)]
5114mod id_pool_tests {
5115    use super::IdPool;
5116
5117    #[mz_ore::test]
5118    fn test_empty_pool() {
5119        let mut pool = IdPool::empty();
5120        assert_eq!(pool.remaining(), 0);
5121        assert_eq!(pool.allocate(), None);
5122        assert_eq!(pool.allocate_many(1), None);
5123    }
5124
5125    #[mz_ore::test]
5126    fn test_allocate_single() {
5127        let mut pool = IdPool::empty();
5128        pool.refill(10, 13);
5129        assert_eq!(pool.remaining(), 3);
5130        assert_eq!(pool.allocate(), Some(10));
5131        assert_eq!(pool.allocate(), Some(11));
5132        assert_eq!(pool.allocate(), Some(12));
5133        assert_eq!(pool.remaining(), 0);
5134        assert_eq!(pool.allocate(), None);
5135    }
5136
5137    #[mz_ore::test]
5138    fn test_allocate_many() {
5139        let mut pool = IdPool::empty();
5140        pool.refill(100, 105);
5141        assert_eq!(pool.allocate_many(3), Some(vec![100, 101, 102]));
5142        assert_eq!(pool.remaining(), 2);
5143        // Not enough remaining for 3 more.
5144        assert_eq!(pool.allocate_many(3), None);
5145        // But 2 works.
5146        assert_eq!(pool.allocate_many(2), Some(vec![103, 104]));
5147        assert_eq!(pool.remaining(), 0);
5148    }
5149
5150    #[mz_ore::test]
5151    fn test_allocate_many_zero() {
5152        let mut pool = IdPool::empty();
5153        pool.refill(1, 5);
5154        assert_eq!(pool.allocate_many(0), Some(vec![]));
5155        assert_eq!(pool.remaining(), 4);
5156    }
5157
5158    #[mz_ore::test]
5159    fn test_refill_resets_pool() {
5160        let mut pool = IdPool::empty();
5161        pool.refill(0, 2);
5162        assert_eq!(pool.allocate(), Some(0));
5163        // Refill before exhaustion replaces the range.
5164        pool.refill(50, 52);
5165        assert_eq!(pool.allocate(), Some(50));
5166        assert_eq!(pool.allocate(), Some(51));
5167        assert_eq!(pool.allocate(), None);
5168    }
5169
5170    #[mz_ore::test]
5171    fn test_mixed_allocate_and_allocate_many() {
5172        let mut pool = IdPool::empty();
5173        pool.refill(0, 10);
5174        assert_eq!(pool.allocate(), Some(0));
5175        assert_eq!(pool.allocate_many(3), Some(vec![1, 2, 3]));
5176        assert_eq!(pool.allocate(), Some(4));
5177        assert_eq!(pool.remaining(), 5);
5178    }
5179
5180    #[mz_ore::test]
5181    #[should_panic(expected = "invalid pool range")]
5182    fn test_refill_invalid_range_panics() {
5183        let mut pool = IdPool::empty();
5184        pool.refill(10, 5);
5185    }
5186}
5187
5188#[cfg(test)]
5189mod arrangement_sizes_pruner_tests {
5190    use mz_repr::catalog_item_id::CatalogItemId;
5191    use mz_repr::{Datum, Row};
5192
5193    use super::arrangement_sizes_expired_retractions;
5194
5195    // Pack a row shaped like `mz_object_arrangement_size_history`: the pruner
5196    // only cares about column 3 (`collection_timestamp`), but we fill the other
5197    // three columns with realistic values so that shape changes break the tests.
5198    fn history_row(ts_ms: i64) -> Row {
5199        let dt = mz_ore::now::to_datetime(ts_ms.try_into().expect("non-negative"));
5200        Row::pack_slice(&[
5201            Datum::String("r1"),
5202            Datum::String("u1"),
5203            Datum::Int64(123),
5204            Datum::TimestampTz(dt.try_into().expect("fits in TimestampTz")),
5205        ])
5206    }
5207
5208    fn item_id() -> CatalogItemId {
5209        // Any CatalogItemId will do; tests don't dispatch on it.
5210        CatalogItemId::User(42)
5211    }
5212
5213    #[mz_ore::test]
5214    fn empty_input_produces_no_retractions() {
5215        let out = arrangement_sizes_expired_retractions(Vec::new(), 1_000, item_id());
5216        assert!(out.is_empty());
5217    }
5218
5219    #[mz_ore::test]
5220    fn retracts_only_rows_strictly_before_cutoff() {
5221        // Mixes both sides of the filter and includes a row at exactly
5222        // the cutoff timestamp to pin down the strict-less-than boundary.
5223        let rows = vec![
5224            (history_row(100), 1),
5225            (history_row(500), 1),
5226            (history_row(1_000), 1), // at cutoff: kept (strict <)
5227            (history_row(5_000), 1),
5228        ];
5229        let out = arrangement_sizes_expired_retractions(rows, 1_000, item_id());
5230        assert_eq!(out.len(), 2);
5231    }
5232
5233    #[mz_ore::test]
5234    #[should_panic(expected = "consolidated contents should not contain retractions")]
5235    fn retraction_in_input_panics() {
5236        let rows = vec![(history_row(100), -1)];
5237        let _ = arrangement_sizes_expired_retractions(rows, 1_000, item_id());
5238    }
5239}