mz_adapter/coord/appends.rs
// Copyright Materialize, Inc. and contributors. All rights reserved.
//
// Use of this software is governed by the Business Source License
// included in the LICENSE file.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0.

//! Logic and types for all appends executed by the [`Coordinator`].

use std::collections::{BTreeMap, BTreeSet};
use std::future::Future;
use std::pin::Pin;
use std::sync::{Arc, LazyLock};
use std::time::Duration;

use derivative::Derivative;
use futures::future::{BoxFuture, FutureExt};
use mz_adapter_types::connection::ConnectionId;
use mz_catalog::builtin::{BuiltinTable, MZ_SESSIONS};
use mz_expr::CollectionPlan;
use mz_ore::metrics::MetricsFutureExt;
use mz_ore::task;
use mz_ore::tracing::OpenTelemetryContext;
use mz_ore::{assert_none, instrument};
use mz_repr::{CatalogItemId, Timestamp};
use mz_sql::names::ResolvedIds;
use mz_sql::plan::{ExplainPlanPlan, ExplainTimestampPlan, Explainee, ExplaineeStatement, Plan};
use mz_sql::session::metadata::SessionMetadata;
use mz_storage_client::client::TableData;
use mz_timestamp_oracle::WriteTimestamp;
use smallvec::SmallVec;
use tokio::sync::{Notify, OwnedMutexGuard, OwnedSemaphorePermit, Semaphore, oneshot};
use tracing::{Instrument, Span, debug_span, info, warn};

use crate::catalog::{BuiltinTableUpdate, Catalog};
use crate::coord::{Coordinator, Message, PendingTxn, PlanValidity};
use crate::session::{GroupCommitWriteLocks, Session, WriteLocks};
use crate::util::{CompletedClientTransmitter, ResultExt};
use crate::{AdapterError, ExecuteContext};

/// Tables that we emit updates for when starting a new session.
pub(crate) static REQUIRED_BUILTIN_TABLES: &[&LazyLock<BuiltinTable>] = &[&MZ_SESSIONS];

/// An operation that was deferred waiting on a resource to be available.
///
/// For example, when inserting into a table we defer on acquiring [`WriteLocks`].
#[derive(Debug)]
pub enum DeferredOp {
    /// A plan, e.g. ReadThenWrite, that needs locks before sequencing.
    Plan(DeferredPlan),
    /// Inserts into a collection.
    Write(DeferredWrite),
}

impl DeferredOp {
    /// Certain operations, e.g. "blind writes"/`INSERT` statements, can be optimistically retried
    /// because we can share a write lock between multiple operations. In this case we wait to
    /// acquire the locks until [`group_commit`], where writes are grouped by collection and
    /// committed at a single timestamp.
    ///
    /// Other operations, e.g. read-then-write plans/`UPDATE` statements, must uniquely hold their
    /// write locks, and thus we should acquire the locks in [`try_deferred`] to prevent multiple
    /// queued plans from attempting to get retried at the same time, when we know only one can
    /// proceed.
    ///
    /// [`try_deferred`]: crate::coord::Coordinator::try_deferred
    /// [`group_commit`]: crate::coord::Coordinator::group_commit
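    ///
    /// A minimal sketch of how a caller might branch on this (the helper functions are
    /// hypothetical; not a doctest because these types are crate-internal):
    ///
    /// ```ignore
    /// if op.can_be_optimistically_retried() {
    ///     // Blind write: safe to retry eagerly; locks are shared at group commit.
    ///     retry_write(op);
    /// } else {
    ///     // Read-then-write: must hold its locks exclusively before sequencing.
    ///     acquire_locks_then_retry(op);
    /// }
    /// ```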
    pub(crate) fn can_be_optimistically_retried(&self) -> bool {
        match self {
            DeferredOp::Plan(_) => false,
            DeferredOp::Write(_) => true,
        }
    }

    /// Returns an iterator over all of the locks required for the current operation.
    pub fn required_locks(&self) -> impl Iterator<Item = CatalogItemId> + '_ {
        match self {
            DeferredOp::Plan(plan) => {
                let iter = plan.requires_locks.iter().copied();
                itertools::Either::Left(iter)
            }
            DeferredOp::Write(write) => {
                let iter = write.writes.keys().copied();
                itertools::Either::Right(iter)
            }
        }
    }

    /// Returns the [`ConnectionId`] associated with this deferred op.
    pub fn conn_id(&self) -> &ConnectionId {
        match self {
            DeferredOp::Plan(plan) => plan.ctx.session().conn_id(),
            DeferredOp::Write(write) => write.pending_txn.ctx.session().conn_id(),
        }
    }

    /// Consumes the [`DeferredOp`], returning the inner [`ExecuteContext`].
    pub fn into_ctx(self) -> ExecuteContext {
        match self {
            DeferredOp::Plan(plan) => plan.ctx,
            DeferredOp::Write(write) => write.pending_txn.ctx,
        }
    }
}

/// Describes a plan that is awaiting [`WriteLocks`].
#[derive(Derivative)]
#[derivative(Debug)]
pub struct DeferredPlan {
    #[derivative(Debug = "ignore")]
    pub ctx: ExecuteContext,
    pub plan: Plan,
    pub validity: PlanValidity,
    pub requires_locks: BTreeSet<CatalogItemId>,
}

#[derive(Debug)]
pub struct DeferredWrite {
    pub span: Span,
    pub writes: BTreeMap<CatalogItemId, SmallVec<[TableData; 1]>>,
    pub pending_txn: PendingTxn,
}

/// Describes what action triggered an update to a builtin table.
#[derive(Debug)]
pub(crate) enum BuiltinTableUpdateSource {
    /// Internal update, notify the caller when it's complete.
    Internal(oneshot::Sender<()>),
    /// Update was triggered by some background process, such as periodic heartbeats from COMPUTE.
    Background(oneshot::Sender<()>),
}

/// A pending write transaction that will be committed during the next group commit.
#[derive(Debug)]
pub(crate) enum PendingWriteTxn {
    /// Write to a user table.
    User {
        span: Span,
        /// List of all write operations within the transaction.
        writes: BTreeMap<CatalogItemId, SmallVec<[TableData; 1]>>,
        /// If they exist, should contain locks for each [`CatalogItemId`] in `writes`.
        write_locks: Option<WriteLocks>,
        /// Inner transaction.
        pending_txn: PendingTxn,
    },
    /// Write to a system table.
    System {
        updates: Vec<BuiltinTableUpdate>,
        source: BuiltinTableUpdateSource,
    },
}

impl PendingWriteTxn {
    fn is_internal_system(&self) -> bool {
        match self {
            PendingWriteTxn::System {
                source: BuiltinTableUpdateSource::Internal(_),
                ..
            } => true,
            _ => false,
        }
    }
}

impl Coordinator {
    /// Send a message to the Coordinator to start a group commit.
    pub(crate) fn trigger_group_commit(&mut self) {
        self.group_commit_tx.notify();
        // Avoid excessive `Message::GroupCommitInitiate` by resetting the periodic table
        // advancement. The group commit triggered by the message above will already advance all
        // tables.
        self.advance_timelines_interval.reset();
    }

    /// Tries to execute a previously deferred [`DeferredOp`] that requires write locks.
    ///
    /// If we can't acquire all of the write locks then we'll defer the plan again and wait for
    /// the necessary locks to become available.
    pub(crate) async fn try_deferred(
        &mut self,
        conn_id: ConnectionId,
        acquired_lock: Option<(CatalogItemId, tokio::sync::OwnedMutexGuard<()>)>,
    ) {
        // Try getting the deferred op, it may have already been canceled.
        let Some(op) = self.deferred_write_ops.remove(&conn_id) else {
            tracing::warn!(%conn_id, "no deferred op found, it must have been canceled?");
            return;
        };
        tracing::info!(%conn_id, "trying deferred plan");

        // If we pre-acquired a lock, try to acquire the rest.
        let write_locks = match acquired_lock {
            Some((acquired_gid, acquired_lock)) => {
                let mut write_locks = WriteLocks::builder(op.required_locks());

                // Insert the one lock we already acquired into our builder.
                write_locks.insert_lock(acquired_gid, acquired_lock);

                // Acquire the rest of our locks, filtering out the one we already have.
                for gid in op.required_locks().filter(|gid| *gid != acquired_gid) {
                    if let Some(lock) = self.try_grant_object_write_lock(gid) {
                        write_locks.insert_lock(gid, lock);
                    }
                }

                // If we failed to acquire any of the locks, spawn a task that waits for them to
                // become available.
                let locks = match write_locks.all_or_nothing(op.conn_id()) {
                    Ok(locks) => locks,
                    Err(failed_to_acquire) => {
                        let acquire_future = self
                            .grant_object_write_lock(failed_to_acquire)
                            .map(Option::Some);
                        self.defer_op(acquire_future, op);
                        return;
                    }
                };

                Some(locks)
            }
            None => None,
        };

        match op {
            DeferredOp::Plan(mut deferred) => {
                if let Err(e) = deferred.validity.check(self.catalog()) {
                    deferred.ctx.retire(Err(e))
                } else {
                    // Write statements never need to track resolved IDs (NOTE: This is not the
                    // same thing as plan dependencies, which we do need to re-validate).
                    let resolved_ids = ResolvedIds::empty();

                    // If we pre-acquired our locks, grant them to the session.
                    if let Some(locks) = write_locks {
                        let conn_id = deferred.ctx.session().conn_id().clone();
                        if let Err(existing) =
                            deferred.ctx.session_mut().try_grant_write_locks(locks)
                        {
                            tracing::error!(
                                %conn_id,
                                ?existing,
                                "session already has write locks granted?",
                            );
                            return deferred.ctx.retire(Err(AdapterError::WrongSetOfLocks));
                        }
                    };

                    // Note: This plan is not guaranteed to run, it may get deferred again.
                    self.sequence_plan(deferred.ctx, deferred.plan, resolved_ids)
                        .await;
                }
            }
            DeferredOp::Write(DeferredWrite {
                span,
                writes,
                pending_txn,
            }) => {
                self.submit_write(PendingWriteTxn::User {
                    span,
                    writes,
                    write_locks,
                    pending_txn,
                });
            }
        }
    }

    /// Attempts to commit all pending write transactions in a group commit. If the timestamp
    /// chosen for the writes is not ahead of `now()`, then we can execute and commit the writes
    /// immediately. Otherwise we must wait for `now()` to advance past the timestamp chosen for
    /// the writes.
    #[instrument(level = "debug")]
    pub(crate) async fn try_group_commit(&mut self, permit: Option<GroupCommitPermit>) {
        let timestamp = self.peek_local_write_ts().await;
        let now = Timestamp::from((self.catalog().config().now)());

        // HACK: This is a special case to allow writes to the mz_sessions table to proceed even
        // if the timestamp oracle is ahead of the current walltime. We do this because there are
        // some tests that mock the walltime, so it doesn't automatically advance, and updating
        // those tests to advance the walltime while creating a connection is too much.
        //
        // TODO(parkmycar): Get rid of the check below when refactoring group commits.
        let contains_internal_system_write = self
            .pending_writes
            .iter()
            .any(|write| write.is_internal_system());

        if timestamp > now && !contains_internal_system_write {
            // Cap the retry time at 1s. If the system clock has retreated by some large amount
            // of time, this prevents us from sleeping for that entire duration in case the
            // clock then advances back to near where it was.
            let remaining_ms = std::cmp::min(timestamp.saturating_sub(now), 1_000.into());
            let internal_cmd_tx = self.internal_cmd_tx.clone();
            task::spawn(
                || "group_commit_initiate",
                async move {
                    tokio::time::sleep(Duration::from_millis(remaining_ms.into())).await;
                    // It is not an error for this task to be running after `internal_cmd_rx` is dropped.
                    let result =
                        internal_cmd_tx.send(Message::GroupCommitInitiate(Span::current(), permit));
                    if let Err(e) = result {
                        warn!("internal_cmd_rx dropped before we could send: {:?}", e);
                    }
                }
                .instrument(Span::current()),
            );
        } else {
            self.group_commit(permit).await;
        }
    }

    /// Tries to commit all pending write transactions at the same timestamp.
    ///
    /// If the caller of this function has the `write_lock` acquired, then they can optionally pass
    /// it in to this method. If the caller does not have the `write_lock` acquired and the
    /// `write_lock` is currently locked by another operation, then only writes to system tables
    /// and table advancements will be applied. If the caller does not have the `write_lock`
    /// acquired and the `write_lock` is not currently locked by another operation, then group
    /// commit will acquire it and all writes will be applied.
    ///
    /// All applicable pending writes will be combined into a single Append command and sent to
    /// STORAGE as a single batch. All applicable writes will happen at the same timestamp and all
    /// involved tables will be advanced to some timestamp larger than the timestamp of the write.
    ///
    /// Returns the timestamp of the write.
    #[instrument(name = "coord::group_commit")]
    pub(crate) async fn group_commit(&mut self, permit: Option<GroupCommitPermit>) -> Timestamp {
        let mut validated_writes = Vec::new();
        let mut deferred_writes = Vec::new();
        let mut group_write_locks = GroupCommitWriteLocks::default();

        // TODO(parkmycar): Refactor away this allocation. Currently `drain(..)` requires holding
        // a mutable borrow on the Coordinator and so does trying to grant a write lock.
        let pending_writes: Vec<_> = self.pending_writes.drain(..).collect();

        // Validate, merge, and possibly acquire write locks for as many pending writes as possible.
        for pending_write in pending_writes {
            match pending_write {
                // We always allow system writes to proceed.
                PendingWriteTxn::System { .. } => validated_writes.push(pending_write),
                // We have a set of locks! Validate they're correct (expected).
                PendingWriteTxn::User {
                    span,
                    write_locks: Some(write_locks),
                    writes,
                    pending_txn,
                } => match write_locks.validate(writes.keys().copied()) {
                    Ok(validated_locks) => {
                        // Merge all of our write locks together since we can allow concurrent
                        // writes at the same timestamp.
                        group_write_locks.merge(validated_locks);

                        let validated_write = PendingWriteTxn::User {
                            span,
                            writes,
                            write_locks: None,
                            pending_txn,
                        };
                        validated_writes.push(validated_write);
                    }
                    // This is very unexpected since callers of this method should be validating.
                    //
                    // We cannot allow these writes to occur, since if the correct set of locks
                    // was not taken we could violate serializability.
                    Err(missing) => {
                        let writes: Vec<_> = writes.keys().collect();
                        panic!(
                            "got to group commit with partial set of locks!\nmissing: {:?}, writes: {:?}, txn: {:?}",
                            missing, writes, pending_txn,
                        );
                    }
                },
                // If we don't have any locks, try to acquire them, otherwise defer the write.
                PendingWriteTxn::User {
                    span,
                    writes,
                    write_locks: None,
                    pending_txn,
                } => {
                    let missing = group_write_locks.missing_locks(writes.keys().copied());

                    if missing.is_empty() {
                        // We have all the locks! Queue the pending write.
                        let validated_write = PendingWriteTxn::User {
                            span,
                            writes,
                            write_locks: None,
                            pending_txn,
                        };
                        validated_writes.push(validated_write);
                    } else {
                        // Try to acquire the locks we're missing.
                        let mut just_in_time_locks = WriteLocks::builder(missing.clone());
                        for collection in missing {
                            if let Some(lock) = self.try_grant_object_write_lock(collection) {
                                just_in_time_locks.insert_lock(collection, lock);
                            }
                        }

                        match just_in_time_locks.all_or_nothing(pending_txn.ctx.session().conn_id())
                        {
                            // We acquired all of the locks! Proceed with the write.
                            Ok(locks) => {
                                group_write_locks.merge(locks);
                                let validated_write = PendingWriteTxn::User {
                                    span,
                                    writes,
                                    write_locks: None,
                                    pending_txn,
                                };
                                validated_writes.push(validated_write);
                            }
                            // Darn. We couldn't acquire the locks, defer the write.
                            Err(missing) => {
                                let acquire_future =
                                    self.grant_object_write_lock(missing).map(Option::Some);
                                let write = DeferredWrite {
                                    span,
                                    writes,
                                    pending_txn,
                                };
                                deferred_writes.push((acquire_future, write));
                            }
                        }
                    }
                }
            }
        }

        // Queue all of our deferred ops.
        for (acquire_future, write) in deferred_writes {
            self.defer_op(acquire_future, DeferredOp::Write(write));
        }

        // The value returned here still might be ahead of `now()` if `now()` has gone backwards at
        // any point during this method or if this was triggered from DDL. We will still commit the
        // write without waiting for `now()` to advance. This is ok because the next batch of writes
        // will trigger the wait loop in `try_group_commit()` if `now()` hasn't advanced past the
        // global timeline, preventing an unbounded advancing of the global timeline ahead of
        // `now()`. Additionally DDL is infrequent enough and takes long enough that we don't think
        // it's practical for continuous DDL to advance the global timestamp in an unbounded manner.
        let WriteTimestamp {
            timestamp,
            advance_to,
        } = self.get_local_write_ts().await;

        // While we're flipping on the feature flags for txn-wal tables and
        // the separated Postgres timestamp oracle, we also need to confirm
        // leadership on writes _after_ getting the timestamp and _before_
        // writing anything to table shards.
        //
        // TODO: Remove this after both (either?) of the above features are on
        // for good and no possibility of running the old code.
        let () = self
            .catalog
            .confirm_leadership()
            .await
            .unwrap_or_terminate("unable to confirm leadership");

        let mut appends: BTreeMap<CatalogItemId, SmallVec<[TableData; 1]>> = BTreeMap::new();
        let mut responses = Vec::with_capacity(validated_writes.len());
        let mut notifies = Vec::new();

        for validated_write_txn in validated_writes {
            match validated_write_txn {
                PendingWriteTxn::User {
                    span: _,
                    writes,
                    write_locks,
                    pending_txn:
                        PendingTxn {
                            ctx,
                            response,
                            action,
                        },
                } => {
                    assert_none!(write_locks, "should have merged together all locks above");
                    for (id, table_data) in writes {
                        // If the table that some write was targeting has been deleted while the
                        // write was waiting, then the write will be ignored and we respond to the
                        // client that the write was successful. This is only possible if the write
                        // and the delete were concurrent. Therefore, we are free to order the
                        // write before the delete without violating any consistency guarantees.
                        if self.catalog().try_get_entry(&id).is_some() {
                            appends.entry(id).or_default().extend(table_data);
                        }
                    }
                    if let Some(id) = ctx.extra().contents() {
                        self.set_statement_execution_timestamp(id, timestamp);
                    }

                    responses.push(CompletedClientTransmitter::new(ctx, response, action));
                }
                PendingWriteTxn::System { updates, source } => {
                    for update in updates {
                        appends.entry(update.id).or_default().push(update.data);
                    }
                    // Once the write completes we notify any waiters.
                    match source {
                        BuiltinTableUpdateSource::Internal(tx)
                        | BuiltinTableUpdateSource::Background(tx) => notifies.push(tx),
                    }
                }
            }
        }

        // Add table advancements for all tables.
        for table in self.catalog().entries().filter(|entry| entry.is_table()) {
            appends.entry(table.id()).or_default();
        }

        // Consolidate all Rows for a given table. We do not consolidate the
        // staged batches, that's up to whoever staged them.
        let mut all_appends = Vec::with_capacity(appends.len());
        for (item_id, table_data) in appends.into_iter() {
            let mut all_rows = Vec::new();
            let mut all_data = Vec::new();
            for data in table_data {
                match data {
                    TableData::Rows(rows) => all_rows.extend(rows),
                    TableData::Batches(_) => all_data.push(data),
                }
            }
            differential_dataflow::consolidation::consolidate(&mut all_rows);
            all_data.push(TableData::Rows(all_rows));

            // TODO(parkmycar): Use SmallVec throughout.
            all_appends.push((item_id, all_data));
        }

        let appends: Vec<_> = all_appends
            .into_iter()
            .map(|(id, updates)| {
                let gid = self.catalog().get_entry(&id).latest_global_id();
                (gid, updates)
            })
            .collect();

        // Log non-empty user appends.
        let modified_tables: Vec<_> = appends
            .iter()
            .filter_map(|(id, updates)| {
                if id.is_user() && !updates.iter().all(|u| u.is_empty()) {
                    Some(id)
                } else {
                    None
                }
            })
            .collect();
        if !modified_tables.is_empty() {
            info!(
                "Appending to tables, {modified_tables:?}, at {timestamp}, advancing to {advance_to}"
            );
        }
        // Instrument our table writes since they can block the coordinator.
        let histogram = self
            .metrics
            .append_table_duration_seconds
            .with_label_values(&[]);
        let append_fut = self
            .controller
            .storage
            .append_table(timestamp, advance_to, appends)
            .expect("invalid updates")
            .wall_time()
            .observe(histogram);

        // Spawn a task to do the table writes.
        let internal_cmd_tx = self.internal_cmd_tx.clone();
        let apply_write_fut = self.apply_local_write(timestamp);

        let span = debug_span!(parent: None, "group_commit_apply");
        OpenTelemetryContext::obtain().attach_as_parent_to(&span);
        task::spawn(
            || "group_commit_apply",
            async move {
                // Wait for the writes to complete.
                match append_fut
                    .instrument(debug_span!("group_commit_apply::append_fut"))
                    .await
                {
                    Ok(append_result) => {
                        append_result.unwrap_or_terminate("cannot fail to apply appends")
                    }
                    Err(_) => warn!("Writer terminated with writes in indefinite state"),
                };

                // Apply the write by marking the timestamp as complete on the timeline.
                apply_write_fut
                    .instrument(debug_span!("group_commit_apply::append_write_fut"))
                    .await;

                // Notify the external clients of the result.
                for response in responses {
                    let (mut ctx, result) = response.finalize();
                    ctx.session_mut().apply_write(timestamp);
                    ctx.retire(result);
                }

                // IMPORTANT: Make sure we hold the permit and write locks
                // until here, to prevent other writes from going through while
                // we haven't yet applied the write at the timestamp oracle.
                drop(permit);
                drop(group_write_locks);

                // Advance other timelines.
                if let Err(e) = internal_cmd_tx.send(Message::AdvanceTimelines) {
                    warn!("Server closed with non-advanced timelines, {e}");
                }

                for notify in notifies {
                    // We don't care if the listeners have gone away.
                    let _ = notify.send(());
                }
            }
            .instrument(span),
        );

        timestamp
    }

    /// Submit a write to be executed during the next group commit and trigger a group commit.
    pub(crate) fn submit_write(&mut self, pending_write_txn: PendingWriteTxn) {
        if self.controller.read_only() {
            panic!(
                "attempting table write in read-only mode: {:?}",
                pending_write_txn
            );
        }
        self.pending_writes.push(pending_write_txn);
        self.trigger_group_commit();
    }

    /// Append some [`BuiltinTableUpdate`]s, with various degrees of waiting and blocking.
    pub(crate) fn builtin_table_update<'a>(&'a mut self) -> BuiltinTableAppend<'a> {
        BuiltinTableAppend { coord: self }
    }

    pub(crate) fn defer_op<F>(&mut self, acquire_future: F, op: DeferredOp)
    where
        F: Future<Output = Option<(CatalogItemId, tokio::sync::OwnedMutexGuard<()>)>>
            + Send
            + 'static,
    {
        let conn_id = op.conn_id().clone();

        // Track all of our deferred ops.
        let is_optimistic = op.can_be_optimistically_retried();
        self.deferred_write_ops.insert(conn_id.clone(), op);

        let internal_cmd_tx = self.internal_cmd_tx.clone();
        let conn_id_ = conn_id.clone();
        mz_ore::task::spawn(|| format!("defer op {conn_id_}"), async move {
            tracing::info!(%conn_id, "deferring plan");
            // Once we can acquire the first failed lock, try running the deferred plan.
            //
            // Note: This does not guarantee the plan will be able to run, there might be
            // other locks that we later fail to get.
            let acquired_lock = acquire_future.await;

            // Some operations, e.g. blind INSERTs, can be optimistically retried, meaning we
            // can run multiple at once. In those cases we don't hold the lock so we retry all
            // blind writes for a single object.
            let acquired_lock = match (acquired_lock, is_optimistic) {
                (Some(_lock), true) => None,
                (Some(lock), false) => Some(lock),
                (None, _) => None,
            };

            // If this send fails then the Coordinator is shutting down.
            let _ = internal_cmd_tx.send(Message::TryDeferred {
                conn_id,
                acquired_lock,
            });
        });
    }

    /// Returns a future that waits until it can get an exclusive lock on the specified collection.
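    ///
    /// A sketch of the intended usage (illustrative only; not a doctest because
    /// `Coordinator` is crate-internal):
    ///
    /// ```ignore
    /// let acquire_future = coord.grant_object_write_lock(object_id);
    /// // The future resolves once the lock is free; hold the guard for the
    /// // duration of the write to keep the collection locked.
    /// let (object_id, _guard) = acquire_future.await;
    /// ```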
    pub(crate) fn grant_object_write_lock(
        &mut self,
        object_id: CatalogItemId,
    ) -> impl Future<Output = (CatalogItemId, OwnedMutexGuard<()>)> + 'static {
        let write_lock_handle = self
            .write_locks
            .entry(object_id)
            .or_insert_with(|| Arc::new(tokio::sync::Mutex::new(())));
        let write_lock_handle = Arc::clone(write_lock_handle);

        write_lock_handle
            .lock_owned()
            .map(move |guard| (object_id, guard))
    }

    /// Lazily creates the lock for the provided `object_id` and grants it if possible; returns
    /// `None` if the lock is already held.
    pub(crate) fn try_grant_object_write_lock(
        &mut self,
        object_id: CatalogItemId,
    ) -> Option<OwnedMutexGuard<()>> {
        let write_lock_handle = self
            .write_locks
            .entry(object_id)
            .or_insert_with(|| Arc::new(tokio::sync::Mutex::new(())));
        let write_lock_handle = Arc::clone(write_lock_handle);

        write_lock_handle.try_lock_owned().ok()
    }
}

/// Helper struct to run a builtin table append.
pub struct BuiltinTableAppend<'a> {
    coord: &'a mut Coordinator,
}

/// `Future` that notifies when a builtin table write has completed.
///
/// Note: builtin table writes need to talk to persist, which can take 100s of milliseconds. This
/// type allows you to execute a builtin table write, e.g. via [`BuiltinTableAppend::execute`], and
/// wait for it to complete, while other long running tasks are concurrently executing.
pub type BuiltinTableAppendNotify = Pin<Box<dyn Future<Output = ()> + Send + Sync + 'static>>;

impl<'a> BuiltinTableAppend<'a> {
    /// Submit a write to a system table to be executed during the next group commit. This method
    /// __does not__ trigger a group commit.
    ///
    /// This is useful for non-critical writes like metric updates because it allows us to piggy
    /// back off the next group commit instead of triggering a potentially expensive group commit.
    ///
    /// Note: __do not__ call this for DDL which needs the system tables updated immediately.
    ///
    /// Note: When in read-only mode, this will buffer the update and return
    /// immediately.
    pub fn background(self, mut updates: Vec<BuiltinTableUpdate>) -> BuiltinTableAppendNotify {
        if self.coord.controller.read_only() {
            self.coord
                .buffered_builtin_table_updates
                .as_mut()
                .expect("in read-only mode")
                .append(&mut updates);

            return Box::pin(futures::future::ready(()));
        }

        let (tx, rx) = oneshot::channel();
        self.coord.pending_writes.push(PendingWriteTxn::System {
            updates,
            source: BuiltinTableUpdateSource::Background(tx),
        });

        Box::pin(rx.map(|_| ()))
    }

    /// Submits a write to be executed during the next group commit __and__ triggers a group commit.
    ///
    /// Returns a `Future` that resolves when the write has completed; it does not block the
    /// Coordinator.
    ///
    /// Note: When in read-only mode, this will buffer the update and the
    /// returned future will resolve immediately, without the update actually
    /// having been written.
    pub fn defer(self, mut updates: Vec<BuiltinTableUpdate>) -> BuiltinTableAppendNotify {
        if self.coord.controller.read_only() {
            self.coord
                .buffered_builtin_table_updates
                .as_mut()
                .expect("in read-only mode")
                .append(&mut updates);

            return Box::pin(futures::future::ready(()));
        }

        let (tx, rx) = oneshot::channel();
        self.coord.pending_writes.push(PendingWriteTxn::System {
            updates,
            source: BuiltinTableUpdateSource::Internal(tx),
        });
        self.coord.trigger_group_commit();

        Box::pin(rx.map(|_| ()))
    }

    /// Submit a write to a system table.
    ///
    /// This method will block the Coordinator on acquiring a write timestamp from the timestamp
    /// oracle, and then returns a `Future` that will complete once the write has been applied,
    /// along with the write timestamp.
    ///
    /// Note: When in read-only mode, this will buffer the update, the
    /// returned future will resolve immediately, without the update actually
    /// having been written, and no timestamp is returned.
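    ///
    /// A sketch of the intended usage (illustrative only; `coord` and `updates` are assumed to
    /// exist at the call site):
    ///
    /// ```ignore
    /// let (notify, write_ts) = coord.builtin_table_update().execute(updates).await;
    /// // ... do other useful work while the write is in flight ...
    /// notify.await; // resolves once the write has been applied
    /// ```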
    pub async fn execute(
        self,
        mut updates: Vec<BuiltinTableUpdate>,
    ) -> (BuiltinTableAppendNotify, Option<Timestamp>) {
        if self.coord.controller.read_only() {
            self.coord
                .buffered_builtin_table_updates
                .as_mut()
                .expect("in read-only mode")
                .append(&mut updates);

            return (Box::pin(futures::future::ready(())), None);
        }

        let (tx, rx) = oneshot::channel();

        // Most DDL queries cause writes to system tables. Unlike writes to user tables, system
        // table writes do not wait for a group commit, they explicitly trigger one. There is a
        // possibility that if a user is executing DDL at a rate faster than 1 query per
        // millisecond, then the global timeline will unboundedly advance past the system clock.
        // This can cause future queries to block, but will not affect correctness. Since this
        // rate of DDL is unlikely, we allow DDL to explicitly trigger group commit.
        self.coord.pending_writes.push(PendingWriteTxn::System {
            updates,
            source: BuiltinTableUpdateSource::Internal(tx),
        });
        let write_ts = self.coord.group_commit(None).await;

        // Avoid excessive group commits by resetting the periodic table advancement timer. The
        // group commit triggered above will already advance all tables.
        self.coord.advance_timelines_interval.reset();

        (Box::pin(rx.map(|_| ())), Some(write_ts))
    }

    /// Submit a write to a system table, blocking until complete.
    ///
    /// Note: if possible you should use the `execute(...)` method, which returns a `Future` that
    /// can be `await`-ed concurrently with other tasks.
    ///
    /// Note: When in read-only mode, this will buffer the update and the
    /// returned future will resolve immediately, without the update actually
    /// having been written.
    pub async fn blocking(self, updates: Vec<BuiltinTableUpdate>) {
        let (notify, _) = self.execute(updates).await;
        notify.await;
    }
}

/// Returns two sides of a "channel" that can be used to notify the coordinator when we want a
/// group commit to be run.
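///
/// A minimal sketch of the intended flow (illustrative only; in practice the waiter side is
/// driven by the coordinator's main loop):
///
/// ```ignore
/// let (notifier, waiter) = notifier();
/// // Producer side: request that a group commit be run.
/// notifier.notify();
/// // Consumer side: resolves once a commit is requested _and_ no other
/// // group commit is in progress; hold the permit while committing.
/// let permit = waiter.ready().await;
/// ```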
pub fn notifier() -> (GroupCommitNotifier, GroupCommitWaiter) {
    let notify = Arc::new(Notify::new());
    let in_progress = Arc::new(Semaphore::new(1));

    let notifier = GroupCommitNotifier {
        notify: Arc::clone(&notify),
    };
    let waiter = GroupCommitWaiter {
        notify,
        in_progress,
    };

    (notifier, waiter)
}

/// A handle that allows us to notify the coordinator that a group commit should be run at some
/// point in the future.
#[derive(Debug, Clone)]
pub struct GroupCommitNotifier {
    /// Tracks if there are any outstanding group commits.
    notify: Arc<Notify>,
}

impl GroupCommitNotifier {
    /// Notifies the [`GroupCommitWaiter`] that we'd like a group commit to be run.
    pub fn notify(&self) {
        self.notify.notify_one()
    }
}

/// A handle that returns a future when a group commit needs to be run, and one is not currently
/// being run.
#[derive(Debug)]
pub struct GroupCommitWaiter {
    /// Tracks if there are any outstanding group commits.
    notify: Arc<Notify>,
    /// Distributes permits that track in-progress group commits.
    in_progress: Arc<Semaphore>,
}
static_assertions::assert_not_impl_all!(GroupCommitWaiter: Clone);

impl GroupCommitWaiter {
    /// Returns a permit for a group commit, once a permit is available _and_ someone has
    /// requested a group commit to be run.
    ///
    /// # Cancel Safety
    ///
    /// * Waiting on the returned Future is cancel safe because we acquire an in-progress permit
    ///   before waiting for notifications. If the Future gets dropped after acquiring a permit
    ///   but before a group commit is queued, we'll release the permit which can be acquired by
    ///   the next caller.
    ///
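    /// A sketch of how a main loop might drive this with `tokio::select!` (simplified and
    /// illustrative; the real coordinator loop has many more branches):
    ///
    /// ```ignore
    /// loop {
    ///     tokio::select! {
    ///         // Cancel safe: if another branch completes first, any acquired
    ///         // permit is released for the next caller.
    ///         permit = waiter.ready() => {
    ///             coord.try_group_commit(Some(permit)).await;
    ///         }
    ///         // ... other branches ...
    ///     }
    /// }
    /// ```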
    pub async fn ready(&self) -> GroupCommitPermit {
        let permit = Semaphore::acquire_owned(Arc::clone(&self.in_progress))
            .await
            .expect("semaphore should not close");

        // Note: We must wait for notifies _after_ waiting for a permit to be acquired for cancel
        // safety.
        self.notify.notified().await;

        GroupCommitPermit {
            _permit: Some(permit),
        }
    }
}

/// A permit to run a group commit; it must be kept alive for the entire duration of the commit.
///
/// Note: We sometimes want to throttle how many group commits are running at once, which this
/// permit allows us to do.
#[derive(Debug)]
pub struct GroupCommitPermit {
    /// Permit that is preventing other group commits from running.
    ///
    /// Only `None` if the permit has been moved into a tokio task for waiting.
    _permit: Option<OwnedSemaphorePermit>,
}

/// When we start a [`Session`] we need to update some builtin tables, but we don't want to wait
/// for these writes to complete, for two reasons:
///
/// 1. Doing a write can take a relatively long time.
/// 2. Decoupling the write from the session start allows us to batch multiple writes together, if
///    sessions are being created with a high frequency.
///
/// So as an optimization we do not wait for these writes to complete. But if a [`Session`] tries
/// to query any of these builtin objects, we need to block that query on the writes completing to
/// maintain linearizability.
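///
/// A sketch of how a caller might consume the return value (illustrative only; the real call
/// site lives in the coordinator's sequencing path):
///
/// ```ignore
/// if let Some((_dependencies, wait_future)) = waiting_on_startup_appends(catalog, session, &plan) {
///     // Defer the plan until the startup builtin table writes have landed.
///     wait_future.await;
/// }
/// // The plan can now read the builtin tables linearizably.
/// ```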
pub(crate) fn waiting_on_startup_appends(
    catalog: &Catalog,
    session: &mut Session,
    plan: &Plan,
) -> Option<(BTreeSet<CatalogItemId>, BoxFuture<'static, ()>)> {
    // TODO(parkmycar): We need to check transitive uses here too if we ever move the
    // referenced builtin tables out of mz_internal, or we allow creating views on
    // mz_internal objects.
    let depends_on = match plan {
        Plan::Select(plan) => plan.source.depends_on(),
        Plan::ReadThenWrite(plan) => plan.selection.depends_on(),
        Plan::ShowColumns(plan) => plan.select_plan.source.depends_on(),
        Plan::Subscribe(plan) => plan.from.depends_on(),
        Plan::ExplainPlan(ExplainPlanPlan {
            explainee: Explainee::Statement(ExplaineeStatement::Select { plan, .. }),
            ..
        }) => plan.source.depends_on(),
        Plan::ExplainTimestamp(ExplainTimestampPlan { raw_plan, .. }) => raw_plan.depends_on(),
        Plan::CreateConnection(_)
        | Plan::CreateDatabase(_)
        | Plan::CreateSchema(_)
        | Plan::CreateRole(_)
        | Plan::CreateNetworkPolicy(_)
        | Plan::CreateCluster(_)
        | Plan::CreateClusterReplica(_)
        | Plan::CreateContinualTask(_)
        | Plan::CreateSource(_)
        | Plan::CreateSources(_)
        | Plan::CreateSecret(_)
        | Plan::CreateSink(_)
        | Plan::CreateTable(_)
        | Plan::CreateView(_)
        | Plan::CreateMaterializedView(_)
        | Plan::CreateIndex(_)
        | Plan::CreateType(_)
        | Plan::Comment(_)
        | Plan::DiscardTemp
        | Plan::DiscardAll
        | Plan::DropObjects(_)
        | Plan::DropOwned(_)
        | Plan::EmptyQuery
        | Plan::ShowAllVariables
        | Plan::ShowCreate(_)
        | Plan::ShowVariable(_)
        | Plan::InspectShard(_)
        | Plan::SetVariable(_)
        | Plan::ResetVariable(_)
        | Plan::SetTransaction(_)
        | Plan::StartTransaction(_)
        | Plan::CommitTransaction(_)
        | Plan::AbortTransaction(_)
        | Plan::CopyFrom(_)
        | Plan::CopyTo(_)
        | Plan::ExplainPlan(_)
        | Plan::ExplainPushdown(_)
        | Plan::ExplainSinkSchema(_)
        | Plan::Insert(_)
        | Plan::AlterNetworkPolicy(_)
        | Plan::AlterNoop(_)
        | Plan::AlterClusterRename(_)
        | Plan::AlterClusterSwap(_)
        | Plan::AlterClusterReplicaRename(_)
        | Plan::AlterCluster(_)
        | Plan::AlterConnection(_)
        | Plan::AlterSource(_)
        | Plan::AlterSetCluster(_)
        | Plan::AlterItemRename(_)
        | Plan::AlterRetainHistory(_)
        | Plan::AlterSchemaRename(_)
        | Plan::AlterSchemaSwap(_)
        | Plan::AlterSecret(_)
        | Plan::AlterSink(_)
        | Plan::AlterSystemSet(_)
        | Plan::AlterSystemReset(_)
        | Plan::AlterSystemResetAll(_)
        | Plan::AlterRole(_)
        | Plan::AlterOwner(_)
        | Plan::AlterTableAddColumn(_)
        | Plan::Declare(_)
        | Plan::Fetch(_)
        | Plan::Close(_)
        | Plan::Prepare(_)
        | Plan::Execute(_)
        | Plan::Deallocate(_)
        | Plan::Raise(_)
        | Plan::GrantRole(_)
        | Plan::RevokeRole(_)
        | Plan::GrantPrivileges(_)
        | Plan::RevokePrivileges(_)
        | Plan::AlterDefaultPrivileges(_)
        | Plan::ReassignOwned(_)
        | Plan::ValidateConnection(_)
        | Plan::SideEffectingFunc(_) => BTreeSet::default(),
    };
    let depends_on_required_id = REQUIRED_BUILTIN_TABLES
        .iter()
        .map(|table| catalog.resolve_builtin_table(&**table))
        .any(|id| {
            catalog
                .get_global_ids(&id)
                .any(|gid| depends_on.contains(&gid))
        });

    // If our plan does not depend on any required ID, then we don't need to
    // wait for any builtin writes to occur.
    if !depends_on_required_id {
        return None;
    }

    // Even if we depend on a builtin table, there's no need to wait if the
    // writes have already completed.
    //
    // TODO(parkmycar): As an optimization we should add a `Notify` type to
    // `mz_ore` that allows peeking. If the builtin table writes have already
    // completed then there is no need to defer this plan.
    match session.clear_builtin_table_updates() {
        Some(wait_future) => {
            let depends_on = depends_on
                .into_iter()
                .map(|gid| catalog.get_entry_by_global_id(&gid).id())
                .collect();
            Some((depends_on, wait_future.boxed()))
        }
        None => None,
    }
}
1057}