mz_adapter/coord/appends.rs
// Copyright Materialize, Inc. and contributors. All rights reserved.
//
// Use of this software is governed by the Business Source License
// included in the LICENSE file.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0.

//! Logic and types for all appends executed by the [`Coordinator`].

use std::collections::{BTreeMap, BTreeSet};
use std::future::Future;
use std::pin::Pin;
use std::sync::{Arc, LazyLock};
use std::time::Duration;

use derivative::Derivative;
use futures::future::{BoxFuture, FutureExt};
use mz_adapter_types::connection::ConnectionId;
use mz_catalog::builtin::{BuiltinTable, MZ_SESSIONS};
use mz_expr::CollectionPlan;
use mz_ore::metrics::MetricsFutureExt;
use mz_ore::task;
use mz_ore::tracing::OpenTelemetryContext;
use mz_ore::{assert_none, instrument};
use mz_repr::{CatalogItemId, Timestamp};
use mz_sql::names::ResolvedIds;
use mz_sql::plan::{ExplainPlanPlan, ExplainTimestampPlan, Explainee, ExplaineeStatement, Plan};
use mz_sql::session::metadata::SessionMetadata;
use mz_storage_client::client::TableData;
use mz_timestamp_oracle::WriteTimestamp;
use smallvec::SmallVec;
use tokio::sync::{Notify, OwnedMutexGuard, OwnedSemaphorePermit, Semaphore, oneshot};
use tracing::{Instrument, Span, debug_span, info, warn};

use crate::catalog::{BuiltinTableUpdate, Catalog};
use crate::coord::{Coordinator, Message, PendingTxn, PlanValidity};
use crate::session::{GroupCommitWriteLocks, Session, WriteLocks};
use crate::util::{CompletedClientTransmitter, ResultExt};
use crate::{AdapterError, ExecuteContext};

/// Tables that we emit updates for when starting a new session.
pub(crate) static REQUIRED_BUILTIN_TABLES: &[&LazyLock<BuiltinTable>] = &[&MZ_SESSIONS];

/// An operation that was deferred waiting on a resource to be available.
///
/// For example, when inserting into a table we defer on acquiring [`WriteLocks`].
#[derive(Debug)]
pub enum DeferredOp {
    /// A plan, e.g. ReadThenWrite, that needs locks before sequencing.
    Plan(DeferredPlan),
    /// Inserts into a collection.
    Write(DeferredWrite),
}

impl DeferredOp {
    /// Certain operations, e.g. "blind writes"/`INSERT` statements, can be optimistically retried
    /// because we can share a write lock between multiple operations. In this case we wait to
    /// acquire the locks until [`group_commit`], where writes are grouped by collection and
    /// committed at a single timestamp.
    ///
    /// Other operations, e.g. read-then-write plans/`UPDATE` statements, must uniquely hold their
    /// write locks, and thus we should acquire the locks in [`try_deferred`] to prevent multiple
    /// queued plans from attempting to get retried at the same time, when we know only one can
    /// proceed.
    ///
    /// [`try_deferred`]: crate::coord::Coordinator::try_deferred
    /// [`group_commit`]: crate::coord::Coordinator::group_commit
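    ///
    /// # Example
    ///
    /// A sketch of the expected behavior (illustrative only; `write` and `plan` are hypothetical,
    /// previously constructed values):
    ///
    /// ```ignore
    /// // Blind writes share locks, so they are safe to retry optimistically.
    /// assert!(DeferredOp::Write(write).can_be_optimistically_retried());
    /// // Read-then-write plans require exclusive locks, so they are not.
    /// assert!(!DeferredOp::Plan(plan).can_be_optimistically_retried());
    /// ```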
    pub(crate) fn can_be_optimistically_retried(&self) -> bool {
        match self {
            DeferredOp::Plan(_) => false,
            DeferredOp::Write(_) => true,
        }
    }

    /// Returns an iterator over all the locks required for the current operation.
    pub fn required_locks(&self) -> impl Iterator<Item = CatalogItemId> + '_ {
        match self {
            DeferredOp::Plan(plan) => {
                let iter = plan.requires_locks.iter().copied();
                itertools::Either::Left(iter)
            }
            DeferredOp::Write(write) => {
                let iter = write.writes.keys().copied();
                itertools::Either::Right(iter)
            }
        }
    }

    /// Returns the [`ConnectionId`] associated with this deferred op.
    pub fn conn_id(&self) -> &ConnectionId {
        match self {
            DeferredOp::Plan(plan) => plan.ctx.session().conn_id(),
            DeferredOp::Write(write) => write.pending_txn.ctx.session().conn_id(),
        }
    }

    /// Consumes the [`DeferredOp`], returning the inner [`ExecuteContext`].
    pub fn into_ctx(self) -> ExecuteContext {
        match self {
            DeferredOp::Plan(plan) => plan.ctx,
            DeferredOp::Write(write) => write.pending_txn.ctx,
        }
    }
}

/// Describes a plan that is awaiting [`WriteLocks`].
#[derive(Derivative)]
#[derivative(Debug)]
pub struct DeferredPlan {
    #[derivative(Debug = "ignore")]
    pub ctx: ExecuteContext,
    pub plan: Plan,
    pub validity: PlanValidity,
    pub requires_locks: BTreeSet<CatalogItemId>,
}

#[derive(Debug)]
pub struct DeferredWrite {
    pub span: Span,
    pub writes: BTreeMap<CatalogItemId, SmallVec<[TableData; 1]>>,
    pub pending_txn: PendingTxn,
}

/// Describes what action triggered an update to a builtin table.
#[derive(Debug)]
pub(crate) enum BuiltinTableUpdateSource {
    /// Internal update, notify the caller when it's complete.
    Internal(oneshot::Sender<()>),
    /// Update was triggered by some background process, such as periodic heartbeats from COMPUTE.
    Background(oneshot::Sender<()>),
}

/// A pending write transaction that will be committed during the next group commit.
#[derive(Debug)]
pub(crate) enum PendingWriteTxn {
    /// Write to a user table.
    User {
        span: Span,
        /// List of all write operations within the transaction.
        writes: BTreeMap<CatalogItemId, SmallVec<[TableData; 1]>>,
        /// If they exist, should contain locks for each [`CatalogItemId`] in `writes`.
        write_locks: Option<WriteLocks>,
        /// Inner transaction.
        pending_txn: PendingTxn,
    },
    /// Write to a system table.
    System {
        updates: Vec<BuiltinTableUpdate>,
        source: BuiltinTableUpdateSource,
    },
}

impl PendingWriteTxn {
    fn is_internal_system(&self) -> bool {
        match self {
            PendingWriteTxn::System {
                source: BuiltinTableUpdateSource::Internal(_),
                ..
            } => true,
            _ => false,
        }
    }
}

impl Coordinator {
    /// Send a message to the Coordinator to start a group commit.
    pub(crate) fn trigger_group_commit(&mut self) {
        self.group_commit_tx.notify();
        // Avoid excessive `Message::GroupCommitInitiate` by resetting the periodic table
        // advancement. The group commit triggered by the message above will already advance all
        // tables.
        self.advance_timelines_interval.reset();
    }

    /// Tries to execute a previously deferred [`DeferredOp`] that requires write locks.
    ///
    /// If we can't acquire all of the write locks, then we'll defer the plan again and wait for
    /// the necessary locks to become available.
    pub(crate) async fn try_deferred(
        &mut self,
        conn_id: ConnectionId,
        acquired_lock: Option<(CatalogItemId, tokio::sync::OwnedMutexGuard<()>)>,
    ) {
        // Try getting the deferred op; it may have already been canceled.
        let Some(op) = self.deferred_write_ops.remove(&conn_id) else {
            tracing::warn!(%conn_id, "no deferred op found, it must have been canceled?");
            return;
        };
        tracing::info!(%conn_id, "trying deferred plan");

        // If we pre-acquired a lock, try to acquire the rest.
        let write_locks = match acquired_lock {
            Some((acquired_gid, acquired_lock)) => {
                let mut write_locks = WriteLocks::builder(op.required_locks());

                // Insert the one lock we already acquired into our builder.
                write_locks.insert_lock(acquired_gid, acquired_lock);

                // Acquire the rest of our locks, filtering out the one we already have.
                for gid in op.required_locks().filter(|gid| *gid != acquired_gid) {
                    if let Some(lock) = self.try_grant_object_write_lock(gid) {
                        write_locks.insert_lock(gid, lock);
                    }
                }

                // If we failed to acquire any locks, spawn a task that waits for them to become
                // available.
                let locks = match write_locks.all_or_nothing(op.conn_id()) {
                    Ok(locks) => locks,
                    Err(failed_to_acquire) => {
                        let acquire_future = self
                            .grant_object_write_lock(failed_to_acquire)
                            .map(Option::Some);
                        self.defer_op(acquire_future, op);
                        return;
                    }
                };

                Some(locks)
            }
            None => None,
        };

        match op {
            DeferredOp::Plan(mut deferred) => {
                if let Err(e) = deferred.validity.check(self.catalog()) {
                    deferred.ctx.retire(Err(e))
                } else {
                    // Write statements never need to track resolved IDs (NOTE: This is not the
                    // same thing as plan dependencies, which we do need to re-validate).
                    let resolved_ids = ResolvedIds::empty();

                    // If we pre-acquired our locks, grant them to the session.
                    if let Some(locks) = write_locks {
                        let conn_id = deferred.ctx.session().conn_id().clone();
                        if let Err(existing) =
                            deferred.ctx.session_mut().try_grant_write_locks(locks)
                        {
                            tracing::error!(
                                %conn_id,
                                ?existing,
                                "session already has write locks granted?",
                            );
                            return deferred.ctx.retire(Err(AdapterError::WrongSetOfLocks));
                        }
                    };

                    // Note: This plan is not guaranteed to run; it may get deferred again.
                    self.sequence_plan(deferred.ctx, deferred.plan, resolved_ids)
                        .await;
                }
            }
            DeferredOp::Write(DeferredWrite {
                span,
                writes,
                pending_txn,
            }) => {
                self.submit_write(PendingWriteTxn::User {
                    span,
                    writes,
                    write_locks,
                    pending_txn,
                });
            }
        }
    }

    /// Attempts to commit all pending write transactions in a group commit. If the timestamp
    /// chosen for the writes is not ahead of `now()`, then we can execute and commit the writes
    /// immediately. Otherwise we must wait for `now()` to advance past the timestamp chosen for
    /// the writes.
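    ///
    /// As a worked example (with illustrative numbers): if the chosen write timestamp is 250ms
    /// ahead of `now()`, the `GroupCommitInitiate` message is delayed by roughly 250ms; if it is
    /// 10s ahead, the delay is capped at 1s, which guards against a system clock that has
    /// retreated by a large amount.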
    #[instrument(level = "debug")]
    pub(crate) async fn try_group_commit(&mut self, permit: Option<GroupCommitPermit>) {
        let timestamp = self.peek_local_write_ts().await;
        let now = Timestamp::from((self.catalog().config().now)());

        // HACK: This is a special case to allow writes to the mz_sessions table to proceed even
        // if the timestamp oracle is ahead of the current walltime. We do this because there are
        // some tests that mock the walltime, so it doesn't automatically advance, and updating
        // those tests to advance the walltime while creating a connection is too much.
        //
        // TODO(parkmycar): Get rid of the check below when refactoring group commits.
        let contains_internal_system_write = self
            .pending_writes
            .iter()
            .any(|write| write.is_internal_system());

        if timestamp > now && !contains_internal_system_write {
            // Cap retry time to 1s. In cases where the system clock has retreated by
            // some large amount of time, this prevents us from then waiting for that
            // large amount of time in case the system clock then advances back to near
            // what it was.
            let remaining_ms = std::cmp::min(timestamp.saturating_sub(now), 1_000.into());
            let internal_cmd_tx = self.internal_cmd_tx.clone();
            task::spawn(
                || "group_commit_initiate",
                async move {
                    tokio::time::sleep(Duration::from_millis(remaining_ms.into())).await;
                    // It is not an error for this task to be running after `internal_cmd_rx` is dropped.
                    let result =
                        internal_cmd_tx.send(Message::GroupCommitInitiate(Span::current(), permit));
                    if let Err(e) = result {
                        warn!("internal_cmd_rx dropped before we could send: {:?}", e);
                    }
                }
                .instrument(Span::current()),
            );
        } else {
            self.group_commit(permit).await;
        }
    }

    /// Tries to commit all pending write transactions at the same timestamp.
    ///
    /// If the caller of this function has the `write_lock` acquired, then they can optionally
    /// pass it in to this method. If the caller does not have the `write_lock` acquired and the
    /// `write_lock` is currently locked by another operation, then only writes to system tables
    /// and table advancements will be applied. If the caller does not have the `write_lock`
    /// acquired and the `write_lock` is not currently locked by another operation, then group
    /// commit will acquire it and all writes will be applied.
    ///
    /// All applicable pending writes will be combined into a single Append command and sent to
    /// STORAGE as a single batch. All applicable writes will happen at the same timestamp and all
    /// involved tables will be advanced to some timestamp larger than the timestamp of the write.
    ///
    /// Returns the timestamp of the write.
    #[instrument(name = "coord::group_commit")]
    pub(crate) async fn group_commit(&mut self, permit: Option<GroupCommitPermit>) -> Timestamp {
        let mut validated_writes = Vec::new();
        let mut deferred_writes = Vec::new();
        let mut group_write_locks = GroupCommitWriteLocks::default();

        // TODO(parkmycar): Refactor away this allocation. Currently `drain(..)` requires holding
        // a mutable borrow on the Coordinator and so does trying to grant a write lock.
        let pending_writes: Vec<_> = self.pending_writes.drain(..).collect();

        // Validate, merge, and possibly acquire write locks for as many pending writes as possible.
        for pending_write in pending_writes {
            match pending_write {
                // We always allow system writes to proceed.
                PendingWriteTxn::System { .. } => validated_writes.push(pending_write),
                // We have a set of locks! Validate they're correct (expected).
                PendingWriteTxn::User {
                    span,
                    write_locks: Some(write_locks),
                    writes,
                    pending_txn,
                } => match write_locks.validate(writes.keys().copied()) {
                    Ok(validated_locks) => {
                        // Merge all of our write locks together since we can allow concurrent
                        // writes at the same timestamp.
                        group_write_locks.merge(validated_locks);

                        let validated_write = PendingWriteTxn::User {
                            span,
                            writes,
                            write_locks: None,
                            pending_txn,
                        };
                        validated_writes.push(validated_write);
                    }
                    // This is very unexpected since callers of this method should be validating.
                    //
                    // We cannot allow these writes to occur, since if the correct set of locks
                    // was not taken we could violate serializability.
                    Err(missing) => {
                        let writes: Vec<_> = writes.keys().collect();
                        panic!(
                            "got to group commit with partial set of locks!\nmissing: {:?}, writes: {:?}, txn: {:?}",
                            missing, writes, pending_txn,
                        );
                    }
                },
                // If we don't have any locks, try to acquire them, otherwise defer the write.
                PendingWriteTxn::User {
                    span,
                    writes,
                    write_locks: None,
                    pending_txn,
                } => {
                    let missing = group_write_locks.missing_locks(writes.keys().copied());

                    if missing.is_empty() {
                        // We have all the locks! Queue the pending write.
                        let validated_write = PendingWriteTxn::User {
                            span,
                            writes,
                            write_locks: None,
                            pending_txn,
                        };
                        validated_writes.push(validated_write);
                    } else {
                        // Try to acquire the locks we're missing.
                        let mut just_in_time_locks = WriteLocks::builder(missing.clone());
                        for collection in missing {
                            if let Some(lock) = self.try_grant_object_write_lock(collection) {
                                just_in_time_locks.insert_lock(collection, lock);
                            }
                        }

                        match just_in_time_locks.all_or_nothing(pending_txn.ctx.session().conn_id())
                        {
                            // We acquired all of the locks! Proceed with the write.
                            Ok(locks) => {
                                group_write_locks.merge(locks);
                                let validated_write = PendingWriteTxn::User {
                                    span,
                                    writes,
                                    write_locks: None,
                                    pending_txn,
                                };
                                validated_writes.push(validated_write);
                            }
                            // Darn. We couldn't acquire the locks, defer the write.
                            Err(missing) => {
                                let acquire_future =
                                    self.grant_object_write_lock(missing).map(Option::Some);
                                let write = DeferredWrite {
                                    span,
                                    writes,
                                    pending_txn,
                                };
                                deferred_writes.push((acquire_future, write));
                            }
                        }
                    }
                }
            }
        }

        // Queue all of our deferred ops.
        for (acquire_future, write) in deferred_writes {
            self.defer_op(acquire_future, DeferredOp::Write(write));
        }

        // The value returned here still might be ahead of `now()` if `now()` has gone backwards at
        // any point during this method or if this was triggered from DDL. We will still commit the
        // write without waiting for `now()` to advance. This is ok because the next batch of writes
        // will trigger the wait loop in `try_group_commit()` if `now()` hasn't advanced past the
        // global timeline, preventing an unbounded advancing of the global timeline ahead of
        // `now()`. Additionally DDL is infrequent enough and takes long enough that we don't think
        // it's practical for continuous DDL to advance the global timestamp in an unbounded manner.
        let WriteTimestamp {
            timestamp,
            advance_to,
        } = self.get_local_write_ts().await;

        // While we're flipping on the feature flags for txn-wal tables and
        // the separated Postgres timestamp oracle, we also need to confirm
        // leadership on writes _after_ getting the timestamp and _before_
        // writing anything to table shards.
        //
        // TODO: Remove this after both (either?) of the above features are on
        // for good and there is no possibility of running the old code.
        let () = self
            .catalog
            .confirm_leadership()
            .await
            .unwrap_or_terminate("unable to confirm leadership");

        let mut appends: BTreeMap<CatalogItemId, SmallVec<[TableData; 1]>> = BTreeMap::new();
        let mut responses = Vec::with_capacity(validated_writes.len());
        let mut notifies = Vec::new();

        for validated_write_txn in validated_writes {
            match validated_write_txn {
                PendingWriteTxn::User {
                    span: _,
                    writes,
                    write_locks,
                    pending_txn:
                        PendingTxn {
                            ctx,
                            response,
                            action,
                        },
                } => {
                    assert_none!(write_locks, "should have merged together all locks above");
                    for (id, table_data) in writes {
                        // If the table that some write was targeting has been deleted while the
                        // write was waiting, then the write will be ignored and we respond to the
                        // client that the write was successful. This is only possible if the write
                        // and the delete were concurrent. Therefore, we are free to order the
                        // write before the delete without violating any consistency guarantees.
                        if self.catalog().try_get_entry(&id).is_some() {
                            appends.entry(id).or_default().extend(table_data);
                        }
                    }
                    if let Some(id) = ctx.extra().contents() {
                        self.set_statement_execution_timestamp(id, timestamp);
                    }

                    responses.push(CompletedClientTransmitter::new(ctx, response, action));
                }
                PendingWriteTxn::System { updates, source } => {
                    for update in updates {
                        appends.entry(update.id).or_default().push(update.data);
                    }
                    // Once the write completes we notify any waiters.
                    match source {
                        BuiltinTableUpdateSource::Internal(tx)
                        | BuiltinTableUpdateSource::Background(tx) => notifies.push(tx),
                    }
                }
            }
        }

        // Add table advancements for all tables.
        for table in self.catalog().entries().filter(|entry| entry.is_table()) {
            appends.entry(table.id()).or_default();
        }

        // Consolidate all Rows for a given table. We do not consolidate the
        // staged batches; that's up to whoever staged them.
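        //
        // For example (illustrative): the updates `[(row_a, +1), (row_a, +1), (row_b, +1),
        // (row_b, -1)]` consolidate to `[(row_a, +2)]`, since the `row_b` updates cancel out.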
        let mut all_appends = Vec::with_capacity(appends.len());
        for (item_id, table_data) in appends.into_iter() {
            let mut all_rows = Vec::new();
            let mut all_data = Vec::new();
            for data in table_data {
                match data {
                    TableData::Rows(rows) => all_rows.extend(rows),
                    TableData::Batches(_) => all_data.push(data),
                }
            }
            differential_dataflow::consolidation::consolidate(&mut all_rows);
            all_data.push(TableData::Rows(all_rows));

            // TODO(parkmycar): Use SmallVec throughout.
            all_appends.push((item_id, all_data));
        }

        let appends: Vec<_> = all_appends
            .into_iter()
            .map(|(id, updates)| {
                let gid = self.catalog().get_entry(&id).latest_global_id();
                (gid, updates)
            })
            .collect();

        // Log non-empty user appends.
        let modified_tables: Vec<_> = appends
            .iter()
            .filter_map(|(id, updates)| {
                if id.is_user() && !updates.iter().all(|u| u.is_empty()) {
                    Some(id)
                } else {
                    None
                }
            })
            .collect();
        if !modified_tables.is_empty() {
            info!(
                "Appending to tables, {modified_tables:?}, at {timestamp}, advancing to {advance_to}"
            );
        }
        // Instrument our table writes since they can block the coordinator.
        let histogram = self.metrics.append_table_duration_seconds.clone();
        let append_fut = self
            .controller
            .storage
            .append_table(timestamp, advance_to, appends)
            .expect("invalid updates")
            .wall_time()
            .observe(histogram);

        // Spawn a task to do the table writes.
        let internal_cmd_tx = self.internal_cmd_tx.clone();
        let apply_write_fut = self.apply_local_write(timestamp);

        let span = debug_span!(parent: None, "group_commit_apply");
        OpenTelemetryContext::obtain().attach_as_parent_to(&span);
        task::spawn(
            || "group_commit_apply",
            async move {
                // Wait for the writes to complete.
                match append_fut
                    .instrument(debug_span!("group_commit_apply::append_fut"))
                    .await
                {
                    Ok(append_result) => {
                        append_result.unwrap_or_terminate("cannot fail to apply appends")
                    }
                    Err(_) => warn!("Writer terminated with writes in indefinite state"),
                };

                // Apply the write by marking the timestamp as complete on the timeline.
                apply_write_fut
                    .instrument(debug_span!("group_commit_apply::append_write_fut"))
                    .await;

                // Notify the external clients of the result.
                for response in responses {
                    let (mut ctx, result) = response.finalize();
                    ctx.session_mut().apply_write(timestamp);
                    ctx.retire(result);
                }

                // IMPORTANT: Make sure we hold the permit and write locks
                // until here, to prevent other writes from going through while
                // we haven't yet applied the write at the timestamp oracle.
                drop(permit);
                drop(group_write_locks);

                // Advance other timelines.
                if let Err(e) = internal_cmd_tx.send(Message::AdvanceTimelines) {
                    warn!("Server closed with non-advanced timelines, {e}");
                }

                for notify in notifies {
                    // We don't care if the listeners have gone away.
                    let _ = notify.send(());
                }
            }
            .instrument(span),
        );

        timestamp
    }

    /// Submit a write to be executed during the next group commit and trigger a group commit.
    pub(crate) fn submit_write(&mut self, pending_write_txn: PendingWriteTxn) {
        if self.controller.read_only() {
            panic!(
                "attempting table write in read-only mode: {:?}",
                pending_write_txn
            );
        }
        self.pending_writes.push(pending_write_txn);
        self.trigger_group_commit();
    }

    /// Append some [`BuiltinTableUpdate`]s, with various degrees of waiting and blocking.
    pub(crate) fn builtin_table_update<'a>(&'a mut self) -> BuiltinTableAppend<'a> {
        BuiltinTableAppend { coord: self }
    }

    pub(crate) fn defer_op<F>(&mut self, acquire_future: F, op: DeferredOp)
    where
        F: Future<Output = Option<(CatalogItemId, tokio::sync::OwnedMutexGuard<()>)>>
            + Send
            + 'static,
    {
        let conn_id = op.conn_id().clone();

        // Track all of our deferred ops.
        let is_optimistic = op.can_be_optimistically_retried();
        self.deferred_write_ops.insert(conn_id.clone(), op);

        let internal_cmd_tx = self.internal_cmd_tx.clone();
        let conn_id_ = conn_id.clone();
        mz_ore::task::spawn(|| format!("defer op {conn_id_}"), async move {
            tracing::info!(%conn_id, "deferring plan");
            // Once we can acquire the first failed lock, try running the deferred plan.
            //
            // Note: This does not guarantee the plan will be able to run; there might be
            // other locks that we later fail to get.
            let acquired_lock = acquire_future.await;

            // Some operations, e.g. blind INSERTs, can be optimistically retried, meaning we
            // can run multiple at once. In those cases we don't hold the lock, so we retry all
            // blind writes for a single object.
            let acquired_lock = match (acquired_lock, is_optimistic) {
                (Some(_lock), true) => None,
                (Some(lock), false) => Some(lock),
                (None, _) => None,
            };

            // If this send fails, then the Coordinator is shutting down.
            let _ = internal_cmd_tx.send(Message::TryDeferred {
                conn_id,
                acquired_lock,
            });
        });
    }

    /// Returns a future that waits until it can get an exclusive lock on the specified collection.
    pub(crate) fn grant_object_write_lock(
        &mut self,
        object_id: CatalogItemId,
    ) -> impl Future<Output = (CatalogItemId, OwnedMutexGuard<()>)> + 'static {
        let write_lock_handle = self
            .write_locks
            .entry(object_id)
            .or_insert_with(|| Arc::new(tokio::sync::Mutex::new(())));
        let write_lock_handle = Arc::clone(write_lock_handle);

        write_lock_handle
            .lock_owned()
            .map(move |guard| (object_id, guard))
    }

    /// Lazily creates the lock for the provided `object_id` and grants it if possible; returns
    /// `None` if the lock is already held.
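    ///
    /// # Example
    ///
    /// A sketch of the intended semantics (illustrative only; `coord` and `id` are hypothetical):
    ///
    /// ```ignore
    /// // The first attempt succeeds because nobody holds the lock yet.
    /// let guard = coord.try_grant_object_write_lock(id).expect("lock is free");
    /// // A second attempt fails while the first guard is still alive.
    /// assert!(coord.try_grant_object_write_lock(id).is_none());
    /// drop(guard);
    /// ```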
    pub(crate) fn try_grant_object_write_lock(
        &mut self,
        object_id: CatalogItemId,
    ) -> Option<OwnedMutexGuard<()>> {
        let write_lock_handle = self
            .write_locks
            .entry(object_id)
            .or_insert_with(|| Arc::new(tokio::sync::Mutex::new(())));
        let write_lock_handle = Arc::clone(write_lock_handle);

        write_lock_handle.try_lock_owned().ok()
    }
}

/// Helper struct to run a builtin table append.
pub struct BuiltinTableAppend<'a> {
    coord: &'a mut Coordinator,
}

/// `Future` that notifies when a builtin table write has completed.
///
/// Note: builtin table writes need to talk to persist, which can take hundreds of milliseconds.
/// This type allows you to execute a builtin table write, e.g. via [`BuiltinTableAppend::execute`],
/// and wait for it to complete, while other long-running tasks are concurrently executing.
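///
/// A sketch of the intended usage (illustrative only; `coord` and `updates` are hypothetical):
///
/// ```ignore
/// // Stage the write and get back a future, without blocking the Coordinator.
/// let (notify, _ts) = coord.builtin_table_update().execute(updates).await;
/// // ... do other long-running work here ...
/// // Later, wait for the write to land in the builtin table.
/// notify.await;
/// ```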
pub type BuiltinTableAppendNotify = Pin<Box<dyn Future<Output = ()> + Send + Sync + 'static>>;

impl<'a> BuiltinTableAppend<'a> {
    /// Submit a write to a system table to be executed during the next group commit. This method
    /// __does not__ trigger a group commit.
    ///
    /// This is useful for non-critical writes like metric updates because it allows us to
    /// piggyback on the next group commit instead of triggering a potentially expensive group
    /// commit.
    ///
    /// Note: __do not__ call this for DDL which needs the system tables updated immediately.
    ///
    /// Note: When in read-only mode, this will buffer the update and return
    /// immediately.
    pub fn background(self, mut updates: Vec<BuiltinTableUpdate>) -> BuiltinTableAppendNotify {
        if self.coord.controller.read_only() {
            self.coord
                .buffered_builtin_table_updates
                .as_mut()
                .expect("in read-only mode")
                .append(&mut updates);

            return Box::pin(futures::future::ready(()));
        }

        let (tx, rx) = oneshot::channel();
        self.coord.pending_writes.push(PendingWriteTxn::System {
            updates,
            source: BuiltinTableUpdateSource::Background(tx),
        });

        Box::pin(rx.map(|_| ()))
    }

    /// Submits a write to be executed during the next group commit __and__ triggers a group commit.
    ///
    /// Returns a `Future` that resolves when the write has completed; it does not block the
    /// Coordinator.
    ///
    /// Note: When in read-only mode, this will buffer the update and the
    /// returned future will resolve immediately, without the update actually
    /// having been written.
    pub fn defer(self, mut updates: Vec<BuiltinTableUpdate>) -> BuiltinTableAppendNotify {
        if self.coord.controller.read_only() {
            self.coord
                .buffered_builtin_table_updates
                .as_mut()
                .expect("in read-only mode")
                .append(&mut updates);

            return Box::pin(futures::future::ready(()));
        }

        let (tx, rx) = oneshot::channel();
        self.coord.pending_writes.push(PendingWriteTxn::System {
            updates,
            source: BuiltinTableUpdateSource::Internal(tx),
        });
        self.coord.trigger_group_commit();

        Box::pin(rx.map(|_| ()))
    }

    /// Submit a write to a system table.
    ///
    /// This method will block the Coordinator on acquiring a write timestamp from the timestamp
    /// oracle, and then returns a `Future` that will complete once the write has been applied,
    /// along with the write timestamp.
    ///
    /// Note: When in read-only mode, this will buffer the update, the
    /// returned future will resolve immediately, without the update actually
    /// having been written, and no timestamp is returned.
    pub async fn execute(
        self,
        mut updates: Vec<BuiltinTableUpdate>,
    ) -> (BuiltinTableAppendNotify, Option<Timestamp>) {
        if self.coord.controller.read_only() {
            self.coord
                .buffered_builtin_table_updates
                .as_mut()
                .expect("in read-only mode")
                .append(&mut updates);

            return (Box::pin(futures::future::ready(())), None);
        }

        let (tx, rx) = oneshot::channel();

        // Most DDL queries cause writes to system tables. Unlike writes to user tables, system
        // table writes do not wait for a group commit; they explicitly trigger one. There is a
        // possibility that if a user is executing DDL at a rate faster than 1 query per
        // millisecond, then the global timeline will unboundedly advance past the system clock.
        // This can cause future queries to block, but will not affect correctness. Since this
        // rate of DDL is unlikely, we allow DDL to explicitly trigger group commit.
        self.coord.pending_writes.push(PendingWriteTxn::System {
            updates,
            source: BuiltinTableUpdateSource::Internal(tx),
        });
        let write_ts = self.coord.group_commit(None).await;

        // Avoid excessive group commits by resetting the periodic table advancement timer. The
        // group commit triggered above will already advance all tables.
        self.coord.advance_timelines_interval.reset();

        (Box::pin(rx.map(|_| ())), Some(write_ts))
    }

    /// Submit a write to a system table, blocking until complete.
    ///
    /// Note: if possible you should use the `execute(...)` method, which returns a `Future` that
    /// can be `await`-ed concurrently with other tasks.
    ///
    /// Note: When in read-only mode, this will buffer the update and the
    /// returned future will resolve immediately, without the update actually
    /// having been written.
    pub async fn blocking(self, updates: Vec<BuiltinTableUpdate>) {
        let (notify, _) = self.execute(updates).await;
        notify.await;
    }
}

/// Returns two sides of a "channel" that can be used to notify the coordinator when we want a
/// group commit to be run.
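///
/// A sketch of how the two halves are intended to interact (illustrative only):
///
/// ```ignore
/// let (notifier, waiter) = notifier();
///
/// // Somewhere in the Coordinator: request a group commit.
/// notifier.notify();
///
/// // In the main loop: wait until a commit is requested _and_ no other
/// // group commit is in flight, then hold the permit for its duration.
/// let permit = waiter.ready().await;
/// // ... run the group commit, then drop `permit` ...
/// ```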
pub fn notifier() -> (GroupCommitNotifier, GroupCommitWaiter) {
    let notify = Arc::new(Notify::new());
    let in_progress = Arc::new(Semaphore::new(1));

    let notifier = GroupCommitNotifier {
        notify: Arc::clone(&notify),
    };
    let waiter = GroupCommitWaiter {
        notify,
        in_progress,
    };

    (notifier, waiter)
}

/// A handle that allows us to notify the coordinator that a group commit should be run at some
/// point in the future.
#[derive(Debug, Clone)]
pub struct GroupCommitNotifier {
    /// Tracks if there are any outstanding group commits.
    notify: Arc<Notify>,
}

impl GroupCommitNotifier {
    /// Notifies the [`GroupCommitWaiter`] that we'd like a group commit to be run.
    pub fn notify(&self) {
        self.notify.notify_one()
    }
}

/// A handle that returns a future when a group commit needs to be run, and one is not currently
/// being run.
#[derive(Debug)]
pub struct GroupCommitWaiter {
    /// Tracks if there are any outstanding group commits.
    notify: Arc<Notify>,
    /// Distributes permits which track in-progress group commits.
    in_progress: Arc<Semaphore>,
}
static_assertions::assert_not_impl_all!(GroupCommitWaiter: Clone);

impl GroupCommitWaiter {
    /// Returns a permit for a group commit, once a permit is available _and_ someone has
    /// requested a group commit to be run.
    ///
    /// # Cancel Safety
    ///
    /// * Waiting on the returned Future is cancel safe because we acquire an in-progress permit
    ///   before waiting for notifications. If the Future gets dropped after acquiring a permit but
    ///   before a group commit is queued, we'll release the permit, which can be acquired by the
    ///   next caller.
    ///
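    /// # Example
    ///
    /// A sketch of why the cancel safety matters (illustrative only; `waiter`, `shutdown`, and
    /// `run_group_commit` are hypothetical):
    ///
    /// ```ignore
    /// tokio::select! {
    ///     // Safe to race against other branches: if `shutdown` wins, the
    ///     // in-progress permit is released for the next caller.
    ///     permit = waiter.ready() => run_group_commit(permit).await,
    ///     _ = shutdown.recv() => return,
    /// }
    /// ```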
    pub async fn ready(&self) -> GroupCommitPermit {
        let permit = Semaphore::acquire_owned(Arc::clone(&self.in_progress))
            .await
            .expect("semaphore should not close");

        // Note: We must wait for notifies _after_ waiting for a permit to be acquired for cancel
        // safety.
        self.notify.notified().await;

        GroupCommitPermit {
            _permit: Some(permit),
        }
    }
}

/// A permit to run a group commit; this must be kept alive for the entire duration of the commit.
///
/// Note: We sometimes want to throttle how many group commits are running at once, which this
/// permit allows us to do.
#[derive(Debug)]
pub struct GroupCommitPermit {
    /// Permit that is preventing other group commits from running.
    ///
    /// Only `None` if the permit has been moved into a tokio task for waiting.
    _permit: Option<OwnedSemaphorePermit>,
}

/// When we start a [`Session`] we need to update some builtin tables, but we don't want to wait
/// for these writes to complete for two reasons:
///
/// 1. Doing a write can take a relatively long time.
/// 2. Decoupling the write from the session start allows us to batch multiple writes together, if
///    sessions are being created at a high frequency.
///
/// So as an optimization we do not wait for these writes to complete. But if a [`Session`] tries
/// to query any of these builtin objects, we need to block that query on the writes completing to
/// maintain linearizability.
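///
/// A sketch of the intended call pattern (illustrative only; the surrounding sequencing logic is
/// elided):
///
/// ```ignore
/// if let Some((_ids, wait_future)) = waiting_on_startup_appends(catalog, session, &plan) {
///     // The plan reads e.g. mz_sessions; hold it back until the session's
///     // startup writes have landed, then continue sequencing it.
///     wait_future.await;
/// }
/// ```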
pub(crate) fn waiting_on_startup_appends(
    catalog: &Catalog,
    session: &mut Session,
    plan: &Plan,
) -> Option<(BTreeSet<CatalogItemId>, BoxFuture<'static, ()>)> {
    // TODO(parkmycar): We need to check transitive uses here too if we ever move the
    // referenced builtin tables out of mz_internal, or we allow creating views on
    // mz_internal objects.
    let depends_on = match plan {
        Plan::Select(plan) => plan.source.depends_on(),
        Plan::ReadThenWrite(plan) => plan.selection.depends_on(),
        Plan::ShowColumns(plan) => plan.select_plan.source.depends_on(),
        Plan::Subscribe(plan) => plan.from.depends_on(),
        Plan::ExplainPlan(ExplainPlanPlan {
            explainee: Explainee::Statement(ExplaineeStatement::Select { plan, .. }),
            ..
        }) => plan.source.depends_on(),
        Plan::ExplainTimestamp(ExplainTimestampPlan { raw_plan, .. }) => raw_plan.depends_on(),
        Plan::CreateConnection(_)
        | Plan::CreateDatabase(_)
        | Plan::CreateSchema(_)
        | Plan::CreateRole(_)
        | Plan::CreateNetworkPolicy(_)
        | Plan::CreateCluster(_)
        | Plan::CreateClusterReplica(_)
        | Plan::CreateContinualTask(_)
        | Plan::CreateSource(_)
        | Plan::CreateSources(_)
        | Plan::CreateSecret(_)
        | Plan::CreateSink(_)
        | Plan::CreateTable(_)
        | Plan::CreateView(_)
        | Plan::CreateMaterializedView(_)
        | Plan::CreateIndex(_)
        | Plan::CreateType(_)
        | Plan::Comment(_)
        | Plan::DiscardTemp
        | Plan::DiscardAll
        | Plan::DropObjects(_)
        | Plan::DropOwned(_)
        | Plan::EmptyQuery
        | Plan::ShowAllVariables
        | Plan::ShowCreate(_)
        | Plan::ShowVariable(_)
        | Plan::InspectShard(_)
        | Plan::SetVariable(_)
        | Plan::ResetVariable(_)
        | Plan::SetTransaction(_)
        | Plan::StartTransaction(_)
        | Plan::CommitTransaction(_)
        | Plan::AbortTransaction(_)
        | Plan::CopyFrom(_)
        | Plan::CopyTo(_)
        | Plan::ExplainPlan(_)
        | Plan::ExplainPushdown(_)
        | Plan::ExplainSinkSchema(_)
        | Plan::Insert(_)
        | Plan::AlterNetworkPolicy(_)
        | Plan::AlterNoop(_)
        | Plan::AlterClusterRename(_)
        | Plan::AlterClusterSwap(_)
        | Plan::AlterClusterReplicaRename(_)
        | Plan::AlterCluster(_)
        | Plan::AlterConnection(_)
        | Plan::AlterSource(_)
        | Plan::AlterSetCluster(_)
        | Plan::AlterItemRename(_)
        | Plan::AlterRetainHistory(_)
        | Plan::AlterSchemaRename(_)
        | Plan::AlterSchemaSwap(_)
        | Plan::AlterSecret(_)
        | Plan::AlterSink(_)
        | Plan::AlterSystemSet(_)
        | Plan::AlterSystemReset(_)
        | Plan::AlterSystemResetAll(_)
        | Plan::AlterRole(_)
        | Plan::AlterOwner(_)
        | Plan::AlterTableAddColumn(_)
        | Plan::Declare(_)
        | Plan::Fetch(_)
        | Plan::Close(_)
        | Plan::Prepare(_)
        | Plan::Execute(_)
        | Plan::Deallocate(_)
        | Plan::Raise(_)
        | Plan::GrantRole(_)
        | Plan::RevokeRole(_)
        | Plan::GrantPrivileges(_)
        | Plan::RevokePrivileges(_)
        | Plan::AlterDefaultPrivileges(_)
        | Plan::ReassignOwned(_)
        | Plan::ValidateConnection(_)
        | Plan::SideEffectingFunc(_) => BTreeSet::default(),
    };
    let depends_on_required_id = REQUIRED_BUILTIN_TABLES
        .iter()
        .map(|table| catalog.resolve_builtin_table(&**table))
        .any(|id| {
            catalog
                .get_global_ids(&id)
                .any(|gid| depends_on.contains(&gid))
        });

    // If our plan does not depend on any required ID, then we don't need to
    // wait for any builtin writes to occur.
    if !depends_on_required_id {
        return None;
    }

    // Even if we depend on a builtin table, there's no need to wait if the
    // writes have already completed.
    //
    // TODO(parkmycar): As an optimization we should add a `Notify` type to
    // `mz_ore` that allows peeking. If the builtin table writes have already
    // completed then there is no need to defer this plan.
    match session.clear_builtin_table_updates() {
        Some(wait_future) => {
            let depends_on = depends_on
                .into_iter()
                .map(|gid| catalog.get_entry_by_global_id(&gid).id())
                .collect();
            Some((depends_on, wait_future.boxed()))
        }
        None => None,
    }
}