// Copyright Materialize, Inc. and contributors. All rights reserved.
//
// Use of this software is governed by the Business Source License
// included in the LICENSE file.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0.

#![allow(missing_docs)]

//! The tunable knobs for persist.

use std::sync::Arc;
use std::sync::atomic::AtomicBool;
use std::time::{Duration, SystemTime, UNIX_EPOCH};

use mz_build_info::BuildInfo;
use mz_dyncfg::{Config, ConfigDefault, ConfigSet, ConfigUpdates};
use mz_ore::instrument;
use mz_ore::now::NowFn;
use mz_persist::cfg::BlobKnobs;
use mz_persist::retry::Retry;
use mz_postgres_client::PostgresClientKnobs;
use proptest_derive::Arbitrary;
use semver::Version;
use serde::{Deserialize, Serialize};
use tokio::sync::watch;

use crate::async_runtime;
use crate::internal::machine::{
    NEXT_LISTEN_BATCH_RETRYER_CLAMP, NEXT_LISTEN_BATCH_RETRYER_INITIAL_BACKOFF,
    NEXT_LISTEN_BATCH_RETRYER_MULTIPLIER,
};
use crate::internal::state::ROLLUP_THRESHOLD;
use crate::operators::STORAGE_SOURCE_DECODE_FUEL;
use crate::read::READER_LEASE_DURATION;

// Ignores the patch version
const SELF_MANAGED_VERSIONS: &[Version] = &[
    // 25.1
    Version::new(0, 130, 0),
    // 25.2
    Version::new(0, 147, 0),
];

/// The tunable knobs for persist.
///
/// Tuning inputs:
/// - A larger blob_target_size (capped at KEY_VAL_DATA_MAX_LEN) results in
///   fewer entries in consensus state. Before we have compaction and/or
///   incremental state, it is already growing without bound, so this is a
///   concern. OTOH, for any "reasonable" size (> 100MiB?) of blob_target_size,
///   it seems we'd end up with a pretty tremendous amount of data in the shard
///   before this became a real issue.
/// - A larger blob_target_size will result in fewer s3 operations, which are
///   charged per operation. (Hmm, maybe not if we're charged per call in a
///   multipart op. The S3Blob impl already chunks things at 8MiB.)
/// - A smaller blob_target_size will result in more even memory usage in
///   readers.
/// - A larger batch_builder_max_outstanding_parts increases throughput (to a
///   point).
/// - A smaller batch_builder_max_outstanding_parts provides a bound on the
///   amount of memory used by a writer.
/// - A larger compaction_heuristic_min_inputs means state size is larger.
/// - A smaller compaction_heuristic_min_inputs means more compactions happen
///   (higher write amp).
/// - A larger compaction_heuristic_min_updates means more consolidations are
///   discovered while reading a snapshot (higher read amp and higher space
///   amp).
/// - A smaller compaction_heuristic_min_updates means more compactions happen
///   (higher write amp).
///
/// Tuning logic:
/// - blob_target_size was initially selected to be an exact multiple of 8MiB
///   (the s3 multipart size) that was in the same neighborhood as our initial
///   max throughput (~250MiB).
/// - batch_builder_max_outstanding_parts was initially selected to be as small
///   as possible without harming pipelining. 0 means no pipelining, 1 is full
///   pipelining as long as generating data takes less time than writing to s3
///   (hopefully a fair assumption), 2 is a little extra slop on top of 1.
/// - compaction_heuristic_min_inputs was set by running the open-loop benchmark
///   with batches of size 10,240 bytes (selected to be small but such that the
///   overhead of our columnar encoding format was less than 10%) and manually
///   increased until the write amp stopped going down. This becomes much less
///   important once we have incremental state. The initial value is a
///   placeholder and should be revisited at some point.
/// - compaction_heuristic_min_updates was set via a thought experiment. This is
///   an `O(n*log(n))` upper bound on the number of unconsolidated updates that
///   would be consolidated if we compacted as the in-mem Spine does. The
///   initial value is a placeholder and should be revisited at some point.
///
/// TODO: Move these tuning notes into SessionVar descriptions once we have
/// SystemVars for most of these.
//
// TODO: The configs left here don't react dynamically to changes. Move as many
// of them to DynamicConfig as possible.
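///
/// # Example
///
/// A minimal sketch (not from the original source) of constructing a config
/// with default tunings and reading one of the dynamic configs through the
/// `Deref<Target = ConfigSet>` impl below:
///
/// ```ignore
/// use mz_build_info::DUMMY_BUILD_INFO;
/// use mz_ore::now::SYSTEM_TIME;
///
/// let cfg = PersistConfig::new_default_configs(&DUMMY_BUILD_INFO, SYSTEM_TIME.clone());
/// // `PersistConfig` derefs to its `ConfigSet`, so dyncfgs can be read directly.
/// let fuel = crate::operators::STORAGE_SOURCE_DECODE_FUEL.get(&cfg);
/// ```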
#[derive(Debug, Clone)]
pub struct PersistConfig {
    /// Info about which version of the code is running.
    pub build_version: Version,
    /// Hostname of this persist user. Stored in state and used for debugging.
    pub hostname: String,
    /// Whether this persist instance is running in a "cc" sized cluster.
    pub is_cc_active: bool,
    /// Memory limit of the process, if known.
    pub announce_memory_limit: Option<usize>,
    /// A clock to use for all leasing and other non-debugging use.
    pub now: NowFn,
    /// Persist [Config]s that can change value dynamically within the lifetime
    /// of a process.
    ///
    /// TODO(cfg): Entirely replace dynamic with this.
    pub configs: Arc<ConfigSet>,
    /// Indicates whether `configs` has been synced at least once with an
    /// upstream source.
    configs_synced_once: Arc<watch::Sender<bool>>,
    /// Whether to physically and logically compact batches in blob storage.
    pub compaction_enabled: bool,
    /// Whether the `Compactor` will process compaction requests, or drop them on the floor.
    pub compaction_process_requests: Arc<AtomicBool>,
    /// In Compactor::compact_and_apply_background, the maximum number of concurrent
    /// compaction requests that can execute for a given shard.
    pub compaction_concurrency_limit: usize,
    /// In Compactor::compact_and_apply_background, the maximum number of pending
    /// compaction requests to queue.
    pub compaction_queue_size: usize,
    /// In Compactor::compact_and_apply_background, how many updates to encode or
    /// decode before voluntarily yielding the task.
    pub compaction_yield_after_n_updates: usize,
    /// Length of time after a writer's last operation after which the writer
    /// may be expired.
    pub writer_lease_duration: Duration,
    /// Length of time between critical handles' calls to downgrade their since.
    pub critical_downgrade_interval: Duration,
    /// Number of worker threads to create for the [`crate::IsolatedRuntime`], defaults to the
    /// number of logical CPUs.
    pub isolated_runtime_worker_threads: usize,
}

// Impl Deref to ConfigSet for convenience of accessing the dynamic configs.
impl std::ops::Deref for PersistConfig {
    type Target = ConfigSet;
    fn deref(&self) -> &Self::Target {
        &self.configs
    }
}

impl PersistConfig {
    /// Returns a new instance of [PersistConfig] with default tuning and
    /// default ConfigSet.
    pub fn new_default_configs(build_info: &BuildInfo, now: NowFn) -> Self {
        Self::new(build_info, now, all_dyncfgs(ConfigSet::default()))
    }

    /// Returns a new instance of [PersistConfig] with default tuning and the
    /// specified ConfigSet.
    pub fn new(build_info: &BuildInfo, now: NowFn, configs: ConfigSet) -> Self {
        // Escape hatch in case we need to disable compaction.
        let compaction_disabled = mz_ore::env::is_var_truthy("MZ_PERSIST_COMPACTION_DISABLED");

        // We create receivers on demand, so we drop the initial receiver.
        let (configs_synced_once, _) = watch::channel(false);

        Self {
            build_version: build_info.semver_version(),
            is_cc_active: false,
            announce_memory_limit: None,
            now,
            configs: Arc::new(configs),
            configs_synced_once: Arc::new(configs_synced_once),
            compaction_enabled: !compaction_disabled,
            compaction_process_requests: Arc::new(AtomicBool::new(true)),
            compaction_concurrency_limit: 5,
            compaction_queue_size: 20,
            compaction_yield_after_n_updates: 100_000,
            writer_lease_duration: 60 * Duration::from_secs(60),
            critical_downgrade_interval: Duration::from_secs(30),
            isolated_runtime_worker_threads: num_cpus::get(),
            // TODO: This doesn't work with the process orchestrator. Instead,
            // separate --log-prefix into --service-name and --enable-log-prefix
            // options, where the first is always provided and the second is
            // conditionally enabled by the process orchestrator.
            hostname: std::env::var("HOSTNAME").unwrap_or_else(|_| "unknown".to_owned()),
        }
    }

    pub(crate) fn set_config<T: ConfigDefault>(&self, cfg: &Config<T>, val: T) {
        let mut updates = ConfigUpdates::default();
        updates.add(cfg, val);
        updates.apply(self)
    }

    /// Applies the provided updates to this configuration.
    ///
    /// You should prefer calling this method over mutating `self.configs`
    /// directly, so that [`Self::configs_synced_once`] can be properly
    /// maintained.
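    ///
    /// A minimal sketch (not from the original source) of pushing an update
    /// through this path and then waiting for the initial sync, assuming a
    /// `PersistConfig` named `cfg` and an async context:
    ///
    /// ```ignore
    /// let mut updates = ConfigUpdates::default();
    /// updates.add(&crate::read::READER_LEASE_DURATION, Duration::from_secs(120));
    /// cfg.apply_from(&updates);
    /// // Resolves immediately, since `apply_from` marks the configs as synced.
    /// cfg.configs_synced_once().await;
    /// ```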
    pub fn apply_from(&self, updates: &ConfigUpdates) {
        updates.apply(&self.configs);
        self.configs_synced_once.send_replace(true);
    }

    /// Resolves when `configs` has been synced at least once with an upstream
    /// source, i.e., via [`Self::apply_from`].
    ///
    /// If `configs` has already been synced once at the time the method is
    /// called, resolves immediately.
    ///
    /// Useful in conjunction with configuration parameters that cannot be
    /// dynamically updated once set (e.g., PubSub).
    #[instrument(level = "info")]
    pub async fn configs_synced_once(&self) {
        self.configs_synced_once
            .subscribe()
            .wait_for(|synced| *synced)
            .await
            .expect("we have a borrow on sender so it cannot drop");
    }

    /// The maximum amount of work to do in the persist_source mfp_and_decode
    /// operator before yielding.
    pub fn storage_source_decode_fuel(&self) -> usize {
        STORAGE_SOURCE_DECODE_FUEL.get(self)
    }

    /// Overrides the value for "persist_reader_lease_duration".
    pub fn set_reader_lease_duration(&self, val: Duration) {
        self.set_config(&READER_LEASE_DURATION, val);
    }

    /// Overrides the value for "persist_rollup_threshold".
    pub fn set_rollup_threshold(&self, val: usize) {
        self.set_config(&ROLLUP_THRESHOLD, val);
    }

    /// Overrides the value for the "persist_next_listen_batch_retryer_*"
    /// configs.
    pub fn set_next_listen_batch_retryer(&self, val: RetryParameters) {
        self.set_config(
            &NEXT_LISTEN_BATCH_RETRYER_INITIAL_BACKOFF,
            val.initial_backoff,
        );
        self.set_config(&NEXT_LISTEN_BATCH_RETRYER_MULTIPLIER, val.multiplier);
        self.set_config(&NEXT_LISTEN_BATCH_RETRYER_CLAMP, val.clamp);
    }

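    /// Stops the `Compactor` from processing new compaction requests; requests
    /// received while disabled are dropped on the floor.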
    pub fn disable_compaction(&self) {
        tracing::info!("Disabling Persist Compaction");
        self.compaction_process_requests
            .store(false, std::sync::atomic::Ordering::Relaxed);
    }

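    /// Resumes processing of compaction requests by the `Compactor`.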
    pub fn enable_compaction(&self) {
        tracing::info!("Enabling Persist Compaction");
        self.compaction_process_requests
            .store(true, std::sync::atomic::Ordering::Relaxed);
    }

    /// Returns a new instance of [PersistConfig] for tests.
    pub fn new_for_tests() -> Self {
        use mz_build_info::DUMMY_BUILD_INFO;
        use mz_ore::now::SYSTEM_TIME;

        let mut cfg = Self::new_default_configs(&DUMMY_BUILD_INFO, SYSTEM_TIME.clone());
        cfg.hostname = "tests".into();
        cfg.isolated_runtime_worker_threads = async_runtime::TEST_THREADS;
        cfg
    }
}

#[allow(non_upper_case_globals)]
pub(crate) const MiB: usize = 1024 * 1024;

/// Adds the full set of all persist [Config]s.
///
/// TODO(cfg): Consider replacing this with a static global registry powered by
/// something like the `ctor` or `inventory` crate. This would involve managing
/// the footgun of a Config being linked into one binary but not the other.
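///
/// A minimal sketch (not from the original source) of how this is wired up,
/// mirroring what [PersistConfig::new_default_configs] does internally; the
/// `build_info` and `now_fn` bindings are assumed to be in scope:
///
/// ```ignore
/// // Register every persist dyncfg on a fresh ConfigSet, then hand it to
/// // PersistConfig so the values can be updated at runtime.
/// let configs = all_dyncfgs(ConfigSet::default());
/// let cfg = PersistConfig::new(&build_info, now_fn, configs);
/// ```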
pub fn all_dyncfgs(configs: ConfigSet) -> ConfigSet {
    mz_persist::cfg::all_dyn_configs(configs)
        .add(&crate::batch::BATCH_DELETE_ENABLED)
        .add(&crate::batch::BLOB_TARGET_SIZE)
        .add(&crate::batch::INLINE_WRITES_TOTAL_MAX_BYTES)
        .add(&crate::batch::INLINE_WRITES_SINGLE_MAX_BYTES)
        .add(&crate::batch::ENCODING_ENABLE_DICTIONARY)
        .add(&crate::batch::ENCODING_COMPRESSION_FORMAT)
        .add(&crate::batch::STRUCTURED_KEY_LOWER_LEN)
        .add(&crate::batch::MAX_RUN_LEN)
        .add(&crate::batch::MAX_RUNS)
        .add(&BLOB_OPERATION_TIMEOUT)
        .add(&BLOB_OPERATION_ATTEMPT_TIMEOUT)
        .add(&BLOB_CONNECT_TIMEOUT)
        .add(&BLOB_READ_TIMEOUT)
        .add(&crate::cfg::CONSENSUS_CONNECTION_POOL_MAX_SIZE)
        .add(&crate::cfg::CONSENSUS_CONNECTION_POOL_MAX_WAIT)
        .add(&crate::cfg::CONSENSUS_CONNECTION_POOL_TTL_STAGGER)
        .add(&crate::cfg::CONSENSUS_CONNECTION_POOL_TTL)
        .add(&crate::cfg::CRDB_CONNECT_TIMEOUT)
        .add(&crate::cfg::CRDB_TCP_USER_TIMEOUT)
        .add(&crate::cfg::USE_CRITICAL_SINCE_TXN)
        .add(&crate::cfg::USE_CRITICAL_SINCE_CATALOG)
        .add(&crate::cfg::USE_CRITICAL_SINCE_SOURCE)
        .add(&crate::cfg::USE_CRITICAL_SINCE_SNAPSHOT)
        .add(&crate::cfg::USE_GLOBAL_TXN_CACHE_SOURCE)
        .add(&BATCH_BUILDER_MAX_OUTSTANDING_PARTS)
        .add(&COMPACTION_HEURISTIC_MIN_INPUTS)
        .add(&COMPACTION_HEURISTIC_MIN_PARTS)
        .add(&COMPACTION_HEURISTIC_MIN_UPDATES)
        .add(&COMPACTION_MEMORY_BOUND_BYTES)
        .add(&GC_BLOB_DELETE_CONCURRENCY_LIMIT)
        .add(&STATE_VERSIONS_RECENT_LIVE_DIFFS_LIMIT)
        .add(&USAGE_STATE_FETCH_CONCURRENCY_LIMIT)
        .add(&crate::cli::admin::CATALOG_FORCE_COMPACTION_FUEL)
        .add(&crate::cli::admin::CATALOG_FORCE_COMPACTION_WAIT)
        .add(&crate::cli::admin::EXPRESSION_CACHE_FORCE_COMPACTION_FUEL)
        .add(&crate::cli::admin::EXPRESSION_CACHE_FORCE_COMPACTION_WAIT)
        .add(&crate::fetch::FETCH_SEMAPHORE_COST_ADJUSTMENT)
        .add(&crate::fetch::FETCH_SEMAPHORE_PERMIT_ADJUSTMENT)
        .add(&crate::fetch::VALIDATE_PART_BOUNDS_ON_READ)
        .add(&crate::fetch::OPTIMIZE_IGNORED_DATA_FETCH)
        .add(&crate::internal::cache::BLOB_CACHE_MEM_LIMIT_BYTES)
        .add(&crate::internal::cache::BLOB_CACHE_SCALE_WITH_THREADS)
        .add(&crate::internal::cache::BLOB_CACHE_SCALE_FACTOR_BYTES)
        .add(&crate::internal::compact::COMPACTION_MINIMUM_TIMEOUT)
        .add(&crate::internal::compact::COMPACTION_USE_MOST_RECENT_SCHEMA)
        .add(&crate::internal::compact::COMPACTION_CHECK_PROCESS_FLAG)
        .add(&crate::internal::machine::CLAIM_UNCLAIMED_COMPACTIONS)
        .add(&crate::internal::machine::CLAIM_COMPACTION_PERCENT)
        .add(&crate::internal::machine::CLAIM_COMPACTION_MIN_VERSION)
        .add(&crate::internal::machine::NEXT_LISTEN_BATCH_RETRYER_CLAMP)
        .add(&crate::internal::machine::NEXT_LISTEN_BATCH_RETRYER_FIXED_SLEEP)
        .add(&crate::internal::machine::NEXT_LISTEN_BATCH_RETRYER_INITIAL_BACKOFF)
        .add(&crate::internal::machine::NEXT_LISTEN_BATCH_RETRYER_MULTIPLIER)
        .add(&crate::internal::state::ROLLUP_THRESHOLD)
        .add(&crate::internal::state::ROLLUP_USE_ACTIVE_ROLLUP)
        .add(&crate::internal::state::GC_FALLBACK_THRESHOLD_MS)
        .add(&crate::internal::state::GC_USE_ACTIVE_GC)
        .add(&crate::internal::state::GC_MIN_VERSIONS)
        .add(&crate::internal::state::GC_MAX_VERSIONS)
        .add(&crate::internal::state::ROLLUP_FALLBACK_THRESHOLD_MS)
        .add(&crate::internal::state::ENABLE_INCREMENTAL_COMPACTION)
        .add(&crate::operators::STORAGE_SOURCE_DECODE_FUEL)
        .add(&crate::read::READER_LEASE_DURATION)
        .add(&crate::rpc::PUBSUB_CLIENT_ENABLED)
        .add(&crate::rpc::PUBSUB_PUSH_DIFF_ENABLED)
        .add(&crate::rpc::PUBSUB_SAME_PROCESS_DELEGATE_ENABLED)
        .add(&crate::rpc::PUBSUB_CONNECT_ATTEMPT_TIMEOUT)
        .add(&crate::rpc::PUBSUB_REQUEST_TIMEOUT)
        .add(&crate::rpc::PUBSUB_CONNECT_MAX_BACKOFF)
        .add(&crate::rpc::PUBSUB_CLIENT_SENDER_CHANNEL_SIZE)
        .add(&crate::rpc::PUBSUB_CLIENT_RECEIVER_CHANNEL_SIZE)
        .add(&crate::rpc::PUBSUB_SERVER_CONNECTION_CHANNEL_SIZE)
        .add(&crate::rpc::PUBSUB_STATE_CACHE_SHARD_REF_CHANNEL_SIZE)
        .add(&crate::rpc::PUBSUB_RECONNECT_BACKOFF)
        .add(&crate::stats::STATS_AUDIT_PERCENT)
        .add(&crate::stats::STATS_AUDIT_PANIC)
        .add(&crate::stats::STATS_BUDGET_BYTES)
        .add(&crate::stats::STATS_COLLECTION_ENABLED)
        .add(&crate::stats::STATS_FILTER_ENABLED)
        .add(&crate::stats::STATS_UNTRIMMABLE_COLUMNS_EQUALS)
        .add(&crate::stats::STATS_UNTRIMMABLE_COLUMNS_PREFIX)
        .add(&crate::stats::STATS_UNTRIMMABLE_COLUMNS_SUFFIX)
        .add(&crate::fetch::PART_DECODE_FORMAT)
        .add(&crate::write::COMBINE_INLINE_WRITES)
        .add(&crate::write::VALIDATE_PART_BOUNDS_ON_WRITE)
}

impl PersistConfig {
    pub(crate) const DEFAULT_FALLBACK_ROLLUP_THRESHOLD_MULTIPLIER: usize = 3;

    pub fn set_state_versions_recent_live_diffs_limit(&self, val: usize) {
        self.set_config(&STATE_VERSIONS_RECENT_LIVE_DIFFS_LIMIT, val);
    }
}

/// Sets the maximum size of the connection pool that is used by consensus.
///
/// Requires a restart of the process to take effect.
pub const CONSENSUS_CONNECTION_POOL_MAX_SIZE: Config<usize> = Config::new(
    "persist_consensus_connection_pool_max_size",
    50,
    "The maximum size the connection pool to Postgres/CRDB will grow to.",
);

/// Sets the maximum amount of time we'll wait to acquire a connection from
/// the connection pool.
///
/// Requires a restart of the process to take effect.
const CONSENSUS_CONNECTION_POOL_MAX_WAIT: Config<Duration> = Config::new(
    "persist_consensus_connection_pool_max_wait",
    Duration::from_secs(60),
    "The amount of time we'll wait for a connection to become available.",
);

/// The minimum TTL of a connection to Postgres/CRDB before it is proactively
/// terminated. Connections are routinely culled to balance load against the
/// downstream database.
const CONSENSUS_CONNECTION_POOL_TTL: Config<Duration> = Config::new(
    "persist_consensus_connection_pool_ttl",
    Duration::from_secs(300),
    "\
    The minimum TTL of a Consensus connection to Postgres/CRDB before it is \
    proactively terminated",
);

/// The minimum time between TTLing connections to Postgres/CRDB. This delay is
/// used to stagger reconnections to avoid stampedes and high tail latencies.
/// This value should be much less than `consensus_connection_pool_ttl` so that
/// reconnections are biased towards terminating the oldest connections first. A
/// value of `consensus_connection_pool_ttl /
/// consensus_connection_pool_max_size` is likely a good place to start so that
/// all connections are rotated when the pool is fully used.
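///
/// With the defaults in this file (a 300s pool TTL and a max pool size of 50),
/// that starting point works out to 300s / 50 = 6s, which matches the default
/// stagger below.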
const CONSENSUS_CONNECTION_POOL_TTL_STAGGER: Config<Duration> = Config::new(
    "persist_consensus_connection_pool_ttl_stagger",
    Duration::from_secs(6),
    "The minimum time between TTLing Consensus connections to Postgres/CRDB.",
);

/// The duration to wait for a Consensus Postgres/CRDB connection to be made
/// before retrying.
pub const CRDB_CONNECT_TIMEOUT: Config<Duration> = Config::new(
    "crdb_connect_timeout",
    Duration::from_secs(5),
    "The time to connect to CockroachDB before timing out and retrying.",
);

/// The TCP user timeout for a Consensus Postgres/CRDB connection. Specifies the
/// amount of time that transmitted data may remain unacknowledged before the
/// TCP connection is forcibly closed.
pub const CRDB_TCP_USER_TIMEOUT: Config<Duration> = Config::new(
    "crdb_tcp_user_timeout",
    Duration::from_secs(30),
    "\
    The TCP timeout for connections to CockroachDB. Specifies the amount of \
    time that transmitted data may remain unacknowledged before the TCP \
    connection is forcibly closed.",
);

/// Migrate the txns code to use the critical since when opening a new read handle.
pub const USE_CRITICAL_SINCE_TXN: Config<bool> = Config::new(
    "persist_use_critical_since_txn",
    true,
    "Use the critical since (instead of the overall since) when initializing a subscribe.",
);

/// Migrate the catalog to use the critical since when opening a new read handle.
pub const USE_CRITICAL_SINCE_CATALOG: Config<bool> = Config::new(
    "persist_use_critical_since_catalog",
    false,
    "Use the critical since (instead of the overall since) for the Persist-backed catalog.",
);

/// Migrate the persist source to use the critical since when opening a new read handle.
pub const USE_CRITICAL_SINCE_SOURCE: Config<bool> = Config::new(
    "persist_use_critical_since_source",
    false,
    "Use the critical since (instead of the overall since) in the Persist source.",
);

/// Migrate snapshots to use the critical since when opening a new read handle.
pub const USE_CRITICAL_SINCE_SNAPSHOT: Config<bool> = Config::new(
    "persist_use_critical_since_snapshot",
    false,
    "Use the critical since (instead of the overall since) when taking snapshots in the controller or in fast-path peeks.",
);

/// Migrate the persist source to use a process global txn cache.
pub const USE_GLOBAL_TXN_CACHE_SOURCE: Config<bool> = Config::new(
    "use_global_txn_cache_source",
    true,
    "Use the process global txn cache (instead of an operator local one) in the Persist source.",
);

/// The maximum number of parts (s3 blobs) that [crate::batch::BatchBuilder]
/// will pipeline before back-pressuring [crate::batch::BatchBuilder::add]
/// calls on previous ones finishing.
pub const BATCH_BUILDER_MAX_OUTSTANDING_PARTS: Config<usize> = Config::new(
    "persist_batch_builder_max_outstanding_parts",
    2,
    "The number of writes a batch builder can have outstanding before we slow down the writer.",
);

/// In Compactor::compact_and_apply, we do the compaction (don't skip it)
/// if the number of inputs is at least this many. Compaction is performed
/// if any of the heuristic criteria are met (they are OR'd).
pub const COMPACTION_HEURISTIC_MIN_INPUTS: Config<usize> = Config::new(
    "persist_compaction_heuristic_min_inputs",
    8,
    "Don't skip compaction if we have more than this many hollow batches as input.",
);

/// In Compactor::compact_and_apply, we do the compaction (don't skip it)
/// if the number of batch parts is at least this many. Compaction is performed
/// if any of the heuristic criteria are met (they are OR'd).
pub const COMPACTION_HEURISTIC_MIN_PARTS: Config<usize> = Config::new(
    "persist_compaction_heuristic_min_parts",
    8,
    "Don't skip compaction if we have more than this many parts as input.",
);

/// In Compactor::compact_and_apply, we do the compaction (don't skip it)
/// if the number of updates is at least this many. Compaction is performed
/// if any of the heuristic criteria are met (they are OR'd).
pub const COMPACTION_HEURISTIC_MIN_UPDATES: Config<usize> = Config::new(
    "persist_compaction_heuristic_min_updates",
    1024,
    "Don't skip compaction if we have more than this many updates as input.",
);

/// The upper bound on compaction's memory consumption. The value must be at
/// least 4*`blob_target_size`. Increasing this value beyond the minimum allows
/// compaction to merge together more runs at once, providing greater
/// consolidation of updates, at the cost of greater memory usage.
pub const COMPACTION_MEMORY_BOUND_BYTES: Config<usize> = Config::new(
    "persist_compaction_memory_bound_bytes",
    1024 * MiB,
    "Attempt to limit compaction to this amount of memory.",
);

/// The maximum number of concurrent blob deletes during garbage collection.
pub const GC_BLOB_DELETE_CONCURRENCY_LIMIT: Config<usize> = Config::new(
    "persist_gc_blob_delete_concurrency_limit",
    32,
    "Limit the number of concurrent deletes GC can perform to this threshold.",
);

/// The # of diffs to initially scan when fetching the latest consensus state, to
/// determine which requests go down the fast vs slow path. Should be large enough
/// to fetch all live diffs in the steady-state, and small enough to query Consensus
/// at high volume. Steady-state usage should accommodate readers that require
/// seqno-holds for reasonable amounts of time, which to start we say is 10s of minutes.
///
/// This value ought to be defined in terms of `NEED_ROLLUP_THRESHOLD` to approximate
/// when we expect rollups to be written and therefore when old states will be truncated
/// by GC.
pub const STATE_VERSIONS_RECENT_LIVE_DIFFS_LIMIT: Config<usize> = Config::new(
    "persist_state_versions_recent_live_diffs_limit",
    30 * 128,
    "Fetch this many diffs when fetching recent diffs.",
);

/// The maximum number of concurrent state fetches during usage computation.
pub const USAGE_STATE_FETCH_CONCURRENCY_LIMIT: Config<usize> = Config::new(
    "persist_usage_state_fetch_concurrency_limit",
    8,
547    "Limit the concurrency in of fetching in the perioding Persist-storage-usage calculation.",
);

impl PostgresClientKnobs for PersistConfig {
    fn connection_pool_max_size(&self) -> usize {
        CONSENSUS_CONNECTION_POOL_MAX_SIZE.get(self)
    }

    fn connection_pool_max_wait(&self) -> Option<Duration> {
        Some(CONSENSUS_CONNECTION_POOL_MAX_WAIT.get(self))
    }

    fn connection_pool_ttl(&self) -> Duration {
        CONSENSUS_CONNECTION_POOL_TTL.get(self)
    }

    fn connection_pool_ttl_stagger(&self) -> Duration {
        CONSENSUS_CONNECTION_POOL_TTL_STAGGER.get(self)
    }

    fn connect_timeout(&self) -> Duration {
        CRDB_CONNECT_TIMEOUT.get(self)
    }

    fn tcp_user_timeout(&self) -> Duration {
        CRDB_TCP_USER_TIMEOUT.get(self)
    }
}

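/// Tunable retry schedule parameters (fixed sleep, initial backoff, backoff
/// multiplier, and clamp), converted into a `Retry` by the impl below.
///
/// A minimal sketch (not from the original source) of overriding the
/// "persist_next_listen_batch_retryer_*" configs from a `PersistConfig` named
/// `cfg`; the specific values are illustrative only:
///
/// ```ignore
/// cfg.set_next_listen_batch_retryer(RetryParameters {
///     fixed_sleep: Duration::ZERO,
///     initial_backoff: Duration::from_millis(4),
///     multiplier: 2,
///     clamp: Duration::from_secs(1),
/// });
/// ```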
#[derive(Copy, Clone, Debug, Eq, PartialEq, Arbitrary, Serialize, Deserialize)]
pub struct RetryParameters {
    pub fixed_sleep: Duration,
    pub initial_backoff: Duration,
    pub multiplier: u32,
    pub clamp: Duration,
}

impl RetryParameters {
    pub(crate) fn into_retry(self, now: SystemTime) -> Retry {
        let seed = now
            .duration_since(UNIX_EPOCH)
            .map_or(0, |x| u64::from(x.subsec_nanos()));
        Retry {
            fixed_sleep: self.fixed_sleep,
            initial_backoff: self.initial_backoff,
            multiplier: self.multiplier,
            clamp_backoff: self.clamp,
            seed,
        }
    }
}

pub(crate) const BLOB_OPERATION_TIMEOUT: Config<Duration> = Config::new(
    "persist_blob_operation_timeout",
    Duration::from_secs(180),
    "Maximum time allowed for a network call, including retry attempts.",
);

pub(crate) const BLOB_OPERATION_ATTEMPT_TIMEOUT: Config<Duration> = Config::new(
    "persist_blob_operation_attempt_timeout",
    Duration::from_secs(90),
    "Maximum time allowed for a single network call.",
);

pub(crate) const BLOB_CONNECT_TIMEOUT: Config<Duration> = Config::new(
    "persist_blob_connect_timeout",
    Duration::from_secs(7),
    "Maximum time to wait for a socket connection to be made.",
);

pub(crate) const BLOB_READ_TIMEOUT: Config<Duration> = Config::new(
    "persist_blob_read_timeout",
    Duration::from_secs(10),
    "Maximum time to wait to read the first byte of a response, including connection time.",
);

impl BlobKnobs for PersistConfig {
    fn operation_timeout(&self) -> Duration {
        BLOB_OPERATION_TIMEOUT.get(self)
    }

    fn operation_attempt_timeout(&self) -> Duration {
        BLOB_OPERATION_ATTEMPT_TIMEOUT.get(self)
    }

    fn connect_timeout(&self) -> Duration {
        BLOB_CONNECT_TIMEOUT.get(self)
    }

    fn read_timeout(&self) -> Duration {
        BLOB_READ_TIMEOUT.get(self)
    }

    fn is_cc_active(&self) -> bool {
        self.is_cc_active
    }
}

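/// Returns an error if `data_version` (the version of code that encoded some
/// persisted state) is too far ahead of `code_version` (the version of code
/// currently running) to be safely decoded. See the long comment on
/// `check_data_version_with_self_managed_versions` below for the full reasoning.
///
/// A minimal sketch (not from the original source) of what the current rules
/// allow, given the `SELF_MANAGED_VERSIONS` defined at the top of this file:
///
/// ```ignore
/// // Reading state written by an older version is always allowed.
/// assert!(check_data_version(&Version::new(0, 147, 0), &Version::new(0, 130, 5)).is_ok());
/// // One minor version of forward compatibility is allowed.
/// assert!(check_data_version(&Version::new(0, 140, 0), &Version::new(0, 141, 7)).is_ok());
/// // ...but two minor versions ahead is rejected.
/// assert!(check_data_version(&Version::new(0, 140, 0), &Version::new(0, 142, 0)).is_err());
/// // Consecutive Self-Managed releases (25.1 at 0.130.x, 25.2 at 0.147.x) may overlap.
/// assert!(check_data_version(&Version::new(0, 130, 1), &Version::new(0, 147, 0)).is_ok());
/// ```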
pub fn check_data_version(code_version: &Version, data_version: &Version) -> Result<(), String> {
    check_data_version_with_self_managed_versions(code_version, data_version, SELF_MANAGED_VERSIONS)
}

// If persist gets some encoded ProtoState from the future (e.g. two versions of
// code are running simultaneously against the same shard), it might have a
// field that the current code doesn't know about. This would be silently
// discarded at proto decode time. Unknown Fields [1] are a tool we can use in
// the future to help deal with this, but in the short-term, it's best to keep
// the persist read-modify-CaS loop simple for as long as we can get away with
// it (i.e. until we have to offer the ability to do rollbacks).
//
// [1]: https://developers.google.com/protocol-buffers/docs/proto3#unknowns
//
// To detect the bad situation and disallow it, we tag every version of state
// written to consensus with the version of code used to encode it. Then at
// decode time, we're able to compare the current version against any we receive
// and assert as necessary.
//
// Initially we allow any from the past (permanent backward compatibility) and
// one minor version into the future (forward compatibility). This allows us to
// run two versions concurrently for rolling upgrades. We'll have to revisit
// this logic if/when we start using major versions other than 0.
//
// We could do the same for blob data, but it shouldn't be necessary. Any blob
// data we read is going to be because we fetched it using a pointer stored in
// some persist state. If we can handle the state, we can handle the blobs it
// references, too.
pub(crate) fn check_data_version_with_self_managed_versions(
    code_version: &Version,
    data_version: &Version,
    self_managed_versions: &[Version],
) -> Result<(), String> {
    // Allow upgrades specifically between consecutive Self-Managed releases.
    let base_code_version = Version {
        patch: 0,
        ..code_version.clone()
    };
    let base_data_version = Version {
        patch: 0,
        ..data_version.clone()
    };
    if data_version >= code_version {
        for window in self_managed_versions.windows(2) {
            if base_code_version == window[0] && base_data_version <= window[1] {
                return Ok(());
            }
        }

        if let Some(last) = self_managed_versions.last() {
            if base_code_version == *last
                // kind of arbitrary, but just ensure we don't accidentally
                // upgrade too far (the previous check should ensure that a
                // new version won't take over from a too-old previous
                // version, but we want to make sure the other side also
                // doesn't get confused)
                && base_data_version
                    .minor
                    .saturating_sub(base_code_version.minor)
                    < 40
            {
                return Ok(());
            }
        }
    }

    // Allow one minor version of forward compatibility. We could avoid the
    // clone with some nested comparisons of the semver fields, but this code
    // isn't particularly performance sensitive and I find this impl easier to
    // reason about.
    let max_allowed_data_version = Version::new(
        code_version.major,
        code_version.minor.saturating_add(1),
        u64::MAX,
    );

    if &max_allowed_data_version < data_version {
        Err(format!(
            "{code_version} received persist state from the future {data_version}",
        ))
    } else {
        Ok(())
    }
}