mz_persist_client/cfg.rs

// Copyright Materialize, Inc. and contributors. All rights reserved.
//
// Use of this software is governed by the Business Source License
// included in the LICENSE file.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0.

#![allow(missing_docs)]

//! The tunable knobs for persist.

use std::sync::Arc;
use std::sync::atomic::AtomicBool;
use std::time::{Duration, SystemTime, UNIX_EPOCH};

use mz_build_info::BuildInfo;
use mz_dyncfg::{Config, ConfigDefault, ConfigSet, ConfigUpdates};
use mz_ore::instrument;
use mz_ore::now::NowFn;
use mz_persist::cfg::BlobKnobs;
use mz_persist::retry::Retry;
use mz_postgres_client::PostgresClientKnobs;
use proptest_derive::Arbitrary;
use semver::Version;
use serde::{Deserialize, Serialize};
use tokio::sync::watch;

use crate::async_runtime;
use crate::internal::machine::{
    NEXT_LISTEN_BATCH_RETRYER_CLAMP, NEXT_LISTEN_BATCH_RETRYER_INITIAL_BACKOFF,
    NEXT_LISTEN_BATCH_RETRYER_MULTIPLIER,
};
use crate::internal::state::ROLLUP_THRESHOLD;
use crate::operators::STORAGE_SOURCE_DECODE_FUEL;
use crate::read::READER_LEASE_DURATION;

// Ignores the patch version
const SELF_MANAGED_VERSIONS: &[Version; 2] = &[
    // 25.1
    Version::new(0, 130, 0),
    // 25.2
    Version::new(0, 147, 0),
];

/// The tunable knobs for persist.
///
/// Tuning inputs:
/// - A larger blob_target_size (capped at KEY_VAL_DATA_MAX_LEN) results in
///   fewer entries in consensus state. Before we have compaction and/or
///   incremental state, it is already growing without bound, so this is a
///   concern. OTOH, for any "reasonable" size (> 100MiB?) of blob_target_size,
///   it seems we'd end up with a pretty tremendous amount of data in the shard
///   before this became a real issue.
/// - A larger blob_target_size will result in fewer s3 operations, which are
///   charged per operation. (Hmm, maybe not if we're charged per call in a
///   multipart op. The S3Blob impl already chunks things at 8MiB.)
/// - A smaller blob_target_size will result in more even memory usage in
///   readers.
/// - A larger batch_builder_max_outstanding_parts increases throughput (to a
///   point).
/// - A smaller batch_builder_max_outstanding_parts provides a bound on the
///   amount of memory used by a writer.
/// - A larger compaction_heuristic_min_inputs means state size is larger.
/// - A smaller compaction_heuristic_min_inputs means more compactions happen
///   (higher write amp).
/// - A larger compaction_heuristic_min_updates means more consolidations are
///   discovered while reading a snapshot (higher read amp and higher space
///   amp).
/// - A smaller compaction_heuristic_min_updates means more compactions happen
///   (higher write amp).
///
/// Tuning logic:
/// - blob_target_size was initially selected to be an exact multiple of 8MiB
///   (the s3 multipart size) that was in the same neighborhood as our initial
///   max throughput (~250MiB).
/// - batch_builder_max_outstanding_parts was initially selected to be as small
///   as possible without harming pipelining. 0 means no pipelining, 1 is full
///   pipelining as long as generating data takes less time than writing to s3
///   (hopefully a fair assumption), 2 is a little extra slop on top of 1.
/// - compaction_heuristic_min_inputs was set by running the open-loop benchmark
///   with batches of size 10,240 bytes (selected to be small but such that the
///   overhead of our columnar encoding format was less than 10%) and manually
///   increased until the write amp stopped going down. This becomes much less
///   important once we have incremental state. The initial value is a
///   placeholder and should be revisited at some point.
/// - compaction_heuristic_min_updates was set via a thought experiment. This is
///   an `O(n*log(n))` upper bound on the number of unconsolidated updates that
///   would be consolidated if we compacted as the in-mem Spine does. The
///   initial value is a placeholder and should be revisited at some point.
///
/// TODO: Move these tuning notes into SessionVar descriptions once we have
/// SystemVars for most of these.
//
// TODO: The configs left here don't react dynamically to changes. Move as many
// of them to DynamicConfig as possible.
#[derive(Debug, Clone)]
pub struct PersistConfig {
    /// Info about which version of the code is running.
    pub build_version: Version,
    /// An opaque string describing the host of this persist client.
    /// Stored in state and used for debugging.
    pub hostname: String,
    /// Whether this persist instance is running in a "cc" sized cluster.
    pub is_cc_active: bool,
    /// Memory limit of the process, if known.
    pub announce_memory_limit: Option<usize>,
    /// A clock to use for all leasing and other non-debugging use.
    pub now: NowFn,
    /// Persist [Config]s that can change value dynamically within the lifetime
    /// of a process.
    ///
    /// TODO(cfg): Entirely replace dynamic with this.
    pub configs: Arc<ConfigSet>,
    /// Indicates whether `configs` has been synced at least once with an
    /// upstream source.
    configs_synced_once: Arc<watch::Sender<bool>>,
    /// Whether to physically and logically compact batches in blob storage.
    pub compaction_enabled: bool,
    /// Whether the `Compactor` will process compaction requests, or drop them on the floor.
    pub compaction_process_requests: Arc<AtomicBool>,
    /// In Compactor::compact_and_apply_background, the maximum number of concurrent
    /// compaction requests that can execute for a given shard.
    pub compaction_concurrency_limit: usize,
    /// In Compactor::compact_and_apply_background, the maximum number of pending
    /// compaction requests to queue.
    pub compaction_queue_size: usize,
    /// In Compactor::compact_and_apply_background, how many updates to encode or
    /// decode before voluntarily yielding the task.
    pub compaction_yield_after_n_updates: usize,
    /// Length of time after a writer's last operation after which the writer
    /// may be expired.
    pub writer_lease_duration: Duration,
    /// Length of time between a critical handle's successive calls to downgrade its since.
    pub critical_downgrade_interval: Duration,
    /// Number of worker threads to create for the [`crate::IsolatedRuntime`]; defaults to the
    /// number of hardware threads (logical CPUs).
    pub isolated_runtime_worker_threads: usize,
}

// Impl Deref to ConfigSet for convenience of accessing the dynamic configs.
impl std::ops::Deref for PersistConfig {
    type Target = ConfigSet;
    fn deref(&self) -> &Self::Target {
        &self.configs
    }
}
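
// Because `PersistConfig` derefs to its `ConfigSet`, individual dyncfgs can be
// read directly against a config handle. A minimal sketch (assuming `cfg` is a
// `PersistConfig`; this mirrors the `.get(self)` pattern used by the knob impls
// below):
//
//     let blob_timeout = BLOB_OPERATION_TIMEOUT.get(&cfg);
//     let lease = READER_LEASE_DURATION.get(&cfg);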

impl PersistConfig {
    /// Returns a new instance of [PersistConfig] with default tuning and
    /// default ConfigSet.
    pub fn new_default_configs(build_info: &BuildInfo, now: NowFn) -> Self {
        Self::new(build_info, now, all_dyncfgs(ConfigSet::default()))
    }

    /// Returns a new instance of [PersistConfig] with default tuning and the
    /// specified ConfigSet.
    pub fn new(build_info: &BuildInfo, now: NowFn, configs: ConfigSet) -> Self {
        // Escape hatch in case we need to disable compaction.
        let compaction_disabled = mz_ore::env::is_var_truthy("MZ_PERSIST_COMPACTION_DISABLED");

        // We create receivers on demand, so we drop the initial receiver.
        let (configs_synced_once, _) = watch::channel(false);

        Self {
            build_version: build_info.semver_version(),
            is_cc_active: false,
            announce_memory_limit: None,
            now,
            configs: Arc::new(configs),
            configs_synced_once: Arc::new(configs_synced_once),
            compaction_enabled: !compaction_disabled,
            compaction_process_requests: Arc::new(AtomicBool::new(true)),
            compaction_concurrency_limit: 5,
            compaction_queue_size: 20,
            compaction_yield_after_n_updates: 100_000,
            writer_lease_duration: 60 * Duration::from_secs(60),
            critical_downgrade_interval: Duration::from_secs(30),
            isolated_runtime_worker_threads: num_cpus::get(),
            // TODO: This doesn't work with the process orchestrator. Instead,
            // separate --log-prefix into --service-name and --enable-log-prefix
            // options, where the first is always provided and the second is
            // conditionally enabled by the process orchestrator.
            hostname: {
                use std::fmt::Write;
                let mut name = std::env::var("HOSTNAME").unwrap_or_else(|_| "unknown".to_owned());
                write!(&mut name, " {}", build_info.version)
                    .expect("writing to string should not fail");
                name
            },
        }
    }

    pub(crate) fn set_config<T: ConfigDefault>(&self, cfg: &Config<T>, val: T) {
        let mut updates = ConfigUpdates::default();
        updates.add(cfg, val);
        updates.apply(self)
    }

    /// Applies the provided updates to this configuration.
    ///
    /// You should prefer calling this method over mutating `self.configs`
    /// directly, so that [`Self::configs_synced_once`] can be properly
    /// maintained.
    pub fn apply_from(&self, updates: &ConfigUpdates) {
        updates.apply(&self.configs);
        self.configs_synced_once.send_replace(true);
    }

    /// Resolves when `configs` has been synced at least once with an upstream
    /// source, i.e., via [`Self::apply_from`].
    ///
    /// If `configs` has already been synced once at the time the method is
    /// called, resolves immediately.
    ///
    /// Useful in conjunction with configuration parameters that cannot be
    /// dynamically updated once set (e.g., PubSub).
    #[instrument(level = "info")]
    pub async fn configs_synced_once(&self) {
        self.configs_synced_once
            .subscribe()
            .wait_for(|synced| *synced)
            .await
            .expect("we have a borrow on sender so it cannot drop");
    }
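
    // A sketch of the intended update flow (`cfg` is an assumed `PersistConfig`
    // handle; the values are illustrative): build a `ConfigUpdates`, apply it via
    // `apply_from` rather than mutating `configs` directly, and gate one-shot
    // consumers (e.g. PubSub setup) on `configs_synced_once`:
    //
    //     let mut updates = ConfigUpdates::default();
    //     updates.add(&READER_LEASE_DURATION, Duration::from_secs(120));
    //     cfg.apply_from(&updates);
    //
    //     // Elsewhere, before acting on parameters that can only be applied once:
    //     cfg.configs_synced_once().await;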

    /// The maximum amount of work to do in the persist_source mfp_and_decode
    /// operator before yielding.
    pub fn storage_source_decode_fuel(&self) -> usize {
        STORAGE_SOURCE_DECODE_FUEL.get(self)
    }

    /// Overrides the value for "persist_reader_lease_duration".
    pub fn set_reader_lease_duration(&self, val: Duration) {
        self.set_config(&READER_LEASE_DURATION, val);
    }

    /// Overrides the value for "persist_rollup_threshold".
    pub fn set_rollup_threshold(&self, val: usize) {
        self.set_config(&ROLLUP_THRESHOLD, val);
    }

    /// Overrides the value for the "persist_next_listen_batch_retryer_*"
    /// configs.
    pub fn set_next_listen_batch_retryer(&self, val: RetryParameters) {
        self.set_config(
            &NEXT_LISTEN_BATCH_RETRYER_INITIAL_BACKOFF,
            val.initial_backoff,
        );
        self.set_config(&NEXT_LISTEN_BATCH_RETRYER_MULTIPLIER, val.multiplier);
        self.set_config(&NEXT_LISTEN_BATCH_RETRYER_CLAMP, val.clamp);
    }

    pub fn disable_compaction(&self) {
        tracing::info!("Disabling Persist Compaction");
        self.compaction_process_requests
            .store(false, std::sync::atomic::Ordering::Relaxed);
    }

    pub fn enable_compaction(&self) {
        tracing::info!("Enabling Persist Compaction");
        self.compaction_process_requests
            .store(true, std::sync::atomic::Ordering::Relaxed);
    }

    /// Returns a new instance of [PersistConfig] for tests.
    pub fn new_for_tests() -> Self {
        use mz_build_info::DUMMY_BUILD_INFO;
        use mz_ore::now::SYSTEM_TIME;

        let mut cfg = Self::new_default_configs(&DUMMY_BUILD_INFO, SYSTEM_TIME.clone());
        cfg.hostname = "tests".into();
        cfg.isolated_runtime_worker_threads = async_runtime::TEST_THREADS;
        cfg
    }
}

#[allow(non_upper_case_globals)]
pub(crate) const MiB: usize = 1024 * 1024;

/// Adds the full set of all persist [Config]s.
///
/// TODO(cfg): Consider replacing this with a static global registry powered by
/// something like the `ctor` or `inventory` crate. This would involve managing
/// the footgun of a Config being linked into one binary but not the other.
pub fn all_dyncfgs(configs: ConfigSet) -> ConfigSet {
    mz_persist::cfg::all_dyn_configs(configs)
        .add(&crate::batch::BATCH_DELETE_ENABLED)
        .add(&crate::batch::BLOB_TARGET_SIZE)
        .add(&crate::batch::INLINE_WRITES_TOTAL_MAX_BYTES)
        .add(&crate::batch::INLINE_WRITES_SINGLE_MAX_BYTES)
        .add(&crate::batch::ENCODING_ENABLE_DICTIONARY)
        .add(&crate::batch::ENCODING_COMPRESSION_FORMAT)
        .add(&crate::batch::STRUCTURED_KEY_LOWER_LEN)
        .add(&crate::batch::MAX_RUN_LEN)
        .add(&crate::batch::MAX_RUNS)
        .add(&BLOB_OPERATION_TIMEOUT)
        .add(&BLOB_OPERATION_ATTEMPT_TIMEOUT)
        .add(&BLOB_CONNECT_TIMEOUT)
        .add(&BLOB_READ_TIMEOUT)
        .add(&crate::cfg::CONSENSUS_CONNECTION_POOL_MAX_SIZE)
        .add(&crate::cfg::CONSENSUS_CONNECTION_POOL_MAX_WAIT)
        .add(&crate::cfg::CONSENSUS_CONNECTION_POOL_TTL_STAGGER)
        .add(&crate::cfg::CONSENSUS_CONNECTION_POOL_TTL)
        .add(&crate::cfg::CRDB_CONNECT_TIMEOUT)
        .add(&crate::cfg::CRDB_TCP_USER_TIMEOUT)
        .add(&crate::cfg::USE_CRITICAL_SINCE_TXN)
        .add(&crate::cfg::USE_CRITICAL_SINCE_CATALOG)
        .add(&crate::cfg::USE_CRITICAL_SINCE_SOURCE)
        .add(&crate::cfg::USE_CRITICAL_SINCE_SNAPSHOT)
        .add(&BATCH_BUILDER_MAX_OUTSTANDING_PARTS)
        .add(&COMPACTION_HEURISTIC_MIN_INPUTS)
        .add(&COMPACTION_HEURISTIC_MIN_PARTS)
        .add(&COMPACTION_HEURISTIC_MIN_UPDATES)
        .add(&COMPACTION_MEMORY_BOUND_BYTES)
        .add(&GC_BLOB_DELETE_CONCURRENCY_LIMIT)
        .add(&STATE_VERSIONS_RECENT_LIVE_DIFFS_LIMIT)
        .add(&USAGE_STATE_FETCH_CONCURRENCY_LIMIT)
        .add(&crate::cli::admin::CATALOG_FORCE_COMPACTION_FUEL)
        .add(&crate::cli::admin::CATALOG_FORCE_COMPACTION_WAIT)
        .add(&crate::cli::admin::EXPRESSION_CACHE_FORCE_COMPACTION_FUEL)
        .add(&crate::cli::admin::EXPRESSION_CACHE_FORCE_COMPACTION_WAIT)
        .add(&crate::fetch::FETCH_SEMAPHORE_COST_ADJUSTMENT)
        .add(&crate::fetch::FETCH_SEMAPHORE_PERMIT_ADJUSTMENT)
        .add(&crate::fetch::VALIDATE_PART_BOUNDS_ON_READ)
        .add(&crate::fetch::OPTIMIZE_IGNORED_DATA_FETCH)
        .add(&crate::internal::cache::BLOB_CACHE_MEM_LIMIT_BYTES)
        .add(&crate::internal::cache::BLOB_CACHE_SCALE_WITH_THREADS)
        .add(&crate::internal::cache::BLOB_CACHE_SCALE_FACTOR_BYTES)
        .add(&crate::internal::compact::COMPACTION_MINIMUM_TIMEOUT)
        .add(&crate::internal::compact::COMPACTION_CHECK_PROCESS_FLAG)
        .add(&crate::internal::machine::CLAIM_UNCLAIMED_COMPACTIONS)
        .add(&crate::internal::machine::CLAIM_COMPACTION_PERCENT)
        .add(&crate::internal::machine::CLAIM_COMPACTION_MIN_VERSION)
        .add(&crate::internal::machine::NEXT_LISTEN_BATCH_RETRYER_CLAMP)
        .add(&crate::internal::machine::NEXT_LISTEN_BATCH_RETRYER_FIXED_SLEEP)
        .add(&crate::internal::machine::NEXT_LISTEN_BATCH_RETRYER_INITIAL_BACKOFF)
        .add(&crate::internal::machine::NEXT_LISTEN_BATCH_RETRYER_MULTIPLIER)
        .add(&crate::internal::state::ROLLUP_THRESHOLD)
        .add(&crate::internal::state::ROLLUP_USE_ACTIVE_ROLLUP)
        .add(&crate::internal::state::GC_FALLBACK_THRESHOLD_MS)
        .add(&crate::internal::state::GC_USE_ACTIVE_GC)
        .add(&crate::internal::state::GC_MIN_VERSIONS)
        .add(&crate::internal::state::GC_MAX_VERSIONS)
        .add(&crate::internal::state::ROLLUP_FALLBACK_THRESHOLD_MS)
        .add(&crate::internal::state::ENABLE_INCREMENTAL_COMPACTION)
        .add(&crate::operators::STORAGE_SOURCE_DECODE_FUEL)
        .add(&crate::read::READER_LEASE_DURATION)
        .add(&crate::rpc::PUBSUB_CLIENT_ENABLED)
        .add(&crate::rpc::PUBSUB_PUSH_DIFF_ENABLED)
        .add(&crate::rpc::PUBSUB_SAME_PROCESS_DELEGATE_ENABLED)
        .add(&crate::rpc::PUBSUB_CONNECT_ATTEMPT_TIMEOUT)
        .add(&crate::rpc::PUBSUB_REQUEST_TIMEOUT)
        .add(&crate::rpc::PUBSUB_CONNECT_MAX_BACKOFF)
        .add(&crate::rpc::PUBSUB_CLIENT_SENDER_CHANNEL_SIZE)
        .add(&crate::rpc::PUBSUB_CLIENT_RECEIVER_CHANNEL_SIZE)
        .add(&crate::rpc::PUBSUB_SERVER_CONNECTION_CHANNEL_SIZE)
        .add(&crate::rpc::PUBSUB_STATE_CACHE_SHARD_REF_CHANNEL_SIZE)
        .add(&crate::rpc::PUBSUB_RECONNECT_BACKOFF)
        .add(&crate::stats::STATS_AUDIT_PERCENT)
        .add(&crate::stats::STATS_AUDIT_PANIC)
        .add(&crate::stats::STATS_BUDGET_BYTES)
        .add(&crate::stats::STATS_COLLECTION_ENABLED)
        .add(&crate::stats::STATS_FILTER_ENABLED)
        .add(&crate::stats::STATS_UNTRIMMABLE_COLUMNS_EQUALS)
        .add(&crate::stats::STATS_UNTRIMMABLE_COLUMNS_PREFIX)
        .add(&crate::stats::STATS_UNTRIMMABLE_COLUMNS_SUFFIX)
        .add(&crate::fetch::PART_DECODE_FORMAT)
        .add(&crate::write::COMBINE_INLINE_WRITES)
        .add(&crate::write::VALIDATE_PART_BOUNDS_ON_WRITE)
}
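
// A small sketch of how this registry is typically consumed, mirroring
// `PersistConfig::new_default_configs` above (`build_info` and `now` are
// assumed to come from the embedding process):
//
//     let configs = all_dyncfgs(ConfigSet::default());
//     let cfg = PersistConfig::new(&build_info, now, configs);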

impl PersistConfig {
    pub(crate) const DEFAULT_FALLBACK_ROLLUP_THRESHOLD_MULTIPLIER: usize = 3;

    pub fn set_state_versions_recent_live_diffs_limit(&self, val: usize) {
        self.set_config(&STATE_VERSIONS_RECENT_LIVE_DIFFS_LIMIT, val);
    }
}

/// Sets the maximum size of the connection pool that is used by consensus.
///
/// Requires a restart of the process to take effect.
pub const CONSENSUS_CONNECTION_POOL_MAX_SIZE: Config<usize> = Config::new(
    "persist_consensus_connection_pool_max_size",
    50,
    "The maximum size the connection pool to Postgres/CRDB will grow to.",
);

/// Sets the maximum amount of time we'll wait to acquire a connection from
/// the connection pool.
///
/// Requires a restart of the process to take effect.
const CONSENSUS_CONNECTION_POOL_MAX_WAIT: Config<Duration> = Config::new(
    "persist_consensus_connection_pool_max_wait",
    Duration::from_secs(60),
    "The amount of time we'll wait for a connection to become available.",
);

/// The minimum TTL of a connection to Postgres/CRDB before it is proactively
/// terminated. Connections are routinely culled to balance load against the
/// downstream database.
const CONSENSUS_CONNECTION_POOL_TTL: Config<Duration> = Config::new(
    "persist_consensus_connection_pool_ttl",
    Duration::from_secs(300),
    "\
    The minimum TTL of a Consensus connection to Postgres/CRDB before it is \
    proactively terminated",
);

/// The minimum time between TTLing connections to Postgres/CRDB. This delay is
/// used to stagger reconnections to avoid stampedes and high tail latencies.
/// This value should be much less than `consensus_connection_pool_ttl` so that
/// reconnections are biased towards terminating the oldest connections first. A
/// value of `consensus_connection_pool_ttl /
/// consensus_connection_pool_max_size` is likely a good place to start so that
/// all connections are rotated when the pool is fully used.
const CONSENSUS_CONNECTION_POOL_TTL_STAGGER: Config<Duration> = Config::new(
    "persist_consensus_connection_pool_ttl_stagger",
    Duration::from_secs(6),
    "The minimum time between TTLing Consensus connections to Postgres/CRDB.",
);
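
// With the defaults above, that rule of thumb works out to 300s (pool TTL) /
// 50 (pool max size) = 6s, which matches the default stagger.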

/// The duration to wait for a Consensus Postgres/CRDB connection to be made
/// before retrying.
pub const CRDB_CONNECT_TIMEOUT: Config<Duration> = Config::new(
    "crdb_connect_timeout",
    Duration::from_secs(5),
    "The time to connect to CockroachDB before timing out and retrying.",
);

/// The TCP user timeout for a Consensus Postgres/CRDB connection. Specifies the
/// amount of time that transmitted data may remain unacknowledged before the
/// TCP connection is forcibly closed.
pub const CRDB_TCP_USER_TIMEOUT: Config<Duration> = Config::new(
    "crdb_tcp_user_timeout",
    Duration::from_secs(30),
    "\
    The TCP timeout for connections to CockroachDB. Specifies the amount of \
    time that transmitted data may remain unacknowledged before the TCP \
    connection is forcibly closed.",
);

/// Migrate the txns code to use the critical since when opening a new read handle.
pub const USE_CRITICAL_SINCE_TXN: Config<bool> = Config::new(
    "persist_use_critical_since_txn",
    true,
    "Use the critical since (instead of the overall since) when initializing a subscribe.",
);

/// Migrate the catalog to use the critical since when opening a new read handle.
pub const USE_CRITICAL_SINCE_CATALOG: Config<bool> = Config::new(
    "persist_use_critical_since_catalog",
    false,
    "Use the critical since (instead of the overall since) for the Persist-backed catalog.",
);

/// Migrate the persist source to use the critical since when opening a new read handle.
pub const USE_CRITICAL_SINCE_SOURCE: Config<bool> = Config::new(
    "persist_use_critical_since_source",
    false,
    "Use the critical since (instead of the overall since) in the Persist source.",
);

/// Migrate snapshots to use the critical since when opening a new read handle.
pub const USE_CRITICAL_SINCE_SNAPSHOT: Config<bool> = Config::new(
    "persist_use_critical_since_snapshot",
    false,
    "Use the critical since (instead of the overall since) when taking snapshots in the controller or in fast-path peeks.",
);

/// The maximum number of parts (s3 blobs) that [crate::batch::BatchBuilder]
/// will pipeline before back-pressuring [crate::batch::BatchBuilder::add]
/// calls on previous ones finishing.
pub const BATCH_BUILDER_MAX_OUTSTANDING_PARTS: Config<usize> = Config::new(
    "persist_batch_builder_max_outstanding_parts",
    2,
    "The number of writes a batch builder can have outstanding before we slow down the writer.",
);

/// In Compactor::compact_and_apply, we do the compaction (don't skip it)
/// if the number of inputs is at least this many. Compaction is performed
/// if any of the heuristic criteria are met (they are OR'd).
pub const COMPACTION_HEURISTIC_MIN_INPUTS: Config<usize> = Config::new(
    "persist_compaction_heuristic_min_inputs",
    8,
    "Don't skip compaction if we have more than this many hollow batches as input.",
);

/// In Compactor::compact_and_apply, we do the compaction (don't skip it)
/// if the number of batch parts is at least this many. Compaction is performed
/// if any of the heuristic criteria are met (they are OR'd).
pub const COMPACTION_HEURISTIC_MIN_PARTS: Config<usize> = Config::new(
    "persist_compaction_heuristic_min_parts",
    8,
    "Don't skip compaction if we have more than this many parts as input.",
);

/// In Compactor::compact_and_apply, we do the compaction (don't skip it)
/// if the number of updates is at least this many. Compaction is performed
/// if any of the heuristic criteria are met (they are OR'd).
pub const COMPACTION_HEURISTIC_MIN_UPDATES: Config<usize> = Config::new(
    "persist_compaction_heuristic_min_updates",
    1024,
    "Don't skip compaction if we have more than this many updates as input.",
);

/// The upper bound on compaction's memory consumption. The value must be at
/// least 4*`blob_target_size`. Increasing this value beyond the minimum allows
/// compaction to merge together more runs at once, providing greater
/// consolidation of updates, at the cost of greater memory usage.
pub const COMPACTION_MEMORY_BOUND_BYTES: Config<usize> = Config::new(
    "persist_compaction_memory_bound_bytes",
    1024 * MiB,
    "Attempt to limit compaction to this amount of memory.",
);

/// The maximum number of concurrent blob deletes during garbage collection.
pub const GC_BLOB_DELETE_CONCURRENCY_LIMIT: Config<usize> = Config::new(
    "persist_gc_blob_delete_concurrency_limit",
    32,
    "Limit the number of concurrent deletes GC can perform to this threshold.",
);

/// The # of diffs to initially scan when fetching the latest consensus state, to
/// determine which requests go down the fast vs slow path. Should be large enough
/// to fetch all live diffs in the steady-state, and small enough to query Consensus
/// at high volume. Steady-state usage should accommodate readers that require
/// seqno-holds for reasonable amounts of time, which to start we say is 10s of minutes.
///
/// This value ought to be defined in terms of `NEED_ROLLUP_THRESHOLD` to approximate
/// when we expect rollups to be written and therefore when old states will be truncated
/// by GC.
pub const STATE_VERSIONS_RECENT_LIVE_DIFFS_LIMIT: Config<usize> = Config::new(
    "persist_state_versions_recent_live_diffs_limit",
    30 * 128,
    "Fetch this many diffs when fetching recent diffs.",
);

/// The maximum number of concurrent state fetches during usage computation.
pub const USAGE_STATE_FETCH_CONCURRENCY_LIMIT: Config<usize> = Config::new(
    "persist_usage_state_fetch_concurrency_limit",
    8,
    "Limit the concurrency of state fetches in the periodic Persist storage-usage calculation.",
);

impl PostgresClientKnobs for PersistConfig {
    fn connection_pool_max_size(&self) -> usize {
        CONSENSUS_CONNECTION_POOL_MAX_SIZE.get(self)
    }

    fn connection_pool_max_wait(&self) -> Option<Duration> {
        Some(CONSENSUS_CONNECTION_POOL_MAX_WAIT.get(self))
    }

    fn connection_pool_ttl(&self) -> Duration {
        CONSENSUS_CONNECTION_POOL_TTL.get(self)
    }

    fn connection_pool_ttl_stagger(&self) -> Duration {
        CONSENSUS_CONNECTION_POOL_TTL_STAGGER.get(self)
    }

    fn connect_timeout(&self) -> Duration {
        CRDB_CONNECT_TIMEOUT.get(self)
    }

    fn tcp_user_timeout(&self) -> Duration {
        CRDB_TCP_USER_TIMEOUT.get(self)
    }
}

#[derive(Copy, Clone, Debug, Eq, PartialEq, Arbitrary, Serialize, Deserialize)]
pub struct RetryParameters {
    pub fixed_sleep: Duration,
    pub initial_backoff: Duration,
    pub multiplier: u32,
    pub clamp: Duration,
}

impl RetryParameters {
    pub(crate) fn into_retry(self, now: SystemTime) -> Retry {
        let seed = now
            .duration_since(UNIX_EPOCH)
            .map_or(0, |x| u64::from(x.subsec_nanos()));
        Retry {
            fixed_sleep: self.fixed_sleep,
            initial_backoff: self.initial_backoff,
            multiplier: self.multiplier,
            clamp_backoff: self.clamp,
            seed,
        }
    }
}
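
// An illustrative override (the values are made up for the example): these are
// the parameters that `PersistConfig::set_next_listen_batch_retryer` above fans
// out into the initial-backoff, multiplier, and clamp dyncfgs:
//
//     cfg.set_next_listen_batch_retryer(RetryParameters {
//         fixed_sleep: Duration::ZERO,
//         initial_backoff: Duration::from_millis(4),
//         multiplier: 2,
//         clamp: Duration::from_secs(16),
//     });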

pub(crate) const BLOB_OPERATION_TIMEOUT: Config<Duration> = Config::new(
    "persist_blob_operation_timeout",
    Duration::from_secs(180),
    "Maximum time allowed for a network call, including retry attempts.",
);

pub(crate) const BLOB_OPERATION_ATTEMPT_TIMEOUT: Config<Duration> = Config::new(
    "persist_blob_operation_attempt_timeout",
    Duration::from_secs(90),
    "Maximum time allowed for a single network call.",
);

pub(crate) const BLOB_CONNECT_TIMEOUT: Config<Duration> = Config::new(
    "persist_blob_connect_timeout",
    Duration::from_secs(7),
    "Maximum time to wait for a socket connection to be made.",
);

pub(crate) const BLOB_READ_TIMEOUT: Config<Duration> = Config::new(
    "persist_blob_read_timeout",
    Duration::from_secs(10),
    "Maximum time to wait to read the first byte of a response, including connection time.",
);

impl BlobKnobs for PersistConfig {
    fn operation_timeout(&self) -> Duration {
        BLOB_OPERATION_TIMEOUT.get(self)
    }

    fn operation_attempt_timeout(&self) -> Duration {
        BLOB_OPERATION_ATTEMPT_TIMEOUT.get(self)
    }

    fn connect_timeout(&self) -> Duration {
        BLOB_CONNECT_TIMEOUT.get(self)
    }

    fn read_timeout(&self) -> Duration {
        BLOB_READ_TIMEOUT.get(self)
    }

    fn is_cc_active(&self) -> bool {
        self.is_cc_active
    }
}

/// If persist gets some encoded ProtoState from the future (e.g. two versions of
/// code are running simultaneously against the same shard), it might have a
/// field that the current code doesn't know about. This would be silently
/// discarded at proto decode time [1]: our Proto library can't handle unknown fields,
/// and old versions of code might not be able to respect the semantics of the new
/// fields even if they did.
///
/// [1]: https://developers.google.com/protocol-buffers/docs/proto3#unknowns
///
/// To detect the bad situation and disallow it, we tag every version of state
/// written to consensus with the version of code it's compatible with. Then at
/// decode time, we're able to compare the current version against any we receive
/// and assert as necessary. The current version is typically the version of code
/// used to write the state, but it may be lower when code is intentionally emulating
/// an older version during eg. a graceful upgrade process.
///
/// We could do the same for blob data, but it shouldn't be necessary. Any blob
/// data we read is going to be because we fetched it using a pointer stored in
/// some persist state. If we can handle the state, we can handle the blobs it
/// references, too.
pub fn code_can_read_data(code_version: &Version, data_version: &Version) -> bool {
    // For now, Persist can read arbitrarily old state data.
    // We expect to add a floor to this in future versions.
    code_version.cmp_precedence(data_version).is_ge()
}
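
// Illustrative checks (not existing tests) of the rule above; `cmp_precedence`
// ignores build metadata:
//
//     assert!(code_can_read_data(&Version::new(0, 147, 0), &Version::new(0, 130, 0)));
//     assert!(!code_can_read_data(&Version::new(0, 130, 0), &Version::new(0, 147, 0)));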

/// Can the given version of the code generate data that older versions can understand?
/// Imagine the case of eg. garbage collection after a version upgrade... we may need to read old
/// diffs to be able to find blobs to delete, even if we no longer have code to generate data in
/// that format.
pub fn code_can_write_data(code_version: &Version, data_version: &Version) -> bool {
    if !code_can_read_data(code_version, data_version) {
        return false;
    }

    if code_version.major == 0 && code_version.minor <= SELF_MANAGED_VERSIONS[1].minor {
        // This code was added well after the last ad-hoc version was released,
        // so we don't strictly model compatibility with earlier releases.
        true
    } else if code_version.major == 0 {
        // Self-managed versions 25.2+ must be upgradeable from 25.1+.
        SELF_MANAGED_VERSIONS[0]
            .cmp_precedence(data_version)
            .is_le()
    } else if code_version.major <= 26 {
        // Versions 26.x must be upgradeable from the last pre-1.0 release.
        SELF_MANAGED_VERSIONS[1]
            .cmp_precedence(data_version)
            .is_le()
    } else {
        // Otherwise, the data must be from at earliest the _previous_ major version.
        code_version.major - 1 <= data_version.major
    }
}
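
// Illustrative checks (not existing tests) of the branches above:
//
//     // 0.x code at or below 0.147 (25.2) may write for any readable data version.
//     assert!(code_can_write_data(&Version::new(0, 140, 0), &Version::new(0, 135, 0)));
//     // 0.x code after 0.147 requires data from at least 0.130 (25.1).
//     assert!(!code_can_write_data(&Version::new(0, 150, 0), &Version::new(0, 129, 0)));
//     // Post-1.0 code may only write for data at most one major version behind.
//     assert!(!code_can_write_data(&Version::new(28, 0, 0), &Version::new(26, 5, 0)));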