// Copyright Materialize, Inc. and contributors. All rights reserved.
//
// Use of this software is governed by the Business Source License
// included in the LICENSE file.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0.

//! Dyncfgs used by the compute layer.
use std::time::Duration;

use mz_dyncfg::{Config, ConfigSet};

16/// Whether rendering should use `half_join2` rather than DD's `half_join` for delta joins.
17///
18/// `half_join2` avoids quadratic behavior in certain join patterns. This flag exists as an escape
19/// hatch to revert to the old implementation if issues arise.
20pub const ENABLE_HALF_JOIN2: Config<bool> = Config::new(
21    "enable_compute_half_join2",
22    true,
23    "Whether compute should use `half_join2` rather than DD's `half_join` to render delta joins.",
24);
25
26/// Whether rendering should use `mz_join_core` rather than DD's `JoinCore::join_core`.
27pub const ENABLE_MZ_JOIN_CORE: Config<bool> = Config::new(
28    "enable_mz_join_core",
29    true,
30    "Whether compute should use `mz_join_core` rather than DD's `JoinCore::join_core` to render \
31     linear joins.",
32);
33
34/// Whether rendering should use the new MV sink correction buffer implementation.
35pub const ENABLE_CORRECTION_V2: Config<bool> = Config::new(
36    "enable_compute_correction_v2",
37    true,
38    "Whether compute should use the new MV sink correction buffer implementation.",
39);
40
41/// Whether to enable temporal bucketing in compute.
42pub const ENABLE_TEMPORAL_BUCKETING: Config<bool> = Config::new(
43    "enable_compute_temporal_bucketing",
44    false,
45    "Whether to enable temporal bucketing in compute.",
46);
47
48/// The summary to apply to the frontier in temporal bucketing in compute.
49pub const TEMPORAL_BUCKETING_SUMMARY: Config<Duration> = Config::new(
50    "compute_temporal_bucketing_summary",
51    Duration::from_secs(2),
52    "The summary to apply to frontiers in temporal bucketing in compute.",
53);
54
55/// The yielding behavior with which linear joins should be rendered.
56pub const LINEAR_JOIN_YIELDING: Config<&str> = Config::new(
57    "linear_join_yielding",
58    "work:1000000,time:100",
59    "The yielding behavior compute rendering should apply for linear join operators. Either \
60     'work:<amount>' or 'time:<milliseconds>' or 'work:<amount>,time:<milliseconds>'. Note \
61     that omitting one of 'work' or 'time' will entirely disable join yielding by time or \
62     work, respectively, rather than falling back to some default.",
63);
64
65/// Enable lgalloc.
66pub const ENABLE_LGALLOC: Config<bool> = Config::new("enable_lgalloc", true, "Enable lgalloc.");
67
68/// Enable lgalloc's eager memory return/reclamation feature.
69pub const ENABLE_LGALLOC_EAGER_RECLAMATION: Config<bool> = Config::new(
70    "enable_lgalloc_eager_reclamation",
71    true,
72    "Enable lgalloc's eager return behavior.",
73);
74
75/// The interval at which the background thread wakes.
76pub const LGALLOC_BACKGROUND_INTERVAL: Config<Duration> = Config::new(
77    "lgalloc_background_interval",
78    Duration::from_secs(1),
79    "Scheduling interval for lgalloc's background worker.",
80);
81
82/// Enable lgalloc's eager memory return/reclamation feature.
83pub const LGALLOC_FILE_GROWTH_DAMPENER: Config<usize> = Config::new(
84    "lgalloc_file_growth_dampener",
85    2,
86    "Lgalloc's file growth dampener parameter.",
87);
88
89/// Enable lgalloc's eager memory return/reclamation feature.
90pub const LGALLOC_LOCAL_BUFFER_BYTES: Config<usize> = Config::new(
91    "lgalloc_local_buffer_bytes",
92    64 << 20,
93    "Lgalloc's local buffer bytes parameter.",
94);
95
96/// The bytes to reclaim (slow path) per size class, for each background thread activation.
97pub const LGALLOC_SLOW_CLEAR_BYTES: Config<usize> = Config::new(
98    "lgalloc_slow_clear_bytes",
99    128 << 20,
100    "Clear byte size per size class for every invocation",
101);
102
103/// Interval to run the memory limiter. A zero duration disables the limiter.
104pub const MEMORY_LIMITER_INTERVAL: Config<Duration> = Config::new(
105    "memory_limiter_interval",
106    Duration::from_secs(10),
107    "Interval to run the memory limiter. A zero duration disables the limiter.",
108);
109
110/// Bias to the memory limiter usage factor.
111pub const MEMORY_LIMITER_USAGE_BIAS: Config<f64> = Config::new(
112    "memory_limiter_usage_bias",
113    1.,
114    "Multiplicative bias to the memory limiter's limit.",
115);
116
117/// Burst factor to memory limit.
118pub const MEMORY_LIMITER_BURST_FACTOR: Config<f64> = Config::new(
119    "memory_limiter_burst_factor",
120    0.,
121    "Multiplicative burst factor to the memory limiter's limit.",
122);
123
124/// Enable lgalloc for columnation.
125pub const ENABLE_COLUMNATION_LGALLOC: Config<bool> = Config::new(
126    "enable_columnation_lgalloc",
127    true,
128    "Enable allocating regions from lgalloc.",
129);
130
131/// Enable lgalloc for columnar.
132pub const ENABLE_COLUMNAR_LGALLOC: Config<bool> = Config::new(
133    "enable_columnar_lgalloc",
134    true,
135    "Enable allocating aligned regions in columnar from lgalloc.",
136);
137
138/// The interval at which the compute server performs maintenance tasks.
139pub const COMPUTE_SERVER_MAINTENANCE_INTERVAL: Config<Duration> = Config::new(
140    "compute_server_maintenance_interval",
141    Duration::from_millis(10),
142    "The interval at which the compute server performs maintenance tasks. Zero enables maintenance on every iteration.",
143);
144
145/// Maximum number of in-flight bytes emitted by persist_sources feeding dataflows.
146pub const DATAFLOW_MAX_INFLIGHT_BYTES: Config<Option<usize>> = Config::new(
147    "compute_dataflow_max_inflight_bytes",
148    None,
149    "The maximum number of in-flight bytes emitted by persist_sources feeding \
150     compute dataflows in non-cc clusters.",
151);
152
153/// The "physical backpressure" of `compute_dataflow_max_inflight_bytes_cc` has
154/// been replaced in cc replicas by persist lgalloc and we intend to remove it
155/// once everything has switched to cc. In the meantime, this is a CYA to turn
156/// it back on if absolutely necessary.
157pub const DATAFLOW_MAX_INFLIGHT_BYTES_CC: Config<Option<usize>> = Config::new(
158    "compute_dataflow_max_inflight_bytes_cc",
159    None,
160    "The maximum number of in-flight bytes emitted by persist_sources feeding \
161     compute dataflows in cc clusters.",
162);
163
164/// The term `n` in the growth rate `1 + 1/(n + 1)` for `ConsolidatingVec`.
165/// The smallest value `0` corresponds to the greatest allowed growth, of doubling.
166pub const CONSOLIDATING_VEC_GROWTH_DAMPENER: Config<usize> = Config::new(
167    "consolidating_vec_growth_dampener",
168    1,
169    "Dampener in growth rate for consolidating vector size",
170);
171
172/// The number of dataflows that may hydrate concurrently.
173pub const HYDRATION_CONCURRENCY: Config<usize> = Config::new(
174    "compute_hydration_concurrency",
175    4,
176    "Controls how many compute dataflows may hydrate concurrently.",
177);
178
179/// See `src/storage-operators/src/s3_oneshot_sink/parquet.rs` for more details.
180pub const COPY_TO_S3_PARQUET_ROW_GROUP_FILE_RATIO: Config<usize> = Config::new(
181    "copy_to_s3_parquet_row_group_file_ratio",
182    20,
183    "The ratio (defined as a percentage) of row-group size to max-file-size. \
184        Must be <= 100.",
185);
186
187/// See `src/storage-operators/src/s3_oneshot_sink/parquet.rs` for more details.
188pub const COPY_TO_S3_ARROW_BUILDER_BUFFER_RATIO: Config<usize> = Config::new(
189    "copy_to_s3_arrow_builder_buffer_ratio",
190    150,
191    "The ratio (defined as a percentage) of arrow-builder size to row-group size. \
192        Must be >= 100.",
193);
194
195/// The size of each part in the multi-part upload to use when uploading files to S3.
196pub const COPY_TO_S3_MULTIPART_PART_SIZE_BYTES: Config<usize> = Config::new(
197    "copy_to_s3_multipart_part_size_bytes",
198    1024 * 1024 * 8,
199    "The size of each part in a multipart upload to S3.",
200);
201
202/// Main switch to enable or disable replica expiration.
203///
204/// Changes affect existing replicas only after restart.
205pub const ENABLE_COMPUTE_REPLICA_EXPIRATION: Config<bool> = Config::new(
206    "enable_compute_replica_expiration",
207    true,
208    "Main switch to disable replica expiration.",
209);
210
211/// The maximum lifetime of a replica configured as an offset to the replica start time.
212/// Used in temporal filters to drop diffs generated at timestamps beyond the expiration time.
213///
214/// A zero duration implies no expiration. Changing this value does not affect existing replicas,
215/// even when they are restarted.
216pub const COMPUTE_REPLICA_EXPIRATION_OFFSET: Config<Duration> = Config::new(
217    "compute_replica_expiration_offset",
218    Duration::ZERO,
219    "The expiration time offset for replicas. Zero disables expiration.",
220);
221
222/// When enabled, applies the column demands from a MapFilterProject onto the RelationDesc used to
223/// read out of Persist. This allows Persist to prune unneeded columns as a performance
224/// optimization.
225pub const COMPUTE_APPLY_COLUMN_DEMANDS: Config<bool> = Config::new(
226    "compute_apply_column_demands",
227    true,
228    "When enabled, passes applys column demands to the RelationDesc used to read out of Persist.",
229);
230
231/// The amount of output the flat-map operator produces before yielding. Set to a high value to
232/// avoid yielding, or to a low value to yield frequently.
233pub const COMPUTE_FLAT_MAP_FUEL: Config<usize> = Config::new(
234    "compute_flat_map_fuel",
235    1_000_000,
236    "The amount of output the flat-map operator produces before yielding.",
237);
238
239/// Whether to render `as_specific_collection` using a fueled flat-map operator.
240pub const ENABLE_COMPUTE_RENDER_FUELED_AS_SPECIFIC_COLLECTION: Config<bool> = Config::new(
241    "enable_compute_render_fueled_as_specific_collection",
242    true,
243    "When enabled, renders `as_specific_collection` using a fueled flat-map operator.",
244);
245
246/// Whether to apply logical backpressure in compute dataflows.
247pub const ENABLE_COMPUTE_LOGICAL_BACKPRESSURE: Config<bool> = Config::new(
248    "enable_compute_logical_backpressure",
249    false,
250    "When enabled, compute dataflows will apply logical backpressure.",
251);
252
253/// Maximal number of capabilities retained by the logical backpressure operator.
254///
255/// Selecting this value is subtle. If it's too small, it'll diminish the effectiveness of the
256/// logical backpressure operators. If it's too big, we can slow down hydration and cause state
257/// in the operator's implementation to build up.
258///
259/// The default value represents a compromise between these two extremes. We retain some metrics
260/// for 30 days, and the metrics update every minute. The default is exactly this number.
261pub const COMPUTE_LOGICAL_BACKPRESSURE_MAX_RETAINED_CAPABILITIES: Config<Option<usize>> =
262    Config::new(
263        "compute_logical_backpressure_max_retained_capabilities",
264        Some(30 * 24 * 60),
265        "The maximum number of capabilities retained by the logical backpressure operator.",
266    );
267
268/// The slack to round observed timestamps up to.
269///
270/// The default corresponds to Mz's default tick interval, but does not need to do so. Ideally,
271/// it is not smaller than the tick interval, but it can be larger.
272pub const COMPUTE_LOGICAL_BACKPRESSURE_INFLIGHT_SLACK: Config<Duration> = Config::new(
273    "compute_logical_backpressure_inflight_slack",
274    Duration::from_secs(1),
275    "Round observed timestamps to slack.",
276);
277
278/// Whether to enable the peek response stash, for sending back large peek
279/// responses. The response stash will only be used for results that exceed
280/// `compute_peek_response_stash_threshold_bytes`.
281pub const ENABLE_PEEK_RESPONSE_STASH: Config<bool> = Config::new(
282    "enable_compute_peek_response_stash",
283    true,
284    "Whether to enable the peek response stash, for sending back large peek responses. Will only be used for results that exceed compute_peek_response_stash_threshold_bytes.",
285);
286
287/// The threshold for peek response size above which we should use the peek
288/// response stash. Only used if the peek response stash is enabled _and_ if the
289/// query is "streamable" (roughly: doesn't have an ORDER BY).
290pub const PEEK_RESPONSE_STASH_THRESHOLD_BYTES: Config<usize> = Config::new(
291    "compute_peek_response_stash_threshold_bytes",
292    1024 * 10, /* 10KB */
293    "The threshold above which to use the peek response stash, for sending back large peek responses.",
294);
295
296/// The target number of maximum runs in the batches written to the stash.
297///
298/// Setting this reasonably low will make it so batches get consolidated/sorted
299/// concurrently with data being written. Which will in turn make it so that we
300/// have to do less work when reading/consolidating those batches in
301/// `environmentd`.
302pub const PEEK_RESPONSE_STASH_BATCH_MAX_RUNS: Config<usize> = Config::new(
303    "compute_peek_response_stash_batch_max_runs",
304    // The lowest possible setting, do as much work as possible on the
305    // `clusterd` side.
306    2,
307    "The target number of maximum runs in the batches written to the stash.",
308);
309
310/// The target size for batches of rows we read out of the peek stash.
311pub const PEEK_RESPONSE_STASH_READ_BATCH_SIZE_BYTES: Config<usize> = Config::new(
312    "compute_peek_response_stash_read_batch_size_bytes",
313    1024 * 1024 * 100, /* 100mb */
314    "The target size for batches of rows we read out of the peek stash.",
315);
316
317/// The memory budget for consolidating stashed peek responses in
318/// `environmentd`.
319pub const PEEK_RESPONSE_STASH_READ_MEMORY_BUDGET_BYTES: Config<usize> = Config::new(
320    "compute_peek_response_stash_read_memory_budget_bytes",
321    1024 * 1024 * 64, /* 64mb */
322    "The memory budget for consolidating stashed peek responses in environmentd.",
323);
324
325/// The number of batches to pump from the peek result iterator when stashing peek responses.
326pub const PEEK_STASH_NUM_BATCHES: Config<usize> = Config::new(
327    "compute_peek_stash_num_batches",
328    100,
329    "The number of batches to pump from the peek result iterator (in one iteration through the worker loop) when stashing peek responses.",
330);
331
332/// The size of each batch, as number of rows, pumped from the peek result
333/// iterator when stashing peek responses.
334pub const PEEK_STASH_BATCH_SIZE: Config<usize> = Config::new(
335    "compute_peek_stash_batch_size",
336    100000,
337    "The size, as number of rows, of each batch pumped from the peek result iterator (in one iteration through the worker loop) when stashing peek responses.",
338);
339
340/// The collection interval for the Prometheus metrics introspection source.
341///
342/// Set to zero to disable scraping and retract any existing data.
343pub const COMPUTE_PROMETHEUS_INTROSPECTION_SCRAPE_INTERVAL: Config<Duration> = Config::new(
344    "compute_prometheus_introspection_scrape_interval",
345    Duration::from_secs(1),
346    "The collection interval for the Prometheus metrics introspection source. Set to zero to disable.",
347);
348
349/// If set, skip fetching or processing the snapshot data for subscribes when possible.
350pub const SUBSCRIBE_SNAPSHOT_OPTIMIZATION: Config<bool> = Config::new(
351    "compute_subscribe_snapshot_optimization",
352    true,
353    "If set, skip fetching or processing the snapshot data for subscribes when possible.",
354);
355
356/// Temporary flag to de-risk the rollout of a release-blocker fix.
357///
358/// TODO: Remove after one, or a couple, releases.
359pub const MV_SINK_ADVANCE_PERSIST_FRONTIERS: Config<bool> = Config::new(
360    "compute_mv_sink_advance_persist_frontiers",
361    true,
362    "Whether the MV sink's write operator advances its internal persist frontiers to the as_of.",
363);
364
365/// Adds the full set of all compute `Config`s.
366pub fn all_dyncfgs(configs: ConfigSet) -> ConfigSet {
367    configs
368        .add(&ENABLE_HALF_JOIN2)
369        .add(&ENABLE_MZ_JOIN_CORE)
370        .add(&ENABLE_CORRECTION_V2)
371        .add(&ENABLE_TEMPORAL_BUCKETING)
372        .add(&TEMPORAL_BUCKETING_SUMMARY)
373        .add(&LINEAR_JOIN_YIELDING)
374        .add(&ENABLE_LGALLOC)
375        .add(&LGALLOC_BACKGROUND_INTERVAL)
376        .add(&LGALLOC_FILE_GROWTH_DAMPENER)
377        .add(&LGALLOC_LOCAL_BUFFER_BYTES)
378        .add(&LGALLOC_SLOW_CLEAR_BYTES)
379        .add(&MEMORY_LIMITER_INTERVAL)
380        .add(&MEMORY_LIMITER_USAGE_BIAS)
381        .add(&MEMORY_LIMITER_BURST_FACTOR)
382        .add(&ENABLE_LGALLOC_EAGER_RECLAMATION)
383        .add(&ENABLE_COLUMNATION_LGALLOC)
384        .add(&ENABLE_COLUMNAR_LGALLOC)
385        .add(&COMPUTE_SERVER_MAINTENANCE_INTERVAL)
386        .add(&DATAFLOW_MAX_INFLIGHT_BYTES)
387        .add(&DATAFLOW_MAX_INFLIGHT_BYTES_CC)
388        .add(&HYDRATION_CONCURRENCY)
389        .add(&COPY_TO_S3_PARQUET_ROW_GROUP_FILE_RATIO)
390        .add(&COPY_TO_S3_ARROW_BUILDER_BUFFER_RATIO)
391        .add(&COPY_TO_S3_MULTIPART_PART_SIZE_BYTES)
392        .add(&ENABLE_COMPUTE_REPLICA_EXPIRATION)
393        .add(&COMPUTE_REPLICA_EXPIRATION_OFFSET)
394        .add(&COMPUTE_APPLY_COLUMN_DEMANDS)
395        .add(&COMPUTE_FLAT_MAP_FUEL)
396        .add(&CONSOLIDATING_VEC_GROWTH_DAMPENER)
397        .add(&ENABLE_COMPUTE_RENDER_FUELED_AS_SPECIFIC_COLLECTION)
398        .add(&ENABLE_COMPUTE_LOGICAL_BACKPRESSURE)
399        .add(&COMPUTE_LOGICAL_BACKPRESSURE_MAX_RETAINED_CAPABILITIES)
400        .add(&COMPUTE_LOGICAL_BACKPRESSURE_INFLIGHT_SLACK)
401        .add(&ENABLE_PEEK_RESPONSE_STASH)
402        .add(&PEEK_RESPONSE_STASH_THRESHOLD_BYTES)
403        .add(&PEEK_RESPONSE_STASH_BATCH_MAX_RUNS)
404        .add(&PEEK_RESPONSE_STASH_READ_BATCH_SIZE_BYTES)
405        .add(&PEEK_RESPONSE_STASH_READ_MEMORY_BUDGET_BYTES)
406        .add(&PEEK_STASH_NUM_BATCHES)
407        .add(&PEEK_STASH_BATCH_SIZE)
408        .add(&COMPUTE_PROMETHEUS_INTROSPECTION_SCRAPE_INTERVAL)
409        .add(&SUBSCRIBE_SNAPSHOT_OPTIMIZATION)
410        .add(&MV_SINK_ADVANCE_PERSIST_FRONTIERS)
411}