use mz_dyncfg::{Config, ConfigSet};
use std::time::Duration;

pub const CLUSTER_SHUTDOWN_GRACE_PERIOD: Config<Duration> = Config::new(
    "storage_cluster_shutdown_grace_period",
    Duration::from_secs(10 * 60),
    "When dataflows observe an invariant violation, it is either due to a bug or due to \
    the cluster being shut down. This configuration defines the amount of time to \
    wait before panicking the process, which will register the invariant violation.",
);

pub const DELAY_SOURCES_PAST_REHYDRATION: Config<bool> = Config::new(
    "storage_dataflow_delay_sources_past_rehydration",
    true,
    "Whether or not to delay sources producing values in some scenarios \
    (namely, upsert) until after rehydration is finished.",
);

pub const SUSPENDABLE_SOURCES: Config<bool> = Config::new(
    "storage_dataflow_suspendable_sources",
    true,
    "Whether storage dataflows should suspend execution while downstream operators are still \
    processing data.",
);

pub const STORAGE_DOWNGRADE_SINCE_DURING_FINALIZATION: Config<bool> = Config::new(
    "storage_downgrade_since_during_finalization",
    true,
    "When enabled, force-downgrade the controller's since handle on the shard \
    during shard finalization.",
);

pub const REPLICA_METRICS_HISTORY_RETENTION_INTERVAL: Config<Duration> = Config::new(
    "replica_metrics_history_retention_interval",
    Duration::from_secs(60 * 60 * 24 * 30),
    "The interval of time to keep when truncating the replica metrics history.",
);

pub const WALLCLOCK_LAG_HISTORY_RETENTION_INTERVAL: Config<Duration> = Config::new(
    "wallclock_lag_history_retention_interval",
    Duration::from_secs(60 * 60 * 24 * 30),
    "The interval of time to keep when truncating the wallclock lag history.",
);

pub const WALLCLOCK_GLOBAL_LAG_HISTOGRAM_RETENTION_INTERVAL: Config<Duration> = Config::new(
    "wallclock_global_lag_histogram_retention_interval",
    Duration::from_secs(60 * 60 * 24 * 30),
    "The interval of time to keep when truncating the wallclock lag histogram.",
);

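// Note: the default for the config below is supplied as a function (hence the
// `fn() -> serde_json::Value` type) rather than as a plain value, since
// `mz_dyncfg` takes non-`Copy` defaults such as JSON values this way.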
pub const KAFKA_CLIENT_ID_ENRICHMENT_RULES: Config<fn() -> serde_json::Value> = Config::new(
    "kafka_client_id_enrichment_rules",
    || serde_json::json!([]),
    "Rules for enriching the `client.id` property of Kafka clients with additional data.",
);

pub const KAFKA_POLL_MAX_WAIT: Config<Duration> = Config::new(
    "kafka_poll_max_wait",
    Duration::from_secs(1),
    "The maximum time we will wait before re-polling rdkafka to see if new partitions/data are \
    available.",
);

pub static KAFKA_METADATA_FETCH_INTERVAL: Config<Duration> = Config::new(
    "kafka_default_metadata_fetch_interval",
    Duration::from_secs(1),
    "Interval to fetch topic partition metadata.",
);

pub const KAFKA_DEFAULT_AWS_PRIVATELINK_ENDPOINT_IDENTIFICATION_ALGORITHM: Config<&'static str> =
    Config::new(
        "kafka_default_aws_privatelink_endpoint_identification_algorithm",
        "none",
        "The value we set for the 'ssl.endpoint.identification.algorithm' option in the Kafka \
        connection config. Default: 'none'.",
    );

pub const KAFKA_BUFFERED_EVENT_RESIZE_THRESHOLD_ELEMENTS: Config<usize> = Config::new(
    "kafka_buffered_event_resize_threshold_elements",
    1000,
    "In the Kafka sink operator we might need to buffer messages before emitting them. As a \
    performance optimization we reuse the buffer allocation, but shrink it to retain at \
    most this number of elements.",
);

pub const KAFKA_RETRY_BACKOFF: Config<Duration> = Config::new(
    "kafka_retry_backoff",
    Duration::from_millis(100),
    "Sets retry.backoff.ms in librdkafka for sources and sinks.",
);

pub const KAFKA_RETRY_BACKOFF_MAX: Config<Duration> = Config::new(
    "kafka_retry_backoff_max",
    Duration::from_secs(1),
    "Sets retry.backoff.max.ms in librdkafka for sources and sinks.",
);

pub const KAFKA_RECONNECT_BACKOFF: Config<Duration> = Config::new(
    "kafka_reconnect_backoff",
    Duration::from_millis(100),
    "Sets reconnect.backoff.ms in librdkafka for sources and sinks.",
);

pub const KAFKA_RECONNECT_BACKOFF_MAX: Config<Duration> = Config::new(
    "kafka_reconnect_backoff_max",
    Duration::from_secs(30),
    "Sets reconnect.backoff.max.ms in librdkafka for sources and sinks.",
);

pub const MYSQL_REPLICATION_HEARTBEAT_INTERVAL: Config<Duration> = Config::new(
    "mysql_replication_heartbeat_interval",
    Duration::from_secs(30),
    "Replication heartbeat interval requested from the MySQL server.",
);

pub const MYSQL_OFFSET_KNOWN_INTERVAL: Config<Duration> = Config::new(
    "mysql_offset_known_interval",
    Duration::from_secs(1),
    "Interval to fetch `offset_known` from `@gtid_executed`.",
);

pub const PG_FETCH_SLOT_RESUME_LSN_INTERVAL: Config<Duration> = Config::new(
    "postgres_fetch_slot_resume_lsn_interval",
    Duration::from_millis(500),
    "Interval to poll `confirmed_flush_lsn` to get a resumption LSN.",
);

pub const PG_OFFSET_KNOWN_INTERVAL: Config<Duration> = Config::new(
    "pg_offset_known_interval",
    Duration::from_secs(1),
    "Interval to fetch `offset_known` from `pg_current_wal_lsn`.",
);

pub const PG_SCHEMA_VALIDATION_INTERVAL: Config<Duration> = Config::new(
    "pg_schema_validation_interval",
    Duration::from_secs(15),
    "Interval to re-validate the schemas of ingested tables.",
);

pub const ENFORCE_EXTERNAL_ADDRESSES: Config<bool> = Config::new(
    "storage_enforce_external_addresses",
    false,
    "Whether or not to enforce that external connection addresses are global \
    (not private or local) when resolving them",
);

pub const STORAGE_UPSERT_PREVENT_SNAPSHOT_BUFFERING: Config<bool> = Config::new(
    "storage_upsert_prevent_snapshot_buffering",
    true,
    "Prevent snapshot buffering in upsert.",
);

pub const STORAGE_ROCKSDB_USE_MERGE_OPERATOR: Config<bool> = Config::new(
    "storage_rocksdb_use_merge_operator",
    true,
    "Use the native RocksDB merge operator where possible.",
);

pub const STORAGE_UPSERT_MAX_SNAPSHOT_BATCH_BUFFERING: Config<Option<usize>> = Config::new(
    "storage_upsert_max_snapshot_batch_buffering",
    None,
    "Limit snapshot buffering in upsert.",
);

pub const STORAGE_ROCKSDB_CLEANUP_TRIES: Config<usize> = Config::new(
    "storage_rocksdb_cleanup_tries",
    5,
    "How many times to try to clean up old RocksDB DBs on disk before giving up.",
);

pub const STORAGE_SUSPEND_AND_RESTART_DELAY: Config<Duration> = Config::new(
    "storage_suspend_and_restart_delay",
    Duration::from_secs(5),
    "Delay interval when reconnecting to a source / sink after halt.",
);

pub const STORAGE_RECLOCK_TO_LATEST: Config<bool> = Config::new(
    "storage_reclock_to_latest",
    true,
    "Whether to mint reclock bindings based on the latest probed offset or the latest ingested offset.",
);

pub const STORAGE_USE_CONTINUAL_FEEDBACK_UPSERT: Config<bool> = Config::new(
    "storage_use_continual_feedback_upsert",
    true,
    "Whether to use the new continual feedback upsert operator.",
);

pub const STORAGE_SERVER_MAINTENANCE_INTERVAL: Config<Duration> = Config::new(
    "storage_server_maintenance_interval",
    Duration::from_millis(10),
    "The interval at which the storage server performs maintenance tasks. Zero enables maintenance on every iteration.",
);

pub const SINK_PROGRESS_SEARCH: Config<bool> = Config::new(
    "storage_sink_progress_search",
    true,
    "If set, iteratively search the progress topic for a progress record with increasing lookback.",
);

pub const SINK_ENSURE_TOPIC_CONFIG: Config<&'static str> = Config::new(
    "storage_sink_ensure_topic_config",
    "skip",
    "If `skip`, don't check the config of existing topics; if `check`, fetch the config and \
    warn if it does not match the expected configs; if `alter`, attempt to change the upstream to \
    match the expected configs.",
);

pub const ORE_OVERFLOWING_BEHAVIOR: Config<&'static str> = Config::new(
    "ore_overflowing_behavior",
    "ignore",
    "Overflow behavior for Overflowing types. One of 'ignore', 'panic', 'soft_panic'.",
);

pub const STATISTICS_RETENTION_DURATION: Config<Duration> = Config::new(
    "storage_statistics_retention_duration",
    Duration::from_secs(86_400),
    "The time after which we delete per-replica statistics (for sources and sinks) after \
    there have been no updates.",
);

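/// Adds the full set of all storage `Config`s to the given `ConfigSet`.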
pub fn all_dyncfgs(configs: ConfigSet) -> ConfigSet {
    configs
        .add(&CLUSTER_SHUTDOWN_GRACE_PERIOD)
        .add(&DELAY_SOURCES_PAST_REHYDRATION)
        .add(&ENFORCE_EXTERNAL_ADDRESSES)
        .add(&KAFKA_BUFFERED_EVENT_RESIZE_THRESHOLD_ELEMENTS)
        .add(&KAFKA_CLIENT_ID_ENRICHMENT_RULES)
        .add(&KAFKA_DEFAULT_AWS_PRIVATELINK_ENDPOINT_IDENTIFICATION_ALGORITHM)
        .add(&KAFKA_METADATA_FETCH_INTERVAL)
        .add(&KAFKA_POLL_MAX_WAIT)
        .add(&KAFKA_RETRY_BACKOFF)
        .add(&KAFKA_RETRY_BACKOFF_MAX)
        .add(&KAFKA_RECONNECT_BACKOFF)
        .add(&KAFKA_RECONNECT_BACKOFF_MAX)
        .add(&MYSQL_OFFSET_KNOWN_INTERVAL)
        .add(&MYSQL_REPLICATION_HEARTBEAT_INTERVAL)
        .add(&ORE_OVERFLOWING_BEHAVIOR)
        .add(&PG_FETCH_SLOT_RESUME_LSN_INTERVAL)
        .add(&PG_OFFSET_KNOWN_INTERVAL)
        .add(&PG_SCHEMA_VALIDATION_INTERVAL)
        .add(&REPLICA_METRICS_HISTORY_RETENTION_INTERVAL)
        .add(&SINK_ENSURE_TOPIC_CONFIG)
        .add(&SINK_PROGRESS_SEARCH)
        .add(&STORAGE_DOWNGRADE_SINCE_DURING_FINALIZATION)
        .add(&STORAGE_RECLOCK_TO_LATEST)
        .add(&STORAGE_ROCKSDB_CLEANUP_TRIES)
        .add(&STORAGE_ROCKSDB_USE_MERGE_OPERATOR)
        .add(&STORAGE_SERVER_MAINTENANCE_INTERVAL)
        .add(&STORAGE_SUSPEND_AND_RESTART_DELAY)
        .add(&STORAGE_UPSERT_MAX_SNAPSHOT_BATCH_BUFFERING)
        .add(&STORAGE_UPSERT_PREVENT_SNAPSHOT_BUFFERING)
        .add(&STORAGE_USE_CONTINUAL_FEEDBACK_UPSERT)
        .add(&SUSPENDABLE_SOURCES)
        .add(&WALLCLOCK_GLOBAL_LAG_HISTOGRAM_RETENTION_INTERVAL)
        .add(&WALLCLOCK_LAG_HISTORY_RETENTION_INTERVAL)
        .add(&crate::sources::sql_server::CDC_POLL_INTERVAL)
        .add(&crate::sources::sql_server::CDC_CLEANUP_CHANGE_TABLE)
        .add(&crate::sources::sql_server::CDC_CLEANUP_CHANGE_TABLE_MAX_DELETES)
        .add(&crate::sources::sql_server::MAX_LSN_WAIT)
        .add(&crate::sources::sql_server::SNAPSHOT_PROGRESS_REPORT_INTERVAL)
        .add(&crate::sources::sql_server::OFFSET_KNOWN_INTERVAL)
        .add(&STATISTICS_RETENTION_DURATION)
}
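
// A minimal sketch (not part of the original set of declarations) of how these
// dyncfgs can be exercised: build a `ConfigSet` via `all_dyncfgs` and read the
// defaults back with `Config::get`. Assumes `ConfigSet` implements `Default`, as
// it does in `mz_dyncfg` at the time of writing.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn defaults_are_registered() {
        let configs = all_dyncfgs(ConfigSet::default());
        // Each config should report its declared default value.
        assert_eq!(KAFKA_POLL_MAX_WAIT.get(&configs), Duration::from_secs(1));
        assert_eq!(STORAGE_ROCKSDB_CLEANUP_TRIES.get(&configs), 5);
        assert!(STORAGE_USE_CONTINUAL_FEEDBACK_UPSERT.get(&configs));
    }
}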