mz_catalog/config.rs

// Copyright Materialize, Inc. and contributors. All rights reserved.
//
// Use of this software is governed by the Business Source License
// included in the LICENSE file.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0.

use std::collections::{BTreeMap, BTreeSet};
use std::num::NonZero;

use anyhow::bail;
use bytesize::ByteSize;
use ipnet::IpNet;
use mz_adapter_types::bootstrap_builtin_cluster_config::BootstrapBuiltinClusterConfig;
use mz_auth::password::Password;
use mz_build_info::BuildInfo;
use mz_cloud_resources::AwsExternalIdPrefix;
use mz_controller::clusters::ReplicaAllocation;
use mz_license_keys::ValidatedLicenseKey;
use mz_orchestrator::MemoryLimit;
use mz_ore::cast::CastFrom;
use mz_ore::metrics::MetricsRegistry;
use mz_persist_client::PersistClient;
use mz_repr::CatalogItemId;
use mz_repr::adt::numeric::Numeric;
use mz_sql::catalog::CatalogError as SqlCatalogError;
use mz_sql::catalog::EnvironmentId;
use serde::Serialize;

use crate::durable::{CatalogError, DurableCatalogState};

const GIB: u64 = 1024 * 1024 * 1024;

/// Configures a catalog.
#[derive(Debug)]
pub struct Config<'a> {
    /// The connection to the catalog storage.
    pub storage: Box<dyn DurableCatalogState>,
    /// The registry that the catalog uses to report metrics.
    pub metrics_registry: &'a MetricsRegistry,
    /// Configuration for constructing the catalog state.
    pub state: StateConfig,
}

#[derive(Debug)]
pub struct StateConfig {
    /// Whether to enable unsafe mode.
    pub unsafe_mode: bool,
    /// Whether to enable all feature flags, as in a local dev build.
    pub all_features: bool,
    /// Information about this build of Materialize.
    pub build_info: &'static BuildInfo,
    /// The deploy generation with which the process was started.
    pub deploy_generation: u64,
    /// A persistent ID associated with the environment.
    pub environment_id: EnvironmentId,
    /// Whether to start Materialize in read-only mode.
    pub read_only: bool,
    /// Function to generate the current wall clock time; can be mocked.
    pub now: mz_ore::now::NowFn,
    /// Linearizable timestamp of when this environment booted.
    pub boot_ts: mz_repr::Timestamp,
    /// Whether or not to skip catalog migrations.
    pub skip_migrations: bool,
    /// Map of strings to corresponding compute replica sizes.
    pub cluster_replica_sizes: ClusterReplicaSizeMap,
    /// Builtin system cluster config.
    pub builtin_system_cluster_config: BootstrapBuiltinClusterConfig,
    /// Builtin catalog server cluster config.
    pub builtin_catalog_server_cluster_config: BootstrapBuiltinClusterConfig,
    /// Builtin probe cluster config.
    pub builtin_probe_cluster_config: BootstrapBuiltinClusterConfig,
    /// Builtin support cluster config.
    pub builtin_support_cluster_config: BootstrapBuiltinClusterConfig,
    /// Builtin analytics cluster config.
    pub builtin_analytics_cluster_config: BootstrapBuiltinClusterConfig,
    /// Dynamic defaults for system parameters.
    pub system_parameter_defaults: BTreeMap<String, String>,
    /// An optional map of system parameters pulled from a remote frontend.
    /// A `None` value indicates that the initial sync was skipped.
    pub remote_system_parameters: Option<BTreeMap<String, String>>,
    /// Valid availability zones for replicas.
    pub availability_zones: Vec<String>,
    /// IP addresses that will be used for egress.
    pub egress_addresses: Vec<IpNet>,
    /// Context for generating an AWS Principal.
    pub aws_principal_context: Option<AwsPrincipalContext>,
    /// Supported AWS PrivateLink availability zone IDs.
    pub aws_privatelink_availability_zones: Option<BTreeSet<String>>,
    /// Host name or URL for connecting to the HTTP server of this instance.
    pub http_host_name: Option<String>,
    /// Context for source and sink connections.
    pub connection_context: mz_storage_types::connections::ConnectionContext,
    /// Configuration for builtin item migrations.
    pub builtin_item_migration_config: BuiltinItemMigrationConfig,
    /// Handle to the persist client.
    pub persist_client: PersistClient,
    /// Overrides the current value of the [`mz_adapter_types::dyncfgs::ENABLE_EXPRESSION_CACHE`]
    /// feature flag.
    pub enable_expression_cache_override: Option<bool>,
    /// Helm chart version.
    pub helm_chart_version: Option<String>,
    /// Optional external login password for the `mz_system` user.
    pub external_login_password_mz_system: Option<Password>,
    /// The validated license key for this environment.
    pub license_key: ValidatedLicenseKey,
}

/// Configures builtin item migrations.
#[derive(Debug)]
pub struct BuiltinItemMigrationConfig {
    pub persist_client: PersistClient,
    pub read_only: bool,
    pub force_migration: Option<String>,
}

/// A mapping from size name to the resource allocation for cluster replicas of that size.
#[derive(Debug, Clone, Serialize)]
pub struct ClusterReplicaSizeMap(pub BTreeMap<String, ReplicaAllocation>);

impl ClusterReplicaSizeMap {
    /// Parses a size map from the given JSON string. If `credit_consumption_from_memory` is
    /// set, `credits_per_hour` for each size is derived from its total memory limit
    /// (memory limit × scale), in GiB.
    pub fn parse_from_str(s: &str, credit_consumption_from_memory: bool) -> anyhow::Result<Self> {
        let mut cluster_replica_sizes: BTreeMap<String, ReplicaAllocation> =
            serde_json::from_str(s)?;
        if credit_consumption_from_memory {
            for (name, replica) in cluster_replica_sizes.iter_mut() {
                let Some(memory_limit) = replica.memory_limit else {
                    bail!("No memory limit found in cluster definition for {name}");
                };
                let total_memory = memory_limit.0 * replica.scale.get();
                replica.credits_per_hour = Numeric::from(total_memory.0) / Numeric::from(GIB);
            }
        }
        Ok(Self(cluster_replica_sizes))
    }

    /// Iterate all enabled (not disabled) replica allocations, with their name.
    pub fn enabled_allocations(&self) -> impl Iterator<Item = (&String, &ReplicaAllocation)> {
        self.0.iter().filter(|(_, a)| !a.disabled)
    }

    /// Get a replica allocation by size name. Returns a reference to the allocation, or an
    /// error if the size is unknown.
    pub fn get_allocation_by_name(&self, name: &str) -> Result<&ReplicaAllocation, CatalogError> {
        self.0.get(name).ok_or_else(|| {
            CatalogError::Catalog(SqlCatalogError::UnknownClusterReplicaSize(name.into()))
        })
    }

    /// A default size map for testing and local development. It should not be used in
    /// production.
    ///
    /// Credits per hour are calculated as being equal to scale. This is not necessarily how the
    /// value is computed in production.
    pub fn for_tests() -> Self {
        // {
        //     "scale=1,workers=1": {"scale": 1, "workers": 1},
        //     "scale=1,workers=2": {"scale": 1, "workers": 2},
        //     "scale=1,workers=4": {"scale": 1, "workers": 4},
        //     ...
        //     "scale=1,workers=32": {"scale": 1, "workers": 32},
        //     // Testing with multiple processes on a single machine
        //     "scale=2,workers=4": {"scale": 2, "workers": 4},
        //     // Used in mzcompose tests
        //     "scale=2,workers=2": {"scale": 2, "workers": 2},
        //     ...
        //     "scale=16,workers=16": {"scale": 16, "workers": 16},
        //     // Used in the shared_fate cloudtest tests
        //     "scale=2,workers=1": {"scale": 2, "workers": 1},
        //     ...
        //     "scale=16,workers=1": {"scale": 16, "workers": 1},
        //     // Used in the cloudtest tests that force OOMs
        //     "scale=1,workers=1,mem=2GiB": {"memory_limit": "2GiB"},
        //     ...
        //     "scale=1,workers=1,mem=16GiB": {"memory_limit": "16GiB"},
        // }
        let mut inner = (0..=5)
            .flat_map(|i| {
                let workers = 1 << i;
                [
                    (format!("scale=1,workers={workers}"), None),
                    (format!("scale=1,workers={workers},mem=4GiB"), Some(4)),
                    (format!("scale=1,workers={workers},mem=8GiB"), Some(8)),
                    (format!("scale=1,workers={workers},mem=16GiB"), Some(16)),
                    (format!("scale=1,workers={workers},mem=32GiB"), Some(32)),
                ]
                .map(|(name, memory_limit)| {
                    (
                        name,
                        ReplicaAllocation {
                            memory_limit: memory_limit.map(|gib| MemoryLimit(ByteSize::gib(gib))),
                            cpu_limit: None,
                            disk_limit: None,
                            scale: NonZero::new(1).expect("not zero"),
                            workers: NonZero::new(workers).expect("not zero"),
                            credits_per_hour: 1.into(),
                            cpu_exclusive: false,
                            is_cc: false,
                            swap_enabled: false,
                            disabled: false,
                            selectors: BTreeMap::default(),
                        },
                    )
                })
            })
            .collect::<BTreeMap<_, _>>();

        for i in 1..=5 {
            let scale = 1 << i;
            inner.insert(
                format!("scale={scale},workers=1"),
                ReplicaAllocation {
                    memory_limit: None,
                    cpu_limit: None,
                    disk_limit: None,
                    scale: NonZero::new(scale).expect("not zero"),
                    workers: NonZero::new(1).expect("not zero"),
                    credits_per_hour: scale.into(),
                    cpu_exclusive: false,
                    is_cc: false,
                    swap_enabled: false,
                    disabled: false,
                    selectors: BTreeMap::default(),
                },
            );

            inner.insert(
                format!("scale={scale},workers={scale}"),
                ReplicaAllocation {
                    memory_limit: None,
                    cpu_limit: None,
                    disk_limit: None,
                    scale: NonZero::new(scale).expect("not zero"),
                    workers: NonZero::new(scale.into()).expect("not zero"),
                    credits_per_hour: scale.into(),
                    cpu_exclusive: false,
                    is_cc: false,
                    swap_enabled: false,
                    disabled: false,
                    selectors: BTreeMap::default(),
                },
            );

            inner.insert(
                format!("scale=1,workers=8,mem={scale}GiB"),
                ReplicaAllocation {
                    memory_limit: Some(MemoryLimit(ByteSize(u64::cast_from(scale) * (1 << 30)))),
                    cpu_limit: None,
                    disk_limit: None,
                    scale: NonZero::new(1).expect("not zero"),
                    workers: NonZero::new(8).expect("not zero"),
                    credits_per_hour: 1.into(),
                    cpu_exclusive: false,
                    is_cc: false,
                    swap_enabled: false,
                    disabled: false,
                    selectors: BTreeMap::default(),
                },
            );
        }

        inner.insert(
            "scale=2,workers=4".to_string(),
            ReplicaAllocation {
                memory_limit: None,
                cpu_limit: None,
                disk_limit: None,
                scale: NonZero::new(2).expect("not zero"),
                workers: NonZero::new(4).expect("not zero"),
                credits_per_hour: 2.into(),
                cpu_exclusive: false,
                is_cc: false,
                swap_enabled: false,
                disabled: false,
                selectors: BTreeMap::default(),
            },
        );

        inner.insert(
            "free".to_string(),
            ReplicaAllocation {
                memory_limit: None,
                cpu_limit: None,
                disk_limit: None,
                scale: NonZero::new(1).expect("not zero"),
                workers: NonZero::new(1).expect("not zero"),
                credits_per_hour: 0.into(),
                cpu_exclusive: false,
                is_cc: true,
                swap_enabled: false,
                disabled: true,
                selectors: BTreeMap::default(),
            },
        );

        Self(inner)
    }
}
293
294/// Context used to generate an AWS Principal.
295///
296/// In the case of AWS PrivateLink connections, Materialize will connect to the
297/// VPC endpoint as the AWS Principal generated via this context.
298#[derive(Debug, Clone, Serialize)]
299pub struct AwsPrincipalContext {
300    pub aws_account_id: String,
301    pub aws_external_id_prefix: AwsExternalIdPrefix,
302}
303
304impl AwsPrincipalContext {
305    pub fn to_principal_string(&self, aws_external_id_suffix: CatalogItemId) -> String {
306        format!(
307            "arn:aws:iam::{}:role/mz_{}_{}",
308            self.aws_account_id, self.aws_external_id_prefix, aws_external_id_suffix
309        )
310    }
311}
312
313#[cfg(test)]
314#[allow(clippy::unwrap_used)]
315mod tests {
316    use super::*;
317
318    #[mz_ore::test]
319    #[cfg_attr(miri, ignore)] // can't call foreign function `decContextDefault`
320    fn cluster_replica_size_credits_from_memory() {
321        let s = r#"{
322            "test": {
323                "memory_limit": "1000MiB",
324                "scale": 2,
325                "workers": 10,
326                "credits_per_hour": "0"
327            }
328        }"#;
329        let map = ClusterReplicaSizeMap::parse_from_str(s, true).unwrap();
330
331        let alloc = map.get_allocation_by_name("test").unwrap();
332        let expected = Numeric::from(2000) / Numeric::from(1024);
333        assert_eq!(alloc.credits_per_hour, expected);
334    }
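
    // A minimal sketch of the invariants encoded in `for_tests` and the lookup helpers
    // above: credits per hour track the scale for the test sizes, unknown size names
    // produce `UnknownClusterReplicaSize`, and the disabled "free" size is excluded from
    // `enabled_allocations`.
    #[mz_ore::test]
    #[cfg_attr(miri, ignore)] // can't call foreign function `decContextDefault`
    fn cluster_replica_size_map_for_tests() {
        let map = ClusterReplicaSizeMap::for_tests();

        // The explicitly inserted "scale=2,workers=4" size charges 2 credits per hour.
        let alloc = map.get_allocation_by_name("scale=2,workers=4").unwrap();
        assert_eq!(alloc.scale.get(), 2);
        assert_eq!(alloc.credits_per_hour, Numeric::from(2));

        // Looking up an unknown size reports an unknown cluster replica size error.
        assert!(matches!(
            map.get_allocation_by_name("no-such-size"),
            Err(CatalogError::Catalog(
                SqlCatalogError::UnknownClusterReplicaSize(_)
            ))
        ));

        // The "free" size is present in the map but disabled, so it is filtered out of
        // the enabled allocations.
        assert!(map.get_allocation_by_name("free").unwrap().disabled);
        assert!(map.enabled_allocations().all(|(name, _)| name.as_str() != "free"));
    }

    // A hedged sketch of the error path in `parse_from_str`: deriving credits from memory
    // needs a memory limit, so a size definition without one should fail to parse.
    #[mz_ore::test]
    #[cfg_attr(miri, ignore)] // can't call foreign function `decContextDefault`
    fn cluster_replica_size_credits_from_memory_requires_memory_limit() {
        let s = r#"{
            "test": {
                "scale": 1,
                "workers": 1,
                "credits_per_hour": "0"
            }
        }"#;
        assert!(ClusterReplicaSizeMap::parse_from_str(s, true).is_err());
    }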
}