mz_catalog/
config.rs

1// Copyright Materialize, Inc. and contributors. All rights reserved.
2//
3// Use of this software is governed by the Business Source License
4// included in the LICENSE file.
5//
6// As of the Change Date specified in that file, in accordance with
7// the Business Source License, use of this software will be governed
8// by the Apache License, Version 2.0.
9
10use std::collections::{BTreeMap, BTreeSet};
11use std::num::NonZero;
12
13use anyhow::bail;
14use bytesize::ByteSize;
15use ipnet::IpNet;
16use mz_adapter_types::bootstrap_builtin_cluster_config::BootstrapBuiltinClusterConfig;
17use mz_auth::password::Password;
18use mz_build_info::BuildInfo;
19use mz_cloud_resources::AwsExternalIdPrefix;
20use mz_controller::clusters::ReplicaAllocation;
21use mz_license_keys::ValidatedLicenseKey;
22use mz_orchestrator::MemoryLimit;
23use mz_ore::cast::CastFrom;
24use mz_ore::metrics::MetricsRegistry;
25use mz_persist_client::PersistClient;
26use mz_repr::CatalogItemId;
27use mz_repr::adt::numeric::Numeric;
28use mz_sql::catalog::CatalogError as SqlCatalogError;
29use mz_sql::catalog::EnvironmentId;
30use serde::Serialize;
31
32use crate::durable::{CatalogError, DurableCatalogState};
33
/// Number of bytes in one gibibyte (2^30).
const GIB: u64 = 1024 * 1024 * 1024;
35
/// Configures a catalog.
#[derive(Debug)]
pub struct Config<'a> {
    /// The connection to the catalog storage.
    pub storage: Box<dyn DurableCatalogState>,
    /// The registry that catalog uses to report metrics.
    pub metrics_registry: &'a MetricsRegistry,
    /// Configuration for the in-memory catalog state; see [`StateConfig`].
    pub state: StateConfig,
}
45
#[derive(Debug)]
pub struct StateConfig {
    /// Whether to enable unsafe mode.
    pub unsafe_mode: bool,
    /// Whether the build is a local dev build.
    pub all_features: bool,
    /// Information about this build of Materialize.
    pub build_info: &'static BuildInfo,
    /// The deploy generation with which the process was started.
    pub deploy_generation: u64,
    /// A persistent ID associated with the environment.
    pub environment_id: EnvironmentId,
    /// Whether to start Materialize in read-only mode.
    pub read_only: bool,
    /// Function to generate wall clock now; can be mocked.
    pub now: mz_ore::now::NowFn,
    /// Linearizable timestamp of when this environment booted.
    pub boot_ts: mz_repr::Timestamp,
    /// Whether or not to skip catalog migrations.
    pub skip_migrations: bool,
    /// Map of strings to corresponding compute replica sizes.
    pub cluster_replica_sizes: ClusterReplicaSizeMap,
    /// Builtin system cluster config.
    pub builtin_system_cluster_config: BootstrapBuiltinClusterConfig,
    /// Builtin catalog server cluster config.
    pub builtin_catalog_server_cluster_config: BootstrapBuiltinClusterConfig,
    /// Builtin probe cluster config.
    pub builtin_probe_cluster_config: BootstrapBuiltinClusterConfig,
    /// Builtin support cluster config.
    pub builtin_support_cluster_config: BootstrapBuiltinClusterConfig,
    /// Builtin analytics cluster config.
    pub builtin_analytics_cluster_config: BootstrapBuiltinClusterConfig,
    /// Dynamic defaults for system parameters.
    pub system_parameter_defaults: BTreeMap<String, String>,
    /// An optional map of system parameters pulled from a remote frontend.
    /// A `None` value indicates that the initial sync was skipped.
    pub remote_system_parameters: Option<BTreeMap<String, String>>,
    /// Valid availability zones for replicas.
    pub availability_zones: Vec<String>,
    /// IP Addresses which will be used for egress.
    pub egress_addresses: Vec<IpNet>,
    /// Context for generating an AWS Principal.
    pub aws_principal_context: Option<AwsPrincipalContext>,
    /// Supported AWS PrivateLink availability zone ids.
    pub aws_privatelink_availability_zones: Option<BTreeSet<String>>,
    /// Host name or URL for connecting to the HTTP server of this instance.
    pub http_host_name: Option<String>,
    /// Context for source and sink connections.
    pub connection_context: mz_storage_types::connections::ConnectionContext,
    /// Configuration for builtin item migrations; see [`BuiltinItemMigrationConfig`].
    pub builtin_item_migration_config: BuiltinItemMigrationConfig,
    /// Client handle to persist.
    pub persist_client: PersistClient,
    /// Overrides the current value of the [`mz_adapter_types::dyncfgs::ENABLE_EXPRESSION_CACHE`]
    /// feature flag.
    pub enable_expression_cache_override: Option<bool>,
    /// Helm chart version
    pub helm_chart_version: Option<String>,
    /// Optional password for the `mz_system` user — presumably enables external
    /// logins as that user; TODO(review) confirm semantics with callers.
    pub external_login_password_mz_system: Option<Password>,
    /// The validated license key for this environment.
    pub license_key: ValidatedLicenseKey,
}
105
/// Configuration for migrating builtin catalog items.
#[derive(Debug)]
pub struct BuiltinItemMigrationConfig {
    /// Client handle to persist.
    pub persist_client: PersistClient,
    /// Whether the process is running in read-only mode.
    pub read_only: bool,
    /// If set, forces a migration — presumably identifies which migration to
    /// force; TODO(review) confirm against the consumer of this field.
    pub force_migration: Option<String>,
}
112
/// A mapping from cluster replica size name to its resource allocation.
#[derive(Debug, Clone, Serialize)]
pub struct ClusterReplicaSizeMap(pub BTreeMap<String, ReplicaAllocation>);
115
116impl ClusterReplicaSizeMap {
117    pub fn parse_from_str(s: &str, credit_consumption_from_memory: bool) -> anyhow::Result<Self> {
118        let mut cluster_replica_sizes: BTreeMap<String, ReplicaAllocation> =
119            serde_json::from_str(s)?;
120        if credit_consumption_from_memory {
121            for (name, replica) in cluster_replica_sizes.iter_mut() {
122                let Some(memory_limit) = replica.memory_limit else {
123                    bail!("No memory limit found in cluster definition for {name}");
124                };
125                replica.credits_per_hour = Numeric::from(
126                    (memory_limit.0 * replica.scale.get() * u64::cast_from(replica.workers)).0,
127                ) / Numeric::from(1 * GIB);
128            }
129        }
130        Ok(Self(cluster_replica_sizes))
131    }
132
133    /// Iterate all enabled (not disabled) replica allocations, with their name.
134    pub fn enabled_allocations(&self) -> impl Iterator<Item = (&String, &ReplicaAllocation)> {
135        self.0.iter().filter(|(_, a)| !a.disabled)
136    }
137
138    /// Get a replica allocation by size name. Returns a reference to the allocation, or an
139    /// error if the size is unknown.
140    pub fn get_allocation_by_name(&self, name: &str) -> Result<&ReplicaAllocation, CatalogError> {
141        self.0.get(name).ok_or_else(|| {
142            CatalogError::Catalog(SqlCatalogError::UnknownClusterReplicaSize(name.into()))
143        })
144    }
145
146    /// Used for testing and local purposes. This default value should not be used in production.
147    ///
148    /// Credits per hour are calculated as being equal to scale. This is not necessarily how the
149    /// value is computed in production.
150    pub fn for_tests() -> Self {
151        // {
152        //     "scale=1,workers=1": {"scale": 1, "workers": 1},
153        //     "scale=1,workers=2": {"scale": 1, "workers": 2},
154        //     "scale=1,workers=4": {"scale": 1, "workers": 4},
155        //     /// ...
156        //     "scale=1,workers=32": {"scale": 1, "workers": 32}
157        //     /// Testing with multiple processes on a single machine
158        //     "scale=2,workers=4": {"scale": 2, "workers": 4},
159        //     /// Used in mzcompose tests
160        //     "scale=2,workers=2": {"scale": 2, "workers": 2},
161        //     ...
162        //     "scale=16,workers=16": {"scale": 16, "workers": 16},
163        //     /// Used in the shared_fate cloudtest tests
164        //     "scale=2,workers=1": {"scale": 2, "workers": 1},
165        //     ...
166        //     "scale=16,workers=1": {"scale": 16, "workers": 1},
167        //     /// Used in the cloudtest tests that force OOMs
168        //     "scale=1,workers=1,mem=2GiB": { "memory_limit": 2GiB },
169        //     ...
170        //     "scale=1,workers=1,mem=16": { "memory_limit": 16GiB },
171        // }
172        let mut inner = (0..=5)
173            .flat_map(|i| {
174                let workers = 1 << i;
175                [
176                    (format!("scale=1,workers={workers}"), None),
177                    (format!("scale=1,workers={workers},mem=4GiB"), Some(4)),
178                    (format!("scale=1,workers={workers},mem=8GiB"), Some(8)),
179                    (format!("scale=1,workers={workers},mem=16GiB"), Some(16)),
180                    (format!("scale=1,workers={workers},mem=32GiB"), Some(32)),
181                ]
182                .map(|(name, memory_limit)| {
183                    (
184                        name,
185                        ReplicaAllocation {
186                            memory_limit: memory_limit.map(|gib| MemoryLimit(ByteSize::gib(gib))),
187                            cpu_limit: None,
188                            disk_limit: None,
189                            scale: NonZero::new(1).expect("not zero"),
190                            workers: NonZero::new(workers).expect("not zero"),
191                            credits_per_hour: 1.into(),
192                            cpu_exclusive: false,
193                            is_cc: false,
194                            swap_enabled: false,
195                            disabled: false,
196                            selectors: BTreeMap::default(),
197                        },
198                    )
199                })
200            })
201            .collect::<BTreeMap<_, _>>();
202
203        for i in 1..=5 {
204            let scale = 1 << i;
205            inner.insert(
206                format!("scale={scale},workers=1"),
207                ReplicaAllocation {
208                    memory_limit: None,
209                    cpu_limit: None,
210                    disk_limit: None,
211                    scale: NonZero::new(scale).expect("not zero"),
212                    workers: NonZero::new(1).expect("not zero"),
213                    credits_per_hour: scale.into(),
214                    cpu_exclusive: false,
215                    is_cc: false,
216                    swap_enabled: false,
217                    disabled: false,
218                    selectors: BTreeMap::default(),
219                },
220            );
221
222            inner.insert(
223                format!("scale={scale},workers={scale}"),
224                ReplicaAllocation {
225                    memory_limit: None,
226                    cpu_limit: None,
227                    disk_limit: None,
228                    scale: NonZero::new(scale).expect("not zero"),
229                    workers: NonZero::new(scale.into()).expect("not zero"),
230                    credits_per_hour: scale.into(),
231                    cpu_exclusive: false,
232                    is_cc: false,
233                    swap_enabled: false,
234                    disabled: false,
235                    selectors: BTreeMap::default(),
236                },
237            );
238
239            inner.insert(
240                format!("scale=1,workers=8,mem={scale}GiB"),
241                ReplicaAllocation {
242                    memory_limit: Some(MemoryLimit(ByteSize(u64::cast_from(scale) * (1 << 30)))),
243                    cpu_limit: None,
244                    disk_limit: None,
245                    scale: NonZero::new(1).expect("not zero"),
246                    workers: NonZero::new(8).expect("not zero"),
247                    credits_per_hour: 1.into(),
248                    cpu_exclusive: false,
249                    is_cc: false,
250                    swap_enabled: false,
251                    disabled: false,
252                    selectors: BTreeMap::default(),
253                },
254            );
255        }
256
257        inner.insert(
258            "scale=2,workers=4".to_string(),
259            ReplicaAllocation {
260                memory_limit: None,
261                cpu_limit: None,
262                disk_limit: None,
263                scale: NonZero::new(2).expect("not zero"),
264                workers: NonZero::new(4).expect("not zero"),
265                credits_per_hour: 2.into(),
266                cpu_exclusive: false,
267                is_cc: false,
268                swap_enabled: false,
269                disabled: false,
270                selectors: BTreeMap::default(),
271            },
272        );
273
274        inner.insert(
275            "free".to_string(),
276            ReplicaAllocation {
277                memory_limit: None,
278                cpu_limit: None,
279                disk_limit: None,
280                scale: NonZero::new(1).expect("not zero"),
281                workers: NonZero::new(1).expect("not zero"),
282                credits_per_hour: 0.into(),
283                cpu_exclusive: false,
284                is_cc: true,
285                swap_enabled: false,
286                disabled: true,
287                selectors: BTreeMap::default(),
288            },
289        );
290
291        Self(inner)
292    }
293}
294
/// Context used to generate an AWS Principal.
///
/// In the case of AWS PrivateLink connections, Materialize will connect to the
/// VPC endpoint as the AWS Principal generated via this context.
#[derive(Debug, Clone, Serialize)]
pub struct AwsPrincipalContext {
    /// The AWS account ID embedded in the generated principal ARN.
    pub aws_account_id: String,
    /// Prefix for the external ID embedded in the generated role name.
    pub aws_external_id_prefix: AwsExternalIdPrefix,
}
304
305impl AwsPrincipalContext {
306    pub fn to_principal_string(&self, aws_external_id_suffix: CatalogItemId) -> String {
307        format!(
308            "arn:aws:iam::{}:role/mz_{}_{}",
309            self.aws_account_id, self.aws_external_id_prefix, aws_external_id_suffix
310        )
311    }
312}