1use std::collections::{BTreeMap, BTreeSet};
11use std::num::NonZero;
12
13use anyhow::bail;
14use bytesize::ByteSize;
15use ipnet::IpNet;
16use mz_adapter_types::bootstrap_builtin_cluster_config::BootstrapBuiltinClusterConfig;
17use mz_auth::password::Password;
18use mz_build_info::BuildInfo;
19use mz_cloud_resources::AwsExternalIdPrefix;
20use mz_controller::clusters::ReplicaAllocation;
21use mz_license_keys::ValidatedLicenseKey;
22use mz_orchestrator::MemoryLimit;
23use mz_ore::cast::CastFrom;
24use mz_ore::metrics::MetricsRegistry;
25use mz_persist_client::PersistClient;
26use mz_repr::CatalogItemId;
27use mz_repr::adt::numeric::Numeric;
28use mz_sql::catalog::CatalogError as SqlCatalogError;
29use mz_sql::catalog::EnvironmentId;
30use serde::Serialize;
31
32use crate::durable::{CatalogError, DurableCatalogState};
33
/// Number of bytes in one gibibyte (2^30); used to convert memory limits into credits.
const GIB: u64 = 1024 * 1024 * 1024;
35
/// Configuration needed to open the catalog: the durable storage handle, a
/// metrics registry, and the in-memory state configuration.
#[derive(Debug)]
pub struct Config<'a> {
    /// Handle to the durable catalog state.
    pub storage: Box<dyn DurableCatalogState>,
    /// Registry against which metrics are created and reported.
    pub metrics_registry: &'a MetricsRegistry,
    /// Configuration of the in-memory catalog state (see [`StateConfig`]).
    pub state: StateConfig,
}
45
/// Configuration of the in-memory catalog state.
#[derive(Debug)]
pub struct StateConfig {
    /// Whether unsafe (testing-only) features are allowed.
    pub unsafe_mode: bool,
    /// Whether to enable all feature flags regardless of their defaults.
    pub all_features: bool,
    /// Static information about this build of the binary.
    pub build_info: &'static BuildInfo,
    /// The generation of this deployment.
    pub deploy_generation: u64,
    /// The ID of the environment this catalog belongs to.
    pub environment_id: EnvironmentId,
    /// Whether the catalog is opened in read-only mode.
    pub read_only: bool,
    /// Function that returns the current wall-clock time.
    pub now: mz_ore::now::NowFn,
    /// Timestamp chosen at boot. NOTE(review): exact semantics (oracle vs.
    /// wall clock) are defined by the caller — confirm there.
    pub boot_ts: mz_repr::Timestamp,
    /// Whether to skip catalog migrations when opening.
    pub skip_migrations: bool,
    /// Map from replica size name to its resource allocation.
    pub cluster_replica_sizes: ClusterReplicaSizeMap,
    /// Bootstrap configuration for the builtin system cluster.
    pub builtin_system_cluster_config: BootstrapBuiltinClusterConfig,
    /// Bootstrap configuration for the builtin catalog server cluster.
    pub builtin_catalog_server_cluster_config: BootstrapBuiltinClusterConfig,
    /// Bootstrap configuration for the builtin probe cluster.
    pub builtin_probe_cluster_config: BootstrapBuiltinClusterConfig,
    /// Bootstrap configuration for the builtin support cluster.
    pub builtin_support_cluster_config: BootstrapBuiltinClusterConfig,
    /// Bootstrap configuration for the builtin analytics cluster.
    pub builtin_analytics_cluster_config: BootstrapBuiltinClusterConfig,
    /// Default values for system parameters.
    pub system_parameter_defaults: BTreeMap<String, String>,
    /// System parameter values loaded from an external source, if any;
    /// presumably these override the defaults above — confirm with callers.
    pub remote_system_parameters: Option<BTreeMap<String, String>>,
    /// Availability zones available to this environment.
    pub availability_zones: Vec<String>,
    /// IP ranges used for this environment's egress traffic.
    pub egress_addresses: Vec<IpNet>,
    /// Context for rendering AWS principals (see [`AwsPrincipalContext`]).
    pub aws_principal_context: Option<AwsPrincipalContext>,
    /// AWS PrivateLink availability zone IDs supported by this environment,
    /// if any.
    pub aws_privatelink_availability_zones: Option<BTreeSet<String>>,
    /// Host name under which the HTTP server is reachable, if known.
    pub http_host_name: Option<String>,
    /// Shared context for storage connections.
    pub connection_context: mz_storage_types::connections::ConnectionContext,
    /// Configuration for migrating builtin items (see
    /// [`BuiltinItemMigrationConfig`]).
    pub builtin_item_migration_config: BuiltinItemMigrationConfig,
    /// Persist client for this environment.
    pub persist_client: PersistClient,
    /// If set, overrides whether the expression cache is enabled.
    pub enable_expression_cache_override: Option<bool>,
    /// Version of the Helm chart that deployed this environment, if any.
    pub helm_chart_version: Option<String>,
    /// If set, an externally supplied login password for the `mz_system` user.
    pub external_login_password_mz_system: Option<Password>,
    /// The validated license key for this environment.
    pub license_key: ValidatedLicenseKey,
}
105
/// Configuration for migrating builtin items.
#[derive(Debug)]
pub struct BuiltinItemMigrationConfig {
    /// Persist client used during the migration.
    pub persist_client: PersistClient,
    /// Whether the process is running in read-only mode.
    pub read_only: bool,
    /// NOTE(review): presumably identifies an item (or fingerprint) whose
    /// migration should be forced even when unchanged — confirm with the
    /// migration code that consumes this.
    pub force_migration: Option<String>,
}
112
/// A map from cluster replica size name (e.g. `"scale=1,workers=4"`) to the
/// resource allocation backing that size.
#[derive(Debug, Clone, Serialize)]
pub struct ClusterReplicaSizeMap(pub BTreeMap<String, ReplicaAllocation>);
115
impl ClusterReplicaSizeMap {
    /// Parses a `ClusterReplicaSizeMap` from `s`, a JSON object mapping size
    /// names to [`ReplicaAllocation`]s.
    ///
    /// When `credit_consumption_from_memory` is true, each entry's
    /// `credits_per_hour` is overwritten with `memory_limit * scale`
    /// expressed in GiB.
    ///
    /// # Errors
    ///
    /// Fails if the JSON does not deserialize, or if
    /// `credit_consumption_from_memory` is set and an entry has no memory
    /// limit.
    pub fn parse_from_str(s: &str, credit_consumption_from_memory: bool) -> anyhow::Result<Self> {
        let mut cluster_replica_sizes: BTreeMap<String, ReplicaAllocation> =
            serde_json::from_str(s)?;
        if credit_consumption_from_memory {
            for (name, replica) in cluster_replica_sizes.iter_mut() {
                let Some(memory_limit) = replica.memory_limit else {
                    bail!("No memory limit found in cluster definition for {name}");
                };
                // Total memory across all processes of the replica, in bytes;
                // credits are charged at one credit per GiB per hour.
                let total_memory = memory_limit.0 * replica.scale.get();
                replica.credits_per_hour = Numeric::from(total_memory.0) / Numeric::from(GIB);
            }
        }
        Ok(Self(cluster_replica_sizes))
    }

    /// Iterates over all allocations in the map that are not disabled.
    pub fn enabled_allocations(&self) -> impl Iterator<Item = (&String, &ReplicaAllocation)> {
        self.0.iter().filter(|(_, a)| !a.disabled)
    }

    /// Looks up the allocation for the size named `name`.
    ///
    /// # Errors
    ///
    /// Returns an `UnknownClusterReplicaSize` catalog error when `name` is
    /// not present in the map.
    pub fn get_allocation_by_name(&self, name: &str) -> Result<&ReplicaAllocation, CatalogError> {
        self.0.get(name).ok_or_else(|| {
            CatalogError::Catalog(SqlCatalogError::UnknownClusterReplicaSize(name.into()))
        })
    }

    /// Returns a size map populated with synthetic replica sizes for use in
    /// tests.
    ///
    /// Generated entries (all enabled unless noted):
    /// * `scale=1,workers=W` for W in {1,2,4,8,16,32}, both without a memory
    ///   limit and with 4/8/16/32 GiB limits;
    /// * `scale=S,workers=1` and `scale=S,workers=S` for S in {2,4,8,16,32};
    /// * `scale=1,workers=8,mem=SGiB` for S in {2,4,8,16,32};
    /// * `scale=2,workers=4`;
    /// * `free` — disabled, `is_cc`, zero credits.
    pub fn for_tests() -> Self {
        // One family of entries per power-of-two worker count, with optional
        // memory limits.
        let mut inner = (0..=5)
            .flat_map(|i| {
                let workers = 1 << i;
                [
                    (format!("scale=1,workers={workers}"), None),
                    (format!("scale=1,workers={workers},mem=4GiB"), Some(4)),
                    (format!("scale=1,workers={workers},mem=8GiB"), Some(8)),
                    (format!("scale=1,workers={workers},mem=16GiB"), Some(16)),
                    (format!("scale=1,workers={workers},mem=32GiB"), Some(32)),
                ]
                .map(|(name, memory_limit)| {
                    (
                        name,
                        ReplicaAllocation {
                            memory_limit: memory_limit.map(|gib| MemoryLimit(ByteSize::gib(gib))),
                            cpu_limit: None,
                            disk_limit: None,
                            scale: NonZero::new(1).expect("not zero"),
                            workers: NonZero::new(workers).expect("not zero"),
                            credits_per_hour: 1.into(),
                            cpu_exclusive: false,
                            is_cc: false,
                            swap_enabled: false,
                            disabled: false,
                            selectors: BTreeMap::default(),
                        },
                    )
                })
            })
            .collect::<BTreeMap<_, _>>();

        // Entries that scale out (multiple processes) rather than up.
        for i in 1..=5 {
            let scale = 1 << i;
            inner.insert(
                format!("scale={scale},workers=1"),
                ReplicaAllocation {
                    memory_limit: None,
                    cpu_limit: None,
                    disk_limit: None,
                    scale: NonZero::new(scale).expect("not zero"),
                    workers: NonZero::new(1).expect("not zero"),
                    credits_per_hour: scale.into(),
                    cpu_exclusive: false,
                    is_cc: false,
                    swap_enabled: false,
                    disabled: false,
                    selectors: BTreeMap::default(),
                },
            );

            inner.insert(
                format!("scale={scale},workers={scale}"),
                ReplicaAllocation {
                    memory_limit: None,
                    cpu_limit: None,
                    disk_limit: None,
                    scale: NonZero::new(scale).expect("not zero"),
                    workers: NonZero::new(scale.into()).expect("not zero"),
                    credits_per_hour: scale.into(),
                    cpu_exclusive: false,
                    is_cc: false,
                    swap_enabled: false,
                    disabled: false,
                    selectors: BTreeMap::default(),
                },
            );

            // Fixed shape (scale=1, workers=8) with a varying memory limit;
            // `scale * (1 << 30)` converts GiB to bytes.
            inner.insert(
                format!("scale=1,workers=8,mem={scale}GiB"),
                ReplicaAllocation {
                    memory_limit: Some(MemoryLimit(ByteSize(u64::cast_from(scale) * (1 << 30)))),
                    cpu_limit: None,
                    disk_limit: None,
                    scale: NonZero::new(1).expect("not zero"),
                    workers: NonZero::new(8).expect("not zero"),
                    credits_per_hour: 1.into(),
                    cpu_exclusive: false,
                    is_cc: false,
                    swap_enabled: false,
                    disabled: false,
                    selectors: BTreeMap::default(),
                },
            );
        }

        inner.insert(
            "scale=2,workers=4".to_string(),
            ReplicaAllocation {
                memory_limit: None,
                cpu_limit: None,
                disk_limit: None,
                scale: NonZero::new(2).expect("not zero"),
                workers: NonZero::new(4).expect("not zero"),
                credits_per_hour: 2.into(),
                cpu_exclusive: false,
                is_cc: false,
                swap_enabled: false,
                disabled: false,
                selectors: BTreeMap::default(),
            },
        );

        // A disabled, zero-credit size for exercising the disabled path.
        inner.insert(
            "free".to_string(),
            ReplicaAllocation {
                memory_limit: None,
                cpu_limit: None,
                disk_limit: None,
                scale: NonZero::new(1).expect("not zero"),
                workers: NonZero::new(1).expect("not zero"),
                credits_per_hour: 0.into(),
                cpu_exclusive: false,
                is_cc: true,
                swap_enabled: false,
                disabled: true,
                selectors: BTreeMap::default(),
            },
        );

        Self(inner)
    }
}
293
/// Context used to render AWS principal strings for this environment (see
/// [`AwsPrincipalContext::to_principal_string`]).
#[derive(Debug, Clone, Serialize)]
pub struct AwsPrincipalContext {
    /// The AWS account ID that owns the IAM roles.
    pub aws_account_id: String,
    /// Prefix applied to external IDs in generated role names.
    pub aws_external_id_prefix: AwsExternalIdPrefix,
}
303
304impl AwsPrincipalContext {
305 pub fn to_principal_string(&self, aws_external_id_suffix: CatalogItemId) -> String {
306 format!(
307 "arn:aws:iam::{}:role/mz_{}_{}",
308 self.aws_account_id, self.aws_external_id_prefix, aws_external_id_suffix
309 )
310 }
311}
312
#[cfg(test)]
#[allow(clippy::unwrap_used)]
mod tests {
    use super::*;

    // With `credit_consumption_from_memory` set, `parse_from_str` must derive
    // `credits_per_hour` from `memory_limit * scale`, expressed in GiB.
    #[mz_ore::test]
    #[cfg_attr(miri, ignore)]
    fn cluster_replica_size_credits_from_memory() {
        let raw = r#"{
            "test": {
                "memory_limit": "1000MiB",
                "scale": 2,
                "workers": 10,
                "credits_per_hour": "0"
            }
        }"#;
        let sizes = ClusterReplicaSizeMap::parse_from_str(raw, true).unwrap();

        let allocation = sizes.get_allocation_by_name("test").unwrap();
        // 1000 MiB * scale 2 = 2000 MiB = 2000/1024 GiB.
        let want = Numeric::from(2000) / Numeric::from(1024);
        assert_eq!(allocation.credits_per_hour, want);
    }
}