1use std::collections::{BTreeMap, BTreeSet};
11use std::num::NonZero;
12
13use anyhow::bail;
14use bytesize::ByteSize;
15use ipnet::IpNet;
16use mz_adapter_types::bootstrap_builtin_cluster_config::BootstrapBuiltinClusterConfig;
17use mz_auth::password::Password;
18use mz_build_info::BuildInfo;
19use mz_cloud_resources::AwsExternalIdPrefix;
20use mz_controller::clusters::ReplicaAllocation;
21use mz_license_keys::ValidatedLicenseKey;
22use mz_orchestrator::MemoryLimit;
23use mz_ore::cast::CastFrom;
24use mz_ore::metrics::MetricsRegistry;
25use mz_persist_client::PersistClient;
26use mz_repr::CatalogItemId;
27use mz_repr::adt::numeric::Numeric;
28use mz_sql::catalog::CatalogError as SqlCatalogError;
29use mz_sql::catalog::EnvironmentId;
30use serde::Serialize;
31
32use crate::durable::{CatalogError, DurableCatalogState};
33
/// Number of bytes in one gibibyte; used to normalize memory limits when
/// deriving credit consumption from memory.
const GIB: u64 = 1024 * 1024 * 1024;
35
/// Configuration needed to construct a catalog.
#[derive(Debug)]
pub struct Config<'a> {
    /// The durable storage backing the catalog.
    pub storage: Box<dyn DurableCatalogState>,
    /// The registry that catalog metrics are registered with.
    pub metrics_registry: &'a MetricsRegistry,
    /// Configuration for the in-memory catalog state.
    pub state: StateConfig,
}
45
/// Configuration for constructing the in-memory catalog state.
#[derive(Debug)]
pub struct StateConfig {
    /// Whether unsafe mode is enabled.
    pub unsafe_mode: bool,
    /// Whether all features are enabled
    /// (NOTE(review): presumably force-enables feature flags — confirm with callers).
    pub all_features: bool,
    /// Information about this build of the binary.
    pub build_info: &'static BuildInfo,
    /// The generation of this deployment.
    pub deploy_generation: u64,
    /// The ID of the environment this catalog belongs to.
    pub environment_id: EnvironmentId,
    /// Whether the catalog is opened in read-only mode.
    pub read_only: bool,
    /// Function for computing the current wall-clock time.
    pub now: mz_ore::now::NowFn,
    /// Timestamp established at boot (assumption from name — confirm against caller).
    pub boot_ts: mz_repr::Timestamp,
    /// Whether to skip catalog migrations.
    pub skip_migrations: bool,
    /// Map from replica size name to its resource allocation.
    pub cluster_replica_sizes: ClusterReplicaSizeMap,
    /// Bootstrap configuration for the built-in system cluster.
    pub builtin_system_cluster_config: BootstrapBuiltinClusterConfig,
    /// Bootstrap configuration for the built-in catalog server cluster.
    pub builtin_catalog_server_cluster_config: BootstrapBuiltinClusterConfig,
    /// Bootstrap configuration for the built-in probe cluster.
    pub builtin_probe_cluster_config: BootstrapBuiltinClusterConfig,
    /// Bootstrap configuration for the built-in support cluster.
    pub builtin_support_cluster_config: BootstrapBuiltinClusterConfig,
    /// Bootstrap configuration for the built-in analytics cluster.
    pub builtin_analytics_cluster_config: BootstrapBuiltinClusterConfig,
    /// Default values for system parameters.
    pub system_parameter_defaults: BTreeMap<String, String>,
    /// System parameter values sourced remotely, if any
    /// (NOTE(review): presumably from a remote parameter frontend — confirm).
    pub remote_system_parameters: Option<BTreeMap<String, String>>,
    /// Availability zones known to the environment.
    pub availability_zones: Vec<String>,
    /// Networks from which egress traffic may originate
    /// (assumption from name — confirm against producer).
    pub egress_addresses: Vec<IpNet>,
    /// Context for rendering AWS IAM principal strings, when available.
    pub aws_principal_context: Option<AwsPrincipalContext>,
    /// Availability zones supported for AWS PrivateLink, when available.
    pub aws_privatelink_availability_zones: Option<BTreeSet<String>>,
    /// Host name of the HTTP endpoint, when known.
    pub http_host_name: Option<String>,
    /// Context required to create storage connections.
    pub connection_context: mz_storage_types::connections::ConnectionContext,
    /// Configuration for migrating built-in items.
    pub builtin_item_migration_config: BuiltinItemMigrationConfig,
    /// Client for the persist service.
    pub persist_client: PersistClient,
    /// Optional override for whether the expression cache is enabled.
    pub enable_expression_cache_override: Option<bool>,
    /// Version of the Helm chart this was deployed with, if applicable.
    pub helm_chart_version: Option<String>,
    /// Externally provided login password for the `mz_system` user, if any.
    pub external_login_password_mz_system: Option<Password>,
    /// The validated license key.
    pub license_key: ValidatedLicenseKey,
}
105
/// Configuration for migrating built-in items.
#[derive(Debug)]
pub struct BuiltinItemMigrationConfig {
    /// Client for the persist service used during migration.
    pub persist_client: PersistClient,
    /// Whether the migration runs in read-only mode.
    pub read_only: bool,
    /// Optional trigger to force a migration
    /// (NOTE(review): semantics of the string not visible here — confirm with consumer).
    pub force_migration: Option<String>,
}
112
/// A mapping from cluster replica size name to its [`ReplicaAllocation`].
#[derive(Debug, Clone, Serialize)]
pub struct ClusterReplicaSizeMap(pub BTreeMap<String, ReplicaAllocation>);
115
116impl ClusterReplicaSizeMap {
117 pub fn parse_from_str(s: &str, credit_consumption_from_memory: bool) -> anyhow::Result<Self> {
118 let mut cluster_replica_sizes: BTreeMap<String, ReplicaAllocation> =
119 serde_json::from_str(s)?;
120 if credit_consumption_from_memory {
121 for (name, replica) in cluster_replica_sizes.iter_mut() {
122 let Some(memory_limit) = replica.memory_limit else {
123 bail!("No memory limit found in cluster definition for {name}");
124 };
125 replica.credits_per_hour = Numeric::from(
126 (memory_limit.0 * replica.scale.get() * u64::cast_from(replica.workers)).0,
127 ) / Numeric::from(1 * GIB);
128 }
129 }
130 Ok(Self(cluster_replica_sizes))
131 }
132
133 pub fn enabled_allocations(&self) -> impl Iterator<Item = (&String, &ReplicaAllocation)> {
135 self.0.iter().filter(|(_, a)| !a.disabled)
136 }
137
138 pub fn get_allocation_by_name(&self, name: &str) -> Result<&ReplicaAllocation, CatalogError> {
141 self.0.get(name).ok_or_else(|| {
142 CatalogError::Catalog(SqlCatalogError::UnknownClusterReplicaSize(name.into()))
143 })
144 }
145
146 pub fn for_tests() -> Self {
151 let mut inner = (0..=5)
173 .flat_map(|i| {
174 let workers = 1 << i;
175 [
176 (format!("scale=1,workers={workers}"), None),
177 (format!("scale=1,workers={workers},mem=4GiB"), Some(4)),
178 (format!("scale=1,workers={workers},mem=8GiB"), Some(8)),
179 (format!("scale=1,workers={workers},mem=16GiB"), Some(16)),
180 (format!("scale=1,workers={workers},mem=32GiB"), Some(32)),
181 ]
182 .map(|(name, memory_limit)| {
183 (
184 name,
185 ReplicaAllocation {
186 memory_limit: memory_limit.map(|gib| MemoryLimit(ByteSize::gib(gib))),
187 cpu_limit: None,
188 disk_limit: None,
189 scale: NonZero::new(1).expect("not zero"),
190 workers: NonZero::new(workers).expect("not zero"),
191 credits_per_hour: 1.into(),
192 cpu_exclusive: false,
193 is_cc: false,
194 swap_enabled: false,
195 disabled: false,
196 selectors: BTreeMap::default(),
197 },
198 )
199 })
200 })
201 .collect::<BTreeMap<_, _>>();
202
203 for i in 1..=5 {
204 let scale = 1 << i;
205 inner.insert(
206 format!("scale={scale},workers=1"),
207 ReplicaAllocation {
208 memory_limit: None,
209 cpu_limit: None,
210 disk_limit: None,
211 scale: NonZero::new(scale).expect("not zero"),
212 workers: NonZero::new(1).expect("not zero"),
213 credits_per_hour: scale.into(),
214 cpu_exclusive: false,
215 is_cc: false,
216 swap_enabled: false,
217 disabled: false,
218 selectors: BTreeMap::default(),
219 },
220 );
221
222 inner.insert(
223 format!("scale={scale},workers={scale}"),
224 ReplicaAllocation {
225 memory_limit: None,
226 cpu_limit: None,
227 disk_limit: None,
228 scale: NonZero::new(scale).expect("not zero"),
229 workers: NonZero::new(scale.into()).expect("not zero"),
230 credits_per_hour: scale.into(),
231 cpu_exclusive: false,
232 is_cc: false,
233 swap_enabled: false,
234 disabled: false,
235 selectors: BTreeMap::default(),
236 },
237 );
238
239 inner.insert(
240 format!("scale=1,workers=8,mem={scale}GiB"),
241 ReplicaAllocation {
242 memory_limit: Some(MemoryLimit(ByteSize(u64::cast_from(scale) * (1 << 30)))),
243 cpu_limit: None,
244 disk_limit: None,
245 scale: NonZero::new(1).expect("not zero"),
246 workers: NonZero::new(8).expect("not zero"),
247 credits_per_hour: 1.into(),
248 cpu_exclusive: false,
249 is_cc: false,
250 swap_enabled: false,
251 disabled: false,
252 selectors: BTreeMap::default(),
253 },
254 );
255 }
256
257 inner.insert(
258 "scale=2,workers=4".to_string(),
259 ReplicaAllocation {
260 memory_limit: None,
261 cpu_limit: None,
262 disk_limit: None,
263 scale: NonZero::new(2).expect("not zero"),
264 workers: NonZero::new(4).expect("not zero"),
265 credits_per_hour: 2.into(),
266 cpu_exclusive: false,
267 is_cc: false,
268 swap_enabled: false,
269 disabled: false,
270 selectors: BTreeMap::default(),
271 },
272 );
273
274 inner.insert(
275 "free".to_string(),
276 ReplicaAllocation {
277 memory_limit: None,
278 cpu_limit: None,
279 disk_limit: None,
280 scale: NonZero::new(1).expect("not zero"),
281 workers: NonZero::new(1).expect("not zero"),
282 credits_per_hour: 0.into(),
283 cpu_exclusive: false,
284 is_cc: true,
285 swap_enabled: false,
286 disabled: true,
287 selectors: BTreeMap::default(),
288 },
289 );
290
291 Self(inner)
292 }
293}
294
/// Context needed to render AWS IAM principal strings for this environment.
#[derive(Debug, Clone, Serialize)]
pub struct AwsPrincipalContext {
    /// The AWS account ID that owns the IAM role.
    pub aws_account_id: String,
    /// Prefix used when constructing the external ID portion of the role name.
    pub aws_external_id_prefix: AwsExternalIdPrefix,
}
304
305impl AwsPrincipalContext {
306 pub fn to_principal_string(&self, aws_external_id_suffix: CatalogItemId) -> String {
307 format!(
308 "arn:aws:iam::{}:role/mz_{}_{}",
309 self.aws_account_id, self.aws_external_id_prefix, aws_external_id_suffix
310 )
311 }
312}