mz_orchestrator_kubernetes/lib.rs

1// Copyright Materialize, Inc. and contributors. All rights reserved.
2//
3// Use of this software is governed by the Business Source License
4// included in the LICENSE file.
5//
6// As of the Change Date specified in that file, in accordance with
7// the Business Source License, use of this software will be governed
8// by the Apache License, Version 2.0.
9
10use std::collections::BTreeMap;
11use std::future::Future;
12use std::num::NonZero;
13use std::sync::{Arc, Mutex};
14use std::time::{Duration, Instant};
15use std::{env, fmt};
16
17use anyhow::{Context, anyhow, bail};
18use async_trait::async_trait;
19use chrono::DateTime;
20use clap::ValueEnum;
21use cloud_resource_controller::KubernetesResourceReader;
22use futures::TryFutureExt;
23use futures::stream::{BoxStream, StreamExt};
24use k8s_openapi::DeepMerge;
25use k8s_openapi::api::apps::v1::{StatefulSet, StatefulSetSpec, StatefulSetUpdateStrategy};
26use k8s_openapi::api::core::v1::{
27    Affinity, Capabilities, Container, ContainerPort, EnvVar, EnvVarSource, EphemeralVolumeSource,
28    NodeAffinity, NodeSelector, NodeSelectorRequirement, NodeSelectorTerm, ObjectFieldSelector,
29    ObjectReference, PersistentVolumeClaim, PersistentVolumeClaimSpec,
30    PersistentVolumeClaimTemplate, Pod, PodAffinity, PodAffinityTerm, PodAntiAffinity,
31    PodSecurityContext, PodSpec, PodTemplateSpec, PreferredSchedulingTerm, ResourceRequirements,
32    SeccompProfile, Secret, SecurityContext, Service as K8sService, ServicePort, ServiceSpec,
33    Toleration, TopologySpreadConstraint, Volume, VolumeMount, VolumeResourceRequirements,
34    WeightedPodAffinityTerm,
35};
36use k8s_openapi::apimachinery::pkg::api::resource::Quantity;
37use k8s_openapi::apimachinery::pkg::apis::meta::v1::{
38    LabelSelector, LabelSelectorRequirement, OwnerReference,
39};
40use k8s_openapi::jiff::Timestamp;
41use kube::ResourceExt;
42use kube::api::{Api, DeleteParams, ObjectMeta, PartialObjectMetaExt, Patch, PatchParams};
43use kube::client::Client;
44use kube::error::Error as K8sError;
45use kube::runtime::{WatchStreamExt, watcher};
46use maplit::btreemap;
47use mz_cloud_resources::AwsExternalIdPrefix;
48use mz_cloud_resources::crd::vpc_endpoint::v1::VpcEndpoint;
49use mz_orchestrator::{
50    DiskLimit, LabelSelectionLogic, LabelSelector as MzLabelSelector, NamespacedOrchestrator,
51    OfflineReason, Orchestrator, Service, ServiceAssignments, ServiceConfig, ServiceEvent,
52    ServiceProcessMetrics, ServiceStatus, scheduling_config::*,
53};
54use mz_ore::cast::CastInto;
55use mz_ore::retry::Retry;
56use mz_ore::task::AbortOnDropHandle;
57use serde::Deserialize;
58use sha2::{Digest, Sha256};
59use tokio::sync::{mpsc, oneshot};
60use tracing::{error, info, warn};
61
62pub mod cloud_resource_controller;
63pub mod secrets;
64pub mod util;
65
66const FIELD_MANAGER: &str = "environmentd";
67const NODE_FAILURE_THRESHOLD_SECONDS: i64 = 30;
68
69const POD_TEMPLATE_HASH_ANNOTATION: &str = "environmentd.materialize.cloud/pod-template-hash";
70
71/// Configures a [`KubernetesOrchestrator`].
72#[derive(Debug, Clone)]
73pub struct KubernetesOrchestratorConfig {
74    /// The name of a Kubernetes context to use, if the Kubernetes configuration
75    /// is loaded from the local kubeconfig.
76    pub context: String,
77    /// The name of a non-default Kubernetes scheduler to use, if any.
78    pub scheduler_name: Option<String>,
79    /// Annotations to install on every service created by the orchestrator.
80    pub service_annotations: BTreeMap<String, String>,
81    /// Labels to install on every service created by the orchestrator.
82    pub service_labels: BTreeMap<String, String>,
83    /// Node selector to install on every service created by the orchestrator.
84    pub service_node_selector: BTreeMap<String, String>,
85    /// Affinity to install on every service created by the orchestrator.
86    pub service_affinity: Option<String>,
87    /// Tolerations to install on every service created by the orchestrator.
88    pub service_tolerations: Option<String>,
89    /// The service account that each service should run as, if any.
90    pub service_account: Option<String>,
91    /// The image pull policy to set for services created by the orchestrator.
92    pub image_pull_policy: KubernetesImagePullPolicy,
93    /// An AWS external ID prefix to use when making AWS operations on behalf
94    /// of the environment.
95    pub aws_external_id_prefix: Option<AwsExternalIdPrefix>,
96    /// Whether to use code coverage mode or not. Always false for production.
97    pub coverage: bool,
98    /// The Kubernetes StorageClass to use for the ephemeral volume attached to
99    /// services that request disk.
100    ///
101    /// If unspecified, the orchestrator will refuse to create services that
102    /// request disk.
103    pub ephemeral_volume_storage_class: Option<String>,
104    /// The optional fs group for service's pods' `securityContext`.
105    pub service_fs_group: Option<i64>,
106    /// The prefix to prepend to all object names.
107    pub name_prefix: Option<String>,
108    /// Whether we should attempt to collect metrics from Kubernetes.
109    pub collect_pod_metrics: bool,
110    /// Whether to annotate pods for prometheus service discovery.
111    pub enable_prometheus_scrape_annotations: bool,
112}
113
114impl KubernetesOrchestratorConfig {
115    pub fn name_prefix(&self) -> String {
116        self.name_prefix.clone().unwrap_or_default()
117    }
118}
119
120/// Specifies whether Kubernetes should pull Docker images when creating pods.
121#[derive(ValueEnum, Debug, Clone, Copy)]
122pub enum KubernetesImagePullPolicy {
123    /// Always pull the Docker image from the registry.
124    Always,
125    /// Pull the Docker image only if the image is not present.
126    IfNotPresent,
127    /// Never pull the Docker image.
128    Never,
129}
130
131impl fmt::Display for KubernetesImagePullPolicy {
132    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
133        match self {
134            KubernetesImagePullPolicy::Always => f.write_str("Always"),
135            KubernetesImagePullPolicy::IfNotPresent => f.write_str("IfNotPresent"),
136            KubernetesImagePullPolicy::Never => f.write_str("Never"),
137        }
138    }
139}
140
141impl KubernetesImagePullPolicy {
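    /// Returns the policy as a kebab-case string (cf. the `Display` impl above, which renders
    /// the Kubernetes-facing spelling). A minimal doc-test sketch:
    ///
    /// ```
    /// # use mz_orchestrator_kubernetes::KubernetesImagePullPolicy;
    /// assert_eq!(KubernetesImagePullPolicy::IfNotPresent.as_kebab_case_str(), "if-not-present");
    /// ```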
142    pub fn as_kebab_case_str(&self) -> &'static str {
143        match self {
144            Self::Always => "always",
145            Self::IfNotPresent => "if-not-present",
146            Self::Never => "never",
147        }
148    }
149}
150
151/// An orchestrator backed by Kubernetes.
152pub struct KubernetesOrchestrator {
153    client: Client,
154    kubernetes_namespace: String,
155    config: KubernetesOrchestratorConfig,
156    secret_api: Api<Secret>,
157    vpc_endpoint_api: Api<VpcEndpoint>,
158    namespaces: Mutex<BTreeMap<String, Arc<dyn NamespacedOrchestrator>>>,
159    resource_reader: Arc<KubernetesResourceReader>,
160}
161
162impl fmt::Debug for KubernetesOrchestrator {
163    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
164        f.debug_struct("KubernetesOrchestrator").finish()
165    }
166}
167
168impl KubernetesOrchestrator {
169    /// Creates a new Kubernetes orchestrator from the provided configuration.
170    pub async fn new(
171        config: KubernetesOrchestratorConfig,
172    ) -> Result<KubernetesOrchestrator, anyhow::Error> {
173        let (client, kubernetes_namespace) = util::create_client(config.context.clone()).await?;
174        let resource_reader =
175            Arc::new(KubernetesResourceReader::new(config.context.clone()).await?);
176        Ok(KubernetesOrchestrator {
177            client: client.clone(),
178            kubernetes_namespace,
179            config,
180            secret_api: Api::default_namespaced(client.clone()),
181            vpc_endpoint_api: Api::default_namespaced(client),
182            namespaces: Mutex::new(BTreeMap::new()),
183            resource_reader,
184        })
185    }
186}
187
188impl Orchestrator for KubernetesOrchestrator {
189    fn namespace(&self, namespace: &str) -> Arc<dyn NamespacedOrchestrator> {
190        let mut namespaces = self.namespaces.lock().expect("lock poisoned");
191        Arc::clone(namespaces.entry(namespace.into()).or_insert_with(|| {
192            let (command_tx, command_rx) = mpsc::unbounded_channel();
193            let worker = OrchestratorWorker {
194                metrics_api: Api::default_namespaced(self.client.clone()),
195                service_api: Api::default_namespaced(self.client.clone()),
196                stateful_set_api: Api::default_namespaced(self.client.clone()),
197                pod_api: Api::default_namespaced(self.client.clone()),
198                owner_references: vec![],
199                command_rx,
200                name_prefix: self.config.name_prefix.clone().unwrap_or_default(),
201                collect_pod_metrics: self.config.collect_pod_metrics,
202            }
203            .spawn(format!("kubernetes-orchestrator-worker:{namespace}"));
204
205            Arc::new(NamespacedKubernetesOrchestrator {
206                pod_api: Api::default_namespaced(self.client.clone()),
207                kubernetes_namespace: self.kubernetes_namespace.clone(),
208                namespace: namespace.into(),
209                config: self.config.clone(),
210                // TODO(guswynn): make this configurable.
211                scheduling_config: Default::default(),
212                service_infos: std::sync::Mutex::new(BTreeMap::new()),
213                command_tx,
214                _worker: worker,
215            })
216        }))
217    }
218}
219
220#[derive(Clone, Copy)]
221struct ServiceInfo {
222    scale: NonZero<u16>,
223}
224
225struct NamespacedKubernetesOrchestrator {
226    pod_api: Api<Pod>,
227    kubernetes_namespace: String,
228    namespace: String,
229    config: KubernetesOrchestratorConfig,
230    scheduling_config: std::sync::RwLock<ServiceSchedulingConfig>,
231    service_infos: std::sync::Mutex<BTreeMap<String, ServiceInfo>>,
232    command_tx: mpsc::UnboundedSender<WorkerCommand>,
233    _worker: AbortOnDropHandle<()>,
234}
235
236impl fmt::Debug for NamespacedKubernetesOrchestrator {
237    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
238        f.debug_struct("NamespacedKubernetesOrchestrator")
239            .field("kubernetes_namespace", &self.kubernetes_namespace)
240            .field("namespace", &self.namespace)
241            .field("config", &self.config)
242            .finish()
243    }
244}
245
246/// Commands sent from a [`NamespacedKubernetesOrchestrator`] to its
247/// [`OrchestratorWorker`].
248///
249/// Commands for which the caller expects a result include a `result_tx` on which the
250/// [`OrchestratorWorker`] will deliver the result.
251enum WorkerCommand {
252    EnsureService {
253        desc: ServiceDescription,
254    },
255    DropService {
256        name: String,
257    },
258    ListServices {
259        namespace: String,
260        result_tx: oneshot::Sender<Vec<String>>,
261    },
262    FetchServiceMetrics {
263        name: String,
264        info: ServiceInfo,
265        result_tx: oneshot::Sender<Vec<ServiceProcessMetrics>>,
266    },
267}
268
269/// A description of a service to be created by an [`OrchestratorWorker`].
270#[derive(Debug, Clone)]
271struct ServiceDescription {
272    name: String,
273    scale: NonZero<u16>,
274    service: K8sService,
275    stateful_set: StatefulSet,
276    pod_template_hash: String,
277}
278
279/// A task executing blocking work for a [`NamespacedKubernetesOrchestrator`] in the background.
280///
281/// This type exists to enable making [`NamespacedKubernetesOrchestrator::ensure_service`] and
282/// [`NamespacedKubernetesOrchestrator::drop_service`] non-blocking, allowing invocation of these
283/// methods in latency-sensitive contexts.
284///
285/// Note that, apart from `ensure_service` and `drop_service`, this worker also handles blocking
286/// orchestrator calls that query service state (such as `list_services`). These need to be
287/// sequenced through the worker loop to ensure they linearize as expected. For example, we want to
288/// ensure that a `list_services` result contains exactly those services that were previously
289/// created with `ensure_service` and not yet dropped with `drop_service`.
290struct OrchestratorWorker {
291    metrics_api: Api<PodMetrics>,
292    service_api: Api<K8sService>,
293    stateful_set_api: Api<StatefulSet>,
294    pod_api: Api<Pod>,
295    owner_references: Vec<OwnerReference>,
296    command_rx: mpsc::UnboundedReceiver<WorkerCommand>,
297    name_prefix: String,
298    collect_pod_metrics: bool,
299}
300
301#[derive(Deserialize, Clone, Debug)]
302pub struct PodMetricsContainer {
303    pub name: String,
304    pub usage: PodMetricsContainerUsage,
305}
306
307#[derive(Deserialize, Clone, Debug)]
308pub struct PodMetricsContainerUsage {
309    pub cpu: Quantity,
310    pub memory: Quantity,
311}
312
313#[derive(Deserialize, Clone, Debug)]
314pub struct PodMetrics {
315    pub metadata: ObjectMeta,
316    pub timestamp: String,
317    pub window: String,
318    pub containers: Vec<PodMetricsContainer>,
319}
320
321impl k8s_openapi::Resource for PodMetrics {
322    const GROUP: &'static str = "metrics.k8s.io";
323    const KIND: &'static str = "PodMetrics";
324    const VERSION: &'static str = "v1beta1";
325    const API_VERSION: &'static str = "metrics.k8s.io/v1beta1";
326    const URL_PATH_SEGMENT: &'static str = "pods";
327
328    type Scope = k8s_openapi::NamespaceResourceScope;
329}
330
331impl k8s_openapi::Metadata for PodMetrics {
332    type Ty = ObjectMeta;
333
334    fn metadata(&self) -> &Self::Ty {
335        &self.metadata
336    }
337
338    fn metadata_mut(&mut self) -> &mut Self::Ty {
339        &mut self.metadata
340    }
341}
342
343// Note that these types are very weird. We are `get`-ing a
344// `List` object, and lying about it having an `ObjectMeta`
345// (it deserializes as empty, but we don't need it). The custom
346// metrics API is designed this way, which is very non-standard.
347// A discussion in the `kube` channel in the `tokio` discord
348// confirmed that this layout + using `get_subresource` is the
349// best way to handle this.
350
351#[derive(Deserialize, Clone, Debug)]
352pub struct MetricIdentifier {
353    #[serde(rename = "metricName")]
354    pub name: String,
355    // We skip `selector` for now, as we don't use it
356}
357
358#[derive(Deserialize, Clone, Debug)]
359pub struct MetricValue {
360    #[serde(rename = "describedObject")]
361    pub described_object: ObjectReference,
362    #[serde(flatten)]
363    pub metric_identifier: MetricIdentifier,
364    pub timestamp: String,
365    pub value: Quantity,
366    // We skip `windowSeconds`, as we don't need it
367}
368
369impl NamespacedKubernetesOrchestrator {
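    /// Derive the Kubernetes object name for a service; e.g., with name prefix `mz-`,
    /// orchestrator namespace `cluster`, and service id `s1`, this yields `mz-cluster-s1`
    /// (illustrative values).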
370    fn service_name(&self, id: &str) -> String {
371        format!(
372            "{}{}-{id}",
373            self.config.name_prefix.as_deref().unwrap_or(""),
374            self.namespace
375        )
376    }
377
378    /// Return a `watcher::Config` instance that limits results to the namespace
379    /// assigned to this orchestrator.
380    fn watch_pod_params(&self) -> watcher::Config {
381        let ns_selector = format!(
382            "environmentd.materialize.cloud/namespace={}",
383            self.namespace
384        );
385        // This watcher timeout must be shorter than the client read timeout.
386        watcher::Config::default().timeout(59).labels(&ns_selector)
387    }
388
389    /// Convert a higher-level label key to the actual one we
390    /// will give to Kubernetes
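    /// (e.g., the key `scale` in orchestrator namespace `cluster` becomes
    /// `cluster.environmentd.materialize.cloud/scale`; `cluster` here is illustrative).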
391    fn make_label_key(&self, key: &str) -> String {
392        format!("{}.environmentd.materialize.cloud/{}", self.namespace, key)
393    }
394
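    /// Translate an orchestrator label selector into a Kubernetes `LabelSelectorRequirement`.
    /// For example, `Eq { value: "1" }` on label `scale` becomes the requirement
    /// `{key: "<namespace>.environmentd.materialize.cloud/scale", operator: "In", values: ["1"]}`.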
395    fn label_selector_to_k8s(
396        &self,
397        MzLabelSelector { label_name, logic }: MzLabelSelector,
398    ) -> Result<LabelSelectorRequirement, anyhow::Error> {
399        let (operator, values) = match logic {
400            LabelSelectionLogic::Eq { value } => Ok(("In", vec![value])),
401            LabelSelectionLogic::NotEq { value } => Ok(("NotIn", vec![value])),
402            LabelSelectionLogic::Exists => Ok(("Exists", vec![])),
403            LabelSelectionLogic::NotExists => Ok(("DoesNotExist", vec![])),
404            LabelSelectionLogic::InSet { values } => {
405                if values.is_empty() {
406                    Err(anyhow!(
407                        "Invalid selector logic for {label_name}: empty `in` set"
408                    ))
409                } else {
410                    Ok(("In", values))
411                }
412            }
413            LabelSelectionLogic::NotInSet { values } => {
414                if values.is_empty() {
415                    Err(anyhow!(
416                        "Invalid selector logic for {label_name}: empty `notin` set"
417                    ))
418                } else {
419                    Ok(("NotIn", values))
420                }
421            }
422        }?;
423        let lsr = LabelSelectorRequirement {
424            key: self.make_label_key(&label_name),
425            operator: operator.to_string(),
426            values: Some(values),
427        };
428        Ok(lsr)
429    }
430
431    fn send_command(&self, cmd: WorkerCommand) {
432        self.command_tx.send(cmd).expect("worker task not dropped");
433    }
434}
435
436#[derive(Debug)]
437struct ScaledQuantity {
438    integral_part: u64,
439    exponent: i8,
440    base10: bool,
441}
442
443impl ScaledQuantity {
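    /// Convert to an integer at the requested scale, provided the bases match. For example, a
    /// quantity parsed from "512Mi" (integral_part 512, exponent 20, base-2) yields
    /// `Some(512 << 20)` bytes from `try_to_integer(0, false)`, while a base mismatch returns `None`.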
444    pub fn try_to_integer(&self, scale: i8, base10: bool) -> Option<u64> {
445        if base10 != self.base10 {
446            return None;
447        }
448        let exponent = self.exponent - scale;
449        let mut result = self.integral_part;
450        let base = if self.base10 { 10 } else { 2 };
451        if exponent < 0 {
452            for _ in exponent..0 {
453                result /= base;
454            }
455        } else {
456            for _ in 0..exponent {
457                result = result.checked_mul(base)?;
458            }
459        }
460        Some(result)
461    }
462}
463
464// Parse a k8s `Quantity` object
465// into a numeric value.
466//
467// This is intended to support collecting CPU and Memory data.
468// Thus, there are a few things that Kubernetes attempts to do that we don't,
469// because I've never observed metrics-server specifically sending them:
470// (1) Handle negative numbers (because it's not useful for that use-case)
471// (2) Handle non-integers (because I have never observed them being actually sent)
472// (3) Handle scientific notation (e.g. 1.23e2)
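//
// For example, "250m" (CPU millicores) parses to
// `ScaledQuantity { integral_part: 250, exponent: -3, base10: true }`, and "512Mi" (memory)
// parses to `ScaledQuantity { integral_part: 512, exponent: 20, base10: false }`.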
473fn parse_k8s_quantity(s: &str) -> Result<ScaledQuantity, anyhow::Error> {
474    const DEC_SUFFIXES: &[(&str, i8)] = &[
475        ("n", -9),
476        ("u", -6),
477        ("m", -3),
478        ("", 0),
479        ("k", 3), // yep, intentionally lowercase.
480        ("M", 6),
481        ("G", 9),
482        ("T", 12),
483        ("P", 15),
484        ("E", 18),
485    ];
486    const BIN_SUFFIXES: &[(&str, i8)] = &[
487        ("", 0),
488        ("Ki", 10),
489        ("Mi", 20),
490        ("Gi", 30),
491        ("Ti", 40),
492        ("Pi", 50),
493        ("Ei", 60),
494    ];
495
496    let (positive, s) = match s.chars().next() {
497        Some('+') => (true, &s[1..]),
498        Some('-') => (false, &s[1..]),
499        _ => (true, s),
500    };
501
502    if !positive {
503        anyhow::bail!("Negative numbers not supported")
504    }
505
506    fn is_suffix_char(ch: char) -> bool {
507        "numkMGTPEKi".contains(ch)
508    }
509    let (num, suffix) = match s.find(is_suffix_char) {
510        None => (s, ""),
511        Some(idx) => s.split_at(idx),
512    };
513    let num: u64 = num.parse()?;
514    let (exponent, base10) = if let Some((_, exponent)) =
515        DEC_SUFFIXES.iter().find(|(target, _)| suffix == *target)
516    {
517        (exponent, true)
518    } else if let Some((_, exponent)) = BIN_SUFFIXES.iter().find(|(target, _)| suffix == *target) {
519        (exponent, false)
520    } else {
521        anyhow::bail!("Unrecognized suffix: {suffix}");
522    };
523    Ok(ScaledQuantity {
524        integral_part: num,
525        exponent: *exponent,
526        base10,
527    })
528}
529
530#[async_trait]
531impl NamespacedOrchestrator for NamespacedKubernetesOrchestrator {
532    async fn fetch_service_metrics(
533        &self,
534        id: &str,
535    ) -> Result<Vec<ServiceProcessMetrics>, anyhow::Error> {
536        let info = if let Some(info) = self.service_infos.lock().expect("poisoned lock").get(id) {
537            *info
538        } else {
539            // This should have been set in `ensure_service`.
540            tracing::error!("Failed to get info for {id}");
541            anyhow::bail!("Failed to get info for {id}");
542        };
543
544        let (result_tx, result_rx) = oneshot::channel();
545        self.send_command(WorkerCommand::FetchServiceMetrics {
546            name: self.service_name(id),
547            info,
548            result_tx,
549        });
550
551        let metrics = result_rx.await.expect("worker task not dropped");
552        Ok(metrics)
553    }
554
555    fn ensure_service(
556        &self,
557        id: &str,
558        ServiceConfig {
559            image,
560            init_container_image,
561            args,
562            ports: ports_in,
563            memory_limit,
564            memory_request,
565            cpu_limit,
566            cpu_request,
567            scale,
568            labels: labels_in,
569            annotations: annotations_in,
570            availability_zones,
571            other_replicas_selector,
572            replicas_selector,
573            disk_limit,
574            node_selector,
575        }: ServiceConfig,
576    ) -> Result<Box<dyn Service>, anyhow::Error> {
577        // This is extremely cheap to clone, so just look into the lock once.
578        let scheduling_config: ServiceSchedulingConfig =
579            self.scheduling_config.read().expect("poisoned").clone();
580
581        // Enable disk if the size does not disable it.
582        let disk = disk_limit != Some(DiskLimit::ZERO);
583
584        let name = self.service_name(id);
585        // The match labels should be the minimal set of labels that uniquely
586        // identify the pods in the stateful set. Changing these after the
587        // `StatefulSet` is created is not permitted by Kubernetes, and we're
588        // not yet smart enough to handle deleting and recreating the
589        // `StatefulSet`.
590        let mut match_labels = btreemap! {
591            "environmentd.materialize.cloud/namespace".into() => self.namespace.clone(),
592            "environmentd.materialize.cloud/service-id".into() => id.into(),
593        };
594        for (key, value) in &self.config.service_labels {
595            match_labels.insert(key.clone(), value.clone());
596        }
597
598        let mut labels = match_labels.clone();
599        for (key, value) in labels_in {
600            labels.insert(self.make_label_key(&key), value);
601        }
602
603        labels.insert(self.make_label_key("scale"), scale.to_string());
604
605        for port in &ports_in {
606            labels.insert(
607                format!("environmentd.materialize.cloud/port-{}", port.name),
608                "true".into(),
609            );
610        }
611        let mut limits = BTreeMap::new();
612        let mut requests = BTreeMap::new();
613        if let Some(memory_limit) = memory_limit {
614            limits.insert(
615                "memory".into(),
616                Quantity(memory_limit.0.as_u64().to_string()),
617            );
618            requests.insert(
619                "memory".into(),
620                Quantity(memory_limit.0.as_u64().to_string()),
621            );
622        }
623        if let Some(memory_request) = memory_request {
624            requests.insert(
625                "memory".into(),
626                Quantity(memory_request.0.as_u64().to_string()),
627            );
628        }
629        if let Some(cpu_limit) = cpu_limit {
630            limits.insert(
631                "cpu".into(),
632                Quantity(format!("{}m", cpu_limit.as_millicpus())),
633            );
634            requests.insert(
635                "cpu".into(),
636                Quantity(format!("{}m", cpu_limit.as_millicpus())),
637            );
638        }
639        if let Some(cpu_request) = cpu_request {
640            requests.insert(
641                "cpu".into(),
642                Quantity(format!("{}m", cpu_request.as_millicpus())),
643            );
644        }
645        let service = K8sService {
646            metadata: ObjectMeta {
647                name: Some(name.clone()),
648                ..Default::default()
649            },
650            spec: Some(ServiceSpec {
651                ports: Some(
652                    ports_in
653                        .iter()
654                        .map(|port| ServicePort {
655                            port: port.port_hint.into(),
656                            name: Some(port.name.clone()),
657                            ..Default::default()
658                        })
659                        .collect(),
660                ),
661                cluster_ip: Some("None".to_string()),
662                selector: Some(match_labels.clone()),
663                ..Default::default()
664            }),
665            status: None,
666        };
667
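        // Each process gets a stable DNS name of the form
        // `{name}-{ordinal}.{name}.{kubernetes_namespace}.svc.cluster.local`, resolvable
        // because the service above is headless (its `clusterIP` is "None").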
668        let hosts = (0..scale.get())
669            .map(|i| {
670                format!(
671                    "{name}-{i}.{name}.{}.svc.cluster.local",
672                    self.kubernetes_namespace
673                )
674            })
675            .collect::<Vec<_>>();
676        let ports = ports_in
677            .iter()
678            .map(|p| (p.name.clone(), p.port_hint))
679            .collect::<BTreeMap<_, _>>();
680
681        let mut listen_addrs = BTreeMap::new();
682        let mut peer_addrs = vec![BTreeMap::new(); hosts.len()];
683        for (name, port) in &ports {
684            listen_addrs.insert(name.clone(), format!("0.0.0.0:{port}"));
685            for (i, host) in hosts.iter().enumerate() {
686                peer_addrs[i].insert(name.clone(), format!("{host}:{port}"));
687            }
688        }
689        let mut args = args(ServiceAssignments {
690            listen_addrs: &listen_addrs,
691            peer_addrs: &peer_addrs,
692        });
693
694        // This constrains the orchestrator (for those orchestrators that support
695        // anti-affinity, today just k8s) to never schedule pods for different replicas
696        // of the same cluster on the same node. Pods from the _same_ replica are fine;
697        // pods from different clusters are also fine.
698        //
699        // The point is that if pods of two replicas are on the same node, that node
700        // going down would kill both replicas, and so the replication factor of the
701        // cluster in question is illusory.
702        let anti_affinity = Some({
703            let label_selector_requirements = other_replicas_selector
704                .clone()
705                .into_iter()
706                .map(|ls| self.label_selector_to_k8s(ls))
707                .collect::<Result<Vec<_>, _>>()?;
708            let ls = LabelSelector {
709                match_expressions: Some(label_selector_requirements),
710                ..Default::default()
711            };
712            let pat = PodAffinityTerm {
713                label_selector: Some(ls),
714                topology_key: "kubernetes.io/hostname".to_string(),
715                ..Default::default()
716            };
717
718            if !scheduling_config.soften_replication_anti_affinity {
719                PodAntiAffinity {
720                    required_during_scheduling_ignored_during_execution: Some(vec![pat]),
721                    ..Default::default()
722                }
723            } else {
724                PodAntiAffinity {
725                    preferred_during_scheduling_ignored_during_execution: Some(vec![
726                        WeightedPodAffinityTerm {
727                            weight: scheduling_config.soften_replication_anti_affinity_weight,
728                            pod_affinity_term: pat,
729                        },
730                    ]),
731                    ..Default::default()
732                }
733            }
734        });
735
736        let pod_affinity = if let Some(weight) = scheduling_config.multi_pod_az_affinity_weight {
737            // `match_labels` sufficiently selects pods in the same replica.
738            let ls = LabelSelector {
739                match_labels: Some(match_labels.clone()),
740                ..Default::default()
741            };
742            let pat = PodAffinityTerm {
743                label_selector: Some(ls),
744                topology_key: "topology.kubernetes.io/zone".to_string(),
745                ..Default::default()
746            };
747
748            Some(PodAffinity {
749                preferred_during_scheduling_ignored_during_execution: Some(vec![
750                    WeightedPodAffinityTerm {
751                        weight,
752                        pod_affinity_term: pat,
753                    },
754                ]),
755                ..Default::default()
756            })
757        } else {
758            None
759        };
760
761        let topology_spread = if scheduling_config.topology_spread.enabled {
762            let config = &scheduling_config.topology_spread;
763
764            if !config.ignore_non_singular_scale || scale.get() == 1 {
765                let label_selector_requirements = (if config.ignore_non_singular_scale {
766                    let mut replicas_selector_ignoring_scale = replicas_selector.clone();
767
768                    replicas_selector_ignoring_scale.push(mz_orchestrator::LabelSelector {
769                        label_name: "scale".into(),
770                        logic: mz_orchestrator::LabelSelectionLogic::Eq {
771                            value: "1".to_string(),
772                        },
773                    });
774
775                    replicas_selector_ignoring_scale
776                } else {
777                    replicas_selector
778                })
779                .into_iter()
780                .map(|ls| self.label_selector_to_k8s(ls))
781                .collect::<Result<Vec<_>, _>>()?;
782                let ls = LabelSelector {
783                    match_expressions: Some(label_selector_requirements),
784                    ..Default::default()
785                };
786
787                if config.soft && config.min_domains.is_some() {
788                    warn!(
789                        "topology spread is soft but min_domains is set; \
790                         Kubernetes rejects minDomains with ScheduleAnyway, \
791                         so min_domains will be ignored"
792                    );
793                }
794
795                let constraint = TopologySpreadConstraint {
796                    label_selector: Some(ls),
797                    min_domains: if config.soft {
798                        None
799                    } else {
800                        config.min_domains
801                    },
802                    max_skew: config.max_skew,
803                    topology_key: "topology.kubernetes.io/zone".to_string(),
804                    when_unsatisfiable: if config.soft {
805                        "ScheduleAnyway".to_string()
806                    } else {
807                        "DoNotSchedule".to_string()
808                    },
809                    // TODO(guswynn): restore these once they are supported.
810                    // Consider node affinities when calculating topology spread. This is the
811                    // default: <https://docs.rs/k8s-openapi/latest/k8s_openapi/api/core/v1/struct.TopologySpreadConstraint.html#structfield.node_affinity_policy>,
812                    // made explicit.
813                    // node_affinity_policy: Some("Honor".to_string()),
814                    // Do not consider node taints when calculating topology spread. This is the
815                    // default: <https://docs.rs/k8s-openapi/latest/k8s_openapi/api/core/v1/struct.TopologySpreadConstraint.html#structfield.node_taints_policy>,
816                    // made explicit.
817                    // node_taints_policy: Some("Ignore".to_string()),
818                    match_label_keys: None,
819                    // Once the above are restored, we shouldn't have `..Default::default()` here because the specifics of these fields are
820                    // subtle enough that we want compilation failures when we upgrade.
821                    ..Default::default()
822                };
823                Some(vec![constraint])
824            } else {
825                None
826            }
827        } else {
828            None
829        };
830
831        let mut pod_annotations = btreemap! {
832            // Prevent the cluster-autoscaler (or karpenter) from evicting these pods in attempts to scale down
833            // and terminate nodes.
834            // This will cost us more money, but should give us better uptime.
835            // This does not prevent all evictions by Kubernetes, only the ones initiated by the
836            // cluster-autoscaler (or karpenter). Notably, eviction of pods for resource overuse is still enabled.
837            "cluster-autoscaler.kubernetes.io/safe-to-evict".to_owned() => "false".to_string(),
838            "karpenter.sh/do-not-evict".to_owned() => "true".to_string(),
839
840            // It's called do-not-disrupt in newer versions of karpenter, so we set both for forward/backward compatibility.
841            "karpenter.sh/do-not-disrupt".to_owned() => "true".to_string(),
842        };
843        for (key, value) in annotations_in {
844            // We want to use the same prefix as our label keys.
845            pod_annotations.insert(self.make_label_key(&key), value);
846        }
847        if self.config.enable_prometheus_scrape_annotations {
848            if let Some(internal_http_port) = ports_in
849                .iter()
850                .find(|port| port.name == "internal-http")
851                .map(|port| port.port_hint.to_string())
852            {
853                // Enable prometheus scrape discovery
854                pod_annotations.insert("prometheus.io/scrape".to_owned(), "true".to_string());
855                pod_annotations.insert("prometheus.io/port".to_owned(), internal_http_port);
856                pod_annotations.insert("prometheus.io/path".to_owned(), "/metrics".to_string());
857                pod_annotations.insert("prometheus.io/scheme".to_owned(), "http".to_string());
858            }
859        }
860        for (key, value) in &self.config.service_annotations {
861            pod_annotations.insert(key.clone(), value.clone());
862        }
863
864        let default_node_selector = if disk {
865            vec![("materialize.cloud/disk".to_string(), disk.to_string())]
866        } else {
867            // if the cluster doesn't require disk, we can omit the selector,
868            // allowing it to be scheduled onto nodes with and without the
869            // selector.
870            vec![]
871        };
872
873        let node_selector: BTreeMap<String, String> = default_node_selector
874            .into_iter()
875            .chain(self.config.service_node_selector.clone())
876            .chain(node_selector)
877            .collect();
878
879        let node_affinity = if let Some(availability_zones) = availability_zones {
880            let selector = NodeSelectorTerm {
881                match_expressions: Some(vec![NodeSelectorRequirement {
882                    key: "materialize.cloud/availability-zone".to_string(),
883                    operator: "In".to_string(),
884                    values: Some(availability_zones),
885                }]),
886                match_fields: None,
887            };
888
889            if scheduling_config.soften_az_affinity {
890                Some(NodeAffinity {
891                    preferred_during_scheduling_ignored_during_execution: Some(vec![
892                        PreferredSchedulingTerm {
893                            preference: selector,
894                            weight: scheduling_config.soften_az_affinity_weight,
895                        },
896                    ]),
897                    required_during_scheduling_ignored_during_execution: None,
898                })
899            } else {
900                Some(NodeAffinity {
901                    preferred_during_scheduling_ignored_during_execution: None,
902                    required_during_scheduling_ignored_during_execution: Some(NodeSelector {
903                        node_selector_terms: vec![selector],
904                    }),
905                })
906            }
907        } else {
908            None
909        };
910
911        let mut affinity = Affinity {
912            pod_anti_affinity: anti_affinity,
913            pod_affinity,
914            node_affinity,
915            ..Default::default()
916        };
917        if let Some(service_affinity) = &self.config.service_affinity {
918            affinity.merge_from(serde_json::from_str(service_affinity)?);
919        }
920
921        let container_name = image
922            .rsplit_once('/')
923            .and_then(|(_, name_version)| name_version.rsplit_once(':'))
924            .context("`image` is not ORG/NAME:VERSION")?
925            .0
926            .to_string();
927
928        let container_security_context = if scheduling_config.security_context_enabled {
929            Some(SecurityContext {
930                privileged: Some(false),
931                run_as_non_root: Some(true),
932                allow_privilege_escalation: Some(false),
933                seccomp_profile: Some(SeccompProfile {
934                    type_: "RuntimeDefault".to_string(),
935                    ..Default::default()
936                }),
937                capabilities: Some(Capabilities {
938                    drop: Some(vec!["ALL".to_string()]),
939                    ..Default::default()
940                }),
941                ..Default::default()
942            })
943        } else {
944            None
945        };
946
947        let init_containers = init_container_image.map(|image| {
948            vec![Container {
949                name: "init".to_string(),
950                image: Some(image),
951                image_pull_policy: Some(self.config.image_pull_policy.to_string()),
952                resources: Some(ResourceRequirements {
953                    claims: None,
954                    limits: Some(limits.clone()),
955                    requests: Some(requests.clone()),
956                }),
957                security_context: container_security_context.clone(),
958                env: Some(vec![
959                    EnvVar {
960                        name: "MZ_NAMESPACE".to_string(),
961                        value_from: Some(EnvVarSource {
962                            field_ref: Some(ObjectFieldSelector {
963                                field_path: "metadata.namespace".to_string(),
964                                ..Default::default()
965                            }),
966                            ..Default::default()
967                        }),
968                        ..Default::default()
969                    },
970                    EnvVar {
971                        name: "MZ_POD_NAME".to_string(),
972                        value_from: Some(EnvVarSource {
973                            field_ref: Some(ObjectFieldSelector {
974                                field_path: "metadata.name".to_string(),
975                                ..Default::default()
976                            }),
977                            ..Default::default()
978                        }),
979                        ..Default::default()
980                    },
981                    EnvVar {
982                        name: "MZ_NODE_NAME".to_string(),
983                        value_from: Some(EnvVarSource {
984                            field_ref: Some(ObjectFieldSelector {
985                                field_path: "spec.nodeName".to_string(),
986                                ..Default::default()
987                            }),
988                            ..Default::default()
989                        }),
990                        ..Default::default()
991                    },
992                ]),
993                ..Default::default()
994            }]
995        });
996
997        let env = if self.config.coverage {
998            Some(vec![EnvVar {
999                name: "LLVM_PROFILE_FILE".to_string(),
1000                value: Some(format!("/coverage/{}-%p-%9m%c.profraw", self.namespace)),
1001                ..Default::default()
1002            }])
1003        } else {
1004            None
1005        };
1006
1007        let mut volume_mounts = vec![];
1008
1009        if self.config.coverage {
1010            volume_mounts.push(VolumeMount {
1011                name: "coverage".to_string(),
1012                mount_path: "/coverage".to_string(),
1013                ..Default::default()
1014            })
1015        }
1016
1017        let volumes = match (disk, &self.config.ephemeral_volume_storage_class) {
1018            (true, Some(ephemeral_volume_storage_class)) => {
1019                volume_mounts.push(VolumeMount {
1020                    name: "scratch".to_string(),
1021                    mount_path: "/scratch".to_string(),
1022                    ..Default::default()
1023                });
1024                args.push("--scratch-directory=/scratch".into());
1025
1026                Some(vec![Volume {
1027                    name: "scratch".to_string(),
1028                    ephemeral: Some(EphemeralVolumeSource {
1029                        volume_claim_template: Some(PersistentVolumeClaimTemplate {
1030                            spec: PersistentVolumeClaimSpec {
1031                                access_modes: Some(vec!["ReadWriteOnce".to_string()]),
1032                                storage_class_name: Some(
1033                                    ephemeral_volume_storage_class.to_string(),
1034                                ),
1035                                resources: Some(VolumeResourceRequirements {
1036                                    requests: Some(BTreeMap::from([(
1037                                        "storage".to_string(),
1038                                        Quantity(
1039                                            disk_limit
1040                                                .unwrap_or(DiskLimit::ARBITRARY)
1041                                                .0
1042                                                .as_u64()
1043                                                .to_string(),
1044                                        ),
1045                                    )])),
1046                                    ..Default::default()
1047                                }),
1048                                ..Default::default()
1049                            },
1050                            ..Default::default()
1051                        }),
1052                        ..Default::default()
1053                    }),
1054                    ..Default::default()
1055                }])
1056            }
1057            (true, None) => {
1058                return Err(anyhow!(
1059                    "service requested disk but no ephemeral volume storage class was configured"
1060                ));
1061            }
1062            (false, _) => None,
1063        };
1064
1065        if let Some(name_prefix) = &self.config.name_prefix {
1066            args.push(format!("--secrets-reader-name-prefix={}", name_prefix));
1067        }
1068
1069        let volume_claim_templates = if self.config.coverage {
1070            Some(vec![PersistentVolumeClaim {
1071                metadata: ObjectMeta {
1072                    name: Some("coverage".to_string()),
1073                    ..Default::default()
1074                },
1075                spec: Some(PersistentVolumeClaimSpec {
1076                    access_modes: Some(vec!["ReadWriteOnce".to_string()]),
1077                    resources: Some(VolumeResourceRequirements {
1078                        requests: Some(BTreeMap::from([(
1079                            "storage".to_string(),
1080                            Quantity("10Gi".to_string()),
1081                        )])),
1082                        ..Default::default()
1083                    }),
1084                    ..Default::default()
1085                }),
1086                ..Default::default()
1087            }])
1088        } else {
1089            None
1090        };
1091
1092        let security_context = if let Some(fs_group) = self.config.service_fs_group {
1093            Some(PodSecurityContext {
1094                fs_group: Some(fs_group),
1095                run_as_user: Some(fs_group),
1096                run_as_group: Some(fs_group),
1097                ..Default::default()
1098            })
1099        } else {
1100            None
1101        };
1102
1103        let mut tolerations = vec![
1104            // When the node becomes `NotReady` it indicates there is a problem
1105            // with the node. By default Kubernetes waits 300s (5 minutes)
1106            // before descheduling the pod, but we tune this to 30s for faster
1107            // recovery in the case of node failure.
1108            Toleration {
1109                effect: Some("NoExecute".into()),
1110                key: Some("node.kubernetes.io/not-ready".into()),
1111                operator: Some("Exists".into()),
1112                toleration_seconds: Some(NODE_FAILURE_THRESHOLD_SECONDS),
1113                value: None,
1114            },
1115            Toleration {
1116                effect: Some("NoExecute".into()),
1117                key: Some("node.kubernetes.io/unreachable".into()),
1118                operator: Some("Exists".into()),
1119                toleration_seconds: Some(NODE_FAILURE_THRESHOLD_SECONDS),
1120                value: None,
1121            },
1122        ];
1123        if let Some(service_tolerations) = &self.config.service_tolerations {
1124            tolerations.extend(serde_json::from_str::<Vec<_>>(service_tolerations)?);
1125        }
1126        let tolerations = Some(tolerations);
1127
1128        let mut pod_template_spec = PodTemplateSpec {
1129            metadata: Some(ObjectMeta {
1130                labels: Some(labels.clone()),
1131                // Only set `annotations` _after_ we have computed the pod template hash, so
1132                // that annotation changes don't cause pod replacements.
1133                ..Default::default()
1134            }),
1135            spec: Some(PodSpec {
1136                init_containers,
1137                containers: vec![Container {
1138                    name: container_name,
1139                    image: Some(image),
1140                    args: Some(args),
1141                    image_pull_policy: Some(self.config.image_pull_policy.to_string()),
1142                    ports: Some(
1143                        ports_in
1144                            .iter()
1145                            .map(|port| ContainerPort {
1146                                container_port: port.port_hint.into(),
1147                                name: Some(port.name.clone()),
1148                                ..Default::default()
1149                            })
1150                            .collect(),
1151                    ),
1152                    security_context: container_security_context.clone(),
1153                    resources: Some(ResourceRequirements {
1154                        claims: None,
1155                        limits: Some(limits),
1156                        requests: Some(requests),
1157                    }),
1158                    volume_mounts: if !volume_mounts.is_empty() {
1159                        Some(volume_mounts)
1160                    } else {
1161                        None
1162                    },
1163                    env,
1164                    ..Default::default()
1165                }],
1166                volumes,
1167                security_context,
1168                node_selector: Some(node_selector),
1169                scheduler_name: self.config.scheduler_name.clone(),
1170                service_account: self.config.service_account.clone(),
1171                affinity: Some(affinity),
1172                topology_spread_constraints: topology_spread,
1173                tolerations,
1174                // Setting a 0s termination grace period has the side effect of
1175                // automatically starting a new pod when the previous pod is
1176                // currently terminating. This enables recovery from a node
1177                // failure with no manual intervention. Without this setting,
1178                // the StatefulSet controller will refuse to start a new pod
1179                // until the failed node is manually removed from the Kubernetes
1180                // cluster.
1181                //
1182                // The Kubernetes documentation strongly advises against this
1183                // setting, as StatefulSets attempt to provide "at most once"
1184                // semantics [0]--that is, the guarantee that for a given pod in
1185                // a StatefulSet there is *at most* one pod with that identity
1186                // running in the cluster.
1187                //
1188                // Materialize services, however, are carefully designed to
1189                // *not* rely on this guarantee. In fact, we do not believe that
1190                // correct distributed systems can meaningfully rely on
1191                // Kubernetes's guarantee--network packets from a pod can be
1192                // arbitrarily delayed, long past that pod's termination.
1193                //
1194                // [0]: https://kubernetes.io/docs/tasks/run-application/force-delete-stateful-set-pod/#statefulset-considerations
1195                termination_grace_period_seconds: Some(0),
1196                ..Default::default()
1197            }),
1198        };
1199        let pod_template_json = serde_json::to_string(&pod_template_spec).unwrap();
1200        let mut hasher = Sha256::new();
1201        hasher.update(pod_template_json);
1202        let pod_template_hash = format!("{:x}", hasher.finalize());
1203        pod_annotations.insert(
1204            POD_TEMPLATE_HASH_ANNOTATION.to_owned(),
1205            pod_template_hash.clone(),
1206        );
1207
1208        pod_template_spec.metadata.as_mut().unwrap().annotations = Some(pod_annotations);
1209
1210        let stateful_set = StatefulSet {
1211            metadata: ObjectMeta {
1212                name: Some(name.clone()),
1213                ..Default::default()
1214            },
1215            spec: Some(StatefulSetSpec {
1216                selector: LabelSelector {
1217                    match_labels: Some(match_labels),
1218                    ..Default::default()
1219                },
1220                service_name: Some(name.clone()),
1221                replicas: Some(scale.cast_into()),
1222                template: pod_template_spec,
1223                update_strategy: Some(StatefulSetUpdateStrategy {
1224                    type_: Some("OnDelete".to_owned()),
1225                    ..Default::default()
1226                }),
1227                pod_management_policy: Some("Parallel".to_string()),
1228                volume_claim_templates,
1229                ..Default::default()
1230            }),
1231            status: None,
1232        };
1233
1234        self.send_command(WorkerCommand::EnsureService {
1235            desc: ServiceDescription {
1236                name,
1237                scale,
1238                service,
1239                stateful_set,
1240                pod_template_hash,
1241            },
1242        });
1243
1244        self.service_infos
1245            .lock()
1246            .expect("poisoned lock")
1247            .insert(id.to_string(), ServiceInfo { scale });
1248
1249        Ok(Box::new(KubernetesService { hosts, ports }))
1250    }
1251
1252    /// Drops the identified service, if it exists.
1253    fn drop_service(&self, id: &str) -> Result<(), anyhow::Error> {
1254        fail::fail_point!("kubernetes_drop_service", |_| Err(anyhow!("failpoint")));
1255        self.service_infos.lock().expect("poisoned lock").remove(id);
1256
1257        self.send_command(WorkerCommand::DropService {
1258            name: self.service_name(id),
1259        });
1260
1261        Ok(())
1262    }
1263
1264    /// Lists the identifiers of all known services.
1265    async fn list_services(&self) -> Result<Vec<String>, anyhow::Error> {
1266        let (result_tx, result_rx) = oneshot::channel();
1267        self.send_command(WorkerCommand::ListServices {
1268            namespace: self.namespace.clone(),
1269            result_tx,
1270        });
1271
1272        let list = result_rx.await.expect("worker task not dropped");
1273        Ok(list)
1274    }
1275
1276    fn watch_services(&self) -> BoxStream<'static, Result<ServiceEvent, anyhow::Error>> {
1277        fn into_service_event(pod: Pod) -> Result<ServiceEvent, anyhow::Error> {
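            // StatefulSet pods are named `{service_name}-{ordinal}`, so the trailing
            // dash-separated segment of the pod name is the process ID within the service.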
1278            let process_id = pod.name_any().split('-').next_back().unwrap().parse()?;
1279            let service_id_label = "environmentd.materialize.cloud/service-id";
1280            let service_id = pod
1281                .labels()
1282                .get(service_id_label)
1283                .ok_or_else(|| anyhow!("missing label: {service_id_label}"))?
1284                .clone();
1285
1286            let oomed = pod
1287                .status
1288                .as_ref()
1289                .and_then(|status| status.container_statuses.as_ref())
1290                .map(|container_statuses| {
1291                    container_statuses.iter().any(|cs| {
1292                        // The container might have already transitioned from "terminated" to
1293                        // "waiting"/"running" state, in which case we need to check its previous
1294                        // state to find out why it terminated.
1295                        let current_state = cs.state.as_ref().and_then(|s| s.terminated.as_ref());
1296                        let last_state = cs.last_state.as_ref().and_then(|s| s.terminated.as_ref());
1297                        let termination_state = current_state.or(last_state);
1298
1299                        // The interesting exit codes are:
1300                        //  * 135 (SIGBUS): occurs when lgalloc runs out of disk
1301                        //  * 137 (SIGKILL): occurs when the OOM killer terminates the container
1302                        //  * 167: occurs when the lgalloc or memory limiter terminates the process
1303                        // We treat all of these as OOM conditions since swap and lgalloc use
1304                        // disk only for spilling memory.
1305                        let exit_code = termination_state.map(|s| s.exit_code);
1306                        exit_code.is_some_and(|e| [135, 137, 167].contains(&e))
1307                    })
1308                })
1309                .unwrap_or(false);
1310
1311            let (pod_ready, last_probe_time) = pod
1312                .status
1313                .and_then(|status| status.conditions)
1314                .and_then(|conditions| conditions.into_iter().find(|c| c.type_ == "Ready"))
1315                .map(|c| (c.status == "True", c.last_probe_time))
1316                .unwrap_or((false, None));
1317
1318            let status = if pod_ready {
1319                ServiceStatus::Online
1320            } else {
1321                ServiceStatus::Offline(oomed.then_some(OfflineReason::OomKilled))
1322            };
1323            let time = if let Some(time) = last_probe_time {
1324                time.0
1325            } else {
1326                Timestamp::now()
1327            };
1328
1329            Ok(ServiceEvent {
1330                service_id,
1331                process_id,
1332                status,
1333                time: DateTime::from_timestamp_nanos(
1334                    time.as_nanosecond().try_into().expect("must fit"),
1335                ),
1336            })
1337        }
1338
1339        let stream = watcher(self.pod_api.clone(), self.watch_pod_params())
1340            .touched_objects()
1341            .filter_map(|object| async move {
1342                match object {
1343                    Ok(pod) => Some(into_service_event(pod)),
1344                    Err(error) => {
1345                        // We assume that errors returned by Kubernetes are usually transient, so we
1346                        // just log a warning and otherwise ignore them.
1347                        tracing::warn!("service watch error: {error}");
1348                        None
1349                    }
1350                }
1351            });
1352        Box::pin(stream)
1353    }
1354
1355    fn update_scheduling_config(&self, config: ServiceSchedulingConfig) {
1356        *self.scheduling_config.write().expect("poisoned") = config;
1357    }
1358}
1359
1360impl OrchestratorWorker {
1361    fn spawn(self, name: String) -> AbortOnDropHandle<()> {
1362        mz_ore::task::spawn(|| name, self.run()).abort_on_drop()
1363    }
1364
1365    async fn run(mut self) {
1366        {
1367            info!("initializing Kubernetes orchestrator worker");
1368            let start = Instant::now();
1369
1370            // Fetch the owner reference for our own pod (usually a
1371            // StatefulSet), so that we can propagate it to the services we
1372            // create.
1373            let hostname = env::var("HOSTNAME").unwrap_or_else(|_| panic!("HOSTNAME environment variable missing or invalid; required for Kubernetes orchestrator"));
1374            let orchestrator_pod = Retry::default()
1375                .clamp_backoff(Duration::from_secs(10))
1376                .retry_async(|_| self.pod_api.get(&hostname))
1377                .await
1378                .expect("always retries on error");
1379            self.owner_references
1380                .extend(orchestrator_pod.owner_references().into_iter().cloned());
1381
1382            if !self.collect_pod_metrics {
1383                info!(
1384                    "pod metrics collection is disabled; resource usage graphs in the console will not be available"
1385                );
1386            }
1387
1388            info!(
1389                "Kubernetes orchestrator worker initialized in {:?}",
1390                start.elapsed()
1391            );
1392        }
1393
1394        while let Some(cmd) = self.command_rx.recv().await {
1395            self.handle_command(cmd).await;
1396        }
1397    }
1398
1399    /// Handle a worker command.
1400    ///
1401    /// If handling the command fails, it is automatically retried. All command handlers return
1402    /// [`K8sError`], so we can reasonably assume that a failure is caused by issues communicating
1403    /// with the K8S server and that retrying resolves them eventually.
1404    async fn handle_command(&self, cmd: WorkerCommand) {
1405        async fn retry<F, U, R>(f: F, cmd_type: &str) -> R
1406        where
1407            F: Fn() -> U,
1408            U: Future<Output = Result<R, K8sError>>,
1409        {
1410            Retry::default()
1411                .clamp_backoff(Duration::from_secs(10))
1412                .retry_async(|_| {
1413                    f().map_err(
1414                        |error| tracing::error!(%cmd_type, "orchestrator call failed: {error}"),
1415                    )
1416                })
1417                .await
1418                .expect("always retries on error")
1419        }
1420
1421        use WorkerCommand::*;
1422        match cmd {
1423            EnsureService { desc } => {
1424                retry(|| self.ensure_service(desc.clone()), "EnsureService").await
1425            }
1426            DropService { name } => retry(|| self.drop_service(&name), "DropService").await,
1427            ListServices {
1428                namespace,
1429                result_tx,
1430            } => {
1431                let result = retry(|| self.list_services(&namespace), "ListServices").await;
1432                let _ = result_tx.send(result);
1433            }
1434            FetchServiceMetrics {
1435                name,
1436                info,
1437                result_tx,
1438            } => {
1439                let result = self.fetch_service_metrics(&name, &info).await;
1440                let _ = result_tx.send(result);
1441            }
1442        }
1443    }
1444
1445    async fn fetch_service_metrics(
1446        &self,
1447        name: &str,
1448        info: &ServiceInfo,
1449    ) -> Vec<ServiceProcessMetrics> {
1450        if !self.collect_pod_metrics {
1451            return (0..info.scale.get())
1452                .map(|_| ServiceProcessMetrics::default())
1453                .collect();
1454        }
1455
1456        /// Usage metrics reported by clusterd processes.
1457        #[derive(Deserialize)]
1458        pub(crate) struct ClusterdUsage {
1459            disk_bytes: Option<u64>,
1460            memory_bytes: Option<u64>,
1461            swap_bytes: Option<u64>,
1462            heap_limit: Option<u64>,
1463        }
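        // Illustrative only: given the `Deserialize` derive above, the
        // `/api/usage-metrics` response is expected to be a JSON object along
        // the lines of (values are made up):
        //   {"disk_bytes": 1073741824, "memory_bytes": 2147483648,
        //    "swap_bytes": null, "heap_limit": 4294967296}
        // A field that is null (or omitted) deserializes to `None`.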
1464
1465        /// Get metrics for a particular service and process, converting them into a sane (i.e., numeric) format.
1466        ///
1467        /// Note that we want to keep going even if a lookup fails for whatever reason,
1468        /// so this function is infallible. If we fail to get cpu or memory for a particular pod,
1469        /// we just log a warning and install `None` in the returned struct.
1470        async fn get_metrics(
1471            self_: &OrchestratorWorker,
1472            service_name: &str,
1473            i: usize,
1474        ) -> ServiceProcessMetrics {
1475            let name = format!("{service_name}-{i}");
1476
1477            let clusterd_usage_fut = get_clusterd_usage(self_, service_name, i);
1478            let (metrics, clusterd_usage) =
1479                match futures::future::join(self_.metrics_api.get(&name), clusterd_usage_fut).await
1480                {
1481                    (Ok(metrics), Ok(clusterd_usage)) => (metrics, Some(clusterd_usage)),
1482                    (Ok(metrics), Err(e)) => {
1483                        warn!("Failed to fetch clusterd usage for {name}: {e}");
1484                        (metrics, None)
1485                    }
1486                    (Err(e), _) => {
1487                        warn!("Failed to get metrics for {name}: {e}");
1488                        return ServiceProcessMetrics::default();
1489                    }
1490                };
1491            let Some(PodMetricsContainer {
1492                usage:
1493                    PodMetricsContainerUsage {
1494                        cpu: Quantity(cpu_str),
1495                        memory: Quantity(mem_str),
1496                    },
1497                ..
1498            }) = metrics.containers.get(0)
1499            else {
1500                warn!("metrics result contained no containers for {name}");
1501                return ServiceProcessMetrics::default();
1502            };
1503
1504            let mut process_metrics = ServiceProcessMetrics::default();
1505
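            // `parse_k8s_quantity` understands both decimal ("250m", "4") and
            // binary ("512Mi") suffixes. The CPU quantity is rescaled to
            // nanocores via `try_to_integer(-9, true)` (a 10^-9 scale factor),
            // while memory is kept in whole bytes via `try_to_integer(0, false)`;
            // see the quantity tests at the bottom of this file for the
            // expected conversions.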
1506            match parse_k8s_quantity(cpu_str) {
1507                Ok(q) => match q.try_to_integer(-9, true) {
1508                    Some(nano_cores) => process_metrics.cpu_nano_cores = Some(nano_cores),
1509                    None => error!("CPU value {q:?} out of range"),
1510                },
1511                Err(e) => error!("failed to parse CPU value {cpu_str}: {e}"),
1512            }
1513            match parse_k8s_quantity(mem_str) {
1514                Ok(q) => match q.try_to_integer(0, false) {
1515                    Some(mem) => process_metrics.memory_bytes = Some(mem),
1516                    None => error!("memory value {q:?} out of range"),
1517                },
1518                Err(e) => error!("failed to parse memory value {mem_str}: {e}"),
1519            }
1520
1521            if let Some(usage) = clusterd_usage {
1522                // clusterd may report disk usage as `disk_bytes`, as `swap_bytes`, or as both.
1523                //
1524                // For now the Console expects the swap size to be reported in `disk_bytes`.
1525                // Once the Console has been ported to use `heap_bytes`/`heap_limit`, we can
1526                // simplify things by setting `process_metrics.disk_bytes = usage.disk_bytes`.
1527                process_metrics.disk_bytes = match (usage.disk_bytes, usage.swap_bytes) {
1528                    (Some(disk), Some(swap)) => Some(disk + swap),
1529                    (disk, swap) => disk.or(swap),
1530                };
1531
1532                // clusterd may report heap usage as `memory_bytes` and optionally `swap_bytes`.
1533                // If no `memory_bytes` is reported, we can't know the heap usage.
1534                process_metrics.heap_bytes = match (usage.memory_bytes, usage.swap_bytes) {
1535                    (Some(memory), Some(swap)) => Some(memory + swap),
1536                    (Some(memory), None) => Some(memory),
1537                    (None, _) => None,
1538                };
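                // For example (illustrative numbers only): memory_bytes = 3 GiB and
                // swap_bytes = 1 GiB yield heap_bytes = 4 GiB, while a missing
                // memory_bytes leaves heap_bytes unset even if swap was reported.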
1539
1540                process_metrics.heap_limit = usage.heap_limit;
1541            }
1542
1543            process_metrics
1544        }
1545
1546        /// Get the current usage metrics exposed by a clusterd process.
1547        ///
1548        /// Usage metrics are collected by connecting to a metrics endpoint exposed by the process.
1549        /// The endpoint is assumed to be reachable at the 'internal-http' port, under the HTTP path
1550        /// `/api/usage-metrics`.
1551        async fn get_clusterd_usage(
1552            self_: &OrchestratorWorker,
1553            service_name: &str,
1554            i: usize,
1555        ) -> anyhow::Result<ClusterdUsage> {
1556            let service = self_
1557                .service_api
1558                .get(service_name)
1559                .await
1560                .with_context(|| format!("failed to get service {service_name}"))?;
1561            let namespace = service
1562                .metadata
1563                .namespace
1564                .context("missing service namespace")?;
1565            let internal_http_port = service
1566                .spec
1567                .and_then(|spec| spec.ports)
1568                .and_then(|ports| {
1569                    ports
1570                        .into_iter()
1571                        .find(|p| p.name == Some("internal-http".into()))
1572                })
1573                .map(|p| p.port);
1574            let Some(port) = internal_http_port else {
1575                bail!("internal-http port missing in service spec");
1576            };
1577            let metrics_url = format!(
1578                "http://{service_name}-{i}.{service_name}.{namespace}.svc.cluster.local:{port}\
1579                 /api/usage-metrics"
1580            );
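            // For a hypothetical service "cluster-u1-replica-r1" in namespace
            // "materialize", process 0 would be scraped at
            // http://cluster-u1-replica-r1-0.cluster-u1-replica-r1.materialize.svc.cluster.local:<port>/api/usage-metrics
            // (the service name and namespace here are made up for illustration).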
1581
1582            let http_client = reqwest::Client::builder()
1583                .timeout(Duration::from_secs(10))
1584                .build()
1585                .context("error building HTTP client")?;
1586            let resp = http_client.get(metrics_url).send().await?;
1587            let usage = resp.json().await?;
1588
1589            Ok(usage)
1590        }
1591
1592        let ret = futures::future::join_all(
1593            (0..info.scale.cast_into()).map(|i| get_metrics(self, name, i)),
1594        );
1595
1596        ret.await
1597    }
1598
1599    async fn ensure_service(&self, mut desc: ServiceDescription) -> Result<(), K8sError> {
1600        // We inject our own pod's owner references into the Kubernetes objects
1601        // created for the service so that if the
1602        // Deployment/StatefulSet/whatever that owns the pod running the
1603        // orchestrator gets deleted, so do all services spawned by this
1604        // orchestrator.
1605        desc.service
1606            .metadata
1607            .owner_references
1608            .get_or_insert(vec![])
1609            .extend(self.owner_references.iter().cloned());
1610        desc.stateful_set
1611            .metadata
1612            .owner_references
1613            .get_or_insert(vec![])
1614            .extend(self.owner_references.iter().cloned());
1615
1616        let ss_spec = desc.stateful_set.spec.as_ref().unwrap();
1617        let pod_metadata = ss_spec.template.metadata.as_ref().unwrap();
1618        let pod_annotations = pod_metadata.annotations.clone();
1619
1620        self.service_api
1621            .patch(
1622                &desc.name,
1623                &PatchParams::apply(FIELD_MANAGER).force(),
1624                &Patch::Apply(desc.service),
1625            )
1626            .await?;
1627        self.stateful_set_api
1628            .patch(
1629                &desc.name,
1630                &PatchParams::apply(FIELD_MANAGER).force(),
1631                &Patch::Apply(desc.stateful_set),
1632            )
1633            .await?;
1634
1635        // We manage pod recreation manually, using the OnDelete StatefulSet update strategy, for
1636        // two reasons:
1637        //  * Kubernetes doesn't always automatically replace StatefulSet pods when their specs
1638        //    change; see https://github.com/kubernetes/kubernetes/issues/67250.
1639        //  * Kubernetes replaces StatefulSet pods when their annotations change, which is not
1640        //    something we want as it could cause unavailability.
1641        //
1642        // Our pod recreation policy is simple: If a pod's template hash changed, delete it, and
1643        // let the StatefulSet controller recreate it. Otherwise, patch the existing pod's
1644        // annotations to line up with the ones in the spec.
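        // A sketch of the decision below, for a hypothetical service named "foo":
        //   * pod "foo-0" carries an outdated template hash -> delete "foo-0" and
        //     let the StatefulSet controller recreate it from the new template;
        //   * the hash matches -> server-side apply only the pod's annotations;
        //   * the pod is missing (404) -> nothing to do, the controller creates it.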
1645        for pod_id in 0..desc.scale.get() {
1646            let pod_name = format!("{}-{pod_id}", desc.name);
1647            let pod = match self.pod_api.get(&pod_name).await {
1648                Ok(pod) => pod,
1649                // Pod already doesn't exist.
1650                Err(kube::Error::Api(e)) if e.code == 404 => continue,
1651                Err(e) => return Err(e),
1652            };
1653
1654            let result = if pod.annotations().get(POD_TEMPLATE_HASH_ANNOTATION)
1655                != Some(&desc.pod_template_hash)
1656            {
1657                self.pod_api
1658                    .delete(&pod_name, &DeleteParams::default())
1659                    .await
1660                    .map(|_| ())
1661            } else {
1662                let metadata = ObjectMeta {
1663                    annotations: pod_annotations.clone(),
1664                    ..Default::default()
1665                }
1666                .into_request_partial::<Pod>();
1667                self.pod_api
1668                    .patch_metadata(
1669                        &pod_name,
1670                        &PatchParams::apply(FIELD_MANAGER).force(),
1671                        &Patch::Apply(&metadata),
1672                    )
1673                    .await
1674                    .map(|_| ())
1675            };
1676
1677            match result {
1678                Ok(()) => (),
1679                // Pod was deleted concurrently.
1680                Err(kube::Error::Api(e)) if e.code == 404 => continue,
1681                Err(e) => return Err(e),
1682            }
1683        }
1684
1685        Ok(())
1686    }
1687
1688    async fn drop_service(&self, name: &str) -> Result<(), K8sError> {
1689        let res = self
1690            .stateful_set_api
1691            .delete(name, &DeleteParams::default())
1692            .await;
1693        match res {
1694            Ok(_) => (),
1695            Err(K8sError::Api(e)) if e.code == 404 => (),
1696            Err(e) => return Err(e),
1697        }
1698
1699        let res = self
1700            .service_api
1701            .delete(name, &DeleteParams::default())
1702            .await;
1703        match res {
1704            Ok(_) => Ok(()),
1705            Err(K8sError::Api(e)) if e.code == 404 => Ok(()),
1706            Err(e) => Err(e),
1707        }
1708    }
1709
1710    async fn list_services(&self, namespace: &str) -> Result<Vec<String>, K8sError> {
1711        let stateful_sets = self.stateful_set_api.list(&Default::default()).await?;
1712        let name_prefix = format!("{}{namespace}-", self.name_prefix);
1713        Ok(stateful_sets
1714            .into_iter()
1715            .filter_map(|ss| {
1716                ss.metadata
1717                    .name
1718                    .unwrap()
1719                    .strip_prefix(&name_prefix)
1720                    .map(Into::into)
1721            })
1722            .collect())
1723    }
1724}
1725
1726#[derive(Debug, Clone)]
1727struct KubernetesService {
1728    hosts: Vec<String>,
1729    ports: BTreeMap<String, u16>,
1730}
1731
1732impl Service for KubernetesService {
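    // Illustration (made-up values): with hosts ["foo-0.foo", "foo-1.foo"] and
    // ports {"internal-http": 6878}, `addresses("internal-http")` returns
    // ["foo-0.foo:6878", "foo-1.foo:6878"]. Looking up an unknown port name
    // panics, since `self.ports[port]` indexes the map directly.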
1733    fn addresses(&self, port: &str) -> Vec<String> {
1734        let port = self.ports[port];
1735        self.hosts
1736            .iter()
1737            .map(|host| format!("{host}:{port}"))
1738            .collect()
1739    }
1740}
1741
1742#[cfg(test)]
1743mod tests {
1744    use super::*;
1745
1746    #[mz_ore::test]
1747    fn k8s_quantity_base10_large() {
1748        let cases = &[
1749            ("42", 42),
1750            ("42k", 42000),
1751            ("42M", 42000000),
1752            ("42G", 42000000000),
1753            ("42T", 42000000000000),
1754            ("42P", 42000000000000000),
1755        ];
1756
1757        for (input, expected) in cases {
1758            let quantity = parse_k8s_quantity(input).unwrap();
1759            let number = quantity.try_to_integer(0, true).unwrap();
1760            assert_eq!(number, *expected, "input={input}, quantity={quantity:?}");
1761        }
1762    }
1763
1764    #[mz_ore::test]
1765    fn k8s_quantity_base10_small() {
1766        let cases = &[("42n", 42), ("42u", 42000), ("42m", 42000000)];
1767
1768        for (input, expected) in cases {
1769            let quantity = parse_k8s_quantity(input).unwrap();
1770            let number = quantity.try_to_integer(-9, true).unwrap();
1771            assert_eq!(number, *expected, "input={input}, quantity={quantity:?}");
1772        }
1773    }
1774
1775    #[mz_ore::test]
1776    fn k8s_quantity_base2() {
1777        let cases = &[
1778            ("42Ki", 42 << 10),
1779            ("42Mi", 42 << 20),
1780            ("42Gi", 42 << 30),
1781            ("42Ti", 42 << 40),
1782            ("42Pi", 42 << 50),
1783        ];
1784
1785        for (input, expected) in cases {
1786            let quantity = parse_k8s_quantity(input).unwrap();
1787            let number = quantity.try_to_integer(0, false).unwrap();
1788            assert_eq!(number, *expected, "input={input}, quantity={quantity:?}");
1789        }
1790    }
1791}