mz_orchestrator_kubernetes/lib.rs

1// Copyright Materialize, Inc. and contributors. All rights reserved.
2//
3// Use of this software is governed by the Business Source License
4// included in the LICENSE file.
5//
6// As of the Change Date specified in that file, in accordance with
7// the Business Source License, use of this software will be governed
8// by the Apache License, Version 2.0.
9
10use std::collections::BTreeMap;
11use std::future::Future;
12use std::sync::{Arc, Mutex};
13use std::time::{Duration, Instant};
14use std::{env, fmt};
15
16use anyhow::{Context, anyhow, bail};
17use async_trait::async_trait;
18use chrono::Utc;
19use clap::ValueEnum;
20use cloud_resource_controller::KubernetesResourceReader;
21use futures::TryFutureExt;
22use futures::stream::{BoxStream, StreamExt};
23use k8s_openapi::DeepMerge;
24use k8s_openapi::api::apps::v1::{StatefulSet, StatefulSetSpec};
25use k8s_openapi::api::core::v1::{
26    Affinity, Capabilities, Container, ContainerPort, EnvVar, EnvVarSource, EphemeralVolumeSource,
27    NodeAffinity, NodeSelector, NodeSelectorRequirement, NodeSelectorTerm, ObjectFieldSelector,
28    ObjectReference, PersistentVolumeClaim, PersistentVolumeClaimSpec,
29    PersistentVolumeClaimTemplate, Pod, PodAffinity, PodAffinityTerm, PodAntiAffinity,
30    PodSecurityContext, PodSpec, PodTemplateSpec, PreferredSchedulingTerm, ResourceRequirements,
31    SeccompProfile, Secret, SecurityContext, Service as K8sService, ServicePort, ServiceSpec,
32    Toleration, TopologySpreadConstraint, Volume, VolumeMount, VolumeResourceRequirements,
33    WeightedPodAffinityTerm,
34};
35use k8s_openapi::apimachinery::pkg::api::resource::Quantity;
36use k8s_openapi::apimachinery::pkg::apis::meta::v1::{
37    LabelSelector, LabelSelectorRequirement, OwnerReference,
38};
39use kube::ResourceExt;
40use kube::api::{Api, DeleteParams, ObjectMeta, Patch, PatchParams};
41use kube::client::Client;
42use kube::error::Error as K8sError;
43use kube::runtime::{WatchStreamExt, watcher};
44use maplit::btreemap;
45use mz_cloud_resources::AwsExternalIdPrefix;
46use mz_cloud_resources::crd::vpc_endpoint::v1::VpcEndpoint;
47use mz_orchestrator::{
48    DiskLimit, LabelSelectionLogic, LabelSelector as MzLabelSelector, NamespacedOrchestrator,
49    OfflineReason, Orchestrator, Service, ServiceAssignments, ServiceConfig, ServiceEvent,
50    ServiceProcessMetrics, ServiceStatus, scheduling_config::*,
51};
52use mz_ore::retry::Retry;
53use mz_ore::task::AbortOnDropHandle;
54use serde::Deserialize;
55use sha2::{Digest, Sha256};
56use tokio::sync::{mpsc, oneshot};
57use tracing::{info, warn};
58
59pub mod cloud_resource_controller;
60pub mod secrets;
61pub mod util;
62
63const FIELD_MANAGER: &str = "environmentd";
64const NODE_FAILURE_THRESHOLD_SECONDS: i64 = 30;
65
66const POD_TEMPLATE_HASH_ANNOTATION: &str = "environmentd.materialize.cloud/pod-template-hash";
67
68/// Configures a [`KubernetesOrchestrator`].
69#[derive(Debug, Clone)]
70pub struct KubernetesOrchestratorConfig {
71    /// The name of a Kubernetes context to use, if the Kubernetes configuration
72    /// is loaded from the local kubeconfig.
73    pub context: String,
74    /// The name of a non-default Kubernetes scheduler to use, if any.
75    pub scheduler_name: Option<String>,
76    /// Annotations to install on every service created by the orchestrator.
77    pub service_annotations: BTreeMap<String, String>,
78    /// Labels to install on every service created by the orchestrator.
79    pub service_labels: BTreeMap<String, String>,
80    /// Node selector to install on every service created by the orchestrator.
81    pub service_node_selector: BTreeMap<String, String>,
82    /// Affinity to install on every service created by the orchestrator.
83    pub service_affinity: Option<String>,
84    /// Tolerations to install on every service created by the orchestrator.
85    pub service_tolerations: Option<String>,
86    /// The service account that each service should run as, if any.
87    pub service_account: Option<String>,
88    /// The image pull policy to set for services created by the orchestrator.
89    pub image_pull_policy: KubernetesImagePullPolicy,
90    /// An AWS external ID prefix to use when making AWS operations on behalf
91    /// of the environment.
92    pub aws_external_id_prefix: Option<AwsExternalIdPrefix>,
93    /// Whether to use code coverage mode or not. Always false for production.
94    pub coverage: bool,
95    /// The Kubernetes StorageClass to use for the ephemeral volume attached to
96    /// services that request disk.
97    ///
98    /// If unspecified, the orchestrator will refuse to create services that
99    /// request disk.
100    pub ephemeral_volume_storage_class: Option<String>,
101    /// The optional fs group for the service's pods' `securityContext`.
102    pub service_fs_group: Option<i64>,
103    /// The prefix to prepend to all object names.
104    pub name_prefix: Option<String>,
105    /// Whether to attempt to collect metrics from Kubernetes.
106    pub collect_pod_metrics: bool,
107    /// Whether to annotate pods for prometheus service discovery.
108    pub enable_prometheus_scrape_annotations: bool,
109}
110
111impl KubernetesOrchestratorConfig {
112    pub fn name_prefix(&self) -> String {
113        self.name_prefix.clone().unwrap_or_default()
114    }
115}
116
117/// Specifies whether Kubernetes should pull Docker images when creating pods.
118#[derive(ValueEnum, Debug, Clone, Copy)]
119pub enum KubernetesImagePullPolicy {
120    /// Always pull the Docker image from the registry.
121    Always,
122    /// Pull the Docker image only if the image is not present.
123    IfNotPresent,
124    /// Never pull the Docker image.
125    Never,
126}
127
128impl fmt::Display for KubernetesImagePullPolicy {
129    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
130        match self {
131            KubernetesImagePullPolicy::Always => f.write_str("Always"),
132            KubernetesImagePullPolicy::IfNotPresent => f.write_str("IfNotPresent"),
133            KubernetesImagePullPolicy::Never => f.write_str("Never"),
134        }
135    }
136}
137
138impl KubernetesImagePullPolicy {
139    pub fn as_kebab_case_str(&self) -> &'static str {
140        match self {
141            Self::Always => "always",
142            Self::IfNotPresent => "if-not-present",
143            Self::Never => "never",
144        }
145    }
146}
147
148/// An orchestrator backed by Kubernetes.
149pub struct KubernetesOrchestrator {
150    client: Client,
151    kubernetes_namespace: String,
152    config: KubernetesOrchestratorConfig,
153    secret_api: Api<Secret>,
154    vpc_endpoint_api: Api<VpcEndpoint>,
155    namespaces: Mutex<BTreeMap<String, Arc<dyn NamespacedOrchestrator>>>,
156    resource_reader: Arc<KubernetesResourceReader>,
157}
158
159impl fmt::Debug for KubernetesOrchestrator {
160    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
161        f.debug_struct("KubernetesOrchestrator").finish()
162    }
163}
164
165impl KubernetesOrchestrator {
166    /// Creates a new Kubernetes orchestrator from the provided configuration.
167    pub async fn new(
168        config: KubernetesOrchestratorConfig,
169    ) -> Result<KubernetesOrchestrator, anyhow::Error> {
170        let (client, kubernetes_namespace) = util::create_client(config.context.clone()).await?;
171        let resource_reader =
172            Arc::new(KubernetesResourceReader::new(config.context.clone()).await?);
173        Ok(KubernetesOrchestrator {
174            client: client.clone(),
175            kubernetes_namespace,
176            config,
177            secret_api: Api::default_namespaced(client.clone()),
178            vpc_endpoint_api: Api::default_namespaced(client),
179            namespaces: Mutex::new(BTreeMap::new()),
180            resource_reader,
181        })
182    }
183}
184
185impl Orchestrator for KubernetesOrchestrator {
186    fn namespace(&self, namespace: &str) -> Arc<dyn NamespacedOrchestrator> {
187        let mut namespaces = self.namespaces.lock().expect("lock poisoned");
188        Arc::clone(namespaces.entry(namespace.into()).or_insert_with(|| {
189            let (command_tx, command_rx) = mpsc::unbounded_channel();
190            let worker = OrchestratorWorker {
191                metrics_api: Api::default_namespaced(self.client.clone()),
192                service_api: Api::default_namespaced(self.client.clone()),
193                stateful_set_api: Api::default_namespaced(self.client.clone()),
194                pod_api: Api::default_namespaced(self.client.clone()),
195                owner_references: vec![],
196                command_rx,
197                name_prefix: self.config.name_prefix.clone().unwrap_or_default(),
198                collect_pod_metrics: self.config.collect_pod_metrics,
199            }
200            .spawn(format!("kubernetes-orchestrator-worker:{namespace}"));
201
202            Arc::new(NamespacedKubernetesOrchestrator {
203                pod_api: Api::default_namespaced(self.client.clone()),
204                kubernetes_namespace: self.kubernetes_namespace.clone(),
205                namespace: namespace.into(),
206                config: self.config.clone(),
207                // TODO(guswynn): make this configurable.
208                scheduling_config: Default::default(),
209                service_infos: std::sync::Mutex::new(BTreeMap::new()),
210                command_tx,
211                _worker: worker,
212            })
213        }))
214    }
215}
216
217#[derive(Clone, Copy)]
218struct ServiceInfo {
219    scale: u16,
220}
221
222struct NamespacedKubernetesOrchestrator {
223    pod_api: Api<Pod>,
224    kubernetes_namespace: String,
225    namespace: String,
226    config: KubernetesOrchestratorConfig,
227    scheduling_config: std::sync::RwLock<ServiceSchedulingConfig>,
228    service_infos: std::sync::Mutex<BTreeMap<String, ServiceInfo>>,
229    command_tx: mpsc::UnboundedSender<WorkerCommand>,
230    _worker: AbortOnDropHandle<()>,
231}
232
233impl fmt::Debug for NamespacedKubernetesOrchestrator {
234    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
235        f.debug_struct("NamespacedKubernetesOrchestrator")
236            .field("kubernetes_namespace", &self.kubernetes_namespace)
237            .field("namespace", &self.namespace)
238            .field("config", &self.config)
239            .finish()
240    }
241}
242
243/// Commands sent from a [`NamespacedKubernetesOrchestrator`] to its
244/// [`OrchestratorWorker`].
245///
246/// Commands for which the caller expects a result include a `result_tx` on which the
247/// [`OrchestratorWorker`] will deliver the result.
248enum WorkerCommand {
249    EnsureService {
250        desc: ServiceDescription,
251    },
252    DropService {
253        name: String,
254    },
255    ListServices {
256        namespace: String,
257        result_tx: oneshot::Sender<Vec<String>>,
258    },
259    FetchServiceMetrics {
260        name: String,
261        info: ServiceInfo,
262        result_tx: oneshot::Sender<Vec<ServiceProcessMetrics>>,
263    },
264}
265
266/// A description of a service to be created by an [`OrchestratorWorker`].
267#[derive(Debug, Clone)]
268struct ServiceDescription {
269    name: String,
270    scale: u16,
271    service: K8sService,
272    stateful_set: StatefulSet,
273    pod_template_hash: String,
274}
275
276/// A task executing blocking work for a [`NamespacedKubernetesOrchestrator`] in the background.
277///
278/// This type exists to enable making [`NamespacedKubernetesOrchestrator::ensure_service`] and
279/// [`NamespacedKubernetesOrchestrator::drop_service`] non-blocking, allowing invocation of these
280/// methods in latency-sensitive contexts.
281///
282/// Note that, apart from `ensure_service` and `drop_service`, this worker also handles blocking
283/// orchestrator calls that query service state (such as `list_services`). These need to be
284/// sequenced through the worker loop to ensure they linearize as expected. For example, we want to
285/// ensure that a `list_services` result contains exactly those services that were previously
286/// created with `ensure_service` and not yet dropped with `drop_service`.
287struct OrchestratorWorker {
288    metrics_api: Api<PodMetrics>,
289    service_api: Api<K8sService>,
290    stateful_set_api: Api<StatefulSet>,
291    pod_api: Api<Pod>,
292    owner_references: Vec<OwnerReference>,
293    command_rx: mpsc::UnboundedReceiver<WorkerCommand>,
294    name_prefix: String,
295    collect_pod_metrics: bool,
296}
297
298#[derive(Deserialize, Clone, Debug)]
299pub struct PodMetricsContainer {
300    pub name: String,
301    pub usage: PodMetricsContainerUsage,
302}
303
304#[derive(Deserialize, Clone, Debug)]
305pub struct PodMetricsContainerUsage {
306    pub cpu: Quantity,
307    pub memory: Quantity,
308}
309
310#[derive(Deserialize, Clone, Debug)]
311pub struct PodMetrics {
312    pub metadata: ObjectMeta,
313    pub timestamp: String,
314    pub window: String,
315    pub containers: Vec<PodMetricsContainer>,
316}
317
318impl k8s_openapi::Resource for PodMetrics {
319    const GROUP: &'static str = "metrics.k8s.io";
320    const KIND: &'static str = "PodMetrics";
321    const VERSION: &'static str = "v1beta1";
322    const API_VERSION: &'static str = "metrics.k8s.io/v1beta1";
323    const URL_PATH_SEGMENT: &'static str = "pods";
324
325    type Scope = k8s_openapi::NamespaceResourceScope;
326}
327
328impl k8s_openapi::Metadata for PodMetrics {
329    type Ty = ObjectMeta;
330
331    fn metadata(&self) -> &Self::Ty {
332        &self.metadata
333    }
334
335    fn metadata_mut(&mut self) -> &mut Self::Ty {
336        &mut self.metadata
337    }
338}
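
// With the `Resource` and `Metadata` impls above, `kube` can build a typed
// client for the metrics.k8s.io API; this is how the worker's `metrics_api`
// field is constructed. A minimal sketch, assuming a `client: kube::Client`
// and an installed metrics-server ("some-pod-0" is a hypothetical pod name):
//
//     let metrics_api: Api<PodMetrics> = Api::default_namespaced(client);
//     let usage = metrics_api.get("some-pod-0").await?;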
339
340// Note that these types are very weird. We are `get`-ing a
341// `List` object, and lying about it having an `ObjectMeta`
342// (it deserializes as empty, but we don't need it). The custom
343// metrics API is designed this way, which is very non-standard.
344// A discussion in the `kube` channel in the `tokio` Discord
345// confirmed that this layout + using `get_subresource` is the
346// best way to handle this.
347
348#[derive(Deserialize, Clone, Debug)]
349pub struct MetricIdentifier {
350    #[serde(rename = "metricName")]
351    pub name: String,
352    // We skip `selector` for now, as we don't use it
353}
354
355#[derive(Deserialize, Clone, Debug)]
356pub struct MetricValue {
357    #[serde(rename = "describedObject")]
358    pub described_object: ObjectReference,
359    #[serde(flatten)]
360    pub metric_identifier: MetricIdentifier,
361    pub timestamp: String,
362    pub value: Quantity,
363    // We skip `windowSeconds`, as we don't need it
364}
365
366impl NamespacedKubernetesOrchestrator {
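    /// Returns the Kubernetes object name for the service with the given ID:
    /// the configured name prefix (if any), followed by this orchestrator's
    /// namespace and the ID.
    ///
    /// Illustrative example (values are hypothetical): prefix "mz-", namespace
    /// "cluster", and id "u1" yield "mz-cluster-u1".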
367    fn service_name(&self, id: &str) -> String {
368        format!(
369            "{}{}-{id}",
370            self.config.name_prefix.as_deref().unwrap_or(""),
371            self.namespace
372        )
373    }
374
375    /// Return a `watcher::Config` instance that limits results to the namespace
376    /// assigned to this orchestrator.
377    fn watch_pod_params(&self) -> watcher::Config {
378        let ns_selector = format!(
379            "environmentd.materialize.cloud/namespace={}",
380            self.namespace
381        );
382        // This watcher timeout must be shorter than the client read timeout.
383        watcher::Config::default().timeout(59).labels(&ns_selector)
384    }
385
386    /// Convert a higher-level label key to the actual one we
387    /// will give to Kubernetes.
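    ///
    /// For example (illustrative): in the orchestrator namespace "cluster",
    /// the key "scale" becomes "cluster.environmentd.materialize.cloud/scale".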
388    fn make_label_key(&self, key: &str) -> String {
389        format!("{}.environmentd.materialize.cloud/{}", self.namespace, key)
390    }
391
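    /// Converts an orchestrator-level label selector into a Kubernetes
    /// `LabelSelectorRequirement`, namespacing the label key via
    /// `make_label_key`.
    ///
    /// Sketch of the mapping (see the match below): `Eq`/`InSet` use the "In"
    /// operator, `NotEq`/`NotInSet` use "NotIn", `Exists` uses "Exists", and
    /// `NotExists` uses "DoesNotExist"; empty `InSet`/`NotInSet` value sets are
    /// rejected.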
392    fn label_selector_to_k8s(
393        &self,
394        MzLabelSelector { label_name, logic }: MzLabelSelector,
395    ) -> Result<LabelSelectorRequirement, anyhow::Error> {
396        let (operator, values) = match logic {
397            LabelSelectionLogic::Eq { value } => Ok(("In", vec![value])),
398            LabelSelectionLogic::NotEq { value } => Ok(("NotIn", vec![value])),
399            LabelSelectionLogic::Exists => Ok(("Exists", vec![])),
400            LabelSelectionLogic::NotExists => Ok(("DoesNotExist", vec![])),
401            LabelSelectionLogic::InSet { values } => {
402                if values.is_empty() {
403                    Err(anyhow!(
404                        "Invalid selector logic for {label_name}: empty `in` set"
405                    ))
406                } else {
407                    Ok(("In", values))
408                }
409            }
410            LabelSelectionLogic::NotInSet { values } => {
411                if values.is_empty() {
412                    Err(anyhow!(
413                        "Invalid selector logic for {label_name}: empty `notin` set"
414                    ))
415                } else {
416                    Ok(("NotIn", values))
417                }
418            }
419        }?;
420        let lsr = LabelSelectorRequirement {
421            key: self.make_label_key(&label_name),
422            operator: operator.to_string(),
423            values: Some(values),
424        };
425        Ok(lsr)
426    }
427
428    fn send_command(&self, cmd: WorkerCommand) {
429        self.command_tx.send(cmd).expect("worker task not dropped");
430    }
431}
432
433#[derive(Debug)]
434struct ScaledQuantity {
435    integral_part: u64,
436    exponent: i8,
437    base10: bool,
438}
439
440impl ScaledQuantity {
441    pub fn try_to_integer(&self, scale: i8, base10: bool) -> Option<u64> {
442        if base10 != self.base10 {
443            return None;
444        }
445        let exponent = self.exponent - scale;
446        let mut result = self.integral_part;
447        let base = if self.base10 { 10 } else { 2 };
448        if exponent < 0 {
449            for _ in exponent..0 {
450                result /= base;
451            }
452        } else {
453            for _ in 0..exponent {
454                result = result.checked_mul(base)?;
455            }
456        }
457        Some(result)
458    }
459}
460
461// Parse a k8s `Quantity` object
462// into a numeric value.
463//
464// This is intended to support collecting CPU and Memory data.
465// Thus, there are a few things that Kubernetes attempts to do that we don't,
466// because I've never observed metrics-server specifically sending them:
467// (1) Handle negative numbers (because it's not useful for that use-case)
468// (2) Handle non-integers (because I have never observed them being actually sent)
469// (3) Handle scientific notation (e.g. 1.23e2)
470fn parse_k8s_quantity(s: &str) -> Result<ScaledQuantity, anyhow::Error> {
471    const DEC_SUFFIXES: &[(&str, i8)] = &[
472        ("n", -9),
473        ("u", -6),
474        ("m", -3),
475        ("", 0),
476        ("k", 3), // yep, intentionally lowercase.
477        ("M", 6),
478        ("G", 9),
479        ("T", 12),
480        ("P", 15),
481        ("E", 18),
482    ];
483    const BIN_SUFFIXES: &[(&str, i8)] = &[
484        ("", 0),
485        ("Ki", 10),
486        ("Mi", 20),
487        ("Gi", 30),
488        ("Ti", 40),
489        ("Pi", 50),
490        ("Ei", 60),
491    ];
492
493    let (positive, s) = match s.chars().next() {
494        Some('+') => (true, &s[1..]),
495        Some('-') => (false, &s[1..]),
496        _ => (true, s),
497    };
498
499    if !positive {
500        anyhow::bail!("Negative numbers not supported")
501    }
502
503    fn is_suffix_char(ch: char) -> bool {
504        "numkMGTPEKi".contains(ch)
505    }
506    let (num, suffix) = match s.find(is_suffix_char) {
507        None => (s, ""),
508        Some(idx) => s.split_at(idx),
509    };
510    let num: u64 = num.parse()?;
511    let (exponent, base10) = if let Some((_, exponent)) =
512        DEC_SUFFIXES.iter().find(|(target, _)| suffix == *target)
513    {
514        (exponent, true)
515    } else if let Some((_, exponent)) = BIN_SUFFIXES.iter().find(|(target, _)| suffix == *target) {
516        (exponent, false)
517    } else {
518        anyhow::bail!("Unrecognized suffix: {suffix}");
519    };
520    Ok(ScaledQuantity {
521        integral_part: num,
522        exponent: *exponent,
523        base10,
524    })
525}
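
// A minimal usage sketch of the two helpers above (illustrative values).
// metrics-server commonly reports CPU with decimal suffixes (e.g. nanocores,
// "n") and memory with binary suffixes (e.g. "Ki"/"Mi").
#[cfg(test)]
mod quantity_parsing_example {
    use super::*;

    #[test] // plain test attribute, used here only for illustration
    fn parse_and_scale_quantities() {
        // 250 millicores, read back at milli scale (base 10).
        let cpu = parse_k8s_quantity("250m").unwrap();
        assert_eq!(cpu.try_to_integer(-3, true), Some(250));

        // 128 MiB, read back in bytes (scale 0, base 2).
        let mem = parse_k8s_quantity("128Mi").unwrap();
        assert_eq!(mem.try_to_integer(0, false), Some(128 << 20));
    }
}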
526
527#[async_trait]
528impl NamespacedOrchestrator for NamespacedKubernetesOrchestrator {
529    async fn fetch_service_metrics(
530        &self,
531        id: &str,
532    ) -> Result<Vec<ServiceProcessMetrics>, anyhow::Error> {
533        let info = if let Some(info) = self.service_infos.lock().expect("poisoned lock").get(id) {
534            *info
535        } else {
536            // This should have been set in `ensure_service`.
537            tracing::error!("Failed to get info for {id}");
538            anyhow::bail!("Failed to get info for {id}");
539        };
540
541        let (result_tx, result_rx) = oneshot::channel();
542        self.send_command(WorkerCommand::FetchServiceMetrics {
543            name: self.service_name(id),
544            info,
545            result_tx,
546        });
547
548        let metrics = result_rx.await.expect("worker task not dropped");
549        Ok(metrics)
550    }
551
552    fn ensure_service(
553        &self,
554        id: &str,
555        ServiceConfig {
556            image,
557            init_container_image,
558            args,
559            ports: ports_in,
560            memory_limit,
561            memory_request,
562            cpu_limit,
563            scale,
564            labels: labels_in,
565            annotations: annotations_in,
566            availability_zones,
567            other_replicas_selector,
568            replicas_selector,
569            disk_limit,
570            node_selector,
571        }: ServiceConfig,
572    ) -> Result<Box<dyn Service>, anyhow::Error> {
573        // This is extremely cheap to clone, so just look into the lock once.
574        let scheduling_config: ServiceSchedulingConfig =
575            self.scheduling_config.read().expect("poisoned").clone();
576
577        // Enable disk unless the disk limit explicitly disables it (i.e., is zero).
578        let disk = disk_limit != Some(DiskLimit::ZERO);
579
580        let name = self.service_name(id);
581        // The match labels should be the minimal set of labels that uniquely
582        // identify the pods in the stateful set. Changing these after the
583        // `StatefulSet` is created is not permitted by Kubernetes, and we're
584        // not yet smart enough to handle deleting and recreating the
585        // `StatefulSet`.
586        let match_labels = btreemap! {
587            "environmentd.materialize.cloud/namespace".into() => self.namespace.clone(),
588            "environmentd.materialize.cloud/service-id".into() => id.into(),
589        };
590        let mut labels = match_labels.clone();
591        for (key, value) in labels_in {
592            labels.insert(self.make_label_key(&key), value);
593        }
594
595        labels.insert(self.make_label_key("scale"), scale.to_string());
596
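        // Each named port is also exposed as a label, e.g. (illustrative) a
        // port named "internal-http" yields
        // "environmentd.materialize.cloud/port-internal-http" = "true".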
597        for port in &ports_in {
598            labels.insert(
599                format!("environmentd.materialize.cloud/port-{}", port.name),
600                "true".into(),
601            );
602        }
603        for (key, value) in &self.config.service_labels {
604            labels.insert(key.clone(), value.clone());
605        }
606        let mut limits = BTreeMap::new();
607        let mut requests = BTreeMap::new();
608        if let Some(memory_limit) = memory_limit {
609            limits.insert(
610                "memory".into(),
611                Quantity(memory_limit.0.as_u64().to_string()),
612            );
613            requests.insert(
614                "memory".into(),
615                Quantity(memory_limit.0.as_u64().to_string()),
616            );
617        }
618        if let Some(memory_request) = memory_request {
619            requests.insert(
620                "memory".into(),
621                Quantity(memory_request.0.as_u64().to_string()),
622            );
623        }
624        if let Some(cpu_limit) = cpu_limit {
625            limits.insert(
626                "cpu".into(),
627                Quantity(format!("{}m", cpu_limit.as_millicpus())),
628            );
629            requests.insert(
630                "cpu".into(),
631                Quantity(format!("{}m", cpu_limit.as_millicpus())),
632            );
633        }
634        let service = K8sService {
635            metadata: ObjectMeta {
636                name: Some(name.clone()),
637                ..Default::default()
638            },
639            spec: Some(ServiceSpec {
640                ports: Some(
641                    ports_in
642                        .iter()
643                        .map(|port| ServicePort {
644                            port: port.port_hint.into(),
645                            name: Some(port.name.clone()),
646                            ..Default::default()
647                        })
648                        .collect(),
649                ),
650                cluster_ip: Some("None".to_string()),
651                selector: Some(match_labels.clone()),
652                ..Default::default()
653            }),
654            status: None,
655        };
656
657        let hosts = (0..scale)
658            .map(|i| {
659                format!(
660                    "{name}-{i}.{name}.{}.svc.cluster.local",
661                    self.kubernetes_namespace
662                )
663            })
664            .collect::<Vec<_>>();
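        // Each process is addressed via the StatefulSet's headless service,
        // e.g. (illustrative) process 0 of service "cluster-u1" in Kubernetes
        // namespace "materialize" is
        // "cluster-u1-0.cluster-u1.materialize.svc.cluster.local".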
665        let ports = ports_in
666            .iter()
667            .map(|p| (p.name.clone(), p.port_hint))
668            .collect::<BTreeMap<_, _>>();
669
670        let mut listen_addrs = BTreeMap::new();
671        let mut peer_addrs = vec![BTreeMap::new(); hosts.len()];
672        for (name, port) in &ports {
673            listen_addrs.insert(name.clone(), format!("0.0.0.0:{port}"));
674            for (i, host) in hosts.iter().enumerate() {
675                peer_addrs[i].insert(name.clone(), format!("{host}:{port}"));
676            }
677        }
678        let mut args = args(ServiceAssignments {
679            listen_addrs: &listen_addrs,
680            peer_addrs: &peer_addrs,
681        });
682
683        // This constrains the orchestrator (for those orchestrators that support
684        // anti-affinity, today just k8s) to never schedule pods for different replicas
685        // of the same cluster on the same node. Pods from the _same_ replica are fine;
686        // pods from different clusters are also fine.
687        //
688        // The point is that if pods of two replicas are on the same node, that node
689        // going down would kill both replicas, and so the replication factor of the
690        // cluster in question is illusory.
691        let anti_affinity = Some({
692            let label_selector_requirements = other_replicas_selector
693                .clone()
694                .into_iter()
695                .map(|ls| self.label_selector_to_k8s(ls))
696                .collect::<Result<Vec<_>, _>>()?;
697            let ls = LabelSelector {
698                match_expressions: Some(label_selector_requirements),
699                ..Default::default()
700            };
701            let pat = PodAffinityTerm {
702                label_selector: Some(ls),
703                topology_key: "kubernetes.io/hostname".to_string(),
704                ..Default::default()
705            };
706
707            if !scheduling_config.soften_replication_anti_affinity {
708                PodAntiAffinity {
709                    required_during_scheduling_ignored_during_execution: Some(vec![pat]),
710                    ..Default::default()
711                }
712            } else {
713                PodAntiAffinity {
714                    preferred_during_scheduling_ignored_during_execution: Some(vec![
715                        WeightedPodAffinityTerm {
716                            weight: scheduling_config.soften_replication_anti_affinity_weight,
717                            pod_affinity_term: pat,
718                        },
719                    ]),
720                    ..Default::default()
721                }
722            }
723        });
724
725        let pod_affinity = if let Some(weight) = scheduling_config.multi_pod_az_affinity_weight {
726            // `match_labels` sufficiently selects pods in the same replica.
727            let ls = LabelSelector {
728                match_labels: Some(match_labels.clone()),
729                ..Default::default()
730            };
731            let pat = PodAffinityTerm {
732                label_selector: Some(ls),
733                topology_key: "topology.kubernetes.io/zone".to_string(),
734                ..Default::default()
735            };
736
737            Some(PodAffinity {
738                preferred_during_scheduling_ignored_during_execution: Some(vec![
739                    WeightedPodAffinityTerm {
740                        weight,
741                        pod_affinity_term: pat,
742                    },
743                ]),
744                ..Default::default()
745            })
746        } else {
747            None
748        };
749
750        let topology_spread = if scheduling_config.topology_spread.enabled {
751            let config = &scheduling_config.topology_spread;
752
753            if !config.ignore_non_singular_scale || scale <= 1 {
754                let label_selector_requirements = (if config.ignore_non_singular_scale {
755                    let mut replicas_selector_ignoring_scale = replicas_selector.clone();
756
757                    replicas_selector_ignoring_scale.push(mz_orchestrator::LabelSelector {
758                        label_name: "scale".into(),
759                        logic: mz_orchestrator::LabelSelectionLogic::Eq {
760                            value: "1".to_string(),
761                        },
762                    });
763
764                    replicas_selector_ignoring_scale
765                } else {
766                    replicas_selector
767                })
768                .into_iter()
769                .map(|ls| self.label_selector_to_k8s(ls))
770                .collect::<Result<Vec<_>, _>>()?;
771                let ls = LabelSelector {
772                    match_expressions: Some(label_selector_requirements),
773                    ..Default::default()
774                };
775
776                let constraint = TopologySpreadConstraint {
777                    label_selector: Some(ls),
778                    min_domains: config.min_domains,
779                    max_skew: config.max_skew,
780                    topology_key: "topology.kubernetes.io/zone".to_string(),
781                    when_unsatisfiable: if config.soft {
782                        "ScheduleAnyway".to_string()
783                    } else {
784                        "DoNotSchedule".to_string()
785                    },
786                    // TODO(guswynn): restore these once they are supported.
787                    // Consider node affinities when calculating topology spread. This is the
788                    // default: <https://docs.rs/k8s-openapi/latest/k8s_openapi/api/core/v1/struct.TopologySpreadConstraint.html#structfield.node_affinity_policy>,
789                    // made explicit.
790                    // node_affinity_policy: Some("Honor".to_string()),
791                    // Do not consider node taints when calculating topology spread. This is the
792                    // default: <https://docs.rs/k8s-openapi/latest/k8s_openapi/api/core/v1/struct.TopologySpreadConstraint.html#structfield.node_taints_policy>,
793                    // made explicit.
794                    // node_taints_policy: Some("Ignore".to_string()),
795                    match_label_keys: None,
796                    // Once the above are restored, we shouldn't have `..Default::default()` here, because the specifics of these fields are
797                    // subtle enough that we want compilation failures when we upgrade.
798                    ..Default::default()
799                };
800                Some(vec![constraint])
801            } else {
802                None
803            }
804        } else {
805            None
806        };
807
808        let mut pod_annotations = btreemap! {
809            // Prevent the cluster-autoscaler (or karpenter) from evicting these pods in attempts to scale down
810            // and terminate nodes.
811            // This will cost us more money, but should give us better uptime.
812            // This does not prevent all evictions by Kubernetes, only the ones initiated by the
813            // cluster-autoscaler (or karpenter). Notably, eviction of pods for resource overuse is still enabled.
814            "cluster-autoscaler.kubernetes.io/safe-to-evict".to_owned() => "false".to_string(),
815            "karpenter.sh/do-not-evict".to_owned() => "true".to_string(),
816
817            // It's called do-not-disrupt in newer versions of karpenter, so we set both for forward/backward compatibility.
818            "karpenter.sh/do-not-disrupt".to_owned() => "true".to_string(),
819        };
820        for (key, value) in annotations_in {
821            // We want to use the same prefix as our label keys.
822            pod_annotations.insert(self.make_label_key(&key), value);
823        }
824        if self.config.enable_prometheus_scrape_annotations {
825            if let Some(internal_http_port) = ports_in
826                .iter()
827                .find(|port| port.name == "internal-http")
828                .map(|port| port.port_hint.to_string())
829            {
830                // Enable prometheus scrape discovery
831                pod_annotations.insert("prometheus.io/scrape".to_owned(), "true".to_string());
832                pod_annotations.insert("prometheus.io/port".to_owned(), internal_http_port);
833                pod_annotations.insert("prometheus.io/path".to_owned(), "/metrics".to_string());
834                pod_annotations.insert("prometheus.io/scheme".to_owned(), "http".to_string());
835            }
836        }
837        for (key, value) in &self.config.service_annotations {
838            pod_annotations.insert(key.clone(), value.clone());
839        }
840
841        let default_node_selector = if disk {
842            vec![("materialize.cloud/disk".to_string(), disk.to_string())]
843        } else {
844            // If the cluster doesn't require disk, we can omit the selector,
845            // allowing it to be scheduled onto nodes both with and without the
846            // selector.
847            vec![]
848        };
849
850        let node_selector: BTreeMap<String, String> = default_node_selector
851            .into_iter()
852            .chain(self.config.service_node_selector.clone())
853            .chain(node_selector)
854            .collect();
855
856        let node_affinity = if let Some(availability_zones) = availability_zones {
857            let selector = NodeSelectorTerm {
858                match_expressions: Some(vec![NodeSelectorRequirement {
859                    key: "materialize.cloud/availability-zone".to_string(),
860                    operator: "In".to_string(),
861                    values: Some(availability_zones),
862                }]),
863                match_fields: None,
864            };
865
866            if scheduling_config.soften_az_affinity {
867                Some(NodeAffinity {
868                    preferred_during_scheduling_ignored_during_execution: Some(vec![
869                        PreferredSchedulingTerm {
870                            preference: selector,
871                            weight: scheduling_config.soften_az_affinity_weight,
872                        },
873                    ]),
874                    required_during_scheduling_ignored_during_execution: None,
875                })
876            } else {
877                Some(NodeAffinity {
878                    preferred_during_scheduling_ignored_during_execution: None,
879                    required_during_scheduling_ignored_during_execution: Some(NodeSelector {
880                        node_selector_terms: vec![selector],
881                    }),
882                })
883            }
884        } else {
885            None
886        };
887
888        let mut affinity = Affinity {
889            pod_anti_affinity: anti_affinity,
890            pod_affinity,
891            node_affinity,
892            ..Default::default()
893        };
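        // `service_affinity`, when configured, is a JSON-encoded Kubernetes
        // `Affinity` that is merged into the affinity computed above. An
        // illustrative (hypothetical) value:
        //
        //     {"nodeAffinity": {"requiredDuringSchedulingIgnoredDuringExecution":
        //         {"nodeSelectorTerms": [{"matchExpressions":
        //             [{"key": "materialize.cloud/disk", "operator": "In", "values": ["true"]}]}]}}}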
894        if let Some(service_affinity) = &self.config.service_affinity {
895            affinity.merge_from(serde_json::from_str(service_affinity)?);
896        }
897
898        let container_name = image
899            .rsplit_once('/')
900            .and_then(|(_, name_version)| name_version.rsplit_once(':'))
901            .context("`image` is not ORG/NAME:VERSION")?
902            .0
903            .to_string();
904
905        let container_security_context = if scheduling_config.security_context_enabled {
906            Some(SecurityContext {
907                privileged: Some(false),
908                run_as_non_root: Some(true),
909                allow_privilege_escalation: Some(false),
910                seccomp_profile: Some(SeccompProfile {
911                    type_: "RuntimeDefault".to_string(),
912                    ..Default::default()
913                }),
914                capabilities: Some(Capabilities {
915                    drop: Some(vec!["ALL".to_string()]),
916                    ..Default::default()
917                }),
918                ..Default::default()
919            })
920        } else {
921            None
922        };
923
924        let init_containers = init_container_image.map(|image| {
925            vec![Container {
926                name: "init".to_string(),
927                image: Some(image),
928                image_pull_policy: Some(self.config.image_pull_policy.to_string()),
929                resources: Some(ResourceRequirements {
930                    claims: None,
931                    limits: Some(limits.clone()),
932                    requests: Some(requests.clone()),
933                }),
934                security_context: container_security_context.clone(),
935                env: Some(vec![
936                    EnvVar {
937                        name: "MZ_NAMESPACE".to_string(),
938                        value_from: Some(EnvVarSource {
939                            field_ref: Some(ObjectFieldSelector {
940                                field_path: "metadata.namespace".to_string(),
941                                ..Default::default()
942                            }),
943                            ..Default::default()
944                        }),
945                        ..Default::default()
946                    },
947                    EnvVar {
948                        name: "MZ_POD_NAME".to_string(),
949                        value_from: Some(EnvVarSource {
950                            field_ref: Some(ObjectFieldSelector {
951                                field_path: "metadata.name".to_string(),
952                                ..Default::default()
953                            }),
954                            ..Default::default()
955                        }),
956                        ..Default::default()
957                    },
958                    EnvVar {
959                        name: "MZ_NODE_NAME".to_string(),
960                        value_from: Some(EnvVarSource {
961                            field_ref: Some(ObjectFieldSelector {
962                                field_path: "spec.nodeName".to_string(),
963                                ..Default::default()
964                            }),
965                            ..Default::default()
966                        }),
967                        ..Default::default()
968                    },
969                ]),
970                ..Default::default()
971            }]
972        });
973
974        let env = if self.config.coverage {
975            Some(vec![EnvVar {
976                name: "LLVM_PROFILE_FILE".to_string(),
977                value: Some(format!("/coverage/{}-%p-%9m%c.profraw", self.namespace)),
978                ..Default::default()
979            }])
980        } else {
981            None
982        };
983
984        let mut volume_mounts = vec![];
985
986        if self.config.coverage {
987            volume_mounts.push(VolumeMount {
988                name: "coverage".to_string(),
989                mount_path: "/coverage".to_string(),
990                ..Default::default()
991            })
992        }
993
994        let volumes = match (disk, &self.config.ephemeral_volume_storage_class) {
995            (true, Some(ephemeral_volume_storage_class)) => {
996                volume_mounts.push(VolumeMount {
997                    name: "scratch".to_string(),
998                    mount_path: "/scratch".to_string(),
999                    ..Default::default()
1000                });
1001                args.push("--scratch-directory=/scratch".into());
1002
1003                Some(vec![Volume {
1004                    name: "scratch".to_string(),
1005                    ephemeral: Some(EphemeralVolumeSource {
1006                        volume_claim_template: Some(PersistentVolumeClaimTemplate {
1007                            spec: PersistentVolumeClaimSpec {
1008                                access_modes: Some(vec!["ReadWriteOnce".to_string()]),
1009                                storage_class_name: Some(
1010                                    ephemeral_volume_storage_class.to_string(),
1011                                ),
1012                                resources: Some(VolumeResourceRequirements {
1013                                    requests: Some(BTreeMap::from([(
1014                                        "storage".to_string(),
1015                                        Quantity(
1016                                            disk_limit
1017                                                .unwrap_or(DiskLimit::ARBITRARY)
1018                                                .0
1019                                                .as_u64()
1020                                                .to_string(),
1021                                        ),
1022                                    )])),
1023                                    ..Default::default()
1024                                }),
1025                                ..Default::default()
1026                            },
1027                            ..Default::default()
1028                        }),
1029                        ..Default::default()
1030                    }),
1031                    ..Default::default()
1032                }])
1033            }
1034            (true, None) => {
1035                return Err(anyhow!(
1036                    "service requested disk but no ephemeral volume storage class was configured"
1037                ));
1038            }
1039            (false, _) => None,
1040        };
1041
1042        if let Some(name_prefix) = &self.config.name_prefix {
1043            args.push(format!("--secrets-reader-name-prefix={}", name_prefix));
1044        }
1045
1046        let volume_claim_templates = if self.config.coverage {
1047            Some(vec![PersistentVolumeClaim {
1048                metadata: ObjectMeta {
1049                    name: Some("coverage".to_string()),
1050                    ..Default::default()
1051                },
1052                spec: Some(PersistentVolumeClaimSpec {
1053                    access_modes: Some(vec!["ReadWriteOnce".to_string()]),
1054                    resources: Some(VolumeResourceRequirements {
1055                        requests: Some(BTreeMap::from([(
1056                            "storage".to_string(),
1057                            Quantity("10Gi".to_string()),
1058                        )])),
1059                        ..Default::default()
1060                    }),
1061                    ..Default::default()
1062                }),
1063                ..Default::default()
1064            }])
1065        } else {
1066            None
1067        };
1068
1069        let security_context = if let Some(fs_group) = self.config.service_fs_group {
1070            Some(PodSecurityContext {
1071                fs_group: Some(fs_group),
1072                run_as_user: Some(fs_group),
1073                run_as_group: Some(fs_group),
1074                ..Default::default()
1075            })
1076        } else {
1077            None
1078        };
1079
1080        let mut tolerations = vec![
1081            // When the node becomes `NotReady` it indicates there is a problem
1082            // with the node. By default Kubernetes waits 300s (5 minutes)
1083            // before descheduling the pod, but we tune this to 30s for faster
1084            // recovery in the case of node failure.
1085            Toleration {
1086                effect: Some("NoExecute".into()),
1087                key: Some("node.kubernetes.io/not-ready".into()),
1088                operator: Some("Exists".into()),
1089                toleration_seconds: Some(NODE_FAILURE_THRESHOLD_SECONDS),
1090                value: None,
1091            },
1092            Toleration {
1093                effect: Some("NoExecute".into()),
1094                key: Some("node.kubernetes.io/unreachable".into()),
1095                operator: Some("Exists".into()),
1096                toleration_seconds: Some(NODE_FAILURE_THRESHOLD_SECONDS),
1097                value: None,
1098            },
1099        ];
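        // `service_tolerations`, when configured, is a JSON-encoded list of
        // Kubernetes `Toleration`s appended to the defaults above. An
        // illustrative (hypothetical) value:
        //
        //     [{"key": "materialize.cloud/dedicated", "operator": "Equal",
        //       "value": "clusters", "effect": "NoSchedule"}]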
1100        if let Some(service_tolerations) = &self.config.service_tolerations {
1101            tolerations.extend(serde_json::from_str::<Vec<_>>(service_tolerations)?);
1102        }
1103        let tolerations = Some(tolerations);
1104
1105        let mut pod_template_spec = PodTemplateSpec {
1106            metadata: Some(ObjectMeta {
1107                labels: Some(labels.clone()),
1108                annotations: Some(pod_annotations), // Do not delete, we insert into it below.
1109                ..Default::default()
1110            }),
1111            spec: Some(PodSpec {
1112                init_containers,
1113                containers: vec![Container {
1114                    name: container_name,
1115                    image: Some(image),
1116                    args: Some(args),
1117                    image_pull_policy: Some(self.config.image_pull_policy.to_string()),
1118                    ports: Some(
1119                        ports_in
1120                            .iter()
1121                            .map(|port| ContainerPort {
1122                                container_port: port.port_hint.into(),
1123                                name: Some(port.name.clone()),
1124                                ..Default::default()
1125                            })
1126                            .collect(),
1127                    ),
1128                    security_context: container_security_context.clone(),
1129                    resources: Some(ResourceRequirements {
1130                        claims: None,
1131                        limits: Some(limits),
1132                        requests: Some(requests),
1133                    }),
1134                    volume_mounts: if !volume_mounts.is_empty() {
1135                        Some(volume_mounts)
1136                    } else {
1137                        None
1138                    },
1139                    env,
1140                    ..Default::default()
1141                }],
1142                volumes,
1143                security_context,
1144                node_selector: Some(node_selector),
1145                scheduler_name: self.config.scheduler_name.clone(),
1146                service_account: self.config.service_account.clone(),
1147                affinity: Some(affinity),
1148                topology_spread_constraints: topology_spread,
1149                tolerations,
1150                // Setting a 0s termination grace period has the side effect of
1151                // automatically starting a new pod when the previous pod is
1152                // currently terminating. This enables recovery from a node
1153                // failure with no manual intervention. Without this setting,
1154                // the StatefulSet controller will refuse to start a new pod
1155                // until the failed node is manually removed from the Kubernetes
1156                // cluster.
1157                //
1158                // The Kubernetes documentation strongly advises against this
1159                // setting, as StatefulSets attempt to provide "at most once"
1160                // semantics [0]--that is, the guarantee that for a given pod in
1161                // a StatefulSet there is *at most* one pod with that identity
1162                // running in the cluster.
1163                //
1164                // Materialize services, however, are carefully designed to
1165                // *not* rely on this guarantee. In fact, we do not believe that
1166                // correct distributed systems can meaningfully rely on
1167                // Kubernetes's guarantee--network packets from a pod can be
1168                // arbitrarily delayed, long past that pod's termination.
1169                //
1170                // [0]: https://kubernetes.io/docs/tasks/run-application/force-delete-stateful-set-pod/#statefulset-considerations
1171                termination_grace_period_seconds: Some(0),
1172                ..Default::default()
1173            }),
1174        };
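        // Hash the serialized pod template and record it under
        // POD_TEMPLATE_HASH_ANNOTATION (and in the `ServiceDescription` below),
        // presumably so that the worker can cheaply detect whether an existing
        // StatefulSet's pod template has changed.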
1175        let pod_template_json = serde_json::to_string(&pod_template_spec).unwrap();
1176        let mut hasher = Sha256::new();
1177        hasher.update(pod_template_json);
1178        let pod_template_hash = format!("{:x}", hasher.finalize());
1179        pod_template_spec
1180            .metadata
1181            .as_mut()
1182            .unwrap()
1183            .annotations
1184            .as_mut()
1185            .unwrap()
1186            .insert(
1187                POD_TEMPLATE_HASH_ANNOTATION.to_owned(),
1188                pod_template_hash.clone(),
1189            );
1190
1191        let stateful_set = StatefulSet {
1192            metadata: ObjectMeta {
1193                name: Some(name.clone()),
1194                ..Default::default()
1195            },
1196            spec: Some(StatefulSetSpec {
1197                selector: LabelSelector {
1198                    match_labels: Some(match_labels),
1199                    ..Default::default()
1200                },
1201                service_name: Some(name.clone()),
1202                replicas: Some(scale.into()),
1203                template: pod_template_spec,
1204                pod_management_policy: Some("Parallel".to_string()),
1205                volume_claim_templates,
1206                ..Default::default()
1207            }),
1208            status: None,
1209        };
1210
1211        self.send_command(WorkerCommand::EnsureService {
1212            desc: ServiceDescription {
1213                name,
1214                scale,
1215                service,
1216                stateful_set,
1217                pod_template_hash,
1218            },
1219        });
1220
1221        self.service_infos
1222            .lock()
1223            .expect("poisoned lock")
1224            .insert(id.to_string(), ServiceInfo { scale });
1225
1226        Ok(Box::new(KubernetesService { hosts, ports }))
1227    }
1228
1229    /// Drops the identified service, if it exists.
1230    fn drop_service(&self, id: &str) -> Result<(), anyhow::Error> {
1231        fail::fail_point!("kubernetes_drop_service", |_| Err(anyhow!("failpoint")));
1232        self.service_infos.lock().expect("poisoned lock").remove(id);
1233
1234        self.send_command(WorkerCommand::DropService {
1235            name: self.service_name(id),
1236        });
1237
1238        Ok(())
1239    }
1240
1241    /// Lists the identifiers of all known services.
1242    async fn list_services(&self) -> Result<Vec<String>, anyhow::Error> {
1243        let (result_tx, result_rx) = oneshot::channel();
1244        self.send_command(WorkerCommand::ListServices {
1245            namespace: self.namespace.clone(),
1246            result_tx,
1247        });
1248
1249        let list = result_rx.await.expect("worker task not dropped");
1250        Ok(list)
1251    }
1252
1253    fn watch_services(&self) -> BoxStream<'static, Result<ServiceEvent, anyhow::Error>> {
1254        fn into_service_event(pod: Pod) -> Result<ServiceEvent, anyhow::Error> {
1255            let process_id = pod.name_any().split('-').next_back().unwrap().parse()?;
1256            let service_id_label = "environmentd.materialize.cloud/service-id";
1257            let service_id = pod
1258                .labels()
1259                .get(service_id_label)
1260                .ok_or_else(|| anyhow!("missing label: {service_id_label}"))?
1261                .clone();
1262
1263            let oomed = pod
1264                .status
1265                .as_ref()
1266                .and_then(|status| status.container_statuses.as_ref())
1267                .map(|container_statuses| {
1268                    container_statuses.iter().any(|cs| {
1269                        // The container might have already transitioned from "terminated" to
1270                        // "waiting"/"running" state, in which case we need to check its previous
1271                        // state to find out why it terminated.
1272                        let current_state = cs.state.as_ref().and_then(|s| s.terminated.as_ref());
1273                        let last_state = cs.last_state.as_ref().and_then(|s| s.terminated.as_ref());
1274                        let termination_state = current_state.or(last_state);
1275
1276                        // The interesting exit codes are:
1277                        //  * 135 (SIGBUS): occurs when lgalloc runs out of disk
1278                        //  * 137 (SIGKILL): occurs when the OOM killer terminates the container
1279                        //  * 167: occurs when the lgalloc or memory limiter terminates the process
1280                        // We treat all of these as OOM conditions, since swap and lgalloc use
1281                        // disk only for spilling memory.
1282                        let exit_code = termination_state.map(|s| s.exit_code);
1283                        exit_code.is_some_and(|e| [135, 137, 167].contains(&e))
1284                    })
1285                })
1286                .unwrap_or(false);
1287
1288            let (pod_ready, last_probe_time) = pod
1289                .status
1290                .and_then(|status| status.conditions)
1291                .and_then(|conditions| conditions.into_iter().find(|c| c.type_ == "Ready"))
1292                .map(|c| (c.status == "True", c.last_probe_time))
1293                .unwrap_or((false, None));
1294
1295            let status = if pod_ready {
1296                ServiceStatus::Online
1297            } else {
1298                ServiceStatus::Offline(oomed.then_some(OfflineReason::OomKilled))
1299            };
1300            let time = if let Some(time) = last_probe_time {
1301                time.0
1302            } else {
1303                Utc::now()
1304            };
1305
1306            Ok(ServiceEvent {
1307                service_id,
1308                process_id,
1309                status,
1310                time,
1311            })
1312        }
1313
1314        let stream = watcher(self.pod_api.clone(), self.watch_pod_params())
1315            .touched_objects()
1316            .filter_map(|object| async move {
1317                match object {
1318                    Ok(pod) => Some(into_service_event(pod)),
1319                    Err(error) => {
1320                        // We assume that errors returned by Kubernetes are usually transient, so we
1321                        // just log a warning and otherwise ignore them.
1322                        tracing::warn!("service watch error: {error}");
1323                        None
1324                    }
1325                }
1326            });
1327        Box::pin(stream)
1328    }
1329
1330    fn update_scheduling_config(&self, config: ServiceSchedulingConfig) {
1331        *self.scheduling_config.write().expect("poisoned") = config;
1332    }
1333}
1334
1335impl OrchestratorWorker {
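    /// Spawns the worker onto a background task; dropping the returned handle aborts it.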
1336    fn spawn(self, name: String) -> AbortOnDropHandle<()> {
1337        mz_ore::task::spawn(|| name, self.run()).abort_on_drop()
1338    }
1339
1340    async fn run(mut self) {
1341        {
1342            info!("initializing Kubernetes orchestrator worker");
1343            let start = Instant::now();
1344
1345            // Fetch the owner reference for our own pod (usually a
1346            // StatefulSet), so that we can propagate it to the services we
1347            // create.
1348            let hostname = env::var("HOSTNAME").unwrap_or_else(|_| panic!("HOSTNAME environment variable missing or invalid; required for Kubernetes orchestrator"));
1349            let orchestrator_pod = Retry::default()
1350                .clamp_backoff(Duration::from_secs(10))
1351                .retry_async(|_| self.pod_api.get(&hostname))
1352                .await
1353                .expect("always retries on error");
1354            self.owner_references
1355                .extend(orchestrator_pod.owner_references().into_iter().cloned());
1356
1357            info!(
1358                "Kubernetes orchestrator worker initialized in {:?}",
1359                start.elapsed()
1360            );
1361        }
1362
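        // Serve commands until the orchestrator drops the sending side of the channel.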
1363        while let Some(cmd) = self.command_rx.recv().await {
1364            self.handle_command(cmd).await;
1365        }
1366    }
1367
1368    /// Handle a worker command.
1369    ///
1370    /// If handling the command fails, it is automatically retried. All command handlers return
1371    /// [`K8sError`], so we can reasonably assume that a failure is caused by issues communicating
1372    /// with the Kubernetes API server and that retrying resolves them eventually.
1373    async fn handle_command(&self, cmd: WorkerCommand) {
1374        async fn retry<F, U, R>(f: F, cmd_type: &str) -> R
1375        where
1376            F: Fn() -> U,
1377            U: Future<Output = Result<R, K8sError>>,
1378        {
1379            Retry::default()
1380                .clamp_backoff(Duration::from_secs(10))
1381                .retry_async(|_| {
1382                    f().map_err(
1383                        |error| tracing::error!(%cmd_type, "orchestrator call failed: {error}"),
1384                    )
1385                })
1386                .await
1387                .expect("always retries on error")
1388        }
1389
1390        use WorkerCommand::*;
1391        match cmd {
1392            EnsureService { desc } => {
1393                retry(|| self.ensure_service(desc.clone()), "EnsureService").await
1394            }
1395            DropService { name } => retry(|| self.drop_service(&name), "DropService").await,
1396            ListServices {
1397                namespace,
1398                result_tx,
1399            } => {
1400                let result = retry(|| self.list_services(&namespace), "ListServices").await;
1401                let _ = result_tx.send(result);
1402            }
1403            FetchServiceMetrics {
1404                name,
1405                info,
1406                result_tx,
1407            } => {
1408                let result = self.fetch_service_metrics(&name, &info).await;
1409                let _ = result_tx.send(result);
1410            }
1411        }
1412    }
1413
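    /// Fetch CPU, memory, and disk usage for each process of the named service.
    ///
    /// If pod metrics collection is disabled, default (empty) metrics are returned for every
    /// process instead of querying the metrics API.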
1414    async fn fetch_service_metrics(
1415        &self,
1416        name: &str,
1417        info: &ServiceInfo,
1418    ) -> Vec<ServiceProcessMetrics> {
1419        if !self.collect_pod_metrics {
1420            return (0..info.scale)
1421                .map(|_| ServiceProcessMetrics::default())
1422                .collect();
1423        }
1424
1425        /// Get metrics for a particular service and process, converting them into a sane (i.e., numeric) format.
1426        ///
1427        /// Note that we want to keep going even if a lookup fails for whatever reason,
1428        /// so this function is infallible. If we fail to get cpu or memory for a particular pod,
1429        /// we just log a warning and install `None` in the returned struct.
1430        async fn get_metrics(
1431            self_: &OrchestratorWorker,
1432            service_name: &str,
1433            i: usize,
1434        ) -> ServiceProcessMetrics {
1435            let name = format!("{service_name}-{i}");
1436
1437            let disk_usage_fut = get_disk_usage(self_, service_name, i);
1438            let (metrics, disk_usage) =
1439                match futures::future::join(self_.metrics_api.get(&name), disk_usage_fut).await {
1440                    (Ok(metrics), Ok(disk_usage)) => (metrics, disk_usage),
1441                    (Ok(metrics), Err(e)) => {
1442                        warn!("Failed to fetch disk usage for {name}: {e}");
1443                        (metrics, None)
1444                    }
1445                    (Err(e), _) => {
1446                        warn!("Failed to get metrics for {name}: {e}");
1447                        return ServiceProcessMetrics::default();
1448                    }
1449                };
1450            let Some(PodMetricsContainer {
1451                usage:
1452                    PodMetricsContainerUsage {
1453                        cpu: Quantity(cpu_str),
1454                        memory: Quantity(mem_str),
1455                    },
1456                ..
1457            }) = metrics.containers.first()
1458            else {
1459                warn!("metrics result contained no containers for {name}");
1460                return ServiceProcessMetrics::default();
1461            };
1462
1463            let cpu = match parse_k8s_quantity(cpu_str) {
1464                Ok(q) => match q.try_to_integer(-9, true) {
1465                    Some(i) => Some(i),
1466                    None => {
1467                        tracing::error!("CPU value {q:?} out of range");
1468                        None
1469                    }
1470                },
1471                Err(e) => {
1472                    tracing::error!("Failed to parse CPU value {cpu_str}: {e}");
1473                    None
1474                }
1475            };
1476            let memory = match parse_k8s_quantity(mem_str) {
1477                Ok(q) => match q.try_to_integer(0, false) {
1478                    Some(i) => Some(i),
1479                    None => {
1480                        tracing::error!("Memory value {q:?} out of range");
1481                        None
1482                    }
1483                },
1484                Err(e) => {
1485                    tracing::error!("Failed to parse memory value {mem_str}: {e}");
1486                    None
1487                }
1488            };
1489
1490            ServiceProcessMetrics {
1491                cpu_nano_cores: cpu,
1492                memory_bytes: memory,
1493                disk_usage_bytes: disk_usage,
1494            }
1495        }
1496
1497        /// Get the current disk usage for a particular service and process.
1498        ///
1499        /// Disk usage is collected by connecting to a metrics endpoint exposed by the process. The
1500        /// endpoint is assumed to be reachable on the service's 'internal-http' port under the HTTP path
1501        /// `/api/usage-metrics`.
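        ///
        /// The endpoint is expected to answer with a small JSON document; an illustrative
        /// (not normative) example is `{"disk_bytes": 4096, "swap_bytes": 0}`, where either
        /// value may be null.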
1502        async fn get_disk_usage(
1503            self_: &OrchestratorWorker,
1504            service_name: &str,
1505            i: usize,
1506        ) -> anyhow::Result<Option<u64>> {
1507            #[derive(Deserialize)]
1508            pub(crate) struct Usage {
1509                disk_bytes: Option<u64>,
1510                swap_bytes: Option<u64>,
1511            }
1512
1513            let service = self_
1514                .service_api
1515                .get(service_name)
1516                .await
1517                .with_context(|| format!("failed to get service {service_name}"))?;
1518            let namespace = service
1519                .metadata
1520                .namespace
1521                .context("missing service namespace")?;
1522            let internal_http_port = service
1523                .spec
1524                .and_then(|spec| spec.ports)
1525                .and_then(|ports| {
1526                    ports
1527                        .into_iter()
1528                        .find(|p| p.name == Some("internal-http".into()))
1529                })
1530                .map(|p| p.port);
1531            let Some(port) = internal_http_port else {
1532                bail!("internal-http port missing in service spec");
1533            };
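            // Address process `i` via its stable per-pod DNS name
            // (`<service>-<i>.<service>.<namespace>.svc.cluster.local`).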
1534            let metrics_url = format!(
1535                "http://{service_name}-{i}.{service_name}.{namespace}.svc.cluster.local:{port}\
1536                 /api/usage-metrics"
1537            );
1538
1539            let http_client = reqwest::Client::builder()
1540                .timeout(Duration::from_secs(10))
1541                .build()
1542                .context("error building HTTP client")?;
1543            let resp = http_client.get(metrics_url).send().await?;
1544            let Usage {
1545                disk_bytes,
1546                swap_bytes,
1547            } = resp.json().await?;
1548
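            // Count swap toward disk usage: swap is backed by disk and is only used for spilling
            // memory, so the sum reflects the process's effective disk footprint.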
1549            let bytes = if let (Some(disk), Some(swap)) = (disk_bytes, swap_bytes) {
1550                Some(disk + swap)
1551            } else {
1552                disk_bytes.or(swap_bytes)
1553            };
1554            Ok(bytes)
1555        }
1556
1557        let ret =
1558            futures::future::join_all((0..info.scale).map(|i| get_metrics(self, name, i.into())));
1559
1560        ret.await
1561    }
1562
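    /// Create or update the Kubernetes `Service` and `StatefulSet` backing the described
    /// service, using server-side apply so that repeated calls converge on the desired state.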
1563    async fn ensure_service(&self, mut desc: ServiceDescription) -> Result<(), K8sError> {
1564        // We inject our own pod's owner references into the Kubernetes objects
1565        // created for the service so that if the
1566        // Deployment/StatefulSet/whatever that owns the pod running the
1567        // orchestrator gets deleted, so do all services spawned by this
1568        // orchestrator.
1569        desc.service
1570            .metadata
1571            .owner_references
1572            .get_or_insert(vec![])
1573            .extend(self.owner_references.iter().cloned());
1574        desc.stateful_set
1575            .metadata
1576            .owner_references
1577            .get_or_insert(vec![])
1578            .extend(self.owner_references.iter().cloned());
1579
1580        self.service_api
1581            .patch(
1582                &desc.name,
1583                &PatchParams::apply(FIELD_MANAGER).force(),
1584                &Patch::Apply(desc.service),
1585            )
1586            .await?;
1587        self.stateful_set_api
1588            .patch(
1589                &desc.name,
1590                &PatchParams::apply(FIELD_MANAGER).force(),
1591                &Patch::Apply(desc.stateful_set),
1592            )
1593            .await?;
1594
1595        // Explicitly delete any pods in the stateful set that don't match the
1596        // template. In theory, Kubernetes would do this automatically, but
1597        // in practice we have observed that it does not.
1598        // See: https://github.com/kubernetes/kubernetes/issues/67250
1599        for pod_id in 0..desc.scale {
1600            let pod_name = format!("{}-{pod_id}", desc.name);
1601            let pod = match self.pod_api.get(&pod_name).await {
1602                Ok(pod) => pod,
1603                // Pod already doesn't exist.
1604                Err(kube::Error::Api(e)) if e.code == 404 => continue,
1605                Err(e) => return Err(e),
1606            };
1607            if pod.annotations().get(POD_TEMPLATE_HASH_ANNOTATION) != Some(&desc.pod_template_hash)
1608            {
1609                match self
1610                    .pod_api
1611                    .delete(&pod_name, &DeleteParams::default())
1612                    .await
1613                {
1614                    Ok(_) => (),
1615                    // Pod got deleted while we were looking at it.
1616                    Err(kube::Error::Api(e)) if e.code == 404 => (),
1617                    Err(e) => return Err(e),
1618                }
1619            }
1620        }
1621
1622        Ok(())
1623    }
1624
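    /// Delete the `StatefulSet` and `Service` for the named service, treating 404 responses as
    /// success since the objects may already be gone.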
1625    async fn drop_service(&self, name: &str) -> Result<(), K8sError> {
1626        let res = self
1627            .stateful_set_api
1628            .delete(name, &DeleteParams::default())
1629            .await;
1630        match res {
1631            Ok(_) => (),
1632            Err(K8sError::Api(e)) if e.code == 404 => (),
1633            Err(e) => return Err(e),
1634        }
1635
1636        let res = self
1637            .service_api
1638            .delete(name, &DeleteParams::default())
1639            .await;
1640        match res {
1641            Ok(_) => Ok(()),
1642            Err(K8sError::Api(e)) if e.code == 404 => Ok(()),
1643            Err(e) => Err(e),
1644        }
1645    }
1646
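    /// List the services in the given namespace by enumerating `StatefulSet`s and stripping this
    /// orchestrator's name prefix from their names.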
1647    async fn list_services(&self, namespace: &str) -> Result<Vec<String>, K8sError> {
1648        let stateful_sets = self.stateful_set_api.list(&Default::default()).await?;
1649        let name_prefix = format!("{}{namespace}-", self.name_prefix);
1650        Ok(stateful_sets
1651            .into_iter()
1652            .filter_map(|ss| {
1653                ss.metadata
1654                    .name
1655                    .unwrap()
1656                    .strip_prefix(&name_prefix)
1657                    .map(Into::into)
1658            })
1659            .collect())
1660    }
1661}
1662
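/// A handle to a service running in Kubernetes, recording the hostname of each process and the
/// named ports the service exposes.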
1663#[derive(Debug, Clone)]
1664struct KubernetesService {
1665    hosts: Vec<String>,
1666    ports: BTreeMap<String, u16>,
1667}
1668
1669impl Service for KubernetesService {
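    /// Returns one `host:port` address per process for the named port.
    ///
    /// For example (illustrative values only), hosts `["svc-0.svc", "svc-1.svc"]` with port
    /// `"http"` mapped to `8080` yield `["svc-0.svc:8080", "svc-1.svc:8080"]`.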
1670    fn addresses(&self, port: &str) -> Vec<String> {
1671        let port = self.ports[port];
1672        self.hosts
1673            .iter()
1674            .map(|host| format!("{host}:{port}"))
1675            .collect()
1676    }
1677}
1678
1679#[cfg(test)]
1680mod tests {
1681    use super::*;
1682
1683    #[mz_ore::test]
1684    fn k8s_quantity_base10_large() {
1685        let cases = &[
1686            ("42", 42),
1687            ("42k", 42000),
1688            ("42M", 42000000),
1689            ("42G", 42000000000),
1690            ("42T", 42000000000000),
1691            ("42P", 42000000000000000),
1692        ];
1693
1694        for (input, expected) in cases {
1695            let quantity = parse_k8s_quantity(input).unwrap();
1696            let number = quantity.try_to_integer(0, true).unwrap();
1697            assert_eq!(number, *expected, "input={input}, quantity={quantity:?}");
1698        }
1699    }
1700
1701    #[mz_ore::test]
1702    fn k8s_quantity_base10_small() {
1703        let cases = &[("42n", 42), ("42u", 42000), ("42m", 42000000)];
1704
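        // A scale of -9 expresses the result in nano units, so e.g. "42u" becomes 42_000.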
1705        for (input, expected) in cases {
1706            let quantity = parse_k8s_quantity(input).unwrap();
1707            let number = quantity.try_to_integer(-9, true).unwrap();
1708            assert_eq!(number, *expected, "input={input}, quantity={quantity:?}");
1709        }
1710    }
1711
1712    #[mz_ore::test]
1713    fn k8s_quantity_base2() {
1714        let cases = &[
1715            ("42Ki", 42 << 10),
1716            ("42Mi", 42 << 20),
1717            ("42Gi", 42 << 30),
1718            ("42Ti", 42 << 40),
1719            ("42Pi", 42 << 50),
1720        ];
1721
1722        for (input, expected) in cases {
1723            let quantity = parse_k8s_quantity(input).unwrap();
1724            let number = quantity.try_to_integer(0, false).unwrap();
1725            assert_eq!(number, *expected, "input={input}, quantity={quantity:?}");
1726        }
1727    }
1728}