mz_persist_client/schema.rs

// Copyright Materialize, Inc. and contributors. All rights reserved.
//
// Use of this software is governed by the Business Source License
// included in the LICENSE file.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0.

//! Persist shard schema information.

use std::collections::BTreeMap;
use std::fmt::Debug;
use std::sync::{Arc, RwLock};
use std::time::Instant;

use differential_dataflow::difference::Semigroup;
use differential_dataflow::lattice::Lattice;
use mz_ore::cast::CastFrom;
use mz_persist_types::columnar::data_type;
use mz_persist_types::schema::{Migration, SchemaId, backward_compatible};
use mz_persist_types::{Codec, Codec64};
use timely::progress::Timestamp;

use crate::internal::apply::Applier;
use crate::internal::encoding::Schemas;
use crate::internal::metrics::{SchemaCacheMetrics, SchemaMetrics};
use crate::internal::state::{BatchPart, EncodedSchemas};

/// The result returned by [crate::PersistClient::compare_and_evolve_schema].
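///
/// A minimal sketch of handling each variant; `client`, `shard_id`,
/// `expected_id`, the schema references, and `diagnostics` are assumed to be
/// in scope, and the type parameters are illustrative:
///
/// ```ignore
/// match client
///     .compare_and_evolve_schema::<K, V, u64, i64>(
///         shard_id,
///         expected_id,
///         &key_schema,
///         &val_schema,
///         diagnostics,
///     )
///     .await
///     .unwrap()
/// {
///     // The new schema was registered under `id`; use it for future writes.
///     CaESchema::Ok(id) => {}
///     // The proposed schema can't be migrated to; give up.
///     CaESchema::Incompatible => {}
///     // `expected_id` was stale; retry with the returned `schema_id`.
///     CaESchema::ExpectedMismatch { schema_id, key, val } => {}
/// }
/// ```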
#[derive(Debug)]
#[cfg_attr(test, derive(PartialEq))]
pub enum CaESchema<K: Codec, V: Codec> {
    /// The schema was successfully evolved and registered with the included id.
    Ok(SchemaId),
    /// The schema was not compatible with previously registered schemas.
    Incompatible,
    /// The `expected` SchemaId did not match reality. The current one is
    /// included for ease of retry.
    ExpectedMismatch {
        /// The current schema id.
        schema_id: SchemaId,
        /// The key schema at this id.
        key: K::Schema,
        /// The val schema at this id.
        val: V::Schema,
    },
}

/// A cache of decoded schemas and schema migrations.
///
/// The decoded schemas are a cache of the registry in state, and so are shared
/// process-wide.
///
/// On the other hand, the migrations have an N^2 problem and so are per-handle.
/// This also seems reasonable because for any given write handle, the write
/// schema will be the same for all migration entries, and ditto for read handle
/// and read schema.
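///
/// A sketch of the two cache shapes (field types simplified for
/// illustration): decoded schemas are keyed by a single id, while migrations
/// are keyed by (write, read) id pairs, which is where the N^2 lives:
///
/// ```ignore
/// // Shared process-wide, in `SchemaCacheMaps`:
/// key_by_id: BTreeMap<SchemaId, Arc<K::Schema>>
/// // Per-handle, in `MigrationCacheMap`:
/// by_ids: BTreeMap<(SchemaId, SchemaId), Arc<Migration>>
/// ```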
#[derive(Debug)]
pub(crate) struct SchemaCache<K: Codec, V: Codec, T, D> {
    maps: Arc<SchemaCacheMaps<K, V>>,
    applier: Applier<K, V, T, D>,
    key_migration_by_ids: MigrationCacheMap,
    val_migration_by_ids: MigrationCacheMap,
}

impl<K: Codec, V: Codec, T: Clone, D> Clone for SchemaCache<K, V, T, D> {
    fn clone(&self) -> Self {
        Self {
            maps: Arc::clone(&self.maps),
            applier: self.applier.clone(),
            key_migration_by_ids: self.key_migration_by_ids.clone(),
            val_migration_by_ids: self.val_migration_by_ids.clone(),
        }
    }
}

impl<K: Codec, V: Codec, T, D> Drop for SchemaCache<K, V, T, D> {
    fn drop(&mut self) {
        let dropped = u64::cast_from(
            self.key_migration_by_ids.by_ids.len() + self.val_migration_by_ids.by_ids.len(),
        );
        self.applier
            .metrics
            .schema
            .cache_migration
            .dropped_count
            .inc_by(dropped);
    }
}

impl<K, V, T, D> SchemaCache<K, V, T, D>
where
    K: Debug + Codec,
    V: Debug + Codec,
    T: Timestamp + Lattice + Codec64 + Sync,
    D: Semigroup + Codec64,
{
    pub fn new(maps: Arc<SchemaCacheMaps<K, V>>, applier: Applier<K, V, T, D>) -> Self {
        let key_migration_by_ids = MigrationCacheMap {
            metrics: applier.metrics.schema.cache_migration.clone(),
            by_ids: BTreeMap::new(),
        };
        let val_migration_by_ids = MigrationCacheMap {
            metrics: applier.metrics.schema.cache_migration.clone(),
            by_ids: BTreeMap::new(),
        };
        SchemaCache {
            maps,
            applier,
            key_migration_by_ids,
            val_migration_by_ids,
        }
    }

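    /// Returns the decoded key and val schemas registered under `id`, if any,
    /// consulting the process-wide cache before falling back to the schema
    /// registry in state.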
    async fn schemas(&self, id: &SchemaId) -> Option<Schemas<K, V>> {
        let key = self
            .get_or_try_init(&self.maps.key_by_id, id, |schemas| {
                self.maps.key_by_id.metrics.computed_count.inc();
                schemas.get(id).map(|x| K::decode_schema(&x.key))
            })
            .await?;
        let val = self
            .get_or_try_init(&self.maps.val_by_id, id, |schemas| {
                self.maps.val_by_id.metrics.computed_count.inc();
                schemas.get(id).map(|x| V::decode_schema(&x.val))
            })
            .await?;
        Some(Schemas {
            id: Some(*id),
            key,
            val,
        })
    }

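    /// Returns the migration from `write`'s key schema to `read`'s key
    /// schema, cached by `(write_id, read_id)` when both ids are present.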
    fn key_migration(
        &mut self,
        write: &Schemas<K, V>,
        read: &Schemas<K, V>,
    ) -> Option<Arc<Migration>> {
        let migration_fn = || Self::migration::<K>(&write.key, &read.key);
        let (Some(write_id), Some(read_id)) = (write.id, read.id) else {
            // TODO: Annoying to cache this because we're missing an id. This
            // will probably require some sort of refactor to fix so punting for
            // now.
            self.key_migration_by_ids.metrics.computed_count.inc();
            return migration_fn().map(Arc::new);
        };
        self.key_migration_by_ids
            .get_or_try_insert(write_id, read_id, migration_fn)
    }

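    /// Returns the migration from `write`'s val schema to `read`'s val
    /// schema, cached by `(write_id, read_id)` when both ids are present.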
    fn val_migration(
        &mut self,
        write: &Schemas<K, V>,
        read: &Schemas<K, V>,
    ) -> Option<Arc<Migration>> {
        let migration_fn = || Self::migration::<V>(&write.val, &read.val);
        let (Some(write_id), Some(read_id)) = (write.id, read.id) else {
            // TODO: Annoying to cache this because we're missing an id. This
            // will probably require some sort of refactor to fix so punting for
            // now.
            self.val_migration_by_ids.metrics.computed_count.inc();
            return migration_fn().map(Arc::new);
        };
        self.val_migration_by_ids
            .get_or_try_insert(write_id, read_id, migration_fn)
    }

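    /// Computes a `Migration` between the Arrow `DataType`s of the `write`
    /// and `read` schemas, returning `None` if no backward-compatible
    /// migration exists.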
    fn migration<C: Codec>(write: &C::Schema, read: &C::Schema) -> Option<Migration> {
        let write_dt = data_type::<C>(write).expect("valid schema");
        let read_dt = data_type::<C>(read).expect("valid schema");
        backward_compatible(&write_dt, &read_dt)
    }

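    /// Returns the cached entry for `key`, computing it via `f` from the
    /// schema registry in state if necessary.
    ///
    /// If the registry in the currently cached state doesn't contain `key`,
    /// fetches the latest state (at least as new as the seqno we just looked
    /// at) and tries exactly once more.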
    async fn get_or_try_init<MK: Clone + Ord, MV: PartialEq + Debug>(
        &self,
        map: &SchemaCacheMap<MK, MV>,
        key: &MK,
        f: impl Fn(&BTreeMap<SchemaId, EncodedSchemas>) -> Option<MV>,
    ) -> Option<Arc<MV>> {
        let ret = map.get_or_try_init(key, || {
            self.applier
                .schemas(|seqno, schemas| f(schemas).ok_or(seqno))
        });
        let seqno = match ret {
            Ok(ret) => return Some(ret),
            Err(seqno) => seqno,
        };
        self.applier.metrics.schema.cache_fetch_state_count.inc();
        self.applier.fetch_and_update_state(Some(seqno)).await;
        map.get_or_try_init(key, || {
            self.applier
                .schemas(|seqno, schemas| f(schemas).ok_or(seqno))
        })
        .ok()
    }
}

#[derive(Debug)]
pub(crate) struct SchemaCacheMaps<K: Codec, V: Codec> {
    key_by_id: SchemaCacheMap<SchemaId, K::Schema>,
    val_by_id: SchemaCacheMap<SchemaId, V::Schema>,
}

impl<K: Codec, V: Codec> SchemaCacheMaps<K, V> {
    pub(crate) fn new(metrics: &SchemaMetrics) -> Self {
        Self {
            key_by_id: SchemaCacheMap {
                metrics: metrics.cache_schema.clone(),
                map: RwLock::new(BTreeMap::new()),
            },
            val_by_id: SchemaCacheMap {
                metrics: metrics.cache_schema.clone(),
                map: RwLock::new(BTreeMap::new()),
            },
        }
    }
}

#[derive(Debug)]
struct SchemaCacheMap<I, S> {
    metrics: SchemaCacheMetrics,
    map: RwLock<BTreeMap<I, Arc<S>>>,
}

impl<I: Clone + Ord, S: PartialEq + Debug> SchemaCacheMap<I, S> {
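    /// Returns the cached value for `id`, computing it via `state_fn` on a
    /// miss: read-lock fast path, compute outside any lock, then insert under
    /// the write lock. Concurrent callers may race to compute, but they must
    /// produce equal values.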
    fn get_or_try_init<E>(
        &self,
        id: &I,
        state_fn: impl FnOnce() -> Result<S, E>,
    ) -> Result<Arc<S>, E> {
        // First see if we have the value cached.
        {
            let map = self.map.read().expect("lock");
            if let Some(ret) = map.get(id).map(Arc::clone) {
                self.metrics.cached_count.inc();
                return Ok(ret);
            }
        }
        // If not, see if we can get the value from current state.
        let ret = state_fn().map(Arc::new);
        if let Ok(val) = ret.as_ref() {
            let mut map = self.map.write().expect("lock");
            // If any answers got written in the meantime, they should be the
            // same, so just overwrite
            let prev = map.insert(id.clone(), Arc::clone(val));
            match prev {
                Some(prev) => debug_assert_eq!(*val, prev),
                None => self.metrics.added_count.inc(),
            }
        } else {
            self.metrics.unavailable_count.inc();
        }
        ret
    }
}

impl<I, K> Drop for SchemaCacheMap<I, K> {
    fn drop(&mut self) {
        let map = self.map.read().expect("lock");
        self.metrics.dropped_count.inc_by(u64::cast_from(map.len()));
    }
}

#[derive(Debug, Clone)]
struct MigrationCacheMap {
    metrics: SchemaCacheMetrics,
    by_ids: BTreeMap<(SchemaId, SchemaId), Arc<Migration>>,
}

impl MigrationCacheMap {
    fn get_or_try_insert(
        &mut self,
        write_id: SchemaId,
        read_id: SchemaId,
        migration_fn: impl FnOnce() -> Option<Migration>,
    ) -> Option<Arc<Migration>> {
        if let Some(migration) = self.by_ids.get(&(write_id, read_id)) {
            self.metrics.cached_count.inc();
            return Some(Arc::clone(migration));
        };
        self.metrics.computed_count.inc();
        let migration = migration_fn().map(Arc::new);
        if let Some(migration) = migration.as_ref() {
            self.metrics.added_count.inc();
            // We just looked this up above and we've got mutable access, so no
            // race issues.
            self.by_ids
                .insert((write_id, read_id), Arc::clone(migration));
        } else {
            self.metrics.unavailable_count.inc();
        }
        migration
    }
}

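/// The migration (if any) to apply when reading a batch part, derived from
/// the schema the part was written with and the schema of the handle reading
/// it.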
#[derive(Debug)]
pub(crate) enum PartMigration<K: Codec, V: Codec> {
    /// No-op!
    SameSchema { both: Schemas<K, V> },
    /// We don't have a schema id for the write schema.
    Schemaless { read: Schemas<K, V> },
    /// We have both write and read schemas, and they don't match.
    Either {
        write: Schemas<K, V>,
        read: Schemas<K, V>,
        key_migration: Arc<Migration>,
        val_migration: Arc<Migration>,
    },
}

impl<K: Codec, V: Codec> Clone for PartMigration<K, V> {
    fn clone(&self) -> Self {
        match self {
            Self::SameSchema { both } => Self::SameSchema { both: both.clone() },
            Self::Schemaless { read } => Self::Schemaless { read: read.clone() },
            Self::Either {
                write,
                read,
                key_migration,
                val_migration,
            } => Self::Either {
                write: write.clone(),
                read: read.clone(),
                key_migration: Arc::clone(key_migration),
                val_migration: Arc::clone(val_migration),
            },
        }
    }
}

impl<K, V> PartMigration<K, V>
where
    K: Debug + Codec,
    V: Debug + Codec,
{
    pub(crate) async fn new<T, D>(
        part: &BatchPart<T>,
        read: Schemas<K, V>,
        schema_cache: &mut SchemaCache<K, V, T, D>,
    ) -> Result<Self, Schemas<K, V>>
    where
        T: Timestamp + Lattice + Codec64 + Sync,
        D: Semigroup + Codec64,
    {
        // At one point in time during our structured data migration, we deprecated the
        // already written schema IDs because we made all columns at the Arrow/Parquet
        // level nullable, thus changing the schema that parts were written with.
        //
        // _After_ this deprecation, we've observed at least one instance where a
        // structured-only Part was written with the schema ID in the _old_ deprecated
        // field. While unexpected, given the ordering of our releases it is safe to
        // use the deprecated schema ID if we have a structured-only part.
        let write = match part.schema_id() {
            Some(write_id) => Some(write_id),
            None => {
                if part.is_structured_only(&schema_cache.applier.metrics.columnar) {
                    let deprecated_id = part.deprecated_schema_id();
                    tracing::warn!(?deprecated_id, "falling back to deprecated schema ID");
                    deprecated_id
                } else {
                    None
                }
            }
        };

        match (write, read.id) {
            (None, _) => Ok(PartMigration::Schemaless { read }),
            (Some(w), Some(r)) if w == r => Ok(PartMigration::SameSchema { both: read }),
            (Some(w), _) => {
                let write = schema_cache
                    .schemas(&w)
                    .await
                    .expect("appended part should reference registered schema");
                // Even if we're missing a schema id, if the schemas are equal,
                // use `SameSchema`. This isn't a correctness issue, we'd just
                // generate NoOp migrations, but it'll make the metrics more
                // intuitive.
                if write.key == read.key && write.val == read.val {
                    return Ok(PartMigration::SameSchema { both: read });
                }

                let start = Instant::now();
                let key_migration = schema_cache
                    .key_migration(&write, &read)
                    .ok_or_else(|| read.clone())?;
                let val_migration = schema_cache
                    .val_migration(&write, &read)
                    .ok_or_else(|| read.clone())?;
                schema_cache
                    .applier
                    .metrics
                    .schema
                    .migration_new_count
                    .inc();
                schema_cache
                    .applier
                    .metrics
                    .schema
                    .migration_new_seconds
                    .inc_by(start.elapsed().as_secs_f64());

                Ok(PartMigration::Either {
                    write,
                    read,
                    key_migration,
                    val_migration,
                })
            }
        }
    }
}

impl<K: Codec, V: Codec> PartMigration<K, V> {
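    /// The schemas to use when decoding this part's `Codec` data: always the
    /// reading handle's schemas, regardless of which schema the part was
    /// written with.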
    pub(crate) fn codec_read(&self) -> &Schemas<K, V> {
        match self {
            PartMigration::SameSchema { both } => both,
            PartMigration::Schemaless { read } => read,
            PartMigration::Either { read, .. } => read,
        }
    }
}

#[cfg(test)]
mod tests {
    use arrow::array::{
        Array, ArrayBuilder, StringArray, StringBuilder, StructArray, as_string_array,
    };
    use arrow::datatypes::{DataType, Field};
    use bytes::BufMut;
    use futures::StreamExt;
    use mz_dyncfg::ConfigUpdates;
    use mz_persist_types::ShardId;
    use mz_persist_types::arrow::ArrayOrd;
    use mz_persist_types::codec_impls::UnitSchema;
    use mz_persist_types::columnar::{ColumnDecoder, ColumnEncoder, Schema};
    use mz_persist_types::stats::{NoneStats, StructStats};
    use timely::progress::Antichain;

    use crate::Diagnostics;
    use crate::cli::admin::info_log_non_zero_metrics;
    use crate::read::ReadHandle;
    use crate::tests::new_test_client;

    use super::*;

    #[mz_ore::test]
    fn schema_id() {
        assert_eq!(SchemaId(1).to_string(), "h1");
        assert_eq!(SchemaId::try_from("h1".to_owned()), Ok(SchemaId(1)));
        assert!(SchemaId::try_from("nope".to_owned()).is_err());
    }

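    /// Test codec: each value is one string per column, and the schema
    /// records whether each column is nullable. Nulls round-trip as the
    /// empty string.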
    #[derive(Debug, Default, PartialEq, Eq, PartialOrd, Ord)]
    struct Strings(Vec<String>);

    impl Codec for Strings {
        type Schema = StringsSchema;
        type Storage = ();

        fn codec_name() -> String {
            "Strings".into()
        }

        fn encode<B: BufMut>(&self, buf: &mut B) {
            buf.put_slice(self.0.join(",").as_bytes());
        }
        fn decode<'a>(buf: &'a [u8], schema: &Self::Schema) -> Result<Self, String> {
            let buf = std::str::from_utf8(buf).map_err(|err| err.to_string())?;
            let mut ret = buf.split(",").map(|x| x.to_owned()).collect::<Vec<_>>();
            // Fill in nulls or drop columns to match the requested schema.
            while schema.0.len() > ret.len() {
                ret.push("".into());
            }
            while schema.0.len() < ret.len() {
                ret.pop();
            }
            Ok(Strings(ret))
        }

        fn encode_schema(schema: &Self::Schema) -> bytes::Bytes {
            schema
                .0
                .iter()
                .map(|x| x.then_some('n').unwrap_or(' '))
                .collect::<String>()
                .into_bytes()
                .into()
        }
        fn decode_schema(buf: &bytes::Bytes) -> Self::Schema {
            let buf = std::str::from_utf8(buf).expect("valid schema");
            StringsSchema(
                buf.chars()
                    .map(|x| match x {
                        'n' => true,
                        ' ' => false,
                        _ => unreachable!(),
                    })
                    .collect(),
            )
        }
    }

    #[derive(Debug, Clone, Default, PartialEq)]
    struct StringsSchema(Vec<bool>);

    impl Schema<Strings> for StringsSchema {
        type ArrowColumn = StructArray;
        type Statistics = NoneStats;
        type Decoder = StringsDecoder;
        type Encoder = StringsEncoder;

        fn decoder(&self, col: Self::ArrowColumn) -> Result<Self::Decoder, anyhow::Error> {
            let mut cols = Vec::new();
            for (idx, _) in self.0.iter().enumerate() {
                cols.push(as_string_array(col.column_by_name(&idx.to_string()).unwrap()).clone());
            }
            Ok(StringsDecoder(cols))
        }
        fn encoder(&self) -> Result<Self::Encoder, anyhow::Error> {
            let mut fields = Vec::new();
            let mut arrays = Vec::new();
            for (idx, nullable) in self.0.iter().enumerate() {
                fields.push(Field::new(idx.to_string(), DataType::Utf8, *nullable));
                arrays.push(StringBuilder::new());
            }
            Ok(StringsEncoder { fields, arrays })
        }
    }

    #[derive(Debug)]
    struct StringsDecoder(Vec<StringArray>);
    impl ColumnDecoder<Strings> for StringsDecoder {
        fn decode(&self, idx: usize, val: &mut Strings) {
            val.0.clear();
            for col in self.0.iter() {
                if col.is_valid(idx) {
                    val.0.push(col.value(idx).into());
                } else {
                    val.0.push("".into());
                }
            }
        }
        fn is_null(&self, _: usize) -> bool {
            false
        }
        fn goodbytes(&self) -> usize {
            self.0
                .iter()
                .map(|val| ArrayOrd::String(val.clone()).goodbytes())
                .sum()
        }
        fn stats(&self) -> StructStats {
            StructStats {
                len: self.0[0].len(),
                cols: Default::default(),
            }
        }
    }

    #[derive(Debug)]
    struct StringsEncoder {
        fields: Vec<Field>,
        arrays: Vec<StringBuilder>,
    }
    impl ColumnEncoder<Strings> for StringsEncoder {
        type FinishedColumn = StructArray;

        fn goodbytes(&self) -> usize {
            self.arrays.iter().map(|a| a.values_slice().len()).sum()
        }

        fn append(&mut self, val: &Strings) {
            for (idx, val) in val.0.iter().enumerate() {
                if val.is_empty() {
                    self.arrays[idx].append_null();
                } else {
                    self.arrays[idx].append_value(val);
                }
            }
        }
        fn append_null(&mut self) {
            unreachable!()
        }
        fn finish(self) -> Self::FinishedColumn {
            let arrays = self
                .arrays
                .into_iter()
                .map(|mut x| ArrayBuilder::finish(&mut x))
                .collect();
            StructArray::new(self.fields.into(), arrays, None)
        }
    }

    #[mz_persist_proc::test(tokio::test)]
    #[cfg_attr(miri, ignore)]
    async fn compare_and_evolve_schema(dyncfgs: ConfigUpdates) {
        let client = new_test_client(&dyncfgs).await;
        let d = Diagnostics::for_tests();
        let shard_id = ShardId::new();
        let schema0 = StringsSchema(vec![false]);
        let schema1 = StringsSchema(vec![false, true]);

        let write0 = client
            .open_writer::<Strings, (), u64, i64>(
                shard_id,
                Arc::new(schema0.clone()),
                Arc::new(UnitSchema),
                d.clone(),
            )
            .await
            .unwrap();
        assert_eq!(write0.write_schemas.id.unwrap(), SchemaId(0));

        // Not backward compatible (yet... we don't support dropping a column at
        // the moment).
        let res = client
            .compare_and_evolve_schema::<Strings, (), u64, i64>(
                shard_id,
                SchemaId(0),
                &StringsSchema(vec![]),
                &UnitSchema,
                d.clone(),
            )
            .await
            .unwrap();
        assert_eq!(res, CaESchema::Incompatible);

        // Incorrect expectation
        let res = client
            .compare_and_evolve_schema::<Strings, (), u64, i64>(
                shard_id,
                SchemaId(1),
                &schema1,
                &UnitSchema,
                d.clone(),
            )
            .await
            .unwrap();
        assert_eq!(
            res,
            CaESchema::ExpectedMismatch {
                schema_id: SchemaId(0),
                key: schema0,
                val: UnitSchema
            }
        );

        // Successful evolution
        let res = client
            .compare_and_evolve_schema::<Strings, (), u64, i64>(
                shard_id,
                SchemaId(0),
                &schema1,
                &UnitSchema,
                d.clone(),
            )
            .await
            .unwrap();
        assert_eq!(res, CaESchema::Ok(SchemaId(1)));

        // Create a write handle with the new schema and validate that it picks
        // up the correct schema id.
        let write1 = client
            .open_writer::<Strings, (), u64, i64>(
                shard_id,
                Arc::new(schema1),
                Arc::new(UnitSchema),
                d.clone(),
            )
            .await
            .unwrap();
        assert_eq!(write1.write_schemas.id.unwrap(), SchemaId(1));
    }

    fn strings(xs: &[((Result<Strings, String>, Result<(), String>), u64, i64)]) -> Vec<Vec<&str>> {
        xs.iter()
            .map(|((k, _), _, _)| k.as_ref().unwrap().0.iter().map(|x| x.as_str()).collect())
            .collect()
    }

    #[mz_persist_proc::test(tokio::test)]
    #[cfg_attr(miri, ignore)]
    async fn schema_evolution(dyncfgs: ConfigUpdates) {
        async fn snap_streaming(
            as_of: u64,
            read: &mut ReadHandle<Strings, (), u64, i64>,
        ) -> Vec<((Result<Strings, String>, Result<(), String>), u64, i64)> {
            // NB: We test with both snapshot_and_fetch and snapshot_and_stream
            // because one uses the consolidating iter and one doesn't.
            let mut ret = read
                .snapshot_and_stream(Antichain::from_elem(as_of))
                .await
                .unwrap()
                .collect::<Vec<_>>()
                .await;
            ret.sort();
            ret
        }

        let client = new_test_client(&dyncfgs).await;
        let d = Diagnostics::for_tests();
        let shard_id = ShardId::new();
        let schema0 = StringsSchema(vec![false]);
        let schema1 = StringsSchema(vec![false, true]);

        // Write some data at the original schema.
        let (mut write0, mut read0) = client
            .open::<Strings, (), u64, i64>(
                shard_id,
                Arc::new(schema0.clone()),
                Arc::new(UnitSchema),
                d.clone(),
                true,
            )
            .await
            .unwrap();
        write0
            .expect_compare_and_append(&[((Strings(vec!["0 before".into()]), ()), 0, 1)], 0, 1)
            .await;
        let expected = vec![vec!["0 before"]];
        assert_eq!(strings(&snap_streaming(0, &mut read0).await), expected);
        assert_eq!(strings(&read0.expect_snapshot_and_fetch(0).await), expected);

        // Register and write some data at the new schema.
        let res = client
            .compare_and_evolve_schema::<Strings, (), u64, i64>(
                shard_id,
                SchemaId(0),
                &schema1,
                &UnitSchema,
                d.clone(),
            )
            .await
            .unwrap();
        assert_eq!(res, CaESchema::Ok(SchemaId(1)));
        let (mut write1, mut read1) = client
            .open::<Strings, (), u64, i64>(
                shard_id,
                Arc::new(schema1.clone()),
                Arc::new(UnitSchema),
                d.clone(),
                true,
            )
            .await
            .unwrap();
        write1
            .expect_compare_and_append(
                &[
                    ((Strings(vec!["1 null".into(), "".into()]), ()), 1, 1),
                    ((Strings(vec!["1 not".into(), "x".into()]), ()), 1, 1),
                ],
                1,
                2,
            )
            .await;

        // Continue to write data with the original schema.
        write0
            .expect_compare_and_append(&[((Strings(vec!["0 after".into()]), ()), 2, 1)], 2, 3)
            .await;

        // Original schema drops the new column in data written by new schema.
        let expected = vec![
            vec!["0 after"],
            vec!["0 before"],
            vec!["1 not"],
            vec!["1 null"],
        ];
        assert_eq!(strings(&snap_streaming(2, &mut read0).await), expected);
        assert_eq!(strings(&read0.expect_snapshot_and_fetch(2).await), expected);

        // New schema adds nulls (represented by empty string in Strings) in
        // data written by old schema.
        let expected = vec![
            vec!["0 after", ""],
            vec!["0 before", ""],
            vec!["1 not", "x"],
            vec!["1 null", ""],
        ];
        assert_eq!(strings(&snap_streaming(2, &mut read1).await), expected);
        assert_eq!(strings(&read1.expect_snapshot_and_fetch(2).await), expected);

        // Probably too spammy to leave in the logs, but it was useful to have
        // hooked up while iterating.
        if false {
            info_log_non_zero_metrics(&client.metrics.registry.gather());
        }
    }
}
791}