iceberg/writer/base_writer/equality_delete_writer.rs

// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements.  See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.  The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.  You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.  See the License for the
// specific language governing permissions and limitations
// under the License.

//! This module provides `EqualityDeleteFileWriter`, which writes equality delete files.
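//!
//! A minimal end-to-end sketch (doc example, marked `ignore` so it is not
//! compiled; the `file_io`, `location_gen`, `file_name_gen`, `table_schema`,
//! and `batch` bindings are assumed to exist, set up as in the tests below):
//!
//! ```ignore
//! // Choose the field ids that identify a row and derive the delete schema.
//! let config = EqualityDeleteWriterConfig::new(vec![1, 2], table_schema)?;
//! let delete_schema = Arc::new(arrow_schema_to_schema(config.projected_arrow_schema_ref())?);
//!
//! // Stack a Parquet writer inside a rolling writer, then build the delete writer.
//! let parquet_builder =
//!     ParquetWriterBuilder::new(WriterProperties::builder().build(), delete_schema.clone());
//! let rolling_builder = RollingFileWriterBuilder::new_with_default_file_size(
//!     parquet_builder,
//!     delete_schema,
//!     file_io,
//!     location_gen,
//!     file_name_gen,
//! );
//! let mut writer = EqualityDeleteFileWriterBuilder::new(rolling_builder, config)
//!     .build(None)
//!     .await?;
//!
//! // Each batch is projected down to the equality id columns before writing.
//! writer.write(batch).await?;
//! let delete_files = writer.close().await?;
//! ```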

use std::sync::Arc;

use arrow_array::RecordBatch;
use arrow_schema::{DataType, Field, SchemaRef as ArrowSchemaRef};
use itertools::Itertools;
use parquet::arrow::PARQUET_FIELD_ID_META_KEY;

use crate::arrow::record_batch_projector::RecordBatchProjector;
use crate::arrow::schema_to_arrow_schema;
use crate::spec::{DataFile, PartitionKey, SchemaRef};
use crate::writer::file_writer::FileWriterBuilder;
use crate::writer::file_writer::location_generator::{FileNameGenerator, LocationGenerator};
use crate::writer::file_writer::rolling_writer::{RollingFileWriter, RollingFileWriterBuilder};
use crate::writer::{IcebergWriter, IcebergWriterBuilder};
use crate::{Error, ErrorKind, Result};

/// Builder for `EqualityDeleteFileWriter`.
#[derive(Debug)]
pub struct EqualityDeleteFileWriterBuilder<
    B: FileWriterBuilder,
    L: LocationGenerator,
    F: FileNameGenerator,
> {
    inner: RollingFileWriterBuilder<B, L, F>,
    config: EqualityDeleteWriterConfig,
}

impl<B, L, F> EqualityDeleteFileWriterBuilder<B, L, F>
where
    B: FileWriterBuilder,
    L: LocationGenerator,
    F: FileNameGenerator,
{
    /// Create a new `EqualityDeleteFileWriterBuilder` using a `RollingFileWriterBuilder`.
    pub fn new(
        inner: RollingFileWriterBuilder<B, L, F>,
        config: EqualityDeleteWriterConfig,
    ) -> Self {
        Self { inner, config }
    }
}

/// Config for `EqualityDeleteFileWriter`.
#[derive(Debug)]
pub struct EqualityDeleteWriterConfig {
    // Field ids used to determine row equality in equality delete files.
    equality_ids: Vec<i32>,
    // Projector used to project the data chunk into specific fields.
    projector: RecordBatchProjector,
}

impl EqualityDeleteWriterConfig {
    /// Create a new `EqualityDeleteWriterConfig` with equality ids.
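    ///
    /// A construction sketch (doc example, marked `ignore`; `table_schema` is an
    /// assumed `SchemaRef` whose fields 1 and 2 are required, non-float primitives):
    ///
    /// ```ignore
    /// // Nested and floating-point fields cannot serve as equality ids,
    /// // so `new` returns an error for them.
    /// let config = EqualityDeleteWriterConfig::new(vec![1, 2], table_schema)?;
    /// ```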
    pub fn new(equality_ids: Vec<i32>, original_schema: SchemaRef) -> Result<Self> {
        let original_arrow_schema = Arc::new(schema_to_arrow_schema(&original_schema)?);
        let projector = RecordBatchProjector::new(
            original_arrow_schema,
            &equality_ids,
            // The following rules come from https://iceberg.apache.org/spec/#identifier-field-ids
            // and https://iceberg.apache.org/spec/#equality-delete-files
            // - Identifier fields must be primitive types.
            // - Float, double, and nullable fields must not be used as identifier fields.
            |field| {
                // Only primitive, non-floating-point types may be used as identifier fields.
                if field.data_type().is_nested()
                    || matches!(
                        field.data_type(),
                        DataType::Float16 | DataType::Float32 | DataType::Float64
                    )
                {
                    return Ok(None);
                }
                Ok(Some(
                    field
                        .metadata()
                        .get(PARQUET_FIELD_ID_META_KEY)
                        .ok_or_else(|| {
                            Error::new(ErrorKind::Unexpected, "Field metadata is missing.")
                        })?
                        .parse::<i64>()
                        .map_err(|e| Error::new(ErrorKind::Unexpected, e.to_string()))?,
                ))
            },
            |_field: &Field| true,
        )?;
        Ok(Self {
            equality_ids,
            projector,
        })
    }

    /// Returns the projected Arrow schema.
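    ///
    /// A sketch of deriving the Iceberg delete-file schema from this projection
    /// (doc example, marked `ignore`; `arrow_schema_to_schema` is used the same
    /// way in the tests below):
    ///
    /// ```ignore
    /// let delete_schema = Arc::new(arrow_schema_to_schema(config.projected_arrow_schema_ref())?);
    /// ```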
    pub fn projected_arrow_schema_ref(&self) -> &ArrowSchemaRef {
        self.projector.projected_schema_ref()
    }
}

#[async_trait::async_trait]
impl<B, L, F> IcebergWriterBuilder for EqualityDeleteFileWriterBuilder<B, L, F>
where
    B: FileWriterBuilder,
    L: LocationGenerator,
    F: FileNameGenerator,
{
    type R = EqualityDeleteFileWriter<B, L, F>;

    async fn build(&self, partition_key: Option<PartitionKey>) -> Result<Self::R> {
        Ok(EqualityDeleteFileWriter {
            inner: Some(self.inner.build()),
            projector: self.config.projector.clone(),
            equality_ids: self.config.equality_ids.clone(),
            partition_key,
        })
    }
}

/// Writer used to write equality delete files.
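///
/// A lifecycle sketch (doc example, marked `ignore`; `writer` and `batch` are
/// assumed bindings): `write` projects each batch to the equality id columns,
/// and `close` takes the inner writer, so any call after `close` errors.
///
/// ```ignore
/// writer.write(batch).await?;        // projected to the equality id columns
/// let files = writer.close().await?; // `DataFile`s with equality-deletes content
/// assert!(writer.close().await.is_err()); // writer has been closed
/// ```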
#[derive(Debug)]
pub struct EqualityDeleteFileWriter<
    B: FileWriterBuilder,
    L: LocationGenerator,
    F: FileNameGenerator,
> {
    inner: Option<RollingFileWriter<B, L, F>>,
    projector: RecordBatchProjector,
    equality_ids: Vec<i32>,
    partition_key: Option<PartitionKey>,
}

#[async_trait::async_trait]
impl<B, L, F> IcebergWriter for EqualityDeleteFileWriter<B, L, F>
where
    B: FileWriterBuilder,
    L: LocationGenerator,
    F: FileNameGenerator,
{
    async fn write(&mut self, batch: RecordBatch) -> Result<()> {
        // Project the batch down to the equality id columns before writing.
        let batch = self.projector.project_batch(batch)?;
        if let Some(writer) = self.inner.as_mut() {
            writer.write(&self.partition_key, &batch).await
        } else {
            Err(Error::new(
                ErrorKind::Unexpected,
                "Equality delete inner writer has been closed.",
            ))
        }
    }

    async fn close(&mut self) -> Result<Vec<DataFile>> {
        if let Some(writer) = self.inner.take() {
            writer
                .close()
                .await?
                .into_iter()
                .map(|mut res| {
                    // Mark each file as an equality delete file and attach the
                    // equality ids and, if present, the partition information.
                    res.content(crate::spec::DataContentType::EqualityDeletes);
                    res.equality_ids(Some(self.equality_ids.iter().copied().collect_vec()));
                    if let Some(pk) = self.partition_key.as_ref() {
                        res.partition(pk.data().clone());
                        res.partition_spec_id(pk.spec().spec_id());
                    }
                    res.build().map_err(|e| {
                        Error::new(
                            ErrorKind::DataInvalid,
                            format!("Failed to build data file: {e}"),
                        )
                    })
                })
                .collect()
        } else {
            Err(Error::new(
                ErrorKind::Unexpected,
                "Equality delete inner writer has been closed.",
            ))
        }
    }
}

#[cfg(test)]
mod test {
    use std::collections::HashMap;
    use std::sync::Arc;

    use arrow_array::types::Int32Type;
    use arrow_array::{ArrayRef, BooleanArray, Int32Array, Int64Array, RecordBatch, StructArray};
    use arrow_buffer::NullBuffer;
    use arrow_schema::{DataType, Field, Fields};
    use arrow_select::concat::concat_batches;
    use itertools::Itertools;
    use parquet::arrow::PARQUET_FIELD_ID_META_KEY;
    use parquet::arrow::arrow_reader::ParquetRecordBatchReaderBuilder;
    use parquet::file::properties::WriterProperties;
    use tempfile::TempDir;
    use uuid::Uuid;

    use crate::arrow::{arrow_schema_to_schema, schema_to_arrow_schema};
    use crate::io::{FileIO, FileIOBuilder};
    use crate::spec::{
        DataFile, DataFileFormat, ListType, MapType, NestedField, PrimitiveType, Schema,
        StructType, Type,
    };
    use crate::writer::base_writer::equality_delete_writer::{
        EqualityDeleteFileWriterBuilder, EqualityDeleteWriterConfig,
    };
    use crate::writer::file_writer::ParquetWriterBuilder;
    use crate::writer::file_writer::location_generator::{
        DefaultFileNameGenerator, DefaultLocationGenerator,
    };
    use crate::writer::file_writer::rolling_writer::RollingFileWriterBuilder;
    use crate::writer::{IcebergWriter, IcebergWriterBuilder};

    async fn check_parquet_data_file_with_equality_delete_write(
        file_io: &FileIO,
        data_file: &DataFile,
        batch: &RecordBatch,
    ) {
        assert_eq!(data_file.file_format, DataFileFormat::Parquet);

        // read the written file
        let input_file = file_io.new_input(data_file.file_path.clone()).unwrap();
        let input_content = input_file.read().await.unwrap();
        let reader_builder =
            ParquetRecordBatchReaderBuilder::try_new(input_content.clone()).unwrap();
        let metadata = reader_builder.metadata().clone();

        // check data
        let reader = reader_builder.build().unwrap();
        let batches = reader.map(|batch| batch.unwrap()).collect::<Vec<_>>();
        let res = concat_batches(&batch.schema(), &batches).unwrap();
        assert_eq!(*batch, res);

        // check metadata
        let expect_column_num = batch.num_columns();

        assert_eq!(
            data_file.record_count,
            metadata
                .row_groups()
                .iter()
                .map(|group| group.num_rows())
                .sum::<i64>() as u64
        );

        assert_eq!(data_file.file_size_in_bytes, input_content.len() as u64);

        assert_eq!(data_file.column_sizes.len(), expect_column_num);

        for (index, id) in data_file.column_sizes().keys().sorted().enumerate() {
            metadata
                .row_groups()
                .iter()
                .map(|group| group.columns())
                .for_each(|column| {
                    assert_eq!(
                        *data_file.column_sizes.get(id).unwrap() as i64,
                        column.get(index).unwrap().compressed_size()
                    );
                });
        }

        assert_eq!(data_file.value_counts.len(), expect_column_num);
        data_file.value_counts.iter().for_each(|(_, &v)| {
            let expect = metadata
                .row_groups()
                .iter()
                .map(|group| group.num_rows())
                .sum::<i64>() as u64;
            assert_eq!(v, expect);
        });

        for (index, id) in data_file.null_value_counts().keys().enumerate() {
            let expect = batch.column(index).null_count() as u64;
            assert_eq!(*data_file.null_value_counts.get(id).unwrap(), expect);
        }

        let split_offsets = data_file
            .split_offsets
            .as_ref()
            .expect("split_offsets should be set");
        assert_eq!(split_offsets.len(), metadata.num_row_groups());
        split_offsets.iter().enumerate().for_each(|(i, &v)| {
            let expect = metadata.row_groups()[i].file_offset().unwrap();
            assert_eq!(v, expect);
        });
    }

    #[tokio::test]
    async fn test_equality_delete_writer() -> Result<(), anyhow::Error> {
        let temp_dir = TempDir::new().unwrap();
        let file_io = FileIOBuilder::new_fs_io().build().unwrap();
        let location_gen = DefaultLocationGenerator::with_data_location(
            temp_dir.path().to_str().unwrap().to_string(),
        );
        let file_name_gen =
            DefaultFileNameGenerator::new("test".to_string(), None, DataFileFormat::Parquet);

        // prepare data
        // Int, Struct(Int), String, List(Int), Struct(Struct(Int))
        let schema = Schema::builder()
            .with_schema_id(1)
            .with_fields(vec![
                NestedField::required(0, "col0", Type::Primitive(PrimitiveType::Int)).into(),
                NestedField::required(
                    1,
                    "col1",
                    Type::Struct(StructType::new(vec![
                        NestedField::required(5, "sub_col", Type::Primitive(PrimitiveType::Int))
                            .into(),
                    ])),
                )
                .into(),
                NestedField::required(2, "col2", Type::Primitive(PrimitiveType::String)).into(),
                NestedField::required(
                    3,
                    "col3",
                    Type::List(ListType::new(
                        NestedField::required(6, "element", Type::Primitive(PrimitiveType::Int))
                            .into(),
                    )),
                )
                .into(),
                NestedField::required(
                    4,
                    "col4",
                    Type::Struct(StructType::new(vec![
                        NestedField::required(
                            7,
                            "sub_col",
                            Type::Struct(StructType::new(vec![
                                NestedField::required(
                                    8,
                                    "sub_sub_col",
                                    Type::Primitive(PrimitiveType::Int),
                                )
                                .into(),
                            ])),
                        )
                        .into(),
                    ])),
                )
                .into(),
            ])
            .build()
            .unwrap();
        let arrow_schema = Arc::new(schema_to_arrow_schema(&schema).unwrap());
        let col0 = Arc::new(Int32Array::from_iter_values(vec![1; 1024])) as ArrayRef;
        let col1 = Arc::new(StructArray::new(
            if let DataType::Struct(fields) = arrow_schema.fields.get(1).unwrap().data_type() {
                fields.clone()
            } else {
                unreachable!()
            },
            vec![Arc::new(Int32Array::from_iter_values(vec![1; 1024]))],
            None,
        ));
        let col2 = Arc::new(arrow_array::StringArray::from_iter_values(vec![
            "test";
            1024
        ])) as ArrayRef;
        let col3 = Arc::new({
            let list_parts = arrow_array::ListArray::from_iter_primitive::<Int32Type, _, _>(vec![
                Some(vec![Some(1)]);
                1024
            ])
            .into_parts();
            arrow_array::ListArray::new(
                if let DataType::List(field) = arrow_schema.fields.get(3).unwrap().data_type() {
                    field.clone()
                } else {
                    unreachable!()
                },
                list_parts.1,
                list_parts.2,
                list_parts.3,
            )
        }) as ArrayRef;
        let col4 = Arc::new(StructArray::new(
            if let DataType::Struct(fields) = arrow_schema.fields.get(4).unwrap().data_type() {
                fields.clone()
            } else {
                unreachable!()
            },
            vec![Arc::new(StructArray::new(
                if let DataType::Struct(fields) = arrow_schema.fields.get(4).unwrap().data_type() {
                    if let DataType::Struct(fields) = fields.first().unwrap().data_type() {
                        fields.clone()
                    } else {
                        unreachable!()
                    }
                } else {
                    unreachable!()
                },
                vec![Arc::new(Int32Array::from_iter_values(vec![1; 1024]))],
                None,
            ))],
            None,
        ));
        let columns = vec![col0, col1, col2, col3, col4];
        let to_write = RecordBatch::try_new(arrow_schema.clone(), columns).unwrap();

        let equality_ids = vec![0_i32, 8];
        let equality_config =
            EqualityDeleteWriterConfig::new(equality_ids, Arc::new(schema)).unwrap();
        let delete_schema = Arc::new(
            arrow_schema_to_schema(equality_config.projected_arrow_schema_ref()).unwrap(),
        );
        let projector = equality_config.projector.clone();

        // prepare writer
        let pb =
            ParquetWriterBuilder::new(WriterProperties::builder().build(), delete_schema.clone());
        let rolling_writer_builder = RollingFileWriterBuilder::new_with_default_file_size(
            pb,
            delete_schema,
            file_io.clone(),
            location_gen,
            file_name_gen,
        );
        let mut equality_delete_writer =
            EqualityDeleteFileWriterBuilder::new(rolling_writer_builder, equality_config)
                .build(None)
                .await?;

        // write
        equality_delete_writer.write(to_write.clone()).await?;
        let res = equality_delete_writer.close().await?;
        assert_eq!(res.len(), 1);
        let data_file = res.into_iter().next().unwrap();

        // check
        let to_write_projected = projector.project_batch(to_write)?;
        check_parquet_data_file_with_equality_delete_write(
            &file_io,
            &data_file,
            &to_write_projected,
        )
        .await;
        Ok(())
    }

    #[tokio::test]
    async fn test_equality_delete_unreachable_column() -> Result<(), anyhow::Error> {
        let schema = Arc::new(
            Schema::builder()
                .with_schema_id(1)
                .with_fields(vec![
                    NestedField::required(0, "col0", Type::Primitive(PrimitiveType::Float)).into(),
                    NestedField::required(1, "col1", Type::Primitive(PrimitiveType::Double)).into(),
                    NestedField::optional(2, "col2", Type::Primitive(PrimitiveType::Int)).into(),
                    NestedField::required(
                        3,
                        "col3",
                        Type::Struct(StructType::new(vec![
                            NestedField::required(
                                4,
                                "sub_col",
                                Type::Primitive(PrimitiveType::Int),
                            )
                            .into(),
                        ])),
                    )
                    .into(),
                    NestedField::optional(
                        5,
                        "col4",
                        Type::Struct(StructType::new(vec![
                            NestedField::required(
                                6,
                                "sub_col2",
                                Type::Primitive(PrimitiveType::Int),
                            )
                            .into(),
                        ])),
                    )
                    .into(),
                    NestedField::required(
                        7,
                        "col5",
                        Type::Map(MapType::new(
                            Arc::new(NestedField::required(
                                8,
                                "key",
                                Type::Primitive(PrimitiveType::String),
                            )),
                            Arc::new(NestedField::required(
                                9,
                                "value",
                                Type::Primitive(PrimitiveType::Int),
                            )),
                        )),
                    )
                    .into(),
                    NestedField::required(
                        10,
                        "col6",
                        Type::List(ListType::new(Arc::new(NestedField::required(
                            11,
                            "element",
                            Type::Primitive(PrimitiveType::Int),
                        )))),
                    )
                    .into(),
                ])
                .build()
                .unwrap(),
        );
        // Float and Double are not allowed to be used for equality deletes
        assert!(EqualityDeleteWriterConfig::new(vec![0], schema.clone()).is_err());
        assert!(EqualityDeleteWriterConfig::new(vec![1], schema.clone()).is_err());
        // A struct is not allowed to be used for equality deletes
        assert!(EqualityDeleteWriterConfig::new(vec![3], schema.clone()).is_err());
        // A nested field of a struct is allowed to be used for equality deletes
        assert!(EqualityDeleteWriterConfig::new(vec![4], schema.clone()).is_ok());
        // Nested fields of a map are not allowed to be used for equality deletes
        assert!(EqualityDeleteWriterConfig::new(vec![7], schema.clone()).is_err());
        assert!(EqualityDeleteWriterConfig::new(vec![8], schema.clone()).is_err());
        assert!(EqualityDeleteWriterConfig::new(vec![9], schema.clone()).is_err());
        // Nested fields of a list are not allowed to be used for equality deletes
        assert!(EqualityDeleteWriterConfig::new(vec![10], schema.clone()).is_err());
        assert!(EqualityDeleteWriterConfig::new(vec![11], schema.clone()).is_err());

        Ok(())
    }

    #[tokio::test]
    async fn test_equality_delete_with_primitive_type() -> Result<(), anyhow::Error> {
        let temp_dir = TempDir::new().unwrap();
        let file_io = FileIOBuilder::new_fs_io().build().unwrap();
        let location_gen = DefaultLocationGenerator::with_data_location(
            temp_dir.path().to_str().unwrap().to_string(),
        );
        let file_name_gen =
            DefaultFileNameGenerator::new("test".to_string(), None, DataFileFormat::Parquet);

        let schema = Arc::new(
            Schema::builder()
                .with_schema_id(1)
                .with_fields(vec![
                    NestedField::required(0, "col0", Type::Primitive(PrimitiveType::Boolean))
                        .into(),
                    NestedField::required(1, "col1", Type::Primitive(PrimitiveType::Int)).into(),
                    NestedField::required(2, "col2", Type::Primitive(PrimitiveType::Long)).into(),
                    NestedField::required(
                        3,
                        "col3",
                        Type::Primitive(PrimitiveType::Decimal {
                            precision: 38,
                            scale: 5,
                        }),
                    )
                    .into(),
                    NestedField::required(4, "col4", Type::Primitive(PrimitiveType::Date)).into(),
                    NestedField::required(5, "col5", Type::Primitive(PrimitiveType::Time)).into(),
                    NestedField::required(6, "col6", Type::Primitive(PrimitiveType::Timestamp))
                        .into(),
                    NestedField::required(7, "col7", Type::Primitive(PrimitiveType::Timestamptz))
                        .into(),
                    NestedField::required(8, "col8", Type::Primitive(PrimitiveType::TimestampNs))
                        .into(),
                    NestedField::required(9, "col9", Type::Primitive(PrimitiveType::TimestamptzNs))
                        .into(),
                    NestedField::required(10, "col10", Type::Primitive(PrimitiveType::String))
                        .into(),
                    NestedField::required(11, "col11", Type::Primitive(PrimitiveType::Uuid)).into(),
                    NestedField::required(12, "col12", Type::Primitive(PrimitiveType::Fixed(10)))
                        .into(),
                    NestedField::required(13, "col13", Type::Primitive(PrimitiveType::Binary))
                        .into(),
                ])
                .build()
                .unwrap(),
        );
        let equality_ids = vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13];
        let config = EqualityDeleteWriterConfig::new(equality_ids, schema.clone()).unwrap();
        let delete_arrow_schema = config.projected_arrow_schema_ref().clone();
        let delete_schema = Arc::new(arrow_schema_to_schema(&delete_arrow_schema).unwrap());

        let pb =
            ParquetWriterBuilder::new(WriterProperties::builder().build(), delete_schema.clone());
        let rolling_writer_builder = RollingFileWriterBuilder::new_with_default_file_size(
            pb,
            delete_schema,
            file_io.clone(),
            location_gen,
            file_name_gen,
        );
        let mut equality_delete_writer =
            EqualityDeleteFileWriterBuilder::new(rolling_writer_builder, config)
                .build(None)
                .await?;

        // prepare data
        let col0 = Arc::new(BooleanArray::from(vec![
            Some(true),
            Some(false),
            Some(true),
        ])) as ArrayRef;
        let col1 = Arc::new(Int32Array::from(vec![Some(1), Some(2), Some(4)])) as ArrayRef;
        let col2 = Arc::new(Int64Array::from(vec![Some(1), Some(2), Some(4)])) as ArrayRef;
        let col3 = Arc::new(
            arrow_array::Decimal128Array::from(vec![Some(1), Some(2), Some(4)])
                .with_precision_and_scale(38, 5)
                .unwrap(),
        ) as ArrayRef;
        let col4 = Arc::new(arrow_array::Date32Array::from(vec![
            Some(0),
            Some(1),
            Some(3),
        ])) as ArrayRef;
        let col5 = Arc::new(arrow_array::Time64MicrosecondArray::from(vec![
            Some(0),
            Some(1),
            Some(3),
        ])) as ArrayRef;
        let col6 = Arc::new(arrow_array::TimestampMicrosecondArray::from(vec![
            Some(0),
            Some(1),
            Some(3),
        ])) as ArrayRef;
        let col7 = Arc::new(
            arrow_array::TimestampMicrosecondArray::from(vec![Some(0), Some(1), Some(3)])
                .with_timezone_utc(),
        ) as ArrayRef;
        let col8 = Arc::new(arrow_array::TimestampNanosecondArray::from(vec![
            Some(0),
            Some(1),
            Some(3),
        ])) as ArrayRef;
        let col9 = Arc::new(
            arrow_array::TimestampNanosecondArray::from(vec![Some(0), Some(1), Some(3)])
                .with_timezone_utc(),
        ) as ArrayRef;
        let col10 = Arc::new(arrow_array::StringArray::from(vec![
            Some("a"),
            Some("b"),
            Some("d"),
        ])) as ArrayRef;
        let col11 = Arc::new(
            arrow_array::FixedSizeBinaryArray::try_from_sparse_iter_with_size(
                vec![
                    Some(Uuid::from_u128(0).as_bytes().to_vec()),
                    Some(Uuid::from_u128(1).as_bytes().to_vec()),
                    Some(Uuid::from_u128(3).as_bytes().to_vec()),
                ]
                .into_iter(),
                16,
            )
            .unwrap(),
        ) as ArrayRef;
        let col12 = Arc::new(
            arrow_array::FixedSizeBinaryArray::try_from_sparse_iter_with_size(
                vec![
                    Some(vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10]),
                    Some(vec![11, 12, 13, 14, 15, 16, 17, 18, 19, 20]),
                    Some(vec![21, 22, 23, 24, 25, 26, 27, 28, 29, 30]),
                ]
                .into_iter(),
                10,
            )
            .unwrap(),
        ) as ArrayRef;
        let col13 = Arc::new(arrow_array::LargeBinaryArray::from_opt_vec(vec![
            Some(b"one"),
            Some(b""),
            Some(b"zzzz"),
        ])) as ArrayRef;
        let to_write = RecordBatch::try_new(delete_arrow_schema.clone(), vec![
            col0, col1, col2, col3, col4, col5, col6, col7, col8, col9, col10, col11, col12, col13,
        ])
        .unwrap();
        equality_delete_writer.write(to_write.clone()).await?;
        let res = equality_delete_writer.close().await?;
        assert_eq!(res.len(), 1);
        check_parquet_data_file_with_equality_delete_write(
            &file_io,
            &res.into_iter().next().unwrap(),
            &to_write,
        )
        .await;

        Ok(())
    }

    #[tokio::test]
    async fn test_equality_delete_with_nullable_field() -> Result<(), anyhow::Error> {
        // prepare data
        // Int, Struct(Int), Struct(Struct(Int))
        let schema = Schema::builder()
            .with_schema_id(1)
            .with_fields(vec![
                NestedField::optional(0, "col0", Type::Primitive(PrimitiveType::Int)).into(),
                NestedField::optional(
                    1,
                    "col1",
                    Type::Struct(StructType::new(vec![
                        NestedField::optional(2, "sub_col", Type::Primitive(PrimitiveType::Int))
                            .into(),
                    ])),
                )
                .into(),
                NestedField::optional(
                    3,
                    "col2",
                    Type::Struct(StructType::new(vec![
                        NestedField::optional(
                            4,
                            "sub_struct_col",
                            Type::Struct(StructType::new(vec![
                                NestedField::optional(
                                    5,
                                    "sub_sub_col",
                                    Type::Primitive(PrimitiveType::Int),
                                )
                                .into(),
                            ])),
                        )
                        .into(),
                    ])),
                )
                .into(),
            ])
            .build()
            .unwrap();
        let arrow_schema = Arc::new(schema_to_arrow_schema(&schema).unwrap());
        // Rows, per column (col0, col1, col2):
        // null 1            null(struct)
        // 2    null(struct) null(sub_struct_col)
        // 3    null(field)  null(sub_sub_col)
        let col0 = Arc::new(Int32Array::from(vec![None, Some(2), Some(3)])) as ArrayRef;
        let col1 = {
            let nulls = NullBuffer::from(vec![true, false, true]);
            Arc::new(StructArray::new(
                if let DataType::Struct(fields) = arrow_schema.fields.get(1).unwrap().data_type() {
                    fields.clone()
                } else {
                    unreachable!()
                },
                vec![Arc::new(Int32Array::from(vec![Some(1), Some(2), None]))],
                Some(nulls),
            ))
        };
        let col2 = {
            let inner_col = {
                let nulls = NullBuffer::from(vec![true, false, true]);
                Arc::new(StructArray::new(
                    Fields::from(vec![
                        Field::new("sub_sub_col", DataType::Int32, true).with_metadata(
                            HashMap::from([(
                                PARQUET_FIELD_ID_META_KEY.to_string(),
                                "5".to_string(),
                            )]),
                        ),
                    ]),
                    vec![Arc::new(Int32Array::from(vec![Some(1), Some(2), None]))],
                    Some(nulls),
                ))
            };
            let nulls = NullBuffer::from(vec![false, true, true]);
            Arc::new(StructArray::new(
                if let DataType::Struct(fields) = arrow_schema.fields.get(2).unwrap().data_type() {
                    fields.clone()
                } else {
                    unreachable!()
                },
                vec![inner_col],
                Some(nulls),
            ))
        };
        let columns = vec![col0, col1, col2];

        let to_write = RecordBatch::try_new(arrow_schema.clone(), columns).unwrap();
        let equality_ids = vec![0_i32, 2, 5];
        let equality_config =
            EqualityDeleteWriterConfig::new(equality_ids, Arc::new(schema)).unwrap();
        let projector = equality_config.projector.clone();

        // check
        let to_write_projected = projector.project_batch(to_write)?;
        let expect_batch =
            RecordBatch::try_new(equality_config.projected_arrow_schema_ref().clone(), vec![
                Arc::new(Int32Array::from(vec![None, Some(2), Some(3)])) as ArrayRef,
                Arc::new(Int32Array::from(vec![Some(1), None, None])) as ArrayRef,
                Arc::new(Int32Array::from(vec![None, None, None])) as ArrayRef,
            ])
            .unwrap();
        assert_eq!(to_write_projected, expect_batch);
        Ok(())
    }
}