parquet/arrow/arrow_writer/mod.rs

1// Licensed to the Apache Software Foundation (ASF) under one
2// or more contributor license agreements.  See the NOTICE file
3// distributed with this work for additional information
4// regarding copyright ownership.  The ASF licenses this file
5// to you under the Apache License, Version 2.0 (the
6// "License"); you may not use this file except in compliance
7// with the License.  You may obtain a copy of the License at
8//
9//   http://www.apache.org/licenses/LICENSE-2.0
10//
11// Unless required by applicable law or agreed to in writing,
12// software distributed under the License is distributed on an
13// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14// KIND, either express or implied.  See the License for the
15// specific language governing permissions and limitations
16// under the License.
17
18//! Contains the writer which writes Arrow data into Parquet data.
19
20use bytes::Bytes;
21use std::io::{Read, Write};
22use std::iter::Peekable;
23use std::slice::Iter;
24use std::sync::{Arc, Mutex};
25use std::vec::IntoIter;
26use thrift::protocol::TCompactOutputProtocol;
27
28use arrow_array::cast::AsArray;
29use arrow_array::types::*;
30use arrow_array::{ArrayRef, RecordBatch, RecordBatchWriter};
31use arrow_schema::{ArrowError, DataType as ArrowDataType, Field, IntervalUnit, SchemaRef};
32
33use super::schema::{add_encoded_arrow_schema_to_metadata, decimal_length_from_precision};
34
35use crate::arrow::arrow_writer::byte_array::ByteArrayEncoder;
36use crate::arrow::ArrowSchemaConverter;
37use crate::column::page::{CompressedPage, PageWriteSpec, PageWriter};
38use crate::column::page_encryption::PageEncryptor;
39use crate::column::writer::encoder::ColumnValueEncoder;
40use crate::column::writer::{
41    get_column_writer, ColumnCloseResult, ColumnWriter, GenericColumnWriter,
42};
43use crate::data_type::{ByteArray, FixedLenByteArray};
44#[cfg(feature = "encryption")]
45use crate::encryption::encrypt::FileEncryptor;
46use crate::errors::{ParquetError, Result};
47use crate::file::metadata::{KeyValue, RowGroupMetaData};
48use crate::file::properties::{WriterProperties, WriterPropertiesPtr};
49use crate::file::reader::{ChunkReader, Length};
50use crate::file::writer::{SerializedFileWriter, SerializedRowGroupWriter};
51use crate::schema::types::{ColumnDescPtr, SchemaDescriptor};
52use crate::thrift::TSerializable;
53use levels::{calculate_array_levels, ArrayLevels};
54
55mod byte_array;
56mod levels;
57
58/// Encodes [`RecordBatch`] to parquet
59///
60/// Writes Arrow `RecordBatch`es to a Parquet writer. Multiple [`RecordBatch`]es will be encoded
61/// into the same row group, up to `max_row_group_size` rows. Any remaining rows will be
62/// flushed on close, so the final row group in the output file may
63/// contain fewer than `max_row_group_size` rows.
64///
65/// # Example: Writing `RecordBatch`es
66/// ```
67/// # use std::sync::Arc;
68/// # use bytes::Bytes;
69/// # use arrow_array::{ArrayRef, Int64Array};
70/// # use arrow_array::RecordBatch;
71/// # use parquet::arrow::arrow_writer::ArrowWriter;
72/// # use parquet::arrow::arrow_reader::ParquetRecordBatchReader;
73/// let col = Arc::new(Int64Array::from_iter_values([1, 2, 3])) as ArrayRef;
74/// let to_write = RecordBatch::try_from_iter([("col", col)]).unwrap();
75///
76/// let mut buffer = Vec::new();
77/// let mut writer = ArrowWriter::try_new(&mut buffer, to_write.schema(), None).unwrap();
78/// writer.write(&to_write).unwrap();
79/// writer.close().unwrap();
80///
81/// let mut reader = ParquetRecordBatchReader::try_new(Bytes::from(buffer), 1024).unwrap();
82/// let read = reader.next().unwrap().unwrap();
83///
84/// assert_eq!(to_write, read);
85/// ```
86///
87/// # Memory Usage and Limiting
88///
89/// The nature of Parquet requires buffering of an entire row group before it can
90/// be flushed to the underlying writer. Data is mostly buffered in its encoded
91/// form, reducing memory usage. However, some data such as dictionary keys,
92/// large strings or very nested data may still result in non-trivial memory
93/// usage.
94///
95/// See Also:
96/// * [`ArrowWriter::memory_size`]: the current memory usage of the writer.
97/// * [`ArrowWriter::in_progress_size`]: Estimated size of the buffered row group,
98///
99/// Call [`Self::flush`] to trigger an early flush of a row group based on a
100/// memory threshold and/or global memory pressure. However, smaller row groups
101/// result in higher metadata overheads, and thus may worsen compression ratios
102/// and query performance.
103///
104/// ```no_run
105/// # use std::io::Write;
106/// # use arrow_array::RecordBatch;
107/// # use parquet::arrow::ArrowWriter;
108/// # let mut writer: ArrowWriter<Vec<u8>> = todo!();
109/// # let batch: RecordBatch = todo!();
110/// writer.write(&batch).unwrap();
111/// // Trigger an early flush if anticipated size exceeds 1_000_000
112/// if writer.in_progress_size() > 1_000_000 {
113///     writer.flush().unwrap();
114/// }
115/// ```
116///
117/// ## Type Support
118///
119/// The writer supports writing all Arrow [`DataType`]s that have a direct mapping to
120/// Parquet types, including [`StructArray`] and [`ListArray`].
121///
122/// The following are not supported:
123///
124/// * [`IntervalMonthDayNanoArray`]: Parquet does not [support nanosecond intervals].
125///
126/// [`DataType`]: https://docs.rs/arrow/latest/arrow/datatypes/enum.DataType.html
127/// [`StructArray`]: https://docs.rs/arrow/latest/arrow/array/struct.StructArray.html
128/// [`ListArray`]: https://docs.rs/arrow/latest/arrow/array/type.ListArray.html
129/// [`IntervalMonthDayNanoArray`]: https://docs.rs/arrow/latest/arrow/array/type.IntervalMonthDayNanoArray.html
130/// [support nanosecond intervals]: https://github.com/apache/parquet-format/blob/master/LogicalTypes.md#interval
131pub struct ArrowWriter<W: Write> {
132    /// Underlying Parquet writer
133    writer: SerializedFileWriter<W>,
134
135    /// The in-progress row group if any
136    in_progress: Option<ArrowRowGroupWriter>,
137
138    /// A copy of the Arrow schema.
139    ///
140    /// The schema is used to verify that each record batch written has the correct schema
141    arrow_schema: SchemaRef,
142
143    /// Creates new [`ArrowRowGroupWriter`] instances as required
144    row_group_writer_factory: ArrowRowGroupWriterFactory,
145
146    /// The maximum number of rows to write to each row group
147    max_row_group_size: usize,
148}
149
150impl<W: Write + Send> std::fmt::Debug for ArrowWriter<W> {
151    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
152        let buffered_memory = self.in_progress_size();
153        f.debug_struct("ArrowWriter")
154            .field("writer", &self.writer)
155            .field("in_progress_size", &format_args!("{buffered_memory} bytes"))
156            .field("in_progress_rows", &self.in_progress_rows())
157            .field("arrow_schema", &self.arrow_schema)
158            .field("max_row_group_size", &self.max_row_group_size)
159            .finish()
160    }
161}
162
163impl<W: Write + Send> ArrowWriter<W> {
164    /// Try to create a new Arrow writer
165    ///
166    /// The writer will fail if:
167    ///  * a `SerializedFileWriter` cannot be created from the ParquetWriter
168    ///  * the Arrow schema contains unsupported datatypes such as Unions
169    pub fn try_new(
170        writer: W,
171        arrow_schema: SchemaRef,
172        props: Option<WriterProperties>,
173    ) -> Result<Self> {
174        let options = ArrowWriterOptions::new().with_properties(props.unwrap_or_default());
175        Self::try_new_with_options(writer, arrow_schema, options)
176    }
177
178    /// Try to create a new Arrow writer with [`ArrowWriterOptions`].
179    ///
180    /// The writer will fail if:
181    ///  * a `SerializedFileWriter` cannot be created from the ParquetWriter
182    ///  * the Arrow schema contains unsupported datatypes such as Unions
183    pub fn try_new_with_options(
184        writer: W,
185        arrow_schema: SchemaRef,
186        options: ArrowWriterOptions,
187    ) -> Result<Self> {
188        let mut props = options.properties;
189        let mut converter = ArrowSchemaConverter::new().with_coerce_types(props.coerce_types());
190        if let Some(schema_root) = &options.schema_root {
191            converter = converter.schema_root(schema_root);
192        }
193        let schema = converter.convert(&arrow_schema)?;
194        if !options.skip_arrow_metadata {
195            // add serialized arrow schema
196            add_encoded_arrow_schema_to_metadata(&arrow_schema, &mut props);
197        }
198
199        let max_row_group_size = props.max_row_group_size();
200
201        let file_writer =
202            SerializedFileWriter::new(writer, schema.root_schema_ptr(), Arc::new(props))?;
203
204        let row_group_writer_factory = ArrowRowGroupWriterFactory::new(&file_writer);
205
206        Ok(Self {
207            writer: file_writer,
208            in_progress: None,
209            arrow_schema,
210            row_group_writer_factory,
211            max_row_group_size,
212        })
213    }
214
215    /// Returns metadata for any flushed row groups
216    pub fn flushed_row_groups(&self) -> &[RowGroupMetaData] {
217        self.writer.flushed_row_groups()
218    }
219
220    /// Estimated memory usage, in bytes, of this `ArrowWriter`
221    ///
222    /// This estimate is formed by summing the values of
223    /// [`ArrowColumnWriter::memory_size`] for all in progress columns.
224    pub fn memory_size(&self) -> usize {
225        match &self.in_progress {
226            Some(in_progress) => in_progress.writers.iter().map(|x| x.memory_size()).sum(),
227            None => 0,
228        }
229    }
230
231    /// Anticipated encoded size of the in progress row group.
232    ///
233    /// This estimates the size of the row group once it has been completely
234    /// encoded, formed by summing the values of
235    /// [`ArrowColumnWriter::get_estimated_total_bytes`] for all in progress
236    /// columns.
237    pub fn in_progress_size(&self) -> usize {
238        match &self.in_progress {
239            Some(in_progress) => in_progress
240                .writers
241                .iter()
242                .map(|x| x.get_estimated_total_bytes())
243                .sum(),
244            None => 0,
245        }
246    }
247
248    /// Returns the number of rows buffered in the in progress row group
249    pub fn in_progress_rows(&self) -> usize {
250        self.in_progress
251            .as_ref()
252            .map(|x| x.buffered_rows)
253            .unwrap_or_default()
254    }
255
256    /// Returns the number of bytes written by this instance
257    pub fn bytes_written(&self) -> usize {
258        self.writer.bytes_written()
259    }
260
261    /// Encodes the provided [`RecordBatch`]
262    ///
263    /// If this would cause the current row group to exceed [`WriterProperties::max_row_group_size`]
264    /// rows, the contents of `batch` will be written to one or more row groups such that all but
265    /// the final row group in the file contain [`WriterProperties::max_row_group_size`] rows.
266    ///
267    /// This will fail if the `batch`'s schema does not match the writer's schema.
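    ///
    /// # Example
    ///
    /// A minimal sketch of the splitting behaviour; the row group size of 2 used here is
    /// deliberately tiny, purely for illustration:
    ///
    /// ```
    /// # use std::sync::Arc;
    /// # use arrow_array::{ArrayRef, Int32Array, RecordBatch};
    /// # use parquet::arrow::arrow_writer::ArrowWriter;
    /// # use parquet::file::properties::WriterProperties;
    /// let col = Arc::new(Int32Array::from(vec![1, 2, 3, 4, 5])) as ArrayRef;
    /// let batch = RecordBatch::try_from_iter([("col", col)]).unwrap();
    ///
    /// let props = WriterProperties::builder().set_max_row_group_size(2).build();
    /// let mut buffer = Vec::new();
    /// let mut writer = ArrowWriter::try_new(&mut buffer, batch.schema(), Some(props)).unwrap();
    /// writer.write(&batch).unwrap();
    ///
    /// // 5 rows with at most 2 rows per row group yields 3 row groups on close
    /// let metadata = writer.close().unwrap();
    /// assert_eq!(metadata.row_groups.len(), 3);
    /// ```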
268    pub fn write(&mut self, batch: &RecordBatch) -> Result<()> {
269        if batch.num_rows() == 0 {
270            return Ok(());
271        }
272
273        let in_progress = match &mut self.in_progress {
274            Some(in_progress) => in_progress,
275            x => x.insert(self.row_group_writer_factory.create_row_group_writer(
276                self.writer.schema_descr(),
277                self.writer.properties(),
278                &self.arrow_schema,
279                self.writer.flushed_row_groups().len(),
280            )?),
281        };
282
283        // If this batch would exceed max_row_group_size, split it
284        if in_progress.buffered_rows + batch.num_rows() > self.max_row_group_size {
285            let to_write = self.max_row_group_size - in_progress.buffered_rows;
286            let a = batch.slice(0, to_write);
287            let b = batch.slice(to_write, batch.num_rows() - to_write);
288            self.write(&a)?;
289            return self.write(&b);
290        }
291
292        in_progress.write(batch)?;
293
294        if in_progress.buffered_rows >= self.max_row_group_size {
295            self.flush()?
296        }
297        Ok(())
298    }
299
300    /// Writes the given bytes to the internal buffer.
301    ///
302    /// It is safe to use this method to write data to the underlying writer,
303    /// because it goes through the buffering and byte-counting layers.
304    pub fn write_all(&mut self, buf: &[u8]) -> std::io::Result<()> {
305        self.writer.write_all(buf)
306    }
307
308    /// Flushes all buffered rows into a new row group
309    pub fn flush(&mut self) -> Result<()> {
310        let in_progress = match self.in_progress.take() {
311            Some(in_progress) => in_progress,
312            None => return Ok(()),
313        };
314
315        let mut row_group_writer = self.writer.next_row_group()?;
316        for chunk in in_progress.close()? {
317            chunk.append_to_row_group(&mut row_group_writer)?;
318        }
319        row_group_writer.close()?;
320        Ok(())
321    }
322
323    /// Additional [`KeyValue`] metadata to be written in addition to those from [`WriterProperties`]
324    ///
325    /// This method provides a way to append key-value metadata after writing `RecordBatch`es.
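    ///
    /// # Example
    ///
    /// A minimal sketch; the key and value shown here are arbitrary placeholders:
    ///
    /// ```
    /// # use std::sync::Arc;
    /// # use arrow_array::{ArrayRef, Int32Array, RecordBatch};
    /// # use parquet::arrow::arrow_writer::ArrowWriter;
    /// # use parquet::file::metadata::KeyValue;
    /// let col = Arc::new(Int32Array::from(vec![1, 2, 3])) as ArrayRef;
    /// let batch = RecordBatch::try_from_iter([("col", col)]).unwrap();
    ///
    /// let mut buffer = Vec::new();
    /// let mut writer = ArrowWriter::try_new(&mut buffer, batch.schema(), None).unwrap();
    /// writer.write(&batch).unwrap();
    ///
    /// // Append custom key-value metadata after the data has been written
    /// writer.append_key_value_metadata(KeyValue::new("my_key".to_string(), "my_value".to_string()));
    /// writer.close().unwrap();
    /// ```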
326    pub fn append_key_value_metadata(&mut self, kv_metadata: KeyValue) {
327        self.writer.append_key_value_metadata(kv_metadata)
328    }
329
330    /// Returns a reference to the underlying writer.
331    pub fn inner(&self) -> &W {
332        self.writer.inner()
333    }
334
335    /// Returns a mutable reference to the underlying writer.
336    ///
337    /// **Warning**: if you write directly to this writer, you will skip
338    /// the `TrackedWrite` buffering and byte-counting layers. This will cause
339    /// the file footer's recorded offsets and sizes to diverge from reality,
340    /// resulting in an unreadable or corrupted Parquet file.
341    ///
342    /// If you want to write safely to the underlying writer, use [`Self::write_all`].
343    pub fn inner_mut(&mut self) -> &mut W {
344        self.writer.inner_mut()
345    }
346
347    /// Flushes any outstanding data and returns the underlying writer.
348    pub fn into_inner(mut self) -> Result<W> {
349        self.flush()?;
350        self.writer.into_inner()
351    }
352
353    /// Close and finalize the underlying Parquet writer
354    ///
355    /// Unlike [`Self::close`] this does not consume self
356    ///
357    /// Attempting to write after calling finish will result in an error
358    pub fn finish(&mut self) -> Result<crate::format::FileMetaData> {
359        self.flush()?;
360        self.writer.finish()
361    }
362
363    /// Close and finalize the underlying Parquet writer
364    pub fn close(mut self) -> Result<crate::format::FileMetaData> {
365        self.finish()
366    }
367}
368
369impl<W: Write + Send> RecordBatchWriter for ArrowWriter<W> {
370    fn write(&mut self, batch: &RecordBatch) -> Result<(), ArrowError> {
371        self.write(batch).map_err(|e| e.into())
372    }
373
374    fn close(self) -> std::result::Result<(), ArrowError> {
375        self.close()?;
376        Ok(())
377    }
378}
379
380/// Arrow-specific configuration settings for writing parquet files.
381///
382/// See [`ArrowWriter`] for how to configure the writer.
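///
/// # Example
///
/// A minimal sketch showing how the builder-style setters compose; the values used here
/// are arbitrary:
///
/// ```
/// # use parquet::arrow::arrow_writer::ArrowWriterOptions;
/// # use parquet::file::properties::WriterProperties;
/// let options = ArrowWriterOptions::new()
///     .with_properties(WriterProperties::default())
///     .with_skip_arrow_metadata(true)
///     .with_schema_root("schema".to_string());
/// ```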
383#[derive(Debug, Clone, Default)]
384pub struct ArrowWriterOptions {
385    properties: WriterProperties,
386    skip_arrow_metadata: bool,
387    schema_root: Option<String>,
388}
389
390impl ArrowWriterOptions {
391    /// Creates a new [`ArrowWriterOptions`] with the default settings.
392    pub fn new() -> Self {
393        Self::default()
394    }
395
396    /// Sets the [`WriterProperties`] for writing parquet files.
397    pub fn with_properties(self, properties: WriterProperties) -> Self {
398        Self { properties, ..self }
399    }
400
401    /// Skip encoding the embedded arrow metadata (defaults to `false`)
402    ///
403    /// Parquet files generated by the [`ArrowWriter`] contain an embedded arrow schema
404    /// by default.
405    ///
406    /// Set `skip_arrow_metadata` to true to skip encoding the embedded metadata.
407    pub fn with_skip_arrow_metadata(self, skip_arrow_metadata: bool) -> Self {
408        Self {
409            skip_arrow_metadata,
410            ..self
411        }
412    }
413
414    /// Set the name of the root parquet schema element (defaults to `"arrow_schema"`)
415    pub fn with_schema_root(self, schema_root: String) -> Self {
416        Self {
417            schema_root: Some(schema_root),
418            ..self
419        }
420    }
421}
422
423/// A single column chunk produced by [`ArrowColumnWriter`]
424#[derive(Default)]
425struct ArrowColumnChunkData {
426    length: usize,
427    data: Vec<Bytes>,
428}
429
430impl Length for ArrowColumnChunkData {
431    fn len(&self) -> u64 {
432        self.length as _
433    }
434}
435
436impl ChunkReader for ArrowColumnChunkData {
437    type T = ArrowColumnChunkReader;
438
439    fn get_read(&self, start: u64) -> Result<Self::T> {
440        assert_eq!(start, 0); // Assume append_column writes all data in one-shot
441        Ok(ArrowColumnChunkReader(
442            self.data.clone().into_iter().peekable(),
443        ))
444    }
445
446    fn get_bytes(&self, _start: u64, _length: usize) -> Result<Bytes> {
447        unimplemented!()
448    }
449}
450
451/// A [`Read`] for [`ArrowColumnChunkData`]
452struct ArrowColumnChunkReader(Peekable<IntoIter<Bytes>>);
453
454impl Read for ArrowColumnChunkReader {
455    fn read(&mut self, out: &mut [u8]) -> std::io::Result<usize> {
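        // Skip any exhausted buffers and find the next one with remaining bytes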
456        let buffer = loop {
457            match self.0.peek_mut() {
458                Some(b) if b.is_empty() => {
459                    self.0.next();
460                    continue;
461                }
462                Some(b) => break b,
463                None => return Ok(0),
464            }
465        };
466
467        let len = buffer.len().min(out.len());
468        let b = buffer.split_to(len);
469        out[..len].copy_from_slice(&b);
470        Ok(len)
471    }
472}
473
474/// A shared [`ArrowColumnChunkData`]
475///
476/// This allows it to be owned by [`ArrowPageWriter`] whilst allowing access via
477/// [`ArrowRowGroupWriter`] on flush, without requiring self-referential borrows
478type SharedColumnChunk = Arc<Mutex<ArrowColumnChunkData>>;
479
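/// A [`PageWriter`] that buffers compressed pages and their serialized headers in an
/// in-memory [`SharedColumnChunk`], optionally encrypting them before they are stored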
480#[derive(Default)]
481struct ArrowPageWriter {
482    buffer: SharedColumnChunk,
483    #[cfg(feature = "encryption")]
484    page_encryptor: Option<PageEncryptor>,
485}
486
487impl ArrowPageWriter {
488    #[cfg(feature = "encryption")]
489    pub fn with_encryptor(mut self, page_encryptor: Option<PageEncryptor>) -> Self {
490        self.page_encryptor = page_encryptor;
491        self
492    }
493
494    #[cfg(feature = "encryption")]
495    fn page_encryptor_mut(&mut self) -> Option<&mut PageEncryptor> {
496        self.page_encryptor.as_mut()
497    }
498
499    #[cfg(not(feature = "encryption"))]
500    fn page_encryptor_mut(&mut self) -> Option<&mut PageEncryptor> {
501        None
502    }
503}
504
505impl PageWriter for ArrowPageWriter {
506    fn write_page(&mut self, page: CompressedPage) -> Result<PageWriteSpec> {
507        let page = match self.page_encryptor_mut() {
508            Some(page_encryptor) => page_encryptor.encrypt_compressed_page(page)?,
509            None => page,
510        };
511
512        let page_header = page.to_thrift_header();
513        let header = {
514            let mut header = Vec::with_capacity(1024);
515
516            match self.page_encryptor_mut() {
517                Some(page_encryptor) => {
518                    page_encryptor.encrypt_page_header(&page_header, &mut header)?;
519                    if page.compressed_page().is_data_page() {
520                        page_encryptor.increment_page();
521                    }
522                }
523                None => {
524                    let mut protocol = TCompactOutputProtocol::new(&mut header);
525                    page_header.write_to_out_protocol(&mut protocol)?;
526                }
527            };
528
529            Bytes::from(header)
530        };
531
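        // Append the serialized header and page data to the in-memory column chunk;
        // the recorded offset is relative to the start of this column chunk, not the file.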
532        let mut buf = self.buffer.try_lock().unwrap();
533
534        let data = page.compressed_page().buffer().clone();
535        let compressed_size = data.len() + header.len();
536
537        let mut spec = PageWriteSpec::new();
538        spec.page_type = page.page_type();
539        spec.num_values = page.num_values();
540        spec.uncompressed_size = page.uncompressed_size() + header.len();
541        spec.offset = buf.length as u64;
542        spec.compressed_size = compressed_size;
543        spec.bytes_written = compressed_size as u64;
544
545        buf.length += compressed_size;
546        buf.data.push(header);
547        buf.data.push(data);
548
549        Ok(spec)
550    }
551
552    fn close(&mut self) -> Result<()> {
553        Ok(())
554    }
555}
556
557/// A leaf column that can be encoded by [`ArrowColumnWriter`]
558#[derive(Debug)]
559pub struct ArrowLeafColumn(ArrayLevels);
560
561/// Computes the [`ArrowLeafColumn`] for a potentially nested [`ArrayRef`]
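///
/// For a flat column this produces a single [`ArrowLeafColumn`]; nested types such as
/// structs and lists produce one entry per leaf column. A minimal sketch for a flat
/// `Int32` field:
///
/// ```
/// # use std::sync::Arc;
/// # use arrow_array::{ArrayRef, Int32Array};
/// # use arrow_schema::{DataType, Field};
/// # use parquet::arrow::arrow_writer::compute_leaves;
/// let field = Field::new("i32", DataType::Int32, false);
/// let array = Arc::new(Int32Array::from(vec![1, 2, 3])) as ArrayRef;
/// let leaves = compute_leaves(&field, &array).unwrap();
/// assert_eq!(leaves.len(), 1);
/// ```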
562pub fn compute_leaves(field: &Field, array: &ArrayRef) -> Result<Vec<ArrowLeafColumn>> {
563    let levels = calculate_array_levels(array, field)?;
564    Ok(levels.into_iter().map(ArrowLeafColumn).collect())
565}
566
567/// The data for a single column chunk, see [`ArrowColumnWriter`]
568pub struct ArrowColumnChunk {
569    data: ArrowColumnChunkData,
570    close: ColumnCloseResult,
571}
572
573impl std::fmt::Debug for ArrowColumnChunk {
574    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
575        f.debug_struct("ArrowColumnChunk")
576            .field("length", &self.data.length)
577            .finish_non_exhaustive()
578    }
579}
580
581impl ArrowColumnChunk {
582    /// Calls [`SerializedRowGroupWriter::append_column`] with this column's data
583    pub fn append_to_row_group<W: Write + Send>(
584        self,
585        writer: &mut SerializedRowGroupWriter<'_, W>,
586    ) -> Result<()> {
587        writer.append_column(&self.data, self.close)
588    }
589}
590
591/// Encodes [`ArrowLeafColumn`] to [`ArrowColumnChunk`]
592///
593/// Note: This is a low-level interface for applications that require
594/// fine-grained control of encoding (e.g. encoding using multiple threads),
595/// see [`ArrowWriter`] for a higher-level interface
596///
597/// # Example: Encoding two Arrow Arrays in Parallel
598/// ```
599/// // The arrow schema
600/// # use std::sync::Arc;
601/// # use arrow_array::*;
602/// # use arrow_schema::*;
603/// # use parquet::arrow::ArrowSchemaConverter;
604/// # use parquet::arrow::arrow_writer::{ArrowLeafColumn, compute_leaves, get_column_writers, ArrowColumnChunk};
605/// # use parquet::file::properties::WriterProperties;
606/// # use parquet::file::writer::{SerializedFileWriter, SerializedRowGroupWriter};
607/// #
608/// let schema = Arc::new(Schema::new(vec![
609///     Field::new("i32", DataType::Int32, false),
610///     Field::new("f32", DataType::Float32, false),
611/// ]));
612///
613/// // Compute the parquet schema
614/// let props = Arc::new(WriterProperties::default());
615/// let parquet_schema = ArrowSchemaConverter::new()
616///   .with_coerce_types(props.coerce_types())
617///   .convert(&schema)
618///   .unwrap();
619///
620/// // Create writers for each of the leaf columns
621/// let col_writers = get_column_writers(&parquet_schema, &props, &schema).unwrap();
622///
623/// // Spawn a worker thread for each column
624/// //
625/// // Note: This is for demonstration purposes; a thread pool (e.g. rayon or tokio) would be better.
626/// // The `map` produces an iterator of `(thread handle, send channel)` tuples.
627/// let mut workers: Vec<_> = col_writers
628///     .into_iter()
629///     .map(|mut col_writer| {
630///         let (send, recv) = std::sync::mpsc::channel::<ArrowLeafColumn>();
631///         let handle = std::thread::spawn(move || {
632///             // receive Arrays to encode via the channel
633///             for col in recv {
634///                 col_writer.write(&col)?;
635///             }
636///             // once the input is complete, close the writer
637///             // to return the newly created ArrowColumnChunk
638///             col_writer.close()
639///         });
640///         (handle, send)
641///     })
642///     .collect();
643///
644/// // Create parquet writer
645/// let root_schema = parquet_schema.root_schema_ptr();
646/// // write to memory in the example, but this could be a File
647/// let mut out = Vec::with_capacity(1024);
648/// let mut writer = SerializedFileWriter::new(&mut out, root_schema, props.clone())
649///   .unwrap();
650///
651/// // Start row group
652/// let mut row_group_writer: SerializedRowGroupWriter<'_, _> = writer
653///   .next_row_group()
654///   .unwrap();
655///
656/// // Create some example input columns to encode
657/// let to_write = vec![
658///     Arc::new(Int32Array::from_iter_values([1, 2, 3])) as _,
659///     Arc::new(Float32Array::from_iter_values([1., 45., -1.])) as _,
660/// ];
661///
662/// // Send the input columns to the workers
663/// let mut worker_iter = workers.iter_mut();
664/// for (arr, field) in to_write.iter().zip(&schema.fields) {
665///     for leaves in compute_leaves(field, arr).unwrap() {
666///         worker_iter.next().unwrap().1.send(leaves).unwrap();
667///     }
668/// }
669///
670/// // Wait for the workers to complete encoding, and append
671/// // the resulting column chunks to the row group (and the file)
672/// for (handle, send) in workers {
673///     drop(send); // Drop send side to signal termination
674///     // wait for the worker to send the completed chunk
675///     let chunk: ArrowColumnChunk = handle.join().unwrap().unwrap();
676///     chunk.append_to_row_group(&mut row_group_writer).unwrap();
677/// }
678/// // Close the row group which writes to the underlying file
679/// row_group_writer.close().unwrap();
680///
681/// let metadata = writer.close().unwrap();
682/// assert_eq!(metadata.num_rows, 3);
683/// ```
684pub struct ArrowColumnWriter {
685    writer: ArrowColumnWriterImpl,
686    chunk: SharedColumnChunk,
687}
688
689impl std::fmt::Debug for ArrowColumnWriter {
690    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
691        f.debug_struct("ArrowColumnWriter").finish_non_exhaustive()
692    }
693}
694
695enum ArrowColumnWriterImpl {
696    ByteArray(GenericColumnWriter<'static, ByteArrayEncoder>),
697    Column(ColumnWriter<'static>),
698}
699
700impl ArrowColumnWriter {
701    /// Write an [`ArrowLeafColumn`]
702    pub fn write(&mut self, col: &ArrowLeafColumn) -> Result<()> {
703        match &mut self.writer {
704            ArrowColumnWriterImpl::Column(c) => {
705                write_leaf(c, &col.0)?;
706            }
707            ArrowColumnWriterImpl::ByteArray(c) => {
708                write_primitive(c, col.0.array().as_ref(), &col.0)?;
709            }
710        }
711        Ok(())
712    }
713
714    /// Close this column returning the written [`ArrowColumnChunk`]
715    pub fn close(self) -> Result<ArrowColumnChunk> {
716        let close = match self.writer {
717            ArrowColumnWriterImpl::ByteArray(c) => c.close()?,
718            ArrowColumnWriterImpl::Column(c) => c.close()?,
719        };
720        let chunk = Arc::try_unwrap(self.chunk).ok().unwrap();
721        let data = chunk.into_inner().unwrap();
722        Ok(ArrowColumnChunk { data, close })
723    }
724
725    /// Returns the estimated total memory usage by the writer.
726    ///
727    /// Unlike [`Self::get_estimated_total_bytes`], this is an estimate
728    /// of the current memory usage and not the anticipated encoded size.
729    ///
730    /// This includes:
731    /// 1. Data buffered in encoded form
732    /// 2. Data buffered in un-encoded form (e.g. `usize` dictionary keys)
733    ///
734    /// This value should be greater than or equal to [`Self::get_estimated_total_bytes`]
735    pub fn memory_size(&self) -> usize {
736        match &self.writer {
737            ArrowColumnWriterImpl::ByteArray(c) => c.memory_size(),
738            ArrowColumnWriterImpl::Column(c) => c.memory_size(),
739        }
740    }
741
742    /// Returns the estimated total encoded bytes for this column writer.
743    ///
744    /// This includes:
745    /// 1. Data buffered in encoded form
746    /// 2. An estimate of how large the data buffered in un-encoded form would be once encoded
747    ///
748    /// This value should be less than or equal to [`Self::memory_size`]
749    pub fn get_estimated_total_bytes(&self) -> usize {
750        match &self.writer {
751            ArrowColumnWriterImpl::ByteArray(c) => c.get_estimated_total_bytes() as _,
752            ArrowColumnWriterImpl::Column(c) => c.get_estimated_total_bytes() as _,
753        }
754    }
755}
756
757/// Encodes [`RecordBatch`] to a parquet row group
758struct ArrowRowGroupWriter {
759    writers: Vec<ArrowColumnWriter>,
760    schema: SchemaRef,
761    buffered_rows: usize,
762}
763
764impl ArrowRowGroupWriter {
765    fn new(writers: Vec<ArrowColumnWriter>, arrow: &SchemaRef) -> Self {
766        Self {
767            writers,
768            schema: arrow.clone(),
769            buffered_rows: 0,
770        }
771    }
772
773    fn write(&mut self, batch: &RecordBatch) -> Result<()> {
774        self.buffered_rows += batch.num_rows();
775        let mut writers = self.writers.iter_mut();
776        for (field, column) in self.schema.fields().iter().zip(batch.columns()) {
777            for leaf in compute_leaves(field.as_ref(), column)? {
778                writers.next().unwrap().write(&leaf)?
779            }
780        }
781        Ok(())
782    }
783
784    fn close(self) -> Result<Vec<ArrowColumnChunk>> {
785        self.writers
786            .into_iter()
787            .map(|writer| writer.close())
788            .collect()
789    }
790}
791
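/// Creates [`ArrowRowGroupWriter`] instances for a file's row groups, carrying the
/// file encryptor when the `encryption` feature is enabled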
792struct ArrowRowGroupWriterFactory {
793    #[cfg(feature = "encryption")]
794    file_encryptor: Option<Arc<FileEncryptor>>,
795}
796
797impl ArrowRowGroupWriterFactory {
798    #[cfg(feature = "encryption")]
799    fn new<W: Write + Send>(file_writer: &SerializedFileWriter<W>) -> Self {
800        Self {
801            file_encryptor: file_writer.file_encryptor(),
802        }
803    }
804
805    #[cfg(not(feature = "encryption"))]
806    fn new<W: Write + Send>(_file_writer: &SerializedFileWriter<W>) -> Self {
807        Self {}
808    }
809
810    #[cfg(feature = "encryption")]
811    fn create_row_group_writer(
812        &self,
813        parquet: &SchemaDescriptor,
814        props: &WriterPropertiesPtr,
815        arrow: &SchemaRef,
816        row_group_index: usize,
817    ) -> Result<ArrowRowGroupWriter> {
818        let writers = get_column_writers_with_encryptor(
819            parquet,
820            props,
821            arrow,
822            self.file_encryptor.clone(),
823            row_group_index,
824        )?;
825        Ok(ArrowRowGroupWriter::new(writers, arrow))
826    }
827
828    #[cfg(not(feature = "encryption"))]
829    fn create_row_group_writer(
830        &self,
831        parquet: &SchemaDescriptor,
832        props: &WriterPropertiesPtr,
833        arrow: &SchemaRef,
834        _row_group_index: usize,
835    ) -> Result<ArrowRowGroupWriter> {
836        let writers = get_column_writers(parquet, props, arrow)?;
837        Ok(ArrowRowGroupWriter::new(writers, arrow))
838    }
839}
840
841/// Returns the [`ArrowColumnWriter`]s for a given schema, one per parquet leaf column
842pub fn get_column_writers(
843    parquet: &SchemaDescriptor,
844    props: &WriterPropertiesPtr,
845    arrow: &SchemaRef,
846) -> Result<Vec<ArrowColumnWriter>> {
847    let mut writers = Vec::with_capacity(arrow.fields.len());
848    let mut leaves = parquet.columns().iter();
849    let column_factory = ArrowColumnWriterFactory::new();
850    for field in &arrow.fields {
851        column_factory.get_arrow_column_writer(
852            field.data_type(),
853            props,
854            &mut leaves,
855            &mut writers,
856        )?;
857    }
858    Ok(writers)
859}
860
861/// Returns the [`ArrowColumnWriter`]s for a given schema, with support for columnar encryption
862#[cfg(feature = "encryption")]
863fn get_column_writers_with_encryptor(
864    parquet: &SchemaDescriptor,
865    props: &WriterPropertiesPtr,
866    arrow: &SchemaRef,
867    file_encryptor: Option<Arc<FileEncryptor>>,
868    row_group_index: usize,
869) -> Result<Vec<ArrowColumnWriter>> {
870    let mut writers = Vec::with_capacity(arrow.fields.len());
871    let mut leaves = parquet.columns().iter();
872    let column_factory =
873        ArrowColumnWriterFactory::new().with_file_encryptor(row_group_index, file_encryptor);
874    for field in &arrow.fields {
875        column_factory.get_arrow_column_writer(
876            field.data_type(),
877            props,
878            &mut leaves,
879            &mut writers,
880        )?;
881    }
882    Ok(writers)
883}
884
885/// Gets [`ArrowColumnWriter`] instances for different data types
886struct ArrowColumnWriterFactory {
887    #[cfg(feature = "encryption")]
888    row_group_index: usize,
889    #[cfg(feature = "encryption")]
890    file_encryptor: Option<Arc<FileEncryptor>>,
891}
892
893impl ArrowColumnWriterFactory {
894    pub fn new() -> Self {
895        Self {
896            #[cfg(feature = "encryption")]
897            row_group_index: 0,
898            #[cfg(feature = "encryption")]
899            file_encryptor: None,
900        }
901    }
902
903    #[cfg(feature = "encryption")]
904    pub fn with_file_encryptor(
905        mut self,
906        row_group_index: usize,
907        file_encryptor: Option<Arc<FileEncryptor>>,
908    ) -> Self {
909        self.row_group_index = row_group_index;
910        self.file_encryptor = file_encryptor;
911        self
912    }
913
914    #[cfg(feature = "encryption")]
915    fn create_page_writer(
916        &self,
917        column_descriptor: &ColumnDescPtr,
918        column_index: usize,
919    ) -> Result<Box<ArrowPageWriter>> {
920        let column_path = column_descriptor.path().string();
921        let page_encryptor = PageEncryptor::create_if_column_encrypted(
922            &self.file_encryptor,
923            self.row_group_index,
924            column_index,
925            &column_path,
926        )?;
927        Ok(Box::new(
928            ArrowPageWriter::default().with_encryptor(page_encryptor),
929        ))
930    }
931
932    #[cfg(not(feature = "encryption"))]
933    fn create_page_writer(
934        &self,
935        _column_descriptor: &ColumnDescPtr,
936        _column_index: usize,
937    ) -> Result<Box<ArrowPageWriter>> {
938        Ok(Box::<ArrowPageWriter>::default())
939    }
940
941    /// Gets the [`ArrowColumnWriter`] for the given `data_type`
942    fn get_arrow_column_writer(
943        &self,
944        data_type: &ArrowDataType,
945        props: &WriterPropertiesPtr,
946        leaves: &mut Iter<'_, ColumnDescPtr>,
947        out: &mut Vec<ArrowColumnWriter>,
948    ) -> Result<()> {
949        let col = |desc: &ColumnDescPtr| -> Result<ArrowColumnWriter> {
950            let page_writer = self.create_page_writer(desc, out.len())?;
951            let chunk = page_writer.buffer.clone();
952            let writer = get_column_writer(desc.clone(), props.clone(), page_writer);
953            Ok(ArrowColumnWriter {
954                chunk,
955                writer: ArrowColumnWriterImpl::Column(writer),
956            })
957        };
958
959        let bytes = |desc: &ColumnDescPtr| -> Result<ArrowColumnWriter> {
960            let page_writer = self.create_page_writer(desc, out.len())?;
961            let chunk = page_writer.buffer.clone();
962            let writer = GenericColumnWriter::new(desc.clone(), props.clone(), page_writer);
963            Ok(ArrowColumnWriter {
964                chunk,
965                writer: ArrowColumnWriterImpl::ByteArray(writer),
966            })
967        };
968
969        match data_type {
970            _ if data_type.is_primitive() => out.push(col(leaves.next().unwrap())?),
971            ArrowDataType::FixedSizeBinary(_) | ArrowDataType::Boolean | ArrowDataType::Null => out.push(col(leaves.next().unwrap())?),
972            ArrowDataType::LargeBinary
973            | ArrowDataType::Binary
974            | ArrowDataType::Utf8
975            | ArrowDataType::LargeUtf8
976            | ArrowDataType::BinaryView
977            | ArrowDataType::Utf8View => {
978                out.push(bytes(leaves.next().unwrap())?)
979            }
980            ArrowDataType::List(f)
981            | ArrowDataType::LargeList(f)
982            | ArrowDataType::FixedSizeList(f, _) => {
983                self.get_arrow_column_writer(f.data_type(), props, leaves, out)?
984            }
985            ArrowDataType::Struct(fields) => {
986                for field in fields {
987                    self.get_arrow_column_writer(field.data_type(), props, leaves, out)?
988                }
989            }
990            ArrowDataType::Map(f, _) => match f.data_type() {
991                ArrowDataType::Struct(f) => {
992                    self.get_arrow_column_writer(f[0].data_type(), props, leaves, out)?;
993                    self.get_arrow_column_writer(f[1].data_type(), props, leaves, out)?
994                }
995                _ => unreachable!("invalid map type"),
996            }
997            ArrowDataType::Dictionary(_, value_type) => match value_type.as_ref() {
998                ArrowDataType::Utf8 | ArrowDataType::LargeUtf8 | ArrowDataType::Binary | ArrowDataType::LargeBinary => {
999                    out.push(bytes(leaves.next().unwrap())?)
1000                }
1001                ArrowDataType::Utf8View | ArrowDataType::BinaryView => {
1002                    out.push(bytes(leaves.next().unwrap())?)
1003                }
1004                ArrowDataType::FixedSizeBinary(_) => {
1005                    out.push(bytes(leaves.next().unwrap())?)
1006                }
1007                _ => {
1008                    out.push(col(leaves.next().unwrap())?)
1009                }
1010            }
1011            _ => return Err(ParquetError::NYI(
1012                format!(
1013                    "Attempting to write an Arrow type {data_type:?} to parquet that is not yet implemented"
1014                )
1015            ))
1016        }
1017        Ok(())
1018    }
1019}
1020
1021fn write_leaf(writer: &mut ColumnWriter<'_>, levels: &ArrayLevels) -> Result<usize> {
1022    let column = levels.array().as_ref();
1023    let indices = levels.non_null_indices();
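    // Dispatch on the parquet physical type of the column writer; the Arrow array is cast
    // or reinterpreted to the matching physical representation before being written.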
1024    match writer {
1025        ColumnWriter::Int32ColumnWriter(ref mut typed) => {
1026            match column.data_type() {
1027                ArrowDataType::Date64 => {
1028                    // If the column is a Date64, we cast it to a Date32, and then interpret that as Int32
1029                    let array = arrow_cast::cast(column, &ArrowDataType::Date32)?;
1030                    let array = arrow_cast::cast(&array, &ArrowDataType::Int32)?;
1031
1032                    let array = array.as_primitive::<Int32Type>();
1033                    write_primitive(typed, array.values(), levels)
1034                }
1035                ArrowDataType::UInt32 => {
1036                    let values = column.as_primitive::<UInt32Type>().values();
1037                    // follow C++ implementation and use overflow/reinterpret cast from u32 to i32 which will map
1038                    // `(i32::MAX as u32)..u32::MAX` to `i32::MIN..0`
1039                    let array = values.inner().typed_data::<i32>();
1040                    write_primitive(typed, array, levels)
1041                }
1042                ArrowDataType::Decimal128(_, _) => {
1043                    // use the int32 to represent the decimal with low precision
1044                    let array = column
1045                        .as_primitive::<Decimal128Type>()
1046                        .unary::<_, Int32Type>(|v| v as i32);
1047                    write_primitive(typed, array.values(), levels)
1048                }
1049                ArrowDataType::Decimal256(_, _) => {
1050                    // use the int32 to represent the decimal with low precision
1051                    let array = column
1052                        .as_primitive::<Decimal256Type>()
1053                        .unary::<_, Int32Type>(|v| v.as_i128() as i32);
1054                    write_primitive(typed, array.values(), levels)
1055                }
1056                ArrowDataType::Dictionary(_, value_type) => match value_type.as_ref() {
1057                    ArrowDataType::Decimal128(_, _) => {
1058                        let array = arrow_cast::cast(column, value_type)?;
1059                        let array = array
1060                            .as_primitive::<Decimal128Type>()
1061                            .unary::<_, Int32Type>(|v| v as i32);
1062                        write_primitive(typed, array.values(), levels)
1063                    }
1064                    ArrowDataType::Decimal256(_, _) => {
1065                        let array = arrow_cast::cast(column, value_type)?;
1066                        let array = array
1067                            .as_primitive::<Decimal256Type>()
1068                            .unary::<_, Int32Type>(|v| v.as_i128() as i32);
1069                        write_primitive(typed, array.values(), levels)
1070                    }
1071                    _ => {
1072                        let array = arrow_cast::cast(column, &ArrowDataType::Int32)?;
1073                        let array = array.as_primitive::<Int32Type>();
1074                        write_primitive(typed, array.values(), levels)
1075                    }
1076                },
1077                _ => {
1078                    let array = arrow_cast::cast(column, &ArrowDataType::Int32)?;
1079                    let array = array.as_primitive::<Int32Type>();
1080                    write_primitive(typed, array.values(), levels)
1081                }
1082            }
1083        }
1084        ColumnWriter::BoolColumnWriter(ref mut typed) => {
1085            let array = column.as_boolean();
1086            typed.write_batch(
1087                get_bool_array_slice(array, indices).as_slice(),
1088                levels.def_levels(),
1089                levels.rep_levels(),
1090            )
1091        }
1092        ColumnWriter::Int64ColumnWriter(ref mut typed) => {
1093            match column.data_type() {
1094                ArrowDataType::Date64 => {
1095                    let array = arrow_cast::cast(column, &ArrowDataType::Int64)?;
1096
1097                    let array = array.as_primitive::<Int64Type>();
1098                    write_primitive(typed, array.values(), levels)
1099                }
1100                ArrowDataType::Int64 => {
1101                    let array = column.as_primitive::<Int64Type>();
1102                    write_primitive(typed, array.values(), levels)
1103                }
1104                ArrowDataType::UInt64 => {
1105                    let values = column.as_primitive::<UInt64Type>().values();
1106                    // follow C++ implementation and use overflow/reinterpret cast from u64 to i64 which will map
1107                    // `(i64::MAX as u64)..u64::MAX` to `i64::MIN..0`
1108                    let array = values.inner().typed_data::<i64>();
1109                    write_primitive(typed, array, levels)
1110                }
1111                ArrowDataType::Decimal128(_, _) => {
1112                    // use the int64 to represent the decimal with low precision
1113                    let array = column
1114                        .as_primitive::<Decimal128Type>()
1115                        .unary::<_, Int64Type>(|v| v as i64);
1116                    write_primitive(typed, array.values(), levels)
1117                }
1118                ArrowDataType::Decimal256(_, _) => {
1119                    // use the int64 to represent the decimal with low precision
1120                    let array = column
1121                        .as_primitive::<Decimal256Type>()
1122                        .unary::<_, Int64Type>(|v| v.as_i128() as i64);
1123                    write_primitive(typed, array.values(), levels)
1124                }
1125                ArrowDataType::Dictionary(_, value_type) => match value_type.as_ref() {
1126                    ArrowDataType::Decimal128(_, _) => {
1127                        let array = arrow_cast::cast(column, value_type)?;
1128                        let array = array
1129                            .as_primitive::<Decimal128Type>()
1130                            .unary::<_, Int64Type>(|v| v as i64);
1131                        write_primitive(typed, array.values(), levels)
1132                    }
1133                    ArrowDataType::Decimal256(_, _) => {
1134                        let array = arrow_cast::cast(column, value_type)?;
1135                        let array = array
1136                            .as_primitive::<Decimal256Type>()
1137                            .unary::<_, Int64Type>(|v| v.as_i128() as i64);
1138                        write_primitive(typed, array.values(), levels)
1139                    }
1140                    _ => {
1141                        let array = arrow_cast::cast(column, &ArrowDataType::Int64)?;
1142                        let array = array.as_primitive::<Int64Type>();
1143                        write_primitive(typed, array.values(), levels)
1144                    }
1145                },
1146                _ => {
1147                    let array = arrow_cast::cast(column, &ArrowDataType::Int64)?;
1148                    let array = array.as_primitive::<Int64Type>();
1149                    write_primitive(typed, array.values(), levels)
1150                }
1151            }
1152        }
1153        ColumnWriter::Int96ColumnWriter(ref mut _typed) => {
1154            unreachable!("Currently unreachable because data type not supported")
1155        }
1156        ColumnWriter::FloatColumnWriter(ref mut typed) => {
1157            let array = column.as_primitive::<Float32Type>();
1158            write_primitive(typed, array.values(), levels)
1159        }
1160        ColumnWriter::DoubleColumnWriter(ref mut typed) => {
1161            let array = column.as_primitive::<Float64Type>();
1162            write_primitive(typed, array.values(), levels)
1163        }
1164        ColumnWriter::ByteArrayColumnWriter(_) => {
1165            unreachable!("should use ByteArrayWriter")
1166        }
1167        ColumnWriter::FixedLenByteArrayColumnWriter(ref mut typed) => {
1168            let bytes = match column.data_type() {
1169                ArrowDataType::Interval(interval_unit) => match interval_unit {
1170                    IntervalUnit::YearMonth => {
1171                        let array = column
1172                            .as_any()
1173                            .downcast_ref::<arrow_array::IntervalYearMonthArray>()
1174                            .unwrap();
1175                        get_interval_ym_array_slice(array, indices)
1176                    }
1177                    IntervalUnit::DayTime => {
1178                        let array = column
1179                            .as_any()
1180                            .downcast_ref::<arrow_array::IntervalDayTimeArray>()
1181                            .unwrap();
1182                        get_interval_dt_array_slice(array, indices)
1183                    }
1184                    _ => {
1185                        return Err(ParquetError::NYI(
1186                            format!(
1187                                "Attempting to write an Arrow interval type {interval_unit:?} to parquet that is not yet implemented"
1188                            )
1189                        ));
1190                    }
1191                },
1192                ArrowDataType::FixedSizeBinary(_) => {
1193                    let array = column
1194                        .as_any()
1195                        .downcast_ref::<arrow_array::FixedSizeBinaryArray>()
1196                        .unwrap();
1197                    get_fsb_array_slice(array, indices)
1198                }
1199                ArrowDataType::Decimal128(_, _) => {
1200                    let array = column.as_primitive::<Decimal128Type>();
1201                    get_decimal_128_array_slice(array, indices)
1202                }
1203                ArrowDataType::Decimal256(_, _) => {
1204                    let array = column
1205                        .as_any()
1206                        .downcast_ref::<arrow_array::Decimal256Array>()
1207                        .unwrap();
1208                    get_decimal_256_array_slice(array, indices)
1209                }
1210                ArrowDataType::Float16 => {
1211                    let array = column.as_primitive::<Float16Type>();
1212                    get_float_16_array_slice(array, indices)
1213                }
1214                _ => {
1215                    return Err(ParquetError::NYI(
1216                        "Attempting to write an Arrow type that is not yet implemented".to_string(),
1217                    ));
1218                }
1219            };
1220            typed.write_batch(bytes.as_slice(), levels.def_levels(), levels.rep_levels())
1221        }
1222    }
1223}
1224
1225fn write_primitive<E: ColumnValueEncoder>(
1226    writer: &mut GenericColumnWriter<E>,
1227    values: &E::Values,
1228    levels: &ArrayLevels,
1229) -> Result<usize> {
1230    writer.write_batch_internal(
1231        values,
1232        Some(levels.non_null_indices()),
1233        levels.def_levels(),
1234        levels.rep_levels(),
1235        None,
1236        None,
1237        None,
1238    )
1239}
1240
1241fn get_bool_array_slice(array: &arrow_array::BooleanArray, indices: &[usize]) -> Vec<bool> {
1242    let mut values = Vec::with_capacity(indices.len());
1243    for i in indices {
1244        values.push(array.value(*i))
1245    }
1246    values
1247}
1248
1249/// Returns 12-byte values representing 3 values of months, days and milliseconds (4-bytes each).
1250/// An Arrow YearMonth interval only stores months, thus only the first 4 bytes are populated.
1251fn get_interval_ym_array_slice(
1252    array: &arrow_array::IntervalYearMonthArray,
1253    indices: &[usize],
1254) -> Vec<FixedLenByteArray> {
1255    let mut values = Vec::with_capacity(indices.len());
1256    for i in indices {
1257        let mut value = array.value(*i).to_le_bytes().to_vec();
1258        let mut suffix = vec![0; 8];
1259        value.append(&mut suffix);
1260        values.push(FixedLenByteArray::from(ByteArray::from(value)))
1261    }
1262    values
1263}
1264
1265/// Returns 12-byte values representing 3 values of months, days and milliseconds (4-bytes each).
1266/// An Arrow DayTime interval only stores days and millis, thus the first 4 bytes are not populated.
1267fn get_interval_dt_array_slice(
1268    array: &arrow_array::IntervalDayTimeArray,
1269    indices: &[usize],
1270) -> Vec<FixedLenByteArray> {
1271    let mut values = Vec::with_capacity(indices.len());
1272    for i in indices {
1273        let mut out = [0; 12];
1274        let value = array.value(*i);
1275        out[4..8].copy_from_slice(&value.days.to_le_bytes());
1276        out[8..12].copy_from_slice(&value.milliseconds.to_le_bytes());
1277        values.push(FixedLenByteArray::from(ByteArray::from(out.to_vec())));
1278    }
1279    values
1280}
1281
1282fn get_decimal_128_array_slice(
1283    array: &arrow_array::Decimal128Array,
1284    indices: &[usize],
1285) -> Vec<FixedLenByteArray> {
1286    let mut values = Vec::with_capacity(indices.len());
1287    let size = decimal_length_from_precision(array.precision());
1288    for i in indices {
1289        let as_be_bytes = array.value(*i).to_be_bytes();
1290        let resized_value = as_be_bytes[(16 - size)..].to_vec();
1291        values.push(FixedLenByteArray::from(ByteArray::from(resized_value)));
1292    }
1293    values
1294}
1295
1296fn get_decimal_256_array_slice(
1297    array: &arrow_array::Decimal256Array,
1298    indices: &[usize],
1299) -> Vec<FixedLenByteArray> {
1300    let mut values = Vec::with_capacity(indices.len());
1301    let size = decimal_length_from_precision(array.precision());
1302    for i in indices {
1303        let as_be_bytes = array.value(*i).to_be_bytes();
1304        let resized_value = as_be_bytes[(32 - size)..].to_vec();
1305        values.push(FixedLenByteArray::from(ByteArray::from(resized_value)));
1306    }
1307    values
1308}
1309
1310fn get_float_16_array_slice(
1311    array: &arrow_array::Float16Array,
1312    indices: &[usize],
1313) -> Vec<FixedLenByteArray> {
1314    let mut values = Vec::with_capacity(indices.len());
1315    for i in indices {
1316        let value = array.value(*i).to_le_bytes().to_vec();
1317        values.push(FixedLenByteArray::from(ByteArray::from(value)));
1318    }
1319    values
1320}
1321
1322fn get_fsb_array_slice(
1323    array: &arrow_array::FixedSizeBinaryArray,
1324    indices: &[usize],
1325) -> Vec<FixedLenByteArray> {
1326    let mut values = Vec::with_capacity(indices.len());
1327    for i in indices {
1328        let value = array.value(*i).to_vec();
1329        values.push(FixedLenByteArray::from(ByteArray::from(value)))
1330    }
1331    values
1332}
1333
1334#[cfg(test)]
1335mod tests {
1336    use super::*;
1337
1338    use std::fs::File;
1339    use std::io::Seek;
1340
1341    use crate::arrow::arrow_reader::{ParquetRecordBatchReader, ParquetRecordBatchReaderBuilder};
1342    use crate::arrow::ARROW_SCHEMA_META_KEY;
1343    use crate::file::page_encoding_stats::PageEncodingStats;
1344    use crate::format::PageHeader;
1345    use crate::thrift::TCompactSliceInputProtocol;
1346    use arrow::datatypes::ToByteSlice;
1347    use arrow::datatypes::{DataType, Schema};
1348    use arrow::error::Result as ArrowResult;
1349    use arrow::util::data_gen::create_random_array;
1350    use arrow::util::pretty::pretty_format_batches;
1351    use arrow::{array::*, buffer::Buffer};
1352    use arrow_buffer::{i256, IntervalDayTime, IntervalMonthDayNano, NullBuffer};
1353    use arrow_schema::Fields;
1354    use half::f16;
1355    use num::{FromPrimitive, ToPrimitive};
1356
1357    use crate::basic::Encoding;
1358    use crate::data_type::AsBytes;
1359    use crate::file::metadata::ParquetMetaData;
1360    use crate::file::page_index::index::Index;
1361    use crate::file::properties::{
1362        BloomFilterPosition, EnabledStatistics, ReaderProperties, WriterVersion,
1363    };
1364    use crate::file::serialized_reader::ReadOptionsBuilder;
1365    use crate::file::{
1366        reader::{FileReader, SerializedFileReader},
1367        statistics::Statistics,
1368    };
1369
1370    #[test]
1371    fn arrow_writer() {
1372        // define schema
1373        let schema = Schema::new(vec![
1374            Field::new("a", DataType::Int32, false),
1375            Field::new("b", DataType::Int32, true),
1376        ]);
1377
1378        // create some data
1379        let a = Int32Array::from(vec![1, 2, 3, 4, 5]);
1380        let b = Int32Array::from(vec![Some(1), None, None, Some(4), Some(5)]);
1381
1382        // build a record batch
1383        let batch = RecordBatch::try_new(Arc::new(schema), vec![Arc::new(a), Arc::new(b)]).unwrap();
1384
1385        roundtrip(batch, Some(SMALL_SIZE / 2));
1386    }
1387
1388    fn get_bytes_after_close(schema: SchemaRef, expected_batch: &RecordBatch) -> Vec<u8> {
1389        let mut buffer = vec![];
1390
1391        let mut writer = ArrowWriter::try_new(&mut buffer, schema, None).unwrap();
1392        writer.write(expected_batch).unwrap();
1393        writer.close().unwrap();
1394
1395        buffer
1396    }
1397
1398    fn get_bytes_by_into_inner(schema: SchemaRef, expected_batch: &RecordBatch) -> Vec<u8> {
1399        let mut writer = ArrowWriter::try_new(Vec::new(), schema, None).unwrap();
1400        writer.write(expected_batch).unwrap();
1401        writer.into_inner().unwrap()
1402    }
1403
1404    #[test]
1405    fn roundtrip_bytes() {
1406        // define schema
1407        let schema = Arc::new(Schema::new(vec![
1408            Field::new("a", DataType::Int32, false),
1409            Field::new("b", DataType::Int32, true),
1410        ]));
1411
1412        // create some data
1413        let a = Int32Array::from(vec![1, 2, 3, 4, 5]);
1414        let b = Int32Array::from(vec![Some(1), None, None, Some(4), Some(5)]);
1415
1416        // build a record batch
1417        let expected_batch =
1418            RecordBatch::try_new(schema.clone(), vec![Arc::new(a), Arc::new(b)]).unwrap();
1419
1420        for buffer in [
1421            get_bytes_after_close(schema.clone(), &expected_batch),
1422            get_bytes_by_into_inner(schema, &expected_batch),
1423        ] {
1424            let cursor = Bytes::from(buffer);
1425            let mut record_batch_reader = ParquetRecordBatchReader::try_new(cursor, 1024).unwrap();
1426
1427            let actual_batch = record_batch_reader
1428                .next()
1429                .expect("No batch found")
1430                .expect("Unable to get batch");
1431
1432            assert_eq!(expected_batch.schema(), actual_batch.schema());
1433            assert_eq!(expected_batch.num_columns(), actual_batch.num_columns());
1434            assert_eq!(expected_batch.num_rows(), actual_batch.num_rows());
1435            for i in 0..expected_batch.num_columns() {
1436                let expected_data = expected_batch.column(i).to_data();
1437                let actual_data = actual_batch.column(i).to_data();
1438
1439                assert_eq!(expected_data, actual_data);
1440            }
1441        }
1442    }
1443
1444    #[test]
1445    fn arrow_writer_non_null() {
1446        // define schema
1447        let schema = Schema::new(vec![Field::new("a", DataType::Int32, false)]);
1448
1449        // create some data
1450        let a = Int32Array::from(vec![1, 2, 3, 4, 5]);
1451
1452        // build a record batch
1453        let batch = RecordBatch::try_new(Arc::new(schema), vec![Arc::new(a)]).unwrap();
1454
1455        roundtrip(batch, Some(SMALL_SIZE / 2));
1456    }
1457
1458    #[test]
1459    fn arrow_writer_list() {
1460        // define schema
1461        let schema = Schema::new(vec![Field::new(
1462            "a",
1463            DataType::List(Arc::new(Field::new_list_field(DataType::Int32, false))),
1464            true,
1465        )]);
1466
1467        // create some data
1468        let a_values = Int32Array::from(vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10]);
1469
1470        // Construct a buffer for value offsets, for the nested array:
1471        //  [[1], [2, 3], null, [4, 5, 6], [7, 8, 9, 10]]
1472        let a_value_offsets = arrow::buffer::Buffer::from([0, 1, 3, 3, 6, 10].to_byte_slice());
1473
1474        // Construct a list array from the above two
1475        let a_list_data = ArrayData::builder(DataType::List(Arc::new(Field::new_list_field(
1476            DataType::Int32,
1477            false,
1478        ))))
1479        .len(5)
1480        .add_buffer(a_value_offsets)
1481        .add_child_data(a_values.into_data())
1482        .null_bit_buffer(Some(Buffer::from([0b00011011])))
1483        .build()
1484        .unwrap();
1485        let a = ListArray::from(a_list_data);
1486
1487        // build a record batch
1488        let batch = RecordBatch::try_new(Arc::new(schema), vec![Arc::new(a)]).unwrap();
1489
1490        assert_eq!(batch.column(0).null_count(), 1);
1491
1492        // This test fails if the max row group size is less than the batch's length
1493        // see https://github.com/apache/arrow-rs/issues/518
1494        roundtrip(batch, None);
1495    }
1496
1497    #[test]
1498    fn arrow_writer_list_non_null() {
1499        // define schema
1500        let schema = Schema::new(vec![Field::new(
1501            "a",
1502            DataType::List(Arc::new(Field::new_list_field(DataType::Int32, false))),
1503            false,
1504        )]);
1505
1506        // create some data
1507        let a_values = Int32Array::from(vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10]);
1508
1509        // Construct a buffer for value offsets, for the nested array:
1510        //  [[1], [2, 3], [], [4, 5, 6], [7, 8, 9, 10]]
1511        let a_value_offsets = arrow::buffer::Buffer::from([0, 1, 3, 3, 6, 10].to_byte_slice());
1512
1513        // Construct a list array from the above two
1514        let a_list_data = ArrayData::builder(DataType::List(Arc::new(Field::new_list_field(
1515            DataType::Int32,
1516            false,
1517        ))))
1518        .len(5)
1519        .add_buffer(a_value_offsets)
1520        .add_child_data(a_values.into_data())
1521        .build()
1522        .unwrap();
1523        let a = ListArray::from(a_list_data);
1524
1525        // build a record batch
1526        let batch = RecordBatch::try_new(Arc::new(schema), vec![Arc::new(a)]).unwrap();
1527
1528        // This test fails if the max row group size is less than the batch's length
1529        // see https://github.com/apache/arrow-rs/issues/518
1530        assert_eq!(batch.column(0).null_count(), 0);
1531
1532        roundtrip(batch, None);
1533    }
1534
1535    #[test]
1536    fn arrow_writer_binary() {
1537        let string_field = Field::new("a", DataType::Utf8, false);
1538        let binary_field = Field::new("b", DataType::Binary, false);
1539        let schema = Schema::new(vec![string_field, binary_field]);
1540
1541        let raw_string_values = vec!["foo", "bar", "baz", "quux"];
1542        let raw_binary_values = [
1543            b"foo".to_vec(),
1544            b"bar".to_vec(),
1545            b"baz".to_vec(),
1546            b"quux".to_vec(),
1547        ];
1548        let raw_binary_value_refs = raw_binary_values
1549            .iter()
1550            .map(|x| x.as_slice())
1551            .collect::<Vec<_>>();
1552
1553        let string_values = StringArray::from(raw_string_values.clone());
1554        let binary_values = BinaryArray::from(raw_binary_value_refs);
1555        let batch = RecordBatch::try_new(
1556            Arc::new(schema),
1557            vec![Arc::new(string_values), Arc::new(binary_values)],
1558        )
1559        .unwrap();
1560
1561        roundtrip(batch, Some(SMALL_SIZE / 2));
1562    }
1563
1564    #[test]
1565    fn arrow_writer_binary_view() {
1566        let string_field = Field::new("a", DataType::Utf8View, false);
1567        let binary_field = Field::new("b", DataType::BinaryView, false);
1568        let nullable_string_field = Field::new("a", DataType::Utf8View, true);
1569        let schema = Schema::new(vec![string_field, binary_field, nullable_string_field]);
1570
1571        let raw_string_values = vec!["foo", "bar", "large payload over 12 bytes", "lulu"];
1572        let raw_binary_values = vec![
1573            b"foo".to_vec(),
1574            b"bar".to_vec(),
1575            b"large payload over 12 bytes".to_vec(),
1576            b"lulu".to_vec(),
1577        ];
1578        let nullable_string_values =
1579            vec![Some("foo"), None, Some("large payload over 12 bytes"), None];
1580
1581        let string_view_values = StringViewArray::from(raw_string_values);
1582        let binary_view_values = BinaryViewArray::from_iter_values(raw_binary_values);
1583        let nullable_string_view_values = StringViewArray::from(nullable_string_values);
1584        let batch = RecordBatch::try_new(
1585            Arc::new(schema),
1586            vec![
1587                Arc::new(string_view_values),
1588                Arc::new(binary_view_values),
1589                Arc::new(nullable_string_view_values),
1590            ],
1591        )
1592        .unwrap();
1593
1594        roundtrip(batch.clone(), Some(SMALL_SIZE / 2));
1595        roundtrip(batch, None);
1596    }
1597
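    /// Builds a single-column `Decimal128` batch with the given precision and scale.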
1598    fn get_decimal_batch(precision: u8, scale: i8) -> RecordBatch {
1599        let decimal_field = Field::new("a", DataType::Decimal128(precision, scale), false);
1600        let schema = Schema::new(vec![decimal_field]);
1601
1602        let decimal_values = vec![10_000, 50_000, 0, -100]
1603            .into_iter()
1604            .map(Some)
1605            .collect::<Decimal128Array>()
1606            .with_precision_and_scale(precision, scale)
1607            .unwrap();
1608
1609        RecordBatch::try_new(Arc::new(schema), vec![Arc::new(decimal_values)]).unwrap()
1610    }
1611
1612    #[test]
1613    fn arrow_writer_decimal() {
1614        // int32 to store the decimal value
1615        let batch_int32_decimal = get_decimal_batch(5, 2);
1616        roundtrip(batch_int32_decimal, Some(SMALL_SIZE / 2));
1617        // int64 to store the decimal value
1618        let batch_int64_decimal = get_decimal_batch(12, 2);
1619        roundtrip(batch_int64_decimal, Some(SMALL_SIZE / 2));
1620        // fixed_length_byte_array to store the decimal value
1621        let batch_fixed_len_byte_array_decimal = get_decimal_batch(30, 2);
1622        roundtrip(batch_fixed_len_byte_array_decimal, Some(SMALL_SIZE / 2));
1623    }
1624
1625    #[test]
1626    fn arrow_writer_complex() {
1627        // define schema
1628        let struct_field_d = Arc::new(Field::new("d", DataType::Float64, true));
1629        let struct_field_f = Arc::new(Field::new("f", DataType::Float32, true));
1630        let struct_field_g = Arc::new(Field::new_list(
1631            "g",
1632            Field::new_list_field(DataType::Int16, true),
1633            false,
1634        ));
1635        let struct_field_h = Arc::new(Field::new_list(
1636            "h",
1637            Field::new_list_field(DataType::Int16, false),
1638            true,
1639        ));
1640        let struct_field_e = Arc::new(Field::new_struct(
1641            "e",
1642            vec![
1643                struct_field_f.clone(),
1644                struct_field_g.clone(),
1645                struct_field_h.clone(),
1646            ],
1647            false,
1648        ));
1649        let schema = Schema::new(vec![
1650            Field::new("a", DataType::Int32, false),
1651            Field::new("b", DataType::Int32, true),
1652            Field::new_struct(
1653                "c",
1654                vec![struct_field_d.clone(), struct_field_e.clone()],
1655                false,
1656            ),
1657        ]);
1658
1659        // create some data
1660        let a = Int32Array::from(vec![1, 2, 3, 4, 5]);
1661        let b = Int32Array::from(vec![Some(1), None, None, Some(4), Some(5)]);
1662        let d = Float64Array::from(vec![None, None, None, Some(1.0), None]);
1663        let f = Float32Array::from(vec![Some(0.0), None, Some(333.3), None, Some(5.25)]);
1664
1665        let g_value = Int16Array::from(vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10]);
1666
1667        // Construct a buffer for value offsets, for the nested array:
1668        //  [[1], [2, 3], [], [4, 5, 6], [7, 8, 9, 10]]
1669        let g_value_offsets = arrow::buffer::Buffer::from([0, 1, 3, 3, 6, 10].to_byte_slice());
1670
1671        // Construct a list array from the above two
1672        let g_list_data = ArrayData::builder(struct_field_g.data_type().clone())
1673            .len(5)
1674            .add_buffer(g_value_offsets.clone())
1675            .add_child_data(g_value.to_data())
1676            .build()
1677            .unwrap();
1678        let g = ListArray::from(g_list_data);
1679        // The difference between g and h is that h has a null bitmap
1680        let h_list_data = ArrayData::builder(struct_field_h.data_type().clone())
1681            .len(5)
1682            .add_buffer(g_value_offsets)
1683            .add_child_data(g_value.to_data())
1684            .null_bit_buffer(Some(Buffer::from([0b00011011])))
1685            .build()
1686            .unwrap();
1687        let h = ListArray::from(h_list_data);
1688
1689        let e = StructArray::from(vec![
1690            (struct_field_f, Arc::new(f) as ArrayRef),
1691            (struct_field_g, Arc::new(g) as ArrayRef),
1692            (struct_field_h, Arc::new(h) as ArrayRef),
1693        ]);
1694
1695        let c = StructArray::from(vec![
1696            (struct_field_d, Arc::new(d) as ArrayRef),
1697            (struct_field_e, Arc::new(e) as ArrayRef),
1698        ]);
1699
1700        // build a record batch
1701        let batch = RecordBatch::try_new(
1702            Arc::new(schema),
1703            vec![Arc::new(a), Arc::new(b), Arc::new(c)],
1704        )
1705        .unwrap();
1706
1707        roundtrip(batch.clone(), Some(SMALL_SIZE / 2));
1708        roundtrip(batch, Some(SMALL_SIZE / 3));
1709    }
1710
1711    #[test]
1712    fn arrow_writer_complex_mixed() {
1713        // This test was added while investigating https://github.com/apache/arrow-rs/issues/244.
1714        // It was subsequently fixed while investigating https://github.com/apache/arrow-rs/issues/245.
1715
1716        // define schema
1717        let offset_field = Arc::new(Field::new("offset", DataType::Int32, false));
1718        let partition_field = Arc::new(Field::new("partition", DataType::Int64, true));
1719        let topic_field = Arc::new(Field::new("topic", DataType::Utf8, true));
1720        let schema = Schema::new(vec![Field::new(
1721            "some_nested_object",
1722            DataType::Struct(Fields::from(vec![
1723                offset_field.clone(),
1724                partition_field.clone(),
1725                topic_field.clone(),
1726            ])),
1727            false,
1728        )]);
1729
1730        // create some data
1731        let offset = Int32Array::from(vec![1, 2, 3, 4, 5]);
1732        let partition = Int64Array::from(vec![Some(1), None, None, Some(4), Some(5)]);
1733        let topic = StringArray::from(vec![Some("A"), None, Some("A"), Some(""), None]);
1734
1735        let some_nested_object = StructArray::from(vec![
1736            (offset_field, Arc::new(offset) as ArrayRef),
1737            (partition_field, Arc::new(partition) as ArrayRef),
1738            (topic_field, Arc::new(topic) as ArrayRef),
1739        ]);
1740
1741        // build a record batch
1742        let batch =
1743            RecordBatch::try_new(Arc::new(schema), vec![Arc::new(some_nested_object)]).unwrap();
1744
1745        roundtrip(batch, Some(SMALL_SIZE / 2));
1746    }
1747
1748    #[test]
1749    fn arrow_writer_map() {
1750        // Note: we are using the JSON Arrow reader for brevity
1751        let json_content = r#"
1752        {"stocks":{"long": "$AAA", "short": "$BBB"}}
1753        {"stocks":{"long": null, "long": "$CCC", "short": null}}
1754        {"stocks":{"hedged": "$YYY", "long": null, "short": "$D"}}
1755        "#;
1756        let entries_struct_type = DataType::Struct(Fields::from(vec![
1757            Field::new("key", DataType::Utf8, false),
1758            Field::new("value", DataType::Utf8, true),
1759        ]));
1760        let stocks_field = Field::new(
1761            "stocks",
1762            DataType::Map(
1763                Arc::new(Field::new("entries", entries_struct_type, false)),
1764                false,
1765            ),
1766            true,
1767        );
1768        let schema = Arc::new(Schema::new(vec![stocks_field]));
1769        let builder = arrow::json::ReaderBuilder::new(schema).with_batch_size(64);
1770        let mut reader = builder.build(std::io::Cursor::new(json_content)).unwrap();
1771
1772        let batch = reader.next().unwrap().unwrap();
1773        roundtrip(batch, None);
1774    }
1775
1776    #[test]
1777    fn arrow_writer_2_level_struct() {
1778        // tests writing struct<struct<primitive>>
1779        let field_c = Field::new("c", DataType::Int32, true);
1780        let field_b = Field::new("b", DataType::Struct(vec![field_c].into()), true);
1781        let type_a = DataType::Struct(vec![field_b.clone()].into());
1782        let field_a = Field::new("a", type_a, true);
1783        let schema = Schema::new(vec![field_a.clone()]);
1784
1785        // create data
1786        let c = Int32Array::from(vec![Some(1), None, Some(3), None, None, Some(6)]);
1787        let b_data = ArrayDataBuilder::new(field_b.data_type().clone())
1788            .len(6)
1789            .null_bit_buffer(Some(Buffer::from([0b00100111])))
1790            .add_child_data(c.into_data())
1791            .build()
1792            .unwrap();
1793        let b = StructArray::from(b_data);
1794        let a_data = ArrayDataBuilder::new(field_a.data_type().clone())
1795            .len(6)
1796            .null_bit_buffer(Some(Buffer::from([0b00101111])))
1797            .add_child_data(b.into_data())
1798            .build()
1799            .unwrap();
1800        let a = StructArray::from(a_data);
1801
1802        assert_eq!(a.null_count(), 1);
1803        assert_eq!(a.column(0).null_count(), 2);
1804
1805        // build a record batch
1806        let batch = RecordBatch::try_new(Arc::new(schema), vec![Arc::new(a)]).unwrap();
1807
1808        roundtrip(batch, Some(SMALL_SIZE / 2));
1809    }
1810
1811    #[test]
1812    fn arrow_writer_2_level_struct_non_null() {
1813        // tests writing struct<struct<primitive>>
1814        let field_c = Field::new("c", DataType::Int32, false);
1815        let type_b = DataType::Struct(vec![field_c].into());
1816        let field_b = Field::new("b", type_b.clone(), false);
1817        let type_a = DataType::Struct(vec![field_b].into());
1818        let field_a = Field::new("a", type_a.clone(), false);
1819        let schema = Schema::new(vec![field_a]);
1820
1821        // create data
1822        let c = Int32Array::from(vec![1, 2, 3, 4, 5, 6]);
1823        let b_data = ArrayDataBuilder::new(type_b)
1824            .len(6)
1825            .add_child_data(c.into_data())
1826            .build()
1827            .unwrap();
1828        let b = StructArray::from(b_data);
1829        let a_data = ArrayDataBuilder::new(type_a)
1830            .len(6)
1831            .add_child_data(b.into_data())
1832            .build()
1833            .unwrap();
1834        let a = StructArray::from(a_data);
1835
1836        assert_eq!(a.null_count(), 0);
1837        assert_eq!(a.column(0).null_count(), 0);
1838
1839        // build a record batch
1840        let batch = RecordBatch::try_new(Arc::new(schema), vec![Arc::new(a)]).unwrap();
1841
1842        roundtrip(batch, Some(SMALL_SIZE / 2));
1843    }
1844
1845    #[test]
1846    fn arrow_writer_2_level_struct_mixed_null() {
1847        // tests writing struct<struct<primitive>>
1848        let field_c = Field::new("c", DataType::Int32, false);
1849        let type_b = DataType::Struct(vec![field_c].into());
1850        let field_b = Field::new("b", type_b.clone(), true);
1851        let type_a = DataType::Struct(vec![field_b].into());
1852        let field_a = Field::new("a", type_a.clone(), false);
1853        let schema = Schema::new(vec![field_a]);
1854
1855        // create data
1856        let c = Int32Array::from(vec![1, 2, 3, 4, 5, 6]);
1857        let b_data = ArrayDataBuilder::new(type_b)
1858            .len(6)
1859            .null_bit_buffer(Some(Buffer::from([0b00100111])))
1860            .add_child_data(c.into_data())
1861            .build()
1862            .unwrap();
1863        let b = StructArray::from(b_data);
1864        // a intentionally has no null buffer, to test that this is handled correctly
1865        let a_data = ArrayDataBuilder::new(type_a)
1866            .len(6)
1867            .add_child_data(b.into_data())
1868            .build()
1869            .unwrap();
1870        let a = StructArray::from(a_data);
1871
1872        assert_eq!(a.null_count(), 0);
1873        assert_eq!(a.column(0).null_count(), 2);
1874
1875        // build a record batch
1876        let batch = RecordBatch::try_new(Arc::new(schema), vec![Arc::new(a)]).unwrap();
1877
1878        roundtrip(batch, Some(SMALL_SIZE / 2));
1879    }
1880
1881    #[test]
1882    fn arrow_writer_2_level_struct_mixed_null_2() {
1883        // tests writing struct<struct<primitive>>, where the primitive columns are non-null.
1884        let field_c = Field::new("c", DataType::Int32, false);
1885        let field_d = Field::new("d", DataType::FixedSizeBinary(4), false);
1886        let field_e = Field::new(
1887            "e",
1888            DataType::Dictionary(Box::new(DataType::Int32), Box::new(DataType::Utf8)),
1889            false,
1890        );
1891
1892        let field_b = Field::new(
1893            "b",
1894            DataType::Struct(vec![field_c, field_d, field_e].into()),
1895            false,
1896        );
1897        let type_a = DataType::Struct(vec![field_b.clone()].into());
1898        let field_a = Field::new("a", type_a, true);
1899        let schema = Schema::new(vec![field_a.clone()]);
1900
1901        // create data
1902        let c = Int32Array::from_iter_values(0..6);
1903        let d = FixedSizeBinaryArray::try_from_iter(
1904            ["aaaa", "bbbb", "cccc", "dddd", "eeee", "ffff"].into_iter(),
1905        )
1906        .expect("four byte values");
1907        let e = Int32DictionaryArray::from_iter(["one", "two", "three", "four", "five", "one"]);
1908        let b_data = ArrayDataBuilder::new(field_b.data_type().clone())
1909            .len(6)
1910            .add_child_data(c.into_data())
1911            .add_child_data(d.into_data())
1912            .add_child_data(e.into_data())
1913            .build()
1914            .unwrap();
1915        let b = StructArray::from(b_data);
1916        let a_data = ArrayDataBuilder::new(field_a.data_type().clone())
1917            .len(6)
1918            .null_bit_buffer(Some(Buffer::from([0b00100101])))
1919            .add_child_data(b.into_data())
1920            .build()
1921            .unwrap();
1922        let a = StructArray::from(a_data);
1923
1924        assert_eq!(a.null_count(), 3);
1925        assert_eq!(a.column(0).null_count(), 0);
1926
1927        // build a record batch
1928        let batch = RecordBatch::try_new(Arc::new(schema), vec![Arc::new(a)]).unwrap();
1929
1930        roundtrip(batch, Some(SMALL_SIZE / 2));
1931    }
1932
1933    #[test]
1934    fn test_fixed_size_binary_in_dict() {
1935        fn test_fixed_size_binary_in_dict_inner<K>()
1936        where
1937            K: ArrowDictionaryKeyType,
1938            K::Native: FromPrimitive + ToPrimitive + TryFrom<u8>,
1939            <<K as arrow_array::ArrowPrimitiveType>::Native as TryFrom<u8>>::Error: std::fmt::Debug,
1940        {
1941            let field = Field::new(
1942                "a",
1943                DataType::Dictionary(
1944                    Box::new(K::DATA_TYPE),
1945                    Box::new(DataType::FixedSizeBinary(4)),
1946                ),
1947                false,
1948            );
1949            let schema = Schema::new(vec![field]);
1950
1951            let keys: Vec<K::Native> = vec![
1952                K::Native::try_from(0u8).unwrap(),
1953                K::Native::try_from(0u8).unwrap(),
1954                K::Native::try_from(1u8).unwrap(),
1955            ];
1956            let keys = PrimitiveArray::<K>::from_iter_values(keys);
1957            let values = FixedSizeBinaryArray::try_from_iter(
1958                vec![vec![0, 0, 0, 0], vec![1, 1, 1, 1]].into_iter(),
1959            )
1960            .unwrap();
1961
1962            let data = DictionaryArray::<K>::new(keys, Arc::new(values));
1963            let batch = RecordBatch::try_new(Arc::new(schema), vec![Arc::new(data)]).unwrap();
1964            roundtrip(batch, None);
1965        }
1966
1967        test_fixed_size_binary_in_dict_inner::<UInt8Type>();
1968        test_fixed_size_binary_in_dict_inner::<UInt16Type>();
1969        test_fixed_size_binary_in_dict_inner::<UInt32Type>();
1970        test_fixed_size_binary_in_dict_inner::<UInt64Type>();
1971        test_fixed_size_binary_in_dict_inner::<Int8Type>();
1972        test_fixed_size_binary_in_dict_inner::<Int16Type>();
1973        test_fixed_size_binary_in_dict_inner::<Int32Type>();
1974        test_fixed_size_binary_in_dict_inner::<Int64Type>();
1975    }
1976
1977    #[test]
1978    fn test_empty_dict() {
1979        let struct_fields = Fields::from(vec![Field::new(
1980            "dict",
1981            DataType::Dictionary(Box::new(DataType::Int32), Box::new(DataType::Utf8)),
1982            false,
1983        )]);
1984
1985        let schema = Schema::new(vec![Field::new_struct(
1986            "struct",
1987            struct_fields.clone(),
1988            true,
1989        )]);
1990        let dictionary = Arc::new(DictionaryArray::new(
1991            Int32Array::new_null(5),
1992            Arc::new(StringArray::new_null(0)),
1993        ));
1994
1995        let s = StructArray::new(
1996            struct_fields,
1997            vec![dictionary],
1998            Some(NullBuffer::new_null(5)),
1999        );
2000
2001        let batch = RecordBatch::try_new(Arc::new(schema), vec![Arc::new(s)]).unwrap();
2002        roundtrip(batch, None);
2003    }
2004    #[test]
2005    fn arrow_writer_page_size() {
2006        let schema = Arc::new(Schema::new(vec![Field::new("col", DataType::Utf8, false)]));
2007
2008        let mut builder = StringBuilder::with_capacity(100, 329 * 10_000);
2009
2010        // Generate an array of 10 unique 10-character strings
2011        for i in 0..10 {
2012            let value = i
2013                .to_string()
2014                .repeat(10)
2015                .chars()
2016                .take(10)
2017                .collect::<String>();
2018
2019            builder.append_value(value);
2020        }
2021
2022        let array = Arc::new(builder.finish());
2023
2024        let batch = RecordBatch::try_new(schema, vec![array]).unwrap();
2025
2026        let file = tempfile::tempfile().unwrap();
2027
2028        // Set everything very low so we fall back to PLAIN encoding after the first row
2029        let props = WriterProperties::builder()
2030            .set_data_page_size_limit(1)
2031            .set_dictionary_page_size_limit(1)
2032            .set_write_batch_size(1)
2033            .build();
2034
2035        let mut writer =
2036            ArrowWriter::try_new(file.try_clone().unwrap(), batch.schema(), Some(props))
2037                .expect("Unable to write file");
2038        writer.write(&batch).unwrap();
2039        writer.close().unwrap();
2040
2041        let options = ReadOptionsBuilder::new().with_page_index().build();
2042        let reader =
2043            SerializedFileReader::new_with_options(file.try_clone().unwrap(), options).unwrap();
2044
2045        let column = reader.metadata().row_group(0).columns();
2046
2047        assert_eq!(column.len(), 1);
2048
2049        // We should write one row before falling back to PLAIN encoding so there should still be a
2050        // dictionary page.
2051        assert!(
2052            column[0].dictionary_page_offset().is_some(),
2053            "Expected a dictionary page"
2054        );
2055
2056        assert!(reader.metadata().offset_index().is_some());
2057        let offset_indexes = &reader.metadata().offset_index().unwrap()[0];
2058
2059        let page_locations = offset_indexes[0].page_locations.clone();
2060
2061        // We should fall back to PLAIN encoding after the first row, and our max page size is 1 byte,
2062        // so we expect one dictionary-encoded page and then a page per row thereafter.
2063        assert_eq!(
2064            page_locations.len(),
2065            10,
2066            "Expected 10 pages but got {page_locations:#?}"
2067        );
2068    }
2069
2070    #[test]
2071    fn arrow_writer_float_nans() {
2072        let f16_field = Field::new("a", DataType::Float16, false);
2073        let f32_field = Field::new("b", DataType::Float32, false);
2074        let f64_field = Field::new("c", DataType::Float64, false);
2075        let schema = Schema::new(vec![f16_field, f32_field, f64_field]);
2076
2077        let f16_values = (0..MEDIUM_SIZE)
2078            .map(|i| {
2079                Some(if i % 2 == 0 {
2080                    f16::NAN
2081                } else {
2082                    f16::from_f32(i as f32)
2083                })
2084            })
2085            .collect::<Float16Array>();
2086
2087        let f32_values = (0..MEDIUM_SIZE)
2088            .map(|i| Some(if i % 2 == 0 { f32::NAN } else { i as f32 }))
2089            .collect::<Float32Array>();
2090
2091        let f64_values = (0..MEDIUM_SIZE)
2092            .map(|i| Some(if i % 2 == 0 { f64::NAN } else { i as f64 }))
2093            .collect::<Float64Array>();
2094
2095        let batch = RecordBatch::try_new(
2096            Arc::new(schema),
2097            vec![
2098                Arc::new(f16_values),
2099                Arc::new(f32_values),
2100                Arc::new(f64_values),
2101            ],
2102        )
2103        .unwrap();
2104
2105        roundtrip(batch, None);
2106    }
2107
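    // Array sizes used by the round-trip tests below.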
2108    const SMALL_SIZE: usize = 7;
2109    const MEDIUM_SIZE: usize = 63;
2110
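    /// Round-trips `expected_batch` through both writer versions, optionally capping the row
    /// group size, and returns the written files.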
2111    fn roundtrip(expected_batch: RecordBatch, max_row_group_size: Option<usize>) -> Vec<File> {
2112        let mut files = vec![];
2113        for version in [WriterVersion::PARQUET_1_0, WriterVersion::PARQUET_2_0] {
2114            let mut props = WriterProperties::builder().set_writer_version(version);
2115
2116            if let Some(size) = max_row_group_size {
2117                props = props.set_max_row_group_size(size)
2118            }
2119
2120            let props = props.build();
2121            files.push(roundtrip_opts(&expected_batch, props))
2122        }
2123        files
2124    }
2125
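    /// Writes `expected_batch` with `props` to a temporary file, reads it back, and compares
    /// each column using `validate`; returns the file.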
2126    fn roundtrip_opts_with_array_validation<F>(
2127        expected_batch: &RecordBatch,
2128        props: WriterProperties,
2129        validate: F,
2130    ) -> File
2131    where
2132        F: Fn(&ArrayData, &ArrayData),
2133    {
2134        let file = tempfile::tempfile().unwrap();
2135
2136        let mut writer = ArrowWriter::try_new(
2137            file.try_clone().unwrap(),
2138            expected_batch.schema(),
2139            Some(props),
2140        )
2141        .expect("Unable to write file");
2142        writer.write(expected_batch).unwrap();
2143        writer.close().unwrap();
2144
2145        let mut record_batch_reader =
2146            ParquetRecordBatchReader::try_new(file.try_clone().unwrap(), 1024).unwrap();
2147
2148        let actual_batch = record_batch_reader
2149            .next()
2150            .expect("No batch found")
2151            .expect("Unable to get batch");
2152
2153        assert_eq!(expected_batch.schema(), actual_batch.schema());
2154        assert_eq!(expected_batch.num_columns(), actual_batch.num_columns());
2155        assert_eq!(expected_batch.num_rows(), actual_batch.num_rows());
2156        for i in 0..expected_batch.num_columns() {
2157            let expected_data = expected_batch.column(i).to_data();
2158            let actual_data = actual_batch.column(i).to_data();
2159            validate(&expected_data, &actual_data);
2160        }
2161
2162        file
2163    }
2164
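    /// Round-trips `expected_batch` with `props`, requiring the read data to validate and to
    /// exactly match the written data.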
2165    fn roundtrip_opts(expected_batch: &RecordBatch, props: WriterProperties) -> File {
2166        roundtrip_opts_with_array_validation(expected_batch, props, |a, b| {
2167            a.validate_full().expect("valid expected data");
2168            b.validate_full().expect("valid actual data");
2169            assert_eq!(a, b)
2170        })
2171    }
2172
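    /// Options controlling `one_column_roundtrip_with_options`.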
2173    struct RoundTripOptions {
2174        values: ArrayRef,
2175        schema: SchemaRef,
2176        bloom_filter: bool,
2177        bloom_filter_position: BloomFilterPosition,
2178    }
2179
2180    impl RoundTripOptions {
2181        fn new(values: ArrayRef, nullable: bool) -> Self {
2182            let data_type = values.data_type().clone();
2183            let schema = Schema::new(vec![Field::new("col", data_type, nullable)]);
2184            Self {
2185                values,
2186                schema: Arc::new(schema),
2187                bloom_filter: false,
2188                bloom_filter_position: BloomFilterPosition::AfterRowGroup,
2189            }
2190        }
2191    }
2192
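    /// Round-trips a single column named "col" with the default options.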
2193    fn one_column_roundtrip(values: ArrayRef, nullable: bool) -> Vec<File> {
2194        one_column_roundtrip_with_options(RoundTripOptions::new(values, nullable))
2195    }
2196
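    /// Round-trips a single column using the caller-supplied schema.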
2197    fn one_column_roundtrip_with_schema(values: ArrayRef, schema: SchemaRef) -> Vec<File> {
2198        let mut options = RoundTripOptions::new(values, false);
2199        options.schema = schema;
2200        one_column_roundtrip_with_options(options)
2201    }
2202
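    /// Round-trips a single-column batch across dictionary settings, encodings suited to its
    /// data type, writer versions, and row group sizes.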
2203    fn one_column_roundtrip_with_options(options: RoundTripOptions) -> Vec<File> {
2204        let RoundTripOptions {
2205            values,
2206            schema,
2207            bloom_filter,
2208            bloom_filter_position,
2209        } = options;
2210
2211        let encodings = match values.data_type() {
2212            DataType::Utf8 | DataType::LargeUtf8 | DataType::Binary | DataType::LargeBinary => {
2213                vec![
2214                    Encoding::PLAIN,
2215                    Encoding::DELTA_BYTE_ARRAY,
2216                    Encoding::DELTA_LENGTH_BYTE_ARRAY,
2217                ]
2218            }
2219            DataType::Int64
2220            | DataType::Int32
2221            | DataType::Int16
2222            | DataType::Int8
2223            | DataType::UInt64
2224            | DataType::UInt32
2225            | DataType::UInt16
2226            | DataType::UInt8 => vec![
2227                Encoding::PLAIN,
2228                Encoding::DELTA_BINARY_PACKED,
2229                Encoding::BYTE_STREAM_SPLIT,
2230            ],
2231            DataType::Float32 | DataType::Float64 => {
2232                vec![Encoding::PLAIN, Encoding::BYTE_STREAM_SPLIT]
2233            }
2234            _ => vec![Encoding::PLAIN],
2235        };
2236
2237        let expected_batch = RecordBatch::try_new(schema, vec![values]).unwrap();
2238
2239        let row_group_sizes = [1024, SMALL_SIZE, SMALL_SIZE / 2, SMALL_SIZE / 2 + 1, 10];
2240
2241        let mut files = vec![];
2242        for dictionary_size in [0, 1, 1024] {
2243            for encoding in &encodings {
2244                for version in [WriterVersion::PARQUET_1_0, WriterVersion::PARQUET_2_0] {
2245                    for row_group_size in row_group_sizes {
2246                        let props = WriterProperties::builder()
2247                            .set_writer_version(version)
2248                            .set_max_row_group_size(row_group_size)
2249                            .set_dictionary_enabled(dictionary_size != 0)
2250                            .set_dictionary_page_size_limit(dictionary_size.max(1))
2251                            .set_encoding(*encoding)
2252                            .set_bloom_filter_enabled(bloom_filter)
2253                            .set_bloom_filter_position(bloom_filter_position)
2254                            .build();
2255
2256                        files.push(roundtrip_opts(&expected_batch, props))
2257                    }
2258                }
2259            }
2260        }
2261        files
2262    }
2263
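    /// Round-trips `iter` as a non-nullable single column of array type `A`.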
2264    fn values_required<A, I>(iter: I) -> Vec<File>
2265    where
2266        A: From<Vec<I::Item>> + Array + 'static,
2267        I: IntoIterator,
2268    {
2269        let raw_values: Vec<_> = iter.into_iter().collect();
2270        let values = Arc::new(A::from(raw_values));
2271        one_column_roundtrip(values, false)
2272    }
2273
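    /// Round-trips `iter` as a nullable single column, with every even-indexed slot nulled.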
2274    fn values_optional<A, I>(iter: I) -> Vec<File>
2275    where
2276        A: From<Vec<Option<I::Item>>> + Array + 'static,
2277        I: IntoIterator,
2278    {
2279        let optional_raw_values: Vec<_> = iter
2280            .into_iter()
2281            .enumerate()
2282            .map(|(i, v)| if i % 2 == 0 { None } else { Some(v) })
2283            .collect();
2284        let optional_values = Arc::new(A::from(optional_raw_values));
2285        one_column_roundtrip(optional_values, true)
2286    }
2287
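    /// Runs both the required and the optional single-column round-trip over the same values.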
2288    fn required_and_optional<A, I>(iter: I)
2289    where
2290        A: From<Vec<I::Item>> + From<Vec<Option<I::Item>>> + Array + 'static,
2291        I: IntoIterator + Clone,
2292    {
2293        values_required::<A, I>(iter.clone());
2294        values_optional::<A, I>(iter);
2295    }
2296
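    /// Checks the bloom filters of `file_column` in the first file: every `positive_values`
    /// entry must be found in some row group's filter, and no `negative_values` entry in any.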
2297    fn check_bloom_filter<T: AsBytes>(
2298        files: Vec<File>,
2299        file_column: String,
2300        positive_values: Vec<T>,
2301        negative_values: Vec<T>,
2302    ) {
2303        files.into_iter().take(1).for_each(|file| {
2304            let file_reader = SerializedFileReader::new_with_options(
2305                file,
2306                ReadOptionsBuilder::new()
2307                    .with_reader_properties(
2308                        ReaderProperties::builder()
2309                            .set_read_bloom_filter(true)
2310                            .build(),
2311                    )
2312                    .build(),
2313            )
2314            .expect("Unable to open file as Parquet");
2315            let metadata = file_reader.metadata();
2316
2317            // Gets bloom filters from all row groups.
2318            let mut bloom_filters: Vec<_> = vec![];
2319            for (ri, row_group) in metadata.row_groups().iter().enumerate() {
2320                if let Some((column_index, _)) = row_group
2321                    .columns()
2322                    .iter()
2323                    .enumerate()
2324                    .find(|(_, column)| column.column_path().string() == file_column)
2325                {
2326                    let row_group_reader = file_reader
2327                        .get_row_group(ri)
2328                        .expect("Unable to read row group");
2329                    if let Some(sbbf) = row_group_reader.get_column_bloom_filter(column_index) {
2330                        bloom_filters.push(sbbf.clone());
2331                    } else {
2332                        panic!("No bloom filter for column named {file_column} found");
2333                    }
2334                } else {
2335                    panic!("No column named {file_column} found");
2336                }
2337            }
2338
2339            positive_values.iter().for_each(|value| {
2340                let found = bloom_filters.iter().find(|sbbf| sbbf.check(value));
2341                assert!(
2342                    found.is_some(),
2343                    "{}",
2344                    format!("Value {:?} should be in bloom filter", value.as_bytes())
2345                );
2346            });
2347
2348            negative_values.iter().for_each(|value| {
2349                let found = bloom_filters.iter().find(|sbbf| sbbf.check(value));
2350                assert!(
2351                    found.is_none(),
2352                    "{}",
2353                    format!("Value {:?} should not be in bloom filter", value.as_bytes())
2354                );
2355            });
2356        });
2357    }
2358
2359    #[test]
2360    fn all_null_primitive_single_column() {
2361        let values = Arc::new(Int32Array::from(vec![None; SMALL_SIZE]));
2362        one_column_roundtrip(values, true);
2363    }
2364    #[test]
2365    fn null_single_column() {
2366        let values = Arc::new(NullArray::new(SMALL_SIZE));
2367        one_column_roundtrip(values, true);
2368        // null arrays are always nullable; a test with non-nullable nulls fails
2369    }
2370
2371    #[test]
2372    fn bool_single_column() {
2373        required_and_optional::<BooleanArray, _>(
2374            [true, false].iter().cycle().copied().take(SMALL_SIZE),
2375        );
2376    }
2377
2378    #[test]
2379    fn bool_large_single_column() {
2380        let values = Arc::new(
2381            [None, Some(true), Some(false)]
2382                .iter()
2383                .cycle()
2384                .copied()
2385                .take(200_000)
2386                .collect::<BooleanArray>(),
2387        );
2388        let schema = Schema::new(vec![Field::new("col", values.data_type().clone(), true)]);
2389        let expected_batch = RecordBatch::try_new(Arc::new(schema), vec![values]).unwrap();
2390        let file = tempfile::tempfile().unwrap();
2391
2392        let mut writer =
2393            ArrowWriter::try_new(file.try_clone().unwrap(), expected_batch.schema(), None)
2394                .expect("Unable to write file");
2395        writer.write(&expected_batch).unwrap();
2396        writer.close().unwrap();
2397    }
2398
2399    #[test]
2400    fn check_page_offset_index_with_nan() {
2401        let values = Arc::new(Float64Array::from(vec![f64::NAN; 10]));
2402        let schema = Schema::new(vec![Field::new("col", DataType::Float64, true)]);
2403        let batch = RecordBatch::try_new(Arc::new(schema), vec![values]).unwrap();
2404
2405        let mut out = Vec::with_capacity(1024);
2406        let mut writer =
2407            ArrowWriter::try_new(&mut out, batch.schema(), None).expect("Unable to write file");
2408        writer.write(&batch).unwrap();
2409        let file_meta_data = writer.close().unwrap();
2410        for row_group in file_meta_data.row_groups {
2411            for column in row_group.columns {
2412                assert!(column.offset_index_offset.is_some());
2413                assert!(column.offset_index_length.is_some());
2414                assert!(column.column_index_offset.is_none());
2415                assert!(column.column_index_length.is_none());
2416            }
2417        }
2418    }
2419
2420    #[test]
2421    fn i8_single_column() {
2422        required_and_optional::<Int8Array, _>(0..SMALL_SIZE as i8);
2423    }
2424
2425    #[test]
2426    fn i16_single_column() {
2427        required_and_optional::<Int16Array, _>(0..SMALL_SIZE as i16);
2428    }
2429
2430    #[test]
2431    fn i32_single_column() {
2432        required_and_optional::<Int32Array, _>(0..SMALL_SIZE as i32);
2433    }
2434
2435    #[test]
2436    fn i64_single_column() {
2437        required_and_optional::<Int64Array, _>(0..SMALL_SIZE as i64);
2438    }
2439
2440    #[test]
2441    fn u8_single_column() {
2442        required_and_optional::<UInt8Array, _>(0..SMALL_SIZE as u8);
2443    }
2444
2445    #[test]
2446    fn u16_single_column() {
2447        required_and_optional::<UInt16Array, _>(0..SMALL_SIZE as u16);
2448    }
2449
2450    #[test]
2451    fn u32_single_column() {
2452        required_and_optional::<UInt32Array, _>(0..SMALL_SIZE as u32);
2453    }
2454
2455    #[test]
2456    fn u64_single_column() {
2457        required_and_optional::<UInt64Array, _>(0..SMALL_SIZE as u64);
2458    }
2459
2460    #[test]
2461    fn f32_single_column() {
2462        required_and_optional::<Float32Array, _>((0..SMALL_SIZE).map(|i| i as f32));
2463    }
2464
2465    #[test]
2466    fn f64_single_column() {
2467        required_and_optional::<Float64Array, _>((0..SMALL_SIZE).map(|i| i as f64));
2468    }
2469
2470    // The timestamp array types don't implement From<Vec<T>> because they need the timezone
2471    // argument, and they also don't support building from a Vec<Option<T>>, so call
2472    // one_column_roundtrip manually instead of calling required_and_optional for these tests.
2473
2474    #[test]
2475    fn timestamp_second_single_column() {
2476        let raw_values: Vec<_> = (0..SMALL_SIZE as i64).collect();
2477        let values = Arc::new(TimestampSecondArray::from(raw_values));
2478
2479        one_column_roundtrip(values, false);
2480    }
2481
2482    #[test]
2483    fn timestamp_millisecond_single_column() {
2484        let raw_values: Vec<_> = (0..SMALL_SIZE as i64).collect();
2485        let values = Arc::new(TimestampMillisecondArray::from(raw_values));
2486
2487        one_column_roundtrip(values, false);
2488    }
2489
2490    #[test]
2491    fn timestamp_microsecond_single_column() {
2492        let raw_values: Vec<_> = (0..SMALL_SIZE as i64).collect();
2493        let values = Arc::new(TimestampMicrosecondArray::from(raw_values));
2494
2495        one_column_roundtrip(values, false);
2496    }
2497
2498    #[test]
2499    fn timestamp_nanosecond_single_column() {
2500        let raw_values: Vec<_> = (0..SMALL_SIZE as i64).collect();
2501        let values = Arc::new(TimestampNanosecondArray::from(raw_values));
2502
2503        one_column_roundtrip(values, false);
2504    }
2505
2506    #[test]
2507    fn date32_single_column() {
2508        required_and_optional::<Date32Array, _>(0..SMALL_SIZE as i32);
2509    }
2510
2511    #[test]
2512    fn date64_single_column() {
2513        // Date64 must be a multiple of 86400000, see ARROW-10925
2514        required_and_optional::<Date64Array, _>(
2515            (0..(SMALL_SIZE as i64 * 86400000)).step_by(86400000),
2516        );
2517    }
2518
2519    #[test]
2520    fn time32_second_single_column() {
2521        required_and_optional::<Time32SecondArray, _>(0..SMALL_SIZE as i32);
2522    }
2523
2524    #[test]
2525    fn time32_millisecond_single_column() {
2526        required_and_optional::<Time32MillisecondArray, _>(0..SMALL_SIZE as i32);
2527    }
2528
2529    #[test]
2530    fn time64_microsecond_single_column() {
2531        required_and_optional::<Time64MicrosecondArray, _>(0..SMALL_SIZE as i64);
2532    }
2533
2534    #[test]
2535    fn time64_nanosecond_single_column() {
2536        required_and_optional::<Time64NanosecondArray, _>(0..SMALL_SIZE as i64);
2537    }
2538
2539    #[test]
2540    fn duration_second_single_column() {
2541        required_and_optional::<DurationSecondArray, _>(0..SMALL_SIZE as i64);
2542    }
2543
2544    #[test]
2545    fn duration_millisecond_single_column() {
2546        required_and_optional::<DurationMillisecondArray, _>(0..SMALL_SIZE as i64);
2547    }
2548
2549    #[test]
2550    fn duration_microsecond_single_column() {
2551        required_and_optional::<DurationMicrosecondArray, _>(0..SMALL_SIZE as i64);
2552    }
2553
2554    #[test]
2555    fn duration_nanosecond_single_column() {
2556        required_and_optional::<DurationNanosecondArray, _>(0..SMALL_SIZE as i64);
2557    }
2558
2559    #[test]
2560    fn interval_year_month_single_column() {
2561        required_and_optional::<IntervalYearMonthArray, _>(0..SMALL_SIZE as i32);
2562    }
2563
2564    #[test]
2565    fn interval_day_time_single_column() {
2566        required_and_optional::<IntervalDayTimeArray, _>(vec![
2567            IntervalDayTime::new(0, 1),
2568            IntervalDayTime::new(0, 3),
2569            IntervalDayTime::new(3, -2),
2570            IntervalDayTime::new(-200, 4),
2571        ]);
2572    }
2573
2574    #[test]
2575    #[should_panic(
2576        expected = "Attempting to write an Arrow interval type MonthDayNano to parquet that is not yet implemented"
2577    )]
2578    fn interval_month_day_nano_single_column() {
2579        required_and_optional::<IntervalMonthDayNanoArray, _>(vec![
2580            IntervalMonthDayNano::new(0, 1, 5),
2581            IntervalMonthDayNano::new(0, 3, 2),
2582            IntervalMonthDayNano::new(3, -2, -5),
2583            IntervalMonthDayNano::new(-200, 4, -1),
2584        ]);
2585    }
2586
2587    #[test]
2588    fn binary_single_column() {
2589        let one_vec: Vec<u8> = (0..SMALL_SIZE as u8).collect();
2590        let many_vecs: Vec<_> = std::iter::repeat(one_vec).take(SMALL_SIZE).collect();
2591        let many_vecs_iter = many_vecs.iter().map(|v| v.as_slice());
2592
2593        // BinaryArrays can't be built from Vec<Option<&str>>, so only call `values_required`
2594        values_required::<BinaryArray, _>(many_vecs_iter);
2595    }
2596
2597    #[test]
2598    fn binary_view_single_column() {
2599        let one_vec: Vec<u8> = (0..SMALL_SIZE as u8).collect();
2600        let many_vecs: Vec<_> = std::iter::repeat(one_vec).take(SMALL_SIZE).collect();
2601        let many_vecs_iter = many_vecs.iter().map(|v| v.as_slice());
2602
2603        // BinaryViewArrays can't be built from Vec<Option<&str>>, so only call `values_required`
2604        values_required::<BinaryViewArray, _>(many_vecs_iter);
2605    }
2606
2607    #[test]
2608    fn i32_column_bloom_filter_at_end() {
2609        let array = Arc::new(Int32Array::from_iter(0..SMALL_SIZE as i32));
2610        let mut options = RoundTripOptions::new(array, false);
2611        options.bloom_filter = true;
2612        options.bloom_filter_position = BloomFilterPosition::End;
2613
2614        let files = one_column_roundtrip_with_options(options);
2615        check_bloom_filter(
2616            files,
2617            "col".to_string(),
2618            (0..SMALL_SIZE as i32).collect(),
2619            (SMALL_SIZE as i32 + 1..SMALL_SIZE as i32 + 10).collect(),
2620        );
2621    }
2622
2623    #[test]
2624    fn i32_column_bloom_filter() {
2625        let array = Arc::new(Int32Array::from_iter(0..SMALL_SIZE as i32));
2626        let mut options = RoundTripOptions::new(array, false);
2627        options.bloom_filter = true;
2628
2629        let files = one_column_roundtrip_with_options(options);
2630        check_bloom_filter(
2631            files,
2632            "col".to_string(),
2633            (0..SMALL_SIZE as i32).collect(),
2634            (SMALL_SIZE as i32 + 1..SMALL_SIZE as i32 + 10).collect(),
2635        );
2636    }
2637
2638    #[test]
2639    fn binary_column_bloom_filter() {
2640        let one_vec: Vec<u8> = (0..SMALL_SIZE as u8).collect();
2641        let many_vecs: Vec<_> = std::iter::repeat(one_vec).take(SMALL_SIZE).collect();
2642        let many_vecs_iter = many_vecs.iter().map(|v| v.as_slice());
2643
2644        let array = Arc::new(BinaryArray::from_iter_values(many_vecs_iter));
2645        let mut options = RoundTripOptions::new(array, false);
2646        options.bloom_filter = true;
2647
2648        let files = one_column_roundtrip_with_options(options);
2649        check_bloom_filter(
2650            files,
2651            "col".to_string(),
2652            many_vecs,
2653            vec![vec![(SMALL_SIZE + 1) as u8]],
2654        );
2655    }
2656
2657    #[test]
2658    fn empty_string_null_column_bloom_filter() {
2659        let raw_values: Vec<_> = (0..SMALL_SIZE).map(|i| i.to_string()).collect();
2660        let raw_strs = raw_values.iter().map(|s| s.as_str());
2661
2662        let array = Arc::new(StringArray::from_iter_values(raw_strs));
2663        let mut options = RoundTripOptions::new(array, false);
2664        options.bloom_filter = true;
2665
2666        let files = one_column_roundtrip_with_options(options);
2667
2668        let optional_raw_values: Vec<_> = raw_values
2669            .iter()
2670            .enumerate()
2671            .filter_map(|(i, v)| if i % 2 == 0 { None } else { Some(v.as_str()) })
2672            .collect();
2673        // For null slots, empty string should not be in bloom filter.
2674        check_bloom_filter(files, "col".to_string(), optional_raw_values, vec![""]);
2675    }
2676
2677    #[test]
2678    fn large_binary_single_column() {
2679        let one_vec: Vec<u8> = (0..SMALL_SIZE as u8).collect();
2680        let many_vecs: Vec<_> = std::iter::repeat(one_vec).take(SMALL_SIZE).collect();
2681        let many_vecs_iter = many_vecs.iter().map(|v| v.as_slice());
2682
2683        // LargeBinaryArrays can't be built from Vec<Option<&str>>, so only call `values_required`
2684        values_required::<LargeBinaryArray, _>(many_vecs_iter);
2685    }
2686
2687    #[test]
2688    fn fixed_size_binary_single_column() {
2689        let mut builder = FixedSizeBinaryBuilder::new(4);
2690        builder.append_value(b"0123").unwrap();
2691        builder.append_null();
2692        builder.append_value(b"8910").unwrap();
2693        builder.append_value(b"1112").unwrap();
2694        let array = Arc::new(builder.finish());
2695
2696        one_column_roundtrip(array, true);
2697    }
2698
2699    #[test]
2700    fn string_single_column() {
2701        let raw_values: Vec<_> = (0..SMALL_SIZE).map(|i| i.to_string()).collect();
2702        let raw_strs = raw_values.iter().map(|s| s.as_str());
2703
2704        required_and_optional::<StringArray, _>(raw_strs);
2705    }
2706
2707    #[test]
2708    fn large_string_single_column() {
2709        let raw_values: Vec<_> = (0..SMALL_SIZE).map(|i| i.to_string()).collect();
2710        let raw_strs = raw_values.iter().map(|s| s.as_str());
2711
2712        required_and_optional::<LargeStringArray, _>(raw_strs);
2713    }
2714
2715    #[test]
2716    fn string_view_single_column() {
2717        let raw_values: Vec<_> = (0..SMALL_SIZE).map(|i| i.to_string()).collect();
2718        let raw_strs = raw_values.iter().map(|s| s.as_str());
2719
2720        required_and_optional::<StringViewArray, _>(raw_strs);
2721    }
2722
2723    #[test]
2724    fn null_list_single_column() {
2725        let null_field = Field::new_list_field(DataType::Null, true);
2726        let list_field = Field::new("emptylist", DataType::List(Arc::new(null_field)), true);
2727
2728        let schema = Schema::new(vec![list_field]);
2729
2730        // Build [[], null, [null, null]]
2731        let a_values = NullArray::new(2);
2732        let a_value_offsets = arrow::buffer::Buffer::from([0, 0, 0, 2].to_byte_slice());
2733        let a_list_data = ArrayData::builder(DataType::List(Arc::new(Field::new_list_field(
2734            DataType::Null,
2735            true,
2736        ))))
2737        .len(3)
2738        .add_buffer(a_value_offsets)
2739        .null_bit_buffer(Some(Buffer::from([0b00000101])))
2740        .add_child_data(a_values.into_data())
2741        .build()
2742        .unwrap();
2743
2744        let a = ListArray::from(a_list_data);
2745
2746        assert!(a.is_valid(0));
2747        assert!(!a.is_valid(1));
2748        assert!(a.is_valid(2));
2749
2750        assert_eq!(a.value(0).len(), 0);
2751        assert_eq!(a.value(2).len(), 2);
2752        assert_eq!(a.value(2).logical_nulls().unwrap().null_count(), 2);
2753
2754        let batch = RecordBatch::try_new(Arc::new(schema), vec![Arc::new(a)]).unwrap();
2755        roundtrip(batch, None);
2756    }
2757
2758    #[test]
2759    fn list_single_column() {
2760        let a_values = Int32Array::from(vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10]);
2761        let a_value_offsets = arrow::buffer::Buffer::from([0, 1, 3, 3, 6, 10].to_byte_slice());
2762        let a_list_data = ArrayData::builder(DataType::List(Arc::new(Field::new_list_field(
2763            DataType::Int32,
2764            false,
2765        ))))
2766        .len(5)
2767        .add_buffer(a_value_offsets)
2768        .null_bit_buffer(Some(Buffer::from([0b00011011])))
2769        .add_child_data(a_values.into_data())
2770        .build()
2771        .unwrap();
2772
2773        assert_eq!(a_list_data.null_count(), 1);
2774
2775        let a = ListArray::from(a_list_data);
2776        let values = Arc::new(a);
2777
2778        one_column_roundtrip(values, true);
2779    }
2780
2781    #[test]
2782    fn large_list_single_column() {
2783        let a_values = Int32Array::from(vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10]);
2784        let a_value_offsets = arrow::buffer::Buffer::from([0i64, 1, 3, 3, 6, 10].to_byte_slice());
2785        let a_list_data = ArrayData::builder(DataType::LargeList(Arc::new(Field::new(
2786            "large_item",
2787            DataType::Int32,
2788            true,
2789        ))))
2790        .len(5)
2791        .add_buffer(a_value_offsets)
2792        .add_child_data(a_values.into_data())
2793        .null_bit_buffer(Some(Buffer::from([0b00011011])))
2794        .build()
2795        .unwrap();
2796
2797        // I think this setup is incorrect because this should pass
2798        assert_eq!(a_list_data.null_count(), 1);
2799
2800        let a = LargeListArray::from(a_list_data);
2801        let values = Arc::new(a);
2802
2803        one_column_roundtrip(values, true);
2804    }
2805
2806    #[test]
2807    fn list_nested_nulls() {
2808        use arrow::datatypes::Int32Type;
2809        let data = vec![
2810            Some(vec![Some(1)]),
2811            Some(vec![Some(2), Some(3)]),
2812            None,
2813            Some(vec![Some(4), Some(5), None]),
2814            Some(vec![None]),
2815            Some(vec![Some(6), Some(7)]),
2816        ];
2817
2818        let list = ListArray::from_iter_primitive::<Int32Type, _, _>(data.clone());
2819        one_column_roundtrip(Arc::new(list), true);
2820
2821        let list = LargeListArray::from_iter_primitive::<Int32Type, _, _>(data);
2822        one_column_roundtrip(Arc::new(list), true);
2823    }
2824
2825    #[test]
2826    fn struct_single_column() {
2827        let a_values = Int32Array::from(vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10]);
2828        let struct_field_a = Arc::new(Field::new("f", DataType::Int32, false));
2829        let s = StructArray::from(vec![(struct_field_a, Arc::new(a_values) as ArrayRef)]);
2830
2831        let values = Arc::new(s);
2832        one_column_roundtrip(values, false);
2833    }
2834
2835    #[test]
2836    fn list_and_map_coerced_names() {
2837        // Create map and list with non-Parquet naming
2838        let list_field =
2839            Field::new_list("my_list", Field::new("item", DataType::Int32, false), false);
2840        let map_field = Field::new_map(
2841            "my_map",
2842            "entries",
2843            Field::new("keys", DataType::Int32, false),
2844            Field::new("values", DataType::Int32, true),
2845            false,
2846            true,
2847        );
2848
2849        let list_array = create_random_array(&list_field, 100, 0.0, 0.0).unwrap();
2850        let map_array = create_random_array(&map_field, 100, 0.0, 0.0).unwrap();
2851
2852        let arrow_schema = Arc::new(Schema::new(vec![list_field, map_field]));
2853
2854        // Write data to Parquet but coerce names to match spec
2855        let props = Some(WriterProperties::builder().set_coerce_types(true).build());
2856        let file = tempfile::tempfile().unwrap();
2857        let mut writer =
2858            ArrowWriter::try_new(file.try_clone().unwrap(), arrow_schema.clone(), props).unwrap();
2859
2860        let batch = RecordBatch::try_new(arrow_schema, vec![list_array, map_array]).unwrap();
2861        writer.write(&batch).unwrap();
2862        let file_metadata = writer.close().unwrap();
2863
2864        // Coerced name of "item" should be "element"
2865        assert_eq!(file_metadata.schema[3].name, "element");
2866        // Coerced name of "entries" should be "key_value"
2867        assert_eq!(file_metadata.schema[5].name, "key_value");
2868        // Coerced name of "keys" should be "key"
2869        assert_eq!(file_metadata.schema[6].name, "key");
2870        // Coerced name of "values" should be "value"
2871        assert_eq!(file_metadata.schema[7].name, "value");
2872
2873        // Double check schema after reading from the file
2874        let reader = SerializedFileReader::new(file).unwrap();
2875        let file_schema = reader.metadata().file_metadata().schema();
2876        let fields = file_schema.get_fields();
2877        let list_field = &fields[0].get_fields()[0];
2878        assert_eq!(list_field.get_fields()[0].name(), "element");
2879        let map_field = &fields[1].get_fields()[0];
2880        assert_eq!(map_field.name(), "key_value");
2881        assert_eq!(map_field.get_fields()[0].name(), "key");
2882        assert_eq!(map_field.get_fields()[1].name(), "value");
2883    }
2884
2885    #[test]
2886    fn fallback_flush_data_page() {
2887        // Tests that Fallback::flush_data_page clears all buffers correctly
2888        let raw_values: Vec<_> = (0..MEDIUM_SIZE).map(|i| i.to_string()).collect();
2889        let values = Arc::new(StringArray::from(raw_values));
2890        let encodings = vec![
2891            Encoding::DELTA_BYTE_ARRAY,
2892            Encoding::DELTA_LENGTH_BYTE_ARRAY,
2893        ];
2894        let data_type = values.data_type().clone();
2895        let schema = Arc::new(Schema::new(vec![Field::new("col", data_type, false)]));
2896        let expected_batch = RecordBatch::try_new(schema, vec![values]).unwrap();
2897
2898        let row_group_sizes = [1024, SMALL_SIZE, SMALL_SIZE / 2, SMALL_SIZE / 2 + 1, 10];
2899        let data_page_size_limit: usize = 32;
2900        let write_batch_size: usize = 16;
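        // The tiny page size limit and small write batch size force data pages to be
        // flushed repeatedly, exercising the fallback (non-dictionary) byte array
        // encoder's buffer reset on every flush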
2901
2902        for encoding in &encodings {
2903            for row_group_size in row_group_sizes {
2904                let props = WriterProperties::builder()
2905                    .set_writer_version(WriterVersion::PARQUET_2_0)
2906                    .set_max_row_group_size(row_group_size)
2907                    .set_dictionary_enabled(false)
2908                    .set_encoding(*encoding)
2909                    .set_data_page_size_limit(data_page_size_limit)
2910                    .set_write_batch_size(write_batch_size)
2911                    .build();
2912
2913                roundtrip_opts_with_array_validation(&expected_batch, props, |a, b| {
2914                    let string_array_a = StringArray::from(a.clone());
2915                    let string_array_b = StringArray::from(b.clone());
2916                    let vec_a: Vec<&str> = string_array_a.iter().map(|v| v.unwrap()).collect();
2917                    let vec_b: Vec<&str> = string_array_b.iter().map(|v| v.unwrap()).collect();
2918                    assert_eq!(
2919                        vec_a, vec_b,
2920                        "failed for encoder: {encoding:?} and row_group_size: {row_group_size:?}"
2921                    );
2922                });
2923            }
2924        }
2925    }
2926
2927    #[test]
2928    fn arrow_writer_string_dictionary() {
2929        // define schema
2930        #[allow(deprecated)]
2931        let schema = Arc::new(Schema::new(vec![Field::new_dict(
2932            "dictionary",
2933            DataType::Dictionary(Box::new(DataType::Int32), Box::new(DataType::Utf8)),
2934            true,
2935            42,
2936            true,
2937        )]));
2938
2939        // create some data
2940        let d: Int32DictionaryArray = [Some("alpha"), None, Some("beta"), Some("alpha")]
2941            .iter()
2942            .copied()
2943            .collect();
2944
2945        // build a record batch
2946        one_column_roundtrip_with_schema(Arc::new(d), schema);
2947    }
2948
2949    #[test]
2950    fn arrow_writer_primitive_dictionary() {
2951        // define schema
2952        #[allow(deprecated)]
2953        let schema = Arc::new(Schema::new(vec![Field::new_dict(
2954            "dictionary",
2955            DataType::Dictionary(Box::new(DataType::UInt8), Box::new(DataType::UInt32)),
2956            true,
2957            42,
2958            true,
2959        )]));
2960
2961        // create some data
2962        let mut builder = PrimitiveDictionaryBuilder::<UInt8Type, UInt32Type>::new();
2963        builder.append(12345678).unwrap();
2964        builder.append_null();
2965        builder.append(22345678).unwrap();
2966        builder.append(12345678).unwrap();
2967        let d = builder.finish();
2968
2969        one_column_roundtrip_with_schema(Arc::new(d), schema);
2970    }
2971
2972    #[test]
2973    fn arrow_writer_decimal128_dictionary() {
2974        let integers = vec![12345, 56789, 34567];
2975
2976        let keys = UInt8Array::from(vec![Some(0), None, Some(1), Some(2), Some(1)]);
2977
2978        let values = Decimal128Array::from(integers.clone())
2979            .with_precision_and_scale(5, 2)
2980            .unwrap();
2981
2982        let array = DictionaryArray::new(keys, Arc::new(values));
2983        one_column_roundtrip(Arc::new(array.clone()), true);
2984
2985        let values = Decimal128Array::from(integers)
2986            .with_precision_and_scale(12, 2)
2987            .unwrap();
2988
2989        let array = array.with_values(Arc::new(values));
2990        one_column_roundtrip(Arc::new(array), true);
2991    }
2992
2993    #[test]
2994    fn arrow_writer_decimal256_dictionary() {
2995        let integers = vec![
2996            i256::from_i128(12345),
2997            i256::from_i128(56789),
2998            i256::from_i128(34567),
2999        ];
3000
3001        let keys = UInt8Array::from(vec![Some(0), None, Some(1), Some(2), Some(1)]);
3002
3003        let values = Decimal256Array::from(integers.clone())
3004            .with_precision_and_scale(5, 2)
3005            .unwrap();
3006
3007        let array = DictionaryArray::new(keys, Arc::new(values));
3008        one_column_roundtrip(Arc::new(array.clone()), true);
3009
3010        let values = Decimal256Array::from(integers)
3011            .with_precision_and_scale(12, 2)
3012            .unwrap();
3013
3014        let array = array.with_values(Arc::new(values));
3015        one_column_roundtrip(Arc::new(array), true);
3016    }
3017
3018    #[test]
3019    fn arrow_writer_string_dictionary_unsigned_index() {
3020        // define schema
3021        #[allow(deprecated)]
3022        let schema = Arc::new(Schema::new(vec![Field::new_dict(
3023            "dictionary",
3024            DataType::Dictionary(Box::new(DataType::UInt8), Box::new(DataType::Utf8)),
3025            true,
3026            42,
3027            true,
3028        )]));
3029
3030        // create some data
3031        let d: UInt8DictionaryArray = [Some("alpha"), None, Some("beta"), Some("alpha")]
3032            .iter()
3033            .copied()
3034            .collect();
3035
3036        one_column_roundtrip_with_schema(Arc::new(d), schema);
3037    }
3038
3039    #[test]
3040    fn u32_min_max() {
3041        // check values roundtrip through parquet
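        // UInt32 is stored using Parquet's signed INT32 physical type, so the chunk
        // statistics come back as i32 and are reinterpreted as u32 below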
3042        let src = [
3043            u32::MIN,
3044            u32::MIN + 1,
3045            (i32::MAX as u32) - 1,
3046            i32::MAX as u32,
3047            (i32::MAX as u32) + 1,
3048            u32::MAX - 1,
3049            u32::MAX,
3050        ];
3051        let values = Arc::new(UInt32Array::from_iter_values(src.iter().cloned()));
3052        let files = one_column_roundtrip(values, false);
3053
3054        for file in files {
3055            // check statistics are valid
3056            let reader = SerializedFileReader::new(file).unwrap();
3057            let metadata = reader.metadata();
3058
3059            let mut row_offset = 0;
3060            for row_group in metadata.row_groups() {
3061                assert_eq!(row_group.num_columns(), 1);
3062                let column = row_group.column(0);
3063
3064                let num_values = column.num_values() as usize;
3065                let src_slice = &src[row_offset..row_offset + num_values];
3066                row_offset += column.num_values() as usize;
3067
3068                let stats = column.statistics().unwrap();
3069                if let Statistics::Int32(stats) = stats {
3070                    assert_eq!(
3071                        *stats.min_opt().unwrap() as u32,
3072                        *src_slice.iter().min().unwrap()
3073                    );
3074                    assert_eq!(
3075                        *stats.max_opt().unwrap() as u32,
3076                        *src_slice.iter().max().unwrap()
3077                    );
3078                } else {
3079                    panic!("Statistics::Int32 missing")
3080                }
3081            }
3082        }
3083    }
3084
3085    #[test]
3086    fn u64_min_max() {
3087        // check values roundtrip through parquet
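        // UInt64 is stored using Parquet's signed INT64 physical type, so the chunk
        // statistics come back as i64 and are reinterpreted as u64 below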
3088        let src = [
3089            u64::MIN,
3090            u64::MIN + 1,
3091            (i64::MAX as u64) - 1,
3092            i64::MAX as u64,
3093            (i64::MAX as u64) + 1,
3094            u64::MAX - 1,
3095            u64::MAX,
3096        ];
3097        let values = Arc::new(UInt64Array::from_iter_values(src.iter().cloned()));
3098        let files = one_column_roundtrip(values, false);
3099
3100        for file in files {
3101            // check statistics are valid
3102            let reader = SerializedFileReader::new(file).unwrap();
3103            let metadata = reader.metadata();
3104
3105            let mut row_offset = 0;
3106            for row_group in metadata.row_groups() {
3107                assert_eq!(row_group.num_columns(), 1);
3108                let column = row_group.column(0);
3109
3110                let num_values = column.num_values() as usize;
3111                let src_slice = &src[row_offset..row_offset + num_values];
3112                row_offset += column.num_values() as usize;
3113
3114                let stats = column.statistics().unwrap();
3115                if let Statistics::Int64(stats) = stats {
3116                    assert_eq!(
3117                        *stats.min_opt().unwrap() as u64,
3118                        *src_slice.iter().min().unwrap()
3119                    );
3120                    assert_eq!(
3121                        *stats.max_opt().unwrap() as u64,
3122                        *src_slice.iter().max().unwrap()
3123                    );
3124                } else {
3125                    panic!("Statistics::Int64 missing")
3126                }
3127            }
3128        }
3129    }
3130
3131    #[test]
3132    fn statistics_null_counts_only_nulls() {
3133        // check that null-count statistics are correct for columns containing only NULLs
3134        let values = Arc::new(UInt64Array::from(vec![None, None]));
3135        let files = one_column_roundtrip(values, true);
3136
3137        for file in files {
3138            // check statistics are valid
3139            let reader = SerializedFileReader::new(file).unwrap();
3140            let metadata = reader.metadata();
3141            assert_eq!(metadata.num_row_groups(), 1);
3142            let row_group = metadata.row_group(0);
3143            assert_eq!(row_group.num_columns(), 1);
3144            let column = row_group.column(0);
3145            let stats = column.statistics().unwrap();
3146            assert_eq!(stats.null_count_opt(), Some(2));
3147        }
3148    }
3149
3150    #[test]
3151    fn test_list_of_struct_roundtrip() {
3152        // define schema
3153        let int_field = Field::new("a", DataType::Int32, true);
3154        let int_field2 = Field::new("b", DataType::Int32, true);
3155
3156        let int_builder = Int32Builder::with_capacity(10);
3157        let int_builder2 = Int32Builder::with_capacity(10);
3158
3159        let struct_builder = StructBuilder::new(
3160            vec![int_field, int_field2],
3161            vec![Box::new(int_builder), Box::new(int_builder2)],
3162        );
3163        let mut list_builder = ListBuilder::new(struct_builder);
3164
3165        // Construct the following array
3166        // [{a: 1, b: 2}], [], null, [null, null], [{a: null, b: 3}], [{a: 2, b: null}]
3167
3168        // [{a: 1, b: 2}]
3169        let values = list_builder.values();
3170        values
3171            .field_builder::<Int32Builder>(0)
3172            .unwrap()
3173            .append_value(1);
3174        values
3175            .field_builder::<Int32Builder>(1)
3176            .unwrap()
3177            .append_value(2);
3178        values.append(true);
3179        list_builder.append(true);
3180
3181        // []
3182        list_builder.append(true);
3183
3184        // null
3185        list_builder.append(false);
3186
3187        // [null, null]
3188        let values = list_builder.values();
3189        values
3190            .field_builder::<Int32Builder>(0)
3191            .unwrap()
3192            .append_null();
3193        values
3194            .field_builder::<Int32Builder>(1)
3195            .unwrap()
3196            .append_null();
3197        values.append(false);
3198        values
3199            .field_builder::<Int32Builder>(0)
3200            .unwrap()
3201            .append_null();
3202        values
3203            .field_builder::<Int32Builder>(1)
3204            .unwrap()
3205            .append_null();
3206        values.append(false);
3207        list_builder.append(true);
3208
3209        // [{a: null, b: 3}]
3210        let values = list_builder.values();
3211        values
3212            .field_builder::<Int32Builder>(0)
3213            .unwrap()
3214            .append_null();
3215        values
3216            .field_builder::<Int32Builder>(1)
3217            .unwrap()
3218            .append_value(3);
3219        values.append(true);
3220        list_builder.append(true);
3221
3222        // [{a: 2, b: null}]
3223        let values = list_builder.values();
3224        values
3225            .field_builder::<Int32Builder>(0)
3226            .unwrap()
3227            .append_value(2);
3228        values
3229            .field_builder::<Int32Builder>(1)
3230            .unwrap()
3231            .append_null();
3232        values.append(true);
3233        list_builder.append(true);
3234
3235        let array = Arc::new(list_builder.finish());
3236
3237        one_column_roundtrip(array, true);
3238    }
3239
3240    fn row_group_sizes(metadata: &ParquetMetaData) -> Vec<i64> {
3241        metadata.row_groups().iter().map(|x| x.num_rows()).collect()
3242    }
3243
3244    #[test]
3245    fn test_aggregates_records() {
3246        let arrays = [
3247            Int32Array::from((0..100).collect::<Vec<_>>()),
3248            Int32Array::from((0..50).collect::<Vec<_>>()),
3249            Int32Array::from((200..500).collect::<Vec<_>>()),
3250        ];
3251
3252        let schema = Arc::new(Schema::new(vec![Field::new(
3253            "int",
3254            ArrowDataType::Int32,
3255            false,
3256        )]));
3257
3258        let file = tempfile::tempfile().unwrap();
3259
3260        let props = WriterProperties::builder()
3261            .set_max_row_group_size(200)
3262            .build();
3263
3264        let mut writer =
3265            ArrowWriter::try_new(file.try_clone().unwrap(), schema.clone(), Some(props)).unwrap();
3266
3267        for array in arrays {
3268            let batch = RecordBatch::try_new(schema.clone(), vec![Arc::new(array)]).unwrap();
3269            writer.write(&batch).unwrap();
3270        }
3271
3272        writer.close().unwrap();
3273
3274        let builder = ParquetRecordBatchReaderBuilder::try_new(file).unwrap();
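        // 100 + 50 + 300 = 450 rows written with max_row_group_size = 200 should
        // split into row groups of 200, 200, and 50 rows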
3275        assert_eq!(&row_group_sizes(builder.metadata()), &[200, 200, 50]);
3276
3277        let batches = builder
3278            .with_batch_size(100)
3279            .build()
3280            .unwrap()
3281            .collect::<ArrowResult<Vec<_>>>()
3282            .unwrap();
3283
3284        assert_eq!(batches.len(), 5);
3285        assert!(batches.iter().all(|x| x.num_columns() == 1));
3286
3287        let batch_sizes: Vec<_> = batches.iter().map(|x| x.num_rows()).collect();
3288
3289        assert_eq!(&batch_sizes, &[100, 100, 100, 100, 50]);
3290
3291        let values: Vec<_> = batches
3292            .iter()
3293            .flat_map(|x| {
3294                x.column(0)
3295                    .as_any()
3296                    .downcast_ref::<Int32Array>()
3297                    .unwrap()
3298                    .values()
3299                    .iter()
3300                    .cloned()
3301            })
3302            .collect();
3303
3304        let expected_values: Vec<_> = [0..100, 0..50, 200..500].into_iter().flatten().collect();
3305        assert_eq!(&values, &expected_values)
3306    }
3307
3308    #[test]
3309    fn complex_aggregate() {
3310        // Tests aggregating nested data
3311        let field_a = Arc::new(Field::new("leaf_a", DataType::Int32, false));
3312        let field_b = Arc::new(Field::new("leaf_b", DataType::Int32, true));
3313        let struct_a = Arc::new(Field::new(
3314            "struct_a",
3315            DataType::Struct(vec![field_a.clone(), field_b.clone()].into()),
3316            true,
3317        ));
3318
3319        let list_a = Arc::new(Field::new("list", DataType::List(struct_a), true));
3320        let struct_b = Arc::new(Field::new(
3321            "struct_b",
3322            DataType::Struct(vec![list_a.clone()].into()),
3323            false,
3324        ));
3325
3326        let schema = Arc::new(Schema::new(vec![struct_b]));
3327
3328        // create nested data
3329        let field_a_array = Int32Array::from(vec![1, 2, 3, 4, 5, 6]);
3330        let field_b_array =
3331            Int32Array::from_iter(vec![Some(1), None, Some(2), None, None, Some(6)]);
3332
3333        let struct_a_array = StructArray::from(vec![
3334            (field_a.clone(), Arc::new(field_a_array) as ArrayRef),
3335            (field_b.clone(), Arc::new(field_b_array) as ArrayRef),
3336        ]);
3337
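        // five lists with offsets [0, 1, 1, 3, 3, 5]; the validity buffer marks lists 1 and 3 as null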
3338        let list_data = ArrayDataBuilder::new(list_a.data_type().clone())
3339            .len(5)
3340            .add_buffer(Buffer::from_iter(vec![
3341                0_i32, 1_i32, 1_i32, 3_i32, 3_i32, 5_i32,
3342            ]))
3343            .null_bit_buffer(Some(Buffer::from_iter(vec![
3344                true, false, true, false, true,
3345            ])))
3346            .child_data(vec![struct_a_array.into_data()])
3347            .build()
3348            .unwrap();
3349
3350        let list_a_array = Arc::new(ListArray::from(list_data)) as ArrayRef;
3351        let struct_b_array = StructArray::from(vec![(list_a.clone(), list_a_array)]);
3352
3353        let batch1 =
3354            RecordBatch::try_from_iter(vec![("struct_b", Arc::new(struct_b_array) as ArrayRef)])
3355                .unwrap();
3356
3357        let field_a_array = Int32Array::from(vec![6, 7, 8, 9, 10]);
3358        let field_b_array = Int32Array::from_iter(vec![None, None, None, Some(1), None]);
3359
3360        let struct_a_array = StructArray::from(vec![
3361            (field_a, Arc::new(field_a_array) as ArrayRef),
3362            (field_b, Arc::new(field_b_array) as ArrayRef),
3363        ]);
3364
3365        let list_data = ArrayDataBuilder::new(list_a.data_type().clone())
3366            .len(2)
3367            .add_buffer(Buffer::from_iter(vec![0_i32, 4_i32, 5_i32]))
3368            .child_data(vec![struct_a_array.into_data()])
3369            .build()
3370            .unwrap();
3371
3372        let list_a_array = Arc::new(ListArray::from(list_data)) as ArrayRef;
3373        let struct_b_array = StructArray::from(vec![(list_a, list_a_array)]);
3374
3375        let batch2 =
3376            RecordBatch::try_from_iter(vec![("struct_b", Arc::new(struct_b_array) as ArrayRef)])
3377                .unwrap();
3378
3379        let batches = &[batch1, batch2];
3380
3381        // Verify data is as expected
3382
3383        let expected = r#"
3384            +-------------------------------------------------------------------------------------------------------+
3385            | struct_b                                                                                              |
3386            +-------------------------------------------------------------------------------------------------------+
3387            | {list: [{leaf_a: 1, leaf_b: 1}]}                                                                      |
3388            | {list: }                                                                                              |
3389            | {list: [{leaf_a: 2, leaf_b: }, {leaf_a: 3, leaf_b: 2}]}                                               |
3390            | {list: }                                                                                              |
3391            | {list: [{leaf_a: 4, leaf_b: }, {leaf_a: 5, leaf_b: }]}                                                |
3392            | {list: [{leaf_a: 6, leaf_b: }, {leaf_a: 7, leaf_b: }, {leaf_a: 8, leaf_b: }, {leaf_a: 9, leaf_b: 1}]} |
3393            | {list: [{leaf_a: 10, leaf_b: }]}                                                                      |
3394            +-------------------------------------------------------------------------------------------------------+
3395        "#.trim().split('\n').map(|x| x.trim()).collect::<Vec<_>>().join("\n");
3396
3397        let actual = pretty_format_batches(batches).unwrap().to_string();
3398        assert_eq!(actual, expected);
3399
3400        // Write data
3401        let file = tempfile::tempfile().unwrap();
3402        let props = WriterProperties::builder()
3403            .set_max_row_group_size(6)
3404            .build();
3405
3406        let mut writer =
3407            ArrowWriter::try_new(file.try_clone().unwrap(), schema, Some(props)).unwrap();
3408
3409        for batch in batches {
3410            writer.write(batch).unwrap();
3411        }
3412        writer.close().unwrap();
3413
3414        // Read Data
3415        // Should have written entire first batch and first row of second to the first row group
3416        // leaving a single row in the second row group
3417
3418        let builder = ParquetRecordBatchReaderBuilder::try_new(file).unwrap();
3419        assert_eq!(&row_group_sizes(builder.metadata()), &[6, 1]);
3420
3421        let batches = builder
3422            .with_batch_size(2)
3423            .build()
3424            .unwrap()
3425            .collect::<ArrowResult<Vec<_>>>()
3426            .unwrap();
3427
3428        assert_eq!(batches.len(), 4);
3429        let batch_counts: Vec<_> = batches.iter().map(|x| x.num_rows()).collect();
3430        assert_eq!(&batch_counts, &[2, 2, 2, 1]);
3431
3432        let actual = pretty_format_batches(&batches).unwrap().to_string();
3433        assert_eq!(actual, expected);
3434    }
3435
3436    #[test]
3437    fn test_arrow_writer_metadata() {
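        // Writing a batch whose schema matches the file schema except for key/value
        // metadata should succeed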
3438        let batch_schema = Schema::new(vec![Field::new("int32", DataType::Int32, false)]);
3439        let file_schema = batch_schema.clone().with_metadata(
3440            vec![("foo".to_string(), "bar".to_string())]
3441                .into_iter()
3442                .collect(),
3443        );
3444
3445        let batch = RecordBatch::try_new(
3446            Arc::new(batch_schema),
3447            vec![Arc::new(Int32Array::from(vec![1, 2, 3, 4])) as _],
3448        )
3449        .unwrap();
3450
3451        let mut buf = Vec::with_capacity(1024);
3452        let mut writer = ArrowWriter::try_new(&mut buf, Arc::new(file_schema), None).unwrap();
3453        writer.write(&batch).unwrap();
3454        writer.close().unwrap();
3455    }
3456
3457    #[test]
3458    fn test_arrow_writer_nullable() {
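        // A non-nullable batch column can be written under a file schema that declares
        // the column nullable; reading back yields the file schema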
3459        let batch_schema = Schema::new(vec![Field::new("int32", DataType::Int32, false)]);
3460        let file_schema = Schema::new(vec![Field::new("int32", DataType::Int32, true)]);
3461        let file_schema = Arc::new(file_schema);
3462
3463        let batch = RecordBatch::try_new(
3464            Arc::new(batch_schema),
3465            vec![Arc::new(Int32Array::from(vec![1, 2, 3, 4])) as _],
3466        )
3467        .unwrap();
3468
3469        let mut buf = Vec::with_capacity(1024);
3470        let mut writer = ArrowWriter::try_new(&mut buf, file_schema.clone(), None).unwrap();
3471        writer.write(&batch).unwrap();
3472        writer.close().unwrap();
3473
3474        let mut read = ParquetRecordBatchReader::try_new(Bytes::from(buf), 1024).unwrap();
3475        let back = read.next().unwrap().unwrap();
3476        assert_eq!(back.schema(), file_schema);
3477        assert_ne!(back.schema(), batch.schema());
3478        assert_eq!(back.column(0).as_ref(), batch.column(0).as_ref());
3479    }
3480
3481    #[test]
3482    fn in_progress_accounting() {
3483        // define schema
3484        let schema = Schema::new(vec![Field::new("a", DataType::Int32, false)]);
3485
3486        // create some data
3487        let a = Int32Array::from(vec![1, 2, 3, 4, 5]);
3488
3489        // build a record batch
3490        let batch = RecordBatch::try_new(Arc::new(schema), vec![Arc::new(a)]).unwrap();
3491
3492        let mut writer = ArrowWriter::try_new(vec![], batch.schema(), None).unwrap();
3493
3494        // starts empty
3495        assert_eq!(writer.in_progress_size(), 0);
3496        assert_eq!(writer.in_progress_rows(), 0);
3497        assert_eq!(writer.memory_size(), 0);
3498        assert_eq!(writer.bytes_written(), 4); // Initial header: the 4-byte "PAR1" magic
3499        writer.write(&batch).unwrap();
3500
3501        // updated on write
3502        let initial_size = writer.in_progress_size();
3503        assert!(initial_size > 0);
3504        assert_eq!(writer.in_progress_rows(), 5);
3505        let initial_memory = writer.memory_size();
3506        assert!(initial_memory > 0);
3507        // memory estimate is at least as large as the estimated encoded size
3508        assert!(
3509            initial_size <= initial_memory,
3510            "{initial_size} <= {initial_memory}"
3511        );
3512
3513        // updated on second write
3514        writer.write(&batch).unwrap();
3515        assert!(writer.in_progress_size() > initial_size);
3516        assert_eq!(writer.in_progress_rows(), 10);
3517        assert!(writer.memory_size() > initial_memory);
3518        assert!(
3519            writer.in_progress_size() <= writer.memory_size(),
3520            "in_progress_size {} <= memory_size {}",
3521            writer.in_progress_size(),
3522            writer.memory_size()
3523        );
3524
3525        // in progress tracking is cleared, but the overall data written is updated
3526        let pre_flush_bytes_written = writer.bytes_written();
3527        writer.flush().unwrap();
3528        assert_eq!(writer.in_progress_size(), 0);
3529        assert_eq!(writer.memory_size(), 0);
3530        assert!(writer.bytes_written() > pre_flush_bytes_written);
3531
3532        writer.close().unwrap();
3533    }
3534
3535    #[test]
3536    fn test_writer_all_null() {
3537        let a = Int32Array::from(vec![1, 2, 3, 4, 5]);
3538        let b = Int32Array::new(vec![0; 5].into(), Some(NullBuffer::new_null(5)));
3539        let batch = RecordBatch::try_from_iter(vec![
3540            ("a", Arc::new(a) as ArrayRef),
3541            ("b", Arc::new(b) as ArrayRef),
3542        ])
3543        .unwrap();
3544
3545        let mut buf = Vec::with_capacity(1024);
3546        let mut writer = ArrowWriter::try_new(&mut buf, batch.schema(), None).unwrap();
3547        writer.write(&batch).unwrap();
3548        writer.close().unwrap();
3549
3550        let bytes = Bytes::from(buf);
3551        let options = ReadOptionsBuilder::new().with_page_index().build();
3552        let reader = SerializedFileReader::new_with_options(bytes, options).unwrap();
3553        let index = reader.metadata().offset_index().unwrap();
3554
3555        assert_eq!(index.len(), 1);
3556        assert_eq!(index[0].len(), 2); // 2 columns
3557        assert_eq!(index[0][0].page_locations().len(), 1); // 1 page
3558        assert_eq!(index[0][1].page_locations().len(), 1); // 1 page
3559    }
3560
3561    #[test]
3562    fn test_disabled_statistics_with_page() {
3563        let file_schema = Schema::new(vec![
3564            Field::new("a", DataType::Utf8, true),
3565            Field::new("b", DataType::Utf8, true),
3566        ]);
3567        let file_schema = Arc::new(file_schema);
3568
3569        let batch = RecordBatch::try_new(
3570            file_schema.clone(),
3571            vec![
3572                Arc::new(StringArray::from(vec!["a", "b", "c", "d"])) as _,
3573                Arc::new(StringArray::from(vec!["w", "x", "y", "z"])) as _,
3574            ],
3575        )
3576        .unwrap();
3577
3578        let props = WriterProperties::builder()
3579            .set_statistics_enabled(EnabledStatistics::None)
3580            .set_column_statistics_enabled("a".into(), EnabledStatistics::Page)
3581            .build();
3582
3583        let mut buf = Vec::with_capacity(1024);
3584        let mut writer = ArrowWriter::try_new(&mut buf, file_schema.clone(), Some(props)).unwrap();
3585        writer.write(&batch).unwrap();
3586
3587        let metadata = writer.close().unwrap();
3588        assert_eq!(metadata.row_groups.len(), 1);
3589        let row_group = &metadata.row_groups[0];
3590        assert_eq!(row_group.columns.len(), 2);
3591        // Column "a" has both offset and column index, as requested
3592        assert!(row_group.columns[0].offset_index_offset.is_some());
3593        assert!(row_group.columns[0].column_index_offset.is_some());
3594        // Column "b" should only have offset index
3595        assert!(row_group.columns[1].offset_index_offset.is_some());
3596        assert!(row_group.columns[1].column_index_offset.is_none());
3597
3598        let options = ReadOptionsBuilder::new().with_page_index().build();
3599        let reader = SerializedFileReader::new_with_options(Bytes::from(buf), options).unwrap();
3600
3601        let row_group = reader.get_row_group(0).unwrap();
3602        let a_col = row_group.metadata().column(0);
3603        let b_col = row_group.metadata().column(1);
3604
3605        // Column chunk of column "a" should have chunk level statistics
3606        if let Statistics::ByteArray(byte_array_stats) = a_col.statistics().unwrap() {
3607            let min = byte_array_stats.min_opt().unwrap();
3608            let max = byte_array_stats.max_opt().unwrap();
3609
3610            assert_eq!(min.as_bytes(), b"a");
3611            assert_eq!(max.as_bytes(), b"d");
3612        } else {
3613            panic!("expecting Statistics::ByteArray");
3614        }
3615
3616        // The column chunk for column "b" shouldn't have statistics
3617        assert!(b_col.statistics().is_none());
3618
3619        let offset_index = reader.metadata().offset_index().unwrap();
3620        assert_eq!(offset_index.len(), 1); // 1 row group
3621        assert_eq!(offset_index[0].len(), 2); // 2 columns
3622
3623        let column_index = reader.metadata().column_index().unwrap();
3624        assert_eq!(column_index.len(), 1); // 1 row group
3625        assert_eq!(column_index[0].len(), 2); // 2 columns
3626
3627        let a_idx = &column_index[0][0];
3628        assert!(matches!(a_idx, Index::BYTE_ARRAY(_)), "{a_idx:?}");
3629        let b_idx = &column_index[0][1];
3630        assert!(matches!(b_idx, Index::NONE), "{b_idx:?}");
3631    }
3632
3633    #[test]
3634    fn test_disabled_statistics_with_chunk() {
3635        let file_schema = Schema::new(vec![
3636            Field::new("a", DataType::Utf8, true),
3637            Field::new("b", DataType::Utf8, true),
3638        ]);
3639        let file_schema = Arc::new(file_schema);
3640
3641        let batch = RecordBatch::try_new(
3642            file_schema.clone(),
3643            vec![
3644                Arc::new(StringArray::from(vec!["a", "b", "c", "d"])) as _,
3645                Arc::new(StringArray::from(vec!["w", "x", "y", "z"])) as _,
3646            ],
3647        )
3648        .unwrap();
3649
3650        let props = WriterProperties::builder()
3651            .set_statistics_enabled(EnabledStatistics::None)
3652            .set_column_statistics_enabled("a".into(), EnabledStatistics::Chunk)
3653            .build();
3654
3655        let mut buf = Vec::with_capacity(1024);
3656        let mut writer = ArrowWriter::try_new(&mut buf, file_schema.clone(), Some(props)).unwrap();
3657        writer.write(&batch).unwrap();
3658
3659        let metadata = writer.close().unwrap();
3660        assert_eq!(metadata.row_groups.len(), 1);
3661        let row_group = &metadata.row_groups[0];
3662        assert_eq!(row_group.columns.len(), 2);
3663        // Column "a" should only have offset index
3664        assert!(row_group.columns[0].offset_index_offset.is_some());
3665        assert!(row_group.columns[0].column_index_offset.is_none());
3666        // Column "b" should only have offset index
3667        assert!(row_group.columns[1].offset_index_offset.is_some());
3668        assert!(row_group.columns[1].column_index_offset.is_none());
3669
3670        let options = ReadOptionsBuilder::new().with_page_index().build();
3671        let reader = SerializedFileReader::new_with_options(Bytes::from(buf), options).unwrap();
3672
3673        let row_group = reader.get_row_group(0).unwrap();
3674        let a_col = row_group.metadata().column(0);
3675        let b_col = row_group.metadata().column(1);
3676
3677        // Column chunk of column "a" should have chunk level statistics
3678        if let Statistics::ByteArray(byte_array_stats) = a_col.statistics().unwrap() {
3679            let min = byte_array_stats.min_opt().unwrap();
3680            let max = byte_array_stats.max_opt().unwrap();
3681
3682            assert_eq!(min.as_bytes(), b"a");
3683            assert_eq!(max.as_bytes(), b"d");
3684        } else {
3685            panic!("expecting Statistics::ByteArray");
3686        }
3687
3688        // The column chunk for column "b" shouldn't have statistics
3689        assert!(b_col.statistics().is_none());
3690
3691        let column_index = reader.metadata().column_index().unwrap();
3692        assert_eq!(column_index.len(), 1); // 1 row group
3693        assert_eq!(column_index[0].len(), 2); // 2 columns
3694
3695        let a_idx = &column_index[0][0];
3696        assert!(matches!(a_idx, Index::NONE), "{a_idx:?}");
3697        let b_idx = &column_index[0][1];
3698        assert!(matches!(b_idx, Index::NONE), "{b_idx:?}");
3699    }
3700
3701    #[test]
3702    fn test_arrow_writer_skip_metadata() {
3703        let batch_schema = Schema::new(vec![Field::new("int32", DataType::Int32, false)]);
3704        let file_schema = Arc::new(batch_schema.clone());
3705
3706        let batch = RecordBatch::try_new(
3707            Arc::new(batch_schema),
3708            vec![Arc::new(Int32Array::from(vec![1, 2, 3, 4])) as _],
3709        )
3710        .unwrap();
3711        let skip_options = ArrowWriterOptions::new().with_skip_arrow_metadata(true);
3712
3713        let mut buf = Vec::with_capacity(1024);
3714        let mut writer =
3715            ArrowWriter::try_new_with_options(&mut buf, file_schema.clone(), skip_options).unwrap();
3716        writer.write(&batch).unwrap();
3717        writer.close().unwrap();
3718
3719        let bytes = Bytes::from(buf);
3720        let reader_builder = ParquetRecordBatchReaderBuilder::try_new(bytes).unwrap();
3721        assert_eq!(file_schema, *reader_builder.schema());
3722        if let Some(key_value_metadata) = reader_builder
3723            .metadata()
3724            .file_metadata()
3725            .key_value_metadata()
3726        {
3727            assert!(!key_value_metadata
3728                .iter()
3729                .any(|kv| kv.key.as_str() == ARROW_SCHEMA_META_KEY));
3730        }
3731    }
3732
3733    #[test]
3734    fn mismatched_schemas() {
3735        let batch_schema = Schema::new(vec![Field::new("count", DataType::Int32, false)]);
3736        let file_schema = Arc::new(Schema::new(vec![Field::new(
3737            "temperature",
3738            DataType::Float64,
3739            false,
3740        )]));
3741
3742        let batch = RecordBatch::try_new(
3743            Arc::new(batch_schema),
3744            vec![Arc::new(Int32Array::from(vec![1, 2, 3, 4])) as _],
3745        )
3746        .unwrap();
3747
3748        let mut buf = Vec::with_capacity(1024);
3749        let mut writer = ArrowWriter::try_new(&mut buf, file_schema.clone(), None).unwrap();
3750
3751        let err = writer.write(&batch).unwrap_err().to_string();
3752        assert_eq!(
3753            err,
3754            "Arrow: Incompatible type. Field 'temperature' has type Float64, array has type Int32"
3755        );
3756    }
3757
3758    #[test]
3759    // https://github.com/apache/arrow-rs/issues/6988
3760    fn test_roundtrip_empty_schema() {
3761        // create empty record batch with empty schema
3762        let empty_batch = RecordBatch::try_new_with_options(
3763            Arc::new(Schema::empty()),
3764            vec![],
3765            &RecordBatchOptions::default().with_row_count(Some(0)),
3766        )
3767        .unwrap();
3768
3769        // write to parquet
3770        let mut parquet_bytes: Vec<u8> = Vec::new();
3771        let mut writer =
3772            ArrowWriter::try_new(&mut parquet_bytes, empty_batch.schema(), None).unwrap();
3773        writer.write(&empty_batch).unwrap();
3774        writer.close().unwrap();
3775
3776        // read from parquet
3777        let bytes = Bytes::from(parquet_bytes);
3778        let reader = ParquetRecordBatchReaderBuilder::try_new(bytes).unwrap();
3779        assert_eq!(reader.schema(), &empty_batch.schema());
3780        let batches: Vec<_> = reader
3781            .build()
3782            .unwrap()
3783            .collect::<ArrowResult<Vec<_>>>()
3784            .unwrap();
3785        assert_eq!(batches.len(), 0);
3786    }
3787
3788    #[test]
3789    fn test_page_stats_truncation() {
3790        let string_field = Field::new("a", DataType::Utf8, false);
3791        let binary_field = Field::new("b", DataType::Binary, false);
3792        let schema = Schema::new(vec![string_field, binary_field]);
3793
3794        let raw_string_values = vec!["Blart Versenwald III"];
3795        let raw_binary_values = [b"Blart Versenwald III".to_vec()];
3796        let raw_binary_value_refs = raw_binary_values
3797            .iter()
3798            .map(|x| x.as_slice())
3799            .collect::<Vec<_>>();
3800
3801        let string_values = StringArray::from(raw_string_values.clone());
3802        let binary_values = BinaryArray::from(raw_binary_value_refs);
3803        let batch = RecordBatch::try_new(
3804            Arc::new(schema),
3805            vec![Arc::new(string_values), Arc::new(binary_values)],
3806        )
3807        .unwrap();
3808
3809        let props = WriterProperties::builder()
3810            .set_statistics_truncate_length(Some(2))
3811            .set_dictionary_enabled(false)
3812            .set_encoding(Encoding::PLAIN)
3813            .set_compression(crate::basic::Compression::UNCOMPRESSED)
3814            .build();
3815
3816        let mut file = roundtrip_opts(&batch, props);
3817
3818        // read file and decode page headers
3819        // Note: use the thrift API as there is no Rust API to access the statistics in the page headers
3820        let mut buf = vec![];
3821        file.seek(std::io::SeekFrom::Start(0)).unwrap();
3822        let read = file.read_to_end(&mut buf).unwrap();
3823        assert!(read > 0);
3824
3825        // decode first page header
3826        let first_page = &buf[4..]; // skip the 4-byte "PAR1" magic at the start of the file
3827        let mut prot = TCompactSliceInputProtocol::new(first_page);
3828        let hdr = PageHeader::read_from_in_protocol(&mut prot).unwrap();
3829        let stats = hdr.data_page_header.unwrap().statistics;
3830        assert!(stats.is_some());
3831        let stats = stats.unwrap();
3832        // min/max should be truncated to 2 bytes and marked inexact; the truncated max
3832        // has its final byte incremented ("Bm") so it remains an upper bound
3833        assert!(!stats.is_max_value_exact.unwrap());
3834        assert!(!stats.is_min_value_exact.unwrap());
3835        assert_eq!(stats.max_value.unwrap(), "Bm".as_bytes());
3836        assert_eq!(stats.min_value.unwrap(), "Bl".as_bytes());
3837
3838        // check second page now
3839        let second_page = &prot.as_slice()[hdr.compressed_page_size as usize..];
3840        let mut prot = TCompactSliceInputProtocol::new(second_page);
3841        let hdr = PageHeader::read_from_in_protocol(&mut prot).unwrap();
3842        let stats = hdr.data_page_header.unwrap().statistics;
3843        assert!(stats.is_some());
3844        let stats = stats.unwrap();
3845        // check that min/max were properly truncated
3846        assert!(!stats.is_max_value_exact.unwrap());
3847        assert!(!stats.is_min_value_exact.unwrap());
3848        assert_eq!(stats.max_value.unwrap(), "Bm".as_bytes());
3849        assert_eq!(stats.min_value.unwrap(), "Bl".as_bytes());
3850    }
3851
3852    #[test]
3853    fn test_page_encoding_statistics_roundtrip() {
3854        let batch_schema = Schema::new(vec![Field::new(
3855            "int32",
3856            arrow_schema::DataType::Int32,
3857            false,
3858        )]);
3859
3860        let batch = RecordBatch::try_new(
3861            Arc::new(batch_schema.clone()),
3862            vec![Arc::new(Int32Array::from(vec![1, 2, 3, 4])) as _],
3863        )
3864        .unwrap();
3865
3866        let mut file: File = tempfile::tempfile().unwrap();
3867        let mut writer = ArrowWriter::try_new(&mut file, Arc::new(batch_schema), None).unwrap();
3868        writer.write(&batch).unwrap();
3869        let file_metadata = writer.close().unwrap();
3870
3871        assert_eq!(file_metadata.row_groups.len(), 1);
3872        assert_eq!(file_metadata.row_groups[0].columns.len(), 1);
3873        let chunk_meta = file_metadata.row_groups[0].columns[0]
3874            .meta_data
3875            .as_ref()
3876            .expect("column metadata missing");
3877        assert!(chunk_meta.encoding_stats.is_some());
3878        let chunk_page_stats = chunk_meta.encoding_stats.as_ref().unwrap();
3879
3880        // check that the read metadata is also correct
3881        let options = ReadOptionsBuilder::new().with_page_index().build();
3882        let reader = SerializedFileReader::new_with_options(file, options).unwrap();
3883
3884        let rowgroup = reader.get_row_group(0).expect("row group missing");
3885        assert_eq!(rowgroup.num_columns(), 1);
3886        let column = rowgroup.metadata().column(0);
3887        assert!(column.page_encoding_stats().is_some());
3888        let file_page_stats = column.page_encoding_stats().unwrap();
3889        let chunk_stats: Vec<PageEncodingStats> = chunk_page_stats
3890            .iter()
3891            .map(|x| crate::file::page_encoding_stats::try_from_thrift(x).unwrap())
3892            .collect();
3893        assert_eq!(&chunk_stats, file_page_stats);
3894    }
3895}