duckdb/
row.rs

use std::{convert, sync::Arc};

use super::{Error, Result, Statement};
use crate::types::{self, EnumType, FromSql, FromSqlError, ListType, ValueRef};

use arrow::{
    array::{
        self, Array, ArrayRef, DictionaryArray, FixedSizeBinaryArray, FixedSizeListArray, ListArray, MapArray,
        StructArray,
    },
    datatypes::*,
};
use fallible_iterator::FallibleIterator;
use fallible_streaming_iterator::FallibleStreamingIterator;
use rust_decimal::prelude::*;
/// A handle for the resulting rows of a query.
#[must_use = "Rows is lazy and will do nothing unless consumed"]
pub struct Rows<'stmt> {
    pub(crate) stmt: Option<&'stmt Statement<'stmt>>,
    arr: Arc<Option<StructArray>>,
    row: Option<Row<'stmt>>,
    current_row: usize,
    current_batch_row: usize,
}

impl<'stmt> Rows<'stmt> {
    #[inline]
    fn reset(&mut self) {
        self.current_row = 0;
        self.current_batch_row = 0;
        self.arr = Arc::new(None);
    }

    /// Attempt to get the next row from the query. Returns `Ok(Some(Row))` if
    /// there is another row, `Err(...)` if there was an error
    /// getting the next row, and `Ok(None)` if all rows have been retrieved.
    ///
    /// ## Note
    ///
    /// This interface is not compatible with Rust's `Iterator` trait, because
    /// the lifetime of the returned row is tied to the lifetime of `self`.
    /// This is a fallible "streaming iterator". For a more natural interface,
    /// consider using [`query_map`](crate::Statement::query_map) or
    /// [`query_and_then`](crate::Statement::query_and_then) instead, which
    /// return types that implement `Iterator`.
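    ///
    /// A minimal sketch of the `while let` pattern this method enables
    /// (it assumes the first column of the result is an integer):
    ///
    /// ```rust,no_run
    /// # use duckdb::{Result, Statement};
    /// fn sum_first_column(stmt: &mut Statement) -> Result<i64> {
    ///     let mut rows = stmt.query([])?;
    ///     let mut total = 0;
    ///     while let Some(row) = rows.next()? {
    ///         total += row.get::<_, i64>(0)?;
    ///     }
    ///     Ok(total)
    /// }
    /// ```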
    #[allow(clippy::should_implement_trait)] // cannot implement Iterator
    #[inline]
    pub fn next(&mut self) -> Result<Option<&Row<'stmt>>> {
        self.advance()?;
        Ok((*self).get())
    }

    #[inline]
    fn batch_row_count(&self) -> usize {
        if self.arr.is_none() {
            return 0;
        }
        self.arr.as_ref().as_ref().unwrap().len()
    }

    /// Map over this `Rows`, converting it to a [`Map`], which
    /// implements `FallibleIterator`.
    ///
    /// **Note:** This method requires the closure to return `duckdb::Result<B>`.
    /// If you need to use custom error types, consider using [`and_then`](Self::and_then)
    /// instead, which allows any error type that implements `From<duckdb::Error>`.
    ///
    /// ```rust,no_run
    /// use fallible_iterator::FallibleIterator;
    /// # use duckdb::{Result, Statement};
    /// fn query(stmt: &mut Statement) -> Result<Vec<i64>> {
    ///     let rows = stmt.query([])?;
    ///     rows.map(|r| r.get(0)).collect()
    /// }
    /// ```
    // FIXME Hide FallibleStreamingIterator::map
    #[inline]
    pub fn map<F, B>(self, f: F) -> Map<'stmt, F>
    where
        F: FnMut(&Row<'_>) -> Result<B>,
    {
        Map { rows: self, f }
    }

    /// Map over this `Rows`, converting it to a [`MappedRows`], which
    /// implements `Iterator`.
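    ///
    /// A short sketch (it assumes the first column is `TEXT`):
    ///
    /// ```rust,no_run
    /// # use duckdb::{Result, Statement};
    /// fn names(stmt: &mut Statement) -> Result<Vec<String>> {
    ///     let rows = stmt.query([])?;
    ///     rows.mapped(|r| r.get(0)).collect()
    /// }
    /// ```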
    #[inline]
    pub fn mapped<F, B>(self, f: F) -> MappedRows<'stmt, F>
    where
        F: FnMut(&Row<'_>) -> Result<B>,
    {
        MappedRows { rows: self, map: f }
    }

    /// Map over this `Rows` with a fallible function, converting it to an
    /// [`AndThenRows`], which implements `Iterator` (instead of
    /// `FallibleStreamingIterator`).
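    ///
    /// A sketch with a hypothetical `MyError` wrapper type; any error type
    /// implementing `From<duckdb::Error>` works here:
    ///
    /// ```rust,no_run
    /// # use duckdb::{Result, Statement};
    /// #[derive(Debug)]
    /// enum MyError {
    ///     Db(duckdb::Error),
    /// }
    ///
    /// impl From<duckdb::Error> for MyError {
    ///     fn from(err: duckdb::Error) -> Self {
    ///         MyError::Db(err)
    ///     }
    /// }
    ///
    /// fn names(stmt: &mut Statement) -> Result<Vec<String>, MyError> {
    ///     let rows = stmt.query([])?;
    ///     rows.and_then(|r| r.get::<_, String>(0).map_err(MyError::from))
    ///         .collect()
    /// }
    /// ```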
    #[inline]
    pub fn and_then<F, T, E>(self, f: F) -> AndThenRows<'stmt, F>
    where
        F: FnMut(&Row<'_>) -> Result<T, E>,
    {
        AndThenRows { rows: self, map: f }
    }

    /// Access the underlying statement
    ///
    /// This method provides a way to access the `Statement` that created these `Rows`
    /// without additional borrowing conflicts. This is particularly useful when you need
    /// to access statement metadata (like column count or names) while iterating over results.
    ///
    /// # Example
    ///
    /// ```rust,no_run
    /// # use duckdb::{Connection, Result};
    /// fn process_results(conn: &Connection) -> Result<()> {
    ///     let mut stmt = conn.prepare("SELECT id, name FROM people")?;
    ///     let mut rows = stmt.query([])?;
    ///
    ///     let column_count = rows.as_ref().unwrap().column_count();
    ///     println!("Processing {} columns", column_count);
    ///
    ///     while let Some(row) = rows.next()? {
    ///         // Process row...
    ///     }
    ///     Ok(())
    /// }
    /// ```
    pub fn as_ref(&self) -> Option<&Statement<'stmt>> {
        self.stmt
    }
}

impl<'stmt> Rows<'stmt> {
    #[inline]
    pub(crate) fn new(stmt: &'stmt Statement<'stmt>) -> Self {
        Rows {
            stmt: Some(stmt),
            arr: Arc::new(None),
            row: None,
            current_row: 0,
            current_batch_row: 0,
        }
    }

    #[inline]
    pub(crate) fn get_expected_row(&mut self) -> Result<&Row<'stmt>> {
        match self.next()? {
            Some(row) => Ok(row),
            None => Err(Error::QueryReturnedNoRows),
        }
    }
}

/// `F` is used to transform the _streaming_ iterator into a _fallible_
/// iterator.
#[must_use = "iterators are lazy and do nothing unless consumed"]
pub struct Map<'stmt, F> {
    rows: Rows<'stmt>,
    f: F,
}

impl<F, B> FallibleIterator for Map<'_, F>
where
    F: FnMut(&Row<'_>) -> Result<B>,
{
    type Error = Error;
    type Item = B;

    #[inline]
    fn next(&mut self) -> Result<Option<B>> {
        match self.rows.next()? {
            Some(v) => Ok(Some((self.f)(v)?)),
            None => Ok(None),
        }
    }
}

/// An iterator over the mapped resulting rows of a query.
///
/// `F` is used to transform the _streaming_ iterator into a _standard_
/// iterator.
#[must_use = "iterators are lazy and do nothing unless consumed"]
pub struct MappedRows<'stmt, F> {
    rows: Rows<'stmt>,
    map: F,
}

impl<T, F> Iterator for MappedRows<'_, F>
where
    F: FnMut(&Row<'_>) -> Result<T>,
{
    type Item = Result<T>;

    #[inline]
    fn next(&mut self) -> Option<Result<T>> {
        let map = &mut self.map;
        self.rows.next().transpose().map(|row_result| row_result.and_then(map))
    }
}

/// An iterator over the mapped resulting rows of a query, whose error type
/// can be any type that implements `From<Error>`.
#[must_use = "iterators are lazy and do nothing unless consumed"]
pub struct AndThenRows<'stmt, F> {
    rows: Rows<'stmt>,
    map: F,
}

impl<T, E, F> Iterator for AndThenRows<'_, F>
where
    E: convert::From<Error>,
    F: FnMut(&Row<'_>) -> Result<T, E>,
{
    type Item = Result<T, E>;

    #[inline]
    fn next(&mut self) -> Option<Self::Item> {
        let map = &mut self.map;
        self.rows
            .next()
            .transpose()
            .map(|row_result| row_result.map_err(E::from).and_then(map))
    }
}

/// `FallibleStreamingIterator` differs from the standard library's `Iterator`
/// in two ways:
/// * each call to `next` can fail.
/// * returned `Row` is valid until `next` is called again or `Statement` is
///   reset or finalized.
///
/// While these iterators cannot be used with Rust `for` loops, `while let`
/// loops offer a similar level of ergonomics:
/// ```rust,no_run
/// # use duckdb::{Result, Statement};
/// fn query(stmt: &mut Statement) -> Result<()> {
///     let mut rows = stmt.query([])?;
///     while let Some(row) = rows.next()? {
///         // scan columns value
///     }
///     Ok(())
/// }
/// ```
impl<'stmt> FallibleStreamingIterator for Rows<'stmt> {
    type Error = Error;
    type Item = Row<'stmt>;

    #[inline]
    fn advance(&mut self) -> Result<()> {
        match self.stmt {
            Some(stmt) => {
                if self.current_row < stmt.row_count() {
                    if self.current_batch_row >= self.batch_row_count() {
                        self.arr = Arc::new(stmt.step());
                        if self.arr.is_none() {
                            self.row = None;
                            return Ok(());
                        }
                        self.current_batch_row = 0;
                    }
                    self.row = Some(Row {
                        stmt,
                        arr: self.arr.clone(),
                        current_row: self.current_batch_row,
                    });
                    self.current_row += 1;
                    self.current_batch_row += 1;
                    Ok(())
                } else {
                    self.reset();
                    self.row = None;
                    Ok(())
                }
            }
            None => {
                self.row = None;
                Ok(())
            }
        }
    }

    #[inline]
    fn get(&self) -> Option<&Row<'stmt>> {
        self.row.as_ref()
    }
}

/// A single result row of a query.
pub struct Row<'stmt> {
    pub(crate) stmt: &'stmt Statement<'stmt>,
    arr: Arc<Option<StructArray>>,
    current_row: usize,
}

#[allow(clippy::needless_lifetimes)]
impl<'stmt> Row<'stmt> {
    /// Get the value of a particular column of the result row.
    ///
    /// ## Failure
    ///
    /// Panics if calling [`row.get(idx)`](Row::get) would return an error,
    /// including:
    ///
    /// * If the underlying DuckDB column type is not a valid type as a source
    ///   for `T`
    /// * If the underlying DuckDB integral value is outside the range
    ///   representable by `T`
    /// * If `idx` is outside the range of columns in the returned query
    pub fn get_unwrap<I: RowIndex, T: FromSql>(&self, idx: I) -> T {
        self.get(idx).unwrap()
    }

    /// Get the value of a particular column of the result row.
    ///
    /// ## Failure
    ///
    /// Returns an `Error::InvalidColumnType` if the underlying DuckDB column
    /// type is not a valid type as a source for `T`.
    ///
    /// Returns an `Error::InvalidColumnIndex` if `idx` is outside the valid
    /// column range for this row.
    ///
    /// Returns an `Error::InvalidColumnName` if `idx` is not a valid column
    /// name for this row.
    pub fn get<I: RowIndex, T: FromSql>(&self, idx: I) -> Result<T> {
        let idx = idx.idx(self.stmt)?;
        let value = self.value_ref(self.current_row, idx);
        FromSql::column_result(value).map_err(|err| match err {
            FromSqlError::InvalidType => {
                Error::InvalidColumnType(idx, self.stmt.column_name_unwrap(idx).into(), value.data_type())
            }
            FromSqlError::OutOfRange(i) => Error::IntegralValueOutOfRange(idx, i),
            FromSqlError::Other(err) => Error::FromSqlConversionFailure(idx, value.data_type(), err),
            #[cfg(feature = "uuid")]
            FromSqlError::InvalidUuidSize(_) => {
                Error::InvalidColumnType(idx, self.stmt.column_name_unwrap(idx).into(), value.data_type())
            }
        })
    }

    /// Get the value of a particular column of the result row as a `ValueRef`,
    /// allowing data to be read out of a row without copying.
    ///
    /// This `ValueRef` is valid only as long as this Row, which is enforced by
    /// its lifetime. This means that while this method is completely safe,
    /// it can be somewhat difficult to use, and most callers will be better
    /// served by [`get`](Row::get) or [`get_unwrap`](Row::get_unwrap).
    ///
    /// ## Failure
    ///
    /// Returns an `Error::InvalidColumnIndex` if `idx` is outside the valid
    /// column range for this row.
    ///
    /// Returns an `Error::InvalidColumnName` if `idx` is not a valid column
    /// name for this row.
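    ///
    /// A minimal sketch that just checks the first column for SQL `NULL`:
    ///
    /// ```rust,no_run
    /// # use duckdb::{types::ValueRef, Result, Row};
    /// fn first_is_null(row: &Row<'_>) -> Result<bool> {
    ///     Ok(matches!(row.get_ref(0)?, ValueRef::Null))
    /// }
    /// ```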
    pub fn get_ref<I: RowIndex>(&self, idx: I) -> Result<ValueRef<'_>> {
        let idx = idx.idx(self.stmt)?;
        // The returned `ValueRef` borrows from the Arrow array held by this
        // `Row`, so it is only valid for the lifetime of the row itself.
        let val_ref = self.value_ref(self.current_row, idx);
        Ok(val_ref)
    }

    fn value_ref(&self, row: usize, col: usize) -> ValueRef<'_> {
        let column = self.arr.as_ref().as_ref().unwrap().column(col);
        Self::value_ref_internal(row, col, column)
    }

    pub(crate) fn value_ref_internal(row: usize, col: usize, column: &ArrayRef) -> ValueRef<'_> {
        if column.is_null(row) {
            return ValueRef::Null;
        }
        // duckdb.cpp SetArrowFormat
        // https://github.com/duckdb/duckdb/blob/71f1c7a7e4b8737cff5e78d1f090c54f5e78e17b/src/main/query_result.cpp#L148
        match column.data_type() {
            DataType::Utf8 => {
                let array = column.as_any().downcast_ref::<array::StringArray>().unwrap();

                if array.is_null(row) {
                    return ValueRef::Null;
                }
                ValueRef::from(array.value(row))
            }
            DataType::LargeUtf8 => {
                let array = column.as_any().downcast_ref::<array::LargeStringArray>().unwrap();

                if array.is_null(row) {
                    return ValueRef::Null;
                }
                ValueRef::from(array.value(row))
            }
            DataType::Binary => {
                let array = column.as_any().downcast_ref::<array::BinaryArray>().unwrap();

                if array.is_null(row) {
                    return ValueRef::Null;
                }
                ValueRef::Blob(array.value(row))
            }
            DataType::LargeBinary => {
                let array = column.as_any().downcast_ref::<array::LargeBinaryArray>().unwrap();

                if array.is_null(row) {
                    return ValueRef::Null;
                }
                ValueRef::Blob(array.value(row))
            }
            DataType::FixedSizeBinary(_) => {
                let array = column.as_any().downcast_ref::<FixedSizeBinaryArray>().unwrap();

                if array.is_null(row) {
                    return ValueRef::Null;
                }
                ValueRef::Blob(array.value(row))
            }
            DataType::Boolean => {
                let array = column.as_any().downcast_ref::<array::BooleanArray>().unwrap();

                if array.is_null(row) {
                    return ValueRef::Null;
                }
                ValueRef::Boolean(array.value(row))
            }
            DataType::Int8 => {
                let array = column.as_any().downcast_ref::<array::Int8Array>().unwrap();

                if array.is_null(row) {
                    return ValueRef::Null;
                }
                ValueRef::TinyInt(array.value(row))
            }
            DataType::Int16 => {
                let array = column.as_any().downcast_ref::<array::Int16Array>().unwrap();

                if array.is_null(row) {
                    return ValueRef::Null;
                }
                ValueRef::SmallInt(array.value(row))
            }
            DataType::Int32 => {
                let array = column.as_any().downcast_ref::<array::Int32Array>().unwrap();

                if array.is_null(row) {
                    return ValueRef::Null;
                }
                ValueRef::Int(array.value(row))
            }
            DataType::Int64 => {
                let array = column.as_any().downcast_ref::<array::Int64Array>().unwrap();

                if array.is_null(row) {
                    return ValueRef::Null;
                }
                ValueRef::BigInt(array.value(row))
            }
            DataType::UInt8 => {
                let array = column.as_any().downcast_ref::<array::UInt8Array>().unwrap();

                if array.is_null(row) {
                    return ValueRef::Null;
                }
                ValueRef::UTinyInt(array.value(row))
            }
            DataType::UInt16 => {
                let array = column.as_any().downcast_ref::<array::UInt16Array>().unwrap();

                if array.is_null(row) {
                    return ValueRef::Null;
                }
                ValueRef::USmallInt(array.value(row))
            }
            DataType::UInt32 => {
                let array = column.as_any().downcast_ref::<array::UInt32Array>().unwrap();

                if array.is_null(row) {
                    return ValueRef::Null;
                }
                ValueRef::UInt(array.value(row))
            }
            DataType::UInt64 => {
                let array = column.as_any().downcast_ref::<array::UInt64Array>().unwrap();

                if array.is_null(row) {
                    return ValueRef::Null;
                }
                ValueRef::UBigInt(array.value(row))
            }
            DataType::Float16 => {
                let array = column.as_any().downcast_ref::<array::Float16Array>().unwrap();

                if array.is_null(row) {
                    return ValueRef::Null;
                }
                ValueRef::Float(array.value(row).to_f32())
            }
            DataType::Float32 => {
                let array = column.as_any().downcast_ref::<array::Float32Array>().unwrap();

                if array.is_null(row) {
                    return ValueRef::Null;
                }
                ValueRef::Float(array.value(row))
            }
            DataType::Float64 => {
                let array = column.as_any().downcast_ref::<array::Float64Array>().unwrap();

                if array.is_null(row) {
                    return ValueRef::Null;
                }
                ValueRef::Double(array.value(row))
            }
            DataType::Decimal128(..) => {
                let array = column.as_any().downcast_ref::<array::Decimal128Array>().unwrap();

                if array.is_null(row) {
                    return ValueRef::Null;
                }
                // hugeint: d:38,0
                if array.scale() == 0 {
                    return ValueRef::HugeInt(array.value(row));
                }
                ValueRef::Decimal(Decimal::from_i128_with_scale(array.value(row), array.scale() as u32))
            }
            DataType::Timestamp(unit, _) if *unit == TimeUnit::Second => {
                let array = column.as_any().downcast_ref::<array::TimestampSecondArray>().unwrap();

                if array.is_null(row) {
                    return ValueRef::Null;
                }
                ValueRef::Timestamp(types::TimeUnit::Second, array.value(row))
            }
            DataType::Timestamp(unit, _) if *unit == TimeUnit::Millisecond => {
                let array = column
                    .as_any()
                    .downcast_ref::<array::TimestampMillisecondArray>()
                    .unwrap();

                if array.is_null(row) {
                    return ValueRef::Null;
                }
                ValueRef::Timestamp(types::TimeUnit::Millisecond, array.value(row))
            }
            DataType::Timestamp(unit, _) if *unit == TimeUnit::Microsecond => {
                let array = column
                    .as_any()
                    .downcast_ref::<array::TimestampMicrosecondArray>()
                    .unwrap();

                if array.is_null(row) {
                    return ValueRef::Null;
                }
                ValueRef::Timestamp(types::TimeUnit::Microsecond, array.value(row))
            }
            DataType::Timestamp(unit, _) if *unit == TimeUnit::Nanosecond => {
                let array = column
                    .as_any()
                    .downcast_ref::<array::TimestampNanosecondArray>()
                    .unwrap();

                if array.is_null(row) {
                    return ValueRef::Null;
                }
                ValueRef::Timestamp(types::TimeUnit::Nanosecond, array.value(row))
            }
            DataType::Date32 => {
                let array = column.as_any().downcast_ref::<array::Date32Array>().unwrap();

                if array.is_null(row) {
                    return ValueRef::Null;
                }
                ValueRef::Date32(array.value(row))
            }
            DataType::Time64(TimeUnit::Microsecond) => {
                let array = column.as_any().downcast_ref::<array::Time64MicrosecondArray>().unwrap();

                if array.is_null(row) {
                    return ValueRef::Null;
                }
                ValueRef::Time64(types::TimeUnit::Microsecond, array.value(row))
            }
            DataType::Interval(unit) => match unit {
                IntervalUnit::MonthDayNano => {
                    let array = column
                        .as_any()
                        .downcast_ref::<array::IntervalMonthDayNanoArray>()
                        .unwrap();

                    if array.is_null(row) {
                        return ValueRef::Null;
                    }

                    let value = array.value(row);

                    ValueRef::Interval {
                        months: value.months,
                        days: value.days,
                        nanos: value.nanoseconds,
                    }
                }
                _ => unimplemented!("{:?}", unit),
            },
            // TODO: support more data types
            // NOTE: DataTypes not supported by duckdb
            // DataType::Date64 => make_string_date!(array::Date64Array, column, row),
            // DataType::Time32(unit) if *unit == TimeUnit::Second => {
            //     make_string_time!(array::Time32SecondArray, column, row)
            // }
            // DataType::Time32(unit) if *unit == TimeUnit::Millisecond => {
            //     make_string_time!(array::Time32MillisecondArray, column, row)
            // }
            // DataType::Time64(unit) if *unit == TimeUnit::Nanosecond => {
            //     make_string_time!(array::Time64NanosecondArray, column, row)
            // }
            DataType::LargeList(..) => {
                let arr = column.as_any().downcast_ref::<array::LargeListArray>().unwrap();

                ValueRef::List(ListType::Large(arr), row)
            }
            DataType::List(..) => {
                let arr = column.as_any().downcast_ref::<ListArray>().unwrap();

                ValueRef::List(ListType::Regular(arr), row)
            }
            DataType::Dictionary(key_type, ..) => {
                let column = column.as_any();
                ValueRef::Enum(
                    match key_type.as_ref() {
                        DataType::UInt8 => {
                            EnumType::UInt8(column.downcast_ref::<DictionaryArray<UInt8Type>>().unwrap())
                        }
                        DataType::UInt16 => {
                            EnumType::UInt16(column.downcast_ref::<DictionaryArray<UInt16Type>>().unwrap())
                        }
                        DataType::UInt32 => {
                            EnumType::UInt32(column.downcast_ref::<DictionaryArray<UInt32Type>>().unwrap())
                        }
                        typ => panic!("Unsupported key type: {typ:?}"),
                    },
                    row,
                )
            }
            DataType::Struct(_) => {
                let res = column.as_any().downcast_ref::<StructArray>().unwrap();
                ValueRef::Struct(res, row)
            }
            DataType::Map(..) => {
                let arr = column.as_any().downcast_ref::<MapArray>().unwrap();
                ValueRef::Map(arr, row)
            }
            DataType::FixedSizeList(..) => {
                let arr = column.as_any().downcast_ref::<FixedSizeListArray>().unwrap();
                ValueRef::Array(arr, row)
            }
            DataType::Union(..) => ValueRef::Union(column, row),
            _ => unreachable!("invalid value: {}, {}", col, column.data_type()),
        }
    }

    /// Get the value of a particular column of the result row as a `ValueRef`,
    /// allowing data to be read out of a row without copying.
    ///
    /// This `ValueRef` is valid only as long as this Row, which is enforced by
    /// its lifetime. This means that while this method is completely safe,
    /// it can be difficult to use, and most callers will be better served by
    /// [`get`](Row::get) or [`get_unwrap`](Row::get_unwrap).
    ///
    /// ## Failure
    ///
    /// Panics if calling [`row.get_ref(idx)`](Row::get_ref) would return an
    /// error, including:
    ///
    /// * If `idx` is outside the range of columns in the returned query.
    /// * If `idx` is not a valid column name for this row.
    pub fn get_ref_unwrap<I: RowIndex>(&self, idx: I) -> ValueRef<'_> {
        self.get_ref(idx).unwrap()
    }
}

impl<'stmt> AsRef<Statement<'stmt>> for Row<'stmt> {
    fn as_ref(&self) -> &Statement<'stmt> {
        self.stmt
    }
}

mod sealed {
    /// This trait exists just to ensure that the only impls of `trait RowIndex`
    /// that are allowed are ones in this crate.
    pub trait Sealed {}
    impl Sealed for usize {}
    impl Sealed for &str {}
}

/// A trait implemented by types that can index into columns of a row.
///
/// It is only implemented for `usize` and `&str`.
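///
/// A short sketch of both index forms (it assumes the query returns
/// `id INTEGER, name TEXT` columns):
///
/// ```rust,no_run
/// # use duckdb::{Result, Row};
/// fn read(row: &Row<'_>) -> Result<(i64, String)> {
///     Ok((row.get(0)?, row.get("name")?))
/// }
/// ```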
pub trait RowIndex: sealed::Sealed {
    /// Returns the index of the appropriate column, or `None` if no such
    /// column exists.
    fn idx(&self, stmt: &Statement<'_>) -> Result<usize>;
}

impl RowIndex for usize {
    #[inline]
    fn idx(&self, stmt: &Statement<'_>) -> Result<usize> {
        if *self >= stmt.column_count() {
            Err(Error::InvalidColumnIndex(*self))
        } else {
            Ok(*self)
        }
    }
}

impl RowIndex for &'_ str {
    #[inline]
    fn idx(&self, stmt: &Statement<'_>) -> Result<usize> {
        stmt.column_index(self)
    }
}

macro_rules! tuple_try_from_row {
    ($($field:ident),*) => {
        impl<'a, $($field,)*> convert::TryFrom<&'a Row<'a>> for ($($field,)*) where $($field: FromSql,)* {
            type Error = crate::Error;

            // we end with index += 1, which rustc warns about
            // unused_variables and unused_mut are allowed for ()
            #[allow(unused_assignments, unused_variables, unused_mut)]
            fn try_from(row: &'a Row<'a>) -> Result<Self> {
                let mut index = 0;
                $(
                    #[allow(non_snake_case)]
                    let $field = row.get::<_, $field>(index)?;
                    index += 1;
                )*
                Ok(($($field,)*))
            }
        }
    }
}

macro_rules! tuples_try_from_row {
    () => {
        // not very useful, but maybe some other macro users will find this helpful
        tuple_try_from_row!();
    };
    ($first:ident $(, $remaining:ident)*) => {
        tuple_try_from_row!($first $(, $remaining)*);
        tuples_try_from_row!($($remaining),*);
    };
}

tuples_try_from_row!(A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P);

#[cfg(test)]
mod tests {
    #![allow(clippy::redundant_closure)] // false positives due to lifetime issues; clippy issue #5594
    use crate::{Connection, Result};

    #[test]
    fn test_try_from_row_for_tuple_1() -> Result<()> {
        use crate::ToSql;
        use std::convert::TryFrom;

        let conn = Connection::open_in_memory()?;
        conn.execute(
            "CREATE TABLE test (a INTEGER)",
            crate::params_from_iter(std::iter::empty::<&dyn ToSql>()),
        )?;
        conn.execute("INSERT INTO test VALUES (42)", [])?;
        let val = conn.query_row("SELECT a FROM test", [], |row| <(u32,)>::try_from(row))?;
        assert_eq!(val, (42,));
        let fail = conn.query_row("SELECT a FROM test", [], |row| <(u32, u32)>::try_from(row));
        assert!(fail.is_err());
        Ok(())
    }

    #[test]
    fn test_try_from_row_for_tuple_2() -> Result<()> {
        use std::convert::TryFrom;

        let conn = Connection::open_in_memory()?;
        conn.execute("CREATE TABLE test (a INTEGER, b INTEGER)", [])?;
        conn.execute("INSERT INTO test VALUES (42, 47)", [])?;
        let val = conn.query_row("SELECT a, b FROM test", [], |row| <(u32, u32)>::try_from(row))?;
        assert_eq!(val, (42, 47));
        let fail = conn.query_row("SELECT a, b FROM test", [], |row| <(u32, u32, u32)>::try_from(row));
        assert!(fail.is_err());
        Ok(())
    }

    #[test]
    fn test_try_from_row_for_tuple_16() -> Result<()> {
        use std::convert::TryFrom;

        let create_table = "CREATE TABLE test (
            a INTEGER,
            b INTEGER,
            c INTEGER,
            d INTEGER,
            e INTEGER,
            f INTEGER,
            g INTEGER,
            h INTEGER,
            i INTEGER,
            j INTEGER,
            k INTEGER,
            l INTEGER,
            m INTEGER,
            n INTEGER,
            o INTEGER,
            p INTEGER
        )";

        let insert_values = "INSERT INTO test VALUES (
            0,
            1,
            2,
            3,
            4,
            5,
            6,
            7,
            8,
            9,
            10,
            11,
            12,
            13,
            14,
            15
        )";

        type BigTuple = (
            u32,
            u32,
            u32,
            u32,
            u32,
            u32,
            u32,
            u32,
            u32,
            u32,
            u32,
            u32,
            u32,
            u32,
            u32,
            u32,
        );

        let conn = Connection::open_in_memory()?;
        conn.execute(create_table, [])?;
        conn.execute(insert_values, [])?;
        let val = conn.query_row("SELECT * FROM test", [], |row| BigTuple::try_from(row))?;
        // Debug is not implemented for tuples of 16
        assert_eq!(val.0, 0);
        assert_eq!(val.1, 1);
        assert_eq!(val.2, 2);
        assert_eq!(val.3, 3);
        assert_eq!(val.4, 4);
        assert_eq!(val.5, 5);
        assert_eq!(val.6, 6);
        assert_eq!(val.7, 7);
        assert_eq!(val.8, 8);
        assert_eq!(val.9, 9);
        assert_eq!(val.10, 10);
        assert_eq!(val.11, 11);
        assert_eq!(val.12, 12);
        assert_eq!(val.13, 13);
        assert_eq!(val.14, 14);
        assert_eq!(val.15, 15);

        // We don't test one bigger because it's unimplemented
        Ok(())
    }

    #[test]
    #[cfg(feature = "vtab-arrow")]
    fn test_fixed_size_binary_via_arrow() -> Result<()> {
        use crate::vtab::arrow::{arrow_recordbatch_to_query_params, ArrowVTab};
        use arrow::array::{Array, ArrayRef, BinaryArray, FixedSizeBinaryArray};
        use arrow::datatypes::{DataType, Field, Schema};
        use arrow::record_batch::RecordBatch;
        use std::sync::Arc;

        let conn = Connection::open_in_memory()?;
        conn.register_table_function::<ArrowVTab>("arrow")?;

        // Create FixedSizeBinary(16) array - like UUID
        let values = vec![
            vec![1u8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],
            vec![16u8, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1],
            vec![0u8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255],
        ];

        let byte_array = FixedSizeBinaryArray::try_from_iter(values.into_iter()).unwrap();
        let arc: ArrayRef = Arc::new(byte_array);
        let schema = Schema::new(vec![Field::new("data", DataType::FixedSizeBinary(16), false)]);
        let batch = RecordBatch::try_new(Arc::new(schema), vec![arc]).unwrap();

        let mut stmt = conn.prepare("SELECT data FROM arrow(?, ?)")?;
        let mut arr = stmt.query_arrow(arrow_recordbatch_to_query_params(batch))?;
        let rb = arr.next().expect("no record batch");

        // DuckDB converts FixedSizeBinary to regular Binary
        let column = rb.column(0).as_any().downcast_ref::<BinaryArray>().unwrap();
        assert_eq!(column.len(), 3);
        assert_eq!(
            column.value(0),
            &[1u8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
        );
        assert_eq!(
            column.value(1),
            &[16u8, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1]
        );
        assert_eq!(column.value(2), &[0u8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255]);

        Ok(())
    }

    #[test]
    #[cfg(feature = "vtab-arrow")]
    fn test_fixed_size_binary_with_nulls_via_arrow() -> Result<()> {
        use crate::vtab::arrow::{arrow_recordbatch_to_query_params, ArrowVTab};
        use arrow::array::{Array, ArrayRef, BinaryArray, FixedSizeBinaryArray};
        use arrow::datatypes::{DataType, Field, Schema};
        use arrow::record_batch::RecordBatch;
        use std::sync::Arc;

        let conn = Connection::open_in_memory()?;
        conn.register_table_function::<ArrowVTab>("arrow")?;

        // Create FixedSizeBinary(8) array with nulls
        let values = vec![
            Some(vec![1u8, 2, 3, 4, 5, 6, 7, 8]),
            None,
            Some(vec![9u8, 10, 11, 12, 13, 14, 15, 16]),
        ];

        let byte_array = FixedSizeBinaryArray::try_from_sparse_iter_with_size(values.into_iter(), 8).unwrap();
        let arc: ArrayRef = Arc::new(byte_array);
        let schema = Schema::new(vec![Field::new("data", DataType::FixedSizeBinary(8), true)]);
        let batch = RecordBatch::try_new(Arc::new(schema), vec![arc]).unwrap();

        let mut stmt = conn.prepare("SELECT data FROM arrow(?, ?)")?;
        let mut arr = stmt.query_arrow(arrow_recordbatch_to_query_params(batch))?;
        let rb = arr.next().expect("no record batch");

        // NOTE: Currently, null handling for FixedSizeBinary is not fully implemented
        // (see vtab/arrow.rs fixed_size_binary_array_to_vector, line 925-926)
        // Nulls are converted to zero bytes instead of actual nulls
        let column = rb.column(0).as_any().downcast_ref::<BinaryArray>().unwrap();
        assert_eq!(column.len(), 3);
        assert!(column.is_valid(0));
        // This should be false when null handling is implemented
        // assert!(!column.is_valid(1));
        assert!(column.is_valid(2));
        assert_eq!(column.value(0), &[1u8, 2, 3, 4, 5, 6, 7, 8]);
        // The null value is currently represented as zero bytes
        assert_eq!(column.value(1), &[0u8, 0, 0, 0, 0, 0, 0, 0]);
        assert_eq!(column.value(2), &[9u8, 10, 11, 12, 13, 14, 15, 16]);

        Ok(())
    }

    #[test]
    #[cfg(feature = "vtab-arrow")]
    fn test_fixed_size_binary_different_sizes_via_arrow() -> Result<()> {
        use crate::vtab::arrow::{arrow_recordbatch_to_query_params, ArrowVTab};
        use arrow::array::{ArrayRef, FixedSizeBinaryArray};
        use arrow::datatypes::{DataType, Field, Schema};
        use arrow::record_batch::RecordBatch;
        use std::sync::Arc;

        let conn = Connection::open_in_memory()?;
        conn.register_table_function::<ArrowVTab>("arrow")?;

        // Test with FixedSizeBinary(4)
        let values = vec![vec![1u8, 2, 3, 4], vec![5u8, 6, 7, 8]];

        let byte_array = FixedSizeBinaryArray::try_from_iter(values.into_iter()).unwrap();
        let arc: ArrayRef = Arc::new(byte_array);
        let schema = Schema::new(vec![Field::new("data", DataType::FixedSizeBinary(4), false)]);
        let batch = RecordBatch::try_new(Arc::new(schema), vec![arc]).unwrap();

        let mut stmt = conn.prepare("SELECT data FROM arrow(?, ?)")?;
        let mut rows = stmt.query(arrow_recordbatch_to_query_params(batch))?;

        // Read via Row interface
        let row = rows.next()?.unwrap();
        let bytes: Vec<u8> = row.get(0)?;
        assert_eq!(bytes, vec![1u8, 2, 3, 4]);

        let row = rows.next()?.unwrap();
        let bytes: Vec<u8> = row.get(0)?;
        assert_eq!(bytes, vec![5u8, 6, 7, 8]);

        Ok(())
    }

    #[test]
    #[cfg(feature = "vtab-arrow")]
    fn test_fixed_size_binary_value_ref_via_arrow() -> Result<()> {
        use crate::types::ValueRef;
        use crate::vtab::arrow::{arrow_recordbatch_to_query_params, ArrowVTab};
        use arrow::array::{ArrayRef, FixedSizeBinaryArray};
        use arrow::datatypes::{DataType, Field, Schema};
        use arrow::record_batch::RecordBatch;
        use std::sync::Arc;

        let conn = Connection::open_in_memory()?;
        conn.register_table_function::<ArrowVTab>("arrow")?;

        let values = vec![Some(vec![1u8, 2, 3, 4]), None];

        let byte_array = FixedSizeBinaryArray::try_from_sparse_iter_with_size(values.into_iter(), 4).unwrap();
        let arc: ArrayRef = Arc::new(byte_array);
        let schema = Schema::new(vec![Field::new("data", DataType::FixedSizeBinary(4), true)]);
        let batch = RecordBatch::try_new(Arc::new(schema), vec![arc]).unwrap();

        let mut stmt = conn.prepare("SELECT data FROM arrow(?, ?)")?;
        let mut rows = stmt.query(arrow_recordbatch_to_query_params(batch))?;

        // First row - non-null
        let row = rows.next()?.unwrap();
        let value_ref = row.get_ref(0)?;
        match value_ref {
            ValueRef::Blob(bytes) => {
                assert_eq!(bytes, &[1u8, 2, 3, 4]);
            }
            _ => panic!("Expected Blob ValueRef, got {:?}", value_ref),
        }

        // Second row - should be null, but currently null handling is not implemented
        // (see vtab/arrow.rs fixed_size_binary_array_to_vector, line 925-926)
        // so it's represented as zero bytes
        let row = rows.next()?.unwrap();
        let value_ref = row.get_ref(0)?;
        match value_ref {
            ValueRef::Blob(bytes) => {
                // This should be ValueRef::Null when null handling is implemented
                assert_eq!(bytes, &[0u8, 0, 0, 0]);
            }
            _ => panic!("Expected Blob ValueRef with zero bytes, got {:?}", value_ref),
        }

        Ok(())
    }

    #[cfg(feature = "uuid")]
    #[test]
    fn test_fixed_size_binary_uuid() -> Result<()> {
        use uuid::Uuid;

        let conn = Connection::open_in_memory()?;
        conn.execute_batch("CREATE TABLE test (id UUID)")?;

        let uuid_str = "550e8400-e29b-41d4-a716-446655440000";
        conn.execute("INSERT INTO test VALUES (?)", [uuid_str])?;

        // Read back as UUID
        let uuid: Uuid = conn.query_row("SELECT id FROM test", [], |r| r.get(0))?;
        assert_eq!(uuid.to_string(), uuid_str);
        Ok(())
    }

    #[cfg(feature = "uuid")]
    #[test]
    fn test_fixed_size_binary_uuid_roundtrip() -> Result<()> {
        use uuid::Uuid;

        let conn = Connection::open_in_memory()?;
        conn.execute_batch("CREATE TABLE test (id UUID)")?;

        let original_uuid = Uuid::new_v4();
        conn.execute("INSERT INTO test VALUES (?)", [original_uuid])?;

        let retrieved_uuid: Uuid = conn.query_row("SELECT id FROM test", [], |r| r.get(0))?;
        assert_eq!(original_uuid, retrieved_uuid);
        Ok(())
    }
}