// flatbuffers/builder.rs

/*
 * Copyright 2018 Google Inc. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#[cfg(not(feature = "std"))]
use alloc::{vec, vec::Vec};
use core::cmp::max;
use core::convert::Infallible;
use core::fmt::{Debug, Display};
use core::iter::{DoubleEndedIterator, ExactSizeIterator};
use core::marker::PhantomData;
use core::ops::{Add, AddAssign, Deref, DerefMut, Index, IndexMut, Sub, SubAssign};
use core::ptr::write_bytes;

use crate::endian_scalar::emplace_scalar;
use crate::primitives::*;
use crate::push::{Push, PushAlignment};
use crate::read_scalar;
use crate::table::Table;
use crate::vector::Vector;
use crate::vtable::{field_index_to_field_offset, VTable};
use crate::vtable_writer::VTableWriter;

/// Trait to implement custom allocation strategies for [`FlatBufferBuilder`].
///
/// An implementation can be used with [`FlatBufferBuilder::new_in`], enabling a custom allocation
/// strategy for the [`FlatBufferBuilder`].
///
/// # Safety
///
/// The implementation of the allocator must match the defined behavior as described by the
/// comments.
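///
/// # Examples
///
/// A minimal usage sketch; the built-in [`DefaultAllocator`] stands in here
/// for a custom implementation:
///
/// ```ignore
/// let allocator = DefaultAllocator::from_vec(vec![0; 1024]);
/// let mut builder = FlatBufferBuilder::new_in(allocator);
/// let s = builder.create_string("hello");
/// builder.finish_minimal(s);
/// let bytes = builder.finished_data();
/// ```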
pub unsafe trait Allocator: DerefMut<Target = [u8]> {
    /// A type describing allocation failures.
    type Error: Display + Debug;
    /// Grows the buffer, with the old contents being moved to the end.
    ///
    /// NOTE: While not unsound, an implementation that doesn't grow the
    /// internal buffer will get stuck in an infinite loop.
    fn grow_downwards(&mut self) -> Result<(), Self::Error>;

    /// Returns the size of the internal buffer in bytes.
    fn len(&self) -> usize;
}

/// Default [`FlatBufferBuilder`] allocator backed by a [`Vec<u8>`].
#[derive(Default)]
pub struct DefaultAllocator(Vec<u8>);

impl DefaultAllocator {
    /// Builds the allocator from an existing buffer.
    pub fn from_vec(buffer: Vec<u8>) -> Self {
        Self(buffer)
    }
}

impl Deref for DefaultAllocator {
    type Target = [u8];

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

impl DerefMut for DefaultAllocator {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0
    }
}

// SAFETY: The methods are implemented as described by the documentation.
unsafe impl Allocator for DefaultAllocator {
    type Error = Infallible;
    fn grow_downwards(&mut self) -> Result<(), Self::Error> {
        let old_len = self.0.len();
        let new_len = max(1, old_len * 2);

        self.0.resize(new_len, 0);

        if new_len == 1 {
            return Ok(());
        }

        // calculate the midpoint, and safely copy the old end data to the new
        // end position:
        let middle = new_len / 2;
        {
            let (left, right) = self.0.split_at_mut(middle);
            right.copy_from_slice(left);
        }
        // finally, zero out the old end data.
        {
            let ptr = self.0[..middle].as_mut_ptr();
            // Safety:
            // ptr is byte aligned and of length middle
            unsafe {
                write_bytes(ptr, 0, middle);
            }
        }
        Ok(())
    }

    fn len(&self) -> usize {
        self.0.len()
    }
}

#[derive(Clone, Copy, Debug, Eq, PartialEq)]
struct FieldLoc {
    off: UOffsetT,
    id: VOffsetT,
}
/// FlatBufferBuilder builds a FlatBuffer through manipulating its internal
/// state. Its internal buffer (a `Vec<u8>` under the [`DefaultAllocator`])
/// grows as needed, up to the hardcoded limit of 2GiB set by the FlatBuffers
/// format.
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct FlatBufferBuilder<'fbb, A: Allocator = DefaultAllocator> {
    allocator: A,
    head: ReverseIndex,

    field_locs: Vec<FieldLoc>,
    written_vtable_revpos: Vec<UOffsetT>,

    nested: bool,
    finished: bool,

    min_align: usize,
    force_defaults: bool,
    strings_pool: Vec<WIPOffset<&'fbb str>>,

    _phantom: PhantomData<&'fbb ()>,
}

impl<'fbb> FlatBufferBuilder<'fbb, DefaultAllocator> {
    /// Create a FlatBufferBuilder that is ready for writing.
    pub fn new() -> Self {
        Self::with_capacity(0)
    }
    #[deprecated(note = "replaced with `with_capacity`", since = "0.8.5")]
    pub fn new_with_capacity(size: usize) -> Self {
        Self::with_capacity(size)
    }
    /// Create a FlatBufferBuilder that is ready for writing, with a
    /// ready-to-use capacity of the provided size.
    ///
    /// The maximum valid value is `FLATBUFFERS_MAX_BUFFER_SIZE`.
    pub fn with_capacity(size: usize) -> Self {
        Self::from_vec(vec![0; size])
    }
    /// Create a FlatBufferBuilder that is ready for writing, reusing
    /// an existing vector.
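    ///
    /// For example (a sketch; `old_buffer` is any `Vec<u8>` you already own,
    /// e.g. one returned by [`FlatBufferBuilder::collapse`]):
    ///
    /// ```ignore
    /// let mut builder = FlatBufferBuilder::from_vec(old_buffer);
    /// ```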
    pub fn from_vec(buffer: Vec<u8>) -> Self {
        // we need to check the size here because we create the backing buffer
        // directly, bypassing the typical way of using grow_allocator:
        assert!(
            buffer.len() <= FLATBUFFERS_MAX_BUFFER_SIZE,
            "cannot initialize buffer bigger than 2 gigabytes"
        );
        let allocator = DefaultAllocator::from_vec(buffer);
        Self::new_in(allocator)
    }

    /// Destroy the FlatBufferBuilder, returning its internal byte vector
    /// and the index into it that represents the start of valid data.
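    ///
    /// A minimal sketch of the round trip:
    ///
    /// ```ignore
    /// let mut builder = FlatBufferBuilder::new();
    /// let root = builder.create_string("hi");
    /// builder.finish_minimal(root);
    /// let (buf, index) = builder.collapse();
    /// let finished_bytes = &buf[index..]; // same bytes as finished_data()
    /// ```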
    pub fn collapse(self) -> (Vec<u8>, usize) {
        let index = self.head.to_forward_index(&self.allocator);
        (self.allocator.0, index)
    }
}

impl<'fbb, A: Allocator> FlatBufferBuilder<'fbb, A> {
    /// Create a [`FlatBufferBuilder`] that is ready for writing with a custom [`Allocator`].
    pub fn new_in(allocator: A) -> Self {
        let head = ReverseIndex::end();
        FlatBufferBuilder {
            allocator,
            head,

            field_locs: Vec::new(),
            written_vtable_revpos: Vec::new(),

            nested: false,
            finished: false,

            min_align: 0,
            force_defaults: false,
            strings_pool: Vec::new(),

            _phantom: PhantomData,
        }
    }

    /// Destroy the [`FlatBufferBuilder`], returning its [`Allocator`] and the index
    /// into it that represents the start of valid data.
    pub fn collapse_in(self) -> (A, usize) {
        let index = self.head.to_forward_index(&self.allocator);
        (self.allocator, index)
    }

    /// Reset the FlatBufferBuilder internal state. Use this method after a
    /// call to a `finish` function in order to re-use a FlatBufferBuilder.
    ///
    /// This function is the only way to reset the `finished` state and start
    /// again.
    ///
    /// If you are using a FlatBufferBuilder repeatedly, make sure to use this
    /// function, because it re-uses the FlatBufferBuilder's existing
    /// heap-allocated `Vec<u8>` internal buffer. This offers significant speed
    /// improvements as compared to creating a new FlatBufferBuilder for every
    /// new object.
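    ///
    /// A minimal reuse sketch; `process` is a hypothetical consumer of the
    /// finished bytes:
    ///
    /// ```ignore
    /// let mut builder = FlatBufferBuilder::new();
    /// for name in ["a", "b"] {
    ///     let s = builder.create_string(name);
    ///     builder.finish_minimal(s);
    ///     process(builder.finished_data());
    ///     builder.reset(); // reuse the same backing memory for the next message
    /// }
    /// ```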
    pub fn reset(&mut self) {
        // memset only the part of the buffer that could be dirty:
        self.allocator[self.head.range_to_end()]
            .iter_mut()
            .for_each(|x| *x = 0);

        self.head = ReverseIndex::end();
        self.written_vtable_revpos.clear();

        self.nested = false;
        self.finished = false;

        self.min_align = 0;
        self.strings_pool.clear();
    }

    /// Push a Push'able value onto the front of the in-progress data.
    ///
    /// This function uses traits to provide a unified API for writing
    /// scalars, tables, vectors, and WIPOffsets.
    #[inline]
    pub fn push<P: Push>(&mut self, x: P) -> WIPOffset<P::Output> {
        let sz = P::size();
        self.align(sz, P::alignment());
        self.make_space(sz);
        {
            let (dst, rest) = self.allocator[self.head.range_to_end()].split_at_mut(sz);
            // Safety:
            // Called make_space above
            unsafe { x.push(dst, rest.len()) };
        }
        WIPOffset::new(self.used_space() as UOffsetT)
    }

    /// Push a Push'able value onto the front of the in-progress data, and
    /// store a reference to it in the in-progress vtable. If the value matches
    /// the default, then this is a no-op.
    #[inline]
    pub fn push_slot<X: Push + PartialEq>(&mut self, slotoff: VOffsetT, x: X, default: X) {
        self.assert_nested("push_slot");
        if x != default || self.force_defaults {
            self.push_slot_always(slotoff, x);
        }
    }

    /// Push a Push'able value onto the front of the in-progress data, and
    /// store a reference to it in the in-progress vtable.
    #[inline]
    pub fn push_slot_always<X: Push>(&mut self, slotoff: VOffsetT, x: X) {
        self.assert_nested("push_slot_always");
        let off = self.push(x);
        self.track_field(slotoff, off.value());
    }

    /// Retrieve the number of vtables that have been serialized into the
    /// FlatBuffer. This is primarily used to check vtable deduplication.
    #[inline]
    pub fn num_written_vtables(&self) -> usize {
        self.written_vtable_revpos.len()
    }

    /// Start a Table write.
    ///
    /// Asserts that the builder is not in a nested state.
    ///
    /// Users probably want to use `push_slot` to add values after calling this.
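    ///
    /// A minimal sketch of a manual table write. Real vtable offsets normally
    /// come from generated code; `4` here is simply the vtable offset of the
    /// first field:
    ///
    /// ```ignore
    /// let mut builder = FlatBufferBuilder::new();
    /// let start = builder.start_table();
    /// builder.push_slot(4, 42u8, 0u8); // written only because 42 != default 0
    /// let table = builder.end_table(start);
    /// ```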
    #[inline]
    pub fn start_table(&mut self) -> WIPOffset<TableUnfinishedWIPOffset> {
        self.assert_not_nested(
            "start_table can not be called when a table or vector is under construction",
        );
        self.nested = true;

        WIPOffset::new(self.used_space() as UOffsetT)
    }

    /// End a Table write.
    ///
    /// Asserts that the builder is in a nested state.
    #[inline]
    pub fn end_table(
        &mut self,
        off: WIPOffset<TableUnfinishedWIPOffset>,
    ) -> WIPOffset<TableFinishedWIPOffset> {
        self.assert_nested("end_table");

        let o = self.write_vtable(off);

        self.nested = false;
        self.field_locs.clear();

        WIPOffset::new(o.value())
    }

    /// Start a Vector write.
    ///
    /// Asserts that the builder is not in a nested state.
    ///
    /// Most users will prefer to call `create_vector`.
    /// Speed optimizing users who choose to create vectors manually using this
    /// function will want to use `push` to add values.
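    ///
    /// A minimal sketch; because the buffer is written back-to-front, elements
    /// are pushed in reverse order:
    ///
    /// ```ignore
    /// let mut builder = FlatBufferBuilder::new();
    /// builder.start_vector::<u8>(3);
    /// builder.push(2u8);
    /// builder.push(1u8);
    /// builder.push(0u8);
    /// let v = builder.end_vector::<u8>(3); // the vector [0, 1, 2]
    /// ```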
    #[inline]
    pub fn start_vector<T: Push>(&mut self, num_items: usize) {
        self.assert_not_nested(
            "start_vector can not be called when a table or vector is under construction",
        );
        self.nested = true;
        self.align(num_items * T::size(), T::alignment().max_of(SIZE_UOFFSET));
    }

    /// End a Vector write.
    ///
    /// Note that the `num_elems` parameter is the number of written items, not
    /// the byte count.
    ///
    /// Asserts that the builder is in a nested state.
    #[inline]
    pub fn end_vector<T: Push>(&mut self, num_elems: usize) -> WIPOffset<Vector<'fbb, T>> {
        self.assert_nested("end_vector");
        self.nested = false;
        let o = self.push::<UOffsetT>(num_elems as UOffsetT);
        WIPOffset::new(o.value())
    }

    /// Create a utf8 string, deduplicating it against the strings already
    /// written by this method.
    ///
    /// If an identical string was previously written via
    /// `create_shared_string`, its offset is returned instead of writing the
    /// bytes again.
    #[inline]
    pub fn create_shared_string<'a: 'b, 'b>(&'a mut self, s: &'b str) -> WIPOffset<&'fbb str> {
        self.assert_not_nested(
            "create_shared_string can not be called when a table or vector is under construction",
        );

        // Save a reference to the allocator, since the borrow checker doesn't
        // let us reference `self.allocator` inside the `binary_search_by`
        // closure.
        let buf = &self.allocator;

        let found = self.strings_pool.binary_search_by(|offset| {
            let ptr = offset.value() as usize;
            // The offset counts back from the end of the buffer, so this slice
            // starts at the string's u32 length prefix.
            let str_memory = &buf[buf.len() - ptr..];
            // Read the length of the already-written string.
            let size =
                u32::from_le_bytes([str_memory[0], str_memory[1], str_memory[2], str_memory[3]])
                    as usize;
            // The length prefix itself occupies 4 bytes.
            let string_size: usize = 4;
            // Take the string bytes that follow the length prefix...
            let iter = str_memory[string_size..size + string_size].iter();
            // ...and compare them with the string being written.
            iter.cloned().cmp(s.bytes())
        });

        match found {
            Ok(index) => self.strings_pool[index],
            Err(index) => {
                let address = WIPOffset::new(self.create_byte_string(s.as_bytes()).value());
                self.strings_pool.insert(index, address);
                address
            }
        }
    }

    /// Create a utf8 string.
    ///
    /// The wire format represents this as a zero-terminated byte vector.
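    ///
    /// A minimal sketch:
    ///
    /// ```ignore
    /// let mut builder = FlatBufferBuilder::new();
    /// let name = builder.create_string("hello");
    /// // `name` can now be stored in a table field, e.g. via `push_slot_always`.
    /// ```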
    #[inline]
    pub fn create_string<'a: 'b, 'b>(&'a mut self, s: &'b str) -> WIPOffset<&'fbb str> {
        self.assert_not_nested(
            "create_string can not be called when a table or vector is under construction",
        );
        WIPOffset::new(self.create_byte_string(s.as_bytes()).value())
    }

    /// Create a zero-terminated byte vector.
    #[inline]
    pub fn create_byte_string(&mut self, data: &[u8]) -> WIPOffset<&'fbb [u8]> {
        self.assert_not_nested(
            "create_byte_string can not be called when a table or vector is under construction",
        );
        self.align(data.len() + 1, PushAlignment::new(SIZE_UOFFSET));
        self.push(0u8);
        self.push_bytes_unprefixed(data);
        self.push(data.len() as UOffsetT);
        WIPOffset::new(self.used_space() as UOffsetT)
    }

    /// Create a vector of Push-able objects.
    ///
    /// Speed-sensitive users may wish to reduce memory usage by creating the
    /// vector manually: use `start_vector`, `push`, and `end_vector`.
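    ///
    /// A minimal sketch:
    ///
    /// ```ignore
    /// let mut builder = FlatBufferBuilder::new();
    /// let v = builder.create_vector(&[1u32, 2, 3]);
    /// ```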
    #[inline]
    pub fn create_vector<'a: 'b, 'b, T: Push + 'b>(
        &'a mut self,
        items: &'b [T],
    ) -> WIPOffset<Vector<'fbb, T::Output>> {
        let elem_size = T::size();
        let slice_size = items.len() * elem_size;
        self.align(slice_size, T::alignment().max_of(SIZE_UOFFSET));
        self.ensure_capacity(slice_size + UOffsetT::size());

        self.head -= slice_size;
        let mut written_len = self.head.distance_to_end();

        let buf = &mut self.allocator[self.head.range_to(self.head + slice_size)];
        for (item, out) in items.iter().zip(buf.chunks_exact_mut(elem_size)) {
            written_len -= elem_size;

            // Safety:
            // Called ensure_capacity and aligned to T above
            unsafe { item.push(out, written_len) };
        }

        WIPOffset::new(self.push::<UOffsetT>(items.len() as UOffsetT).value())
    }

    /// Create a vector of Push-able objects from an iterator.
    ///
    /// Speed-sensitive users may wish to reduce memory usage by creating the
    /// vector manually: use `start_vector`, `push`, and `end_vector`.
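    ///
    /// A minimal sketch; plain ranges satisfy the iterator bounds:
    ///
    /// ```ignore
    /// let mut builder = FlatBufferBuilder::new();
    /// let v = builder.create_vector_from_iter(0u32..5);
    /// ```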
    #[inline]
    pub fn create_vector_from_iter<T: Push>(
        &mut self,
        items: impl ExactSizeIterator<Item = T> + DoubleEndedIterator,
    ) -> WIPOffset<Vector<'fbb, T::Output>> {
        let elem_size = T::size();
        self.align(items.len() * elem_size, T::alignment().max_of(SIZE_UOFFSET));
        let mut actual = 0;
        for item in items.rev() {
            self.push(item);
            actual += 1;
        }
        WIPOffset::new(self.push::<UOffsetT>(actual).value())
    }

    /// Set whether default values are stored.
    ///
    /// In order to save space, fields that are set to their default value
    /// aren't stored in the buffer. Setting `force_defaults` to `true`
    /// disables this optimization.
    ///
    /// By default, `force_defaults` is `false`.
    #[inline]
    pub fn force_defaults(&mut self, force_defaults: bool) {
        self.force_defaults = force_defaults;
    }

    /// Get the byte slice for the data that has been written, regardless of
    /// whether it has been finished.
    #[inline]
    pub fn unfinished_data(&self) -> &[u8] {
        &self.allocator[self.head.range_to_end()]
    }
    /// Get the byte slice for the data that has been written after a call to
    /// one of the `finish` functions.
    /// # Panics
    /// Panics if the buffer is not finished.
    #[inline]
    pub fn finished_data(&self) -> &[u8] {
        self.assert_finished("finished_data cannot be called when the buffer is not yet finished");
        &self.allocator[self.head.range_to_end()]
    }
    /// Returns a mutable view of the finished buffer and the location where
    /// the flatbuffer starts. Note that modifying the flatbuffer data may
    /// corrupt it.
    /// # Panics
    /// Panics if the flatbuffer is not finished.
    #[inline]
    pub fn mut_finished_buffer(&mut self) -> (&mut [u8], usize) {
        self.assert_finished(
            "mut_finished_buffer cannot be called when the buffer is not yet finished",
        );
        let index = self.head.to_forward_index(&self.allocator);
        (&mut self.allocator[..], index)
    }
    /// Assert that a field is present in the just-finished Table.
    ///
    /// This is somewhat low-level and is mostly used by the generated code.
    #[inline]
    pub fn required(
        &self,
        tab_revloc: WIPOffset<TableFinishedWIPOffset>,
        slot_byte_loc: VOffsetT,
        assert_msg_name: &'static str,
    ) {
        let idx = self.used_space() - tab_revloc.value() as usize;

        // Safety:
        // The value of TableFinishedWIPOffset is the offset from the end of the allocator
        // to an SOffsetT pointing to a valid VTable
        //
        // `self.allocator.len() = self.used_space() + self.head`
        // `self.allocator.len() - tab_revloc = self.used_space() - tab_revloc + self.head`
        // `self.allocator.len() - tab_revloc = idx + self.head`
        let tab = unsafe { Table::new(&self.allocator[self.head.range_to_end()], idx) };
        let o = tab.vtable().get(slot_byte_loc) as usize;
        assert!(o != 0, "missing required field {}", assert_msg_name);
    }

    /// Finalize the FlatBuffer by: aligning it, pushing an optional file
    /// identifier on to it, pushing a size prefix on to it, and marking the
    /// internal state of the FlatBufferBuilder as `finished`. Afterwards,
    /// users can call `finished_data` to get the resulting data.
    #[inline]
    pub fn finish_size_prefixed<T>(&mut self, root: WIPOffset<T>, file_identifier: Option<&str>) {
        self.finish_with_opts(root, file_identifier, true);
    }

    /// Finalize the FlatBuffer by: aligning it, pushing an optional file
    /// identifier on to it, and marking the internal state of the
    /// FlatBufferBuilder as `finished`. Afterwards, users can call
    /// `finished_data` to get the resulting data.
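    ///
    /// A minimal sketch; `"MONS"` is just an example identifier (file
    /// identifiers are exactly `FILE_IDENTIFIER_LENGTH` bytes long):
    ///
    /// ```ignore
    /// let mut builder = FlatBufferBuilder::new();
    /// let root = builder.create_string("root");
    /// builder.finish(root, Some("MONS"));
    /// let bytes = builder.finished_data();
    /// ```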
    #[inline]
    pub fn finish<T>(&mut self, root: WIPOffset<T>, file_identifier: Option<&str>) {
        self.finish_with_opts(root, file_identifier, false);
    }

    /// Finalize the FlatBuffer by: aligning it and marking the internal state
    /// of the FlatBufferBuilder as `finished`. Afterwards, users can call
    /// `finished_data` to get the resulting data.
    #[inline]
    pub fn finish_minimal<T>(&mut self, root: WIPOffset<T>) {
        self.finish_with_opts(root, None, false);
    }

    #[inline]
    fn used_space(&self) -> usize {
        self.head.distance_to_end()
    }

    #[inline]
    fn track_field(&mut self, slot_off: VOffsetT, off: UOffsetT) {
        let fl = FieldLoc { id: slot_off, off };
        self.field_locs.push(fl);
    }

    /// Write the VTable, if it is new.
    fn write_vtable(
        &mut self,
        table_tail_revloc: WIPOffset<TableUnfinishedWIPOffset>,
    ) -> WIPOffset<VTableWIPOffset> {
        self.assert_nested("write_vtable");

        // Write the vtable offset, which is the start of any Table.
        // We fill its value later.
        let object_revloc_to_vtable: WIPOffset<VTableWIPOffset> =
            WIPOffset::new(self.push::<UOffsetT>(0xF0F0_F0F0).value());

        // Layout of the data this function will create when a new vtable is
        // needed.
        // --------------------------------------------------------------------
        // vtable starts here
        // | x, x -- vtable len (bytes) [u16]
        // | x, x -- object inline len (bytes) [u16]
        // | x, x -- zero, or num bytes from start of object to field #0   [u16]
        // | ...
        // | x, x -- zero, or num bytes from start of object to field #n-1 [u16]
        // vtable ends here
        // table starts here
        // | x, x, x, x -- offset (negative direction) to the vtable [i32]
        // |               aka "vtableoffset"
        // | -- table inline data begins here, we don't touch it --
        // table ends here -- aka "table_start"
        // --------------------------------------------------------------------
        //
        // Layout of the data this function will create when we re-use an
        // existing vtable.
        //
        // We always serialize this particular vtable, then compare it to the
        // other vtables we know about to see if there is a duplicate. If there
        // is, then we erase the serialized vtable we just made.
        // We serialize it first so that we are able to do byte-by-byte
        // comparisons with already-serialized vtables. This 1) saves
        // bookkeeping space (we only keep revlocs to existing vtables), 2)
        // allows us to convert to little-endian once, then do
        // fast memcmp comparisons, and 3) by ensuring we are comparing real
        // serialized vtables, we can be more assured that we are doing the
        // comparisons correctly.
        //
        // --------------------------------------------------------------------
        // table starts here
        // | x, x, x, x -- offset (negative direction) to an existing vtable [i32]
        // |               aka "vtableoffset"
        // | -- table inline data begins here, we don't touch it --
        // table ends here -- aka "table_start"
        // --------------------------------------------------------------------

        // fill the WIP vtable with zeros:
        let vtable_byte_len = get_vtable_byte_len(&self.field_locs);
        self.make_space(vtable_byte_len);

        // compute the length of the table (not vtable!) in bytes:
        let table_object_size = object_revloc_to_vtable.value() - table_tail_revloc.value();
        debug_assert!(table_object_size < 0x10000); // vtables use 16-bit offsets.

        // Write the VTable (we may delete it afterwards, if it is a duplicate):
        let vt_start_pos = self.head;
        let vt_end_pos = self.head + vtable_byte_len;
        {
            // write the vtable header:
            let vtfw =
                &mut VTableWriter::init(&mut self.allocator[vt_start_pos.range_to(vt_end_pos)]);
            vtfw.write_vtable_byte_length(vtable_byte_len as VOffsetT);
            vtfw.write_object_inline_size(table_object_size as VOffsetT);

            // serialize every FieldLoc to the vtable:
            for &fl in self.field_locs.iter() {
                let pos: VOffsetT = (object_revloc_to_vtable.value() - fl.off) as VOffsetT;
                vtfw.write_field_offset(fl.id, pos);
            }
        }
        let new_vt_bytes = &self.allocator[vt_start_pos.range_to(vt_end_pos)];
        let found = self
            .written_vtable_revpos
            .binary_search_by(|old_vtable_revpos: &UOffsetT| {
                let old_vtable_pos = self.allocator.len() - *old_vtable_revpos as usize;
                // Safety:
                // Already written vtables are valid by construction
                let old_vtable = unsafe { VTable::init(&self.allocator, old_vtable_pos) };
                new_vt_bytes.cmp(old_vtable.as_bytes())
            });
        let final_vtable_revpos = match found {
            Ok(i) => {
                // The new vtable is a duplicate so clear it.
                VTableWriter::init(&mut self.allocator[vt_start_pos.range_to(vt_end_pos)]).clear();
                self.head += vtable_byte_len;
                self.written_vtable_revpos[i]
            }
            Err(i) => {
                // This is a new vtable. Add it to the cache.
                let new_vt_revpos = self.used_space() as UOffsetT;
                self.written_vtable_revpos.insert(i, new_vt_revpos);
                new_vt_revpos
            }
        };
        // Write signed offset from table to its vtable.
        let table_pos = self.allocator.len() - object_revloc_to_vtable.value() as usize;
        if cfg!(debug_assertions) {
            // Safety:
            // Verified slice length
            let tmp_soffset_to_vt = unsafe {
                read_scalar::<UOffsetT>(&self.allocator[table_pos..table_pos + SIZE_UOFFSET])
            };
            assert_eq!(tmp_soffset_to_vt, 0xF0F0_F0F0);
        }

        let buf = &mut self.allocator[table_pos..table_pos + SIZE_SOFFSET];
        // Safety:
        // Verified length of buf above
        unsafe {
            emplace_scalar::<SOffsetT>(
                buf,
                final_vtable_revpos as SOffsetT - object_revloc_to_vtable.value() as SOffsetT,
            );
        }

        self.field_locs.clear();

        object_revloc_to_vtable
    }

    // Only call this when you know it is safe to double the size of the buffer.
    #[inline]
    fn grow_allocator(&mut self) {
        let starting_active_size = self.used_space();
        self.allocator
            .grow_downwards()
            .expect("Flatbuffer allocation failure");

        let ending_active_size = self.used_space();
        debug_assert_eq!(starting_active_size, ending_active_size);
    }

    // With or without a size prefix changes how we load the data, so finish*
    // functions are split along those lines.
    fn finish_with_opts<T>(
        &mut self,
        root: WIPOffset<T>,
        file_identifier: Option<&str>,
        size_prefixed: bool,
    ) {
        self.assert_not_finished("buffer cannot be finished when it is already finished");
        self.assert_not_nested(
            "buffer cannot be finished when a table or vector is under construction",
        );
        self.written_vtable_revpos.clear();

        let to_align = {
            // for the root offset:
            let a = SIZE_UOFFSET;
            // for the size prefix:
            let b = if size_prefixed { SIZE_UOFFSET } else { 0 };
            // for the file identifier (a string that is not zero-terminated):
            let c = if file_identifier.is_some() {
                FILE_IDENTIFIER_LENGTH
            } else {
                0
            };
            a + b + c
        };

        {
            let ma = PushAlignment::new(self.min_align);
            self.align(to_align, ma);
        }

        if let Some(ident) = file_identifier {
            debug_assert_eq!(ident.len(), FILE_IDENTIFIER_LENGTH);
            self.push_bytes_unprefixed(ident.as_bytes());
        }

        self.push(root);

        if size_prefixed {
            let sz = self.used_space() as UOffsetT;
            self.push::<UOffsetT>(sz);
        }
        self.finished = true;
    }

    #[inline]
    fn align(&mut self, len: usize, alignment: PushAlignment) {
        self.track_min_align(alignment.value());
        let s = self.used_space();
        self.make_space(padding_bytes(s + len, alignment.value()));
    }

    #[inline]
    fn track_min_align(&mut self, alignment: usize) {
        self.min_align = max(self.min_align, alignment);
    }

    #[inline]
    fn push_bytes_unprefixed(&mut self, x: &[u8]) -> UOffsetT {
        let n = self.make_space(x.len());
        self.allocator[n.range_to(n + x.len())].copy_from_slice(x);

        n.to_forward_index(&self.allocator) as UOffsetT
    }

    #[inline]
    fn make_space(&mut self, want: usize) -> ReverseIndex {
        self.ensure_capacity(want);
        self.head -= want;
        self.head
    }

    #[inline]
    fn ensure_capacity(&mut self, want: usize) -> usize {
        if self.unused_ready_space() >= want {
            return want;
        }
        assert!(
            want <= FLATBUFFERS_MAX_BUFFER_SIZE,
            "cannot grow buffer beyond 2 gigabytes"
        );

        while self.unused_ready_space() < want {
            self.grow_allocator();
        }
        want
    }
    #[inline]
    fn unused_ready_space(&self) -> usize {
        self.allocator.len() - self.head.distance_to_end()
    }
    #[inline]
    fn assert_nested(&self, fn_name: &'static str) {
        // we don't assert that self.field_locs.len() > 0 because the vtable
        // could be empty (e.g. for empty tables, or for all-default values).
        debug_assert!(
            self.nested,
            "incorrect FlatBufferBuilder usage: {} must be called while in a nested state",
            fn_name
        );
    }
    #[inline]
    fn assert_not_nested(&self, msg: &'static str) {
        debug_assert!(!self.nested, "{}", msg);
    }
    #[inline]
    fn assert_finished(&self, msg: &'static str) {
        debug_assert!(self.finished, "{}", msg);
    }
    #[inline]
    fn assert_not_finished(&self, msg: &'static str) {
        debug_assert!(!self.finished, "{}", msg);
    }
}

/// Compute the length of the vtable needed to represent the provided FieldLocs.
/// If there are no FieldLocs, then provide the minimum number of bytes
/// required: enough to write the VTable header.
#[inline]
fn get_vtable_byte_len(field_locs: &[FieldLoc]) -> usize {
    let max_voffset = field_locs.iter().map(|fl| fl.id).max();
    match max_voffset {
        None => field_index_to_field_offset(0) as usize,
        Some(mv) => mv as usize + SIZE_VOFFSET,
    }
}

/// Returns the number of zero bytes needed to round `buf_size` up to a
/// multiple of `scalar_size` (a power of two), e.g. `padding_bytes(5, 4) == 3`
/// and `padding_bytes(8, 4) == 0`.
#[inline]
fn padding_bytes(buf_size: usize, scalar_size: usize) -> usize {
    // ((!buf_size) + 1) & (scalar_size - 1)
    (!buf_size).wrapping_add(1) & (scalar_size.wrapping_sub(1))
}

impl<'fbb> Default for FlatBufferBuilder<'fbb> {
    fn default() -> Self {
        Self::with_capacity(0)
    }
}

/// An index that indexes from the reverse of a slice.
///
/// Note that while the internal representation is an index
/// from the end of a buffer, operations like `Add` and `Sub`
/// behave like a regular index:
///
/// # Examples
///
/// ```ignore
/// let buf = [0, 1, 2, 3, 4, 5];
/// let idx = ReverseIndex::end() - 2;
/// assert_eq!(&buf[idx.range_to_end()], &[4, 5]);
/// assert_eq!(idx.to_forward_index(&buf), 4);
/// ```
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
struct ReverseIndex(usize);

impl ReverseIndex {
    /// Returns an index set to the end.
    ///
    /// Note: Indexing this will result in an out of bounds error.
    pub fn end() -> Self {
        Self(0)
    }

    /// Returns a struct equivalent to the range `self..`
    pub fn range_to_end(self) -> ReverseIndexRange {
        ReverseIndexRange(self, ReverseIndex::end())
    }

    /// Returns a struct equivalent to the range `self..end`
    pub fn range_to(self, end: ReverseIndex) -> ReverseIndexRange {
        ReverseIndexRange(self, end)
    }

    /// Transforms this reverse index into a regular index for the given buffer.
    pub fn to_forward_index<T>(self, buf: &[T]) -> usize {
        buf.len() - self.0
    }

    /// Returns the number of elements until the end of the range.
    pub fn distance_to_end(&self) -> usize {
        self.0
    }
}

impl Sub<usize> for ReverseIndex {
    type Output = Self;

    fn sub(self, rhs: usize) -> Self::Output {
        Self(self.0 + rhs)
    }
}

impl SubAssign<usize> for ReverseIndex {
    fn sub_assign(&mut self, rhs: usize) {
        *self = *self - rhs;
    }
}

impl Add<usize> for ReverseIndex {
    type Output = Self;

    fn add(self, rhs: usize) -> Self::Output {
        Self(self.0 - rhs)
    }
}

impl AddAssign<usize> for ReverseIndex {
    fn add_assign(&mut self, rhs: usize) {
        *self = *self + rhs;
    }
}

impl<T> Index<ReverseIndex> for [T] {
    type Output = T;

    fn index(&self, index: ReverseIndex) -> &Self::Output {
        let index = index.to_forward_index(self);
        &self[index]
    }
}

impl<T> IndexMut<ReverseIndex> for [T] {
    fn index_mut(&mut self, index: ReverseIndex) -> &mut Self::Output {
        let index = index.to_forward_index(self);
        &mut self[index]
    }
}

#[derive(Clone, Copy, Debug, Eq, PartialEq)]
struct ReverseIndexRange(ReverseIndex, ReverseIndex);

impl<T> Index<ReverseIndexRange> for [T] {
    type Output = [T];

    fn index(&self, index: ReverseIndexRange) -> &Self::Output {
        let start = index.0.to_forward_index(self);
        let end = index.1.to_forward_index(self);
        &self[start..end]
    }
}

impl<T> IndexMut<ReverseIndexRange> for [T] {
    fn index_mut(&mut self, index: ReverseIndexRange) -> &mut Self::Output {
        let start = index.0.to_forward_index(self);
        let end = index.1.to_forward_index(self);
        &mut self[start..end]
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn reverse_index_test() {
        let buf = [0, 1, 2, 3, 4, 5];
        let idx = ReverseIndex::end() - 2;
        assert_eq!(&buf[idx.range_to_end()], &[4, 5]);
        assert_eq!(&buf[idx.range_to(idx + 1)], &[4]);
        assert_eq!(idx.to_forward_index(&buf), 4);
    }
}