#[cfg(not(feature = "std"))]
use alloc::{vec, vec::Vec};
use core::cmp::max;
use core::convert::Infallible;
use core::fmt::{Debug, Display};
use core::iter::{DoubleEndedIterator, ExactSizeIterator};
use core::marker::PhantomData;
use core::ops::{Add, AddAssign, Deref, DerefMut, Index, IndexMut, Sub, SubAssign};
use core::ptr::write_bytes;

use crate::endian_scalar::emplace_scalar;
use crate::primitives::*;
use crate::push::{Push, PushAlignment};
use crate::read_scalar;
use crate::table::Table;
use crate::vector::Vector;
use crate::vtable::{field_index_to_field_offset, VTable};
use crate::vtable_writer::VTableWriter;

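/// Storage abstraction used by [`FlatBufferBuilder`] to write FlatBuffer data.
///
/// The buffer is written back-to-front, so `grow_downwards` must enlarge the
/// allocation and move the already-written bytes to the end of the new buffer.
///
/// # Safety
///
/// Implementations must preserve the bytes at the end of the buffer across
/// `grow_downwards`; the builder relies on this when it later re-reads and
/// patches data it has already pushed.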
pub unsafe trait Allocator: DerefMut<Target = [u8]> {
    type Error: Display + Debug;

    fn grow_downwards(&mut self) -> Result<(), Self::Error>;

    fn len(&self) -> usize;
}

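/// The default [`Allocator`]: a `Vec<u8>` that doubles in size whenever the
/// builder runs out of room.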
#[derive(Default)]
pub struct DefaultAllocator(Vec<u8>);

impl DefaultAllocator {
    pub fn from_vec(buffer: Vec<u8>) -> Self {
        Self(buffer)
    }
}

impl Deref for DefaultAllocator {
    type Target = [u8];

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

impl DerefMut for DefaultAllocator {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0
    }
}

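// Growth strategy: double the `Vec`, copy the old contents into the back half
// of the new allocation, and zero the front half so the freshly exposed space
// is all zeroes.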
unsafe impl Allocator for DefaultAllocator {
    type Error = Infallible;
    fn grow_downwards(&mut self) -> Result<(), Self::Error> {
        let old_len = self.0.len();
        let new_len = max(1, old_len * 2);

        self.0.resize(new_len, 0);

        if new_len == 1 {
            return Ok(());
        }

        let middle = new_len / 2;
        {
            let (left, right) = &mut self.0[..].split_at_mut(middle);
            right.copy_from_slice(left);
        }
        {
            let ptr = self.0[..middle].as_mut_ptr();
            unsafe {
                write_bytes(ptr, 0, middle);
            }
        }
        Ok(())
    }

    fn len(&self) -> usize {
        self.0.len()
    }
}

#[derive(Clone, Copy, Debug, Eq, PartialEq)]
struct FieldLoc {
    off: UOffsetT,
    id: VOffsetT,
}

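/// Builds a FlatBuffer message by writing data backwards, from the end of an
/// [`Allocator`]'s buffer toward its front. `head` tracks the write position
/// as a distance from the end of the buffer, and identical vtables are
/// deduplicated via `written_vtable_revpos`.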
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct FlatBufferBuilder<'fbb, A: Allocator = DefaultAllocator> {
    allocator: A,
    head: ReverseIndex,

    field_locs: Vec<FieldLoc>,
    written_vtable_revpos: Vec<UOffsetT>,

    nested: bool,
    finished: bool,

    min_align: usize,
    force_defaults: bool,
    strings_pool: Vec<WIPOffset<&'fbb str>>,

    _phantom: PhantomData<&'fbb ()>,
}

impl<'fbb> FlatBufferBuilder<'fbb, DefaultAllocator> {
    pub fn new() -> Self {
        Self::with_capacity(0)
    }

    #[deprecated(note = "replaced with `with_capacity`", since = "0.8.5")]
    pub fn new_with_capacity(size: usize) -> Self {
        Self::with_capacity(size)
    }

    pub fn with_capacity(size: usize) -> Self {
        Self::from_vec(vec![0; size])
    }

    pub fn from_vec(buffer: Vec<u8>) -> Self {
        assert!(
            buffer.len() <= FLATBUFFERS_MAX_BUFFER_SIZE,
            "cannot initialize buffer bigger than 2 gigabytes"
        );
        let allocator = DefaultAllocator::from_vec(buffer);
        Self::new_in(allocator)
    }

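    /// Destroys the builder and returns the underlying `Vec<u8>` together with
    /// the forward index at which the builder's data begins.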
    pub fn collapse(self) -> (Vec<u8>, usize) {
        let index = self.head.to_forward_index(&self.allocator);
        (self.allocator.0, index)
    }
}

impl<'fbb, A: Allocator> FlatBufferBuilder<'fbb, A> {
    pub fn new_in(allocator: A) -> Self {
        let head = ReverseIndex::end();
        FlatBufferBuilder {
            allocator,
            head,

            field_locs: Vec::new(),
            written_vtable_revpos: Vec::new(),

            nested: false,
            finished: false,

            min_align: 0,
            force_defaults: false,
            strings_pool: Vec::new(),

            _phantom: PhantomData,
        }
    }

    pub fn collapse_in(self) -> (A, usize) {
        let index = self.head.to_forward_index(&self.allocator);
        (self.allocator, index)
    }

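    /// Resets the internal state so the builder and its backing buffer can be
    /// reused for a new message without reallocating. Only the bytes that were
    /// actually written are zeroed.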
    pub fn reset(&mut self) {
        self.allocator[self.head.range_to_end()]
            .iter_mut()
            .for_each(|x| *x = 0);

        self.head = ReverseIndex::end();
        self.written_vtable_revpos.clear();

        self.nested = false;
        self.finished = false;

        self.min_align = 0;
        self.strings_pool.clear();
    }

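    /// Pushes a `Push`-able value onto the front of the in-progress data,
    /// aligned to the value's own alignment, and returns an offset to it.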
    #[inline]
    pub fn push<P: Push>(&mut self, x: P) -> WIPOffset<P::Output> {
        let sz = P::size();
        self.align(sz, P::alignment());
        self.make_space(sz);
        {
            let (dst, rest) = self.allocator[self.head.range_to_end()].split_at_mut(sz);
            unsafe { x.push(dst, rest.len()) };
        }
        WIPOffset::new(self.used_space() as UOffsetT)
    }

    #[inline]
    pub fn push_slot<X: Push + PartialEq>(&mut self, slotoff: VOffsetT, x: X, default: X) {
        self.assert_nested("push_slot");
        if x != default || self.force_defaults {
            self.push_slot_always(slotoff, x);
        }
    }

    #[inline]
    pub fn push_slot_always<X: Push>(&mut self, slotoff: VOffsetT, x: X) {
        self.assert_nested("push_slot_always");
        let off = self.push(x);
        self.track_field(slotoff, off.value());
    }

    #[inline]
    pub fn num_written_vtables(&self) -> usize {
        self.written_vtable_revpos.len()
    }

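    /// Begins a new table; fields are added with the `push_slot` family of
    /// calls and the table is completed with `end_table`.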
    #[inline]
    pub fn start_table(&mut self) -> WIPOffset<TableUnfinishedWIPOffset> {
        self.assert_not_nested(
            "start_table can not be called when a table or vector is under construction",
        );
        self.nested = true;

        WIPOffset::new(self.used_space() as UOffsetT)
    }

    #[inline]
    pub fn end_table(
        &mut self,
        off: WIPOffset<TableUnfinishedWIPOffset>,
    ) -> WIPOffset<TableFinishedWIPOffset> {
        self.assert_nested("end_table");

        let o = self.write_vtable(off);

        self.nested = false;
        self.field_locs.clear();

        WIPOffset::new(o.value())
    }

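    /// Begins a vector of `num_items` elements of type `T`; the elements are
    /// then written with `push` (back-to-front, so in reverse order) and the
    /// vector is completed with `end_vector`.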
    #[inline]
    pub fn start_vector<T: Push>(&mut self, num_items: usize) {
        self.assert_not_nested(
            "start_vector can not be called when a table or vector is under construction",
        );
        self.nested = true;
        self.align(num_items * T::size(), T::alignment().max_of(SIZE_UOFFSET));
    }

    #[inline]
    pub fn end_vector<T: Push>(&mut self, num_elems: usize) -> WIPOffset<Vector<'fbb, T>> {
        self.assert_nested("end_vector");
        self.nested = false;
        let o = self.push::<UOffsetT>(num_elems as UOffsetT);
        WIPOffset::new(o.value())
    }

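    /// Writes a string, deduplicating it against every string previously
    /// written with this method: if an identical string is already in the
    /// buffer, the existing offset is returned instead of writing it again.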
    #[inline]
    pub fn create_shared_string<'a: 'b, 'b>(&'a mut self, s: &'b str) -> WIPOffset<&'fbb str> {
        self.assert_not_nested(
            "create_shared_string can not be called when a table or vector is under construction",
        );

        let buf = &self.allocator;

        let found = self.strings_pool.binary_search_by(|offset| {
            let ptr = offset.value() as usize;
            let str_memory = &buf[buf.len() - ptr..];
            let size =
                u32::from_le_bytes([str_memory[0], str_memory[1], str_memory[2], str_memory[3]])
                    as usize;
            let string_size: usize = 4;
            let iter = str_memory[string_size..size + string_size].iter();
            iter.cloned().cmp(s.bytes())
        });

        match found {
            Ok(index) => self.strings_pool[index],
            Err(index) => {
                let address = WIPOffset::new(self.create_byte_string(s.as_bytes()).value());
                self.strings_pool.insert(index, address);
                address
            }
        }
    }

    #[inline]
    pub fn create_string<'a: 'b, 'b>(&'a mut self, s: &'b str) -> WIPOffset<&'fbb str> {
        self.assert_not_nested(
            "create_string can not be called when a table or vector is under construction",
        );
        WIPOffset::new(self.create_byte_string(s.as_bytes()).value())
    }

    #[inline]
    pub fn create_byte_string(&mut self, data: &[u8]) -> WIPOffset<&'fbb [u8]> {
        self.assert_not_nested(
            "create_byte_string can not be called when a table or vector is under construction",
        );
        self.align(data.len() + 1, PushAlignment::new(SIZE_UOFFSET));
        self.push(0u8);
        self.push_bytes_unprefixed(data);
        self.push(data.len() as UOffsetT);
        WIPOffset::new(self.used_space() as UOffsetT)
    }

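    /// Writes a contiguous slice of `Push`-able items as a FlatBuffers vector
    /// and returns an offset to it; the element count is written as the
    /// vector's length prefix.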
    #[inline]
    pub fn create_vector<'a: 'b, 'b, T: Push + 'b>(
        &'a mut self,
        items: &'b [T],
    ) -> WIPOffset<Vector<'fbb, T::Output>> {
        let elem_size = T::size();
        let slice_size = items.len() * elem_size;
        self.align(slice_size, T::alignment().max_of(SIZE_UOFFSET));
        self.ensure_capacity(slice_size + UOffsetT::size());

        self.head -= slice_size;
        let mut written_len = self.head.distance_to_end();

        let buf = &mut self.allocator[self.head.range_to(self.head + slice_size)];
        for (item, out) in items.iter().zip(buf.chunks_exact_mut(elem_size)) {
            written_len -= elem_size;

            unsafe { item.push(out, written_len) };
        }

        WIPOffset::new(self.push::<UOffsetT>(items.len() as UOffsetT).value())
    }

    #[inline]
    pub fn create_vector_from_iter<T: Push>(
        &mut self,
        items: impl ExactSizeIterator<Item = T> + DoubleEndedIterator,
    ) -> WIPOffset<Vector<'fbb, T::Output>> {
        let elem_size = T::size();
        self.align(items.len() * elem_size, T::alignment().max_of(SIZE_UOFFSET));
        let mut actual = 0;
        for item in items.rev() {
            self.push(item);
            actual += 1;
        }
        WIPOffset::new(self.push::<UOffsetT>(actual).value())
    }

    #[inline]
    pub fn force_defaults(&mut self, force_defaults: bool) {
        self.force_defaults = force_defaults;
    }

    #[inline]
    pub fn unfinished_data(&self) -> &[u8] {
        &self.allocator[self.head.range_to_end()]
    }

    #[inline]
    pub fn finished_data(&self) -> &[u8] {
        self.assert_finished("finished_bytes cannot be called when the buffer is not yet finished");
        &self.allocator[self.head.range_to_end()]
    }

    #[inline]
    pub fn mut_finished_buffer(&mut self) -> (&mut [u8], usize) {
        let index = self.head.to_forward_index(&self.allocator);
        (&mut self.allocator[..], index)
    }

    #[inline]
    pub fn required(
        &self,
        tab_revloc: WIPOffset<TableFinishedWIPOffset>,
        slot_byte_loc: VOffsetT,
        assert_msg_name: &'static str,
    ) {
        let idx = self.used_space() - tab_revloc.value() as usize;

        let tab = unsafe { Table::new(&self.allocator[self.head.range_to_end()], idx) };
        let o = tab.vtable().get(slot_byte_loc) as usize;
        assert!(o != 0, "missing required field {}", assert_msg_name);
    }

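    /// Finishes the buffer with a size prefix: the size of the rest of the
    /// message is written as a `UOffsetT` in front of the root offset. An
    /// optional file identifier can also be written.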
    #[inline]
    pub fn finish_size_prefixed<T>(&mut self, root: WIPOffset<T>, file_identifier: Option<&str>) {
        self.finish_with_opts(root, file_identifier, true);
    }

    #[inline]
    pub fn finish<T>(&mut self, root: WIPOffset<T>, file_identifier: Option<&str>) {
        self.finish_with_opts(root, file_identifier, false);
    }

    #[inline]
    pub fn finish_minimal<T>(&mut self, root: WIPOffset<T>) {
        self.finish_with_opts(root, None, false);
    }

    #[inline]
    fn used_space(&self) -> usize {
        self.head.distance_to_end()
    }

    #[inline]
    fn track_field(&mut self, slot_off: VOffsetT, off: UOffsetT) {
        let fl = FieldLoc { id: slot_off, off };
        self.field_locs.push(fl);
    }

    fn write_vtable(
        &mut self,
        table_tail_revloc: WIPOffset<TableUnfinishedWIPOffset>,
    ) -> WIPOffset<VTableWIPOffset> {
        self.assert_nested("write_vtable");

        // Write a placeholder soffset that is later patched to point at the
        // table's vtable.
        let object_revloc_to_vtable: WIPOffset<VTableWIPOffset> =
            WIPOffset::new(self.push::<UOffsetT>(0xF0F0_F0F0).value());

        let vtable_byte_len = get_vtable_byte_len(&self.field_locs);
        self.make_space(vtable_byte_len);

        let table_object_size = object_revloc_to_vtable.value() - table_tail_revloc.value();
        debug_assert!(table_object_size < 0x10000); // The inline size must fit in a 16-bit vtable entry.

        let vt_start_pos = self.head;
        let vt_end_pos = self.head + vtable_byte_len;
        {
            let vtfw =
                &mut VTableWriter::init(&mut self.allocator[vt_start_pos.range_to(vt_end_pos)]);
            vtfw.write_vtable_byte_length(vtable_byte_len as VOffsetT);
            vtfw.write_object_inline_size(table_object_size as VOffsetT);

            for &fl in self.field_locs.iter() {
                let pos: VOffsetT = (object_revloc_to_vtable.value() - fl.off) as VOffsetT;
                vtfw.write_field_offset(fl.id, pos);
            }
        }
        // Deduplicate: if an identical vtable has already been written, reuse
        // it and discard the one just built.
        let new_vt_bytes = &self.allocator[vt_start_pos.range_to(vt_end_pos)];
        let found = self
            .written_vtable_revpos
            .binary_search_by(|old_vtable_revpos: &UOffsetT| {
                let old_vtable_pos = self.allocator.len() - *old_vtable_revpos as usize;
                let old_vtable = unsafe { VTable::init(&self.allocator, old_vtable_pos) };
                new_vt_bytes.cmp(old_vtable.as_bytes())
            });
        let final_vtable_revpos = match found {
            Ok(i) => {
                VTableWriter::init(&mut self.allocator[vt_start_pos.range_to(vt_end_pos)]).clear();
                self.head += vtable_byte_len;
                self.written_vtable_revpos[i]
            }
            Err(i) => {
                let new_vt_revpos = self.used_space() as UOffsetT;
                self.written_vtable_revpos.insert(i, new_vt_revpos);
                new_vt_revpos
            }
        };
        // Patch the placeholder soffset with the distance from the table to
        // its (possibly shared) vtable.
        let table_pos = self.allocator.len() - object_revloc_to_vtable.value() as usize;
        if cfg!(debug_assertions) {
            let tmp_soffset_to_vt = unsafe {
                read_scalar::<UOffsetT>(&self.allocator[table_pos..table_pos + SIZE_UOFFSET])
            };
            assert_eq!(tmp_soffset_to_vt, 0xF0F0_F0F0);
        }

        let buf = &mut self.allocator[table_pos..table_pos + SIZE_SOFFSET];
        unsafe {
            emplace_scalar::<SOffsetT>(
                buf,
                final_vtable_revpos as SOffsetT - object_revloc_to_vtable.value() as SOffsetT,
            );
        }

        self.field_locs.clear();

        object_revloc_to_vtable
    }

    #[inline]
    fn grow_allocator(&mut self) {
        let starting_active_size = self.used_space();
        self.allocator
            .grow_downwards()
            .expect("Flatbuffer allocation failure");

        let ending_active_size = self.used_space();
        debug_assert_eq!(starting_active_size, ending_active_size);
    }

    fn finish_with_opts<T>(
        &mut self,
        root: WIPOffset<T>,
        file_identifier: Option<&str>,
        size_prefixed: bool,
    ) {
        self.assert_not_finished("buffer cannot be finished when it is already finished");
        self.assert_not_nested(
            "buffer cannot be finished when a table or vector is under construction",
        );
        self.written_vtable_revpos.clear();

        // Align for the root uoffset, the optional size prefix, and the
        // optional file identifier.
        let to_align = {
            let a = SIZE_UOFFSET;
            let b = if size_prefixed { SIZE_UOFFSET } else { 0 };
            let c = if file_identifier.is_some() {
                FILE_IDENTIFIER_LENGTH
            } else {
                0
            };
            a + b + c
        };

        {
            let ma = PushAlignment::new(self.min_align);
            self.align(to_align, ma);
        }

        if let Some(ident) = file_identifier {
            debug_assert_eq!(ident.len(), FILE_IDENTIFIER_LENGTH);
            self.push_bytes_unprefixed(ident.as_bytes());
        }

        self.push(root);

        if size_prefixed {
            let sz = self.used_space() as UOffsetT;
            self.push::<UOffsetT>(sz);
        }
        self.finished = true;
    }

    #[inline]
    fn align(&mut self, len: usize, alignment: PushAlignment) {
        self.track_min_align(alignment.value());
        let s = self.used_space() as usize;
        self.make_space(padding_bytes(s + len, alignment.value()));
    }

    #[inline]
    fn track_min_align(&mut self, alignment: usize) {
        self.min_align = max(self.min_align, alignment);
    }

    #[inline]
    fn push_bytes_unprefixed(&mut self, x: &[u8]) -> UOffsetT {
        let n = self.make_space(x.len());
        self.allocator[n.range_to(n + x.len())].copy_from_slice(x);

        n.to_forward_index(&self.allocator) as UOffsetT
    }

    #[inline]
    fn make_space(&mut self, want: usize) -> ReverseIndex {
        self.ensure_capacity(want);
        self.head -= want;
        self.head
    }

    #[inline]
    fn ensure_capacity(&mut self, want: usize) -> usize {
        if self.unused_ready_space() >= want {
            return want;
        }
        assert!(
            want <= FLATBUFFERS_MAX_BUFFER_SIZE,
            "cannot grow buffer beyond 2 gigabytes"
        );

        while self.unused_ready_space() < want {
            self.grow_allocator();
        }
        want
    }

    #[inline]
    fn unused_ready_space(&self) -> usize {
        self.allocator.len() - self.head.distance_to_end()
    }
    #[inline]
    fn assert_nested(&self, fn_name: &'static str) {
        debug_assert!(
            self.nested,
            "incorrect FlatBufferBuilder usage: {} must be called while in a nested state",
            fn_name
        );
    }

    #[inline]
    fn assert_not_nested(&self, msg: &'static str) {
        debug_assert!(!self.nested, "{}", msg);
    }

    #[inline]
    fn assert_finished(&self, msg: &'static str) {
        debug_assert!(self.finished, "{}", msg);
    }

    #[inline]
    fn assert_not_finished(&self, msg: &'static str) {
        debug_assert!(!self.finished, "{}", msg);
    }
}

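/// Computes the size in bytes of the vtable needed for the given fields: the
/// vtable header (its own length plus the table's inline size) and one 16-bit
/// slot per field up to the largest tracked field id.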
#[inline]
fn get_vtable_byte_len(field_locs: &[FieldLoc]) -> usize {
    let max_voffset = field_locs.iter().map(|fl| fl.id).max();
    match max_voffset {
        None => field_index_to_field_offset(0) as usize,
        Some(mv) => mv as usize + SIZE_VOFFSET,
    }
}

// Returns the number of padding bytes needed to round `buf_size` up to a
// multiple of `scalar_size` (which must be a power of two).
#[inline]
fn padding_bytes(buf_size: usize, scalar_size: usize) -> usize {
    (!buf_size).wrapping_add(1) & (scalar_size.wrapping_sub(1))
}

impl<'fbb> Default for FlatBufferBuilder<'fbb> {
    fn default() -> Self {
        Self::with_capacity(0)
    }
}

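/// An index into the buffer measured as a distance from the *end* of the
/// buffer rather than from its start. Because FlatBuffers are built
/// back-to-front, a `ReverseIndex` stays stable when the allocator grows and
/// the written data is moved toward the end.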
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
struct ReverseIndex(usize);

impl ReverseIndex {
    /// Returns the index pointing at the very end of the buffer, i.e. where
    /// nothing has been written yet.
    pub fn end() -> Self {
        Self(0)
    }

    pub fn range_to_end(self) -> ReverseIndexRange {
        ReverseIndexRange(self, ReverseIndex::end())
    }

    pub fn range_to(self, end: ReverseIndex) -> ReverseIndexRange {
        ReverseIndexRange(self, end)
    }

    /// Converts this reverse index into a normal, front-based index into `buf`.
    pub fn to_forward_index<T>(self, buf: &[T]) -> usize {
        buf.len() - self.0
    }

    pub fn distance_to_end(&self) -> usize {
        self.0
    }
}

// Because a `ReverseIndex` stores a distance from the end of the buffer,
// subtracting from it (moving toward the front) increases the stored value,
// and adding to it decreases it.
impl Sub<usize> for ReverseIndex {
    type Output = Self;

    fn sub(self, rhs: usize) -> Self::Output {
        Self(self.0 + rhs)
    }
}

impl SubAssign<usize> for ReverseIndex {
    fn sub_assign(&mut self, rhs: usize) {
        *self = *self - rhs;
    }
}

impl Add<usize> for ReverseIndex {
    type Output = Self;

    fn add(self, rhs: usize) -> Self::Output {
        Self(self.0 - rhs)
    }
}

impl AddAssign<usize> for ReverseIndex {
    fn add_assign(&mut self, rhs: usize) {
        *self = *self + rhs;
    }
}

impl<T> Index<ReverseIndex> for [T] {
    type Output = T;

    fn index(&self, index: ReverseIndex) -> &Self::Output {
        let index = index.to_forward_index(self);
        &self[index]
    }
}

impl<T> IndexMut<ReverseIndex> for [T] {
    fn index_mut(&mut self, index: ReverseIndex) -> &mut Self::Output {
        let index = index.to_forward_index(self);
        &mut self[index]
    }
}

/// A half-open range `[start, end)` expressed in reverse indices, i.e. both
/// endpoints are distances from the end of the buffer.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
struct ReverseIndexRange(ReverseIndex, ReverseIndex);

impl<T> Index<ReverseIndexRange> for [T] {
    type Output = [T];

    fn index(&self, index: ReverseIndexRange) -> &Self::Output {
        let start = index.0.to_forward_index(self);
        let end = index.1.to_forward_index(self);
        &self[start..end]
    }
}

impl<T> IndexMut<ReverseIndexRange> for [T] {
    fn index_mut(&mut self, index: ReverseIndexRange) -> &mut Self::Output {
        let start = index.0.to_forward_index(self);
        let end = index.1.to_forward_index(self);
        &mut self[start..end]
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn reverse_index_test() {
        let buf = [0, 1, 2, 3, 4, 5];
        let idx = ReverseIndex::end() - 2;
        assert_eq!(&buf[idx.range_to_end()], &[4, 5]);
        assert_eq!(&buf[idx.range_to(idx + 1)], &[4]);
        assert_eq!(idx.to_forward_index(&buf), 4);
    }
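
    // A small usage sketch added for illustration: it exercises only the
    // builder API defined in this file (`create_string` + `finish_minimal`)
    // and assumes a `WIPOffset<&str>` is acceptable as a root, which `Push`
    // permits. The test name and the exact size bound are ours, not upstream's.
    #[test]
    fn builder_create_string_smoke_test() {
        let mut fbb = FlatBufferBuilder::new();
        let s = fbb.create_string("hello");
        fbb.finish_minimal(s);

        // The finished data must at least contain the root uoffset plus the
        // length-prefixed, null-terminated string bytes.
        assert!(fbb.finished_data().len() >= SIZE_UOFFSET + SIZE_UOFFSET + "hello".len() + 1);
    }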
954}