flatbuffers/vtable.rs

/*
 * Copyright 2018 Google Inc. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

use crate::endian_scalar::read_scalar_at;
use crate::follow::Follow;
use crate::primitives::*;

/// VTable encapsulates read-only usage of a vtable. It is only to be used
/// by generated code.
#[derive(Debug)]
pub struct VTable<'a> {
    buf: &'a [u8],
    loc: usize,
}

impl<'a> PartialEq for VTable<'a> {
    fn eq(&self, other: &VTable) -> bool {
        self.as_bytes().eq(other.as_bytes())
    }
}

impl<'a> VTable<'a> {
    /// SAFETY
    /// `buf` must contain a valid vtable at `loc`.
    ///
    /// A vtable consists of a sequence of `VOffsetT` values:
    /// - the size of the vtable in bytes, including the size element itself
    /// - the size of the object's inline data in bytes, including the vtable offset
    /// - `n` field offsets, where `n` is the number of fields in the table's
    ///   schema when the code was compiled
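    ///
    /// For example (an illustrative layout, not taken from any particular
    /// generated schema): a two-field table whose inline data occupies 8 bytes
    /// might carry the following vtable, written here as little-endian
    /// `VOffsetT`s:
    ///
    /// ```text
    /// [8, 0,   // vtable length: 4 * SIZE_VOFFSET bytes
    ///  8, 0,   // inline object length in bytes
    ///  4, 0,   // field 0 starts 4 bytes into the table, just past the vtable offset
    ///  6, 0]   // field 1 starts 6 bytes into the table
    /// ```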
    pub unsafe fn init(buf: &'a [u8], loc: usize) -> Self {
        VTable { buf, loc }
    }

    pub fn num_fields(&self) -> usize {
        (self.num_bytes() / SIZE_VOFFSET) - 2
    }

    pub fn num_bytes(&self) -> usize {
        // Safety:
        // Valid VTable at time of construction
        unsafe { read_scalar_at::<VOffsetT>(self.buf, self.loc) as usize }
    }

    pub fn object_inline_num_bytes(&self) -> usize {
        // Safety:
        // Valid VTable at time of construction
        let n = unsafe { read_scalar_at::<VOffsetT>(self.buf, self.loc + SIZE_VOFFSET) };
        n as usize
    }

    pub fn get_field(&self, idx: usize) -> VOffsetT {
        // TODO(rw): distinguish between None and 0?
        if idx > self.num_fields() {
            return 0;
        }

        // Safety:
        // Valid VTable at time of construction
        unsafe {
            read_scalar_at::<VOffsetT>(
                self.buf,
                self.loc + SIZE_VOFFSET + SIZE_VOFFSET + SIZE_VOFFSET * idx,
            )
        }
    }

    pub fn get(&self, byte_loc: VOffsetT) -> VOffsetT {
        // TODO(rw): distinguish between None and 0?
        if byte_loc as usize + 2 > self.num_bytes() {
            return 0;
        }
        // Safety:
        // byte_loc is within bounds of vtable, which was valid at time of construction
        unsafe { read_scalar_at::<VOffsetT>(self.buf, self.loc + byte_loc as usize) }
    }

    pub fn as_bytes(&self) -> &[u8] {
        let len = self.num_bytes();
        &self.buf[self.loc..self.loc + len]
    }
}

#[allow(dead_code)]
pub fn field_index_to_field_offset(field_id: VOffsetT) -> VOffsetT {
    // Should correspond to what end_table() in the builder builds up.
    let fixed_fields = 2; // Vtable size and Object Size.
    ((field_id + fixed_fields) * (SIZE_VOFFSET as VOffsetT)) as VOffsetT
}

#[allow(dead_code)]
pub fn field_offset_to_field_index(field_o: VOffsetT) -> VOffsetT {
    debug_assert!(field_o >= 2);
    let fixed_fields = 2; // VTable size and Object Size.
    (field_o / (SIZE_VOFFSET as VOffsetT)) - fixed_fields
}

impl<'a> Follow<'a> for VTable<'a> {
    type Inner = VTable<'a>;
    unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner {
        VTable::init(buf, loc)
    }
}
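
// A minimal test sketch added for illustration; it is not part of the upstream
// test suite. It hand-rolls the little-endian vtable bytes described in the
// `init` docs above, assuming `VOffsetT` is the crate's 2-byte wire type, and
// checks the accessors and the index/offset helpers against them.
#[cfg(test)]
mod vtable_example_tests {
    use super::*;

    #[test]
    fn read_hand_rolled_vtable() {
        // vtable: 8 bytes long, object 8 bytes long, field 0 at byte 4, field 1 at byte 6.
        let buf: [u8; 8] = [8, 0, 8, 0, 4, 0, 6, 0];
        // Safety: `buf` holds a well-formed vtable starting at offset 0.
        let vt = unsafe { VTable::init(&buf, 0) };

        assert_eq!(vt.num_bytes(), 8);
        assert_eq!(vt.num_fields(), 2);
        assert_eq!(vt.object_inline_num_bytes(), 8);
        assert_eq!(vt.get_field(0), 4);
        assert_eq!(vt.get_field(1), 6);
        assert_eq!(vt.as_bytes(), &buf[..]);
    }

    #[test]
    fn field_index_offset_round_trip() {
        // Field index 0 sits just past the two size entries, i.e. at byte offset 4.
        assert_eq!(field_index_to_field_offset(0), 4);
        assert_eq!(field_index_to_field_offset(1), 6);
        assert_eq!(field_offset_to_field_index(4), 0);
        assert_eq!(field_offset_to_field_index(6), 1);
    }
}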