author     Robert <rw@users.noreply.github.com>    2018-09-02 17:05:50 -0700
committer  rw <me@rwinslow.com>                    2018-09-02 18:26:55 -0700
commit     3c54fd964b6beae9a92955415568a001c9cea23d (patch)
tree       08f625977a0de84337044abe4ca7beadb4d9ac22 /rust
parent     e7578548a5714dd278d798344d6619d8cbbfb4d9 (diff)
Port FlatBuffers to Rust (#4898)
This is a port of FlatBuffers to Rust. It provides code generation and a runtime library derived from the C++ implementation. It utilizes the Rust type system to provide safe and fast traversal of FlatBuffers data.

There are 188 tests, including many fuzz tests of roundtrips for various serialization scenarios. Initial benchmarks indicate that the canonical example payload can be written in ~700ns and traversed in ~100ns.

Rustaceans may be interested in the Follow, Push, and SafeSliceAccess traits. These traits lift traversals, reads, writes, and slice accesses into the type system, providing abstraction with no runtime penalty.
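A minimal end-to-end sketch of the new runtime API (not part of this change; it assumes only the public items added below, linked as the `flatbuffers` crate):

    extern crate flatbuffers;
    use flatbuffers::{FlatBufferBuilder, ForwardsUOffset, Vector, get_root};

    fn main() {
        // Write: the builder fills an owned Vec<u8> back-to-front.
        let mut fbb = FlatBufferBuilder::new_with_capacity(64);
        let v = fbb.create_vector_of_strings(&["hello", "flatbuffers"]);
        fbb.finish_minimal(v);

        // Read: Follow impls compose through the type system.
        let buf = fbb.finished_data();
        let back = get_root::<Vector<ForwardsUOffset<&str>>>(buf);
        assert_eq!(back.len(), 2);
        assert_eq!(back.get(0), "hello");
    }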
Diffstat (limited to 'rust')
-rw-r--r--  rust/flatbuffers/Cargo.lock            |   4
-rw-r--r--  rust/flatbuffers/Cargo.toml            |   7
-rw-r--r--  rust/flatbuffers/src/builder.rs        | 636
-rw-r--r--  rust/flatbuffers/src/endian_scalar.rs  | 180
-rw-r--r--  rust/flatbuffers/src/follow.rs         |  62
-rw-r--r--  rust/flatbuffers/src/lib.rs            |  38
-rw-r--r--  rust/flatbuffers/src/primitives.rs     | 298
-rw-r--r--  rust/flatbuffers/src/push.rs           |  81
-rw-r--r--  rust/flatbuffers/src/table.rs          |  77
-rw-r--r--  rust/flatbuffers/src/vector.rs         | 133
-rw-r--r--  rust/flatbuffers/src/vtable.rs         |  95
-rw-r--r--  rust/flatbuffers/src/vtable_writer.rs  |  85
12 files changed, 1696 insertions, 0 deletions
diff --git a/rust/flatbuffers/Cargo.lock b/rust/flatbuffers/Cargo.lock
new file mode 100644
index 00000000..dc2168d0
--- /dev/null
+++ b/rust/flatbuffers/Cargo.lock
@@ -0,0 +1,4 @@
+[[package]]
+name = "flatbuffers"
+version = "0.1.0"
+
diff --git a/rust/flatbuffers/Cargo.toml b/rust/flatbuffers/Cargo.toml
new file mode 100644
index 00000000..f5914e99
--- /dev/null
+++ b/rust/flatbuffers/Cargo.toml
@@ -0,0 +1,7 @@
+[package]
+name = "flatbuffers"
+version = "0.1.0"
+authors = ["Robert Winslow <hello@rwinslow.com>", "FlatBuffers Maintainers"]
+
+[dependencies]
+smallvec = "0.6"
diff --git a/rust/flatbuffers/src/builder.rs b/rust/flatbuffers/src/builder.rs
new file mode 100644
index 00000000..2a808ef7
--- /dev/null
+++ b/rust/flatbuffers/src/builder.rs
@@ -0,0 +1,636 @@
+/*
+ * Copyright 2018 Google Inc. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+extern crate smallvec;
+
+use std::cmp::max;
+use std::marker::PhantomData;
+use std::ptr::write_bytes;
+use std::slice::from_raw_parts;
+
+use endian_scalar::{read_scalar, emplace_scalar};
+use primitives::*;
+use push::{Push, PushAlignment};
+use table::Table;
+use vtable::{VTable, field_index_to_field_offset};
+use vtable_writer::VTableWriter;
+use vector::{SafeSliceAccess, Vector};
+
+#[derive(Clone, Copy, Debug)]
+struct FieldLoc {
+ off: UOffsetT,
+ id: VOffsetT,
+}
+
+/// FlatBufferBuilder builds a FlatBuffer by manipulating its internal
+/// state. It has an owned `Vec<u8>` that grows as needed (up to the
+/// hardcoded limit of 2GiB, which is set by the FlatBuffers format).
+pub struct FlatBufferBuilder<'fbb> {
+ owned_buf: Vec<u8>,
+ head: usize,
+
+ field_locs: Vec<FieldLoc>,
+ written_vtable_revpos: Vec<UOffsetT>,
+
+ nested: bool,
+ finished: bool,
+
+ min_align: usize,
+
+ _phantom: PhantomData<&'fbb ()>,
+}
+
+impl<'fbb> FlatBufferBuilder<'fbb> {
+ /// Create a FlatBufferBuilder that is ready for writing.
+ pub fn new() -> Self {
+ Self::new_with_capacity(0)
+ }
+
+ /// Create a FlatBufferBuilder that is ready for writing, with a
+ /// ready-to-use capacity of the provided size.
+ ///
+ /// The maximum valid value is `FLATBUFFERS_MAX_BUFFER_SIZE`.
+ pub fn new_with_capacity(size: usize) -> Self {
+ // we need to check the size here because we create the backing buffer
+ // directly, bypassing the typical way of using grow_owned_buf:
+ assert!(size <= FLATBUFFERS_MAX_BUFFER_SIZE,
+ "cannot initialize buffer bigger than 2 gigabytes");
+
+ FlatBufferBuilder {
+ owned_buf: vec![0u8; size],
+ head: size,
+
+ field_locs: Vec::new(),
+ written_vtable_revpos: Vec::new(),
+
+ nested: false,
+ finished: false,
+
+ min_align: 0,
+
+ _phantom: PhantomData,
+ }
+ }
+
+ /// Reset the FlatBufferBuilder internal state. Use this method after a
+ /// call to a `finish` function in order to re-use a FlatBufferBuilder.
+ ///
+ /// This function is the only way to reset the `finished` state and start
+ /// again.
+ ///
+ /// If you are using a FlatBufferBuilder repeatedly, make sure to use this
+ /// function, because it re-uses the FlatBufferBuilder's existing
+ /// heap-allocated `Vec<u8>` internal buffer. This offers significant speed
+ /// improvements as compared to creating a new FlatBufferBuilder for every
+ /// new object.
+ pub fn reset(&mut self) {
+ // memset only the part of the buffer that could be dirty:
+ {
+ let to_clear = self.owned_buf.len() - self.head;
+ let ptr = (&mut self.owned_buf[self.head..]).as_mut_ptr();
+ unsafe { write_bytes(ptr, 0, to_clear); }
+ }
+
+ self.head = self.owned_buf.len();
+ self.written_vtable_revpos.clear();
+
+ self.nested = false;
+ self.finished = false;
+
+ self.min_align = 0;
+ }
+
+ /// Destroy the FlatBufferBuilder, returning its internal byte vector
+ /// and the index into it that represents the start of valid data.
+ pub fn collapse(self) -> (Vec<u8>, usize) {
+ (self.owned_buf, self.head)
+ }
+
+ /// Push a Push'able value onto the front of the in-progress data.
+ ///
+ /// This function uses traits to provide a unified API for writing
+ /// scalars, tables, vectors, and WIPOffsets.
+ #[inline]
+ pub fn push<P: Push>(&mut self, x: P) -> WIPOffset<P::Output> {
+ let sz = P::size();
+ self.align(sz, P::alignment());
+ self.make_space(sz);
+ {
+ let (dst, rest) = (&mut self.owned_buf[self.head..]).split_at_mut(sz);
+ x.push(dst, rest);
+ }
+ WIPOffset::new(self.used_space() as UOffsetT)
+ }
+
+ /// Push a Push'able value onto the front of the in-progress data, and
+ /// store a reference to it in the in-progress vtable. If the value matches
+ /// the default, then this is a no-op.
+ #[inline]
+ pub fn push_slot<X: Push + PartialEq>(&mut self, slotoff: VOffsetT, x: X, default: X) {
+ self.assert_nested("push_slot");
+ if x == default {
+ return;
+ }
+ self.push_slot_always(slotoff, x);
+ }
+
+ /// Push a Push'able value onto the front of the in-progress data, and
+ /// store a reference to it in the in-progress vtable.
+ #[inline]
+ pub fn push_slot_always<X: Push>(&mut self, slotoff: VOffsetT, x: X) {
+ self.assert_nested("push_slot_always");
+ let off = self.push(x);
+ self.track_field(slotoff, off.value());
+ }
+
+ /// Retrieve the number of vtables that have been serialized into the
+ /// FlatBuffer. This is primarily used to check vtable deduplication.
+ #[inline]
+ pub fn num_written_vtables(&self) -> usize {
+ self.written_vtable_revpos.len()
+ }
+
+ /// Start a Table write.
+ ///
+ /// Asserts that the builder is not in a nested state.
+ ///
+ /// Users probably want to use `push_slot` to add values after calling this.
+ #[inline]
+ pub fn start_table(&mut self) -> WIPOffset<TableUnfinishedWIPOffset> {
+ self.assert_not_nested("start_table can not be called when a table or vector is under construction");
+ self.nested = true;
+
+ WIPOffset::new(self.used_space() as UOffsetT)
+ }
+
+ /// End a Table write.
+ ///
+ /// Asserts that the builder is in a nested state.
+ #[inline]
+ pub fn end_table(&mut self, off: WIPOffset<TableUnfinishedWIPOffset>) -> WIPOffset<TableFinishedWIPOffset> {
+ self.assert_nested("end_table");
+
+ let o = self.write_vtable(off);
+
+ self.nested = false;
+ self.field_locs.clear();
+
+ WIPOffset::new(o.value())
+ }
+
+ /// Start a Vector write.
+ ///
+ /// Asserts that the builder is not in a nested state.
+ ///
+ /// Most users will prefer to call `create_vector`.
+ /// Speed optimizing users who choose to create vectors manually using this
+ /// function will want to use `push` to add values.
+ #[inline]
+ pub fn start_vector<T: Push>(&mut self, num_items: usize) {
+ self.assert_not_nested("start_vector can not be called when a table or vector is under construction");
+ self.nested = true;
+ self.align(num_items * T::size(), T::alignment().max_of(SIZE_UOFFSET));
+ }
+
+ /// End a Vector write.
+ ///
+ /// Note that the `num_elems` parameter is the number of written items, not
+ /// the byte count.
+ ///
+ /// Asserts that the builder is in a nested state.
+ #[inline]
+ pub fn end_vector<T: Push>(&mut self, num_elems: usize) -> WIPOffset<Vector<'fbb, T>> {
+ self.assert_nested("end_vector");
+ self.nested = false;
+ let o = self.push::<UOffsetT>(num_elems as UOffsetT);
+ WIPOffset::new(o.value())
+ }
+
+ /// Create a utf8 string.
+ ///
+ /// The wire format represents this as a zero-terminated byte vector.
+ #[inline]
+ pub fn create_string<'a: 'b, 'b>(&'a mut self, s: &'b str) -> WIPOffset<&'fbb str> {
+ self.assert_not_nested("create_string can not be called when a table or vector is under construction");
+ WIPOffset::new(self.create_byte_string(s.as_bytes()).value())
+ }
+
+ /// Create a zero-terminated byte vector.
+ #[inline]
+ pub fn create_byte_string(&mut self, data: &[u8]) -> WIPOffset<&'fbb [u8]> {
+ self.assert_not_nested("create_byte_string can not be called when a table or vector is under construction");
+ self.align(data.len() + 1, PushAlignment::new(SIZE_UOFFSET));
+ self.push(0u8);
+ self.push_bytes_unprefixed(data);
+ self.push(data.len() as UOffsetT);
+ WIPOffset::new(self.used_space() as UOffsetT)
+ }
+
+ /// Create a vector by memcpy'ing. This is much faster than calling
+ /// `create_vector`, but the underlying type must be represented as
+ /// little-endian on the host machine. This property is encoded in the
+ /// type system through the SafeSliceAccess trait. The following types are
+ /// always safe, on any platform: bool, u8, i8, and any
+ /// FlatBuffers-generated struct.
+ #[inline]
+ pub fn create_vector_direct<'a: 'b, 'b, T: SafeSliceAccess + Push + Sized + 'b>(&'a mut self, items: &'b [T]) -> WIPOffset<Vector<'fbb, T>> {
+ self.assert_not_nested("create_vector_direct can not be called when a table or vector is under construction");
+ let elem_size = T::size();
+ self.align(items.len() * elem_size, T::alignment().max_of(SIZE_UOFFSET));
+
+ let bytes = {
+ let ptr = items.as_ptr() as *const T as *const u8;
+ unsafe { from_raw_parts(ptr, items.len() * elem_size) }
+ };
+ self.push_bytes_unprefixed(bytes);
+ self.push(items.len() as UOffsetT);
+
+ WIPOffset::new(self.used_space() as UOffsetT)
+ }
+
+ /// Create a vector of strings.
+ ///
+ /// Speed-sensitive users may wish to reduce memory usage by creating the
+ /// vector manually: use `start_vector`, `push`, and `end_vector`.
+ #[inline]
+ pub fn create_vector_of_strings<'a, 'b>(&'a mut self, xs: &'b [&'b str]) -> WIPOffset<Vector<'fbb, ForwardsUOffset<&'fbb str>>> {
+ self.assert_not_nested("create_vector_of_strings can not be called when a table or vector is under construction");
+ // internally, smallvec can be a stack-allocated or heap-allocated vector.
+ // we expect it to usually be stack-allocated.
+ let mut offsets: smallvec::SmallVec<[WIPOffset<&str>; 0]> = smallvec::SmallVec::with_capacity(xs.len());
+ unsafe { offsets.set_len(xs.len()); }
+ for (i, &s) in xs.iter().enumerate().rev() {
+ let o = self.create_string(s);
+ offsets[i] = o;
+ }
+ self.create_vector(&offsets[..])
+ }
+
+ /// Create a vector of Push-able objects.
+ ///
+ /// Speed-sensitive users may wish to reduce memory usage by creating the
+ /// vector manually: use `start_vector`, `push`, and `end_vector`.
+ #[inline]
+ pub fn create_vector<'a: 'b, 'b, T: Push + Copy + 'b>(&'a mut self, items: &'b [T]) -> WIPOffset<Vector<'fbb, T::Output>> {
+ let elem_size = T::size();
+ self.align(items.len() * elem_size, T::alignment().max_of(SIZE_UOFFSET));
+ for i in (0..items.len()).rev() {
+ self.push(items[i]);
+ }
+ WIPOffset::new(self.push::<UOffsetT>(items.len() as UOffsetT).value())
+ }
+
+ /// Get the byte slice for the data that has been written, regardless of
+ /// whether it has been finished.
+ #[inline]
+ pub fn unfinished_data(&self) -> &[u8] {
+ &self.owned_buf[self.head..]
+ }
+ /// Get the byte slice for the data that has been written after a call to
+ /// one of the `finish` functions.
+ #[inline]
+ pub fn finished_data(&self) -> &[u8] {
+ self.assert_finished("finished_data cannot be called when the buffer is not yet finished");
+ &self.owned_buf[self.head..]
+ }
+ /// Assert that a field is present in the just-finished Table.
+ ///
+ /// This is somewhat low-level and is mostly used by the generated code.
+ #[inline]
+ pub fn required(&self,
+ tab_revloc: WIPOffset<TableFinishedWIPOffset>,
+ slot_byte_loc: VOffsetT,
+ assert_msg_name: &'static str) {
+ let idx = self.used_space() - tab_revloc.value() as usize;
+ let tab = Table::new(&self.owned_buf[self.head..], idx);
+ let o = tab.vtable().get(slot_byte_loc) as usize;
+ assert!(o != 0, "missing required field {}", assert_msg_name);
+ }
+
+ /// Finalize the FlatBuffer by: aligning it, pushing an optional file
+ /// identifier on to it, pushing a size prefix on to it, and marking the
+ /// internal state of the FlatBufferBuilder as `finished`. Afterwards,
+ /// users can call `finished_data` to get the resulting data.
+ #[inline]
+ pub fn finish_size_prefixed<T>(&mut self, root: WIPOffset<T>, file_identifier: Option<&str>) {
+ self.finish_with_opts(root, file_identifier, true);
+ }
+
+ /// Finalize the FlatBuffer by: aligning it, pushing an optional file
+ /// identifier on to it, and marking the internal state of the
+ /// FlatBufferBuilder as `finished`. Afterwards, users can call
+ /// `finished_data` to get the resulting data.
+ #[inline]
+ pub fn finish<T>(&mut self, root: WIPOffset<T>, file_identifier: Option<&str>) {
+ self.finish_with_opts(root, file_identifier, false);
+ }
+
+ /// Finalize the FlatBuffer by: aligning it and marking the internal state
+ /// of the FlatBufferBuilder as `finished`. Afterwards, users can call
+ /// `finished_data` to get the resulting data.
+ #[inline]
+ pub fn finish_minimal<T>(&mut self, root: WIPOffset<T>) {
+ self.finish_with_opts(root, None, false);
+ }
+
+ #[inline]
+ fn used_space(&self) -> usize {
+ self.owned_buf.len() - self.head as usize
+ }
+
+ #[inline]
+ fn track_field(&mut self, slot_off: VOffsetT, off: UOffsetT) {
+ let fl = FieldLoc {
+ id: slot_off,
+ off: off,
+ };
+ self.field_locs.push(fl);
+ }
+
+ /// Write the VTable, if it is new.
+ fn write_vtable(&mut self, table_tail_revloc: WIPOffset<TableUnfinishedWIPOffset>) -> WIPOffset<VTableWIPOffset> {
+ self.assert_nested("write_vtable");
+
+ // Write the vtable offset, which is the start of any Table.
+ // We fill its value later.
+ let object_revloc_to_vtable: WIPOffset<VTableWIPOffset> =
+ WIPOffset::new(self.push::<UOffsetT>(0xF0F0F0F0 as UOffsetT).value());
+
+ // Layout of the data this function will create when a new vtable is
+ // needed.
+ // --------------------------------------------------------------------
+ // vtable starts here
+ // | x, x -- vtable len (bytes) [u16]
+ // | x, x -- object inline len (bytes) [u16]
+ // | x, x -- zero, or num bytes from start of object to field #0 [u16]
+ // | ...
+ // | x, x -- zero, or num bytes from start of object to field #n-1 [u16]
+ // vtable ends here
+ // table starts here
+ // | x, x, x, x -- offset (negative direction) to the vtable [i32]
+ // | aka "vtableoffset"
+ // | -- table inline data begins here, we don't touch it --
+ // table ends here -- aka "table_start"
+ // --------------------------------------------------------------------
+ //
+ // Layout of the data this function will create when we re-use an
+ // existing vtable.
+ //
+ // We always serialize this particular vtable, then compare it to the
+ // other vtables we know about to see if there is a duplicate. If there
+ // is, then we erase the serialized vtable we just made.
+ // We serialize it first so that we are able to do byte-by-byte
+ // comparisons with already-serialized vtables. This 1) saves
+ // bookkeeping space (we only keep revlocs to existing vtables), 2)
+ // allows us to convert to little-endian once, then do
+ // fast memcmp comparisons, and 3) by ensuring we are comparing real
+ // serialized vtables, we can be more assured that we are doing the
+ // comparisons correctly.
+ //
+ // --------------------------------------------------------------------
+ // table starts here
+ // | x, x, x, x -- offset (negative direction) to an existing vtable [i32]
+ // | aka "vtableoffset"
+ // | -- table inline data begins here, we don't touch it --
+ // table ends here -- aka "table_start"
+ // --------------------------------------------------------------------
+
+ // fill the WIP vtable with zeros:
+ let vtable_byte_len = get_vtable_byte_len(&self.field_locs);
+ self.make_space(vtable_byte_len);
+
+ // compute the length of the table (not vtable!) in bytes:
+ let table_object_size = object_revloc_to_vtable.value() - table_tail_revloc.value();
+ debug_assert!(table_object_size < 0x10000); // vtables use 16-bit offsets.
+
+ // Write the VTable (we may delete it afterwards, if it is a duplicate):
+ let vt_start_pos = self.head;
+ let vt_end_pos = self.head + vtable_byte_len;
+ {
+ // write the vtable header:
+ let vtfw = &mut VTableWriter::init(&mut self.owned_buf[vt_start_pos..vt_end_pos]);
+ vtfw.write_vtable_byte_length(vtable_byte_len as VOffsetT);
+ vtfw.write_object_inline_size(table_object_size as VOffsetT);
+
+ // serialize every FieldLoc to the vtable:
+ for &fl in self.field_locs.iter() {
+ let pos: VOffsetT = (object_revloc_to_vtable.value() - fl.off) as VOffsetT;
+ debug_assert_eq!(vtfw.get_field_offset(fl.id),
+ 0,
+ "tried to write a vtable field multiple times");
+ vtfw.write_field_offset(fl.id, pos);
+ }
+ }
+ let dup_vt_use = {
+ let this_vt = VTable::init(&self.owned_buf[..], self.head);
+ self.find_duplicate_stored_vtable_revloc(this_vt)
+ };
+
+ let vt_use = match dup_vt_use {
+ Some(n) => {
+ VTableWriter::init(&mut self.owned_buf[vt_start_pos..vt_end_pos]).clear();
+ self.head += vtable_byte_len;
+ n
+ }
+ None => {
+ let new_vt_use = self.used_space() as UOffsetT;
+ self.written_vtable_revpos.push(new_vt_use);
+ new_vt_use
+ }
+ };
+
+ {
+ let n = self.head + self.used_space() - object_revloc_to_vtable.value() as usize;
+ let saw = read_scalar::<UOffsetT>(&self.owned_buf[n..n + SIZE_SOFFSET]);
+ debug_assert_eq!(saw, 0xF0F0F0F0);
+ emplace_scalar::<SOffsetT>(&mut self.owned_buf[n..n + SIZE_SOFFSET],
+ vt_use as SOffsetT - object_revloc_to_vtable.value() as SOffsetT);
+ }
+
+ self.field_locs.clear();
+
+ object_revloc_to_vtable
+ }
+
+ #[inline]
+ fn find_duplicate_stored_vtable_revloc(&self, needle: VTable) -> Option<UOffsetT> {
+ for &revloc in self.written_vtable_revpos.iter().rev() {
+ let o = VTable::init(&self.owned_buf[..], self.head + self.used_space() - revloc as usize);
+ if needle == o {
+ return Some(revloc);
+ }
+ }
+ None
+ }
+
+ // Only call this when you know it is safe to double the size of the buffer.
+ #[inline]
+ fn grow_owned_buf(&mut self) {
+ let old_len = self.owned_buf.len();
+ let new_len = max(1, old_len * 2);
+
+ let starting_active_size = self.used_space();
+
+ let diff = new_len - old_len;
+ self.owned_buf.resize(new_len, 0);
+ self.head += diff;
+
+ let ending_active_size = self.used_space();
+ debug_assert_eq!(starting_active_size, ending_active_size);
+
+ if new_len == 1 {
+ return;
+ }
+
+ // calculate the midpoint, and safely copy the old end data to the new
+ // end position:
+ let middle = new_len / 2;
+ {
+ let (left, right) = &mut self.owned_buf[..].split_at_mut(middle);
+ right.copy_from_slice(left);
+ }
+ // finally, zero out the old end data.
+ {
+ let ptr = (&mut self.owned_buf[..middle]).as_mut_ptr();
+ unsafe { write_bytes(ptr, 0, middle); }
+ }
+ }
+
+ // with or without a size prefix changes how we load the data, so finish*
+ // functions are split along those lines.
+ fn finish_with_opts<T>(&mut self,
+ root: WIPOffset<T>,
+ file_identifier: Option<&str>,
+ size_prefixed: bool) {
+ self.assert_not_finished("buffer cannot be finished when it is already finished");
+ self.assert_not_nested("buffer cannot be finished when a table or vector is under construction");
+ self.written_vtable_revpos.clear();
+
+ let to_align = {
+ // for the root offset:
+ let a = SIZE_UOFFSET;
+ // for the size prefix:
+ let b = if size_prefixed { SIZE_UOFFSET } else { 0 };
+ // for the file identifier (a string that is not zero-terminated):
+ let c = if file_identifier.is_some() {
+ FILE_IDENTIFIER_LENGTH
+ } else {
+ 0
+ };
+ a + b + c
+ };
+
+ {
+ let ma = PushAlignment::new(self.min_align);
+ self.align(to_align, ma);
+ }
+
+ if let Some(ident) = file_identifier {
+ debug_assert_eq!(ident.len(), FILE_IDENTIFIER_LENGTH);
+ self.push_bytes_unprefixed(ident.as_bytes());
+ }
+
+ self.push(root);
+
+ if size_prefixed {
+ let sz = self.used_space() as UOffsetT;
+ self.push::<UOffsetT>(sz);
+ }
+ self.finished = true;
+ }
+
+ #[inline]
+ fn align(&mut self, len: usize, alignment: PushAlignment) {
+ self.track_min_align(alignment.value());
+ let s = self.used_space() as usize;
+ self.make_space(padding_bytes(s + len, alignment.value()));
+ }
+
+ #[inline]
+ fn track_min_align(&mut self, alignment: usize) {
+ self.min_align = max(self.min_align, alignment);
+ }
+
+ #[inline]
+ fn push_bytes_unprefixed(&mut self, x: &[u8]) -> UOffsetT {
+ let n = self.make_space(x.len());
+ self.owned_buf[n..n + x.len()].copy_from_slice(x);
+
+ n as UOffsetT
+ }
+
+ #[inline]
+ fn make_space(&mut self, want: usize) -> usize {
+ self.ensure_capacity(want);
+ self.head -= want;
+ self.head
+ }
+
+ #[inline]
+ fn ensure_capacity(&mut self, want: usize) -> usize {
+ if self.unused_ready_space() >= want {
+ return want;
+ }
+ assert!(want <= FLATBUFFERS_MAX_BUFFER_SIZE,
+ "cannot grow buffer beyond 2 gigabytes");
+
+ while self.unused_ready_space() < want {
+ self.grow_owned_buf();
+ }
+ want
+ }
+ #[inline]
+ fn unused_ready_space(&self) -> usize {
+ self.head
+ }
+ #[inline]
+ fn assert_nested(&self, fn_name: &'static str) {
+ // we don't assert that self.field_locs.len() >0 because the vtable
+ // could be empty (e.g. for empty tables, or for all-default values).
+ debug_assert!(self.nested, "incorrect FlatBufferBuilder usage: {} must be called while in a nested state", fn_name);
+ }
+ #[inline]
+ fn assert_not_nested(&self, msg: &'static str) {
+ debug_assert!(!self.nested, msg);
+ }
+ #[inline]
+ fn assert_finished(&self, msg: &'static str) {
+ debug_assert!(self.finished, msg);
+ }
+ #[inline]
+ fn assert_not_finished(&self, msg: &'static str) {
+ debug_assert!(!self.finished, msg);
+ }
+
+}
+
+/// Compute the length of the vtable needed to represent the provided FieldLocs.
+/// If there are no FieldLocs, then provide the minimum number of bytes
+/// required: enough to write the VTable header.
+#[inline]
+fn get_vtable_byte_len(field_locs: &[FieldLoc]) -> usize {
+ let max_voffset = field_locs.iter().map(|fl| fl.id).max();
+ match max_voffset {
+ None => { field_index_to_field_offset(0) as usize }
+ Some(mv) => { mv as usize + SIZE_VOFFSET }
+ }
+}
+
+#[inline]
+fn padding_bytes(buf_size: usize, scalar_size: usize) -> usize {
+ // ((!buf_size) + 1) & (scalar_size - 1)
+ (!buf_size).wrapping_add(1) & (scalar_size.wrapping_sub(1))
+}
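To make the closing bit trick concrete, here is a small self-contained check (not part of this diff) of the padding arithmetic; the helper is private to builder.rs, so it is restated verbatim. The formula yields the number of zero bytes needed to bring the running size up to the next multiple of a power-of-two scalar size.

    fn padding_bytes(buf_size: usize, scalar_size: usize) -> usize {
        // Same formula as above; scalar_size must be a power of two.
        (!buf_size).wrapping_add(1) & (scalar_size.wrapping_sub(1))
    }

    fn main() {
        assert_eq!(padding_bytes(6, 4), 2); // 6 bytes written -> pad to 8
        assert_eq!(padding_bytes(8, 4), 0); // already aligned
        assert_eq!(padding_bytes(1, 8), 7); // pad up to the next 8-byte boundary
    }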
diff --git a/rust/flatbuffers/src/endian_scalar.rs b/rust/flatbuffers/src/endian_scalar.rs
new file mode 100644
index 00000000..00f2ebef
--- /dev/null
+++ b/rust/flatbuffers/src/endian_scalar.rs
@@ -0,0 +1,180 @@
+/*
+ * Copyright 2018 Google Inc. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+use std::mem::size_of;
+
+/// Trait for values that must be stored in little-endian byte order, but
+/// might be represented in memory as big-endian. Every type that implements
+/// EndianScalar is a valid FlatBuffers scalar value.
+///
+/// The Rust stdlib does not provide a trait to represent scalars, so this trait
+/// serves that purpose, too.
+///
+/// Note that we do not use the num-traits crate for this, because it provides
+/// "too much". For example, num-traits provides i128 support, but that is an
+/// invalid FlatBuffers type.
+pub trait EndianScalar: Sized + PartialEq + Copy + Clone {
+ fn to_little_endian(self) -> Self;
+ fn from_little_endian(self) -> Self;
+}
+
+/// Macro for implementing a no-op endian conversion. This is used for types
+/// that are one byte wide.
+macro_rules! impl_endian_scalar_noop {
+ ($ty:ident) => (
+ impl EndianScalar for $ty {
+ #[inline]
+ fn to_little_endian(self) -> Self {
+ self
+ }
+ #[inline]
+ fn from_little_endian(self) -> Self {
+ self
+ }
+ }
+ )
+}
+
+/// Macro for implementing an endian conversion using the stdlib `to_le` and
+/// `from_le` functions. This is used for integer types. It is not used for
+/// floats, because the `to_le` and `from_le` are not implemented for them in
+/// the stdlib.
+macro_rules! impl_endian_scalar_stdlib_le_conversion {
+ ($ty:ident) => (
+ impl EndianScalar for $ty {
+ #[inline]
+ fn to_little_endian(self) -> Self {
+ Self::to_le(self)
+ }
+ #[inline]
+ fn from_little_endian(self) -> Self {
+ Self::from_le(self)
+ }
+ }
+ )
+}
+
+impl_endian_scalar_noop!(bool);
+impl_endian_scalar_noop!(u8);
+impl_endian_scalar_noop!(i8);
+
+impl_endian_scalar_stdlib_le_conversion!(u16);
+impl_endian_scalar_stdlib_le_conversion!(u32);
+impl_endian_scalar_stdlib_le_conversion!(u64);
+impl_endian_scalar_stdlib_le_conversion!(i16);
+impl_endian_scalar_stdlib_le_conversion!(i32);
+impl_endian_scalar_stdlib_le_conversion!(i64);
+
+impl EndianScalar for f32 {
+ /// Convert f32 from host endian-ness to little-endian.
+ #[inline]
+ fn to_little_endian(self) -> Self {
+ #[cfg(target_endian = "little")]
+ {
+ self
+ }
+ #[cfg(not(target_endian = "little"))]
+ {
+ byte_swap_f32(&self)
+ }
+ }
+ /// Convert f32 from little-endian to host endian-ness.
+ #[inline]
+ fn from_little_endian(self) -> Self {
+ #[cfg(target_endian = "little")]
+ {
+ self
+ }
+ #[cfg(not(target_endian = "little"))]
+ {
+ byte_swap_f32(&self)
+ }
+ }
+}
+
+impl EndianScalar for f64 {
+ /// Convert f64 from host endian-ness to little-endian.
+ #[inline]
+ fn to_little_endian(self) -> Self {
+ #[cfg(target_endian = "little")]
+ {
+ self
+ }
+ #[cfg(not(target_endian = "little"))]
+ {
+ byte_swap_f64(&self)
+ }
+ }
+ /// Convert f64 from little-endian to host endian-ness.
+ #[inline]
+ fn from_little_endian(self) -> Self {
+ #[cfg(target_endian = "little")]
+ {
+ self
+ }
+ #[cfg(not(target_endian = "little"))]
+ {
+ byte_swap_f64(&self)
+ }
+ }
+}
+
+/// Swaps the bytes of an f32.
+#[allow(dead_code)]
+#[inline]
+pub fn byte_swap_f32(x: f32) -> f32 {
+ f32::from_bits(x.to_bits().swap_bytes())
+}
+
+/// Swaps the bytes of an f64.
+#[allow(dead_code)]
+#[inline]
+pub fn byte_swap_f64(x: f64) -> f64 {
+ f64::from_bits(x.to_bits().swap_bytes())
+}
+
+/// Place an EndianScalar into the provided mutable byte slice. Performs
+/// endian conversion, if necessary.
+#[inline]
+pub fn emplace_scalar<T: EndianScalar>(s: &mut [u8], x: T) {
+ let sz = size_of::<T>();
+ let mut_ptr = (&mut s[..sz]).as_mut_ptr() as *mut T;
+ let val = x.to_little_endian();
+ unsafe {
+ *mut_ptr = val;
+ }
+}
+
+/// Read an EndianScalar from the provided byte slice at the specified location.
+/// Performs endian conversion, if necessary.
+#[inline]
+pub fn read_scalar_at<T: EndianScalar>(s: &[u8], loc: usize) -> T {
+ let buf = &s[loc..loc + size_of::<T>()];
+ read_scalar(buf)
+}
+
+/// Read an EndianScalar from the provided byte slice. Performs endian
+/// conversion, if necessary.
+#[inline]
+pub fn read_scalar<T: EndianScalar>(s: &[u8]) -> T {
+ let sz = size_of::<T>();
+
+ let p = (&s[..sz]).as_ptr() as *const T;
+ let x = unsafe { *p };
+
+ x.from_little_endian()
+}
+
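A quick round-trip check (not part of this diff) of the read/write entry points above; scalars are stored little-endian on the wire regardless of host byte order:

    extern crate flatbuffers;
    use flatbuffers::{emplace_scalar, read_scalar, read_scalar_at};

    fn main() {
        let mut buf = [0u8; 4];
        emplace_scalar::<u32>(&mut buf, 0x11223344);
        // Least-significant byte first, independent of the host CPU:
        assert_eq!(buf, [0x44u8, 0x33, 0x22, 0x11]);
        assert_eq!(read_scalar::<u32>(&buf), 0x11223344);
        assert_eq!(read_scalar_at::<u16>(&buf, 2), 0x1122);
    }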
diff --git a/rust/flatbuffers/src/follow.rs b/rust/flatbuffers/src/follow.rs
new file mode 100644
index 00000000..4d3eff77
--- /dev/null
+++ b/rust/flatbuffers/src/follow.rs
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2018 Google Inc. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+use std::marker::PhantomData;
+
+/// Follow is a trait that allows us to access FlatBuffers in a declarative,
+/// type-safe, and fast way. Its impls compile down to almost no code (after
+/// optimizations). Conceptually, Follow lifts the offset-based access
+/// patterns of FlatBuffers data into the type system. This trait is used
+/// pervasively at read time, to access tables, vtables, vectors, strings, and
+/// all other data. At this time, Follow is not used much on the write
+/// path.
+///
+/// Writing a new Follow implementation primarily involves deciding whether
+/// you want to return data (of the type Self::Inner) or continue traversing
+/// the FlatBuffer.
+pub trait Follow<'a> {
+ type Inner;
+ fn follow(buf: &'a [u8], loc: usize) -> Self::Inner;
+}
+
+/// Execute a follow as a top-level function.
+#[allow(dead_code)]
+#[inline]
+pub fn lifted_follow<'a, T: Follow<'a>>(buf: &'a [u8], loc: usize) -> T::Inner {
+ T::follow(buf, loc)
+}
+
+/// FollowStart wraps a Follow impl in a struct type. This can make certain
+/// programming patterns more ergonomic.
+#[derive(Debug)]
+pub struct FollowStart<T>(PhantomData<T>);
+impl<'a, T: Follow<'a> + 'a> FollowStart<T> {
+ #[inline]
+ pub fn new() -> Self {
+ Self { 0: PhantomData }
+ }
+ #[inline]
+ pub fn self_follow(&'a self, buf: &'a [u8], loc: usize) -> T::Inner {
+ T::follow(buf, loc)
+ }
+}
+impl<'a, T: Follow<'a>> Follow<'a> for FollowStart<T> {
+ type Inner = T::Inner;
+ #[inline]
+ fn follow(buf: &'a [u8], loc: usize) -> Self::Inner {
+ T::follow(buf, loc)
+ }
+}
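As an illustration (not part of this diff) of the "return data or keep traversing" choice, a hypothetical Follow impl can stop early and report only the length prefix of a FlatBuffers string. Composed as ForwardsUOffset<StrLen>, it would follow a string offset in a table and read just the length:

    extern crate flatbuffers;
    use flatbuffers::{Follow, read_scalar_at};

    /// Hypothetical reader: yields a string's length without decoding its bytes.
    struct StrLen;

    impl<'a> Follow<'a> for StrLen {
        type Inner = usize;
        fn follow(buf: &'a [u8], loc: usize) -> usize {
            // FlatBuffers strings are length-prefixed with a little-endian u32.
            read_scalar_at::<u32>(buf, loc) as usize
        }
    }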
diff --git a/rust/flatbuffers/src/lib.rs b/rust/flatbuffers/src/lib.rs
new file mode 100644
index 00000000..1783b34c
--- /dev/null
+++ b/rust/flatbuffers/src/lib.rs
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2018 Google Inc. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+mod builder;
+mod endian_scalar;
+mod follow;
+mod primitives;
+mod push;
+mod table;
+mod vector;
+mod vtable;
+mod vtable_writer;
+
+pub use builder::FlatBufferBuilder;
+pub use endian_scalar::{EndianScalar, emplace_scalar, read_scalar, read_scalar_at, byte_swap_f32, byte_swap_f64};
+pub use follow::{Follow, FollowStart};
+pub use primitives::*;
+pub use push::Push;
+pub use table::{Table, buffer_has_identifier, get_root, get_size_prefixed_root};
+pub use vector::{SafeSliceAccess, Vector, follow_cast_ref};
+pub use vtable::field_index_to_field_offset;
+
+// TODO(rw): Unify `create_vector` and `create_vector_direct` by using
+// `Into<Vector<...>>`.
+// TODO(rw): Split fill ops in builder into fill_small, fill_big like in C++.
diff --git a/rust/flatbuffers/src/primitives.rs b/rust/flatbuffers/src/primitives.rs
new file mode 100644
index 00000000..59176b8f
--- /dev/null
+++ b/rust/flatbuffers/src/primitives.rs
@@ -0,0 +1,298 @@
+/*
+ * Copyright 2018 Google Inc. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+use std::marker::PhantomData;
+use std::mem::size_of;
+use std::ops::Deref;
+
+use endian_scalar::{emplace_scalar, read_scalar, read_scalar_at};
+use follow::Follow;
+use push::Push;
+
+pub const FLATBUFFERS_MAX_BUFFER_SIZE: usize = (1u64 << 31) as usize;
+
+pub const FILE_IDENTIFIER_LENGTH: usize = 4;
+
+pub const VTABLE_METADATA_FIELDS: usize = 2;
+
+pub const SIZE_U8: usize = size_of::<u8>();
+pub const SIZE_I8: usize = size_of::<i8>();
+
+pub const SIZE_U16: usize = size_of::<u16>();
+pub const SIZE_I16: usize = size_of::<i16>();
+
+pub const SIZE_U32: usize = size_of::<u32>();
+pub const SIZE_I32: usize = size_of::<i32>();
+
+pub const SIZE_U64: usize = size_of::<u64>();
+pub const SIZE_I64: usize = size_of::<i64>();
+
+pub const SIZE_F32: usize = size_of::<f32>();
+pub const SIZE_F64: usize = size_of::<f64>();
+
+pub const SIZE_SOFFSET: usize = SIZE_I32;
+pub const SIZE_UOFFSET: usize = SIZE_U32;
+pub const SIZE_VOFFSET: usize = SIZE_I16;
+
+pub const SIZE_SIZEPREFIX: usize = SIZE_UOFFSET;
+
+/// SOffsetT is an i32 that is used by tables to reference their vtables.
+pub type SOffsetT = i32;
+
+/// UOffsetT is a u32 that is used pervasively to represent both pointers
+/// and lengths of vectors.
+pub type UOffsetT = u32;
+
+/// VOffsetT is an i16 that is used by vtables to store field offsets.
+pub type VOffsetT = i16;
+
+/// TableFinishedWIPOffset marks a WIPOffset as being for a finished table.
+pub struct TableFinishedWIPOffset {}
+
+/// TableUnfinishedWIPOffset marks a WIPOffset as being for an unfinished table.
+pub struct TableUnfinishedWIPOffset {}
+
+/// UnionWIPOffset marks a WIPOffset as being for a union value.
+pub struct UnionWIPOffset {}
+
+/// VTableWIPOffset marks a WIPOffset as being for a vtable.
+pub struct VTableWIPOffset {}
+
+/// WIPOffset contains an UOffsetT with a special meaning: it is the location of
+/// data relative to the *end* of an in-progress FlatBuffer. The
+/// FlatBufferBuilder uses this to track the location of objects in an absolute
+/// way. The impl of Push converts a WIPOffset into a ForwardsUOffset.
+#[derive(Debug)]
+pub struct WIPOffset<T>(UOffsetT, PhantomData<T>);
+
+// TODO(rw): why do we need to reimplement (with a default impl) Copy to
+// avoid ownership errors?
+impl<T> Copy for WIPOffset<T> {}
+impl<T> Clone for WIPOffset<T> {
+ #[inline]
+ fn clone(&self) -> WIPOffset<T> {
+ WIPOffset::new(self.0.clone())
+ }
+}
+impl<T> PartialEq for WIPOffset<T> {
+ fn eq(&self, o: &WIPOffset<T>) -> bool {
+ self.value() == o.value()
+ }
+}
+
+impl<T> Deref for WIPOffset<T> {
+ type Target = UOffsetT;
+ #[inline]
+ fn deref(&self) -> &UOffsetT {
+ &self.0
+ }
+}
+impl<'a, T: 'a> WIPOffset<T> {
+ /// Create a new WIPOffset.
+ #[inline]
+ pub fn new(o: UOffsetT) -> WIPOffset<T> {
+ WIPOffset {
+ 0: o,
+ 1: PhantomData,
+ }
+ }
+
+ /// Return a wrapped value that brings its meaning as a union WIPOffset
+ /// into the type system.
+ #[inline(always)]
+ pub fn as_union_value(&self) -> WIPOffset<UnionWIPOffset> {
+ WIPOffset::new(self.0)
+ }
+ /// Get the underlying value.
+ #[inline(always)]
+ pub fn value(&self) -> UOffsetT {
+ self.0
+ }
+}
+
+impl<T> Push for WIPOffset<T> {
+ type Output = ForwardsUOffset<T>;
+
+ #[inline(always)]
+ fn push(&self, dst: &mut [u8], rest: &[u8]) {
+ let n = (SIZE_UOFFSET + rest.len() - self.value() as usize) as UOffsetT;
+ emplace_scalar::<UOffsetT>(dst, n);
+ }
+}
+
+impl<T> Push for ForwardsUOffset<T> {
+ type Output = Self;
+
+ #[inline(always)]
+ fn push(&self, dst: &mut [u8], rest: &[u8]) {
+ self.value().push(dst, rest);
+ }
+}
+
+/// ForwardsUOffset is used by Follow to traverse a FlatBuffer: the pointer
+/// is incremented by the value contained in this type.
+#[derive(Debug)]
+pub struct ForwardsUOffset<T>(UOffsetT, PhantomData<T>);
+impl<T> ForwardsUOffset<T> {
+ #[inline(always)]
+ pub fn value(&self) -> UOffsetT {
+ self.0
+ }
+}
+
+impl<'a, T: Follow<'a>> Follow<'a> for ForwardsUOffset<T> {
+ type Inner = T::Inner;
+ #[inline(always)]
+ fn follow(buf: &'a [u8], loc: usize) -> Self::Inner {
+ let slice = &buf[loc..loc + SIZE_UOFFSET];
+ let off = read_scalar::<u32>(slice) as usize;
+ T::follow(buf, loc + off)
+ }
+}
+
+/// ForwardsVOffset is used by Follow to traverse a FlatBuffer: the pointer
+/// is incremented by the value contained in this type.
+#[derive(Debug)]
+pub struct ForwardsVOffset<T>(VOffsetT, PhantomData<T>);
+impl<T> ForwardsVOffset<T> {
+ #[inline(always)]
+ pub fn value(&self) -> VOffsetT {
+ self.0
+ }
+}
+
+impl<'a, T: Follow<'a>> Follow<'a> for ForwardsVOffset<T> {
+ type Inner = T::Inner;
+ #[inline(always)]
+ fn follow(buf: &'a [u8], loc: usize) -> Self::Inner {
+ let slice = &buf[loc..loc + SIZE_VOFFSET];
+ let off = read_scalar::<VOffsetT>(slice) as usize;
+ T::follow(buf, loc + off)
+ }
+}
+
+impl<T> Push for ForwardsVOffset<T> {
+ type Output = Self;
+
+ #[inline]
+ fn push(&self, dst: &mut [u8], rest: &[u8]) {
+ self.value().push(dst, rest);
+ }
+}
+
+/// BackwardsSOffset is used by Follow to traverse a FlatBuffer: the pointer
+/// is incremented by the *negative* of the value contained in this type.
+#[derive(Debug)]
+pub struct BackwardsSOffset<T>(SOffsetT, PhantomData<T>);
+impl<T> BackwardsSOffset<T> {
+ #[inline(always)]
+ pub fn value(&self) -> SOffsetT {
+ self.0
+ }
+}
+
+impl<'a, T: Follow<'a>> Follow<'a> for BackwardsSOffset<T> {
+ type Inner = T::Inner;
+ #[inline(always)]
+ fn follow(buf: &'a [u8], loc: usize) -> Self::Inner {
+ let slice = &buf[loc..loc + SIZE_SOFFSET];
+ let off = read_scalar::<SOffsetT>(slice);
+ T::follow(buf, (loc as SOffsetT - off) as usize)
+ }
+}
+
+impl<T> Push for BackwardsSOffset<T> {
+ type Output = Self;
+
+ #[inline]
+ fn push(&self, dst: &mut [u8], rest: &[u8]) {
+ self.value().push(dst, rest);
+ }
+}
+
+/// SkipSizePrefix is used by Follow to traverse a FlatBuffer: the pointer is
+/// incremented by a fixed constant in order to skip over the size prefix value.
+pub struct SkipSizePrefix<T>(PhantomData<T>);
+impl<'a, T: Follow<'a> + 'a> Follow<'a> for SkipSizePrefix<T> {
+ type Inner = T::Inner;
+ #[inline(always)]
+ fn follow(buf: &'a [u8], loc: usize) -> Self::Inner {
+ T::follow(buf, loc + SIZE_SIZEPREFIX)
+ }
+}
+
+/// SkipRootOffset is used by Follow to traverse a FlatBuffer: the pointer is
+/// incremented by a fixed constant in order to skip over the root offset value.
+pub struct SkipRootOffset<T>(PhantomData<T>);
+impl<'a, T: Follow<'a> + 'a> Follow<'a> for SkipRootOffset<T> {
+ type Inner = T::Inner;
+ #[inline(always)]
+ fn follow(buf: &'a [u8], loc: usize) -> Self::Inner {
+ T::follow(buf, loc + SIZE_UOFFSET)
+ }
+}
+
+/// FileIdentifier is used by Follow to traverse a FlatBuffer: the pointer is
+/// dereferenced into a byte slice, whose bytes are the file identifier value.
+pub struct FileIdentifier;
+impl<'a> Follow<'a> for FileIdentifier {
+ type Inner = &'a [u8];
+ #[inline(always)]
+ fn follow(buf: &'a [u8], loc: usize) -> Self::Inner {
+ &buf[loc..loc + FILE_IDENTIFIER_LENGTH]
+ }
+}
+
+/// SkipFileIdentifier is used by Follow to traverse a FlatBuffer: the pointer
+/// is incremented by a fixed constant in order to skip over the file
+/// identifier value.
+pub struct SkipFileIdentifier<T>(PhantomData<T>);
+impl<'a, T: Follow<'a> + 'a> Follow<'a> for SkipFileIdentifier<T> {
+ type Inner = T::Inner;
+ #[inline(always)]
+ fn follow(buf: &'a [u8], loc: usize) -> Self::Inner {
+ T::follow(buf, loc + FILE_IDENTIFIER_LENGTH)
+ }
+}
+
+/// Follow trait impls for primitive types.
+///
+/// Ideally, these would be implemented as a single impl using trait bounds on
+/// EndianScalar, but implementing Follow that way causes a conflict with
+/// other impls.
+macro_rules! impl_follow_for_endian_scalar {
+ ($ty:ident) => (
+ impl<'a> Follow<'a> for $ty {
+ type Inner = $ty;
+ #[inline(always)]
+ fn follow(buf: &'a [u8], loc: usize) -> Self::Inner {
+ read_scalar_at::<$ty>(buf, loc)
+ }
+ }
+ )
+}
+
+impl_follow_for_endian_scalar!(bool);
+impl_follow_for_endian_scalar!(u8);
+impl_follow_for_endian_scalar!(u16);
+impl_follow_for_endian_scalar!(u32);
+impl_follow_for_endian_scalar!(u64);
+impl_follow_for_endian_scalar!(i8);
+impl_follow_for_endian_scalar!(i16);
+impl_follow_for_endian_scalar!(i32);
+impl_follow_for_endian_scalar!(i64);
+impl_follow_for_endian_scalar!(f32);
+impl_follow_for_endian_scalar!(f64);
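These small types are designed to nest; as a sketch (not part of this diff), skipping a size prefix, following the root offset, and decoding a string is expressed as a single composed type:

    extern crate flatbuffers;
    use flatbuffers::{FlatBufferBuilder, Follow, ForwardsUOffset, SkipSizePrefix};

    fn main() {
        let mut fbb = FlatBufferBuilder::new();
        let s = fbb.create_string("hi");
        fbb.finish_size_prefixed(s, None);

        // The whole traversal is described by the nested type parameter.
        let back = <SkipSizePrefix<ForwardsUOffset<&str>>>::follow(fbb.finished_data(), 0);
        assert_eq!(back, "hi");
    }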
diff --git a/rust/flatbuffers/src/push.rs b/rust/flatbuffers/src/push.rs
new file mode 100644
index 00000000..2b307a3a
--- /dev/null
+++ b/rust/flatbuffers/src/push.rs
@@ -0,0 +1,81 @@
+/*
+ * Copyright 2018 Google Inc. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+use std::cmp::max;
+use std::mem::{align_of, size_of};
+
+use endian_scalar::emplace_scalar;
+
+/// Trait to abstract over functionality needed to write values (either owned
+/// or referenced). Used in FlatBufferBuilder and implemented for generated
+/// types.
+pub trait Push: Sized {
+ type Output;
+ fn push(&self, dst: &mut [u8], _rest: &[u8]);
+ #[inline]
+ fn size() -> usize {
+ size_of::<Self::Output>()
+ }
+ #[inline]
+ fn alignment() -> PushAlignment {
+ PushAlignment::new(align_of::<Self::Output>())
+ }
+}
+
+/// Ensure Push alignment calculations are typesafe (because this helps reduce
+/// implementation issues when using FlatBufferBuilder::align).
+pub struct PushAlignment(usize);
+impl PushAlignment {
+ #[inline]
+ pub fn new(x: usize) -> Self {
+ PushAlignment { 0: x }
+ }
+ #[inline]
+ pub fn value(&self) -> usize {
+ self.0
+ }
+ #[inline]
+ pub fn max_of(&self, o: usize) -> Self {
+ PushAlignment::new(max(self.0, o))
+ }
+}
+
+/// Macro to implement Push for EndianScalar types.
+macro_rules! impl_push_for_endian_scalar {
+ ($ty:ident) => (
+ impl Push for $ty {
+ type Output = $ty;
+
+ #[inline]
+ fn push(&self, dst: &mut [u8], _rest: &[u8]) {
+ emplace_scalar::<$ty>(dst, *self);
+ }
+
+ }
+ )
+}
+
+impl_push_for_endian_scalar!(bool);
+impl_push_for_endian_scalar!(u8);
+impl_push_for_endian_scalar!(i8);
+impl_push_for_endian_scalar!(u16);
+impl_push_for_endian_scalar!(i16);
+impl_push_for_endian_scalar!(u32);
+impl_push_for_endian_scalar!(i32);
+impl_push_for_endian_scalar!(u64);
+impl_push_for_endian_scalar!(i64);
+impl_push_for_endian_scalar!(f32);
+impl_push_for_endian_scalar!(f64);
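Generated code provides Push impls for structs and offsets; as a hedged sketch (not part of this diff), a user-defined newtype over an existing scalar only has to delegate the write, and the default size() and alignment() then come from its Output type:

    extern crate flatbuffers;
    use flatbuffers::{FlatBufferBuilder, Push};

    /// Hypothetical wrapper that is stored on the wire as a plain f32.
    #[derive(Clone, Copy, PartialEq)]
    struct Meters(f32);

    impl Push for Meters {
        type Output = f32; // size() and alignment() default to f32's

        fn push(&self, dst: &mut [u8], rest: &[u8]) {
            self.0.push(dst, rest);
        }
    }

    fn main() {
        let mut fbb = FlatBufferBuilder::new();
        fbb.push(Meters(1.5)); // writes 4 little-endian bytes
    }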
diff --git a/rust/flatbuffers/src/table.rs b/rust/flatbuffers/src/table.rs
new file mode 100644
index 00000000..d9e952d0
--- /dev/null
+++ b/rust/flatbuffers/src/table.rs
@@ -0,0 +1,77 @@
+/*
+ * Copyright 2018 Google Inc. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+use follow::Follow;
+use primitives::*;
+use vtable::VTable;
+
+#[derive(Clone, Copy, Debug, PartialEq)]
+pub struct Table<'a> {
+ pub buf: &'a [u8],
+ pub loc: usize,
+}
+
+impl<'a> Table<'a> {
+ #[inline]
+ pub fn new(buf: &'a [u8], loc: usize) -> Self {
+ Table { buf: buf, loc: loc }
+ }
+ #[inline]
+ pub fn vtable(&'a self) -> VTable<'a> {
+ <BackwardsSOffset<VTable<'a>>>::follow(self.buf, self.loc)
+ }
+ #[inline]
+ pub fn get<T: Follow<'a> + 'a>(
+ &'a self,
+ slot_byte_loc: VOffsetT,
+ default: Option<T::Inner>,
+ ) -> Option<T::Inner> {
+ let o = self.vtable().get(slot_byte_loc) as usize;
+ if o == 0 {
+ return default;
+ }
+ Some(<T>::follow(self.buf, self.loc + o))
+ }
+}
+
+impl<'a> Follow<'a> for Table<'a> {
+ type Inner = Table<'a>;
+ #[inline]
+ fn follow(buf: &'a [u8], loc: usize) -> Self::Inner {
+ Table { buf: buf, loc: loc }
+ }
+}
+
+#[inline]
+pub fn get_root<'a, T: Follow<'a> + 'a>(data: &'a [u8]) -> T::Inner {
+ <ForwardsUOffset<T>>::follow(data, 0)
+}
+#[inline]
+pub fn get_size_prefixed_root<'a, T: Follow<'a> + 'a>(data: &'a [u8]) -> T::Inner {
+ <SkipSizePrefix<ForwardsUOffset<T>>>::follow(data, 0)
+}
+#[inline]
+pub fn buffer_has_identifier(data: &[u8], ident: &str, size_prefixed: bool) -> bool {
+ assert_eq!(ident.len(), FILE_IDENTIFIER_LENGTH);
+
+ let got = if size_prefixed {
+ <SkipSizePrefix<SkipRootOffset<FileIdentifier>>>::follow(data, 0)
+ } else {
+ <SkipRootOffset<FileIdentifier>>::follow(data, 0)
+ };
+
+ ident.as_bytes() == got
+}
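A sketch (not part of this diff) of how generated accessors would use Table::get: fields are addressed by their vtable byte offset, and a missing slot falls back to the schema default. The field name, index, and default here are hypothetical:

    extern crate flatbuffers;
    use flatbuffers::{Table, field_index_to_field_offset, get_root};

    // Like a flatc-generated accessor for a table whose field #0 is a u16
    // with a default of 100.
    fn hp(buf: &[u8]) -> u16 {
        let tab = get_root::<Table>(buf);
        tab.get::<u16>(field_index_to_field_offset(0), Some(100)).unwrap()
    }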
diff --git a/rust/flatbuffers/src/vector.rs b/rust/flatbuffers/src/vector.rs
new file mode 100644
index 00000000..8c2d6d50
--- /dev/null
+++ b/rust/flatbuffers/src/vector.rs
@@ -0,0 +1,133 @@
+/*
+ * Copyright 2018 Google Inc. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+use std::marker::PhantomData;
+use std::mem::size_of;
+use std::slice::from_raw_parts;
+use std::str::from_utf8_unchecked;
+
+use endian_scalar::{EndianScalar, read_scalar};
+use follow::Follow;
+use primitives::*;
+
+#[derive(Debug)]
+pub struct Vector<'a, T: 'a>(&'a [u8], usize, PhantomData<T>);
+
+impl<'a, T: 'a> Vector<'a, T> {
+ #[inline(always)]
+ pub fn new(buf: &'a [u8], loc: usize) -> Self {
+ Vector {
+ 0: buf,
+ 1: loc,
+ 2: PhantomData,
+ }
+ }
+
+ #[inline(always)]
+ pub fn len(&self) -> usize {
+ read_scalar::<UOffsetT>(&self.0[self.1 as usize..]) as usize
+ }
+}
+
+impl<'a, T: Follow<'a> + 'a> Vector<'a, T> {
+ #[inline(always)]
+ pub fn get(&self, idx: usize) -> T::Inner {
+ debug_assert!(idx < read_scalar::<u32>(&self.0[self.1 as usize..]) as usize);
+ let sz = size_of::<T>();
+ debug_assert!(sz > 0);
+ T::follow(self.0, self.1 as usize + SIZE_UOFFSET + sz * idx)
+ }
+}
+
+pub trait SafeSliceAccess {}
+impl<'a, T: SafeSliceAccess + 'a> Vector<'a, T> {
+ pub fn safe_slice(self) -> &'a [T] {
+ let buf = self.0;
+ let loc = self.1;
+ let sz = size_of::<T>();
+ debug_assert!(sz > 0);
+ let len = read_scalar::<UOffsetT>(&buf[loc..loc + SIZE_UOFFSET]) as usize;
+ let data_buf = &buf[loc + SIZE_UOFFSET..loc + SIZE_UOFFSET + len * sz];
+ let ptr = data_buf.as_ptr() as *const T;
+ let s: &'a [T] = unsafe { from_raw_parts(ptr, len) };
+ s
+ }
+}
+
+impl SafeSliceAccess for u8 {}
+impl SafeSliceAccess for i8 {}
+impl SafeSliceAccess for bool {}
+
+#[cfg(target_endian = "little")]
+mod le_safe_slice_impls {
+ impl super::SafeSliceAccess for u16 {}
+ impl super::SafeSliceAccess for u32 {}
+ impl super::SafeSliceAccess for u64 {}
+
+ impl super::SafeSliceAccess for i16 {}
+ impl super::SafeSliceAccess for i32 {}
+ impl super::SafeSliceAccess for i64 {}
+
+ impl super::SafeSliceAccess for f32 {}
+ impl super::SafeSliceAccess for f64 {}
+}
+
+pub use self::le_safe_slice_impls::*;
+
+pub fn follow_cast_ref<'a, T: Sized + 'a>(buf: &'a [u8], loc: usize) -> &'a T {
+ let sz = size_of::<T>();
+ let buf = &buf[loc..loc + sz];
+ let ptr = buf.as_ptr() as *const T;
+ unsafe { &*ptr }
+}
+
+impl<'a> Follow<'a> for &'a str {
+ type Inner = &'a str;
+ fn follow(buf: &'a [u8], loc: usize) -> Self::Inner {
+ let len = read_scalar::<UOffsetT>(&buf[loc..loc + SIZE_UOFFSET]) as usize;
+ let slice = &buf[loc + SIZE_UOFFSET..loc + SIZE_UOFFSET + len];
+ let s = unsafe { from_utf8_unchecked(slice) };
+ s
+ }
+}
+
+fn follow_slice_helper<T>(buf: &[u8], loc: usize) -> &[T] {
+ let sz = size_of::<T>();
+ debug_assert!(sz > 0);
+ let len = read_scalar::<UOffsetT>(&buf[loc..loc + SIZE_UOFFSET]) as usize;
+ let data_buf = &buf[loc + SIZE_UOFFSET..loc + SIZE_UOFFSET + len * sz];
+ let ptr = data_buf.as_ptr() as *const T;
+ let s: &[T] = unsafe { from_raw_parts(ptr, len) };
+ s
+}
+
+/// Implement direct slice access if the host is little-endian.
+#[cfg(target_endian = "little")]
+impl<'a, T: EndianScalar> Follow<'a> for &'a [T] {
+ type Inner = &'a [T];
+ fn follow(buf: &'a [u8], loc: usize) -> Self::Inner {
+ follow_slice_helper::<T>(buf, loc)
+ }
+}
+
+/// Implement Follow for all possible Vectors that have Follow-able elements.
+impl<'a, T: Follow<'a> + 'a> Follow<'a> for Vector<'a, T> {
+ type Inner = Vector<'a, T>;
+ fn follow(buf: &'a [u8], loc: usize) -> Self::Inner {
+ Vector::new(buf, loc)
+ }
+}
+
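A short sketch (not part of this diff) of the zero-copy path: element types marked SafeSliceAccess come back as a plain slice into the buffer, with no per-element decoding.

    extern crate flatbuffers;
    use flatbuffers::{FlatBufferBuilder, Vector, get_root};

    fn main() {
        let mut fbb = FlatBufferBuilder::new();
        let v = fbb.create_vector_direct(&[1u8, 2, 3]);
        fbb.finish_minimal(v);

        // u8 implements SafeSliceAccess, so the vector is viewed in place.
        let back = get_root::<Vector<u8>>(fbb.finished_data());
        assert_eq!(back.safe_slice(), &[1u8, 2, 3][..]);
    }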
diff --git a/rust/flatbuffers/src/vtable.rs b/rust/flatbuffers/src/vtable.rs
new file mode 100644
index 00000000..cd7ede6e
--- /dev/null
+++ b/rust/flatbuffers/src/vtable.rs
@@ -0,0 +1,95 @@
+/*
+ * Copyright 2018 Google Inc. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+use endian_scalar::read_scalar_at;
+use follow::Follow;
+use primitives::*;
+
+/// VTable encapsulates read-only usage of a vtable. It is only to be used
+/// by generated code.
+#[derive(Debug)]
+pub struct VTable<'a> {
+ buf: &'a [u8],
+ loc: usize,
+}
+
+impl<'a> PartialEq for VTable<'a> {
+ fn eq(&self, other: &VTable) -> bool {
+ self.as_bytes().eq(other.as_bytes())
+ }
+}
+
+impl<'a> VTable<'a> {
+ pub fn init(buf: &'a [u8], loc: usize) -> Self {
+ VTable {
+ buf: buf,
+ loc: loc,
+ }
+ }
+ pub fn num_fields(&self) -> usize {
+ (self.num_bytes() / SIZE_VOFFSET) - 2
+ }
+ pub fn num_bytes(&self) -> usize {
+ read_scalar_at::<VOffsetT>(self.buf, self.loc) as usize
+ }
+ pub fn object_inline_num_bytes(&self) -> usize {
+ let n = read_scalar_at::<VOffsetT>(self.buf, self.loc + SIZE_VOFFSET);
+ n as usize
+ }
+ pub fn get_field(&self, idx: usize) -> VOffsetT {
+ // TODO(rw): distinguish between None and 0?
+ if idx > self.num_fields() {
+ return 0;
+ }
+ read_scalar_at::<VOffsetT>(
+ self.buf,
+ self.loc + SIZE_VOFFSET + SIZE_VOFFSET + SIZE_VOFFSET * idx,
+ )
+ }
+ pub fn get(&self, byte_loc: VOffsetT) -> VOffsetT {
+ // TODO(rw): distinguish between None and 0?
+ if byte_loc as usize >= self.num_bytes() {
+ return 0;
+ }
+ read_scalar_at::<VOffsetT>(self.buf, self.loc + byte_loc as usize)
+ }
+ pub fn as_bytes(&self) -> &[u8] {
+ let len = self.num_bytes();
+ &self.buf[self.loc..self.loc + len]
+ }
+}
+
+
+#[allow(dead_code)]
+pub fn field_index_to_field_offset(field_id: VOffsetT) -> VOffsetT {
+ // Should correspond to what FlatBufferBuilder::end_table() builds up.
+ let fixed_fields = 2; // Vtable size and Object Size.
+ ((field_id + fixed_fields) * (SIZE_VOFFSET as VOffsetT)) as VOffsetT
+}
+
+#[allow(dead_code)]
+pub fn field_offset_to_field_index(field_o: VOffsetT) -> VOffsetT {
+ debug_assert!(field_o >= 2);
+ let fixed_fields = 2; // VTable size and Object Size.
+ (field_o / (SIZE_VOFFSET as VOffsetT)) - fixed_fields
+}
+
+impl<'a> Follow<'a> for VTable<'a> {
+ type Inner = VTable<'a>;
+ fn follow(buf: &'a [u8], loc: usize) -> Self::Inner {
+ VTable::init(buf, loc)
+ }
+}
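A small worked check (not part of this diff) of the index-to-offset mapping: the first two vtable slots hold the metadata fields, so field #0 lives at byte offset 4.

    extern crate flatbuffers;
    use flatbuffers::field_index_to_field_offset;

    fn main() {
        assert_eq!(field_index_to_field_offset(0), 4); // after the 2 metadata u16s
        assert_eq!(field_index_to_field_offset(1), 6);
        assert_eq!(field_index_to_field_offset(2), 8);
    }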
diff --git a/rust/flatbuffers/src/vtable_writer.rs b/rust/flatbuffers/src/vtable_writer.rs
new file mode 100644
index 00000000..119f794c
--- /dev/null
+++ b/rust/flatbuffers/src/vtable_writer.rs
@@ -0,0 +1,85 @@
+/*
+ * Copyright 2018 Google Inc. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+use std::ptr::write_bytes;
+
+use endian_scalar::{emplace_scalar, read_scalar};
+use primitives::*;
+
+/// VTableWriter compartmentalizes actions needed to create a vtable.
+#[derive(Debug)]
+pub struct VTableWriter<'a> {
+ buf: &'a mut [u8],
+}
+
+impl<'a> VTableWriter<'a> {
+ #[inline(always)]
+ pub fn init(buf: &'a mut [u8]) -> Self {
+ VTableWriter { buf: buf }
+ }
+
+ /// Writes the vtable length (in bytes) into the vtable.
+ ///
+ /// Note that callers already need to have computed this to initialize
+ /// a VTableWriter.
+ ///
+ /// In debug mode, asserts that the length of the underlying data is equal
+ /// to the provided value.
+ #[inline(always)]
+ pub fn write_vtable_byte_length(&mut self, n: VOffsetT) {
+ emplace_scalar::<VOffsetT>(&mut self.buf[..SIZE_VOFFSET], n);
+ debug_assert_eq!(n as usize, self.buf.len());
+ }
+
+ /// Writes an object length (in bytes) into the vtable.
+ #[inline(always)]
+ pub fn write_object_inline_size(&mut self, n: VOffsetT) {
+ emplace_scalar::<VOffsetT>(&mut self.buf[SIZE_VOFFSET..2 * SIZE_VOFFSET], n);
+ }
+
+ /// Gets an object field offset from the vtable. Only used for debugging.
+ ///
+ /// Note that this expects field offsets (which are like pointers), not
+ /// field ids (which are like array indices).
+ #[inline(always)]
+ pub fn get_field_offset(&self, vtable_offset: VOffsetT) -> VOffsetT {
+ let idx = vtable_offset as usize;
+ read_scalar::<VOffsetT>(&self.buf[idx..idx + SIZE_VOFFSET])
+ }
+
+ /// Writes an object field offset into the vtable.
+ ///
+ /// Note that this expects field offsets (which are like pointers), not
+ /// field ids (which are like array indices).
+ #[inline(always)]
+ pub fn write_field_offset(&mut self, vtable_offset: VOffsetT, object_data_offset: VOffsetT) {
+ let idx = vtable_offset as usize;
+ emplace_scalar::<VOffsetT>(&mut self.buf[idx..idx + SIZE_VOFFSET], object_data_offset);
+ }
+
+ /// Clears all data in this VTableWriter. Used to cleanly undo a
+ /// vtable write.
+ #[inline(always)]
+ pub fn clear(&mut self) {
+ // This is the closest thing to memset in Rust right now.
+ let len = self.buf.len();
+ let p = self.buf.as_mut_ptr() as *mut u8;
+ unsafe {
+ write_bytes(p, 0, len);
+ }
+ }
+}
+