/*
 * Copyright 2017 Google Inc. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef FLATBUFFERS_FLEXBUFFERS_H_
#define FLATBUFFERS_FLEXBUFFERS_H_

#include <map>
// Used to select STL variant.
#include "flatbuffers/base.h"
// We use the basic binary writing functions from the regular FlatBuffers.
#include "flatbuffers/util.h"

#ifdef _MSC_VER
#  include <intrin.h>
#endif

#if defined(_MSC_VER)
#  pragma warning(push)
#  pragma warning(disable : 4127)  // C4127: conditional expression is constant
#endif

namespace flexbuffers {

class Reference;
class Map;

// These are used in the lower 2 bits of a type field to determine the size of
// the elements (and/or size field) of the item pointed to (e.g. vector).
enum BitWidth {
  BIT_WIDTH_8 = 0,
  BIT_WIDTH_16 = 1,
  BIT_WIDTH_32 = 2,
  BIT_WIDTH_64 = 3,
};

// These are used as the upper 6 bits of a type field to indicate the actual
// type.
enum Type {
  FBT_NULL = 0,
  FBT_INT = 1,
  FBT_UINT = 2,
  FBT_FLOAT = 3,
  // Types above stored inline, types below store an offset.
  FBT_KEY = 4,
  FBT_STRING = 5,
  FBT_INDIRECT_INT = 6,
  FBT_INDIRECT_UINT = 7,
  FBT_INDIRECT_FLOAT = 8,
  FBT_MAP = 9,
  FBT_VECTOR = 10,      // Untyped.
  FBT_VECTOR_INT = 11,  // Typed any size (stores no type table).
  FBT_VECTOR_UINT = 12,
  FBT_VECTOR_FLOAT = 13,
  FBT_VECTOR_KEY = 14,
  // DEPRECATED, use FBT_VECTOR or FBT_VECTOR_KEY instead.
  // Read test.cpp/FlexBuffersDeprecatedTest() for details on why.
  FBT_VECTOR_STRING_DEPRECATED = 15,
  FBT_VECTOR_INT2 = 16,  // Typed tuple (no type table, no size field).
  FBT_VECTOR_UINT2 = 17,
  FBT_VECTOR_FLOAT2 = 18,
  FBT_VECTOR_INT3 = 19,  // Typed triple (no type table, no size field).
  FBT_VECTOR_UINT3 = 20,
  FBT_VECTOR_FLOAT3 = 21,
  FBT_VECTOR_INT4 = 22,  // Typed quad (no type table, no size field).
  FBT_VECTOR_UINT4 = 23,
  FBT_VECTOR_FLOAT4 = 24,
  FBT_BLOB = 25,
  FBT_BOOL = 26,
  // To allow the same conversion from element type to vector type
  // (see ToTypedVector below).
  FBT_VECTOR_BOOL = 36,
};

inline bool IsInline(Type t) { return t <= FBT_FLOAT || t == FBT_BOOL; }

inline bool IsTypedVectorElementType(Type t) {
  return (t >= FBT_INT && t <= FBT_STRING) || t == FBT_BOOL;
}

inline bool IsTypedVector(Type t) {
  return (t >= FBT_VECTOR_INT && t <= FBT_VECTOR_STRING_DEPRECATED) ||
         t == FBT_VECTOR_BOOL;
}

inline bool IsFixedTypedVector(Type t) {
  return t >= FBT_VECTOR_INT2 && t <= FBT_VECTOR_FLOAT4;
}

inline Type ToTypedVector(Type t, size_t fixed_len = 0) {
  FLATBUFFERS_ASSERT(IsTypedVectorElementType(t));
  switch (fixed_len) {
    case 0: return static_cast<Type>(t - FBT_INT + FBT_VECTOR_INT);
    case 2: return static_cast<Type>(t - FBT_INT + FBT_VECTOR_INT2);
    case 3: return static_cast<Type>(t - FBT_INT + FBT_VECTOR_INT3);
    case 4: return static_cast<Type>(t - FBT_INT + FBT_VECTOR_INT4);
    default: FLATBUFFERS_ASSERT(0); return FBT_NULL;
  }
}

inline Type ToTypedVectorElementType(Type t) {
  FLATBUFFERS_ASSERT(IsTypedVector(t));
  return static_cast<Type>(t - FBT_VECTOR_INT + FBT_INT);
}

inline Type ToFixedTypedVectorElementType(Type t, uint8_t *len) {
  FLATBUFFERS_ASSERT(IsFixedTypedVector(t));
  auto fixed_type = t - FBT_VECTOR_INT2;
  *len = static_cast<uint8_t>(fixed_type / 3 +
                              2);  // 3 types each, starting from length 2.
  return static_cast<Type>(fixed_type % 3 + FBT_INT);
}
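
// Example (an illustrative sketch): decoding FBT_VECTOR_FLOAT3 (= 21):
//   uint8_t len;
//   auto elem = ToFixedTypedVectorElementType(FBT_VECTOR_FLOAT3, &len);
//   // elem == FBT_FLOAT, len == 3.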

// TODO: implement proper support for 8/16-bit floats, or decide not to
// support them.
typedef int16_t half;
typedef int8_t quarter;

// TODO: can we do this without conditionals using intrinsics or inline asm
// on some platforms? Given branch prediction the method below should be
// decently quick, but it is the most frequently executed function.
// We could do an (unaligned) 64-bit read if we ifdef out the platforms for
// which that doesn't work (or where we'd read into un-owned memory).
template<typename R, typename T1, typename T2, typename T4, typename T8>
R ReadSizedScalar(const uint8_t *data, uint8_t byte_width) {
  return byte_width < 4
             ? (byte_width < 2
                    ? static_cast<R>(flatbuffers::ReadScalar<T1>(data))
                    : static_cast<R>(flatbuffers::ReadScalar<T2>(data)))
             : (byte_width < 8
                    ? static_cast<R>(flatbuffers::ReadScalar<T4>(data))
                    : static_cast<R>(flatbuffers::ReadScalar<T8>(data)));
}

inline int64_t ReadInt64(const uint8_t *data, uint8_t byte_width) {
  return ReadSizedScalar<int64_t, int8_t, int16_t, int32_t, int64_t>(
      data, byte_width);
}

inline uint64_t ReadUInt64(const uint8_t *data, uint8_t byte_width) {
  // This is the "hottest" function (all offset lookups use this), so worth
  // optimizing if possible.
  // TODO: GCC apparently replaces memcpy by a rep movsb, but only if count is a
  // constant, which here it isn't. Test if memcpy is still faster than
  // the conditionals in ReadSizedScalar. Can also use inline asm.
  // clang-format off
  #if defined(_MSC_VER) && (defined(_M_X64) || defined _M_IX86)
    uint64_t u = 0;
    __movsb(reinterpret_cast<uint8_t *>(&u),
            reinterpret_cast<const uint8_t *>(data), byte_width);
    return flatbuffers::EndianScalar(u);
  #else
    return ReadSizedScalar<uint64_t, uint8_t, uint16_t, uint32_t, uint64_t>(
        data, byte_width);
  #endif
  // clang-format on
}

inline double ReadDouble(const uint8_t *data, uint8_t byte_width) {
  return ReadSizedScalar<double, quarter, half, float, double>(data,
                                                               byte_width);
}

inline const uint8_t *Indirect(const uint8_t *offset, uint8_t byte_width) {
  return offset - ReadUInt64(offset, byte_width);
}

template<typename T> const uint8_t *Indirect(const uint8_t *offset) {
  return offset - flatbuffers::ReadScalar<T>(offset);
}

inline BitWidth WidthU(uint64_t u) {
#define FLATBUFFERS_GET_FIELD_BIT_WIDTH(value, width)                   \
  {                                                                     \
    if (!((u) & ~((1ULL << (width)) - 1ULL))) return BIT_WIDTH_##width; \
  }
  FLATBUFFERS_GET_FIELD_BIT_WIDTH(u, 8);
  FLATBUFFERS_GET_FIELD_BIT_WIDTH(u, 16);
  FLATBUFFERS_GET_FIELD_BIT_WIDTH(u, 32);
#undef FLATBUFFERS_GET_FIELD_BIT_WIDTH
  return BIT_WIDTH_64;
}

inline BitWidth WidthI(int64_t i) {
  auto u = static_cast<uint64_t>(i) << 1;
  return WidthU(i >= 0 ? u : ~u);
}

inline BitWidth WidthF(double f) {
  return static_cast<double>(static_cast<float>(f)) == f ? BIT_WIDTH_32
                                                         : BIT_WIDTH_64;
}
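
// Example (illustrative) results of the width helpers above:
//   WidthU(255) == BIT_WIDTH_8, WidthU(256) == BIT_WIDTH_16.
//   WidthI(127) == BIT_WIDTH_8, WidthI(128) == BIT_WIDTH_16 (the shift by 1
//   reserves room for the sign bit), and WidthI(-1) == BIT_WIDTH_8.
//   WidthF(0.5) == BIT_WIDTH_32, since 0.5 round-trips through float exactly.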

// Base class of all types below.
// Points into the data buffer and allows access to one type.
class Object {
 public:
  Object(const uint8_t *data, uint8_t byte_width)
      : data_(data), byte_width_(byte_width) {}

 protected:
  const uint8_t *data_;
  uint8_t byte_width_;
};

// Object that has a size, obtained either from size prefix, or elsewhere.
class Sized : public Object {
 public:
  // Size prefix.
  Sized(const uint8_t *data, uint8_t byte_width)
      : Object(data, byte_width), size_(read_size()) {}
  // Manual size.
  Sized(const uint8_t *data, uint8_t byte_width, size_t sz)
      : Object(data, byte_width), size_(sz) {}
  size_t size() const { return size_; }
  // Access size stored in `byte_width_` bytes before data_ pointer.
  size_t read_size() const {
    return static_cast<size_t>(ReadUInt64(data_ - byte_width_, byte_width_));
  }

 protected:
  size_t size_;
};

class String : public Sized {
 public:
  // Size prefix.
  String(const uint8_t *data, uint8_t byte_width) : Sized(data, byte_width) {}
  // Manual size.
  String(const uint8_t *data, uint8_t byte_width, size_t sz)
      : Sized(data, byte_width, sz) {}

  size_t length() const { return size(); }
  const char *c_str() const { return reinterpret_cast<const char *>(data_); }
  std::string str() const { return std::string(c_str(), size()); }

  static String EmptyString() {
    static const char *empty_string = "";
    return String(reinterpret_cast<const uint8_t *>(empty_string), 1, 0);
  }
  bool IsTheEmptyString() const { return data_ == EmptyString().data_; }
};

class Blob : public Sized {
 public:
  Blob(const uint8_t *data_buf, uint8_t byte_width)
      : Sized(data_buf, byte_width) {}

  static Blob EmptyBlob() {
    static const uint8_t empty_blob[] = { 0 /*len*/ };
    return Blob(empty_blob + 1, 1);
  }
  bool IsTheEmptyBlob() const { return data_ == EmptyBlob().data_; }
  const uint8_t *data() const { return data_; }
};

class Vector : public Sized {
 public:
  Vector(const uint8_t *data, uint8_t byte_width) : Sized(data, byte_width) {}

  Reference operator[](size_t i) const;

  static Vector EmptyVector() {
    static const uint8_t empty_vector[] = { 0 /*len*/ };
    return Vector(empty_vector + 1, 1);
  }
  bool IsTheEmptyVector() const { return data_ == EmptyVector().data_; }
};

class TypedVector : public Sized {
 public:
  TypedVector(const uint8_t *data, uint8_t byte_width, Type element_type)
      : Sized(data, byte_width), type_(element_type) {}

  Reference operator[](size_t i) const;

  static TypedVector EmptyTypedVector() {
    static const uint8_t empty_typed_vector[] = { 0 /*len*/ };
    return TypedVector(empty_typed_vector + 1, 1, FBT_INT);
  }
  bool IsTheEmptyVector() const {
    return data_ == TypedVector::EmptyTypedVector().data_;
  }

  Type ElementType() { return type_; }

  friend Reference;

 private:
  Type type_;

  friend Map;
};

class FixedTypedVector : public Object {
 public:
  FixedTypedVector(const uint8_t *data, uint8_t byte_width, Type element_type,
                   uint8_t len)
      : Object(data, byte_width), type_(element_type), len_(len) {}

  Reference operator[](size_t i) const;

  static FixedTypedVector EmptyFixedTypedVector() {
    static const uint8_t fixed_empty_vector[] = { 0 /* unused */ };
    return FixedTypedVector(fixed_empty_vector, 1, FBT_INT, 0);
  }
  bool IsTheEmptyFixedTypedVector() const {
    return data_ == FixedTypedVector::EmptyFixedTypedVector().data_;
  }

  Type ElementType() { return type_; }
  uint8_t size() { return len_; }

 private:
  Type type_;
  uint8_t len_;
};

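// Maps are stored as a values vector whose data is preceded (in order) by an
// offset to a keys vector, the byte width of that keys vector, and the length;
// data_ points at the first value. (A descriptive sketch inferred from Keys(),
// read_size() and EmptyMap() below.)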
class Map : public Vector {
 public:
  Map(const uint8_t *data, uint8_t byte_width) : Vector(data, byte_width) {}

  Reference operator[](const char *key) const;
  Reference operator[](const std::string &key) const;

  Vector Values() const { return Vector(data_, byte_width_); }

  TypedVector Keys() const {
    const size_t num_prefixed_fields = 3;
    auto keys_offset = data_ - byte_width_ * num_prefixed_fields;
    return TypedVector(Indirect(keys_offset, byte_width_),
                       static_cast<uint8_t>(
                           ReadUInt64(keys_offset + byte_width_, byte_width_)),
                       FBT_KEY);
  }

  static Map EmptyMap() {
    static const uint8_t empty_map[] = {
      0 /*keys_len*/, 0 /*keys_offset*/, 1 /*keys_width*/, 0 /*len*/
    };
    return Map(empty_map + 4, 1);
  }

  bool IsTheEmptyMap() const { return data_ == EmptyMap().data_; }
};

template<typename T>
void AppendToString(std::string &s, T &&v, bool keys_quoted) {
  s += "[ ";
  for (size_t i = 0; i < v.size(); i++) {
    if (i) s += ", ";
    v[i].ToString(true, keys_quoted, s);
  }
  s += " ]";
}

class Reference {
 public:
  Reference()
      : data_(nullptr),
        parent_width_(0),
        byte_width_(BIT_WIDTH_8),
        type_(FBT_NULL) {}

  Reference(const uint8_t *data, uint8_t parent_width, uint8_t byte_width,
            Type type)
      : data_(data),
        parent_width_(parent_width),
        byte_width_(byte_width),
        type_(type) {}

  Reference(const uint8_t *data, uint8_t parent_width, uint8_t packed_type)
      : data_(data), parent_width_(parent_width) {
    byte_width_ = 1U << static_cast<BitWidth>(packed_type & 3);
    type_ = static_cast<Type>(packed_type >> 2);
  }

  Type GetType() const { return type_; }

  bool IsNull() const { return type_ == FBT_NULL; }
  bool IsBool() const { return type_ == FBT_BOOL; }
  bool IsInt() const { return type_ == FBT_INT || type_ == FBT_INDIRECT_INT; }
  bool IsUInt() const {
    return type_ == FBT_UINT || type_ == FBT_INDIRECT_UINT;
  }
  bool IsIntOrUint() const { return IsInt() || IsUInt(); }
  bool IsFloat() const {
    return type_ == FBT_FLOAT || type_ == FBT_INDIRECT_FLOAT;
  }
  bool IsNumeric() const { return IsIntOrUint() || IsFloat(); }
  bool IsString() const { return type_ == FBT_STRING; }
  bool IsKey() const { return type_ == FBT_KEY; }
  bool IsVector() const { return type_ == FBT_VECTOR || type_ == FBT_MAP; }
  bool IsUntypedVector() const { return type_ == FBT_VECTOR; }
  bool IsTypedVector() const { return flexbuffers::IsTypedVector(type_); }
  bool IsFixedTypedVector() const {
    return flexbuffers::IsFixedTypedVector(type_);
  }
  bool IsAnyVector() const {
    return (IsTypedVector() || IsFixedTypedVector() || IsVector());
  }
  bool IsMap() const { return type_ == FBT_MAP; }
  bool IsBlob() const { return type_ == FBT_BLOB; }
  bool AsBool() const {
    return (type_ == FBT_BOOL ? ReadUInt64(data_, parent_width_)
                              : AsUInt64()) != 0;
  }

  // Reads any type as an int64_t. Never fails, does the most sensible
  // conversion. Floats are truncated, strings are parsed as a number where
  // possible, and vectors/maps return their size. Returns 0 if all else fails.
  int64_t AsInt64() const {
    if (type_ == FBT_INT) {
      // A fast path for the common case.
      return ReadInt64(data_, parent_width_);
    } else
      switch (type_) {
        case FBT_INDIRECT_INT: return ReadInt64(Indirect(), byte_width_);
        case FBT_UINT: return ReadUInt64(data_, parent_width_);
        case FBT_INDIRECT_UINT: return ReadUInt64(Indirect(), byte_width_);
        case FBT_FLOAT:
          return static_cast<int64_t>(ReadDouble(data_, parent_width_));
        case FBT_INDIRECT_FLOAT:
          return static_cast<int64_t>(ReadDouble(Indirect(), byte_width_));
        case FBT_NULL: return 0;
        case FBT_STRING: return flatbuffers::StringToInt(AsString().c_str());
        case FBT_VECTOR: return static_cast<int64_t>(AsVector().size());
        case FBT_BOOL: return ReadInt64(data_, parent_width_);
        default:
          // Convert other things to int.
          return 0;
      }
  }

  // TODO: could specialize these to not use AsInt64() if that saves
  // extension ops in generated code, and use a faster op than ReadInt64.
  int32_t AsInt32() const { return static_cast<int32_t>(AsInt64()); }
  int16_t AsInt16() const { return static_cast<int16_t>(AsInt64()); }
  int8_t AsInt8() const { return static_cast<int8_t>(AsInt64()); }

  uint64_t AsUInt64() const {
    if (type_ == FBT_UINT) {
      // A fast path for the common case.
      return ReadUInt64(data_, parent_width_);
    } else
      switch (type_) {
        case FBT_INDIRECT_UINT: return ReadUInt64(Indirect(), byte_width_);
        case FBT_INT: return ReadInt64(data_, parent_width_);
        case FBT_INDIRECT_INT: return ReadInt64(Indirect(), byte_width_);
        case FBT_FLOAT:
          return static_cast<uint64_t>(ReadDouble(data_, parent_width_));
        case FBT_INDIRECT_FLOAT:
          return static_cast<uint64_t>(ReadDouble(Indirect(), byte_width_));
        case FBT_NULL: return 0;
        case FBT_STRING: return flatbuffers::StringToUInt(AsString().c_str());
        case FBT_VECTOR: return static_cast<uint64_t>(AsVector().size());
        case FBT_BOOL: return ReadUInt64(data_, parent_width_);
        default:
          // Convert other things to uint.
          return 0;
      }
  }

  uint32_t AsUInt32() const { return static_cast<uint32_t>(AsUInt64()); }
  uint16_t AsUInt16() const { return static_cast<uint16_t>(AsUInt64()); }
  uint8_t AsUInt8() const { return static_cast<uint8_t>(AsUInt64()); }

  double AsDouble() const {
    if (type_ == FBT_FLOAT) {
      // A fast path for the common case.
      return ReadDouble(data_, parent_width_);
    } else
      switch (type_) {
        case FBT_INDIRECT_FLOAT: return ReadDouble(Indirect(), byte_width_);
        case FBT_INT:
          return static_cast<double>(ReadInt64(data_, parent_width_));
        case FBT_UINT:
          return static_cast<double>(ReadUInt64(data_, parent_width_));
        case FBT_INDIRECT_INT:
          return static_cast<double>(ReadInt64(Indirect(), byte_width_));
        case FBT_INDIRECT_UINT:
          return static_cast<double>(ReadUInt64(Indirect(), byte_width_));
        case FBT_NULL: return 0.0;
        case FBT_STRING: {
#if 1
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wnull-dereference"
          // TODO(b/173239141): Patched via micro/tools/make/flexbuffers_download.sh
          // Introduce a segfault for an unsupported code path for TFLM.
          return *(static_cast<double *>(nullptr));
#pragma GCC diagnostic pop
#else
          // This is the original code
          double d;
          flatbuffers::StringToNumber(AsString().c_str(), &d);
          return d;
#endif
        }
        case FBT_VECTOR: return static_cast<double>(AsVector().size());
        case FBT_BOOL:
          return static_cast<double>(ReadUInt64(data_, parent_width_));
        default:
          // Convert strings and other things to float.
          return 0;
      }
  }

  float AsFloat() const { return static_cast<float>(AsDouble()); }

  const char *AsKey() const {
    if (type_ == FBT_KEY || type_ == FBT_STRING) {
      return reinterpret_cast<const char *>(Indirect());
    } else {
      return "";
    }
  }

  // This function returns the empty string if you try to read something that
  // is not a string or key.
  String AsString() const {
    if (type_ == FBT_STRING) {
      return String(Indirect(), byte_width_);
    } else if (type_ == FBT_KEY) {
      auto key = Indirect();
      return String(key, byte_width_,
                    strlen(reinterpret_cast<const char *>(key)));
    } else {
      return String::EmptyString();
    }
  }

  // Unlike AsString(), this will convert any type to a std::string.
  std::string ToString() const {
    std::string s;
    ToString(false, false, s);
    return s;
  }
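
  // Example (illustrative): for a buffer whose root is the map
  // { "a": 1, "b": [ 2, 3 ] }, ToString() yields something like
  // "{ a: 1, b: [ 2, 3 ] }" (keys unquoted, nested strings quoted).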

  // Convert any type to a JSON-like string. strings_quoted determines if
  // string values at the top level receive "" quotes (inside other values
  // they always do). keys_quoted determines if keys are quoted, at any level.
  // TODO(wvo): add further options to have indentation/newlines.
  void ToString(bool strings_quoted, bool keys_quoted, std::string &s) const {
    if (type_ == FBT_STRING) {
      String str(Indirect(), byte_width_);
      if (strings_quoted) {
        flatbuffers::EscapeString(str.c_str(), str.length(), &s, true, false);
      } else {
        s.append(str.c_str(), str.length());
      }
    } else if (IsKey()) {
      auto str = AsKey();
      if (keys_quoted) {
        flatbuffers::EscapeString(str, strlen(str), &s, true, false);
      } else {
        s += str;
      }
    } else if (IsInt()) {
      s += flatbuffers::NumToString(AsInt64());
    } else if (IsUInt()) {
      s += flatbuffers::NumToString(AsUInt64());
    } else if (IsFloat()) {
      s += flatbuffers::NumToString(AsDouble());
    } else if (IsNull()) {
      s += "null";
    } else if (IsBool()) {
      s += AsBool() ? "true" : "false";
    } else if (IsMap()) {
      s += "{ ";
      auto m = AsMap();
      auto keys = m.Keys();
      auto vals = m.Values();
      for (size_t i = 0; i < keys.size(); i++) {
        keys[i].ToString(true, keys_quoted, s);
        s += ": ";
        vals[i].ToString(true, keys_quoted, s);
        if (i < keys.size() - 1) s += ", ";
      }
      s += " }";
    } else if (IsVector()) {
      AppendToString<Vector>(s, AsVector(), keys_quoted);
    } else if (IsTypedVector()) {
      AppendToString<TypedVector>(s, AsTypedVector(), keys_quoted);
    } else if (IsFixedTypedVector()) {
      AppendToString<FixedTypedVector>(s, AsFixedTypedVector(), keys_quoted);
    } else if (IsBlob()) {
      auto blob = AsBlob();
      flatbuffers::EscapeString(reinterpret_cast<const char *>(blob.data()),
                                blob.size(), &s, true, false);
    } else {
      s += "(?)";
    }
  }

  // This function returns the empty blob if you try to read something that
  // is not a blob. Strings can be viewed as blobs too.
  Blob AsBlob() const {
    if (type_ == FBT_BLOB || type_ == FBT_STRING) {
      return Blob(Indirect(), byte_width_);
    } else {
      return Blob::EmptyBlob();
    }
  }

  // This function returns the empty vector if you try to read something that
  // is not a vector. Maps can be viewed as vectors too.
  Vector AsVector() const {
    if (type_ == FBT_VECTOR || type_ == FBT_MAP) {
      return Vector(Indirect(), byte_width_);
    } else {
      return Vector::EmptyVector();
    }
  }

  TypedVector AsTypedVector() const {
    if (IsTypedVector()) {
      auto tv =
          TypedVector(Indirect(), byte_width_, ToTypedVectorElementType(type_));
      if (tv.type_ == FBT_STRING) {
        // These can't be accessed as strings, since we don't know the
        // bit-width of the size field; see the declaration of
        // FBT_VECTOR_STRING_DEPRECATED above for details.
        // We change the type here to be keys, which are a subtype of strings,
        // and will ignore the size field. This will truncate strings with
        // embedded nulls.
        tv.type_ = FBT_KEY;
      }
      return tv;
    } else {
      return TypedVector::EmptyTypedVector();
    }
  }

  FixedTypedVector AsFixedTypedVector() const {
    if (IsFixedTypedVector()) {
      uint8_t len = 0;
      auto vtype = ToFixedTypedVectorElementType(type_, &len);
      return FixedTypedVector(Indirect(), byte_width_, vtype, len);
    } else {
      return FixedTypedVector::EmptyFixedTypedVector();
    }
  }

  Map AsMap() const {
    if (type_ == FBT_MAP) {
      return Map(Indirect(), byte_width_);
    } else {
      return Map::EmptyMap();
    }
  }

  template<typename T> T As() const;

  // Experimental: Mutation functions.
  // These allow scalars in an already created buffer to be updated in-place.
  // Since by default scalars are stored in the smallest possible space,
  // the new value may not fit, in which case these functions return false.
  // To avoid this, you can construct the values you intend to mutate using
  // Builder::ForceMinimumBitWidth.
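  // Example (an illustrative sketch of the intended workflow):
  //   flexbuffers::Builder fbb;
  //   fbb.ForceMinimumBitWidth(flexbuffers::BIT_WIDTH_64);
  //   fbb.Vector([&]() { fbb.Int(1); fbb.Int(2); });
  //   fbb.Finish();
  //   std::vector<uint8_t> buf = fbb.GetBuffer();  // Mutable copy.
  //   auto vec = flexbuffers::GetRoot(buf).AsVector();
  //   bool ok = vec[0].MutateInt(1LL << 40);  // Fits: elements are 64 bits.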
  bool MutateInt(int64_t i) {
    if (type_ == FBT_INT) {
      return Mutate(data_, i, parent_width_, WidthI(i));
    } else if (type_ == FBT_INDIRECT_INT) {
      return Mutate(Indirect(), i, byte_width_, WidthI(i));
    } else if (type_ == FBT_UINT) {
      auto u = static_cast<uint64_t>(i);
      return Mutate(data_, u, parent_width_, WidthU(u));
    } else if (type_ == FBT_INDIRECT_UINT) {
      auto u = static_cast<uint64_t>(i);
      return Mutate(Indirect(), u, byte_width_, WidthU(u));
    } else {
      return false;
    }
  }

  bool MutateBool(bool b) {
    return type_ == FBT_BOOL && Mutate(data_, b, parent_width_, BIT_WIDTH_8);
  }

  bool MutateUInt(uint64_t u) {
    if (type_ == FBT_UINT) {
      return Mutate(data_, u, parent_width_, WidthU(u));
    } else if (type_ == FBT_INDIRECT_UINT) {
      return Mutate(Indirect(), u, byte_width_, WidthU(u));
    } else if (type_ == FBT_INT) {
      auto i = static_cast<int64_t>(u);
      return Mutate(data_, i, parent_width_, WidthI(i));
    } else if (type_ == FBT_INDIRECT_INT) {
      auto i = static_cast<int64_t>(u);
      return Mutate(Indirect(), i, byte_width_, WidthI(i));
    } else {
      return false;
    }
  }

  bool MutateFloat(float f) {
    if (type_ == FBT_FLOAT) {
      return MutateF(data_, f, parent_width_, BIT_WIDTH_32);
    } else if (type_ == FBT_INDIRECT_FLOAT) {
      return MutateF(Indirect(), f, byte_width_, BIT_WIDTH_32);
    } else {
      return false;
    }
  }

  bool MutateFloat(double d) {
    if (type_ == FBT_FLOAT) {
      return MutateF(data_, d, parent_width_, WidthF(d));
    } else if (type_ == FBT_INDIRECT_FLOAT) {
      return MutateF(Indirect(), d, byte_width_, WidthF(d));
    } else {
      return false;
    }
  }

  bool MutateString(const char *str, size_t len) {
    auto s = AsString();
    if (s.IsTheEmptyString()) return false;
    // This is very strict, could allow shorter strings, but that creates
    // garbage.
    if (s.length() != len) return false;
    memcpy(const_cast<char *>(s.c_str()), str, len);
    return true;
  }
  bool MutateString(const char *str) { return MutateString(str, strlen(str)); }
  bool MutateString(const std::string &str) {
    return MutateString(str.data(), str.length());
  }

 private:
  const uint8_t *Indirect() const {
    return flexbuffers::Indirect(data_, parent_width_);
  }

  template<typename T>
  bool Mutate(const uint8_t *dest, T t, size_t byte_width,
              BitWidth value_width) {
    auto fits = static_cast<size_t>(static_cast<size_t>(1U) << value_width) <=
                byte_width;
    if (fits) {
      t = flatbuffers::EndianScalar(t);
      memcpy(const_cast<uint8_t *>(dest), &t, byte_width);
    }
    return fits;
  }

  template<typename T>
  bool MutateF(const uint8_t *dest, T t, size_t byte_width,
               BitWidth value_width) {
    if (byte_width == sizeof(double))
      return Mutate(dest, static_cast<double>(t), byte_width, value_width);
    if (byte_width == sizeof(float))
      return Mutate(dest, static_cast<float>(t), byte_width, value_width);
    FLATBUFFERS_ASSERT(false);
    return false;
  }

  const uint8_t *data_;
  uint8_t parent_width_;
  uint8_t byte_width_;
  Type type_;
};

// Template specialization for As().
template<> inline bool Reference::As<bool>() const { return AsBool(); }

template<> inline int8_t Reference::As<int8_t>() const { return AsInt8(); }
template<> inline int16_t Reference::As<int16_t>() const { return AsInt16(); }
template<> inline int32_t Reference::As<int32_t>() const { return AsInt32(); }
template<> inline int64_t Reference::As<int64_t>() const { return AsInt64(); }

template<> inline uint8_t Reference::As<uint8_t>() const { return AsUInt8(); }
template<> inline uint16_t Reference::As<uint16_t>() const {
  return AsUInt16();
}
template<> inline uint32_t Reference::As<uint32_t>() const {
  return AsUInt32();
}
template<> inline uint64_t Reference::As<uint64_t>() const {
  return AsUInt64();
}

template<> inline double Reference::As<double>() const { return AsDouble(); }
template<> inline float Reference::As<float>() const { return AsFloat(); }

template<> inline String Reference::As<String>() const { return AsString(); }
template<> inline std::string Reference::As<std::string>() const {
  return AsString().str();
}

template<> inline Blob Reference::As<Blob>() const { return AsBlob(); }
template<> inline Vector Reference::As<Vector>() const { return AsVector(); }
template<> inline TypedVector Reference::As<TypedVector>() const {
  return AsTypedVector();
}
template<> inline FixedTypedVector Reference::As<FixedTypedVector>() const {
  return AsFixedTypedVector();
}
template<> inline Map Reference::As<Map>() const { return AsMap(); }

inline uint8_t PackedType(BitWidth bit_width, Type type) {
  return static_cast<uint8_t>(bit_width | (type << 2));
}
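
// Example (illustrative): PackedType(BIT_WIDTH_32, FBT_FLOAT) == (3 << 2) | 2
// == 14: the lower 2 bits hold the bit width, the upper 6 bits the type.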

inline uint8_t NullPackedType() { return PackedType(BIT_WIDTH_8, FBT_NULL); }

// Vector accessors.
// Note: if you try to access outside of bounds, you get a Null value back
// instead. Normally this would be an assert, but since this is "dynamically
// typed" data, you may not want that (someone sends you a 2d vector and you
// wanted 3d).
// The Null converts seamlessly into a default value for any other type.
// TODO(wvo): Could introduce an #ifdef that makes this into an assert?
inline Reference Vector::operator[](size_t i) const {
  auto len = size();
  if (i >= len) return Reference(nullptr, 1, NullPackedType());
  auto packed_type = (data_ + len * byte_width_)[i];
  auto elem = data_ + i * byte_width_;
  return Reference(elem, byte_width_, packed_type);
}

inline Reference TypedVector::operator[](size_t i) const {
  auto len = size();
  if (i >= len) return Reference(nullptr, 1, NullPackedType());
  auto elem = data_ + i * byte_width_;
  return Reference(elem, byte_width_, 1, type_);
}

inline Reference FixedTypedVector::operator[](size_t i) const {
  if (i >= len_) return Reference(nullptr, 1, NullPackedType());
  auto elem = data_ + i * byte_width_;
  return Reference(elem, byte_width_, 1, type_);
}

template<typename T> int KeyCompare(const void *key, const void *elem) {
  auto str_elem = reinterpret_cast<const char *>(
      Indirect<T>(reinterpret_cast<const uint8_t *>(elem)));
  auto skey = reinterpret_cast<const char *>(key);
  return strcmp(skey, str_elem);
}

inline Reference Map::operator[](const char *key) const {
  auto keys = Keys();
  // We can't pass keys.byte_width_ to the comparison function, so we have
  // to pick the right one ahead of time.
  int (*comp)(const void *, const void *) = nullptr;
  switch (keys.byte_width_) {
    case 1: comp = KeyCompare<uint8_t>; break;
    case 2: comp = KeyCompare<uint16_t>; break;
    case 4: comp = KeyCompare<uint32_t>; break;
    case 8: comp = KeyCompare<uint64_t>; break;
  }
  auto res = std::bsearch(key, keys.data_, keys.size(), keys.byte_width_, comp);
  if (!res) return Reference(nullptr, 1, NullPackedType());
  auto i = (reinterpret_cast<uint8_t *>(res) - keys.data_) / keys.byte_width_;
  return (*static_cast<const Vector *>(this))[i];
}

inline Reference Map::operator[](const std::string &key) const {
  return (*this)[key.c_str()];
}

inline Reference GetRoot(const uint8_t *buffer, size_t size) {
  // See Finish() below for the serialization counterpart of this.
  // The root starts at the end of the buffer, so we parse backwards from there.
  auto end = buffer + size;
  auto byte_width = *--end;
  auto packed_type = *--end;
  end -= byte_width;  // The root data item.
  return Reference(end, byte_width, packed_type);
}

inline Reference GetRoot(const std::vector<uint8_t> &buffer) {
  return GetRoot(flatbuffers::vector_data(buffer), buffer.size());
}
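
// Example (illustrative) of reading a finished buffer:
//   auto root = flexbuffers::GetRoot(buffer, size);
//   auto map = root.AsMap();
//   auto width = map["width"].AsInt32();       // 0 if the key is missing.
//   auto name = map["name"].AsString().str();  // "" if not a string/key.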

// Flags that configure how the Builder behaves.
// The "Share" flags determine if the Builder automatically tries to pool
// this type. Pooling can reduce the size of serialized data if there are
// multiple maps of the same kind, at the expense of slightly slower
// serialization (the cost of lookups) and more memory use (std::set).
// By default this is on for keys, but off for strings.
// Turn keys off if you have e.g. only one map.
// Turn strings on if you expect many non-unique string values.
// Additionally, sharing key vectors can save space if you have maps with
// identical field populations.
enum BuilderFlag {
  BUILDER_FLAG_NONE = 0,
  BUILDER_FLAG_SHARE_KEYS = 1,
  BUILDER_FLAG_SHARE_STRINGS = 2,
  BUILDER_FLAG_SHARE_KEYS_AND_STRINGS = 3,
  BUILDER_FLAG_SHARE_KEY_VECTORS = 4,
  BUILDER_FLAG_SHARE_ALL = 7,
};
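
// Example (illustrative): pool both keys and strings while building:
//   flexbuffers::Builder fbb(512,
//                            flexbuffers::BUILDER_FLAG_SHARE_KEYS_AND_STRINGS);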

class Builder FLATBUFFERS_FINAL_CLASS {
 public:
  Builder(size_t initial_size = 256,
          BuilderFlag flags = BUILDER_FLAG_SHARE_KEYS)
      : buf_(initial_size),
        finished_(false),
        flags_(flags),
        force_min_bit_width_(BIT_WIDTH_8),
        key_pool(KeyOffsetCompare(buf_)),
        string_pool(StringOffsetCompare(buf_)) {
    buf_.clear();
  }

  /// @brief Get the serialized buffer (after you call `Finish()`).
  /// @return Returns a vector owned by this class.
  const std::vector<uint8_t> &GetBuffer() const {
    Finished();
    return buf_;
  }

  // Size of the buffer. Does not include unfinished values.
  size_t GetSize() const { return buf_.size(); }

  // Reset all state so we can re-use the buffer.
  void Clear() {
    buf_.clear();
    stack_.clear();
    finished_ = false;
    // flags_ remains as-is.
    force_min_bit_width_ = BIT_WIDTH_8;
    key_pool.clear();
    string_pool.clear();
  }

  // All value constructing functions below have two versions: one that
  // takes a key (for placement inside a map) and one that doesn't (for inside
  // vectors and elsewhere).

  void Null() { stack_.push_back(Value()); }
  void Null(const char *key) {
    Key(key);
    Null();
  }

  void Int(int64_t i) { stack_.push_back(Value(i, FBT_INT, WidthI(i))); }
  void Int(const char *key, int64_t i) {
    Key(key);
    Int(i);
  }

  void UInt(uint64_t u) { stack_.push_back(Value(u, FBT_UINT, WidthU(u))); }
  void UInt(const char *key, uint64_t u) {
    Key(key);
    UInt(u);
  }

  void Float(float f) { stack_.push_back(Value(f)); }
  void Float(const char *key, float f) {
    Key(key);
    Float(f);
  }

  void Double(double f) { stack_.push_back(Value(f)); }
  void Double(const char *key, double d) {
    Key(key);
    Double(d);
  }

  void Bool(bool b) { stack_.push_back(Value(b)); }
  void Bool(const char *key, bool b) {
    Key(key);
    Bool(b);
  }

  void IndirectInt(int64_t i) { PushIndirect(i, FBT_INDIRECT_INT, WidthI(i)); }
  void IndirectInt(const char *key, int64_t i) {
    Key(key);
    IndirectInt(i);
  }

  void IndirectUInt(uint64_t u) {
    PushIndirect(u, FBT_INDIRECT_UINT, WidthU(u));
  }
  void IndirectUInt(const char *key, uint64_t u) {
    Key(key);
    IndirectUInt(u);
  }

  void IndirectFloat(float f) {
    PushIndirect(f, FBT_INDIRECT_FLOAT, BIT_WIDTH_32);
  }
  void IndirectFloat(const char *key, float f) {
    Key(key);
    IndirectFloat(f);
  }

  void IndirectDouble(double f) {
    PushIndirect(f, FBT_INDIRECT_FLOAT, WidthF(f));
  }
  void IndirectDouble(const char *key, double d) {
    Key(key);
    IndirectDouble(d);
  }

  size_t Key(const char *str, size_t len) {
    auto sloc = buf_.size();
    WriteBytes(str, len + 1);
    if (flags_ & BUILDER_FLAG_SHARE_KEYS) {
      auto it = key_pool.find(sloc);
      if (it != key_pool.end()) {
        // Already in the buffer. Remove key we just serialized, and use
        // existing offset instead.
        buf_.resize(sloc);
        sloc = *it;
      } else {
        key_pool.insert(sloc);
      }
    }
    stack_.push_back(Value(static_cast<uint64_t>(sloc), FBT_KEY, BIT_WIDTH_8));
    return sloc;
  }

  size_t Key(const char *str) { return Key(str, strlen(str)); }
  size_t Key(const std::string &str) { return Key(str.c_str(), str.size()); }

  size_t String(const char *str, size_t len) {
    auto reset_to = buf_.size();
    auto sloc = CreateBlob(str, len, 1, FBT_STRING);
    if (flags_ & BUILDER_FLAG_SHARE_STRINGS) {
      StringOffset so(sloc, len);
      auto it = string_pool.find(so);
      if (it != string_pool.end()) {
        // Already in the buffer. Remove string we just serialized, and use
        // existing offset instead.
        buf_.resize(reset_to);
        sloc = it->first;
        stack_.back().u_ = sloc;
      } else {
        string_pool.insert(so);
      }
    }
    return sloc;
  }
  size_t String(const char *str) { return String(str, strlen(str)); }
  size_t String(const std::string &str) {
    return String(str.c_str(), str.size());
  }
  void String(const flexbuffers::String &str) {
    String(str.c_str(), str.length());
  }

  void String(const char *key, const char *str) {
    Key(key);
    String(str);
  }
  void String(const char *key, const std::string &str) {
    Key(key);
    String(str);
  }
  void String(const char *key, const flexbuffers::String &str) {
    Key(key);
    String(str);
  }

  size_t Blob(const void *data, size_t len) {
    return CreateBlob(data, len, 0, FBT_BLOB);
  }
  size_t Blob(const std::vector<uint8_t> &v) {
    return CreateBlob(flatbuffers::vector_data(v), v.size(), 0, FBT_BLOB);
  }

  // TODO(wvo): support all the FlexBuffer types (like flexbuffers::String),
  // e.g. Vector etc. Also in overloaded versions.
  // Also some FlatBuffers types?

  size_t StartVector() { return stack_.size(); }
  size_t StartVector(const char *key) {
    Key(key);
    return stack_.size();
  }
  size_t StartMap() { return stack_.size(); }
  size_t StartMap(const char *key) {
    Key(key);
    return stack_.size();
  }

  // TODO(wvo): allow this to specify an alignment greater than the natural
  // alignment.
  size_t EndVector(size_t start, bool typed, bool fixed) {
    auto vec = CreateVector(start, stack_.size() - start, 1, typed, fixed);
    // Remove temp elements and return vector.
    stack_.resize(start);
    stack_.push_back(vec);
    return static_cast<size_t>(vec.u_);
  }

  size_t EndMap(size_t start) {
    // We should have interleaved keys and values on the stack.
    // Make sure it is an even number:
    auto len = stack_.size() - start;
    FLATBUFFERS_ASSERT(!(len & 1));
    len /= 2;
    // Make sure keys are all strings:
    for (auto key = start; key < stack_.size(); key += 2) {
      FLATBUFFERS_ASSERT(stack_[key].type_ == FBT_KEY);
    }
    // Now sort values, so later we can do a binary search lookup.
    // We want to sort 2 array elements at a time.
    struct TwoValue {
      Value key;
      Value val;
    };
    // TODO(wvo): strict aliasing?
    // TODO(wvo): allow the caller to indicate the data is already sorted
    // for maximum efficiency? With an assert to check sortedness to make sure
    // we're not breaking binary search.
    // Or, we can track if the map is sorted as keys are added, which would
    // be quite cheap (cheaper than checking it here), so we can skip this
    // step automatically when applicable, and encourage people to write in
    // sorted fashion.
    // std::sort is typically already a lot faster on sorted data though.
    auto dict =
        reinterpret_cast<TwoValue *>(flatbuffers::vector_data(stack_) + start);
    std::sort(dict, dict + len,
              [&](const TwoValue &a, const TwoValue &b) -> bool {
                auto as = reinterpret_cast<const char *>(
                    flatbuffers::vector_data(buf_) + a.key.u_);
                auto bs = reinterpret_cast<const char *>(
                    flatbuffers::vector_data(buf_) + b.key.u_);
                auto comp = strcmp(as, bs);
                // If this assertion hits, you've added two keys with the same
                // value to this map.
                // TODO: Have to check for pointer equality, as some sort
                // implementations apparently call this function with the same
                // element?? Why?
                FLATBUFFERS_ASSERT(comp || &a == &b);
                return comp < 0;
              });
    // First create a vector out of all keys.
    // TODO(wvo): if kBuilderFlagShareKeyVectors is true, see if we can share
    // the first vector.
    auto keys = CreateVector(start, len, 2, true, false);
    auto vec = CreateVector(start + 1, len, 2, false, false, &keys);
    // Remove temp elements and return map.
    stack_.resize(start);
    stack_.push_back(vec);
    return static_cast<size_t>(vec.u_);
  }

  template<typename F> size_t Vector(F f) {
    auto start = StartVector();
    f();
    return EndVector(start, false, false);
  }
  template<typename F, typename T> size_t Vector(F f, T &state) {
    auto start = StartVector();
    f(state);
    return EndVector(start, false, false);
  }
  template<typename F> size_t Vector(const char *key, F f) {
    auto start = StartVector(key);
    f();
    return EndVector(start, false, false);
  }
  template<typename F, typename T>
  size_t Vector(const char *key, F f, T &state) {
    auto start = StartVector(key);
    f(state);
    return EndVector(start, false, false);
  }

  template<typename T> void Vector(const T *elems, size_t len) {
    if (flatbuffers::is_scalar<T>::value) {
      // This path should be a lot quicker and use less space.
      ScalarVector(elems, len, false);
    } else {
      auto start = StartVector();
      for (size_t i = 0; i < len; i++) Add(elems[i]);
      EndVector(start, false, false);
    }
  }
  template<typename T>
  void Vector(const char *key, const T *elems, size_t len) {
    Key(key);
    Vector(elems, len);
  }
  template<typename T> void Vector(const std::vector<T> &vec) {
    Vector(flatbuffers::vector_data(vec), vec.size());
  }

  template<typename F> size_t TypedVector(F f) {
    auto start = StartVector();
    f();
    return EndVector(start, true, false);
  }
  template<typename F, typename T> size_t TypedVector(F f, T &state) {
    auto start = StartVector();
    f(state);
    return EndVector(start, true, false);
  }
  template<typename F> size_t TypedVector(const char *key, F f) {
    auto start = StartVector(key);
    f();
    return EndVector(start, true, false);
  }
  template<typename F, typename T>
  size_t TypedVector(const char *key, F f, T &state) {
    auto start = StartVector(key);
    f(state);
    return EndVector(start, true, false);
  }

  template<typename T> size_t FixedTypedVector(const T *elems, size_t len) {
    // We only support a few fixed vector lengths. For anything bigger, use a
    // regular typed vector.
    FLATBUFFERS_ASSERT(len >= 2 && len <= 4);
    // And only scalar values.
    static_assert(flatbuffers::is_scalar<T>::value, "Unrelated types");
    return ScalarVector(elems, len, true);
  }

  template<typename T>
  size_t FixedTypedVector(const char *key, const T *elems, size_t len) {
    Key(key);
    return FixedTypedVector(elems, len);
  }
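
  // Example (illustrative):
  //   float pos[3] = { 1.0f, 2.0f, 3.0f };
  //   fbb.FixedTypedVector("pos", pos, 3);  // Stored as FBT_VECTOR_FLOAT3.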

  template<typename F> size_t Map(F f) {
    auto start = StartMap();
    f();
    return EndMap(start);
  }
  template<typename F, typename T> size_t Map(F f, T &state) {
    auto start = StartMap();
    f(state);
    return EndMap(start);
  }
  template<typename F> size_t Map(const char *key, F f) {
    auto start = StartMap(key);
    f();
    return EndMap(start);
  }
  template<typename F, typename T> size_t Map(const char *key, F f, T &state) {
    auto start = StartMap(key);
    f(state);
    return EndMap(start);
  }
  template<typename T> void Map(const std::map<std::string, T> &map) {
    auto start = StartMap();
    for (auto it = map.begin(); it != map.end(); ++it)
      Add(it->first.c_str(), it->second);
    EndMap(start);
  }

  // If you wish to share a value explicitly (a value not shared automatically
  // through one of the BUILDER_FLAG_SHARE_* flags) you can do so with these
  // functions. Or if you wish to turn those flags off for performance reasons
  // and still do some explicit sharing. For example:
  //   builder.IndirectDouble(M_PI);
  //   auto id = builder.LastValue();  // Remember where we stored it.
  //   .. more code goes here ..
  //   builder.ReuseValue(id);  // Refers to same double by offset.
  // LastValue works regardless of whether the value has a key or not.
  // Works on any data type.
  struct Value;
  Value LastValue() { return stack_.back(); }
  void ReuseValue(Value v) { stack_.push_back(v); }
  void ReuseValue(const char *key, Value v) {
    Key(key);
    ReuseValue(v);
  }

  // Overloaded Add that tries to call the correct function above.
  void Add(int8_t i) { Int(i); }
  void Add(int16_t i) { Int(i); }
  void Add(int32_t i) { Int(i); }
  void Add(int64_t i) { Int(i); }
  void Add(uint8_t u) { UInt(u); }
  void Add(uint16_t u) { UInt(u); }
  void Add(uint32_t u) { UInt(u); }
  void Add(uint64_t u) { UInt(u); }
  void Add(float f) { Float(f); }
  void Add(double d) { Double(d); }
  void Add(bool b) { Bool(b); }
  void Add(const char *str) { String(str); }
  void Add(const std::string &str) { String(str); }
  void Add(const flexbuffers::String &str) { String(str); }

  template<typename T> void Add(const std::vector<T> &vec) { Vector(vec); }

  template<typename T> void Add(const char *key, const T &t) {
    Key(key);
    Add(t);
  }

  template<typename T> void Add(const std::map<std::string, T> &map) {
    Map(map);
  }

  template<typename T> void operator+=(const T &t) { Add(t); }

  // This function is useful in combination with the Mutate* functions above.
  // It forces elements of vectors and maps to have a minimum size, such that
  // they can later be updated without failing.
  // Call with no arguments to reset.
  void ForceMinimumBitWidth(BitWidth bw = BIT_WIDTH_8) {
    force_min_bit_width_ = bw;
  }

  void Finish() {
    // If you hit this assert, you likely have objects that were never included
    // in a parent. You need to have exactly one root to finish a buffer.
    // Check your Start/End calls are matched, and all objects are inside
    // some other object.
    FLATBUFFERS_ASSERT(stack_.size() == 1);

    // Write root value.
    auto byte_width = Align(stack_[0].ElemWidth(buf_.size(), 0));
    WriteAny(stack_[0], byte_width);
    // Write root type.
    Write(stack_[0].StoredPackedType(), 1);
    // Write root size. Normally determined by parent, but root has no parent :)
    Write(byte_width, 1);

    finished_ = true;
  }
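
  // Example (an illustrative sketch) of a complete build:
  //   flexbuffers::Builder fbb;
  //   fbb.Map([&]() {
  //     fbb.Int("x", 1);
  //     fbb.Vector("v", [&]() { fbb.Add(2); fbb.Add(3); });
  //   });
  //   fbb.Finish();
  //   const std::vector<uint8_t> &buf = fbb.GetBuffer();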

 private:
  void Finished() const {
    // If you get this assert, you're attempting to access a buffer
    // which hasn't been finished yet. Be sure to call
    // Builder::Finish with your root object.
    FLATBUFFERS_ASSERT(finished_);
  }

  // Align to prepare for writing a scalar with a certain size.
  uint8_t Align(BitWidth alignment) {
    auto byte_width = 1U << alignment;
    buf_.insert(buf_.end(), flatbuffers::PaddingBytes(buf_.size(), byte_width),
                0);
    return static_cast<uint8_t>(byte_width);
  }

  void WriteBytes(const void *val, size_t size) {
    buf_.insert(buf_.end(), reinterpret_cast<const uint8_t *>(val),
                reinterpret_cast<const uint8_t *>(val) + size);
  }

  template<typename T> void Write(T val, size_t byte_width) {
    FLATBUFFERS_ASSERT(sizeof(T) >= byte_width);
    val = flatbuffers::EndianScalar(val);
    WriteBytes(&val, byte_width);
  }

  void WriteDouble(double f, uint8_t byte_width) {
    switch (byte_width) {
      case 8: Write(f, byte_width); break;
      case 4: Write(static_cast<float>(f), byte_width); break;
      // case 2: Write(static_cast<half>(f), byte_width); break;
      // case 1: Write(static_cast<quarter>(f), byte_width); break;
      default: FLATBUFFERS_ASSERT(0);
    }
  }

  void WriteOffset(uint64_t o, uint8_t byte_width) {
    auto reloff = buf_.size() - o;
    FLATBUFFERS_ASSERT(byte_width == 8 || reloff < 1ULL << (byte_width * 8));
    Write(reloff, byte_width);
  }

  template<typename T> void PushIndirect(T val, Type type, BitWidth bit_width) {
    auto byte_width = Align(bit_width);
    auto iloc = buf_.size();
    Write(val, byte_width);
    stack_.push_back(Value(static_cast<uint64_t>(iloc), type, bit_width));
  }

  static BitWidth WidthB(size_t byte_width) {
    switch (byte_width) {
      case 1: return BIT_WIDTH_8;
      case 2: return BIT_WIDTH_16;
      case 4: return BIT_WIDTH_32;
      case 8: return BIT_WIDTH_64;
      default: FLATBUFFERS_ASSERT(false); return BIT_WIDTH_64;
    }
  }

  template<typename T> static Type GetScalarType() {
    static_assert(flatbuffers::is_scalar<T>::value, "Unrelated types");
    return flatbuffers::is_floating_point<T>::value
               ? FBT_FLOAT
               : flatbuffers::is_same<T, bool>::value
                     ? FBT_BOOL
                     : (flatbuffers::is_unsigned<T>::value ? FBT_UINT
                                                           : FBT_INT);
  }

 public:
  // This was really intended to be private, except for LastValue/ReuseValue.
  struct Value {
    union {
      int64_t i_;
      uint64_t u_;
      double f_;
    };

    Type type_;

    // For scalars: of itself, for vector: of its elements, for string: length.
    BitWidth min_bit_width_;

    Value() : i_(0), type_(FBT_NULL), min_bit_width_(BIT_WIDTH_8) {}

    Value(bool b)
        : u_(static_cast<uint64_t>(b)),
          type_(FBT_BOOL),
          min_bit_width_(BIT_WIDTH_8) {}

    Value(int64_t i, Type t, BitWidth bw)
        : i_(i), type_(t), min_bit_width_(bw) {}
    Value(uint64_t u, Type t, BitWidth bw)
        : u_(u), type_(t), min_bit_width_(bw) {}

    Value(float f)
        : f_(static_cast<double>(f)),
          type_(FBT_FLOAT),
          min_bit_width_(BIT_WIDTH_32) {}
    Value(double f) : f_(f), type_(FBT_FLOAT), min_bit_width_(WidthF(f)) {}

    uint8_t StoredPackedType(BitWidth parent_bit_width_ = BIT_WIDTH_8) const {
      return PackedType(StoredWidth(parent_bit_width_), type_);
    }

    BitWidth ElemWidth(size_t buf_size, size_t elem_index) const {
      if (IsInline(type_)) {
        return min_bit_width_;
      } else {
        // We have an absolute offset, but want to store a relative offset
        // elem_index elements beyond the current buffer end. Since whether
        // the relative offset fits in a certain byte_width depends on
        // the size of the elements before it (and their alignment), we have
        // to test for each size in turn.
        for (size_t byte_width = 1;
             byte_width <= sizeof(flatbuffers::largest_scalar_t);
             byte_width *= 2) {
          // Where are we going to write this offset?
          auto offset_loc = buf_size +
                            flatbuffers::PaddingBytes(buf_size, byte_width) +
                            elem_index * byte_width;
          // Compute relative offset.
          auto offset = offset_loc - u_;
          // Does it fit?
          auto bit_width = WidthU(offset);
          if (static_cast<size_t>(static_cast<size_t>(1U) << bit_width) ==
              byte_width)
            return bit_width;
        }
        FLATBUFFERS_ASSERT(false);  // Must match one of the sizes above.
        return BIT_WIDTH_64;
      }
    }

    BitWidth StoredWidth(BitWidth parent_bit_width_ = BIT_WIDTH_8) const {
      if (IsInline(type_)) {
        return (std::max)(min_bit_width_, parent_bit_width_);
      } else {
        return min_bit_width_;
      }
    }
  };

 private:
  void WriteAny(const Value &val, uint8_t byte_width) {
    switch (val.type_) {
      case FBT_NULL:
      case FBT_INT: Write(val.i_, byte_width); break;
      case FBT_BOOL:
      case FBT_UINT: Write(val.u_, byte_width); break;
      case FBT_FLOAT: WriteDouble(val.f_, byte_width); break;
      default: WriteOffset(val.u_, byte_width); break;
    }
  }

  size_t CreateBlob(const void *data, size_t len, size_t trailing, Type type) {
    auto bit_width = WidthU(len);
    auto byte_width = Align(bit_width);
    Write<uint64_t>(len, byte_width);
    auto sloc = buf_.size();
    WriteBytes(data, len + trailing);
    stack_.push_back(Value(static_cast<uint64_t>(sloc), type, bit_width));
    return sloc;
  }

  template<typename T>
  size_t ScalarVector(const T *elems, size_t len, bool fixed) {
    auto vector_type = GetScalarType<T>();
    auto byte_width = sizeof(T);
    auto bit_width = WidthB(byte_width);
    // If you get this assert, you're trying to write a vector with a size
    // field that is bigger than the scalars you're trying to write (e.g. a
    // byte vector > 255 elements). For such types, write a "blob" instead.
    // TODO: instead of asserting, could write vector with larger elements
    // instead, though that would be wasteful.
    FLATBUFFERS_ASSERT(WidthU(len) <= bit_width);
    Align(bit_width);
    if (!fixed) Write<uint64_t>(len, byte_width);
    auto vloc = buf_.size();
    for (size_t i = 0; i < len; i++) Write(elems[i], byte_width);
    stack_.push_back(Value(static_cast<uint64_t>(vloc),
                           ToTypedVector(vector_type, fixed ? len : 0),
                           bit_width));
    return vloc;
  }

  Value CreateVector(size_t start, size_t vec_len, size_t step, bool typed,
                     bool fixed, const Value *keys = nullptr) {
    FLATBUFFERS_ASSERT(
        !fixed ||
        typed);  // typed=false, fixed=true combination is not supported.
    // Figure out the smallest bit width we can store this vector with.
    auto bit_width = (std::max)(force_min_bit_width_, WidthU(vec_len));
    auto prefix_elems = 1;
    if (keys) {
      // If this vector is part of a map, we will prefix an offset to the keys
      // to this vector.
      bit_width = (std::max)(bit_width, keys->ElemWidth(buf_.size(), 0));
      prefix_elems += 2;
    }
    Type vector_type = FBT_KEY;
    // Check bit widths and types for all elements.
    for (size_t i = start; i < stack_.size(); i += step) {
      auto elem_width =
          stack_[i].ElemWidth(buf_.size(), i - start + prefix_elems);
      bit_width = (std::max)(bit_width, elem_width);
      if (typed) {
        if (i == start) {
          vector_type = stack_[i].type_;
        } else {
          // If you get this assert, you are writing a typed vector with
          // elements that are not all the same type.
          FLATBUFFERS_ASSERT(vector_type == stack_[i].type_);
        }
      }
    }
    // If you get this assert, your fixed types are not one of:
    // Int / UInt / Float / Key.
    FLATBUFFERS_ASSERT(!fixed || IsTypedVectorElementType(vector_type));
    auto byte_width = Align(bit_width);
    // Write the vector. First the keys width/offset if available, and size.
    if (keys) {
      WriteOffset(keys->u_, byte_width);
      Write<uint64_t>(1ULL << keys->min_bit_width_, byte_width);
    }
    if (!fixed) Write<uint64_t>(vec_len, byte_width);
    // Then the actual data.
    auto vloc = buf_.size();
    for (size_t i = start; i < stack_.size(); i += step) {
      WriteAny(stack_[i], byte_width);
    }
    // Then the types.
    if (!typed) {
      for (size_t i = start; i < stack_.size(); i += step) {
        buf_.push_back(stack_[i].StoredPackedType(bit_width));
      }
    }
    return Value(static_cast<uint64_t>(vloc),
                 keys ? FBT_MAP
                      : (typed ? ToTypedVector(vector_type, fixed ? vec_len : 0)
                               : FBT_VECTOR),
                 bit_width);
  }

  // You shouldn't really be copying instances of this class.
  Builder(const Builder &);
  Builder &operator=(const Builder &);

  std::vector<uint8_t> buf_;
  std::vector<Value> stack_;

  bool finished_;

  BuilderFlag flags_;

  BitWidth force_min_bit_width_;

  struct KeyOffsetCompare {
    explicit KeyOffsetCompare(const std::vector<uint8_t> &buf) : buf_(&buf) {}
    bool operator()(size_t a, size_t b) const {
      auto stra =
          reinterpret_cast<const char *>(flatbuffers::vector_data(*buf_) + a);
      auto strb =
          reinterpret_cast<const char *>(flatbuffers::vector_data(*buf_) + b);
      return strcmp(stra, strb) < 0;
    }
    const std::vector<uint8_t> *buf_;
  };

  typedef std::pair<size_t, size_t> StringOffset;
  struct StringOffsetCompare {
    explicit StringOffsetCompare(const std::vector<uint8_t> &buf)
        : buf_(&buf) {}
    bool operator()(const StringOffset &a, const StringOffset &b) const {
      auto stra = reinterpret_cast<const char *>(
          flatbuffers::vector_data(*buf_) + a.first);
      auto strb = reinterpret_cast<const char *>(
          flatbuffers::vector_data(*buf_) + b.first);
      return strncmp(stra, strb, (std::min)(a.second, b.second) + 1) < 0;
    }
    const std::vector<uint8_t> *buf_;
  };

  typedef std::set<size_t, KeyOffsetCompare> KeyOffsetMap;
  typedef std::set<StringOffset, StringOffsetCompare> StringOffsetMap;

  KeyOffsetMap key_pool;
  StringOffsetMap string_pool;
};

}  // namespace flexbuffers

#if defined(_MSC_VER)
#  pragma warning(pop)
#endif

#endif  // FLATBUFFERS_FLEXBUFFERS_H_