/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "tensorflow/lite/micro/test_helpers.h"

#include <cstdarg>
#include <cstddef>
#include <cstdint>
#include <initializer_list>
#include <new>
#include <utility>  // for std::swap, used by SimpleStatefulOp::Invoke.

#include "flatbuffers/flatbuffers.h"  // from @flatbuffers
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/core/api/error_reporter.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/micro/all_ops_resolver.h"
#include "tensorflow/lite/micro/micro_utils.h"
#include "tensorflow/lite/schema/schema_generated.h"

// TODO(b/170464050): Use TFLM test only version of schema_utils.

namespace tflite {
namespace testing {
namespace {

class StackAllocator : public flatbuffers::Allocator {
 public:
  StackAllocator() : data_(data_backing_), data_size_(0) {}

  uint8_t* allocate(size_t size) override {
    TFLITE_DCHECK((data_size_ + size) <= kStackAllocatorSize);
    uint8_t* result = data_;
    data_ += size;
    data_size_ += size;
    return result;
  }

  void deallocate(uint8_t* p, size_t) override {}

  static StackAllocator& instance() {
    // Avoid using true dynamic memory allocation to be portable to bare metal.
    static char inst_memory[sizeof(StackAllocator)];
    static StackAllocator* inst = new (inst_memory) StackAllocator;
    return *inst;
  }

  static constexpr size_t kStackAllocatorSize = 8192;

 private:
  uint8_t data_backing_[kStackAllocatorSize];
  uint8_t* data_;
  int data_size_;
};

flatbuffers::FlatBufferBuilder* BuilderInstance() {
  static char inst_memory[sizeof(flatbuffers::FlatBufferBuilder)];
  static flatbuffers::FlatBufferBuilder* inst =
      new (inst_memory) flatbuffers::FlatBufferBuilder(
          StackAllocator::kStackAllocatorSize, &StackAllocator::instance());
  return inst;
}

// A wrapper around the FlatBuffer API to help build models easily.
class ModelBuilder {
 public:
  typedef int32_t Tensor;
  typedef int Operator;
  typedef int Node;

  // `builder` needs to be available until BuildModel is called.
  explicit ModelBuilder(flatbuffers::FlatBufferBuilder* builder)
      : builder_(builder) {}

  // Registers an operator that will be used in the model.
  Operator RegisterOp(BuiltinOperator op, const char* custom_code);

  // Adds a tensor to the model.
  Tensor AddTensor(TensorType type, std::initializer_list<int32_t> shape) {
    return AddTensorImpl(type, /* is_variable */ false, shape);
  }

  // Adds a variable tensor to the model.
  Tensor AddVariableTensor(TensorType type,
                           std::initializer_list<int32_t> shape) {
    return AddTensorImpl(type, /* is_variable */ true, shape);
  }

  // Adds a node to the model with given input and output Tensors.
  Node AddNode(Operator op, std::initializer_list<Tensor> inputs,
               std::initializer_list<Tensor> outputs);

  void AddMetadata(const char* description_string,
                   const int32_t* metadata_buffer_data, size_t num_elements);

  // Constructs the flatbuffer model using `builder_` and returns a pointer
  // to it. The returned model has the same lifetime as `builder_`.
  // Note that the default value of 0 for `num_subgraph_inputs` means that all
  // tensor inputs are placed in the subgraph input list.
  const Model* BuildModel(std::initializer_list<Tensor> inputs,
                          std::initializer_list<Tensor> outputs,
                          size_t num_subgraph_inputs = 0);

 private:
  // Adds a tensor to the model.
  Tensor AddTensorImpl(TensorType type, bool is_variable,
                       std::initializer_list<int32_t> shape);

  flatbuffers::FlatBufferBuilder* builder_;

  static constexpr int kMaxOperatorCodes = 10;
  flatbuffers::Offset<tflite::OperatorCode> operator_codes_[kMaxOperatorCodes];
  int next_operator_code_id_ = 0;

  static constexpr int kMaxOperators = 50;
  flatbuffers::Offset<tflite::Operator> operators_[kMaxOperators];
  int next_operator_id_ = 0;

  static constexpr int kMaxTensors = 50;
  flatbuffers::Offset<tflite::Tensor> tensors_[kMaxTensors];

  static constexpr int kMaxMetadataBuffers = 10;

  static constexpr int kMaxMetadatas = 10;
  flatbuffers::Offset<Metadata> metadata_[kMaxMetadatas];

  flatbuffers::Offset<Buffer> metadata_buffers_[kMaxMetadataBuffers];

  int nbr_of_metadata_buffers_ = 0;

  int next_tensor_id_ = 0;
};
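
// A quick usage sketch of ModelBuilder (illustrative only; the op name,
// tensor shapes, and variable names below are hypothetical and not taken
// from any model in this file):
//
//   flatbuffers::FlatBufferBuilder* fb = BuilderInstance();
//   ModelBuilder mb(fb);
//   const ModelBuilder::Operator op =
//       mb.RegisterOp(BuiltinOperator_CUSTOM, "my_custom_op");
//   const ModelBuilder::Tensor in = mb.AddTensor(TensorType_FLOAT32, {1, 4});
//   const ModelBuilder::Tensor out = mb.AddTensor(TensorType_FLOAT32, {1, 4});
//   mb.AddNode(op, {in}, {out});
//   const Model* model = mb.BuildModel({in}, {out});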

ModelBuilder::Operator ModelBuilder::RegisterOp(BuiltinOperator op,
                                                const char* custom_code) {
  // Strict inequality: the next write is at index next_operator_code_id_.
  TFLITE_DCHECK(next_operator_code_id_ < kMaxOperatorCodes);
  operator_codes_[next_operator_code_id_] = tflite::CreateOperatorCodeDirect(
      *builder_, /*deprecated_builtin_code=*/0, custom_code, /*version=*/0, op);
  next_operator_code_id_++;
  return next_operator_code_id_ - 1;
}

ModelBuilder::Node ModelBuilder::AddNode(
    ModelBuilder::Operator op,
    std::initializer_list<ModelBuilder::Tensor> inputs,
    std::initializer_list<ModelBuilder::Tensor> outputs) {
  // Strict inequality: the next write is at index next_operator_id_.
  TFLITE_DCHECK(next_operator_id_ < kMaxOperators);
  operators_[next_operator_id_] = tflite::CreateOperator(
      *builder_, op, builder_->CreateVector(inputs.begin(), inputs.size()),
      builder_->CreateVector(outputs.begin(), outputs.size()),
      BuiltinOptions_NONE);
  next_operator_id_++;
  return next_operator_id_ - 1;
}

void ModelBuilder::AddMetadata(const char* description_string,
                               const int32_t* metadata_buffer_data,
                               size_t num_elements) {
  metadata_[ModelBuilder::nbr_of_metadata_buffers_] =
      CreateMetadata(*builder_, builder_->CreateString(description_string),
                     1 + ModelBuilder::nbr_of_metadata_buffers_);

  metadata_buffers_[nbr_of_metadata_buffers_] = tflite::CreateBuffer(
      *builder_, builder_->CreateVector(
                     reinterpret_cast<const uint8_t*>(metadata_buffer_data),
                     sizeof(int32_t) * num_elements));

  ModelBuilder::nbr_of_metadata_buffers_++;
}

const Model* ModelBuilder::BuildModel(
    std::initializer_list<ModelBuilder::Tensor> inputs,
    std::initializer_list<ModelBuilder::Tensor> outputs,
    size_t num_subgraph_inputs) {
  // Model schema requires an empty buffer at idx 0.
  size_t buffer_size = 1 + ModelBuilder::nbr_of_metadata_buffers_;
  flatbuffers::Offset<Buffer> buffers[kMaxMetadataBuffers];
  buffers[0] = tflite::CreateBuffer(*builder_);

  // Place the metadata buffers first in the buffer list, since their indices
  // have already been set in AddMetadata().
  for (int i = 1; i < ModelBuilder::nbr_of_metadata_buffers_ + 1; ++i) {
    buffers[i] = metadata_buffers_[i - 1];
  }

  // Default to a single-subgraph model.
  constexpr size_t subgraphs_size = 1;

  // Find out the number of subgraph inputs.
  if (num_subgraph_inputs == 0) {
    // This is the default case.
    num_subgraph_inputs = inputs.size();
  } else {
    // A non-zero value of num_subgraph_inputs means that some of
    // the operator input tensors are not subgraph inputs.
    TFLITE_DCHECK(num_subgraph_inputs <= inputs.size());
  }

  const flatbuffers::Offset<SubGraph> subgraphs[subgraphs_size] = {
      tflite::CreateSubGraph(
          *builder_, builder_->CreateVector(tensors_, next_tensor_id_),
          builder_->CreateVector(inputs.begin(), num_subgraph_inputs),
          builder_->CreateVector(outputs.begin(), outputs.size()),
          builder_->CreateVector(operators_, next_operator_id_),
          builder_->CreateString("test_subgraph"))};

  flatbuffers::Offset<Model> model_offset;
  if (ModelBuilder::nbr_of_metadata_buffers_ > 0) {
    model_offset = tflite::CreateModel(
        *builder_, 0,
        builder_->CreateVector(operator_codes_, next_operator_code_id_),
        builder_->CreateVector(subgraphs, subgraphs_size),
        builder_->CreateString("test_model"),
        builder_->CreateVector(buffers, buffer_size), 0,
        builder_->CreateVector(metadata_,
                               ModelBuilder::nbr_of_metadata_buffers_));
  } else {
    model_offset = tflite::CreateModel(
        *builder_, 0,
        builder_->CreateVector(operator_codes_, next_operator_code_id_),
        builder_->CreateVector(subgraphs, subgraphs_size),
        builder_->CreateString("test_model"),
        builder_->CreateVector(buffers, buffer_size));
  }

  tflite::FinishModelBuffer(*builder_, model_offset);
  void* model_pointer = builder_->GetBufferPointer();
  const Model* model = flatbuffers::GetRoot<Model>(model_pointer);
  return model;
}

ModelBuilder::Tensor ModelBuilder::AddTensorImpl(
    TensorType type, bool is_variable, std::initializer_list<int32_t> shape) {
  // Strict inequality: the next write is at index next_tensor_id_.
  TFLITE_DCHECK(next_tensor_id_ < kMaxTensors);
  tensors_[next_tensor_id_] = tflite::CreateTensor(
      *builder_, builder_->CreateVector(shape.begin(), shape.size()), type,
      /* buffer */ 0, /* name */ 0, /* quantization */ 0,
      /* is_variable */ is_variable,
      /* sparsity */ 0);
  next_tensor_id_++;
  return next_tensor_id_ - 1;
}

const Model* BuildSimpleStatefulModel() {
  using flatbuffers::Offset;
  flatbuffers::FlatBufferBuilder* fb_builder = BuilderInstance();

  ModelBuilder model_builder(fb_builder);

  const int op_id =
      model_builder.RegisterOp(BuiltinOperator_CUSTOM, "simple_stateful_op");
  const int input_tensor = model_builder.AddTensor(TensorType_UINT8, {3});
  const int median_tensor = model_builder.AddTensor(TensorType_UINT8, {3});
  const int invoke_count_tensor =
      model_builder.AddTensor(TensorType_INT32, {1});

  model_builder.AddNode(op_id, {input_tensor},
                        {median_tensor, invoke_count_tensor});
  return model_builder.BuildModel({input_tensor},
                                  {median_tensor, invoke_count_tensor});
}

const Model* BuildSimpleModelWithBranch() {
  using flatbuffers::Offset;
  flatbuffers::FlatBufferBuilder* fb_builder = BuilderInstance();

  ModelBuilder model_builder(fb_builder);
  /* Model structure
                | t0
        +-------|
        |       v
        |  +---------+
        |  |   n0    |
        |  |         |
        |  +---------+
        v       +
                |
   +---------+  | t1
   |   n1    |  |
   |         |  |
   +---------+  |
        |       |
    t2  |       v
        |  +---------+
        +->|   n2    |
           |         |
           +-------|-+
                   | t3
                   v
  */
  const int op_id =
      model_builder.RegisterOp(BuiltinOperator_CUSTOM, "mock_custom");
  const int t0 = model_builder.AddTensor(TensorType_FLOAT32, {2, 2, 3});
  const int t1 = model_builder.AddTensor(TensorType_FLOAT32, {2, 2, 3});
  const int t2 = model_builder.AddTensor(TensorType_FLOAT32, {2, 2, 3});
  const int t3 = model_builder.AddTensor(TensorType_FLOAT32, {2, 2, 3});
  model_builder.AddNode(op_id, {t0}, {t1});      // n0
  model_builder.AddNode(op_id, {t0}, {t2});      // n1
  model_builder.AddNode(op_id, {t1, t2}, {t3});  // n2
  return model_builder.BuildModel({t0}, {t3});
}

const Model* BuildModelWithOfflinePlanning(int number_of_tensors,
                                           const int32_t* metadata_buffer,
                                           NodeConnection* node_conn,
                                           int num_conns,
                                           int num_subgraph_inputs) {
  using flatbuffers::Offset;
  flatbuffers::FlatBufferBuilder* fb_builder = BuilderInstance();

  ModelBuilder model_builder(fb_builder);

  const int op_id =
      model_builder.RegisterOp(BuiltinOperator_CUSTOM, "mock_custom");

  for (int i = 0; i < number_of_tensors; ++i) {
    model_builder.AddTensor(TensorType_FLOAT32, {2, 2, 3});
  }

  for (int i = 0; i < num_conns; ++i) {
    model_builder.AddNode(op_id, node_conn[i].input, node_conn[i].output);
  }

  model_builder.AddMetadata(
      "OfflineMemoryAllocation", metadata_buffer,
      number_of_tensors + tflite::testing::kOfflinePlannerHeaderSize);

  return model_builder.BuildModel(
      node_conn[0].input, node_conn[num_conns - 1].output, num_subgraph_inputs);
}
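
// Sketch of the metadata buffer consumed above, assuming the offline planner
// header is three int32_t words (consistent with kOfflinePlannerHeaderSize)
// followed by one arena offset per tensor. The concrete values and the
// node_connections variable are hypothetical:
//
//   const int32_t metadata[] = {/*version=*/1, /*subgraph=*/0,
//                               /*tensor count=*/2,
//                               /*tensor 0 arena offset=*/0,
//                               /*tensor 1 arena offset=*/48};
//   const Model* model = BuildModelWithOfflinePlanning(
//       /*number_of_tensors=*/2, metadata, node_connections,
//       /*num_conns=*/1, /*num_subgraph_inputs=*/0);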

const Model* BuildModelWithUnusedInputs() {
  using flatbuffers::Offset;
  flatbuffers::FlatBufferBuilder* builder = BuilderInstance();

  constexpr size_t buffers_size = 1;
  const Offset<Buffer> buffers[buffers_size] = {CreateBuffer(*builder)};
  constexpr size_t tensor_shape_size = 2;
  const int32_t tensor_shape[tensor_shape_size] = {1, 64};
  constexpr size_t tensors_size = 4;
  const Offset<Tensor> tensors[tensors_size] = {
      CreateTensor(*builder,
                   builder->CreateVector(tensor_shape, tensor_shape_size),
                   TensorType_INT8, 0,
                   builder->CreateString("test_input_tensor"), 0, false),
      CreateTensor(*builder,
                   builder->CreateVector(tensor_shape, tensor_shape_size),
                   TensorType_INT8, 0,
                   builder->CreateString("test_unused_input_tensor"), 0, false),
      CreateTensor(*builder,
                   builder->CreateVector(tensor_shape, tensor_shape_size),
                   TensorType_INT8, 0,
                   builder->CreateString("test_output_tensor"), 0, false),
      CreateTensor(*builder,
                   builder->CreateVector(tensor_shape, tensor_shape_size),
                   TensorType_INT8, 0,
                   builder->CreateString("test_unused_tensor"), 0, false),
  };
  constexpr size_t inputs_size = 2;
  const int32_t inputs[inputs_size] = {0, 1};
  constexpr size_t outputs_size = 1;
  const int32_t outputs[outputs_size] = {2};
  constexpr size_t operator_inputs_size = 1;
  const int32_t operator_inputs[operator_inputs_size] = {0};
  constexpr size_t operator_outputs_size = 1;
  const int32_t operator_outputs[operator_outputs_size] = {2};
  constexpr size_t operators_size = 1;
  const Offset<Operator> operators[operators_size] = {
      CreateOperator(
          *builder, 0,
          builder->CreateVector(operator_inputs, operator_inputs_size),
          builder->CreateVector(operator_outputs, operator_outputs_size),
          BuiltinOptions_NONE),
  };
  constexpr size_t subgraphs_size = 1;
  const Offset<SubGraph> subgraphs[subgraphs_size] = {
      CreateSubGraph(*builder, builder->CreateVector(tensors, tensors_size),
                     builder->CreateVector(inputs, inputs_size),
                     builder->CreateVector(outputs, outputs_size),
                     builder->CreateVector(operators, operators_size),
                     builder->CreateString("test_subgraph"))};
  constexpr size_t operator_codes_size = 1;
  const Offset<OperatorCode> operator_codes[operator_codes_size] = {
      CreateOperatorCodeDirect(*builder, /*deprecated_builtin_code=*/0,
                               "mock_custom",
                               /*version=*/0, BuiltinOperator_CUSTOM)};
  const Offset<Model> model_offset = CreateModel(
      *builder, 0, builder->CreateVector(operator_codes, operator_codes_size),
      builder->CreateVector(subgraphs, subgraphs_size),
      builder->CreateString("test_model"),
      builder->CreateVector(buffers, buffers_size));
  FinishModelBuffer(*builder, model_offset);
  void* model_pointer = builder->GetBufferPointer();
  const Model* model = flatbuffers::GetRoot<Model>(model_pointer);
  return model;
}

const Model* BuildSimpleMockModel() {
  using flatbuffers::Offset;
  flatbuffers::FlatBufferBuilder* builder = BuilderInstance();

  constexpr size_t buffer_data_size = 1;
  const uint8_t buffer_data[buffer_data_size] = {21};
  constexpr size_t buffers_size = 2;
  const Offset<Buffer> buffers[buffers_size] = {
      CreateBuffer(*builder),
      CreateBuffer(*builder,
                   builder->CreateVector(buffer_data, buffer_data_size))};
  constexpr size_t tensor_shape_size = 1;
  const int32_t tensor_shape[tensor_shape_size] = {1};
  constexpr size_t tensors_size = 4;
  const Offset<Tensor> tensors[tensors_size] = {
      CreateTensor(*builder,
                   builder->CreateVector(tensor_shape, tensor_shape_size),
                   TensorType_INT32, 0,
                   builder->CreateString("test_input_tensor"), 0, false),
      CreateTensor(*builder,
                   builder->CreateVector(tensor_shape, tensor_shape_size),
                   TensorType_UINT8, 1,
                   builder->CreateString("test_weight_tensor"), 0, false),
      CreateTensor(*builder,
                   builder->CreateVector(tensor_shape, tensor_shape_size),
                   TensorType_INT32, 0,
                   builder->CreateString("test_output_tensor"), 0, false),
      CreateTensor(*builder,
                   builder->CreateVector(tensor_shape, tensor_shape_size),
                   TensorType_INT32, 0,
                   builder->CreateString("test_output2_tensor"), 0, false),
  };
  constexpr size_t inputs_size = 1;
  const int32_t inputs[inputs_size] = {0};
  constexpr size_t outputs_size = 2;
  const int32_t outputs[outputs_size] = {2, 3};
  constexpr size_t operator_inputs_size = 2;
  const int32_t operator_inputs[operator_inputs_size] = {0, 1};
  constexpr size_t operator_outputs_size = 1;
  const int32_t operator_outputs[operator_outputs_size] = {2};
  const int32_t operator2_outputs[operator_outputs_size] = {3};
  constexpr size_t operators_size = 2;
  const Offset<Operator> operators[operators_size] = {
      CreateOperator(
          *builder, 0,
          builder->CreateVector(operator_inputs, operator_inputs_size),
          builder->CreateVector(operator_outputs, operator_outputs_size),
          BuiltinOptions_NONE),
      CreateOperator(
          *builder, 0,
          builder->CreateVector(operator_inputs, operator_inputs_size),
          builder->CreateVector(operator2_outputs, operator_outputs_size),
          BuiltinOptions_NONE),
  };
  constexpr size_t subgraphs_size = 1;
  const Offset<SubGraph> subgraphs[subgraphs_size] = {
      CreateSubGraph(*builder, builder->CreateVector(tensors, tensors_size),
                     builder->CreateVector(inputs, inputs_size),
                     builder->CreateVector(outputs, outputs_size),
                     builder->CreateVector(operators, operators_size),
                     builder->CreateString("test_subgraph"))};
  constexpr size_t operator_codes_size = 1;
  const Offset<OperatorCode> operator_codes[operator_codes_size] = {
      CreateOperatorCodeDirect(*builder, /*deprecated_builtin_code=*/0,
                               "mock_custom",
                               /*version=*/0, BuiltinOperator_CUSTOM)};
  const Offset<Model> model_offset = CreateModel(
      *builder, 0, builder->CreateVector(operator_codes, operator_codes_size),
      builder->CreateVector(subgraphs, subgraphs_size),
      builder->CreateString("test_model"),
      builder->CreateVector(buffers, buffers_size));
  FinishModelBuffer(*builder, model_offset);
  void* model_pointer = builder->GetBufferPointer();
  const Model* model = flatbuffers::GetRoot<Model>(model_pointer);
  return model;
}

const Model* BuildComplexMockModel() {
  using flatbuffers::Offset;
  flatbuffers::FlatBufferBuilder* builder = BuilderInstance();

  constexpr size_t buffer_data_size = 1;
  const uint8_t buffer_data_1[buffer_data_size] = {21};
  const uint8_t buffer_data_2[buffer_data_size] = {21};
  const uint8_t buffer_data_3[buffer_data_size] = {21};
  constexpr size_t buffers_size = 7;
  const Offset<Buffer> buffers[buffers_size] = {
      // Op 1 buffers:
      CreateBuffer(*builder),
      CreateBuffer(*builder),
      CreateBuffer(*builder,
                   builder->CreateVector(buffer_data_1, buffer_data_size)),
      // Op 2 buffers:
      CreateBuffer(*builder),
      CreateBuffer(*builder,
                   builder->CreateVector(buffer_data_2, buffer_data_size)),
      // Op 3 buffers:
      CreateBuffer(*builder),
      CreateBuffer(*builder,
                   builder->CreateVector(buffer_data_3, buffer_data_size)),
  };
  constexpr size_t tensor_shape_size = 1;
  const int32_t tensor_shape[tensor_shape_size] = {1};

  constexpr size_t tensors_size = 10;
  const Offset<Tensor> tensors[tensors_size] = {
      // Op 1 inputs:
      CreateTensor(
          *builder, builder->CreateVector(tensor_shape, tensor_shape_size),
          TensorType_INT32, 0, builder->CreateString("test_input_tensor_1"), 0,
          false /* is_variable */),
      CreateTensor(
          *builder, builder->CreateVector(tensor_shape, tensor_shape_size),
          TensorType_INT32, 1, builder->CreateString("test_variable_tensor_1"),
          0, true /* is_variable */),
      CreateTensor(
          *builder, builder->CreateVector(tensor_shape, tensor_shape_size),
          TensorType_UINT8, 2, builder->CreateString("test_weight_tensor_1"), 0,
          false /* is_variable */),
      // Op 1 output / Op 2 input:
      CreateTensor(
          *builder, builder->CreateVector(tensor_shape, tensor_shape_size),
          TensorType_INT32, 0, builder->CreateString("test_output_tensor_1"), 0,
          false /* is_variable */),
      // Op 2 inputs:
      CreateTensor(
          *builder, builder->CreateVector(tensor_shape, tensor_shape_size),
          TensorType_INT32, 1, builder->CreateString("test_variable_tensor_2"),
          0, true /* is_variable */),
      CreateTensor(
          *builder, builder->CreateVector(tensor_shape, tensor_shape_size),
          TensorType_UINT8, 2, builder->CreateString("test_weight_tensor_2"), 0,
          false /* is_variable */),
      // Op 2 output / Op 3 input:
      CreateTensor(
          *builder, builder->CreateVector(tensor_shape, tensor_shape_size),
          TensorType_INT32, 0, builder->CreateString("test_output_tensor_2"), 0,
          false /* is_variable */),
      // Op 3 inputs:
      CreateTensor(
          *builder, builder->CreateVector(tensor_shape, tensor_shape_size),
          TensorType_INT32, 1, builder->CreateString("test_variable_tensor_3"),
          0, true /* is_variable */),
      CreateTensor(
          *builder, builder->CreateVector(tensor_shape, tensor_shape_size),
          TensorType_UINT8, 2, builder->CreateString("test_weight_tensor_3"), 0,
          false /* is_variable */),
      // Op 3 output:
      CreateTensor(
          *builder, builder->CreateVector(tensor_shape, tensor_shape_size),
          TensorType_INT32, 0, builder->CreateString("test_output_tensor_3"), 0,
          false /* is_variable */),
  };

  constexpr size_t operators_size = 3;
  Offset<Operator> operators[operators_size];
  {
    // Set Op 1 attributes:
    constexpr size_t operator_inputs_size = 3;
    const int32_t operator_inputs[operator_inputs_size] = {0, 1, 2};
    constexpr size_t operator_outputs_size = 1;
    const int32_t operator_outputs[operator_outputs_size] = {3};

    operators[0] = CreateOperator(
        *builder, 0,
        builder->CreateVector(operator_inputs, operator_inputs_size),
        builder->CreateVector(operator_outputs, operator_outputs_size),
        BuiltinOptions_NONE);
  }

  {
    // Set Op 2 attributes:
    constexpr size_t operator_inputs_size = 3;
    const int32_t operator_inputs[operator_inputs_size] = {3, 4, 5};
    constexpr size_t operator_outputs_size = 1;
    const int32_t operator_outputs[operator_outputs_size] = {6};

    operators[1] = CreateOperator(
        *builder, 0,
        builder->CreateVector(operator_inputs, operator_inputs_size),
        builder->CreateVector(operator_outputs, operator_outputs_size),
        BuiltinOptions_NONE);
  }

  {
    // Set Op 3 attributes:
    constexpr size_t operator_inputs_size = 3;
    const int32_t operator_inputs[operator_inputs_size] = {6, 7, 8};
    constexpr size_t operator_outputs_size = 1;
    const int32_t operator_outputs[operator_outputs_size] = {9};

    operators[2] = CreateOperator(
        *builder, 0,
        builder->CreateVector(operator_inputs, operator_inputs_size),
        builder->CreateVector(operator_outputs, operator_outputs_size),
        BuiltinOptions_NONE);
  }

  constexpr size_t inputs_size = 1;
  const int32_t inputs[inputs_size] = {0};
  constexpr size_t outputs_size = 1;
  const int32_t outputs[outputs_size] = {9};

  constexpr size_t subgraphs_size = 1;
  const Offset<SubGraph> subgraphs[subgraphs_size] = {
      CreateSubGraph(*builder, builder->CreateVector(tensors, tensors_size),
                     builder->CreateVector(inputs, inputs_size),
                     builder->CreateVector(outputs, outputs_size),
                     builder->CreateVector(operators, operators_size),
                     builder->CreateString("test_subgraph"))};

  constexpr size_t operator_codes_size = 1;
  const Offset<OperatorCode> operator_codes[operator_codes_size] = {
      CreateOperatorCodeDirect(*builder, /*deprecated_builtin_code=*/0,
                               "mock_custom",
                               /*version=*/0, BuiltinOperator_CUSTOM)};

  const Offset<Model> model_offset = CreateModel(
      *builder, 0, builder->CreateVector(operator_codes, operator_codes_size),
      builder->CreateVector(subgraphs, subgraphs_size),
      builder->CreateString("test_model"),
      builder->CreateVector(buffers, buffers_size));

  FinishModelBuffer(*builder, model_offset);
  void* model_pointer = builder->GetBufferPointer();
  const Model* model = flatbuffers::GetRoot<Model>(model_pointer);
  return model;
}

const Model* BuildSimpleMultipleInputsModel() {
  using flatbuffers::Offset;
  flatbuffers::FlatBufferBuilder* builder = BuilderInstance();

  constexpr size_t buffers_size = 1;
  const Offset<Buffer> buffers[buffers_size] = {
      CreateBuffer(*builder),
  };
  constexpr size_t tensor_shape_size = 1;
  const int32_t tensor_shape[tensor_shape_size] = {1};
  constexpr size_t tensors_size = 4;
  const Offset<Tensor> tensors[tensors_size] = {
      CreateTensor(*builder,
                   builder->CreateVector(tensor_shape, tensor_shape_size),
                   TensorType_INT32, 0,
                   builder->CreateString("test_input_tensor1"), 0, false),
      CreateTensor(*builder,
                   builder->CreateVector(tensor_shape, tensor_shape_size),
                   TensorType_INT8, 0,
                   builder->CreateString("test_input_tensor2"), 0, false),
      CreateTensor(*builder,
                   builder->CreateVector(tensor_shape, tensor_shape_size),
                   TensorType_INT32, 0,
                   builder->CreateString("test_input_tensor3"), 0, false),
      CreateTensor(*builder,
                   builder->CreateVector(tensor_shape, tensor_shape_size),
                   TensorType_INT32, 0,
                   builder->CreateString("test_output_tensor"), 0, false),
  };
  constexpr size_t inputs_size = 3;
  const int32_t inputs[inputs_size] = {0, 1, 2};
  constexpr size_t outputs_size = 1;
  const int32_t outputs[outputs_size] = {3};
  constexpr size_t operator_inputs_size = 3;
  const int32_t operator_inputs[operator_inputs_size] = {0, 1, 2};
  constexpr size_t operator_outputs_size = 1;
  const int32_t operator_outputs[operator_outputs_size] = {3};
  constexpr size_t operators_size = 1;
  const Offset<Operator> operators[operators_size] = {
      CreateOperator(
          *builder, 0,
          builder->CreateVector(operator_inputs, operator_inputs_size),
          builder->CreateVector(operator_outputs, operator_outputs_size),
          BuiltinOptions_NONE),
  };
  constexpr size_t subgraphs_size = 1;
  const Offset<SubGraph> subgraphs[subgraphs_size] = {
      CreateSubGraph(*builder, builder->CreateVector(tensors, tensors_size),
                     builder->CreateVector(inputs, inputs_size),
                     builder->CreateVector(outputs, outputs_size),
                     builder->CreateVector(operators, operators_size),
                     builder->CreateString("test_subgraph"))};
  constexpr size_t operator_codes_size = 1;
  const Offset<OperatorCode> operator_codes[operator_codes_size] = {
      CreateOperatorCodeDirect(*builder, /*deprecated_builtin_code=*/0,
                               "multiple_inputs_op",
                               /*version=*/0, BuiltinOperator_CUSTOM)};
  const Offset<Model> model_offset = CreateModel(
      *builder, 0, builder->CreateVector(operator_codes, operator_codes_size),
      builder->CreateVector(subgraphs, subgraphs_size),
      builder->CreateString("test_model"),
      builder->CreateVector(buffers, buffers_size));
  FinishModelBuffer(*builder, model_offset);
  void* model_pointer = builder->GetBufferPointer();
  const Model* model = flatbuffers::GetRoot<Model>(model_pointer);
  return model;
}

const Model* BuildSimpleModelWithSubgraphsAndIf() {
  using flatbuffers::Offset;
  flatbuffers::FlatBufferBuilder* builder = BuilderInstance();

  constexpr size_t buffers_size = 1;
  const Offset<Buffer> buffers[buffers_size] = {
      CreateBuffer(*builder),
  };
  const int32_t condition_tensor_shape[] = {1};
  const int32_t data_tensor_shape[] = {1, 2};
  constexpr size_t tensors_size = 4;
  const Offset<Tensor> subgraph1_tensors[tensors_size] = {
      CreateTensor(*builder, builder->CreateVector(condition_tensor_shape, 1),
                   TensorType_BOOL, 0,
                   builder->CreateString("condition tensor"), 0, false),
      CreateTensor(*builder, builder->CreateVector(data_tensor_shape, 2),
                   TensorType_FLOAT32, 0,
                   builder->CreateString("input_tensor1"), 0, false),
      CreateTensor(*builder, builder->CreateVector(data_tensor_shape, 2),
                   TensorType_FLOAT32, 0,
                   builder->CreateString("input_tensor2"), 0, false),
      CreateTensor(*builder, builder->CreateVector(data_tensor_shape, 2),
                   TensorType_FLOAT32, 0,
                   builder->CreateString("output_tensor"), 0, false),
  };
  const Offset<Tensor> subgraph2_tensors[tensors_size] = {
      CreateTensor(*builder, builder->CreateVector(data_tensor_shape, 2),
                   TensorType_FLOAT32, 0,
                   builder->CreateString("input_tensor1"), 0, false),
      CreateTensor(*builder, builder->CreateVector(data_tensor_shape, 2),
                   TensorType_FLOAT32, 0,
                   builder->CreateString("input_tensor2"), 0, false),
      CreateTensor(*builder, builder->CreateVector(data_tensor_shape, 2),
                   TensorType_FLOAT32, 0,
                   builder->CreateString("output_tensor"), 0, false),
  };
  const Offset<Tensor> subgraph3_tensors[tensors_size] = {
      CreateTensor(*builder, builder->CreateVector(data_tensor_shape, 2),
                   TensorType_FLOAT32, 0,
                   builder->CreateString("input_tensor1"), 0, false),
      CreateTensor(*builder, builder->CreateVector(data_tensor_shape, 2),
                   TensorType_FLOAT32, 0,
                   builder->CreateString("input_tensor2"), 0, false),
      CreateTensor(*builder, builder->CreateVector(data_tensor_shape, 2),
                   TensorType_FLOAT32, 0,
                   builder->CreateString("output_tensor"), 0, false),
  };

  constexpr size_t if_inputs_size = 3;
  const int32_t if_inputs[if_inputs_size] = {0, 1, 2};
  constexpr size_t outputs_size = 1;
  const int32_t if_outputs[outputs_size] = {3};
  constexpr size_t operator_inputs_size = 2;
  const int32_t operator_inputs[operator_inputs_size] = {0, 1};
  const int32_t operator_outputs[outputs_size] = {2};
  constexpr size_t operators_size = 1;
  const Offset<Operator> subgraph1_operators[operators_size] = {
      CreateOperator(
          *builder, 0, builder->CreateVector(if_inputs, if_inputs_size),
          builder->CreateVector(if_outputs, outputs_size),
          BuiltinOptions_IfOptions, CreateIfOptions(*builder, 1, 2).Union()),
  };
  const Offset<Operator> subgraph2_operators[operators_size] = {
      CreateOperator(
          *builder, 1,
          builder->CreateVector(operator_inputs, operator_inputs_size),
          builder->CreateVector(operator_outputs, outputs_size),
          BuiltinOptions_NONE),
  };
  const Offset<Operator> subgraph3_operators[operators_size] = {
      CreateOperator(
          *builder, 2,
          builder->CreateVector(operator_inputs, operator_inputs_size),
          builder->CreateVector(operator_outputs, outputs_size),
          BuiltinOptions_NONE),
  };
  constexpr size_t subgraphs_size = 3;
  const Offset<SubGraph> subgraphs[subgraphs_size] = {
      CreateSubGraph(*builder, builder->CreateVector(subgraph1_tensors, 4),
                     builder->CreateVector(if_inputs, if_inputs_size),
                     builder->CreateVector(if_outputs, outputs_size),
                     builder->CreateVector(subgraph1_operators, operators_size),
                     builder->CreateString("if_subgraph")),
      CreateSubGraph(
          *builder, builder->CreateVector(subgraph2_tensors, 3),
          builder->CreateVector(operator_inputs, operator_inputs_size),
          builder->CreateVector(operator_outputs, outputs_size),
          builder->CreateVector(subgraph2_operators, operators_size),
          builder->CreateString("then_subgraph")),
      CreateSubGraph(
          *builder, builder->CreateVector(subgraph3_tensors, 3),
          builder->CreateVector(operator_inputs, operator_inputs_size),
          builder->CreateVector(operator_outputs, outputs_size),
          builder->CreateVector(subgraph3_operators, operators_size),
          builder->CreateString("else_subgraph")),
  };
  constexpr size_t operator_codes_size = 3;
  const Offset<OperatorCode> operator_codes[operator_codes_size] = {
      CreateOperatorCodeDirect(*builder, /*deprecated_builtin_code=*/0,
                               "multiple_inputs_op",
                               /*version=*/0, BuiltinOperator_IF),
      CreateOperatorCodeDirect(*builder, /*deprecated_builtin_code=*/0,
                               "multiple_inputs_op",
                               /*version=*/0, BuiltinOperator_ADD),
      CreateOperatorCodeDirect(*builder, /*deprecated_builtin_code=*/0,
                               "multiple_inputs_op",
                               /*version=*/0, BuiltinOperator_MUL),
  };
  const Offset<Model> model_offset = CreateModel(
      *builder, 0, builder->CreateVector(operator_codes, operator_codes_size),
      builder->CreateVector(subgraphs, subgraphs_size),
      builder->CreateString("test_model"),
      builder->CreateVector(buffers, buffers_size));
  FinishModelBuffer(*builder, model_offset);
  void* model_pointer = builder->GetBufferPointer();
  const Model* model = flatbuffers::GetRoot<Model>(model_pointer);
  return model;
}

}  // namespace

const TfLiteRegistration* SimpleStatefulOp::getRegistration() {
  return GetMutableRegistration();
}

TfLiteRegistration* SimpleStatefulOp::GetMutableRegistration() {
  static TfLiteRegistration r;
  r.init = Init;
  r.prepare = Prepare;
  r.invoke = Invoke;
  return &r;
}

void* SimpleStatefulOp::Init(TfLiteContext* context, const char* buffer,
                             size_t length) {
  TFLITE_DCHECK(context->AllocateBufferForEval == nullptr);
  TFLITE_DCHECK(context->GetScratchBuffer == nullptr);
  TFLITE_DCHECK(context->RequestScratchBufferInArena == nullptr);

  void* raw = context->AllocatePersistentBuffer(context, sizeof(OpData));
  OpData* data = reinterpret_cast<OpData*>(raw);
  *data = {};
  return raw;
}

TfLiteStatus SimpleStatefulOp::Prepare(TfLiteContext* context,
                                       TfLiteNode* node) {
  OpData* data = reinterpret_cast<OpData*>(node->user_data);

  // Make sure that the input is a uint8_t tensor with at least one element.
  const TfLiteTensor* input;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
  if (input->type != kTfLiteUInt8) return kTfLiteError;
  if (NumElements(input->dims) == 0) return kTfLiteError;

  // Allocate a temporary buffer with the same size as the input for sorting.
  TF_LITE_ENSURE_STATUS(context->RequestScratchBufferInArena(
      context, sizeof(uint8_t) * NumElements(input->dims),
      &data->sorting_buffer));
  // We can interleave scratch / persistent buffer allocation.
  data->invoke_count = reinterpret_cast<int*>(
      context->AllocatePersistentBuffer(context, sizeof(int)));
  *data->invoke_count = 0;

  return kTfLiteOk;
}

TfLiteStatus SimpleStatefulOp::Invoke(TfLiteContext* context,
                                      TfLiteNode* node) {
  OpData* data = reinterpret_cast<OpData*>(node->user_data);
  *data->invoke_count += 1;

  const TfLiteTensor* input;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
  const uint8_t* input_data = GetTensorData<uint8_t>(input);
  int size = NumElements(input->dims);

  uint8_t* sorting_buffer = reinterpret_cast<uint8_t*>(
      context->GetScratchBuffer(context, data->sorting_buffer));
  // Copy the input data to the sorting buffer. We don't want to mutate the
  // input tensor, as it might be used by another node.
  for (int i = 0; i < size; i++) {
    sorting_buffer[i] = input_data[i];
  }

  // In-place insertion sort on `sorting_buffer`.
  for (int i = 1; i < size; i++) {
    for (int j = i; j > 0 && sorting_buffer[j] < sorting_buffer[j - 1]; j--) {
      std::swap(sorting_buffer[j], sorting_buffer[j - 1]);
    }
  }

  TfLiteTensor* median;
  TF_LITE_ENSURE_OK(context,
                    GetOutputSafe(context, node, kMedianTensor, &median));
  uint8_t* median_data = GetTensorData<uint8_t>(median);
  TfLiteTensor* invoke_count;
  TF_LITE_ENSURE_OK(context,
                    GetOutputSafe(context, node, kInvokeCount, &invoke_count));
  int32_t* invoke_count_data = GetTensorData<int32_t>(invoke_count);

  median_data[0] = sorting_buffer[size / 2];
  invoke_count_data[0] = *data->invoke_count;
  return kTfLiteOk;
}

const TfLiteRegistration* MockCustom::getRegistration() {
  return GetMutableRegistration();
}

TfLiteRegistration* MockCustom::GetMutableRegistration() {
  static TfLiteRegistration r;
  r.init = Init;
  r.prepare = Prepare;
  r.invoke = Invoke;
  r.free = Free;
  return &r;
}

void* MockCustom::Init(TfLiteContext* context, const char* buffer,
                       size_t length) {
  // We don't support delegates in TFL Micro. This is a weak check to verify
  // that the context struct has been zero-initialized.
  TFLITE_DCHECK(context->ReplaceNodeSubsetsWithDelegateKernels == nullptr);
  freed_ = false;
  // Do nothing.
  return nullptr;
}

void MockCustom::Free(TfLiteContext* context, void* buffer) { freed_ = true; }

TfLiteStatus MockCustom::Prepare(TfLiteContext* context, TfLiteNode* node) {
  return kTfLiteOk;
}

TfLiteStatus MockCustom::Invoke(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* input;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
  const int32_t* input_data = input->data.i32;
  const TfLiteTensor* weight;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 1, &weight));
  const uint8_t* weight_data = weight->data.uint8;
  TfLiteTensor* output;
  TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
  int32_t* output_data = output->data.i32;
  // Catch an output tensor sharing memory with an input tensor.
  output_data[0] = 0;
  output_data[0] = input_data[0] + weight_data[0];
  return kTfLiteOk;
}

bool MockCustom::freed_ = false;

const TfLiteRegistration* MultipleInputs::getRegistration() {
  return GetMutableRegistration();
}

TfLiteRegistration* MultipleInputs::GetMutableRegistration() {
  static TfLiteRegistration r;
  r.init = Init;
  r.prepare = Prepare;
  r.invoke = Invoke;
  r.free = Free;
  return &r;
}

void* MultipleInputs::Init(TfLiteContext* context, const char* buffer,
                           size_t length) {
  // We don't support delegates in TFL Micro. This is a weak check to verify
  // that the context struct has been zero-initialized.
  TFLITE_DCHECK(context->ReplaceNodeSubsetsWithDelegateKernels == nullptr);
  freed_ = false;
  // Do nothing.
  return nullptr;
}

void MultipleInputs::Free(TfLiteContext* context, void* buffer) {
  freed_ = true;
}

TfLiteStatus MultipleInputs::Prepare(TfLiteContext* context, TfLiteNode* node) {
  return kTfLiteOk;
}

TfLiteStatus MultipleInputs::Invoke(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* input;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
  const int32_t* input_data = input->data.i32;
  const TfLiteTensor* input1;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 1, &input1));
  const int32_t* input_data1 = input1->data.i32;
  const TfLiteTensor* input2;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 2, &input2));
  const int32_t* input_data2 = input2->data.i32;

  TfLiteTensor* output;
  TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
  int32_t* output_data = output->data.i32;
  // Catch an output tensor sharing memory with an input tensor.
  output_data[0] = 0;
  output_data[0] = input_data[0] + input_data1[0] + input_data2[0];
  return kTfLiteOk;
}

bool MultipleInputs::freed_ = false;

AllOpsResolver GetOpResolver() {
  AllOpsResolver op_resolver;
  op_resolver.AddCustom("mock_custom", MockCustom::GetMutableRegistration());
  op_resolver.AddCustom("simple_stateful_op",
                        SimpleStatefulOp::GetMutableRegistration());
  op_resolver.AddCustom("multiple_inputs_op",
                        MultipleInputs::GetMutableRegistration());
  return op_resolver;
}
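
// A minimal sketch of wiring these helpers into an interpreter, assuming the
// MicroInterpreter constructor takes (model, resolver, arena, arena size,
// error reporter); the arena size here is arbitrary:
//
//   AllOpsResolver resolver = GetOpResolver();
//   constexpr size_t kArenaSize = 16 * 1024;
//   static uint8_t arena[kArenaSize];
//   tflite::MicroErrorReporter reporter;
//   tflite::MicroInterpreter interpreter(GetSimpleMockModel(), resolver,
//                                        arena, kArenaSize, &reporter);
//   interpreter.AllocateTensors();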

const Model* GetModelWithUnusedInputs() {
  static Model* model = nullptr;
  if (!model) {
    model = const_cast<Model*>(BuildModelWithUnusedInputs());
  }
  return model;
}

const Model* GetSimpleMockModel() {
  static Model* model = nullptr;
  if (!model) {
    model = const_cast<Model*>(BuildSimpleMockModel());
  }
  return model;
}

const Model* GetSimpleMultipleInputsModel() {
  static Model* model = nullptr;
  if (!model) {
    model = const_cast<Model*>(BuildSimpleMultipleInputsModel());
  }
  return model;
}

const Model* GetSimpleModelWithSubgraphsAndIf() {
  static Model* model = nullptr;
  if (!model) {
    model = const_cast<Model*>(BuildSimpleModelWithSubgraphsAndIf());
  }
  return model;
}

const Model* GetComplexMockModel() {
  static Model* model = nullptr;
  if (!model) {
    model = const_cast<Model*>(BuildComplexMockModel());
  }
  return model;
}

const Model* GetSimpleModelWithBranch() {
  static Model* model = nullptr;
  if (!model) {
    model = const_cast<Model*>(BuildSimpleModelWithBranch());
  }
  return model;
}

const Model* GetModelWithOfflinePlanning(int num_tensors,
                                         const int32_t* metadata_buffer,
                                         NodeConnection* node_conn,
                                         int num_conns,
                                         int num_subgraph_inputs) {
  const Model* model = BuildModelWithOfflinePlanning(
      num_tensors, metadata_buffer, node_conn, num_conns, num_subgraph_inputs);
  return model;
}

const Model* GetSimpleStatefulModel() {
  static Model* model = nullptr;
  if (!model) {
    model = const_cast<Model*>(BuildSimpleStatefulModel());
  }
  return model;
}

const Tensor* Create1dFlatbufferTensor(int size, bool is_variable) {
  using flatbuffers::Offset;
  flatbuffers::FlatBufferBuilder* builder = BuilderInstance();
  constexpr size_t tensor_shape_size = 1;
  const int32_t tensor_shape[tensor_shape_size] = {size};
  const Offset<Tensor> tensor_offset = CreateTensor(
      *builder, builder->CreateVector(tensor_shape, tensor_shape_size),
      TensorType_INT32, 0, builder->CreateString("test_tensor"), 0,
      is_variable);
  builder->Finish(tensor_offset);
  void* tensor_pointer = builder->GetBufferPointer();
  const Tensor* tensor = flatbuffers::GetRoot<Tensor>(tensor_pointer);
  return tensor;
}

const Tensor* CreateQuantizedFlatbufferTensor(int size) {
  using flatbuffers::Offset;
  flatbuffers::FlatBufferBuilder* builder = BuilderInstance();
  const Offset<QuantizationParameters> quant_params =
      CreateQuantizationParameters(
          *builder,
          /*min=*/builder->CreateVector<float>({0.1f}),
          /*max=*/builder->CreateVector<float>({0.2f}),
          /*scale=*/builder->CreateVector<float>({0.3f}),
          /*zero_point=*/builder->CreateVector<int64_t>({100ll}));

  constexpr size_t tensor_shape_size = 1;
  const int32_t tensor_shape[tensor_shape_size] = {size};
  const Offset<Tensor> tensor_offset = CreateTensor(
      *builder, builder->CreateVector(tensor_shape, tensor_shape_size),
      TensorType_INT32, 0, builder->CreateString("test_tensor"), quant_params,
      false);
  builder->Finish(tensor_offset);
  void* tensor_pointer = builder->GetBufferPointer();
  const Tensor* tensor = flatbuffers::GetRoot<Tensor>(tensor_pointer);
  return tensor;
}

const Tensor* CreateMissingQuantizationFlatbufferTensor(int size) {
  using flatbuffers::Offset;
  flatbuffers::FlatBufferBuilder* builder = BuilderInstance();
  const Offset<QuantizationParameters> quant_params =
      CreateQuantizationParameters(*builder, 0, 0, 0, 0,
                                   QuantizationDetails_NONE, 0, 0);
  constexpr size_t tensor_shape_size = 1;
  const int32_t tensor_shape[tensor_shape_size] = {size};
  const Offset<Tensor> tensor_offset = CreateTensor(
      *builder, builder->CreateVector(tensor_shape, tensor_shape_size),
      TensorType_INT32, 0, builder->CreateString("test_tensor"), quant_params,
      false);
  builder->Finish(tensor_offset);
  void* tensor_pointer = builder->GetBufferPointer();
  const Tensor* tensor = flatbuffers::GetRoot<Tensor>(tensor_pointer);
  return tensor;
}

const flatbuffers::Vector<flatbuffers::Offset<Buffer>>*
CreateFlatbufferBuffers() {
  using flatbuffers::Offset;
  flatbuffers::FlatBufferBuilder* builder = BuilderInstance();
  constexpr size_t buffers_size = 1;
  const Offset<Buffer> buffers[buffers_size] = {
      CreateBuffer(*builder),
  };
  const flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<Buffer>>>
      buffers_offset = builder->CreateVector(buffers, buffers_size);
  builder->Finish(buffers_offset);
  void* buffers_pointer = builder->GetBufferPointer();
  const flatbuffers::Vector<flatbuffers::Offset<Buffer>>* result =
      flatbuffers::GetRoot<flatbuffers::Vector<flatbuffers::Offset<Buffer>>>(
          buffers_pointer);
  return result;
}

int TestStrcmp(const char* a, const char* b) {
  if ((a == nullptr) || (b == nullptr)) {
    return -1;
  }
  while ((*a != 0) && (*a == *b)) {
    a++;
    b++;
  }
  return *reinterpret_cast<const unsigned char*>(a) -
         *reinterpret_cast<const unsigned char*>(b);
}

// Wrapper to forward kernel errors to the interpreter's error reporter.
void ReportOpError(struct TfLiteContext* context, const char* format, ...) {
#ifndef TF_LITE_STRIP_ERROR_STRINGS
  ErrorReporter* error_reporter = static_cast<ErrorReporter*>(context->impl_);
  va_list args;
  va_start(args, format);
  TF_LITE_REPORT_ERROR(error_reporter, format, args);
  va_end(args);
#endif
}

// Create a TfLiteIntArray from an array of ints. The first element in the
// supplied array must be the size of the array expressed as an int.
TfLiteIntArray* IntArrayFromInts(int* int_array) {
  return reinterpret_cast<TfLiteIntArray*>(int_array);
}
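
// Example: the leading element is the length, so a rank-2 shape {1, 3} is
// written as a three-element backing array:
//
//   int dims_data[] = {2, 1, 3};
//   TfLiteIntArray* dims = IntArrayFromInts(dims_data);
//   // dims->size == 2, dims->data[0] == 1, dims->data[1] == 3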

// Create a TfLiteFloatArray from an array of floats. The first element in the
// supplied array must be the size of the array expressed as a float.
TfLiteFloatArray* FloatArrayFromFloats(const float* floats) {
  static_assert(sizeof(float) == sizeof(int),
                "assumes sizeof(float) == sizeof(int) to perform casting");
  int size = static_cast<int>(floats[0]);
  *reinterpret_cast<int32_t*>(const_cast<float*>(floats)) = size;
  return reinterpret_cast<TfLiteFloatArray*>(const_cast<float*>(floats));
}
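
// Example: same layout as IntArrayFromInts, except the length starts out as a
// float and is rewritten in place as an int:
//
//   float scales_data[] = {2.0f, 0.5f, 0.25f};
//   TfLiteFloatArray* scales = FloatArrayFromFloats(scales_data);
//   // scales->size == 2, scales->data[0] == 0.5f, scales->data[1] == 0.25f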

TfLiteTensor CreateQuantizedBiasTensor(const float* data, int16_t* quantized,
                                       TfLiteIntArray* dims, float input_scale,
                                       float weights_scale, bool is_variable) {
  float bias_scale = input_scale * weights_scale;
  tflite::SymmetricQuantize(data, quantized, ElementCount(*dims), bias_scale);

  // Quantized int16_t tensors always have a zero point of 0, since the range
  // of int16_t values is large, and because zero point costs extra cycles
  // during processing.
  TfLiteTensor result =
      CreateQuantizedTensor(quantized, dims, bias_scale, 0, is_variable);
  return result;
}

TfLiteTensor CreateQuantizedBiasTensor(const float* data, int32_t* quantized,
                                       TfLiteIntArray* dims, float input_scale,
                                       float weights_scale, bool is_variable) {
  float bias_scale = input_scale * weights_scale;
  tflite::SymmetricQuantize(data, quantized, ElementCount(*dims), bias_scale);

  // Quantized int32_t tensors always have a zero point of 0, since the range
  // of int32_t values is large, and because zero point costs extra cycles
  // during processing.
  TfLiteTensor result =
      CreateQuantizedTensor(quantized, dims, bias_scale, 0, is_variable);
  return result;
}
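
// Sketch of driving the bias helpers above (the dims and scales here are
// hypothetical; the bias scale works out to input_scale * weights_scale):
//
//   int dims_data[] = {1, 2};
//   TfLiteIntArray* dims = IntArrayFromInts(dims_data);
//   const float bias[2] = {1.0f, -1.0f};
//   int32_t quantized[2];
//   TfLiteTensor t = CreateQuantizedBiasTensor(
//       bias, quantized, dims, /*input_scale=*/0.5f, /*weights_scale=*/0.25f,
//       /*is_variable=*/false);
//   // t.params.scale == 0.125f, t.params.zero_point == 0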

TfLiteTensor CreateQuantizedBiasTensor(const float* data,
                                       std::int64_t* quantized,
                                       TfLiteIntArray* dims, float input_scale,
                                       float weights_scale, bool is_variable) {
  float bias_scale = input_scale * weights_scale;
  tflite::SymmetricQuantize(data, quantized, ElementCount(*dims), bias_scale);

  // Quantized int64_t tensors always have a zero point of 0, since the range
  // of int64_t values is large, and because zero point costs extra cycles
  // during processing.
  TfLiteTensor result =
      CreateQuantizedTensor(quantized, dims, bias_scale, 0, is_variable);
  return result;
}

// Quantizes an int32_t or int64_t bias tensor with per-channel scales
// determined by the input scale multiplied by the weight scale for each
// channel.
template <typename T>
TfLiteTensor CreatePerChannelQuantizedBiasTensor(
    const float* input, T* quantized, TfLiteIntArray* dims, float input_scale,
    float* weight_scales, float* scales, int* zero_points,
    TfLiteAffineQuantization* affine_quant, int quantized_dimension,
    bool is_variable) {
  int input_size = ElementCount(*dims);
  int num_channels = dims->data[quantized_dimension];
  // The first element is reserved for the array length.
  zero_points[0] = num_channels;
  scales[0] = static_cast<float>(num_channels);
  float* scales_array = &scales[1];
  for (int i = 0; i < num_channels; i++) {
    scales_array[i] = input_scale * weight_scales[i];
    zero_points[i + 1] = 0;
  }

  SymmetricPerChannelQuantize<T>(input, quantized, input_size, num_channels,
                                 scales_array);

  affine_quant->scale = FloatArrayFromFloats(scales);
  affine_quant->zero_point = IntArrayFromInts(zero_points);
  affine_quant->quantized_dimension = quantized_dimension;

  TfLiteTensor result = CreateTensor(quantized, dims, is_variable);
  result.quantization = {kTfLiteAffineQuantization, affine_quant};
  return result;
}

TfLiteTensor CreatePerChannelQuantizedBiasTensor(
    const float* input, int32_t* quantized, TfLiteIntArray* dims,
    float input_scale, float* weight_scales, float* scales, int* zero_points,
    TfLiteAffineQuantization* affine_quant, int quantized_dimension,
    bool is_variable) {
  return CreatePerChannelQuantizedBiasTensor<int32_t>(
      input, quantized, dims, input_scale, weight_scales, scales, zero_points,
      affine_quant, quantized_dimension, is_variable);
}

TfLiteTensor CreatePerChannelQuantizedBiasTensor(
    const float* input, std::int64_t* quantized, TfLiteIntArray* dims,
    float input_scale, float* weight_scales, float* scales, int* zero_points,
    TfLiteAffineQuantization* affine_quant, int quantized_dimension,
    bool is_variable) {
  return CreatePerChannelQuantizedBiasTensor<std::int64_t>(
      input, quantized, dims, input_scale, weight_scales, scales, zero_points,
      affine_quant, quantized_dimension, is_variable);
}

TfLiteTensor CreateSymmetricPerChannelQuantizedTensor(
    const float* input, int8_t* quantized, TfLiteIntArray* dims, float* scales,
    int* zero_points, TfLiteAffineQuantization* affine_quant,
    int quantized_dimension, bool is_variable) {
  int channel_count = dims->data[quantized_dimension];
  scales[0] = static_cast<float>(channel_count);
  zero_points[0] = channel_count;

  SignedSymmetricPerChannelQuantize(input, dims, quantized_dimension, quantized,
                                    &scales[1]);

  for (int i = 0; i < channel_count; i++) {
    zero_points[i + 1] = 0;
  }

  affine_quant->scale = FloatArrayFromFloats(scales);
  affine_quant->zero_point = IntArrayFromInts(zero_points);
  affine_quant->quantized_dimension = quantized_dimension;

  TfLiteTensor result = CreateTensor(quantized, dims, is_variable);
  result.quantization = {kTfLiteAffineQuantization, affine_quant};
  return result;
}
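
// Sketch of the caller-provided storage this helper expects (sizes are
// hypothetical): `scales` and `zero_points` both carry a length prefix in
// element 0 and need room for one entry per channel after it.
//
//   int dims_data[] = {2, 3, 1};  // 3 channels along dimension 0.
//   TfLiteIntArray* dims = IntArrayFromInts(dims_data);
//   const float input[3] = {1.0f, -2.0f, 4.0f};
//   int8_t quantized[3];
//   float scales[1 + 3];     // Length prefix plus one scale per channel.
//   int zero_points[1 + 3];  // Length prefix plus one zero point per channel.
//   TfLiteAffineQuantization quant;
//   TfLiteTensor t = CreateSymmetricPerChannelQuantizedTensor(
//       input, quantized, dims, scales, zero_points, &quant,
//       /*quantized_dimension=*/0, /*is_variable=*/false);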

size_t GetModelTensorCount(const Model* model) {
  auto* subgraphs = model->subgraphs();
  if (subgraphs) {
    return (*subgraphs)[0]->tensors()->size();
  }
  return 0;
}

}  // namespace testing
}  // namespace tflite