1 /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2
3 Licensed under the Apache License, Version 2.0 (the "License");
4 you may not use this file except in compliance with the License.
5 You may obtain a copy of the License at
6
7 http://www.apache.org/licenses/LICENSE-2.0
8
9 Unless required by applicable law or agreed to in writing, software
10 distributed under the License is distributed on an "AS IS" BASIS,
11 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 See the License for the specific language governing permissions and
13 limitations under the License.
14 ==============================================================================*/
15
16 #include "tensorflow/lite/core/api/flatbuffer_conversions.h"
17
18 #include <cstddef>
19 #include <cstdint>
20 #include <memory>
21
22 #include "flatbuffers/flatbuffers.h" // from @flatbuffers
23 #include "tensorflow/lite/c/builtin_op_data.h"
24 #include "tensorflow/lite/c/common.h"
25 #include "tensorflow/lite/core/api/error_reporter.h"
26 #include "tensorflow/lite/kernels/internal/compatibility.h"
27 #include "tensorflow/lite/schema/schema_generated.h"
28
29 namespace tflite {
30
31 namespace {
32
// Utility class for safely allocating POD data. This is useful for avoiding
// leaks in cases where op params are allocated but fail to propagate to the
// parsed op data (e.g., when model parameters are invalid). Allocations are
// returned as unique_ptrs whose deleter routes back through the
// BuiltinDataAllocator, so an early return frees the struct automatically;
// successful parses hand ownership to the caller via release().
class SafeBuiltinDataAllocator {
 public:
  // Deleter that returns memory to the BuiltinDataAllocator it came from.
  class BuiltinDataDeleter {
   public:
    explicit BuiltinDataDeleter(BuiltinDataAllocator* allocator)
        : allocator_(allocator) {}

    void operator()(void* data) { allocator_->Deallocate(data); }

   private:
    BuiltinDataAllocator* allocator_;  // Not owned.
  };

  template <typename T>
  using BuiltinDataPtr = std::unique_ptr<T, BuiltinDataDeleter>;

  explicit SafeBuiltinDataAllocator(BuiltinDataAllocator* allocator)
      : allocator_(allocator) {}

  // Allocates a zero-initialized POD struct of type T owned by the returned
  // smart pointer. May return null if the underlying allocator fails.
  template <typename T>
  BuiltinDataPtr<T> Allocate() {
    return BuiltinDataPtr<T>(allocator_->AllocatePOD<T>(),
                             BuiltinDataDeleter(allocator_));
  }

 private:
  BuiltinDataAllocator* allocator_;  // Not owned.
};
64
// All the Parse functions take some pointers as params and this function has
// the common DCHECKs to catch if any of those are nullptr. Debug-only: these
// checks compile away in release builds.
void CheckParsePointerParams(const Operator* op, ErrorReporter* error_reporter,
                             BuiltinDataAllocator* allocator,
                             void** builtin_data) {
  TFLITE_DCHECK(op != nullptr);
  TFLITE_DCHECK(error_reporter != nullptr);
  TFLITE_DCHECK(allocator != nullptr);
  TFLITE_DCHECK(builtin_data != nullptr);
}
75
76 // Copies the contents from the flatbuffer int vector `flatbuffer` into the
77 // int array `buffer`. `flat_vector` and `buffer` represent the same
78 // configuration operation for a given operation.
FlatBufferIntVectorToArray(int max_size_of_buffer,const flatbuffers::Vector<int32_t> * flat_vector,int * buffer,ErrorReporter * error_reporter,const char * op_name)79 TfLiteStatus FlatBufferIntVectorToArray(
80 int max_size_of_buffer, const flatbuffers::Vector<int32_t>* flat_vector,
81 int* buffer, ErrorReporter* error_reporter, const char* op_name) {
82 if (!flat_vector) {
83 TF_LITE_REPORT_ERROR(error_reporter,
84 "Input array not provided for operation '%s'.\n",
85 op_name);
86 return kTfLiteError;
87 } else {
88 size_t num_dimensions = flat_vector->size();
89 if (num_dimensions > max_size_of_buffer / sizeof(int)) {
90 TF_LITE_REPORT_ERROR(
91 error_reporter,
92 "Found too many dimensions in the input array of operation '%s'.\n",
93 op_name);
94 return kTfLiteError;
95 } else {
96 for (size_t i = 0; i < num_dimensions; ++i) {
97 buffer[i] = flat_vector->Get(i);
98 }
99 }
100 }
101 return kTfLiteOk;
102 }
103
// Converts the flatbuffer activation to what is used at runtime.
// Note: deliberately no `default:` label so -Wswitch flags newly added schema
// values; unknown/future values fall through to kTfLiteActNone below.
TfLiteFusedActivation ConvertActivation(ActivationFunctionType activation) {
  switch (activation) {
    case ActivationFunctionType_NONE:
      return kTfLiteActNone;
    case ActivationFunctionType_RELU:
      return kTfLiteActRelu;
    case ActivationFunctionType_RELU_N1_TO_1:
      return kTfLiteActReluN1To1;
    case ActivationFunctionType_RELU6:
      return kTfLiteActRelu6;
    case ActivationFunctionType_TANH:
      return kTfLiteActTanh;
    case ActivationFunctionType_SIGN_BIT:
      return kTfLiteActSignBit;
  }
  return kTfLiteActNone;
}
122
// Converts the flatbuffer padding enum to what is used at runtime.
// Deliberately no `default:` so -Wswitch flags new schema values; anything
// unhandled maps to kTfLitePaddingUnknown.
TfLitePadding ConvertPadding(Padding padding) {
  switch (padding) {
    case Padding_SAME:
      return kTfLitePaddingSame;
    case Padding_VALID:
      return kTfLitePaddingValid;
  }
  return kTfLitePaddingUnknown;
}
133
#ifndef TF_LITE_STATIC_MEMORY
// Parses the builtin options of `op` (whose type is `op_type`) into an
// op-specific parameter struct allocated via `allocator` and returned through
// `*builtin_data`. Most builtins delegate to the shared Parse* helpers (also
// used by the micro selective-registration path); the remainder are parsed
// inline below using SafeBuiltinDataAllocator so partially-built params are
// freed on error. Ops with no builtin data leave `*builtin_data` as nullptr.
TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type,
                               ErrorReporter* error_reporter,
                               BuiltinDataAllocator* allocator,
                               void** builtin_data) {
  // Maps the schema's LSH projection enum onto the runtime enum.
  auto parseLSHProjectionType = [](LSHProjectionType type) {
    switch (type) {
      case LSHProjectionType_SPARSE:
        return kTfLiteLshProjectionSparse;
      case LSHProjectionType_DENSE:
        return kTfLiteLshProjectionDense;
      default:
        return kTfLiteLshProjectionUnknown;
    }
  };
  // Maps the schema's combiner enum onto the runtime enum; SUM doubles as the
  // fallback for unrecognized values.
  auto parseCombinerType = [](CombinerType type) {
    switch (type) {
      case CombinerType_MEAN:
        return kTfLiteCombinerTypeMean;
      case CombinerType_SQRTN:
        return kTfLiteCombinerTypeSqrtn;
      case CombinerType_SUM:
      default:
        return kTfLiteCombinerTypeSum;
    }
  };

  SafeBuiltinDataAllocator safe_allocator(allocator);
  // Default to "no params"; cases that produce builtin data overwrite this.
  *builtin_data = nullptr;
  switch (op_type) {
    case BuiltinOperator_ABS: {
      return ParseAbs(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_ADD: {
      return ParseAdd(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_ADD_N: {
      return ParseAddN(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_ARG_MAX: {
      return ParseArgMax(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_ARG_MIN: {
      return ParseArgMin(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_AVERAGE_POOL_2D: {
      return ParsePool(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_BATCH_MATMUL: {
      return ParseBatchMatMul(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_BATCH_TO_SPACE_ND: {
      return ParseBatchToSpaceNd(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_CEIL: {
      return ParseCeil(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_CONCATENATION: {
      return ParseConcatenation(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_CONV_2D: {
      return ParseConv2D(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_CUMSUM: {
      return ParseCumsum(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_DEPTH_TO_SPACE: {
      return ParseDepthToSpace(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_DEPTHWISE_CONV_2D: {
      return ParseDepthwiseConv2D(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_DEQUANTIZE: {
      return ParseDequantize(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_DIV: {
      return ParseDiv(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_ELU: {
      return ParseElu(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_EXP: {
      return ParseExp(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_EXPAND_DIMS: {
      return ParseExpandDims(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_FILL: {
      return ParseFill(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_FLOOR: {
      return ParseFloor(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_FLOOR_DIV: {
      return ParseFloorDiv(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_FLOOR_MOD: {
      return ParseFloorMod(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_FULLY_CONNECTED: {
      return ParseFullyConnected(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_GATHER_ND: {
      return ParseGatherNd(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_GREATER: {
      return ParseGreater(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_GREATER_EQUAL: {
      return ParseGreaterEqual(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_HARD_SWISH: {
      return ParseHardSwish(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_L2_NORMALIZATION: {
      return ParseL2Normalization(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_L2_POOL_2D: {
      return ParsePool(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_LEAKY_RELU: {
      return ParseLeakyRelu(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_LESS: {
      return ParseLess(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_LESS_EQUAL: {
      return ParseLessEqual(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_LOG: {
      return ParseLog(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_LOGICAL_AND: {
      return ParseLogicalAnd(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_LOGICAL_NOT: {
      return ParseLogicalNot(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_LOGICAL_OR: {
      return ParseLogicalOr(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_LOGISTIC: {
      return ParseLogistic(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_LOG_SOFTMAX: {
      return ParseLogSoftmax(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_MAXIMUM: {
      return ParseMaximum(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_MAX_POOL_2D: {
      return ParsePool(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_MEAN: {
      return ParseReducer(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_MINIMUM: {
      return ParseMinimum(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_MUL: {
      return ParseMul(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_NEG: {
      return ParseNeg(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_NOT_EQUAL: {
      return ParseNotEqual(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_PACK: {
      return ParsePack(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_PAD: {
      return ParsePad(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_PADV2: {
      return ParsePadV2(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_POW: {
      return ParsePow(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_PRELU: {
      return ParsePrelu(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_QUANTIZE: {
      return ParseQuantize(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_REDUCE_ANY: {
      return ParseReducer(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_REDUCE_ALL: {
      return ParseReducer(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_REDUCE_MAX: {
      return ParseReducer(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_REDUCE_MIN: {
      return ParseReducer(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_REDUCE_PROD: {
      return ParseReducer(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_RELU: {
      return ParseRelu(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_RELU6: {
      return ParseRelu6(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_RESHAPE: {
      return ParseReshape(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_RESIZE_BILINEAR: {
      return ParseResizeBilinear(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_RESIZE_NEAREST_NEIGHBOR: {
      return ParseResizeNearestNeighbor(op, error_reporter, allocator,
                                        builtin_data);
    }

    case BuiltinOperator_ROUND: {
      return ParseRound(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_RSQRT: {
      return ParseRsqrt(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_SHAPE: {
      return ParseShape(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_SIN: {
      return ParseSin(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_SOFTMAX: {
      return ParseSoftmax(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_SPACE_TO_BATCH_ND: {
      return ParseSpaceToBatchNd(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_SPACE_TO_DEPTH: {
      return ParseSpaceToDepth(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_SPLIT: {
      return ParseSplit(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_SPLIT_V: {
      return ParseSplitV(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_SQRT: {
      return ParseSqrt(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_SQUARE: {
      return ParseSquare(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_SQUEEZE: {
      return ParseSqueeze(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_STRIDED_SLICE: {
      return ParseStridedSlice(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_SUB: {
      return ParseSub(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_SUM: {
      return ParseReducer(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_SVDF: {
      return ParseSvdf(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_TANH: {
      return ParseTanh(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_TRANSPOSE_CONV: {
      return ParseTransposeConv(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_UNPACK: {
      return ParseUnpack(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_ZEROS_LIKE: {
      return ParseZerosLike(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_CAST: {
      return ParseCast(op, error_reporter, allocator, builtin_data);
    }

    // The cases below parse their builtin options inline. For most of them a
    // missing options table is tolerated and the (zero-initialized) params
    // struct is returned as-is, matching legacy behavior.
    case BuiltinOperator_LSH_PROJECTION: {
      auto params = safe_allocator.Allocate<TfLiteLSHProjectionParams>();
      TF_LITE_ENSURE(error_reporter, params != nullptr);
      if (const auto* lshParams =
              op->builtin_options_as_LSHProjectionOptions()) {
        params->type = parseLSHProjectionType(lshParams->type());
      }
      *builtin_data = params.release();
      return kTfLiteOk;
    }
    case BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_RNN: {
      auto params = safe_allocator.Allocate<TfLiteSequenceRNNParams>();
      TF_LITE_ENSURE(error_reporter, params != nullptr);
      if (const auto* sequence_rnn_params =
              op->builtin_options_as_SequenceRNNOptions()) {
        params->activation =
            ConvertActivation(sequence_rnn_params->fused_activation_function());
        params->time_major = sequence_rnn_params->time_major();
        params->asymmetric_quantize_inputs =
            sequence_rnn_params->asymmetric_quantize_inputs();
      }
      *builtin_data = params.release();
      return kTfLiteOk;
    }
    case BuiltinOperator_BIDIRECTIONAL_SEQUENCE_RNN: {
      auto params =
          safe_allocator.Allocate<TfLiteBidirectionalSequenceRNNParams>();
      TF_LITE_ENSURE(error_reporter, params != nullptr);
      if (const auto* bidi_sequence_rnn_params =
              op->builtin_options_as_BidirectionalSequenceRNNOptions()) {
        params->activation = ConvertActivation(
            bidi_sequence_rnn_params->fused_activation_function());
        params->time_major = bidi_sequence_rnn_params->time_major();
        params->merge_outputs = bidi_sequence_rnn_params->merge_outputs();
        params->asymmetric_quantize_inputs =
            bidi_sequence_rnn_params->asymmetric_quantize_inputs();
      }
      *builtin_data = params.release();
      return kTfLiteOk;
    }
    case BuiltinOperator_RNN: {
      auto params = safe_allocator.Allocate<TfLiteRNNParams>();
      TF_LITE_ENSURE(error_reporter, params != nullptr);
      if (const auto* rnn_params = op->builtin_options_as_RNNOptions()) {
        params->activation =
            ConvertActivation(rnn_params->fused_activation_function());
        params->asymmetric_quantize_inputs =
            rnn_params->asymmetric_quantize_inputs();
      }
      *builtin_data = params.release();
      return kTfLiteOk;
    }
    case BuiltinOperator_EMBEDDING_LOOKUP_SPARSE: {
      auto params =
          safe_allocator.Allocate<TfLiteEmbeddingLookupSparseParams>();
      TF_LITE_ENSURE(error_reporter, params != nullptr);
      if (const auto* embedding_params =
              op->builtin_options_as_EmbeddingLookupSparseOptions()) {
        params->combiner = parseCombinerType(embedding_params->combiner());
      }
      *builtin_data = params.release();
      return kTfLiteOk;
    }

    case BuiltinOperator_HASHTABLE_LOOKUP:
      // no-op.
      return kTfLiteOk;

    case BuiltinOperator_LOCAL_RESPONSE_NORMALIZATION: {
      auto params = safe_allocator.Allocate<TfLiteLocalResponseNormParams>();
      TF_LITE_ENSURE(error_reporter, params != nullptr);
      if (const auto* schema_params =
              op->builtin_options_as_LocalResponseNormalizationOptions()) {
        params->radius = schema_params->radius();
        params->bias = schema_params->bias();
        params->alpha = schema_params->alpha();
        params->beta = schema_params->beta();
      }
      *builtin_data = params.release();
      return kTfLiteOk;
    }
    case BuiltinOperator_LSTM: {
      auto params = safe_allocator.Allocate<TfLiteLSTMParams>();
      TF_LITE_ENSURE(error_reporter, params != nullptr);
      if (const auto* lstm_params = op->builtin_options_as_LSTMOptions()) {
        params->activation =
            ConvertActivation(lstm_params->fused_activation_function());
        params->cell_clip = lstm_params->cell_clip();
        params->proj_clip = lstm_params->proj_clip();
        switch (lstm_params->kernel_type()) {
          case LSTMKernelType_FULL:
            params->kernel_type = kTfLiteLSTMFullKernel;
            break;
          case LSTMKernelType_BASIC:
            params->kernel_type = kTfLiteLSTMBasicKernel;
            break;
          default:
            TF_LITE_REPORT_ERROR(error_reporter,
                                 "Unhandled LSTM kernel type: %d",
                                 lstm_params->kernel_type());
            return kTfLiteError;
        }
        params->asymmetric_quantize_inputs =
            lstm_params->asymmetric_quantize_inputs();
      } else {
        // Unlike most ops above, LSTM requires its options to be present.
        TF_LITE_REPORT_ERROR(error_reporter,
                             "No valid LSTM builtin options exist");
        return kTfLiteError;
      }
      *builtin_data = params.release();
      return kTfLiteOk;
    }
    case BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_LSTM: {
      auto params =
          safe_allocator.Allocate<TfLiteUnidirectionalSequenceLSTMParams>();
      TF_LITE_ENSURE(error_reporter, params != nullptr);
      if (const auto* seq_lstm_params =
              op->builtin_options_as_UnidirectionalSequenceLSTMOptions()) {
        params->activation =
            ConvertActivation(seq_lstm_params->fused_activation_function());
        params->cell_clip = seq_lstm_params->cell_clip();
        params->proj_clip = seq_lstm_params->proj_clip();
        params->time_major = seq_lstm_params->time_major();
        params->asymmetric_quantize_inputs =
            seq_lstm_params->asymmetric_quantize_inputs();
      }
      *builtin_data = params.release();
      return kTfLiteOk;
    }
    case BuiltinOperator_BIDIRECTIONAL_SEQUENCE_LSTM: {
      auto params =
          safe_allocator.Allocate<TfLiteBidirectionalSequenceLSTMParams>();
      TF_LITE_ENSURE(error_reporter, params != nullptr);
      if (const auto* bidi_lstm_params =
              op->builtin_options_as_BidirectionalSequenceLSTMOptions()) {
        params->activation =
            ConvertActivation(bidi_lstm_params->fused_activation_function());
        params->cell_clip = bidi_lstm_params->cell_clip();
        params->proj_clip = bidi_lstm_params->proj_clip();
        params->merge_outputs = bidi_lstm_params->merge_outputs();
        params->time_major = bidi_lstm_params->time_major();
        params->asymmetric_quantize_inputs =
            bidi_lstm_params->asymmetric_quantize_inputs();
      }
      *builtin_data = params.release();
      return kTfLiteOk;
    }
    case BuiltinOperator_SKIP_GRAM: {
      auto params = safe_allocator.Allocate<TfLiteSkipGramParams>();
      TF_LITE_ENSURE(error_reporter, params != nullptr);
      if (const auto* skip_gram_params =
              op->builtin_options_as_SkipGramOptions()) {
        params->ngram_size = skip_gram_params->ngram_size();
        params->max_skip_size = skip_gram_params->max_skip_size();
        params->include_all_ngrams = skip_gram_params->include_all_ngrams();
      }
      *builtin_data = params.release();
      return kTfLiteOk;
    }

    case BuiltinOperator_GATHER: {
      return ParseGather(op, error_reporter, allocator, builtin_data);
    }
    case BuiltinOperator_SPARSE_TO_DENSE: {
      auto params = safe_allocator.Allocate<TfLiteSparseToDenseParams>();
      TF_LITE_ENSURE(error_reporter, params != nullptr);
      if (const auto* sparse_to_dense_params =
              op->builtin_options_as_SparseToDenseOptions()) {
        params->validate_indices = sparse_to_dense_params->validate_indices();
      }
      *builtin_data = params.release();
      return kTfLiteOk;
    }
    case BuiltinOperator_DELEGATE: {
      // DELEGATE nodes are created at runtime by delegation, never serialized.
      TF_LITE_REPORT_ERROR(error_reporter,
                           "DELEGATE op shouldn't exist in model.");
      return kTfLiteError;
    }
    case BuiltinOperator_FAKE_QUANT: {
      auto params = safe_allocator.Allocate<TfLiteFakeQuantParams>();
      TF_LITE_ENSURE(error_reporter, params != nullptr);
      if (const auto* schema_params =
              op->builtin_options_as_FakeQuantOptions()) {
        params->min = schema_params->min();
        params->max = schema_params->max();
        params->num_bits = schema_params->num_bits();
        params->narrow_range = schema_params->narrow_range();
      }
      *builtin_data = params.release();
      return kTfLiteOk;
    }
    case BuiltinOperator_ONE_HOT: {
      auto params = safe_allocator.Allocate<TfLiteOneHotParams>();
      TF_LITE_ENSURE(error_reporter, params != nullptr);
      if (const auto* schema_params = op->builtin_options_as_OneHotOptions()) {
        params->axis = schema_params->axis();
      }
      *builtin_data = params.release();
      return kTfLiteOk;
    }
    case BuiltinOperator_MIRROR_PAD: {
      auto params = safe_allocator.Allocate<TfLiteMirrorPaddingParams>();
      TF_LITE_ENSURE(error_reporter, params != nullptr);
      const auto* mirror_pad_params = op->builtin_options_as_MirrorPadOptions();
      if (mirror_pad_params != nullptr) {
        params->mode =
            mirror_pad_params->mode() == tflite::MirrorPadMode_REFLECT
                ? TfLiteMirrorPaddingMode::kTfLiteMirrorPaddingReflect
                : TfLiteMirrorPaddingMode::kTfLiteMirrorPaddingSymmetric;
      }
      *builtin_data = params.release();
      return kTfLiteOk;
    }
    case BuiltinOperator_UNIQUE: {
      auto params = safe_allocator.Allocate<TfLiteUniqueParams>();
      TF_LITE_ENSURE(error_reporter, params != nullptr);
      const auto* unique_params = op->builtin_options_as_UniqueOptions();
      if (unique_params != nullptr) {
        // Index output type is restricted to int32/int64 by the schema.
        params->index_out_type =
            unique_params->idx_out_type() == tflite::TensorType_INT64
                ? TfLiteType::kTfLiteInt64
                : TfLiteType::kTfLiteInt32;
      }
      *builtin_data = params.release();
      return kTfLiteOk;
    }
    case BuiltinOperator_REVERSE_SEQUENCE: {
      auto params = safe_allocator.Allocate<TfLiteReverseSequenceParams>();
      TF_LITE_ENSURE(error_reporter, params != nullptr);
      if (const auto* reverse_seq_params =
              op->builtin_options_as_ReverseSequenceOptions()) {
        params->seq_dim = reverse_seq_params->seq_dim();
        params->batch_dim = reverse_seq_params->batch_dim();
      }
      *builtin_data = params.release();
      return kTfLiteOk;
    }
    case BuiltinOperator_IF: {
      auto params = safe_allocator.Allocate<TfLiteIfParams>();
      TF_LITE_ENSURE(error_reporter, params != nullptr);
      if (const auto* if_params = op->builtin_options_as_IfOptions()) {
        params->then_subgraph_index = if_params->then_subgraph_index();
        params->else_subgraph_index = if_params->else_subgraph_index();
      }
      *builtin_data = params.release();
      return kTfLiteOk;
    }
    case BuiltinOperator_WHILE: {
      auto params = safe_allocator.Allocate<TfLiteWhileParams>();
      TF_LITE_ENSURE(error_reporter, params != nullptr);
      if (const auto* while_params = op->builtin_options_as_WhileOptions()) {
        params->cond_subgraph_index = while_params->cond_subgraph_index();
        params->body_subgraph_index = while_params->body_subgraph_index();
      }
      *builtin_data = params.release();
      return kTfLiteOk;
    }
    case BuiltinOperator_CALL_ONCE: {
      auto params = safe_allocator.Allocate<TfLiteCallOnceParams>();
      TF_LITE_ENSURE(error_reporter, params != nullptr);
      if (const auto* call_once_params =
              op->builtin_options_as_CallOnceOptions()) {
        params->init_subgraph_index = call_once_params->init_subgraph_index();
      }
      *builtin_data = params.release();
      return kTfLiteOk;
    }
    case BuiltinOperator_CONV_3D:
    case BuiltinOperator_CONV_3D_TRANSPOSE: {
      // Both 3D conv variants share the Conv3DOptions table.
      auto params = safe_allocator.Allocate<TfLiteConv3DParams>();
      TF_LITE_ENSURE(error_reporter, params != nullptr);
      if (const auto* conv3d_params = op->builtin_options_as_Conv3DOptions()) {
        params->padding = ConvertPadding(conv3d_params->padding());
        params->activation =
            ConvertActivation(conv3d_params->fused_activation_function());
        params->stride_depth = conv3d_params->stride_d();
        params->stride_height = conv3d_params->stride_h();
        params->stride_width = conv3d_params->stride_w();
        params->dilation_depth_factor = conv3d_params->dilation_d_factor();
        params->dilation_height_factor = conv3d_params->dilation_h_factor();
        params->dilation_width_factor = conv3d_params->dilation_w_factor();
      }
      *builtin_data = params.release();
      return kTfLiteOk;
    }
    case BuiltinOperator_HASHTABLE: {
      auto params = safe_allocator.Allocate<TfLiteHashtableParams>();
      TF_LITE_ENSURE(error_reporter, params != nullptr);
      if (const auto* hashtable_params =
              op->builtin_options_as_HashtableOptions()) {
        params->table_id = hashtable_params->table_id();
        TF_LITE_ENSURE_STATUS(ConvertTensorType(
            hashtable_params->key_dtype(), &params->key_dtype, error_reporter));
        TF_LITE_ENSURE_STATUS(ConvertTensorType(hashtable_params->value_dtype(),
                                                &params->value_dtype,
                                                error_reporter));
      }
      *builtin_data = params.release();
      return kTfLiteOk;
    }
    case BuiltinOperator_VAR_HANDLE: {
      auto params = safe_allocator.Allocate<TfLiteVarHandleParams>();
      TF_LITE_ENSURE(error_reporter, params != nullptr);
      params->container = nullptr;
      params->shared_name = nullptr;
      if (const auto* var_handle_params =
              op->builtin_options_as_VarHandleOptions()) {
        // NOTE: the strings point into the flatbuffer; the model must outlive
        // these params.
        if (var_handle_params->container())
          params->container = var_handle_params->container()->c_str();
        if (var_handle_params->shared_name())
          params->shared_name = var_handle_params->shared_name()->c_str();
      }
      *builtin_data = params.release();
      return kTfLiteOk;
    }
    // Below are the ops with no builtin_data structure.
    // TODO(aselle): Implement call in BuiltinOptions, but nullptrs are
    // ok for now, since there is no call implementation either.
    case BuiltinOperator_CALL:
    case BuiltinOperator_CONCAT_EMBEDDINGS:
    case BuiltinOperator_COS:
    case BuiltinOperator_CUSTOM:
    case BuiltinOperator_EMBEDDING_LOOKUP:
    case BuiltinOperator_EQUAL:
    case BuiltinOperator_MATRIX_DIAG:
    case BuiltinOperator_MATRIX_SET_DIAG:
    case BuiltinOperator_RELU_N1_TO_1:
    case BuiltinOperator_SELECT:
    case BuiltinOperator_SELECT_V2:
    case BuiltinOperator_SLICE:
    case BuiltinOperator_TILE:
    case BuiltinOperator_TOPK_V2:
    case BuiltinOperator_TRANSPOSE:
    case BuiltinOperator_RANGE:
    case BuiltinOperator_SQUARED_DIFFERENCE:
    case BuiltinOperator_REVERSE_V2:
    case BuiltinOperator_WHERE:
    case BuiltinOperator_RANK:
    case BuiltinOperator_NON_MAX_SUPPRESSION_V4:
    case BuiltinOperator_NON_MAX_SUPPRESSION_V5:
    case BuiltinOperator_SCATTER_ND:
    case BuiltinOperator_DENSIFY:
    case BuiltinOperator_SEGMENT_SUM:
    case BuiltinOperator_BROADCAST_TO:
    case BuiltinOperator_RFFT2D:
    case BuiltinOperator_IMAG:
    case BuiltinOperator_REAL:
    case BuiltinOperator_COMPLEX_ABS:
    case BuiltinOperator_HASHTABLE_FIND:
    case BuiltinOperator_HASHTABLE_IMPORT:
    case BuiltinOperator_HASHTABLE_SIZE:
    case BuiltinOperator_READ_VARIABLE:
    case BuiltinOperator_ASSIGN_VARIABLE:
    case BuiltinOperator_BROADCAST_ARGS:
      return kTfLiteOk;
    case BuiltinOperator_PLACEHOLDER_FOR_GREATER_OP_CODES:
      return kTfLiteError;
  }
  return kTfLiteError;
}  // NOLINT[readability/fn_size]
#endif  // !defined(TF_LITE_STATIC_MEMORY)
857 } // namespace
858
ConvertTensorType(TensorType tensor_type,TfLiteType * type,ErrorReporter * error_reporter)859 TfLiteStatus ConvertTensorType(TensorType tensor_type, TfLiteType* type,
860 ErrorReporter* error_reporter) {
861 switch (tensor_type) {
862 case TensorType_FLOAT16:
863 *type = kTfLiteFloat16;
864 return kTfLiteOk;
865 case TensorType_FLOAT32:
866 *type = kTfLiteFloat32;
867 return kTfLiteOk;
868 case TensorType_FLOAT64:
869 *type = kTfLiteFloat64;
870 return kTfLiteOk;
871 case TensorType_INT16:
872 *type = kTfLiteInt16;
873 return kTfLiteOk;
874 case TensorType_INT32:
875 *type = kTfLiteInt32;
876 return kTfLiteOk;
877 case TensorType_UINT32:
878 *type = kTfLiteUInt32;
879 return kTfLiteOk;
880 case TensorType_UINT8:
881 *type = kTfLiteUInt8;
882 return kTfLiteOk;
883 case TensorType_INT8:
884 *type = kTfLiteInt8;
885 return kTfLiteOk;
886 case TensorType_INT64:
887 *type = kTfLiteInt64;
888 return kTfLiteOk;
889 case TensorType_UINT64:
890 *type = kTfLiteUInt64;
891 return kTfLiteOk;
892 case TensorType_STRING:
893 *type = kTfLiteString;
894 return kTfLiteOk;
895 case TensorType_BOOL:
896 *type = kTfLiteBool;
897 return kTfLiteOk;
898 case TensorType_COMPLEX64:
899 *type = kTfLiteComplex64;
900 return kTfLiteOk;
901 case TensorType_COMPLEX128:
902 *type = kTfLiteComplex128;
903 return kTfLiteOk;
904 case TensorType_RESOURCE:
905 *type = kTfLiteResource;
906 return kTfLiteOk;
907 case TensorType_VARIANT:
908 *type = kTfLiteVariant;
909 return kTfLiteOk;
910 default:
911 *type = kTfLiteNoType;
912 TF_LITE_REPORT_ERROR(error_reporter,
913 "Unsupported data type %d in tensor\n", tensor_type);
914 return kTfLiteError;
915 }
916 }
917
918 // We have this parse function instead of directly returning kTfLiteOk from the
919 // switch-case in ParseOpData because this function is used as part of the
920 // selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseAbs(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
                      void**) {
  // ABS carries no builtin options, so there is nothing to parse or allocate.
  return kTfLiteOk;
}
925
ParseAdd(const Operator * op,ErrorReporter * error_reporter,BuiltinDataAllocator * allocator,void ** builtin_data)926 TfLiteStatus ParseAdd(const Operator* op, ErrorReporter* error_reporter,
927 BuiltinDataAllocator* allocator, void** builtin_data) {
928 CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
929
930 SafeBuiltinDataAllocator safe_allocator(allocator);
931 std::unique_ptr<TfLiteAddParams, SafeBuiltinDataAllocator::BuiltinDataDeleter>
932 params = safe_allocator.Allocate<TfLiteAddParams>();
933 TF_LITE_ENSURE(error_reporter, params != nullptr);
934
935 const AddOptions* schema_params = op->builtin_options_as_AddOptions();
936
937 if (schema_params != nullptr) {
938 params->activation =
939 ConvertActivation(schema_params->fused_activation_function());
940 params->pot_scale_int16 = schema_params->pot_scale_int16();
941 } else {
942 // TODO(b/157480169): We should either return kTfLiteError or fill in some
943 // reasonable defaults in the params struct. We are not doing so until we
944 // better undertand the ramifications of changing the legacy behavior.
945 }
946
947 *builtin_data = params.release();
948 return kTfLiteOk;
949 }
950
TfLiteStatus ParseAddN(const Operator* op, ErrorReporter* error_reporter,
                       BuiltinDataAllocator* allocator, void** builtin_data) {
  // ADD_N has no builtin options to parse; all parameters are intentionally
  // unused but kept to match the common parse-function signature.
  return kTfLiteOk;
}
955
ParseArgMax(const Operator * op,ErrorReporter * error_reporter,BuiltinDataAllocator * allocator,void ** builtin_data)956 TfLiteStatus ParseArgMax(const Operator* op, ErrorReporter* error_reporter,
957 BuiltinDataAllocator* allocator, void** builtin_data) {
958 CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
959
960 SafeBuiltinDataAllocator safe_allocator(allocator);
961 std::unique_ptr<TfLiteArgMaxParams,
962 SafeBuiltinDataAllocator::BuiltinDataDeleter>
963 params = safe_allocator.Allocate<TfLiteArgMaxParams>();
964 TF_LITE_ENSURE(error_reporter, params != nullptr);
965
966 const ArgMaxOptions* schema_params = op->builtin_options_as_ArgMaxOptions();
967
968 if (schema_params != nullptr) {
969 TF_LITE_ENSURE_STATUS(ConvertTensorType(
970 schema_params->output_type(), ¶ms->output_type, error_reporter));
971 } else {
972 // TODO(b/157480169): We should either return kTfLiteError or fill in some
973 // reasonable defaults in the params struct. We are not doing so until we
974 // better undertand the ramifications of changing the legacy behavior.
975 }
976
977 *builtin_data = params.release();
978 return kTfLiteOk;
979 }
980
ParseArgMin(const Operator * op,ErrorReporter * error_reporter,BuiltinDataAllocator * allocator,void ** builtin_data)981 TfLiteStatus ParseArgMin(const Operator* op, ErrorReporter* error_reporter,
982 BuiltinDataAllocator* allocator, void** builtin_data) {
983 CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
984
985 SafeBuiltinDataAllocator safe_allocator(allocator);
986 std::unique_ptr<TfLiteArgMinParams,
987 SafeBuiltinDataAllocator::BuiltinDataDeleter>
988 params = safe_allocator.Allocate<TfLiteArgMinParams>();
989 TF_LITE_ENSURE(error_reporter, params != nullptr);
990
991 const ArgMinOptions* schema_params = op->builtin_options_as_ArgMinOptions();
992
993 if (schema_params != nullptr) {
994 TF_LITE_ENSURE_STATUS(ConvertTensorType(
995 schema_params->output_type(), ¶ms->output_type, error_reporter));
996 } else {
997 // TODO(b/157480169): We should either return kTfLiteError or fill in some
998 // reasonable defaults in the params struct. We are not doing so until we
999 // better undertand the ramifications of changing the legacy behavior.
1000 }
1001
1002 *builtin_data = params.release();
1003 return kTfLiteOk;
1004 }
1005
1006 // We have this parse function instead of directly returning kTfLiteOk from the
1007 // switch-case in ParseOpData because this function is used as part of the
1008 // selective registration for the OpResolver implementation in micro.
ParseBatchMatMul(const Operator * op,ErrorReporter * error_reporter,BuiltinDataAllocator * allocator,void ** builtin_data)1009 TfLiteStatus ParseBatchMatMul(const Operator* op, ErrorReporter* error_reporter,
1010 BuiltinDataAllocator* allocator,
1011 void** builtin_data) {
1012 CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
1013
1014 SafeBuiltinDataAllocator safe_allocator(allocator);
1015 auto params = safe_allocator.Allocate<TfLiteBatchMatMulParams>();
1016 TF_LITE_ENSURE(error_reporter, params != nullptr);
1017 if (const auto* bmm_params = op->builtin_options_as_BatchMatMulOptions()) {
1018 params->adj_x = bmm_params->adj_x();
1019 params->adj_y = bmm_params->adj_y();
1020 params->asymmetric_quantize_inputs =
1021 bmm_params->asymmetric_quantize_inputs();
1022 }
1023 *builtin_data = params.release();
1024 return kTfLiteOk;
1025 }
1026
1027 // We have this parse function instead of directly returning kTfLiteOk from the
1028 // switch-case in ParseOpData because this function is used as part of the
1029 // selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseBatchToSpaceNd(const Operator*, ErrorReporter*,
                                 BuiltinDataAllocator*, void**) {
  // BATCH_TO_SPACE_ND carries no builtin options; nothing to parse.
  return kTfLiteOk;
}
1034
1035 // We have this parse function instead of directly returning kTfLiteOk from the
1036 // switch-case in ParseOpData because this function is used as part of the
1037 // selective registration for the OpResolver implementation in micro.
ParseCast(const Operator * op,ErrorReporter * error_reporter,BuiltinDataAllocator * allocator,void ** builtin_data)1038 TfLiteStatus ParseCast(const Operator* op, ErrorReporter* error_reporter,
1039 BuiltinDataAllocator* allocator, void** builtin_data) {
1040 CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
1041
1042 SafeBuiltinDataAllocator safe_allocator(allocator);
1043 auto params = safe_allocator.Allocate<TfLiteCastParams>();
1044 TF_LITE_ENSURE(error_reporter, params != nullptr);
1045 if (const auto* schema_params = op->builtin_options_as_CastOptions()) {
1046 TF_LITE_ENSURE_STATUS(ConvertTensorType(
1047 schema_params->in_data_type(), ¶ms->in_data_type, error_reporter));
1048 TF_LITE_ENSURE_STATUS(ConvertTensorType(schema_params->out_data_type(),
1049 ¶ms->out_data_type,
1050 error_reporter));
1051 }
1052 *builtin_data = params.release();
1053 return kTfLiteOk;
1054 }
1055
1056 // We have this parse function instead of directly returning kTfLiteOk from the
1057 // switch-case in ParseOpData because this function is used as part of the
1058 // selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseCeil(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
                       void**) {
  // CEIL carries no builtin options; nothing to parse.
  return kTfLiteOk;
}
1063
ParseConcatenation(const Operator * op,ErrorReporter * error_reporter,BuiltinDataAllocator * allocator,void ** builtin_data)1064 TfLiteStatus ParseConcatenation(const Operator* op,
1065 ErrorReporter* error_reporter,
1066 BuiltinDataAllocator* allocator,
1067 void** builtin_data) {
1068 CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
1069
1070 SafeBuiltinDataAllocator safe_allocator(allocator);
1071 std::unique_ptr<TfLiteConcatenationParams,
1072 SafeBuiltinDataAllocator::BuiltinDataDeleter>
1073 params = safe_allocator.Allocate<TfLiteConcatenationParams>();
1074 TF_LITE_ENSURE(error_reporter, params != nullptr);
1075
1076 const ConcatenationOptions* schema_params =
1077 op->builtin_options_as_ConcatenationOptions();
1078
1079 if (schema_params != nullptr) {
1080 params->activation =
1081 ConvertActivation(schema_params->fused_activation_function());
1082 params->axis = schema_params->axis();
1083 } else {
1084 // TODO(b/157480169): We should either return kTfLiteError or fill in some
1085 // reasonable defaults in the params struct. We are not doing so until we
1086 // better undertand the ramifications of changing the legacy behavior.
1087 }
1088
1089 *builtin_data = params.release();
1090 return kTfLiteOk;
1091 }
1092
ParseConv2D(const Operator * op,ErrorReporter * error_reporter,BuiltinDataAllocator * allocator,void ** builtin_data)1093 TfLiteStatus ParseConv2D(const Operator* op, ErrorReporter* error_reporter,
1094 BuiltinDataAllocator* allocator, void** builtin_data) {
1095 CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
1096
1097 SafeBuiltinDataAllocator safe_allocator(allocator);
1098 std::unique_ptr<TfLiteConvParams,
1099 SafeBuiltinDataAllocator::BuiltinDataDeleter>
1100 params = safe_allocator.Allocate<TfLiteConvParams>();
1101 TF_LITE_ENSURE(error_reporter, params != nullptr);
1102
1103 const Conv2DOptions* schema_params = op->builtin_options_as_Conv2DOptions();
1104
1105 if (schema_params != nullptr) {
1106 params->padding = ConvertPadding(schema_params->padding());
1107 params->stride_width = schema_params->stride_w();
1108 params->stride_height = schema_params->stride_h();
1109 params->activation =
1110 ConvertActivation(schema_params->fused_activation_function());
1111
1112 params->dilation_width_factor = schema_params->dilation_w_factor();
1113 params->dilation_height_factor = schema_params->dilation_h_factor();
1114 } else {
1115 // TODO(b/157480169): We should either return kTfLiteError or fill in some
1116 // reasonable defaults in the params struct. We are not doing so until we
1117 // better undertand the ramifications of changing the legacy behavior.
1118 }
1119
1120 *builtin_data = params.release();
1121 return kTfLiteOk;
1122 }
1123
1124 // We have this parse function instead of directly returning kTfLiteOk from the
1125 // switch-case in ParseOpData because this function is used as part of the
1126 // selective registration for the OpResolver implementation in micro.
ParseCumsum(const Operator * op,ErrorReporter * error_reporter,BuiltinDataAllocator * allocator,void ** builtin_data)1127 TfLiteStatus ParseCumsum(const Operator* op, ErrorReporter* error_reporter,
1128 BuiltinDataAllocator* allocator, void** builtin_data) {
1129 CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
1130
1131 SafeBuiltinDataAllocator safe_allocator(allocator);
1132 auto params = safe_allocator.Allocate<TfLiteCumsumParams>();
1133 TF_LITE_ENSURE(error_reporter, params != nullptr);
1134 if (const auto* cumsum_params = op->builtin_options_as_CumsumOptions()) {
1135 params->exclusive = cumsum_params->exclusive();
1136 params->reverse = cumsum_params->reverse();
1137 }
1138 *builtin_data = params.release();
1139 return kTfLiteOk;
1140 }
1141
1142 // We have this parse function instead of directly returning kTfLiteOk from the
1143 // switch-case in ParseOpData because this function is used as part of the
1144 // selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseCos(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
                      void**) {
  // COS carries no builtin options; nothing to parse.
  return kTfLiteOk;
}
1149
ParseDepthToSpace(const Operator * op,ErrorReporter * error_reporter,BuiltinDataAllocator * allocator,void ** builtin_data)1150 TfLiteStatus ParseDepthToSpace(const Operator* op,
1151 ErrorReporter* error_reporter,
1152 BuiltinDataAllocator* allocator,
1153 void** builtin_data) {
1154 CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
1155
1156 SafeBuiltinDataAllocator safe_allocator(allocator);
1157 std::unique_ptr<TfLiteDepthToSpaceParams,
1158 SafeBuiltinDataAllocator::BuiltinDataDeleter>
1159 params = safe_allocator.Allocate<TfLiteDepthToSpaceParams>();
1160 TF_LITE_ENSURE(error_reporter, params != nullptr);
1161
1162 const auto* schema_params = op->builtin_options_as_DepthToSpaceOptions();
1163 if (schema_params != nullptr) {
1164 params->block_size = schema_params->block_size();
1165 } else {
1166 // TODO(b/157480169): We should either return kTfLiteError or fill in some
1167 // reasonable defaults in the params struct. We are not doing so until we
1168 // better undertand the ramifications of changing the legacy behavior.
1169 }
1170
1171 *builtin_data = params.release();
1172 return kTfLiteOk;
1173 }
1174
ParseDepthwiseConv2D(const Operator * op,ErrorReporter * error_reporter,BuiltinDataAllocator * allocator,void ** builtin_data)1175 TfLiteStatus ParseDepthwiseConv2D(const Operator* op,
1176 ErrorReporter* error_reporter,
1177 BuiltinDataAllocator* allocator,
1178 void** builtin_data) {
1179 CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
1180
1181 SafeBuiltinDataAllocator safe_allocator(allocator);
1182
1183 std::unique_ptr<TfLiteDepthwiseConvParams,
1184 SafeBuiltinDataAllocator::BuiltinDataDeleter>
1185 params = safe_allocator.Allocate<TfLiteDepthwiseConvParams>();
1186 TF_LITE_ENSURE(error_reporter, params != nullptr);
1187
1188 const DepthwiseConv2DOptions* schema_params =
1189 op->builtin_options_as_DepthwiseConv2DOptions();
1190
1191 if (schema_params != nullptr) {
1192 params->padding = ConvertPadding(schema_params->padding());
1193 params->stride_width = schema_params->stride_w();
1194 params->stride_height = schema_params->stride_h();
1195 params->depth_multiplier = schema_params->depth_multiplier();
1196 params->activation =
1197 ConvertActivation(schema_params->fused_activation_function());
1198
1199 params->dilation_width_factor = schema_params->dilation_w_factor();
1200 params->dilation_height_factor = schema_params->dilation_h_factor();
1201 } else {
1202 // TODO(b/157480169): We should either return kTfLiteError or fill in some
1203 // reasonable defaults in the params struct. We are not doing so until we
1204 // better undertand the ramifications of changing the legacy behavior.
1205 }
1206
1207 *builtin_data = params.release();
1208 return kTfLiteOk;
1209 }
1210
1211 // We have this parse function instead of directly returning kTfLiteOk from the
1212 // switch-case in ParseOpData because this function is used as part of the
1213 // selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseDequantize(const Operator*, ErrorReporter*,
                             BuiltinDataAllocator*, void**) {
  // DEQUANTIZE carries no builtin options; nothing to parse.
  return kTfLiteOk;
}
1218
ParseDiv(const Operator * op,ErrorReporter * error_reporter,BuiltinDataAllocator * allocator,void ** builtin_data)1219 TfLiteStatus ParseDiv(const Operator* op, ErrorReporter* error_reporter,
1220 BuiltinDataAllocator* allocator, void** builtin_data) {
1221 CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
1222
1223 SafeBuiltinDataAllocator safe_allocator(allocator);
1224 auto params = safe_allocator.Allocate<TfLiteDivParams>();
1225 TF_LITE_ENSURE(error_reporter, params != nullptr);
1226 if (const auto* schema_params = op->builtin_options_as_DivOptions()) {
1227 params->activation =
1228 ConvertActivation(schema_params->fused_activation_function());
1229 }
1230 *builtin_data = params.release();
1231 return kTfLiteOk;
1232 }
1233
1234 // We have this parse function instead of directly returning kTfLiteOk from the
1235 // switch-case in ParseOpData because this function is used as part of the
1236 // selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseElu(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
                      void**) {
  // ELU carries no builtin options; nothing to parse.
  return kTfLiteOk;
}
1241
1242 // We have this parse function instead of directly returning kTfLiteOk from the
1243 // switch-case in ParseOpData because this function is used as part of the
1244 // selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseEqual(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
                        void**) {
  // EQUAL carries no builtin options; nothing to parse.
  return kTfLiteOk;
}
1249
1250 // We have this parse function instead of directly returning kTfLiteOk from the
1251 // switch-case in ParseOpData because this function is used as part of the
1252 // selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseExp(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
                      void**) {
  // EXP carries no builtin options; nothing to parse.
  return kTfLiteOk;
}
1257
1258 // We have this parse function instead of directly returning kTfLiteOk from the
1259 // switch-case in ParseOpData because this function is used as part of the
1260 // selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseExpandDims(const Operator*, ErrorReporter*,
                             BuiltinDataAllocator*, void**) {
  // EXPAND_DIMS carries no builtin options; nothing to parse.
  return kTfLiteOk;
}
1265
1266 // We have this parse function instead of directly returning kTfLiteOk from the
1267 // switch-case in ParseOpData because this function is used as part of the
1268 // selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseFill(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
                       void**) {
  // FILL carries no builtin options; nothing to parse.
  return kTfLiteOk;
}
1273
1274 // We have this parse function instead of directly returning kTfLiteOk from the
1275 // switch-case in ParseOpData because this function is used as part of the
1276 // selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseFloor(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
                        void**) {
  // FLOOR carries no builtin options; nothing to parse.
  return kTfLiteOk;
}
1281
1282 // We have this parse function instead of directly returning kTfLiteOk from the
1283 // switch-case in ParseOpData because this function is used as part of the
1284 // selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseFloorDiv(const Operator*, ErrorReporter*,
                           BuiltinDataAllocator*, void**) {
  // FLOOR_DIV carries no builtin options; nothing to parse.
  return kTfLiteOk;
}
1289
1290 // We have this parse function instead of directly returning kTfLiteOk from the
1291 // switch-case in ParseOpData because this function is used as part of the
1292 // selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseFloorMod(const Operator*, ErrorReporter*,
                           BuiltinDataAllocator*, void**) {
  // FLOOR_MOD carries no builtin options; nothing to parse.
  return kTfLiteOk;
}
1297
ParseFullyConnected(const Operator * op,ErrorReporter * error_reporter,BuiltinDataAllocator * allocator,void ** builtin_data)1298 TfLiteStatus ParseFullyConnected(const Operator* op,
1299 ErrorReporter* error_reporter,
1300 BuiltinDataAllocator* allocator,
1301 void** builtin_data) {
1302 CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
1303
1304 SafeBuiltinDataAllocator safe_allocator(allocator);
1305
1306 std::unique_ptr<TfLiteFullyConnectedParams,
1307 SafeBuiltinDataAllocator::BuiltinDataDeleter>
1308 params = safe_allocator.Allocate<TfLiteFullyConnectedParams>();
1309 TF_LITE_ENSURE(error_reporter, params != nullptr);
1310
1311 const FullyConnectedOptions* schema_params =
1312 op->builtin_options_as_FullyConnectedOptions();
1313
1314 if (schema_params != nullptr) {
1315 params->activation =
1316 ConvertActivation(schema_params->fused_activation_function());
1317 params->keep_num_dims = schema_params->keep_num_dims();
1318 params->asymmetric_quantize_inputs =
1319 schema_params->asymmetric_quantize_inputs();
1320
1321 switch (schema_params->weights_format()) {
1322 case FullyConnectedOptionsWeightsFormat_DEFAULT:
1323 params->weights_format = kTfLiteFullyConnectedWeightsFormatDefault;
1324 break;
1325 case FullyConnectedOptionsWeightsFormat_SHUFFLED4x16INT8:
1326 params->weights_format =
1327 kTfLiteFullyConnectedWeightsFormatShuffled4x16Int8;
1328 break;
1329 default:
1330 TF_LITE_REPORT_ERROR(error_reporter,
1331 "Unhandled fully-connected weights format.");
1332 return kTfLiteError;
1333 }
1334 } else {
1335 // TODO(b/157480169): We should either return kTfLiteError or fill in some
1336 // reasonable defaults in the params struct. We are not doing so until we
1337 // better undertand the ramifications of changing the legacy behavior.
1338 }
1339
1340 *builtin_data = params.release();
1341 return kTfLiteOk;
1342 }
1343
1344 // We have this parse function instead of directly returning kTfLiteOk from the
1345 // switch-case in ParseOpData because this function is used as part of the
1346 // selective registration for the OpResolver implementation in micro.
ParseGather(const Operator * op,ErrorReporter * error_reporter,BuiltinDataAllocator * allocator,void ** builtin_data)1347 TfLiteStatus ParseGather(const Operator* op, ErrorReporter* error_reporter,
1348 BuiltinDataAllocator* allocator, void** builtin_data) {
1349 CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
1350
1351 SafeBuiltinDataAllocator safe_allocator(allocator);
1352 auto params = safe_allocator.Allocate<TfLiteGatherParams>();
1353 TF_LITE_ENSURE(error_reporter, params != nullptr);
1354 params->axis = 0;
1355 params->batch_dims = 0;
1356 if (const auto* gather_params = op->builtin_options_as_GatherOptions()) {
1357 params->axis = gather_params->axis();
1358 params->batch_dims = gather_params->batch_dims();
1359 }
1360
1361 *builtin_data = params.release();
1362 return kTfLiteOk;
1363 }
1364
1365 // We have this parse function instead of directly returning kTfLiteOk from the
1366 // switch-case in ParseOpData because this function is used as part of the
1367 // selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseGatherNd(const Operator*, ErrorReporter*,
                           BuiltinDataAllocator*, void**) {
  // GATHER_ND carries no builtin options; nothing to parse.
  return kTfLiteOk;
}
1372
1373 // We have this parse function instead of directly returning kTfLiteOk from the
1374 // switch-case in ParseOpData because this function is used as part of the
1375 // selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseGreater(const Operator*, ErrorReporter*,
                          BuiltinDataAllocator*, void**) {
  // GREATER carries no builtin options; nothing to parse.
  return kTfLiteOk;
}
1380
1381 // We have this parse function instead of directly returning kTfLiteOk from the
1382 // switch-case in ParseOpData because this function is used as part of the
1383 // selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseGreaterEqual(const Operator*, ErrorReporter*,
                               BuiltinDataAllocator*, void**) {
  // GREATER_EQUAL carries no builtin options; nothing to parse.
  return kTfLiteOk;
}
1388
1389 // We have this parse function instead of directly returning kTfLiteOk from the
1390 // switch-case in ParseOpData because this function is used as part of the
1391 // selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseHardSwish(const Operator*, ErrorReporter*,
                            BuiltinDataAllocator*, void**) {
  // HARD_SWISH carries no builtin options; nothing to parse.
  return kTfLiteOk;
}
1396
ParseIf(const Operator * op,ErrorReporter * error_reporter,BuiltinDataAllocator * allocator,void ** builtin_data)1397 TfLiteStatus ParseIf(const Operator* op, ErrorReporter* error_reporter,
1398 BuiltinDataAllocator* allocator, void** builtin_data) {
1399 CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
1400
1401 SafeBuiltinDataAllocator safe_allocator(allocator);
1402 std::unique_ptr<TfLiteIfParams, SafeBuiltinDataAllocator::BuiltinDataDeleter>
1403 params = safe_allocator.Allocate<TfLiteIfParams>();
1404 TF_LITE_ENSURE(error_reporter, params != nullptr);
1405
1406 const IfOptions* schema_params = op->builtin_options_as_IfOptions();
1407
1408 if (schema_params != nullptr) {
1409 params->then_subgraph_index = schema_params->then_subgraph_index();
1410 params->else_subgraph_index = schema_params->else_subgraph_index();
1411 } else {
1412 // TODO(b/157480169): We should either return kTfLiteError or fill in some
1413 // reasonable defaults in the params struct. We are not doing so until we
1414 // better undertand the ramifications of changing the legacy behavior.
1415 }
1416
1417 *builtin_data = params.release();
1418 return kTfLiteOk;
1419 }
1420
ParseL2Normalization(const Operator * op,ErrorReporter * error_reporter,BuiltinDataAllocator * allocator,void ** builtin_data)1421 TfLiteStatus ParseL2Normalization(const Operator* op,
1422 ErrorReporter* error_reporter,
1423 BuiltinDataAllocator* allocator,
1424 void** builtin_data) {
1425 CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
1426
1427 SafeBuiltinDataAllocator safe_allocator(allocator);
1428 std::unique_ptr<TfLiteL2NormParams,
1429 SafeBuiltinDataAllocator::BuiltinDataDeleter>
1430 params = safe_allocator.Allocate<TfLiteL2NormParams>();
1431 TF_LITE_ENSURE(error_reporter, params != nullptr);
1432
1433 const L2NormOptions* schema_params = op->builtin_options_as_L2NormOptions();
1434
1435 if (schema_params != nullptr) {
1436 params->activation =
1437 ConvertActivation(schema_params->fused_activation_function());
1438 } else {
1439 // TODO(b/157480169): We should either return kTfLiteError or fill in some
1440 // reasonable defaults in the params struct. We are not doing so until we
1441 // better undertand the ramifications of changing the legacy behavior.
1442 }
1443
1444 *builtin_data = params.release();
1445 return kTfLiteOk;
1446 }
1447
ParseLeakyRelu(const Operator * op,ErrorReporter * error_reporter,BuiltinDataAllocator * allocator,void ** builtin_data)1448 TfLiteStatus ParseLeakyRelu(const Operator* op, ErrorReporter* error_reporter,
1449 BuiltinDataAllocator* allocator,
1450 void** builtin_data) {
1451 CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
1452
1453 SafeBuiltinDataAllocator safe_allocator(allocator);
1454 auto params = safe_allocator.Allocate<TfLiteLeakyReluParams>();
1455 TF_LITE_ENSURE(error_reporter, params != nullptr);
1456 if (const auto* leaky_relu_params =
1457 op->builtin_options_as_LeakyReluOptions()) {
1458 params->alpha = leaky_relu_params->alpha();
1459 }
1460 *builtin_data = params.release();
1461 return kTfLiteOk;
1462 }
1463
1464 // We have this parse function instead of directly returning kTfLiteOk from the
1465 // switch-case in ParseOpData because this function is used as part of the
1466 // selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseLess(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
                       void**) {
  // LESS carries no builtin options; nothing to parse.
  return kTfLiteOk;
}
1471
1472 // We have this parse function instead of directly returning kTfLiteOk from the
1473 // switch-case in ParseOpData because this function is used as part of the
1474 // selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseLessEqual(const Operator*, ErrorReporter*,
                            BuiltinDataAllocator*, void**) {
  // LESS_EQUAL carries no builtin options; nothing to parse.
  return kTfLiteOk;
}
1479
1480 // We have this parse function instead of directly returning kTfLiteOk from the
1481 // switch-case in ParseOpData because this function is used as part of the
1482 // selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseLog(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
                      void**) {
  // LOG carries no builtin options; nothing to parse.
  return kTfLiteOk;
}
1487
1488 // We have this parse function instead of directly returning kTfLiteOk from the
1489 // switch-case in ParseOpData because this function is used as part of the
1490 // selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseLogicalAnd(const Operator*, ErrorReporter*,
                             BuiltinDataAllocator*, void**) {
  // LOGICAL_AND carries no builtin options; nothing to parse.
  return kTfLiteOk;
}
1495
1496 // We have this parse function instead of directly returning kTfLiteOk from the
1497 // switch-case in ParseOpData because this function is used as part of the
1498 // selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseLogicalNot(const Operator*, ErrorReporter*,
                             BuiltinDataAllocator*, void**) {
  // LOGICAL_NOT carries no builtin options; nothing to parse.
  return kTfLiteOk;
}
1503
1504 // We have this parse function instead of directly returning kTfLiteOk from the
1505 // switch-case in ParseOpData because this function is used as part of the
1506 // selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseLogicalOr(const Operator*, ErrorReporter*,
                            BuiltinDataAllocator*, void**) {
  // LOGICAL_OR carries no builtin options; nothing to parse.
  return kTfLiteOk;
}
1511
1512 // We have this parse function instead of directly returning kTfLiteOk from the
1513 // switch-case in ParseOpData because this function is used as part of the
1514 // selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseLogistic(const Operator*, ErrorReporter*,
                           BuiltinDataAllocator*, void**) {
  // LOGISTIC carries no builtin options; nothing to parse.
  return kTfLiteOk;
}
1519
1520 // We have this parse function instead of directly returning kTfLiteOk from the
1521 // switch-case in ParseOpData because this function is used as part of the
1522 // selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseLogSoftmax(const Operator*, ErrorReporter*,
                             BuiltinDataAllocator*, void**) {
  // LOG_SOFTMAX carries no builtin options; nothing to parse.
  return kTfLiteOk;
}
1527
1528 // We have this parse function instead of directly returning kTfLiteOk from the
1529 // switch-case in ParseOpData because this function is used as part of the
1530 // selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseMaximum(const Operator*, ErrorReporter*,
                          BuiltinDataAllocator*, void**) {
  // MAXIMUM carries no builtin options; nothing to parse.
  return kTfLiteOk;
}
1535
1536 // We have this parse function instead of directly returning kTfLiteOk from the
1537 // switch-case in ParseOpData because this function is used as part of the
1538 // selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseMinimum(const Operator*, ErrorReporter*,
                          BuiltinDataAllocator*, void**) {
  // MINIMUM carries no builtin options; nothing to parse.
  return kTfLiteOk;
}
1543
ParseMul(const Operator * op,ErrorReporter * error_reporter,BuiltinDataAllocator * allocator,void ** builtin_data)1544 TfLiteStatus ParseMul(const Operator* op, ErrorReporter* error_reporter,
1545 BuiltinDataAllocator* allocator, void** builtin_data) {
1546 CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
1547
1548 SafeBuiltinDataAllocator safe_allocator(allocator);
1549 std::unique_ptr<TfLiteMulParams, SafeBuiltinDataAllocator::BuiltinDataDeleter>
1550 params = safe_allocator.Allocate<TfLiteMulParams>();
1551 TF_LITE_ENSURE(error_reporter, params != nullptr);
1552
1553 const MulOptions* schema_params = op->builtin_options_as_MulOptions();
1554
1555 if (schema_params != nullptr) {
1556 params->activation =
1557 ConvertActivation(schema_params->fused_activation_function());
1558 } else {
1559 // TODO(b/157480169): We should either return kTfLiteError or fill in some
1560 // reasonable defaults in the params struct. We are not doing so until we
1561 // better undertand the ramifications of changing the legacy behavior.
1562 }
1563
1564 *builtin_data = params.release();
1565 return kTfLiteOk;
1566 }
1567
1568 // We have this parse function instead of directly returning kTfLiteOk from the
1569 // switch-case in ParseOpData because this function is used as part of the
1570 // selective registration for the OpResolver implementation in micro.
ParseNeg(const Operator *,ErrorReporter *,BuiltinDataAllocator *,void **)1571 TfLiteStatus ParseNeg(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
1572 void**) {
1573 return kTfLiteOk;
1574 }
1575
1576 // We have this parse function instead of directly returning kTfLiteOk from the
1577 // switch-case in ParseOpData because this function is used as part of the
1578 // selective registration for the OpResolver implementation in micro.
ParseNotEqual(const Operator *,ErrorReporter *,BuiltinDataAllocator *,void **)1579 TfLiteStatus ParseNotEqual(const Operator*, ErrorReporter*,
1580 BuiltinDataAllocator*, void**) {
1581 return kTfLiteOk;
1582 }
1583
ParsePack(const Operator * op,ErrorReporter * error_reporter,BuiltinDataAllocator * allocator,void ** builtin_data)1584 TfLiteStatus ParsePack(const Operator* op, ErrorReporter* error_reporter,
1585 BuiltinDataAllocator* allocator, void** builtin_data) {
1586 CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
1587
1588 SafeBuiltinDataAllocator safe_allocator(allocator);
1589 std::unique_ptr<TfLitePackParams,
1590 SafeBuiltinDataAllocator::BuiltinDataDeleter>
1591 params = safe_allocator.Allocate<TfLitePackParams>();
1592 TF_LITE_ENSURE(error_reporter, params != nullptr);
1593
1594 const PackOptions* schema_params = op->builtin_options_as_PackOptions();
1595
1596 if (schema_params != nullptr) {
1597 params->values_count = schema_params->values_count();
1598 params->axis = schema_params->axis();
1599 } else {
1600 // TODO(b/157480169): We should either return kTfLiteError or fill in some
1601 // reasonable defaults in the params struct. We are not doing so until we
1602 // better undertand the ramifications of changing the legacy behavior.
1603 }
1604
1605 *builtin_data = params.release();
1606 return kTfLiteOk;
1607 }
1608
1609 // We have this parse function instead of directly returning kTfLiteOk from the
1610 // switch-case in ParseOpData because this function is used as part of the
1611 // selective registration for the OpResolver implementation in micro.
ParsePad(const Operator *,ErrorReporter *,BuiltinDataAllocator *,void **)1612 TfLiteStatus ParsePad(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
1613 void**) {
1614 return kTfLiteOk;
1615 }
1616
1617 // We have this parse function instead of directly returning kTfLiteOk from the
1618 // switch-case in ParseOpData because this function is used as part of the
1619 // selective registration for the OpResolver implementation in micro.
ParsePadV2(const Operator *,ErrorReporter *,BuiltinDataAllocator *,void **)1620 TfLiteStatus ParsePadV2(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
1621 void**) {
1622 return kTfLiteOk;
1623 }
1624
ParsePool(const Operator * op,ErrorReporter * error_reporter,BuiltinDataAllocator * allocator,void ** builtin_data)1625 TfLiteStatus ParsePool(const Operator* op, ErrorReporter* error_reporter,
1626 BuiltinDataAllocator* allocator, void** builtin_data) {
1627 CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
1628
1629 SafeBuiltinDataAllocator safe_allocator(allocator);
1630 std::unique_ptr<TfLitePoolParams,
1631 SafeBuiltinDataAllocator::BuiltinDataDeleter>
1632 params = safe_allocator.Allocate<TfLitePoolParams>();
1633 TF_LITE_ENSURE(error_reporter, params != nullptr);
1634
1635 const Pool2DOptions* schema_params = op->builtin_options_as_Pool2DOptions();
1636
1637 if (schema_params != nullptr) {
1638 params->padding = ConvertPadding(schema_params->padding());
1639 params->stride_width = schema_params->stride_w();
1640 params->stride_height = schema_params->stride_h();
1641 params->filter_width = schema_params->filter_width();
1642 params->filter_height = schema_params->filter_height();
1643 params->activation =
1644 ConvertActivation(schema_params->fused_activation_function());
1645 } else {
1646 // TODO(b/157480169): We should either return kTfLiteError or fill in some
1647 // reasonable defaults in the params struct. We are not doing so until we
1648 // better undertand the ramifications of changing the legacy behavior.
1649 }
1650
1651 *builtin_data = params.release();
1652 return kTfLiteOk;
1653 }
1654
1655 // We have this parse function instead of directly returning kTfLiteOk from the
1656 // switch-case in ParseOpData because this function is used as part of the
1657 // selective registration for the OpResolver implementation in micro.
ParsePow(const Operator *,ErrorReporter *,BuiltinDataAllocator *,void **)1658 TfLiteStatus ParsePow(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
1659 void**) {
1660 return kTfLiteOk;
1661 }
1662
1663 // We have this parse function instead of directly returning kTfLiteOk from the
1664 // switch-case in ParseOpData because this function is used as part of the
1665 // selective registration for the OpResolver implementation in micro.
ParsePrelu(const Operator *,ErrorReporter *,BuiltinDataAllocator *,void **)1666 TfLiteStatus ParsePrelu(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
1667 void**) {
1668 return kTfLiteOk;
1669 }
1670
1671 // We have this parse function instead of directly returning kTfLiteOk from the
1672 // switch-case in ParseOpData because this function is used as part of the
1673 // selective registration for the OpResolver implementation in micro.
ParseQuantize(const Operator *,ErrorReporter *,BuiltinDataAllocator *,void **)1674 TfLiteStatus ParseQuantize(const Operator*, ErrorReporter*,
1675 BuiltinDataAllocator*, void**) {
1676 return kTfLiteOk;
1677 }
1678
ParseReducer(const Operator * op,ErrorReporter * error_reporter,BuiltinDataAllocator * allocator,void ** builtin_data)1679 TfLiteStatus ParseReducer(const Operator* op, ErrorReporter* error_reporter,
1680 BuiltinDataAllocator* allocator,
1681 void** builtin_data) {
1682 CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
1683
1684 SafeBuiltinDataAllocator safe_allocator(allocator);
1685
1686 std::unique_ptr<TfLiteReducerParams,
1687 SafeBuiltinDataAllocator::BuiltinDataDeleter>
1688 params = safe_allocator.Allocate<TfLiteReducerParams>();
1689 TF_LITE_ENSURE(error_reporter, params != nullptr);
1690
1691 const ReducerOptions* schema_params = op->builtin_options_as_ReducerOptions();
1692
1693 if (schema_params != nullptr) {
1694 params->keep_dims = schema_params->keep_dims();
1695 } else {
1696 // TODO(b/157480169): We should either return kTfLiteError or fill in some
1697 // reasonable defaults in the params struct. We are not doing so until we
1698 // better undertand the ramifications of changing the legacy behavior.
1699 }
1700
1701 *builtin_data = params.release();
1702 return kTfLiteOk;
1703 }
1704
1705 // We have this parse function instead of directly returning kTfLiteOk from the
1706 // switch-case in ParseOpData because this function is used as part of the
1707 // selective registration for the OpResolver implementation in micro.
ParseRelu(const Operator *,ErrorReporter *,BuiltinDataAllocator *,void **)1708 TfLiteStatus ParseRelu(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
1709 void**) {
1710 return kTfLiteOk;
1711 }
1712
1713 // We have this parse function instead of directly returning kTfLiteOk from the
1714 // switch-case in ParseOpData because this function is used as part of the
1715 // selective registration for the OpResolver implementation in micro.
ParseRelu6(const Operator *,ErrorReporter *,BuiltinDataAllocator *,void **)1716 TfLiteStatus ParseRelu6(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
1717 void**) {
1718 return kTfLiteOk;
1719 }
1720
ParseReshape(const Operator * op,ErrorReporter * error_reporter,BuiltinDataAllocator * allocator,void ** builtin_data)1721 TfLiteStatus ParseReshape(const Operator* op, ErrorReporter* error_reporter,
1722 BuiltinDataAllocator* allocator,
1723 void** builtin_data) {
1724 CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
1725
1726 SafeBuiltinDataAllocator safe_allocator(allocator);
1727
1728 std::unique_ptr<TfLiteReshapeParams,
1729 SafeBuiltinDataAllocator::BuiltinDataDeleter>
1730 params = safe_allocator.Allocate<TfLiteReshapeParams>();
1731 TF_LITE_ENSURE(error_reporter, params != nullptr);
1732
1733 const ReshapeOptions* schema_params = op->builtin_options_as_ReshapeOptions();
1734
1735 if (schema_params != nullptr) {
1736 const flatbuffers::Vector<int32_t>* new_shape = schema_params->new_shape();
1737 if (new_shape != nullptr) {
1738 TF_LITE_ENSURE_STATUS(
1739 FlatBufferIntVectorToArray(sizeof(params->shape), new_shape,
1740 params->shape, error_reporter, "reshape"));
1741 params->num_dimensions = new_shape->size();
1742 } else {
1743 // TODO(b/157480169) TODO(b/147203660): We should either return
1744 // kTfLiteError or fill in some reasonable defaults in the params struct.
1745 // We are not doing so until we better undertand the ramifications of
1746 // changing the legacy behavior.
1747 }
1748 } else {
1749 // TODO(b/157480169): We should either return kTfLiteError or fill in some
1750 // reasonable defaults in the params struct. We are not doing so until we
1751 // better undertand the ramifications of changing the legacy behavior.
1752 }
1753
1754 *builtin_data = params.release();
1755 return kTfLiteOk;
1756 }
1757
ParseResizeBilinear(const Operator * op,ErrorReporter * error_reporter,BuiltinDataAllocator * allocator,void ** builtin_data)1758 TfLiteStatus ParseResizeBilinear(const Operator* op,
1759 ErrorReporter* error_reporter,
1760 BuiltinDataAllocator* allocator,
1761 void** builtin_data) {
1762 CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
1763
1764 SafeBuiltinDataAllocator safe_allocator(allocator);
1765 std::unique_ptr<TfLiteResizeBilinearParams,
1766 SafeBuiltinDataAllocator::BuiltinDataDeleter>
1767 params = safe_allocator.Allocate<TfLiteResizeBilinearParams>();
1768 TF_LITE_ENSURE(error_reporter, params != nullptr);
1769
1770 const ResizeBilinearOptions* schema_params =
1771 op->builtin_options_as_ResizeBilinearOptions();
1772
1773 if (schema_params != nullptr) {
1774 params->align_corners = schema_params->align_corners();
1775 params->half_pixel_centers = schema_params->half_pixel_centers();
1776 } else {
1777 params->align_corners = false;
1778 params->half_pixel_centers = false;
1779 }
1780
1781 *builtin_data = params.release();
1782 return kTfLiteOk;
1783 }
1784
ParseResizeNearestNeighbor(const Operator * op,ErrorReporter * error_reporter,BuiltinDataAllocator * allocator,void ** builtin_data)1785 TfLiteStatus ParseResizeNearestNeighbor(const Operator* op,
1786 ErrorReporter* error_reporter,
1787 BuiltinDataAllocator* allocator,
1788 void** builtin_data) {
1789 CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
1790
1791 SafeBuiltinDataAllocator safe_allocator(allocator);
1792 std::unique_ptr<TfLiteResizeNearestNeighborParams,
1793 SafeBuiltinDataAllocator::BuiltinDataDeleter>
1794 params = safe_allocator.Allocate<TfLiteResizeNearestNeighborParams>();
1795 TF_LITE_ENSURE(error_reporter, params != nullptr);
1796
1797 const ResizeNearestNeighborOptions* schema_params =
1798 op->builtin_options_as_ResizeNearestNeighborOptions();
1799
1800 if (schema_params != nullptr) {
1801 params->align_corners = schema_params->align_corners();
1802 params->half_pixel_centers = schema_params->half_pixel_centers();
1803 } else {
1804 params->align_corners = false;
1805 params->half_pixel_centers = false;
1806 }
1807
1808 *builtin_data = params.release();
1809 return kTfLiteOk;
1810 }
1811
1812 // We have this parse function instead of directly returning kTfLiteOk from the
1813 // switch-case in ParseOpData because this function is used as part of the
1814 // selective registration for the OpResolver implementation in micro.
ParseRound(const Operator *,ErrorReporter *,BuiltinDataAllocator *,void **)1815 TfLiteStatus ParseRound(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
1816 void**) {
1817 return kTfLiteOk;
1818 }
1819
1820 // We have this parse function instead of directly returning kTfLiteOk from the
1821 // switch-case in ParseOpData because this function is used as part of the
1822 // selective registration for the OpResolver implementation in micro.
ParseRsqrt(const Operator *,ErrorReporter *,BuiltinDataAllocator *,void **)1823 TfLiteStatus ParseRsqrt(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
1824 void**) {
1825 return kTfLiteOk;
1826 }
1827
ParseShape(const Operator * op,ErrorReporter * error_reporter,BuiltinDataAllocator * allocator,void ** builtin_data)1828 TfLiteStatus ParseShape(const Operator* op, ErrorReporter* error_reporter,
1829 BuiltinDataAllocator* allocator, void** builtin_data) {
1830 SafeBuiltinDataAllocator safe_allocator(allocator);
1831 std::unique_ptr<TfLiteShapeParams,
1832 SafeBuiltinDataAllocator::BuiltinDataDeleter>
1833 params = safe_allocator.Allocate<TfLiteShapeParams>();
1834 TF_LITE_ENSURE(error_reporter, params != nullptr);
1835
1836 const ShapeOptions* schema_params = op->builtin_options_as_ShapeOptions();
1837
1838 if (schema_params != nullptr) {
1839 TF_LITE_ENSURE_STATUS(ConvertTensorType(schema_params->out_type(),
1840 ¶ms->out_type, error_reporter));
1841 } else {
1842 // TODO(b/157480169): We should either return kTfLiteError or fill in some
1843 // reasonable defaults in the params struct. We are not doing so until we
1844 // better undertand the ramifications of changing the legacy behavior.
1845 }
1846
1847 *builtin_data = params.release();
1848 return kTfLiteOk;
1849 }
1850
1851 // We have this parse function instead of directly returning kTfLiteOk from the
1852 // switch-case in ParseOpData because this function is used as part of the
1853 // selective registration for the OpResolver implementation in micro.
ParseSin(const Operator *,ErrorReporter *,BuiltinDataAllocator *,void **)1854 TfLiteStatus ParseSin(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
1855 void**) {
1856 return kTfLiteOk;
1857 }
1858
ParseSoftmax(const Operator * op,ErrorReporter * error_reporter,BuiltinDataAllocator * allocator,void ** builtin_data)1859 TfLiteStatus ParseSoftmax(const Operator* op, ErrorReporter* error_reporter,
1860 BuiltinDataAllocator* allocator,
1861 void** builtin_data) {
1862 CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
1863
1864 SafeBuiltinDataAllocator safe_allocator(allocator);
1865 std::unique_ptr<TfLiteSoftmaxParams,
1866 SafeBuiltinDataAllocator::BuiltinDataDeleter>
1867 params = safe_allocator.Allocate<TfLiteSoftmaxParams>();
1868 TF_LITE_ENSURE(error_reporter, params != nullptr);
1869
1870 const SoftmaxOptions* schema_params = op->builtin_options_as_SoftmaxOptions();
1871
1872 if (schema_params != nullptr) {
1873 params->beta = schema_params->beta();
1874 } else {
1875 // TODO(b/157480169): We should either return kTfLiteError or fill in some
1876 // reasonable defaults in the params struct. We are not doing so until we
1877 // better undertand the ramifications of changing the legacy behavior.
1878 }
1879
1880 *builtin_data = params.release();
1881 return kTfLiteOk;
1882 }
1883
1884 // We have this parse function instead of directly returning kTfLiteOk from the
1885 // switch-case in ParseOpData because this function is used as part of the
1886 // selective registration for the OpResolver implementation in micro.
ParseSpaceToBatchNd(const Operator *,ErrorReporter *,BuiltinDataAllocator *,void **)1887 TfLiteStatus ParseSpaceToBatchNd(const Operator*, ErrorReporter*,
1888 BuiltinDataAllocator*, void**) {
1889 return kTfLiteOk;
1890 }
1891
ParseSpaceToDepth(const Operator * op,ErrorReporter * error_reporter,BuiltinDataAllocator * allocator,void ** builtin_data)1892 TfLiteStatus ParseSpaceToDepth(const Operator* op,
1893 ErrorReporter* error_reporter,
1894 BuiltinDataAllocator* allocator,
1895 void** builtin_data) {
1896 CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
1897
1898 SafeBuiltinDataAllocator safe_allocator(allocator);
1899 std::unique_ptr<TfLiteSpaceToDepthParams,
1900 SafeBuiltinDataAllocator::BuiltinDataDeleter>
1901 params = safe_allocator.Allocate<TfLiteSpaceToDepthParams>();
1902 TF_LITE_ENSURE(error_reporter, params != nullptr);
1903
1904 const auto* schema_params = op->builtin_options_as_SpaceToDepthOptions();
1905 if (schema_params != nullptr) {
1906 params->block_size = schema_params->block_size();
1907 } else {
1908 // TODO(b/157480169): We should either return kTfLiteError or fill in some
1909 // reasonable defaults in the params struct. We are not doing so until we
1910 // better undertand the ramifications of changing the legacy behavior.
1911 }
1912
1913 *builtin_data = params.release();
1914 return kTfLiteOk;
1915 }
1916
ParseSplit(const Operator * op,ErrorReporter * error_reporter,BuiltinDataAllocator * allocator,void ** builtin_data)1917 TfLiteStatus ParseSplit(const Operator* op, ErrorReporter* error_reporter,
1918 BuiltinDataAllocator* allocator, void** builtin_data) {
1919 CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
1920
1921 SafeBuiltinDataAllocator safe_allocator(allocator);
1922 std::unique_ptr<TfLiteSplitParams,
1923 SafeBuiltinDataAllocator::BuiltinDataDeleter>
1924 params = safe_allocator.Allocate<TfLiteSplitParams>();
1925 TF_LITE_ENSURE(error_reporter, params != nullptr);
1926
1927 const SplitOptions* schema_params = op->builtin_options_as_SplitOptions();
1928
1929 if (schema_params != nullptr) {
1930 params->num_splits = schema_params->num_splits();
1931 } else {
1932 // TODO(b/157480169): We should either return kTfLiteError or fill in some
1933 // reasonable defaults in the params struct. We are not doing so until we
1934 // better undertand the ramifications of changing the legacy behavior.
1935 }
1936
1937 *builtin_data = params.release();
1938 return kTfLiteOk;
1939 }
1940
ParseSplitV(const Operator * op,ErrorReporter * error_reporter,BuiltinDataAllocator * allocator,void ** builtin_data)1941 TfLiteStatus ParseSplitV(const Operator* op, ErrorReporter* error_reporter,
1942 BuiltinDataAllocator* allocator, void** builtin_data) {
1943 CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
1944 SafeBuiltinDataAllocator safe_allocator(allocator);
1945
1946 std::unique_ptr<TfLiteSplitVParams,
1947 SafeBuiltinDataAllocator::BuiltinDataDeleter>
1948 params = safe_allocator.Allocate<TfLiteSplitVParams>();
1949 TF_LITE_ENSURE(error_reporter, params != nullptr);
1950
1951 const SplitVOptions* schema_params = op->builtin_options_as_SplitVOptions();
1952
1953 if (schema_params != nullptr) {
1954 params->num_splits = schema_params->num_splits();
1955 } else {
1956 // TODO(b/157480169): We should either return kTfLiteError or fill in some
1957 // reasonable defaults in the params struct. We are not doing so until we
1958 // better undertand the ramifications of changing the legacy behavior.
1959 }
1960
1961 *builtin_data = params.release();
1962 return kTfLiteOk;
1963 }
1964
ParseSqueeze(const Operator * op,ErrorReporter * error_reporter,BuiltinDataAllocator * allocator,void ** builtin_data)1965 TfLiteStatus ParseSqueeze(const Operator* op, ErrorReporter* error_reporter,
1966 BuiltinDataAllocator* allocator,
1967 void** builtin_data) {
1968 CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
1969 SafeBuiltinDataAllocator safe_allocator(allocator);
1970
1971 std::unique_ptr<TfLiteSqueezeParams,
1972 SafeBuiltinDataAllocator::BuiltinDataDeleter>
1973 params = safe_allocator.Allocate<TfLiteSqueezeParams>();
1974 TF_LITE_ENSURE(error_reporter, params != nullptr);
1975
1976 const SqueezeOptions* schema_params = op->builtin_options_as_SqueezeOptions();
1977
1978 if (schema_params != nullptr) {
1979 const auto* squeeze_dims = schema_params->squeeze_dims();
1980 if (squeeze_dims != nullptr) {
1981 TF_LITE_ENSURE_STATUS(FlatBufferIntVectorToArray(
1982 sizeof(params->squeeze_dims), squeeze_dims, params->squeeze_dims,
1983 error_reporter, "squeeze"));
1984 params->num_squeeze_dims = squeeze_dims->size();
1985 } else {
1986 params->num_squeeze_dims = 0;
1987 }
1988 } else {
1989 // TODO(b/157480169): We should either return kTfLiteError or fill in some
1990 // reasonable defaults in the params struct. We are not doing so until we
1991 // better undertand the ramifications of changing the legacy behavior.
1992 }
1993
1994 *builtin_data = params.release();
1995 return kTfLiteOk;
1996 }
1997
1998 // We have this parse function instead of directly returning kTfLiteOk from the
1999 // switch-case in ParseOpData because this function is used as part of the
2000 // selective registration for the OpResolver implementation in micro.
ParseSqrt(const Operator *,ErrorReporter *,BuiltinDataAllocator *,void **)2001 TfLiteStatus ParseSqrt(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
2002 void**) {
2003 return kTfLiteOk;
2004 }
2005
2006 // We have this parse function instead of directly returning kTfLiteOk from the
2007 // switch-case in ParseOpData because this function is used as part of the
2008 // selective registration for the OpResolver implementation in micro.
ParseSquare(const Operator *,ErrorReporter *,BuiltinDataAllocator *,void **)2009 TfLiteStatus ParseSquare(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
2010 void**) {
2011 return kTfLiteOk;
2012 }
2013
ParseStridedSlice(const Operator * op,ErrorReporter * error_reporter,BuiltinDataAllocator * allocator,void ** builtin_data)2014 TfLiteStatus ParseStridedSlice(const Operator* op,
2015 ErrorReporter* error_reporter,
2016 BuiltinDataAllocator* allocator,
2017 void** builtin_data) {
2018 CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
2019
2020 SafeBuiltinDataAllocator safe_allocator(allocator);
2021 std::unique_ptr<TfLiteStridedSliceParams,
2022 SafeBuiltinDataAllocator::BuiltinDataDeleter>
2023 params = safe_allocator.Allocate<TfLiteStridedSliceParams>();
2024 TF_LITE_ENSURE(error_reporter, params != nullptr);
2025
2026 const StridedSliceOptions* schema_params =
2027 op->builtin_options_as_StridedSliceOptions();
2028
2029 if (schema_params != nullptr) {
2030 params->begin_mask = schema_params->begin_mask();
2031 params->end_mask = schema_params->end_mask();
2032 params->ellipsis_mask = schema_params->ellipsis_mask();
2033 params->new_axis_mask = schema_params->new_axis_mask();
2034 params->shrink_axis_mask = schema_params->shrink_axis_mask();
2035 } else {
2036 // TODO(b/157480169): We should either return kTfLiteError or fill in some
2037 // reasonable defaults in the params struct. We are not doing so until we
2038 // better undertand the ramifications of changing the legacy behavior.
2039 }
2040
2041 *builtin_data = params.release();
2042 return kTfLiteOk;
2043 }
2044
ParseSub(const Operator * op,ErrorReporter * error_reporter,BuiltinDataAllocator * allocator,void ** builtin_data)2045 TfLiteStatus ParseSub(const Operator* op, ErrorReporter* error_reporter,
2046 BuiltinDataAllocator* allocator, void** builtin_data) {
2047 CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
2048
2049 SafeBuiltinDataAllocator safe_allocator(allocator);
2050 std::unique_ptr<TfLiteSubParams, SafeBuiltinDataAllocator::BuiltinDataDeleter>
2051 params = safe_allocator.Allocate<TfLiteSubParams>();
2052 TF_LITE_ENSURE(error_reporter, params != nullptr);
2053
2054 const SubOptions* schema_params = op->builtin_options_as_SubOptions();
2055
2056 if (schema_params != nullptr) {
2057 params->activation =
2058 ConvertActivation(schema_params->fused_activation_function());
2059 params->pot_scale_int16 = schema_params->pot_scale_int16();
2060 } else {
2061 // TODO(b/157480169): We should either return kTfLiteError or fill in some
2062 // reasonable defaults in the params struct. We are not doing so until we
2063 // better undertand the ramifications of changing the legacy behavior.
2064 }
2065
2066 *builtin_data = params.release();
2067 return kTfLiteOk;
2068 }
2069
ParseSvdf(const Operator * op,ErrorReporter * error_reporter,BuiltinDataAllocator * allocator,void ** builtin_data)2070 TfLiteStatus ParseSvdf(const Operator* op, ErrorReporter* error_reporter,
2071 BuiltinDataAllocator* allocator, void** builtin_data) {
2072 CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
2073
2074 SafeBuiltinDataAllocator safe_allocator(allocator);
2075 std::unique_ptr<TfLiteSVDFParams,
2076 SafeBuiltinDataAllocator::BuiltinDataDeleter>
2077 params = safe_allocator.Allocate<TfLiteSVDFParams>();
2078 TF_LITE_ENSURE(error_reporter, params != nullptr);
2079
2080 const SVDFOptions* schema_params = op->builtin_options_as_SVDFOptions();
2081 if (schema_params != nullptr) {
2082 params->rank = schema_params->rank();
2083 params->activation =
2084 ConvertActivation(schema_params->fused_activation_function());
2085 params->asymmetric_quantize_inputs =
2086 schema_params->asymmetric_quantize_inputs();
2087 } else {
2088 // TODO(b/157480169): We should either return kTfLiteError or fill in some
2089 // reasonable defaults in the params struct. We are not doing so until we
2090 // better undertand the ramifications of changing the legacy behavior.
2091 }
2092
2093 *builtin_data = params.release();
2094 return kTfLiteOk;
2095 }
2096
2097 // We have this parse function instead of directly returning kTfLiteOk from the
2098 // switch-case in ParseOpData because this function is used as part of the
2099 // selective registration for the OpResolver implementation in micro.
ParseTanh(const Operator *,ErrorReporter *,BuiltinDataAllocator *,void **)2100 TfLiteStatus ParseTanh(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
2101 void**) {
2102 return kTfLiteOk;
2103 }
2104 //
2105 // We have this parse function instead of directly returning kTfLiteOk from the
2106 // switch-case in ParseOpData because this function is used as part of the
2107 // selective registration for the OpResolver implementation in micro.
ParseTranspose(const Operator *,ErrorReporter *,BuiltinDataAllocator *,void **)2108 TfLiteStatus ParseTranspose(const Operator*, ErrorReporter*,
2109 BuiltinDataAllocator*, void**) {
2110 return kTfLiteOk;
2111 }
2112
ParseTransposeConv(const Operator * op,ErrorReporter * error_reporter,BuiltinDataAllocator * allocator,void ** builtin_data)2113 TfLiteStatus ParseTransposeConv(const Operator* op,
2114 ErrorReporter* error_reporter,
2115 BuiltinDataAllocator* allocator,
2116 void** builtin_data) {
2117 CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
2118
2119 SafeBuiltinDataAllocator safe_allocator(allocator);
2120 std::unique_ptr<TfLiteTransposeConvParams,
2121 SafeBuiltinDataAllocator::BuiltinDataDeleter>
2122 params = safe_allocator.Allocate<TfLiteTransposeConvParams>();
2123 TF_LITE_ENSURE(error_reporter, params != nullptr);
2124 const TransposeConvOptions* transpose_conv_params =
2125 op->builtin_options_as_TransposeConvOptions();
2126 if (transpose_conv_params != nullptr) {
2127 params->padding = ConvertPadding(transpose_conv_params->padding());
2128 params->stride_width = transpose_conv_params->stride_w();
2129 params->stride_height = transpose_conv_params->stride_h();
2130 } else {
2131 // TODO(b/157480169): We should either return kTfLiteError or fill in some
2132 // reasonable defaults in the params struct. We are not doing so until we
2133 // better undertand the ramifications of changing the legacy behavior.
2134 }
2135 *builtin_data = params.release();
2136 return kTfLiteOk;
2137 }
2138
ParseUnpack(const Operator * op,ErrorReporter * error_reporter,BuiltinDataAllocator * allocator,void ** builtin_data)2139 TfLiteStatus ParseUnpack(const Operator* op, ErrorReporter* error_reporter,
2140 BuiltinDataAllocator* allocator, void** builtin_data) {
2141 CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
2142
2143 SafeBuiltinDataAllocator safe_allocator(allocator);
2144 std::unique_ptr<TfLiteUnpackParams,
2145 SafeBuiltinDataAllocator::BuiltinDataDeleter>
2146 params = safe_allocator.Allocate<TfLiteUnpackParams>();
2147 TF_LITE_ENSURE(error_reporter, params != nullptr);
2148
2149 const UnpackOptions* schema_params = op->builtin_options_as_UnpackOptions();
2150
2151 if (schema_params != nullptr) {
2152 params->num = schema_params->num();
2153 params->axis = schema_params->axis();
2154 } else {
2155 // TODO(b/157480169): We should either return kTfLiteError or fill in some
2156 // reasonable defaults in the params struct. We are not doing so until we
2157 // better undertand the ramifications of changing the legacy behavior.
2158 }
2159
2160 *builtin_data = params.release();
2161 return kTfLiteOk;
2162 }
2163
2164 // We have this parse function instead of directly returning kTfLiteOk from the
2165 // switch-case in ParseOpData because this function is used as part of the
2166 // selective registration for the OpResolver implementation in micro.
ParseZerosLike(const Operator *,ErrorReporter *,BuiltinDataAllocator *,void **)2167 TfLiteStatus ParseZerosLike(const Operator*, ErrorReporter*,
2168 BuiltinDataAllocator*, void**) {
2169 return kTfLiteOk;
2170 }
2171
// Top-level entry point: dispatches `op` to the op-specific parsing logic.
// On TF Lite Micro builds (TF_LITE_STATIC_MEMORY) this always fails; callers
// must use the per-op Parse* functions above instead.
TfLiteStatus ParseOpData(const Operator* op, BuiltinOperator op_type,
                         ErrorReporter* error_reporter,
                         BuiltinDataAllocator* allocator, void** builtin_data) {
// TODO(b/145762662): It would be preferable to have the build graph for TF Lite
// Micro not have the ParseOpData function at all. This would require splitting
// the current file into two separate files, one of which defines the
// ParseOpData function and the other that defines the operator specific parse
// functions (e.g. ParseAdd).
//
// Such a split was attempted but was not worth the effort at the time because
// of the following reasons:
//  * We could either duplicate the functions and the SafeBuiltinDataAllocator
//    class in the anonymous namespace of this file, or attempt to make a common
//    library with these helper functions and class.
//  * Making a common library with a separate build target was not feasible as
//    it introduced circular dependencies due to the ErrorReporter and a common
//    .cc and .h within the same api build target that also cause circular
//    dependencies due to the BuiltinDataAllocator class.
//  * If all the builtin operators were to have their own parse functions, or we
//    were ok with some amount of code duplication, then this split of the .cc
//    files would be a lot more feasible.
#ifdef TF_LITE_STATIC_MEMORY
  TF_LITE_REPORT_ERROR(
      error_reporter,
      "ParseOpData is unsupported on TfLiteMicro, please use the operator "
      "specific parse functions (e.g. ParseAdd etc.).\n");
  return kTfLiteError;
#else
  return ParseOpDataTfLite(op, op_type, error_reporter, allocator,
                           builtin_data);
#endif
}
2204
2205 } // namespace tflite
2206