/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/kernels/kernel_util.h"

#include <stdint.h>
#include <stdlib.h>

#include <algorithm>
#include <complex>
#include <limits>
#include <memory>
#ifndef TF_LITE_STATIC_MEMORY
#include <string>
#endif  // TF_LITE_STATIC_MEMORY

#include "tensorflow/lite/c/builtin_op_data.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/kernels/internal/cppmath.h"
#include "tensorflow/lite/kernels/internal/quantization_util.h"

#if defined(__APPLE__)
#include "TargetConditionals.h"
#endif

namespace tflite {

namespace {

// Assumes tensor_index is a valid index (in bounds).
inline TfLiteTensor* GetTensorAtIndex(const TfLiteContext* context,
                                      int tensor_index) {
  if (context->tensors != nullptr) {
    return &context->tensors[tensor_index];
  } else {
    return context->GetTensor(context, tensor_index);
  }
}

// Validate in a single place to reduce binary size.
inline TfLiteStatus ValidateTensorIndexingSafe(const TfLiteContext* context,
                                               int index, int max_size,
                                               const int* tensor_indices,
                                               int* tensor_index) {
  if (index < 0 || index >= max_size) {
    TF_LITE_KERNEL_LOG(const_cast<TfLiteContext*>(context),
                       "Invalid tensor index %d (not in [0, %d))\n", index,
                       max_size);
    return kTfLiteError;
  }
  if (tensor_indices[index] == kTfLiteOptionalTensor) {
    TF_LITE_KERNEL_LOG(const_cast<TfLiteContext*>(context),
                       "Tensor at index %d was optional but was expected\n",
                       index);
    return kTfLiteError;
  }

  *tensor_index = tensor_indices[index];
  return kTfLiteOk;
}

// Same as above but returns -1 for invalid inputs instead of status + logging
// error.
inline int ValidateTensorIndexing(const TfLiteContext* context, int index,
                                  int max_size, const int* tensor_indices) {
  if (index >= 0 && index < max_size) {
    const int tensor_index = tensor_indices[index];
    if (tensor_index != kTfLiteOptionalTensor) {
      return tensor_index;
    }
  }
  return -1;
}

inline TfLiteTensor* GetMutableInput(const TfLiteContext* context,
                                     const TfLiteNode* node, int index) {
  const int tensor_index = ValidateTensorIndexing(
      context, index, node->inputs->size, node->inputs->data);
  if (tensor_index < 0) {
    return nullptr;
  }
  return GetTensorAtIndex(context, tensor_index);
}

inline TfLiteStatus GetMutableInputSafe(const TfLiteContext* context,
                                        const TfLiteNode* node, int index,
                                        const TfLiteTensor** tensor) {
  int tensor_index;
  TF_LITE_ENSURE_OK(
      context, ValidateTensorIndexingSafe(context, index, node->inputs->size,
                                          node->inputs->data, &tensor_index));
  *tensor = GetTensorAtIndex(context, tensor_index);
  return kTfLiteOk;
}

}  // anonymous namespace.

const TfLiteTensor* GetInput(const TfLiteContext* context,
                             const TfLiteNode* node, int index) {
  return GetMutableInput(context, node, index);
}

TfLiteStatus GetInputSafe(const TfLiteContext* context, const TfLiteNode* node,
                          int index, const TfLiteTensor** tensor) {
  return GetMutableInputSafe(context, node, index, tensor);
}

TfLiteTensor* GetVariableInput(TfLiteContext* context, const TfLiteNode* node,
                               int index) {
  TfLiteTensor* tensor = GetMutableInput(context, node, index);
  if (tensor == nullptr) return nullptr;
  return tensor->is_variable ? tensor : nullptr;
}

TfLiteTensor* GetOutput(TfLiteContext* context, const TfLiteNode* node,
                        int index) {
  const int tensor_index = ValidateTensorIndexing(
      context, index, node->outputs->size, node->outputs->data);
  if (tensor_index < 0) {
    return nullptr;
  }
  return GetTensorAtIndex(context, tensor_index);
}

TfLiteStatus GetOutputSafe(const TfLiteContext* context, const TfLiteNode* node,
                           int index, TfLiteTensor** tensor) {
  int tensor_index;
  TF_LITE_ENSURE_OK(
      context, ValidateTensorIndexingSafe(context, index, node->outputs->size,
                                          node->outputs->data, &tensor_index));
  *tensor = GetTensorAtIndex(context, tensor_index);
  return kTfLiteOk;
}

const TfLiteTensor* GetOptionalInputTensor(const TfLiteContext* context,
                                           const TfLiteNode* node, int index) {
  return GetInput(context, node, index);
}

#ifndef TF_LITE_STATIC_MEMORY
TfLiteTensor* GetTemporary(TfLiteContext* context, const TfLiteNode* node,
                           int index) {
  const int tensor_index = ValidateTensorIndexing(
      context, index, node->temporaries->size, node->temporaries->data);
  if (tensor_index < 0) {
    return nullptr;
  }
  return GetTensorAtIndex(context, tensor_index);
}

TfLiteStatus GetTemporarySafe(const TfLiteContext* context,
                              const TfLiteNode* node, int index,
                              TfLiteTensor** tensor) {
  int tensor_index;
  TF_LITE_ENSURE_OK(context, ValidateTensorIndexingSafe(
                                 context, index, node->temporaries->size,
                                 node->temporaries->data, &tensor_index));
  *tensor = GetTensorAtIndex(context, tensor_index);
  return kTfLiteOk;
}

const TfLiteTensor* GetIntermediates(TfLiteContext* context,
                                     const TfLiteNode* node, int index) {
  const int tensor_index = ValidateTensorIndexing(
      context, index, node->intermediates->size, node->intermediates->data);
  if (tensor_index < 0) {
    return nullptr;
  }
  return GetTensorAtIndex(context, tensor_index);
}

TfLiteStatus GetIntermediatesSafe(const TfLiteContext* context,
                                  const TfLiteNode* node, int index,
                                  TfLiteTensor** tensor) {
  int tensor_index;
  TF_LITE_ENSURE_OK(context, ValidateTensorIndexingSafe(
                                 context, index, node->intermediates->size,
                                 node->intermediates->data, &tensor_index));
  *tensor = GetTensorAtIndex(context, tensor_index);
  return kTfLiteOk;
}
#endif  // TF_LITE_STATIC_MEMORY

// Per-axis
TfLiteStatus PopulateConvolutionQuantizationParams(
    TfLiteContext* context, const TfLiteTensor* input,
    const TfLiteTensor* filter, const TfLiteTensor* bias, TfLiteTensor* output,
    const TfLiteFusedActivation& activation, int32_t* multiplier, int* shift,
    int32_t* output_activation_min, int32_t* output_activation_max,
    int32_t* per_channel_multiplier, int32_t* per_channel_shift) {
  const auto* affine_quantization =
      reinterpret_cast<TfLiteAffineQuantization*>(filter->quantization.params);
  return PopulateConvolutionQuantizationParams(
      context, input, filter, bias, output, activation, multiplier, shift,
      output_activation_min, output_activation_max, per_channel_multiplier,
      per_channel_shift, affine_quantization->scale->size);
}

// Per-axis & per-tensor
TfLiteStatus PopulateConvolutionQuantizationParams(
    TfLiteContext* context, const TfLiteTensor* input,
    const TfLiteTensor* filter, const TfLiteTensor* bias, TfLiteTensor* output,
    const TfLiteFusedActivation& activation, int32_t* multiplier, int* shift,
    int32_t* output_activation_min, int32_t* output_activation_max,
    int32_t* per_channel_multiplier, int32_t* per_channel_shift,
    int num_channels) {
  TF_LITE_ENSURE_EQ(context, input->quantization.type,
                    kTfLiteAffineQuantization);
  TF_LITE_ENSURE_EQ(context, filter->quantization.type,
                    kTfLiteAffineQuantization);
  // TODO(jianlijianli): Enable bias type check and bias scale == input scale
  // * filter scale for each channel in affine quantization once bias
  // quantization is properly populated.
  // TF_LITE_ENSURE_EQ(context, bias->quantization.type,
  //                   kTfLiteAffineQuantization);

  // Check data type.
  const auto* affine_quantization =
      reinterpret_cast<TfLiteAffineQuantization*>(filter->quantization.params);
  TF_LITE_ENSURE(context, affine_quantization);
  TF_LITE_ENSURE(context, affine_quantization->scale);
  const bool is_per_channel = affine_quantization->scale->size > 1;
  if (is_per_channel) {
    // Currently only Int8/Int16 is supported for per channel quantization.
    TF_LITE_ENSURE(context,
                   input->type == kTfLiteInt8 || input->type == kTfLiteInt16);
    TF_LITE_ENSURE_EQ(context, filter->type, kTfLiteInt8);
    TF_LITE_ENSURE_EQ(context, affine_quantization->scale->size, num_channels);
    TF_LITE_ENSURE_EQ(
        context, num_channels,
        filter->dims->data[affine_quantization->quantized_dimension]);
  }

  // Populate multiplier and shift using affine quantization.
  const float input_scale = input->params.scale;
  const float output_scale = output->params.scale;
  const float* filter_scales = affine_quantization->scale->data;
  for (int i = 0; i < num_channels; ++i) {
    // If per-tensor quantization parameter is specified, broadcast it along
    // the quantization dimension (channels_out).
    const float scale = is_per_channel ? filter_scales[i] : filter_scales[0];
    const double filter_scale = static_cast<double>(scale);
    const double effective_output_scale = static_cast<double>(input_scale) *
                                          filter_scale /
                                          static_cast<double>(output_scale);
    int32_t significand;
    int channel_shift;
    QuantizeMultiplier(effective_output_scale, &significand, &channel_shift);
    per_channel_multiplier[i] = significand;
    per_channel_shift[i] = channel_shift;
  }

  // Populate scalar quantization parameters.
  // This check on legacy quantization parameters is kept only for backward
  // compatibility.
  if (input->type == kTfLiteUInt8) {
    // Check bias scale == input scale * filter scale.
    double real_multiplier = 0.0;
    TF_LITE_ENSURE_STATUS(GetQuantizedConvolutionMultipler(
        context, input, filter, bias, output, &real_multiplier));
    int exponent;

    // Populate quantization parameters with multiplier and shift.
    QuantizeMultiplier(real_multiplier, multiplier, &exponent);
    *shift = -exponent;
  }
  if (input->type == kTfLiteInt8 || input->type == kTfLiteUInt8 ||
      input->type == kTfLiteInt16) {
    TF_LITE_ENSURE_STATUS(CalculateActivationRangeQuantized(
        context, activation, output, output_activation_min,
        output_activation_max));
  }
  return kTfLiteOk;
}
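
// Worked example (illustrative numbers only): with input_scale = 0.5, a
// filter channel scale of 0.25 and output_scale = 1.0, the effective scale
// for that channel is 0.5 * 0.25 / 1.0 = 0.125. Under QuantizeMultiplier's
// Q31 fixed-point convention this is expressed as roughly
// significand = 2^30 = 1073741824 (i.e. 0.5 in Q31) with shift = -2,
// since 0.125 = 0.5 * 2^-2.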

TfLiteStatus GetQuantizedConvolutionMultipler(TfLiteContext* context,
                                              const TfLiteTensor* input,
                                              const TfLiteTensor* filter,
                                              const TfLiteTensor* bias,
                                              TfLiteTensor* output,
                                              double* multiplier) {
  const double input_product_scale = static_cast<double>(input->params.scale) *
                                     static_cast<double>(filter->params.scale);
  // The following conditions must be guaranteed by the training pipeline.
  if (bias) {
    const double bias_scale = static_cast<double>(bias->params.scale);
    // Here we're making sure the input_product_scale & bias_scale are about
    // the same. Since we have:
    // (output - output_zp) * output_scale =
    //     input_product_scale * input_product + bias * bias_scale ---- (0)
    //
    // (0) equals:
    //     (input_product + bias) * input_product_scale ----- (1)
    //               +
    //     bias * (bias_scale - input_product_scale)    ------ (2)
    //
    // For the real kernel computation, we're doing (1), so we really need to
    // make sure (2) has minimal impact on the output, i.e.
    // bias * (bias_scale - input_product_scale) / output_scale should be a
    // small number for an integer. Since bias is normally within a small
    // range, we expect (bias_scale - input_product_scale) / output_scale to
    // be a small number like 0.02.
    const double scale_diff = std::abs(input_product_scale - bias_scale);
    const double output_scale = static_cast<double>(output->params.scale);

    TF_LITE_ENSURE(context, scale_diff / output_scale <= 0.02);
  }
  return GetQuantizedConvolutionMultipler(context, input, filter, output,
                                          multiplier);
}
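
// Worked example (illustrative numbers only): with input scale 0.5 and
// filter scale 0.25, input_product_scale = 0.125. If the bias tensor was
// quantized with bias_scale = 0.126 and output_scale = 0.1, then
// scale_diff / output_scale = 0.001 / 0.1 = 0.01 <= 0.02 and the check above
// passes; a bias_scale of 0.15 would fail it (0.025 / 0.1 = 0.25).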

TfLiteStatus GetQuantizedConvolutionMultipler(TfLiteContext* context,
                                              const TfLiteTensor* input,
                                              const TfLiteTensor* filter,
                                              TfLiteTensor* output,
                                              double* multiplier) {
  const double input_product_scale =
      static_cast<double>(input->params.scale * filter->params.scale);
  TF_LITE_ENSURE(context, input_product_scale >= 0);
  *multiplier = input_product_scale / static_cast<double>(output->params.scale);

  return kTfLiteOk;
}

namespace {

inline TfLiteStatus Quantize(TfLiteContext* context, float scale,
                             int32_t zero_point, float f, int32_t& q) {
  const float tmp = TfLiteRound(f / scale);
  const bool no_integer_overflow_from_quantization =
      (tmp >= static_cast<float>(std::numeric_limits<int32_t>::min()) &&
       tmp <= static_cast<float>(std::numeric_limits<int32_t>::max()));
  TF_LITE_ENSURE(context, no_integer_overflow_from_quantization);
  q = zero_point + static_cast<int32_t>(tmp);
  return kTfLiteOk;
}
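
// Worked example (illustrative numbers only): for an int8 output with
// scale = 0.5 and zero_point = -128, quantizing the ReLU6 bound f = 6.0
// gives tmp = round(6.0 / 0.5) = 12 and q = -128 + 12 = -116, which the
// caller below then clamps against the type's qmin/qmax.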

TfLiteStatus CalculateActivationRangeQuantizedImpl(
    TfLiteContext* context, TfLiteFusedActivation activation, int32_t qmin,
    int32_t qmax, TfLiteTensor* output, int32_t* act_min, int32_t* act_max) {
  const auto scale = output->params.scale;
  const auto zero_point = output->params.zero_point;

  int32_t tmp_q;
  if (activation == kTfLiteActRelu) {
    TF_LITE_ENSURE_OK(context,
                      Quantize(context, scale, zero_point, 0.0, tmp_q));
    *act_min = std::max(qmin, tmp_q);
    *act_max = qmax;
  } else if (activation == kTfLiteActRelu6) {
    TF_LITE_ENSURE_OK(context,
                      Quantize(context, scale, zero_point, 0.0, tmp_q));
    *act_min = std::max(qmin, tmp_q);
    TF_LITE_ENSURE_OK(context,
                      Quantize(context, scale, zero_point, 6.0, tmp_q));
    *act_max = std::min(qmax, tmp_q);
  } else if (activation == kTfLiteActReluN1To1) {
    TF_LITE_ENSURE_OK(context,
                      Quantize(context, scale, zero_point, -1.0, tmp_q));
    *act_min = std::max(qmin, tmp_q);
    TF_LITE_ENSURE_OK(context,
                      Quantize(context, scale, zero_point, 1.0, tmp_q));
    *act_max = std::min(qmax, tmp_q);
  } else {
    *act_min = qmin;
    *act_max = qmax;
  }
  return kTfLiteOk;
}
}  // namespace

TfLiteStatus CalculateActivationRangeQuantized(TfLiteContext* context,
                                               TfLiteFusedActivation activation,
                                               TfLiteTensor* output,
                                               int32_t* act_min,
                                               int32_t* act_max) {
  int32_t qmin = 0;
  int32_t qmax = 0;
  if (output->type == kTfLiteUInt8) {
    qmin = std::numeric_limits<uint8_t>::min();
    qmax = std::numeric_limits<uint8_t>::max();
  } else if (output->type == kTfLiteInt8) {
    qmin = std::numeric_limits<int8_t>::min();
    qmax = std::numeric_limits<int8_t>::max();
  } else if (output->type == kTfLiteInt16) {
    qmin = std::numeric_limits<int16_t>::min();
    qmax = std::numeric_limits<int16_t>::max();
  } else {
    TF_LITE_ENSURE(context, false);
  }

  return CalculateActivationRangeQuantizedImpl(context, activation, qmin, qmax,
                                               output, act_min, act_max);
}
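
// Example usage (a minimal sketch, not part of this library): a quantized
// kernel's Prepare step typically computes the clamp range once and stores it
// in its op data. `params` (the op's builtin data) and `data` (a per-op
// struct with output_activation_min/max fields) are hypothetical here.
//
//   TF_LITE_ENSURE_OK(context, CalculateActivationRangeQuantized(
//                                  context, params->activation, output,
//                                  &data->output_activation_min,
//                                  &data->output_activation_max));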

bool HaveSameShapes(const TfLiteTensor* input1, const TfLiteTensor* input2) {
  return TfLiteIntArrayEqual(input1->dims, input2->dims);
}

#ifndef TF_LITE_STATIC_MEMORY

// TODO(b/172067338): Having this function be part of TF_LITE_STATIC_MEMORY
// build results in a 6KB size increase, even though the function is unused
// for that build. What appears to be happening is that while the linker drops
// the unused function, the string library that gets pulled in is not dropped,
// resulting in the increased binary size.
std::string GetShapeDebugString(const TfLiteIntArray* shape) {
  std::string str = "[";
  for (int d = 0; d < shape->size; ++d) {
    if (d != 0) str += ", ";
    str += std::to_string(shape->data[d]);
  }
  // Scalar (rank-0) shapes render as "[]".
  str += "]";
  return str;
}
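
// For example, a shape of {8, 7, 3} is rendered as "[8, 7, 3]" and a scalar
// shape as "[]"; the string only feeds the error messages below.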

TfLiteStatus CalculateShapeForBroadcast(TfLiteContext* context,
                                        const TfLiteTensor* input1,
                                        const TfLiteTensor* input2,
                                        TfLiteIntArray** output_shape) {
  const int dims1 = NumDimensions(input1);
  const int dims2 = NumDimensions(input2);
  const int out_dims = std::max(dims1, dims2);

  std::unique_ptr<TfLiteIntArray, void (*)(TfLiteIntArray*)> shape(
      TfLiteIntArrayCreate(out_dims), TfLiteIntArrayFree);
  for (int i = 0; i < out_dims; ++i) {
    const int d1 = i >= dims1 ? 1 : SizeOfDimension(input1, dims1 - i - 1);
    const int d2 = i >= dims2 ? 1 : SizeOfDimension(input2, dims2 - i - 1);
    if (!(d1 == d2 || d1 == 1 || d2 == 1)) {
      context->ReportError(context,
                           "Given shapes, %s and %s, are not broadcastable.",
                           GetShapeDebugString(input1->dims).c_str(),
                           GetShapeDebugString(input2->dims).c_str());
      return kTfLiteError;
    }

    if (d1 == 0 || d2 == 0) {
      shape->data[out_dims - i - 1] = 0;
    } else {
      shape->data[out_dims - i - 1] = std::max(d1, d2);
    }
  }
  *output_shape = shape.release();
  return kTfLiteOk;
}
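
// Worked example (illustrative shapes only): broadcasting [8, 1, 3] against
// [7, 1] walks the dimensions from the right, taking the larger of each pair
// (with missing leading dimensions treated as 1), and produces [8, 7, 3];
// [8, 2, 3] against [7, 1] would fail because 2 and 7 are incompatible.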

TfLiteStatus CalculateShapeForBroadcast(TfLiteContext* context,
                                        const TfLiteTensor* input1,
                                        const TfLiteTensor* input2,
                                        const TfLiteTensor* input3,
                                        TfLiteIntArray** output_shape) {
  const int dims1 = NumDimensions(input1);
  const int dims2 = NumDimensions(input2);
  const int dims3 = NumDimensions(input3);
  const int out_dims = std::max(std::max(dims1, dims2), dims3);
  std::unique_ptr<TfLiteIntArray, void (*)(TfLiteIntArray*)> shape(
      TfLiteIntArrayCreate(out_dims), TfLiteIntArrayFree);
  for (int i = 0; i < out_dims; ++i) {
    const int d1 = i >= dims1 ? 1 : SizeOfDimension(input1, dims1 - i - 1);
    const int d2 = i >= dims2 ? 1 : SizeOfDimension(input2, dims2 - i - 1);
    const int d3 = i >= dims3 ? 1 : SizeOfDimension(input3, dims3 - i - 1);
    const int min_value = std::min(std::min(d1, d2), d3);
    int max_value = std::max(std::max(d1, d2), d3);
    // If one dimension is 0, the others must be 0 or 1.
    if (min_value == 0) max_value = 0;
    if (!(d1 == 1 || d1 == max_value) || !(d2 == 1 || d2 == max_value) ||
        !(d3 == 1 || d3 == max_value)) {
      context->ReportError(
          context, "Given shapes, %s, %s and %s, are not broadcastable.",
          GetShapeDebugString(input1->dims).c_str(),
          GetShapeDebugString(input2->dims).c_str(),
          GetShapeDebugString(input3->dims).c_str());
      return kTfLiteError;
    }
    shape->data[out_dims - i - 1] = max_value;
  }
  *output_shape = shape.release();
  return kTfLiteOk;
}
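
// Worked example (illustrative shapes only): [2, 0, 3], [2, 1, 3] and
// [1, 1, 1] broadcast to [2, 0, 3]; the zero-sized middle dimension wins
// because, per the rule above, when any dimension is 0 the others must be
// 0 or 1 and the output dimension becomes 0.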
#endif  // TF_LITE_STATIC_MEMORY

// The size of a string tensor element is not constant, so return 0 for
// kTfLiteString (and for any unrecognized type).
int TfLiteTypeGetSize(TfLiteType type) {
  switch (type) {
    case kTfLiteUInt8:
      static_assert(sizeof(uint8_t) == 1, "");
      return 1;
    case kTfLiteInt8:
      static_assert(sizeof(int8_t) == 1, "");
      return 1;
    case kTfLiteBool:
      return sizeof(bool);
    case kTfLiteInt16:
      static_assert(sizeof(int16_t) == 2, "");
      return 2;
    case kTfLiteFloat16:
      static_assert(sizeof(int16_t) == 2, "");
      return 2;
    case kTfLiteFloat32:
      static_assert(sizeof(float) == 4, "");
      return 4;
    case kTfLiteInt32:
      static_assert(sizeof(int32_t) == 4, "");
      return 4;
    case kTfLiteUInt32:
      static_assert(sizeof(uint32_t) == 4, "");
      return 4;
    case kTfLiteInt64:
      static_assert(sizeof(int64_t) == 8, "");
      return 8;
    case kTfLiteUInt64:
      static_assert(sizeof(uint64_t) == 8, "");
      return 8;
    case kTfLiteFloat64:
      static_assert(sizeof(double) == 8, "");
      return 8;
    case kTfLiteComplex64:
      static_assert(sizeof(std::complex<float>) == 8, "");
      return 8;
    case kTfLiteComplex128:
      static_assert(sizeof(std::complex<double>) == 16, "");
      return 16;
    default:
      return 0;
  }
}
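
// Example usage (a minimal sketch, not part of this library): computing the
// byte size of a tensor's buffer from its element type and element count,
// assuming a fixed-size (non-string) type and the NumElements helper from
// kernel_util.h.
//
//   const size_t bytes =
//       static_cast<size_t>(TfLiteTypeGetSize(tensor->type)) *
//       static_cast<size_t>(NumElements(tensor));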

bool IsMobilePlatform() {
#if defined(ANDROID) || defined(__ANDROID__)
  return true;
#elif defined(__APPLE__)
#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE
  return true;
#endif
#endif
  return false;
}

}  // namespace tflite