1 /* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
2 
3 Licensed under the Apache License, Version 2.0 (the "License");
4 you may not use this file except in compliance with the License.
5 You may obtain a copy of the License at
6 
7     http://www.apache.org/licenses/LICENSE-2.0
8 
9 Unless required by applicable law or agreed to in writing, software
10 distributed under the License is distributed on an "AS IS" BASIS,
11 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 See the License for the specific language governing permissions and
13 limitations under the License.
14 ==============================================================================*/
#include "tensorflow/lite/kernels/internal/reference/quantize.h"

#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/kernels/internal/quantization_util.h"
#include "tensorflow/lite/kernels/internal/reference/requantize.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/micro/kernels/ceva/ceva_tflm_lib.h"
#include "tensorflow/lite/micro/kernels/kernel_util.h"
#include "tensorflow/lite/micro/kernels/quantize.h"
#include "tensorflow/lite/micro/micro_utils.h"
#ifdef MCPS_MEASUREMENT
#include "tensorflow/lite/micro/kernels/ceva/mcps_macros.h"
#endif
29 
30 namespace tflite {
31 namespace {
32 
Init(TfLiteContext * context,const char * buffer,size_t length)33 void* Init(TfLiteContext* context, const char* buffer, size_t length) {
34   TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr);
35   return context->AllocatePersistentBuffer(context,
36                                            sizeof(OpDataQuantizeReference));
37 }
38 
EvalCEVA(TfLiteContext * context,TfLiteNode * node)39 TfLiteStatus EvalCEVA(TfLiteContext* context, TfLiteNode* node) {
40   TFLITE_DCHECK(node->user_data != nullptr);
41 
42   auto* data = static_cast<OpDataQuantizeReference*>(node->user_data);
43 
44   const TfLiteEvalTensor* input = tflite::micro::GetEvalInput(context, node, 0);
45   TfLiteEvalTensor* output = tflite::micro::GetEvalOutput(context, node, 0);
46 
47   if (input->type == kTfLiteFloat32 && output->type == kTfLiteInt8) {
48     const float* input_data = tflite::micro::GetTensorData<float>(input);
49     int8_t* output_data = tflite::micro::GetTensorData<int8_t>(output);
50     const int flat_size =
51         MatchingFlatSize(tflite::micro::GetTensorShape(input),
52                          tflite::micro::GetTensorShape(output));
53 
54 #ifdef MCPS_MEASUREMENT
55     MCPS_START_ONE;
56 #endif
57     CEVA_TFLM_AffineQuantize_Int8(input_data, output_data, flat_size,
58                                   data->quantization_params.scale,
59                                   data->quantization_params.zero_point);
60 #ifdef MCPS_MEASUREMENT
61     MCPS_STOP_ONE("Test params:CEVA_TFLM_AffineQuantize_Int8 loop = %d",
62                   flat_size);
63 #endif
64   } else
65     return EvalQuantizeReference(context, node);
66   return kTfLiteOk;
67 }
68 
// Top-level invoke entry point: compile-time dispatch between the
// CEVA-optimized path (BX1 / SP500 targets) and the portable reference
// implementation.
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
#if defined(CEVA_BX1) || defined(CEVA_SP500)
  return EvalCEVA(context, node);
#else
  return EvalQuantizeReference(context, node);
#endif
}
76 
77 }  // namespace
78 
79 // This Op (QUANTIZE) quantizes the input and produces quantized output.
80 // AffineQuantize takes scale and zero point and quantizes the float value to
81 // quantized output, in int8_t or uint8_t format.
Register_QUANTIZE()82 TfLiteRegistration Register_QUANTIZE() {
83   return {/*init=*/Init,
84           /*free=*/nullptr,
85           /*prepare=*/PrepareQuantizeReference,
86           /*invoke=*/Eval,
87           /*profiling_string=*/nullptr,
88           /*builtin_code=*/0,
89           /*custom_name=*/nullptr,
90           /*version=*/0};
91 }
92 
93 }  // namespace tflite
94