/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include <cstdint>
#include <limits>
#include <type_traits>

#include "tensorflow/lite/c/builtin_op_data.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/micro/kernels/kernel_runner.h"
#include "tensorflow/lite/micro/test_helpers.h"
#include "tensorflow/lite/micro/testing/micro_test.h"

24 namespace tflite {
25 namespace testing {
26 namespace {
27
28 constexpr int kMaxInputTensors = 3;
29 constexpr int kMaxOutputTensors = 1;
30
ExecuteAddN(TfLiteTensor * tensors,int tensors_count)31 void ExecuteAddN(TfLiteTensor* tensors, int tensors_count) {
32 int input_array_data[kMaxInputTensors + kMaxOutputTensors] = {tensors_count -
33 1};
34 for (int i = 1; i < tensors_count; i++) {
35 input_array_data[i] = i - 1;
36 }
37 TfLiteIntArray* inputs_array = IntArrayFromInts(input_array_data);
38 int kOutputArrayData[] = {1, tensors_count - 1};
39 TfLiteIntArray* outputs_array = IntArrayFromInts(kOutputArrayData);
40
41 const TfLiteRegistration registration = tflite::Register_ADD_N();
42 micro::KernelRunner runner(registration, tensors, tensors_count, inputs_array,
43 outputs_array, nullptr);
44
45 TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare());
46 TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke());
47 }
48
49 template <typename T>
TestAddN(int * input_dims_data,const T * const * input_data,int input_data_count,int * expected_dims,const T * expected_data,T * output_data)50 void TestAddN(int* input_dims_data, const T* const* input_data,
51 int input_data_count, int* expected_dims, const T* expected_data,
52 T* output_data) {
53 TF_LITE_MICRO_EXPECT_LE(input_data_count, kMaxInputTensors);
54
55 TfLiteIntArray* input_dims = IntArrayFromInts(input_dims_data);
56 TfLiteIntArray* output_dims = IntArrayFromInts(expected_dims);
57 const int output_count = ElementCount(*output_dims);
58
59 TfLiteTensor tensors[kMaxInputTensors + kMaxOutputTensors] = {};
60 for (int i = 0; i < input_data_count; i++) {
61 tensors[i] = CreateTensor(input_data[i], input_dims);
62 }
63 tensors[input_data_count] = CreateTensor(output_data, output_dims);
64
65 ExecuteAddN(tensors, input_data_count + 1);
66
67 for (int i = 0; i < output_count; i++) {
68 TF_LITE_MICRO_EXPECT_EQ(expected_data[i], output_data[i]);
69 }
70 }
71
// Parameters for a quantized ADD_N test. The shared [data_min, data_max]
// range is used to compute scale, zero-point, and the comparison tolerance;
// the struct owns the quantized storage so callers deal only in float data.
template <typename T, int kNumInputs, int kOutputSize>
struct TestQuantParams {
  float data_min;                         // input and output data minimum value
  float data_max;                         // input and output data maximum value
  T input_data[kNumInputs][kOutputSize];  // quantized input storage
  T output_data[kOutputSize];             // quantized output storage
};

// For quantized ADD_N the error should not exceed one quantization step of
// the [min, max] range (step = range / number of representable levels; the
// factor of 2 allows one step of error on each side).
template <typename T>
float GetTolerance(float min, float max) {
  // Convert the limits to float BEFORE subtracting: for 8-bit T the integer
  // difference (127 - (-128) = 255) is fine, but for 32-bit T the promoted
  // integer subtraction max() - min() would overflow (UB). Floating-point
  // subtraction is safe for all integral T used here.
  const float kQuantizedStep =
      2.0f * (max - min) /
      (static_cast<float>(std::numeric_limits<T>::max()) -
       static_cast<float>(std::numeric_limits<T>::min()));
  return kQuantizedStep;
}

90 template <typename T, int kNumInputs, int kOutputSize>
TestAddNQuantized(TestQuantParams<T,kNumInputs,kOutputSize> * params,int * input_dims_data,const float * const * input_data,int * expected_dims,const float * expected_data,float * output_data)91 void TestAddNQuantized(TestQuantParams<T, kNumInputs, kOutputSize>* params,
92 int* input_dims_data, const float* const* input_data,
93 int* expected_dims, const float* expected_data,
94 float* output_data) {
95 TF_LITE_MICRO_EXPECT_LE(kNumInputs, kMaxInputTensors);
96
97 TfLiteIntArray* input_dims = IntArrayFromInts(input_dims_data);
98 TfLiteIntArray* output_dims = IntArrayFromInts(expected_dims);
99
100 const float scale = ScaleFromMinMax<T>(params->data_min, params->data_max);
101 const int zero_point =
102 ZeroPointFromMinMax<T>(params->data_min, params->data_max);
103
104 TfLiteTensor tensors[kMaxInputTensors + kMaxOutputTensors] = {};
105 for (int i = 0; i < kNumInputs; i++) {
106 tensors[i] = CreateQuantizedTensor(input_data[i], params->input_data[i],
107 input_dims, scale, zero_point);
108 }
109 tensors[kNumInputs] = CreateQuantizedTensor(params->output_data, output_dims,
110 scale, zero_point);
111
112 ExecuteAddN(tensors, kNumInputs + 1);
113
114 Dequantize(params->output_data, kOutputSize, scale, zero_point, output_data);
115 const float kTolerance = GetTolerance<T>(params->data_min, params->data_max);
116 for (int i = 0; i < kOutputSize; i++) {
117 TF_LITE_MICRO_EXPECT_NEAR(expected_data[i], output_data[i], kTolerance);
118 }
119 }
120
121 } // namespace
122 } // namespace testing
123 } // namespace tflite
124
125 TF_LITE_MICRO_TESTS_BEGIN
126
// Adds three 1x2x2x1 float tensors element-wise and checks the exact sums.
TF_LITE_MICRO_TEST(FloatAddNOpAddMultipleTensors) {
  // Dims array layout: {rank, d0, d1, d2, d3}; shared by inputs and output.
  int kDims[] = {4, 1, 2, 2, 1};
  constexpr float kInput1[] = {-2.0, 0.2, 0.7, 0.8};
  constexpr float kInput2[] = {0.1, 0.2, 0.3, 0.5};
  constexpr float kInput3[] = {0.5, 0.1, 0.1, 0.2};
  // Expected output: the element-wise sum of the three inputs.
  constexpr float kExpect[] = {-1.4, 0.5, 1.1, 1.5};
  const float* kInputs[tflite::testing::kMaxInputTensors] = {
      kInput1,
      kInput2,
      kInput3,
  };
  // Derive the counts from the arrays so they stay in sync with the data.
  constexpr int kInputCount = std::extent<decltype(kInputs)>::value;
  constexpr int kOutputCount = std::extent<decltype(kExpect)>::value;
  float output_data[kOutputCount];

  tflite::testing::TestAddN(kDims, kInputs, kInputCount, kDims, kExpect,
                            output_data);
}

// Quantized (int8) version of the multi-tensor ADD_N test: same inputs and
// expected sums as the float test, checked within one quantization step of
// the [-3, 3] range.
TF_LITE_MICRO_TEST(Int8AddNOpAddMultipleTensors) {
  // Dims array layout: {rank, d0, d1, d2, d3}; shared by inputs and output.
  int kDims[] = {4, 1, 2, 2, 1};
  constexpr float kInput1[] = {-2.0, 0.2, 0.7, 0.8};
  constexpr float kInput2[] = {0.1, 0.2, 0.3, 0.5};
  constexpr float kInput3[] = {0.5, 0.1, 0.1, 0.2};
  constexpr float kExpect[] = {-1.4, 0.5, 1.1, 1.5};
  const float* kInputs[tflite::testing::kMaxInputTensors] = {
      kInput1,
      kInput2,
      kInput3,
  };
  constexpr int kInputCount = std::extent<decltype(kInputs)>::value;
  constexpr int kOutputCount = std::extent<decltype(kExpect)>::value;
  float output_data[kOutputCount];

  tflite::testing::TestQuantParams<int8_t, kInputCount, kOutputCount> params =
      {};
  // [-3, 3] covers every input value and every expected sum.
  params.data_min = -3.0;
  params.data_max = 3.0;

  // BUG FIX: "&params" had been mangled into the mojibake "¶ms" (the
  // "&para" prefix was decoded as an HTML pilcrow entity); restore the
  // address-of expression.
  tflite::testing::TestAddNQuantized<int8_t, kInputCount, kOutputCount>(
      &params, kDims, kInputs, kDims, kExpect, output_data);
}

170 TF_LITE_MICRO_TESTS_END
171