/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_ADD_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_ADD_H_

#include <algorithm>
#include <limits>

#include "tensorflow/lite/kernels/internal/common.h"
#include "tensorflow/lite/kernels/internal/types.h"

namespace tflite {
namespace reference_integer_ops {

inline void CheckArithmeticParams(const ArithmeticParams& params) {
  TFLITE_DCHECK_LE(params.quantized_activation_min,
                   params.quantized_activation_max);
  // Input offset is the negated input zero point. Activation tensors are
  // asymmetrically quantized, so they span the full int8 range.
  TFLITE_DCHECK_GE(-params.input1_offset, std::numeric_limits<int8_t>::min());
  TFLITE_DCHECK_GE(-params.input2_offset, std::numeric_limits<int8_t>::min());
  TFLITE_DCHECK_LE(-params.input1_offset, std::numeric_limits<int8_t>::max());
  TFLITE_DCHECK_LE(-params.input2_offset, std::numeric_limits<int8_t>::max());
}

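// Applies `binary_func` to each of the `size` element pairs of two equally
// sized int8 buffers, after validating `params`.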
inline void ElementWise(
    int size, const ArithmeticParams& params, const int8_t* input1_data,
    const int8_t* input2_data, int8_t* output_data,
    void (*check_arithmetic_params)(const ArithmeticParams&),
    int8_t (*binary_func)(int8_t, int8_t, const ArithmeticParams&)) {
  check_arithmetic_params(params);
  for (int i = 0; i < size; ++i) {
    output_data[i] = binary_func(input1_data[i], input2_data[i], params);
  }
}

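// Generic 4-D broadcasting binary op. Both inputs and the output are promoted
// to rank 4, and `binary_func` is applied at every output coordinate, with
// each input index mapped through an NdArrayDesc so that size-1 dimensions are
// broadcast against the other operand (broadcasting semantics similar to
// NumPy's).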
inline void BroadcastBinaryFunction4DSlow(
    const ArithmeticParams& params, const RuntimeShape& input1_shape,
    const int8_t* input1_data, const RuntimeShape& input2_shape,
    const int8_t* input2_data, const RuntimeShape& output_shape,
    int8_t* output_data,
    void (*check_arithmetic_params)(const ArithmeticParams&),
    int8_t (*binary_func)(int8_t, int8_t, const ArithmeticParams&)) {
  check_arithmetic_params(params);
  NdArrayDesc<4> desc1;
  NdArrayDesc<4> desc2;
  NdArrayDescsForElementwiseBroadcast(input1_shape, input2_shape, &desc1,
                                      &desc2);
  const RuntimeShape extended_output_shape =
      RuntimeShape::ExtendedShape(4, output_shape);

  // In TensorFlow, the dimensions are canonically named (batch_number, row,
  // col, channel), with extents (batches, height, width, depth), and the
  // trailing dimension changes most rapidly (channels has the smallest stride,
  // typically 1 element).
  //
  // In generated C code, we store arrays with the dimensions reversed: the
  // first dimension has the smallest stride.
  //
  // We name our variables by their TensorFlow convention, but generate C code
  // nesting loops such that the innermost loop has the smallest stride for the
  // best cache behavior.
  for (int b = 0; b < extended_output_shape.Dims(0); ++b) {
    for (int y = 0; y < extended_output_shape.Dims(1); ++y) {
      for (int x = 0; x < extended_output_shape.Dims(2); ++x) {
        for (int c = 0; c < extended_output_shape.Dims(3); ++c) {
          output_data[Offset(extended_output_shape, b, y, x, c)] = binary_func(
              input1_data[SubscriptToIndex(desc1, b, y, x, c)],
              input2_data[SubscriptToIndex(desc2, b, y, x, c)], params);
        }
      }
    }
  }
}

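// Quantized int8 add for a single element pair. Sketch of the arithmetic,
// assuming the usual affine quantization scheme real = scale * (q - zero_point):
//   1. Each input is re-centered by adding its offset (the negated zero point).
//   2. Both values are left-shifted by params.left_shift to create integer
//      headroom before rescaling.
//   3. Each shifted value is rescaled onto a common accumulator scale with its
//      fixed-point multiplier/shift pair.
//   4. The rescaled values are summed, rescaled to the output scale,
//      re-centered by the output offset, and clamped to the fused activation
//      range.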
inline int8_t AddFunc(int8_t x, int8_t y, const ArithmeticParams& params) {
  const int32_t input1_val = params.input1_offset + x;
  const int32_t input2_val = params.input2_offset + y;
  const int32_t shifted_input1_val = input1_val * (1 << params.left_shift);
  const int32_t shifted_input2_val = input2_val * (1 << params.left_shift);
  const int32_t scaled_input1_val =
      MultiplyByQuantizedMultiplierSmallerThanOneExp(
          shifted_input1_val, params.input1_multiplier, params.input1_shift);
  const int32_t scaled_input2_val =
      MultiplyByQuantizedMultiplierSmallerThanOneExp(
          shifted_input2_val, params.input2_multiplier, params.input2_shift);
  const int32_t raw_sum = scaled_input1_val + scaled_input2_val;
  const int32_t raw_output =
      MultiplyByQuantizedMultiplierSmallerThanOneExp(
          raw_sum, params.output_multiplier, params.output_shift) +
      params.output_offset;
  const int32_t clamped_output =
      std::min(params.quantized_activation_max,
               std::max(params.quantized_activation_min, raw_output));
  return static_cast<int8_t>(clamped_output);
}

// Element-wise add that can often be used as the inner loop of a broadcast add
// as well as for the non-broadcast add.
inline void AddElementwise(int size, const ArithmeticParams& params,
                           const int8_t* input1_data,
                           const int8_t* input2_data, int8_t* output_data) {
  ElementWise(size, params, input1_data, input2_data, output_data,
              CheckArithmeticParams, AddFunc);
}

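// Int8 add for two inputs of identical shape. Illustrative call sketch; the
// field values below are hypothetical placeholders, normally computed by the
// op's prepare step from the tensors' quantization scales and zero points:
//
//   ArithmeticParams params;
//   params.left_shift = 20;                      // integer headroom
//   params.input1_offset = -input1_zero_point;
//   params.input2_offset = -input2_zero_point;
//   params.output_offset = output_zero_point;
//   // input1/input2/output_multiplier and *_shift: fixed-point rescaling
//   // factors for each operand and for the result.
//   params.quantized_activation_min = std::numeric_limits<int8_t>::min();
//   params.quantized_activation_max = std::numeric_limits<int8_t>::max();
//   Add(params, shape, input1_data, shape, input2_data, shape, output_data);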
inline void Add(const ArithmeticParams& params,
                const RuntimeShape& input1_shape, const int8_t* input1_data,
                const RuntimeShape& input2_shape, const int8_t* input2_data,
                const RuntimeShape& output_shape, int8_t* output_data) {
  CheckArithmeticParams(params);

  const int flat_size =
      MatchingElementsSize(input1_shape, input2_shape, output_shape);

  AddElementwise(flat_size, params, input1_data, input2_data, output_data);
}

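// Broadcasting int8 add, built from BroadcastBinaryFunction4DSlow and AddFunc.
// For example (illustrative shapes), a per-channel bias of shape [1, 1, 1, C]
// can be added to an activation of shape [N, H, W, C]; the size-1 dimensions
// of the smaller operand are broadcast across the larger one.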
inline void BroadcastAdd4DSlow(const ArithmeticParams& params,
                               const RuntimeShape& input1_shape,
                               const int8_t* input1_data,
                               const RuntimeShape& input2_shape,
                               const int8_t* input2_data,
                               const RuntimeShape& output_shape,
                               int8_t* output_data) {
  BroadcastBinaryFunction4DSlow(params, input1_shape, input1_data, input2_shape,
                                input2_data, output_shape, output_data,
                                CheckArithmeticParams, AddFunc);
}

}  // namespace reference_integer_ops
}  // namespace tflite

#endif  // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_ADD_H_