/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "tensorflow/lite/micro/kernels/fully_connected.h"

#include "tensorflow/lite/c/builtin_op_data.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/kernels/internal/common.h"
#include "tensorflow/lite/kernels/internal/quantization_util.h"
#include "tensorflow/lite/kernels/internal/reference/fully_connected.h"
#include "tensorflow/lite/kernels/internal/reference/integer_ops/fully_connected.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/micro/kernels/kernel_util.h"

namespace tflite {
namespace {

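// Allocates the per-op OpDataFullyConnected struct from the interpreter's
// persistent arena; it is filled in by Prepare and read by Eval.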
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
  TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr);
  return context->AllocatePersistentBuffer(context,
                                           sizeof(OpDataFullyConnected));
}

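// Validates the tensor types (hybrid float/quantized graphs are rejected) and
// precomputes the op data, e.g. quantized multipliers and activation ranges,
// via CalculateOpDataFullyConnected.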
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
  TFLITE_DCHECK(node->user_data != nullptr);
  TFLITE_DCHECK(node->builtin_data != nullptr);

  auto* data = static_cast<OpDataFullyConnected*>(node->user_data);
  const auto params =
      static_cast<const TfLiteFullyConnectedParams*>(node->builtin_data);

  const TfLiteTensor* input =
      GetInput(context, node, kFullyConnectedInputTensor);
  TF_LITE_ENSURE(context, input != nullptr);
  const TfLiteTensor* filter =
      GetInput(context, node, kFullyConnectedWeightsTensor);
  TF_LITE_ENSURE(context, filter != nullptr);
  const TfLiteTensor* bias =
      GetOptionalInputTensor(context, node, kFullyConnectedBiasTensor);
  TfLiteTensor* output = GetOutput(context, node, kFullyConnectedOutputTensor);
  TF_LITE_ENSURE(context, output != nullptr);

  TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type);
  TF_LITE_ENSURE_MSG(context, input->type == filter->type,
                     "Hybrid models are not supported on TFLite Micro.");

  return CalculateOpDataFullyConnected(context, params->activation, input->type,
                                       input, filter, bias, output, data);
}

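// Dispatches to the float32 or int8 reference kernel based on the input type;
// any other type is reported as unsupported.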
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
  TFLITE_DCHECK(node->builtin_data != nullptr);
  const auto* params =
      static_cast<const TfLiteFullyConnectedParams*>(node->builtin_data);

  const TfLiteEvalTensor* input =
      tflite::micro::GetEvalInput(context, node, kFullyConnectedInputTensor);
  const TfLiteEvalTensor* filter =
      tflite::micro::GetEvalInput(context, node, kFullyConnectedWeightsTensor);
  const TfLiteEvalTensor* bias =
      tflite::micro::GetEvalInput(context, node, kFullyConnectedBiasTensor);
  TfLiteEvalTensor* output =
      tflite::micro::GetEvalOutput(context, node, kFullyConnectedOutputTensor);

  TFLITE_DCHECK(node->user_data != nullptr);
  const auto& data =
      *(static_cast<const OpDataFullyConnected*>(node->user_data));

  // Checks in Prepare ensure input, output and filter types are all the same.
  switch (input->type) {
    case kTfLiteFloat32: {
      tflite::reference_ops::FullyConnected(
          FullyConnectedParamsFloat(params->activation),
          tflite::micro::GetTensorShape(input),
          tflite::micro::GetTensorData<float>(input),
          tflite::micro::GetTensorShape(filter),
          tflite::micro::GetTensorData<float>(filter),
          tflite::micro::GetTensorShape(bias),
          tflite::micro::GetTensorData<float>(bias),
          tflite::micro::GetTensorShape(output),
          tflite::micro::GetTensorData<float>(output));
      break;
    }

    case kTfLiteInt8: {
      tflite::reference_integer_ops::FullyConnected(
          FullyConnectedParamsQuantized(data),
          tflite::micro::GetTensorShape(input),
          tflite::micro::GetTensorData<int8_t>(input),
          tflite::micro::GetTensorShape(filter),
          tflite::micro::GetTensorData<int8_t>(filter),
          tflite::micro::GetTensorShape(bias),
          tflite::micro::GetTensorData<int32_t>(bias),
          tflite::micro::GetTensorShape(output),
          tflite::micro::GetTensorData<int8_t>(output));
      break;
    }

    default: {
      TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.",
                         TfLiteTypeGetName(input->type), input->type);
      return kTfLiteError;
    }
  }
  return kTfLiteOk;
}

}  // namespace

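// Returns the registration used to add this kernel to an op resolver; the
// free and profiling_string callbacks are not needed and are left as nullptr.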
TfLiteRegistration Register_FULLY_CONNECTED() {
  return {/*init=*/Init,
          /*free=*/nullptr,
          /*prepare=*/Prepare,
          /*invoke=*/Eval,
          /*profiling_string=*/nullptr,
          /*builtin_code=*/0,
          /*custom_name=*/nullptr,
          /*version=*/0};
}

}  // namespace tflite