/tflite-micro-3.4.0-2.7.6/tensorflow/lite/micro/kernels/arc_mli/
scratch_buf_mgr.cc
    115  MliTensorInterface* bias, MliTensorInterface* out) {   in get_arc_scratch_buffer_for_conv_tensors() argument
    120  if (!inside_arc_ccm(bias->Data<int32_t>())) {   in get_arc_scratch_buffer_for_conv_tensors()
    122  mli_hlp_count_elem_num(bias->MliTensor(), 0) *   in get_arc_scratch_buffer_for_conv_tensors()
    123  mli_hlp_tensor_element_size(bias->MliTensor());   in get_arc_scratch_buffer_for_conv_tensors()
    124  bias->SetData<int32_t>(   in get_arc_scratch_buffer_for_conv_tensors()
    129  if (bias->Data<int32_t>() == NULL) {   in get_arc_scratch_buffer_for_conv_tensors()
    132  bias->SetData<int32_t>(   in get_arc_scratch_buffer_for_conv_tensors()
    137  if (bias->Data<int32_t>() == NULL) ret_val = kTfLiteError;   in get_arc_scratch_buffer_for_conv_tensors()
    165  MliTensorInterface* bias, MliTensorInterface* out) {   in get_arc_scratch_buffer_for_fully_connect_tensors() argument
    171  if (!inside_arc_ccm(bias->Data<int32_t>())) {   in get_arc_scratch_buffer_for_fully_connect_tensors()
    [all …]

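The matches above show how the ARC port makes sure the bias lives in the DSP's CCM before an MLI kernel runs: if `inside_arc_ccm()` says it does not, the byte size is computed from `mli_hlp_count_elem_num()` and `mli_hlp_tensor_element_size()`, the tensor is re-pointed at a scratch allocation, and a NULL result becomes `kTfLiteError`. Below is a minimal stand-alone sketch of that control flow; every name in it is a simplified stand-in rather than the real embARC MLI adaptation-layer API.

```cpp
#include <cstddef>
#include <cstdint>

struct BiasTensorStub {  // stand-in for MliTensorInterface
  int32_t* data;
  size_t element_count;
  size_t element_size;
};

// Stand-in helpers: the real port checks an address range and carves the
// buffer out of a dedicated CCM scratch pool.
bool InsideArcCcmStub(const void*) { return false; }
int32_t* AllocateCcmScratchStub(size_t) { return nullptr; }

// Returns true if the bias ends up in CCM; false mirrors the NULL check that
// sets ret_val = kTfLiteError in the listing.
bool EnsureBiasInCcm(BiasTensorStub* bias) {
  if (InsideArcCcmStub(bias->data)) return true;  // already in fast memory
  const size_t bytes = bias->element_count * bias->element_size;
  int32_t* scratch = AllocateCcmScratchStub(bytes);
  if (scratch == nullptr) return false;
  bias->data = scratch;  // the caller still copies the bias payload over
  return true;
}
```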
fully_connected.cc
    71   const TfLiteTensor* filter, const TfLiteTensor* bias,   in IsMliApplicable() argument
    79   (bias->type == kTfLiteInt32) &&   in IsMliApplicable()
    92   const TfLiteTensor* bias, TfLiteTensor* output,   in CalculateOpData() argument
    99   context, input, filter, bias, output, &real_multiplier));   in CalculateOpData()
    128  const TfLiteTensor* bias = GetOptionalInputTensor(context, node, kBiasTensor);   in Prepare() local
    140  filter, bias, output, data);   in Prepare()
    143  IsMliApplicable(context, input, filter, bias, params,   in Prepare()
    158  ops::micro::ConvertToMliTensor(bias, &data->mli_bias);   in Prepare()
    199  const TfLiteEvalTensor* bias,   in EvalMliQuantizedInt8() argument
    203  ops::micro::MliTensorAttachBuffer<int32_t>(bias, &data.mli_bias);   in EvalMliQuantizedInt8()
    [all …]

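The `IsMliApplicable()` matches gate the accelerated MLI path on tensor types, including `bias->type == kTfLiteInt32`. A hedged sketch of that type check follows, using only `TfLiteTensor` from `tensorflow/lite/c/common.h`; the real predicate in the ARC port also inspects quantization and op parameters that the truncated listing does not show.

```cpp
#include "tensorflow/lite/c/common.h"

// Sketch of the type gate implied by IsMliApplicable(): the MLI fast path
// expects int8 activations and weights with an int32 bias; anything else
// falls back to the reference kernel.
bool BiasTypesAllowMli(const TfLiteTensor* input, const TfLiteTensor* filter,
                       const TfLiteTensor* bias) {
  return input != nullptr && filter != nullptr && bias != nullptr &&
         input->type == kTfLiteInt8 && filter->type == kTfLiteInt8 &&
         bias->type == kTfLiteInt32;
}
```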
scratch_buf_mgr.h
    43   MliTensorInterface* bias, MliTensorInterface* out);
    77   MliTensorInterface* bias, MliTensorInterface* out);
    120  const MliTensorInterface* weights, const MliTensorInterface* bias,

conv.cc
    102  const TfLiteTensor* filter, const TfLiteTensor* bias,   in IsMliApplicable() argument
    109  (input->type == kTfLiteInt8) && (bias->type == kTfLiteInt32) &&   in IsMliApplicable()
    138  const TfLiteTensor* bias = GetOptionalInputTensor(context, node, kBiasTensor);   in CalculateOpData() local
    145  context, input, filter, bias, output, params->activation,   in CalculateOpData()
    170  const TfLiteTensor* bias = GetOptionalInputTensor(context, node, kBiasTensor);   in Prepare() local
    194  IsMliApplicable(context, input, filter, bias, params);   in Prepare()
    279  ops::micro::ConvertToMliTensorPerChannel(bias, &data->mli_bias,   in Prepare()
    317  const TfLiteEvalTensor* filter, const TfLiteEvalTensor* bias,   in EvalQuantized() argument
    344  tflite::micro::GetTensorShape(bias),   in EvalQuantized()
    345  tflite::micro::GetTensorData<int32_t>(bias),   in EvalQuantized()
    [all …]

depthwise_conv.cc
    97   const TfLiteTensor* filter, const TfLiteTensor* bias,   in IsMliApplicable() argument
    113  (input->type == kTfLiteInt8) && (bias->type == kTfLiteInt32) &&   in IsMliApplicable()
    146  const TfLiteTensor* bias = GetOptionalInputTensor(context, node, kBiasTensor);   in CalculateOpData() local
    153  context, input, filter, bias, output, params->activation,   in CalculateOpData()
    179  const TfLiteTensor* bias = GetOptionalInputTensor(context, node, kBiasTensor);   in Prepare() local
    205  IsMliApplicable(context, input, filter, bias, params);   in Prepare()
    305  ops::micro::ConvertToMliTensorPerChannel(bias, &data->mli_bias,   in Prepare()
    344  const TfLiteEvalTensor* bias, TfLiteEvalTensor* output) {   in EvalFloat() argument
    368  tflite::micro::GetTensorShape(bias),   in EvalFloat()
    369  tflite::micro::GetTensorData<float>(bias),   in EvalFloat()
    [all …]

/tflite-micro-3.4.0-2.7.6/third_party/hexagon/
fully_connected.cc
    91   const TfLiteTensor* bias, TfLiteTensor* output,   in CalculateOpData() argument
    97   context, input, filter, bias, output, &real_multiplier));   in CalculateOpData()
    139  const TfLiteTensor* bias = GetOptionalInputTensor(context, node, kBiasTensor);   in Prepare() local
    153  filter, bias, output, data);   in Prepare()
    161  const TfLiteEvalTensor* bias,   in EvalQuantizedInt8() argument
    178  tflite::micro::GetTensorShape(bias),   in EvalQuantizedInt8()
    179  tflite::micro::GetTensorData<int32_t>(bias),   in EvalQuantizedInt8()
    189  const TfLiteEvalTensor* bias,   in EvalQuantized() argument
    211  tflite::micro::GetTensorShape(bias), \   in EvalQuantized()
    212  tflite::micro::GetTensorData<int32_t>(bias), \   in EvalQuantized()
    [all …]

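The `CalculateOpData()` matches pass the bias into a helper that yields `real_multiplier`. The quantization relation behind that value is the standard TFLite convention: the int32 bias is quantized with scale `input_scale * filter_scale`, and the output multiplier is that product divided by the output scale. A plain-arithmetic sketch of the relation (not the library helper itself):

```cpp
// Standard TFLite quantization relation behind the &real_multiplier matches
// above; plain arithmetic, not code from this file.
double FullyConnectedRealMultiplier(double input_scale, double filter_scale,
                                    double output_scale) {
  // The int32 bias uses scale input_scale * filter_scale, so it can be added
  // to the int32 accumulator without any rescaling.
  return (input_scale * filter_scale) / output_scale;
}
```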
svdf.cc
    163  const TfLiteEvalTensor* weights_time, const TfLiteEvalTensor* bias,   in EvalFloatSVDF() argument
    177  const float* bias_ptr = tflite::micro::GetTensorData<float>(bias);   in EvalFloatSVDF()
    417  const TfLiteTensor* bias = GetOptionalInputTensor(context, node, kBiasTensor);   in Prepare() local
    455  if (bias != nullptr) {   in Prepare()
    456  TF_LITE_ENSURE_EQ(context, bias->dims->data[0], num_units);   in Prepare()
    476  if (bias != nullptr) {   in Prepare()
    477  TF_LITE_ENSURE_EQ(context, bias->type, kTfLiteInt32);   in Prepare()
    492  std::abs(static_cast<double>(bias->params.scale) -   in Prepare()
    527  if (bias != nullptr) {   in Prepare()
    528  TF_LITE_ENSURE_EQ(context, bias->type, kTfLiteFloat32);   in Prepare()
    [all …]

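In `EvalFloatSVDF()` the `bias_ptr` captured above is applied once per unit after the time-filter reduction. A minimal sketch of that step, assuming the usual row-major `[batch_size x num_units]` output layout:

```cpp
// One bias value per unit, added to every batch row of the output before the
// activation; a null bias_ptr means the optional bias input was omitted.
void AddSvdfBias(const float* bias_ptr, int batch_size, int num_units,
                 float* output) {
  if (bias_ptr == nullptr) return;
  for (int b = 0; b < batch_size; ++b) {
    for (int u = 0; u < num_units; ++u) {
      output[b * num_units + u] += bias_ptr[u];
    }
  }
}
```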
/tflite-micro-3.4.0-2.7.6/tensorflow/lite/micro/kernels/xtensa/
depthwise_conv.cc
    65   const TfLiteEvalTensor* bias =   in Eval() local
    87   tflite::micro::GetTensorShape(bias),   in Eval()
    88   tflite::micro::GetTensorData<int32_t>(bias),   in Eval()
    106  tflite::micro::GetTensorShape(bias),   in Eval()
    107  tflite::micro::GetTensorData<int32_t>(bias),   in Eval()
    111  DepthwiseConvEvalHifi(context, node, params, op_data, input, filter, bias,   in Eval()
    122  tflite::micro::GetTensorShape(bias),   in Eval()
    123  tflite::micro::GetTensorData<int32_t>(bias),   in Eval()

fully_connected.cc
    123  const TfLiteTensor* bias, TfLiteTensor* output,   in CalculateOpData() argument
    127  context, input, filter, bias, output, &real_multiplier));   in CalculateOpData()
    162  const TfLiteTensor* bias =   in Prepare() local
    179  filter, bias, output, data);   in Prepare()
    186  const TfLiteEvalTensor* bias,   in EvalQuantizedInt8() argument
    201  tflite::micro::GetTensorShape(bias),   in EvalQuantizedInt8()
    202  tflite::micro::GetTensorData<int32_t>(bias),   in EvalQuantizedInt8()
    222  tflite::micro::GetTensorData<int32_t>(bias), accum_depth,   in EvalQuantizedInt8()
    241  tflite::micro::GetTensorShape(bias),   in EvalQuantizedInt8()
    242  tflite::micro::GetTensorData<int32_t>(bias),   in EvalQuantizedInt8()
    [all …]

conv.cc
    65   const TfLiteEvalTensor* bias =   in Eval() local
    88   tflite::micro::GetTensorShape(bias),   in Eval()
    89   tflite::micro::GetTensorData<int32_t>(bias),   in Eval()
    106  tflite::micro::GetTensorShape(bias),   in Eval()
    107  tflite::micro::GetTensorData<int32_t>(bias),   in Eval()
    111  ConvEvalHifi(context, node, params, op_data, input, filter, bias, output);   in Eval()

conv_int8_reference.cc
    50   const TfLiteEvalTensor* bias =   in ConvReferenceEvalInt8() local
    62   tflite::micro::GetTensorShape(bias),   in ConvReferenceEvalInt8()
    63   tflite::micro::GetTensorData<int32_t>(bias),   in ConvReferenceEvalInt8()

depthwise_conv_hifi.cc
    90   const TfLiteEvalTensor* bias,   in DepthwiseConvEvalHifi() argument
    110  const RuntimeShape& bias_shape = tflite::micro::GetTensorShape(bias);   in DepthwiseConvEvalHifi()
    129  const int32_t* bias_data = tflite::micro::GetTensorData<int32_t>(bias);   in DepthwiseConvEvalHifi()
    174  tflite::micro::GetTensorShape(bias),   in DepthwiseConvEvalHifi()
    175  tflite::micro::GetTensorData<int32_t>(bias),   in DepthwiseConvEvalHifi()

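`DepthwiseConvEvalHifi()` pulls the bias shape and data into locals once (lines 110 and 129 above) before the vendor call. A small illustrative sketch of that capture, bundling the two into one value; `BiasView` is not a type from the port.

```cpp
#include <cstdint>

#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/micro/kernels/kernel_util.h"

// Illustrative bundle of what the HiFi kernel reads from the bias tensor.
struct BiasView {
  tflite::RuntimeShape shape;
  const int32_t* data;
};

BiasView MakeBiasView(const TfLiteEvalTensor* bias) {
  // The micro helpers return an empty shape and a null pointer for a null
  // tensor, so an omitted bias needs no special casing here.
  return BiasView{tflite::micro::GetTensorShape(bias),
                  tflite::micro::GetTensorData<int32_t>(bias)};
}
```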
/tflite-micro-3.4.0-2.7.6/tensorflow/lite/micro/kernels/
conv.cc
    42   const TfLiteEvalTensor* bias =   in Eval() local
    69   tflite::micro::GetTensorShape(bias),   in Eval()
    70   tflite::micro::GetTensorData<float>(bias),   in Eval()
    83   tflite::micro::GetTensorShape(bias),   in Eval()
    84   tflite::micro::GetTensorData<std::int64_t>(bias),   in Eval()
    96   tflite::micro::GetTensorShape(bias),   in Eval()
    97   tflite::micro::GetTensorData<int32_t>(bias),   in Eval()

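The reference `conv.cc` matches show the bias type tracking the quantization mode: float models read a float bias (lines 69-70), the 16x8 path an int64 bias (83-84), and int8 models an int32 bias (96-97). The helper below makes that mapping explicit; it is an illustration, not a function from the kernel.

```cpp
#include <cstdint>

#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/micro/kernels/kernel_util.h"

// Maps the conv input type to the bias representation the reference kernels
// expect, per the matches above. Returns an untyped pointer purely for
// illustration; the real kernel calls the matching GetTensorData<T> inline.
const void* BiasDataForInputType(const TfLiteEvalTensor* bias,
                                 TfLiteType input_type) {
  if (bias == nullptr) return nullptr;  // bias is an optional input
  switch (input_type) {
    case kTfLiteFloat32:
      return tflite::micro::GetTensorData<float>(bias);
    case kTfLiteInt8:
      return tflite::micro::GetTensorData<int32_t>(bias);
    case kTfLiteInt16:
      return tflite::micro::GetTensorData<std::int64_t>(bias);
    default:
      return nullptr;
  }
}
```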
fully_connected.cc
    51   const TfLiteTensor* bias =   in Prepare() local
    61   input, filter, bias, output, data);   in Prepare()
    73   const TfLiteEvalTensor* bias =   in Eval() local
    91   tflite::micro::GetTensorShape(bias),   in Eval()
    92   tflite::micro::GetTensorData<float>(bias),   in Eval()
    105  tflite::micro::GetTensorShape(bias),   in Eval()
    106  tflite::micro::GetTensorData<int32_t>(bias),   in Eval()

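Eval() resolves the optional bias as a `TfLiteEvalTensor` (line 73 above) before handing shapes and data to the reference op. A hedged sketch of that lookup; `kBiasTensor = 2` follows the usual (input, weights, bias) ordering and is an assumption here, as is the exact way a missing bias is encoded (fewer inputs, or an input slot of -1).

```cpp
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/micro/kernels/kernel_util.h"

constexpr int kBiasTensor = 2;  // assumed ordering: input=0, filter=1, bias=2

const TfLiteEvalTensor* GetOptionalBias(TfLiteContext* context,
                                        TfLiteNode* node) {
  if (node->inputs->size <= kBiasTensor) return nullptr;    // bias not wired up
  if (node->inputs->data[kBiasTensor] < 0) return nullptr;  // optional slot left empty
  return tflite::micro::GetEvalInput(context, node, kBiasTensor);
}
```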
depthwise_conv.cc
    51   const TfLiteEvalTensor* bias =   in Eval() local
    64   tflite::micro::GetTensorShape(bias),   in Eval()
    65   tflite::micro::GetTensorData<float>(bias),   in Eval()
    78   tflite::micro::GetTensorShape(bias),   in Eval()
    79   tflite::micro::GetTensorData<int32_t>(bias),   in Eval()

transpose_conv.cc
    99   const TfLiteTensor* bias =   in CalculateOpData() local
    106  context, input, filter, bias, output, params->activation,   in CalculateOpData()
    118  if (bias->type == kTfLiteInt16) {   in CalculateOpData()
    121  context, GetTensorShape(bias).FlatSize() * sizeof(std::int64_t),   in CalculateOpData()
    229  const TfLiteEvalTensor* bias =   in Eval() local
    253  tflite::micro::GetTensorShape(bias),   in Eval()
    254  tflite::micro::GetTensorData<float>(bias),   in Eval()
    269  tflite::micro::GetTensorShape(bias),   in Eval()
    270  tflite::micro::GetTensorData<int32_t>(bias),   in Eval()
    281  if (bias->type == kTfLiteInt16) {   in Eval()
    [all …]

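`transpose_conv.cc` is the one reference kernel above that special-cases the bias type in Prepare: when the bias arrives as int16 (line 118), it reserves a scratch buffer sized for int64 elements (line 121) so the bias can be widened before the reference call. A hedged sketch of that reservation, assuming the TFLM `RequestScratchBufferInArena` context API; the element-count loop stands in for the `GetTensorShape(bias).FlatSize()` call in the listing.

```cpp
#include <cstddef>
#include <cstdint>

#include "tensorflow/lite/c/common.h"

TfLiteStatus MaybeReserveInt64BiasScratch(TfLiteContext* context,
                                          const TfLiteTensor* bias,
                                          int* scratch_index) {
  if (bias == nullptr || bias->type != kTfLiteInt16) return kTfLiteOk;
  size_t elements = 1;
  for (int i = 0; i < bias->dims->size; ++i) elements *= bias->dims->data[i];
  // Reserve room to widen each int16 bias value to int64 at eval time.
  return context->RequestScratchBufferInArena(
      context, elements * sizeof(std::int64_t), scratch_index);
}
```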
svdf.cc
    50   const TfLiteEvalTensor* bias =   in Eval() local
    62   context, node, input, weights_feature, weights_time, bias, params,   in Eval()
    70   weights_time, bias, params, activation_state,   in Eval()

svdf_common.cc
    260  const TfLiteEvalTensor* weights_time, const TfLiteEvalTensor* bias,   in EvalFloatSvdfReference() argument
    274  const float* bias_ptr = tflite::micro::GetTensorData<float>(bias);   in EvalFloatSvdfReference()
    350  const TfLiteTensor* bias =   in PrepareSvdf() local
    389  if (bias != nullptr) {   in PrepareSvdf()
    390  TF_LITE_ENSURE_EQ(context, bias->dims->data[0], num_units);   in PrepareSvdf()
    410  if (bias != nullptr) {   in PrepareSvdf()
    411  TF_LITE_ENSURE_EQ(context, bias->type, kTfLiteInt32);   in PrepareSvdf()
    426  std::abs(static_cast<double>(bias->params.scale) -   in PrepareSvdf()
    454  if (bias != nullptr) {   in PrepareSvdf()
    455  TF_LITE_ENSURE_EQ(context, bias->type, kTfLiteFloat32);   in PrepareSvdf()

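`PrepareSvdf()` validates the optional bias three ways above: its first dimension must equal `num_units`, its type must be int32 (quantized) or float32 (float), and for the quantized path its scale is compared against a derived value. A sketch of those checks with plain early returns instead of the `TF_LITE_ENSURE_*` macros; the tolerance and the expected scale are assumptions, since the listing only shows the `std::abs` comparison.

```cpp
#include <cmath>

#include "tensorflow/lite/c/common.h"

TfLiteStatus CheckSvdfBias(const TfLiteTensor* bias, int num_units,
                           bool fully_quantized, double expected_bias_scale) {
  if (bias == nullptr) return kTfLiteOk;  // the bias input is optional
  if (bias->dims->data[0] != num_units) return kTfLiteError;
  if (fully_quantized) {
    if (bias->type != kTfLiteInt32) return kTfLiteError;
    // Assumed tolerance; the real check compares against a scale derived from
    // the other tensors' quantization parameters.
    if (std::abs(static_cast<double>(bias->params.scale) -
                 expected_bias_scale) > 1e-5) {
      return kTfLiteError;
    }
  } else {
    if (bias->type != kTfLiteFloat32) return kTfLiteError;
  }
  return kTfLiteOk;
}
```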
/tflite-micro-3.4.0-2.7.6/tensorflow/lite/micro/kernels/ceva/
fully_connected.cc
    61   const TfLiteTensor* bias =   in Prepare() local
    70   input, filter, bias, output, data);   in Prepare()
    77   const TfLiteEvalTensor* bias,   in EvalQuantizedInt8CEVA() argument
    88   tflite::micro::GetTensorShape(bias).DimensionsCount();   in EvalQuantizedInt8CEVA()
    100  const_cast<int32_t*>(tflite::micro::GetTensorData<int32_t>(bias));   in EvalQuantizedInt8CEVA()
    139  const TfLiteEvalTensor* bias,   in EvalFloatCEVA() argument
    156  tflite::micro::GetTensorShape(bias).DimensionsCount();   in EvalFloatCEVA()
    167  float* biasp = const_cast<float*>(tflite::micro::GetTensorData<float>(bias));   in EvalFloatCEVA()
    211  const TfLiteEvalTensor* bias =   in EvalCEVA() local
    224  bias, output);   in EvalCEVA()
    [all …]

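The CEVA kernels const_cast the bias pointers (lines 100 and 167 above) because the vendor entry points take non-const buffers even for read-only inputs. A minimal sketch of that adapter step; the vendor call itself is not shown.

```cpp
#include <cstdint>

#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/micro/kernels/kernel_util.h"

int32_t* MutableBiasForVendorCall(const TfLiteEvalTensor* bias) {
  // Only safe because the vendor kernel never writes through this pointer.
  return const_cast<int32_t*>(tflite::micro::GetTensorData<int32_t>(bias));
}
```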
depthwise_conv.cc
    46   const TfLiteEvalTensor* bias, TfLiteEvalTensor* output) {   in EvalFloat() argument
    57   bias_data = tflite::micro::GetTensorData<float>(bias);   in EvalFloat()
    62   const RuntimeShape& bias_shape = tflite::micro::GetTensorShape(bias);   in EvalFloat()
    118  const TfLiteEvalTensor* bias,   in EvalQuantizedPerChannel() argument
    133  bias_data = tflite::micro::GetTensorData<int32_t>(bias);   in EvalQuantizedPerChannel()
    138  const RuntimeShape& bias_shape = tflite::micro::GetTensorShape(bias);   in EvalQuantizedPerChannel()
    210  const TfLiteEvalTensor* bias =   in EvalCEVA() local
    219  EvalFloat(context, node, params, data, input, filter, bias, output);   in EvalCEVA()
    222  EvalQuantizedPerChannel(context, node, params, data, input, filter, bias,   in EvalCEVA()

conv.cc
    46   const TfLiteEvalTensor* bias,   in EvalQuantizedPerChannel() argument
    61   const RuntimeShape& bias_shape = tflite::micro::GetTensorShape(bias);   in EvalQuantizedPerChannel()
    90   bias_data = tflite::micro::GetTensorData<int32_t>(bias);   in EvalQuantizedPerChannel()
    134  const TfLiteEvalTensor* bias, TfLiteEvalTensor* im2col,   in EvalFloat() argument
    147  const RuntimeShape& bias_shape = tflite::micro::GetTensorShape(bias);   in EvalFloat()
    177  bias_data = tflite::micro::GetTensorData<float>(bias);   in EvalFloat()
    213  const TfLiteEvalTensor* bias =   in EvalCEVA() local
    229  EvalFloat(context, node, params, data, input, filter, bias, nullptr,   in EvalCEVA()
    233  EvalQuantizedPerChannel(context, node, params, data, input, filter, bias,   in EvalCEVA()

/tflite-micro-3.4.0-2.7.6/tensorflow/lite/micro/kernels/cmsis_nn/
fully_connected.cc
    63   const TfLiteTensor* bias =   in Prepare() local
    75   context, params->activation, input->type, input, filter, bias, output,   in Prepare()
    107  const TfLiteEvalTensor* bias,   in EvalQuantizedInt8() argument
    167  tflite::micro::GetTensorData<int32_t>(bias), &output_dims,   in EvalQuantizedInt8()
    183  const TfLiteEvalTensor* bias =   in Eval() local
    200  tflite::micro::GetTensorShape(bias),   in Eval()
    201  tflite::micro::GetTensorData<float>(bias),   in Eval()
    207  return EvalQuantizedInt8(context, node, data, input, filter, bias,   in Eval()
    230  const TfLiteEvalTensor* bias =   in EvalInt8() local
    245  return EvalQuantizedInt8(context, node, data, input, filter, bias, output);   in EvalInt8()

conv.cc
    131  const TfLiteEvalTensor* filter, const TfLiteEvalTensor* bias,   in EvalQuantizedPerChannel() argument
    160  RuntimeShape bias_shape = tflite::micro::GetTensorShape(bias);   in EvalQuantizedPerChannel()
    170  if (tflite::micro::GetTensorData<int8_t>(bias)) {   in EvalQuantizedPerChannel()
    222  tflite::micro::GetTensorData<int32_t>(bias), &output_dims,   in EvalQuantizedPerChannel()
    234  tflite::micro::GetTensorShape(bias),   in EvalQuantizedPerChannel()
    235  tflite::micro::GetTensorData<int32_t>(bias),   in EvalQuantizedPerChannel()
    247  const TfLiteEvalTensor* bias =   in Eval() local
    275  tflite::micro::GetTensorShape(bias),   in Eval()
    276  tflite::micro::GetTensorData<float>(bias),   in Eval()
    284  bias, output, nullptr);   in Eval()
    [all …]

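The CMSIS-NN matches guard on the bias data pointer (line 170 above, `if (tflite::micro::GetTensorData<int8_t>(bias))`) so that a model without a bias simply passes a null pointer through to the backend. A sketch of that guard; the CMSIS-NN call itself is omitted.

```cpp
#include <cstdint>

#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/micro/kernels/kernel_util.h"

const int32_t* OptionalBiasData(const TfLiteEvalTensor* bias) {
  // GetTensorData already yields nullptr for a null tensor, so one call
  // covers both the "no bias input" and "bias present" cases.
  return bias != nullptr ? tflite::micro::GetTensorData<int32_t>(bias)
                         : nullptr;
}
```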
depthwise_conv.cc
    157  const TfLiteEvalTensor* bias,   in EvalQuantizedPerChannel() argument
    184  RuntimeShape bias_shape = tflite::micro::GetTensorShape(bias);   in EvalQuantizedPerChannel()
    192  if (tflite::micro::GetTensorData<int8_t>(bias)) {   in EvalQuantizedPerChannel()
    234  tflite::micro::GetTensorData<int32_t>(bias), &output_dims,   in EvalQuantizedPerChannel()
    246  tflite::micro::GetTensorShape(bias),   in EvalQuantizedPerChannel()
    247  tflite::micro::GetTensorData<int32_t>(bias),   in EvalQuantizedPerChannel()
    267  const TfLiteEvalTensor* bias =   in Eval() local
    280  tflite::micro::GetTensorShape(bias),   in Eval()
    281  tflite::micro::GetTensorData<float>(bias),   in Eval()
    287  EvalQuantizedPerChannel(context, node, params, data, input, filter, bias,   in Eval()

svdf.cc
    136  const TfLiteEvalTensor* weights_time, const TfLiteEvalTensor* bias,   in EvalFloatSVDF() argument
    150  const float* bias_ptr = tflite::micro::GetTensorData<float>(bias);   in EvalFloatSVDF()
    306  const TfLiteTensor* bias = GetOptionalInputTensor(context, node, kBiasTensor);   in Prepare() local
    343  if (bias != nullptr) {   in Prepare()
    344  TF_LITE_ENSURE_EQ(context, bias->dims->data[0], num_units);   in Prepare()
    364  if (bias != nullptr) {   in Prepare()
    365  TF_LITE_ENSURE_EQ(context, bias->type, kTfLiteInt32);   in Prepare()
    380  std::abs(static_cast<double>(bias->params.scale) -   in Prepare()
    408  if (bias != nullptr) {   in Prepare()
    409  TF_LITE_ENSURE_EQ(context, bias->type, kTfLiteFloat32);   in Prepare()
    [all …]
