Searched refs:tensor (Results 1 – 25 of 29) sorted by relevance

/tflite-micro-3.4.0-2.7.6/tensorflow/lite/kernels/internal/
tensor_ctypes.h
  24: inline T* GetTensorData(TfLiteTensor* tensor) {
  25: return tensor != nullptr ? reinterpret_cast<T*>(tensor->data.raw) : nullptr;
  29: inline const T* GetTensorData(const TfLiteTensor* tensor) {
  30: return tensor != nullptr ? reinterpret_cast<const T*>(tensor->data.raw)
  34: inline RuntimeShape GetTensorShape(const TfLiteTensor* tensor) {
  35: if (tensor == nullptr) {
  39: TfLiteIntArray* dims = tensor->dims;
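
The hits above are the templated accessors from tensor_ctypes.h. As a rough illustration of how they are typically used together, here is a minimal sketch; it is not from the source tree, and the helper name, the float element type, and the tensor arguments are all assumptions.

```cpp
// Sketch only: element-wise copy via the tensor_ctypes.h accessors.
// "CopyFloatTensor" and its float tensors are hypothetical.
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"

void CopyFloatTensor(const TfLiteTensor* input, TfLiteTensor* output) {
  const float* in = tflite::GetTensorData<float>(input);  // nullptr-safe accessor
  float* out = tflite::GetTensorData<float>(output);
  const tflite::RuntimeShape shape = tflite::GetTensorShape(input);
  for (int i = 0; i < shape.FlatSize(); ++i) {  // FlatSize() = total element count
    out[i] = in[i];
  }
}
```
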
BUILD
  93: ":tensor",
  117: name = "tensor",
/tflite-micro-3.4.0-2.7.6/tensorflow/lite/core/api/
tensor_utils.cc
  24: TfLiteStatus ResetVariableTensor(TfLiteTensor* tensor) {
  25: if (!tensor->is_variable) {
  31: if (tensor->type == kTfLiteInt8) {
  32: value = tensor->params.zero_point;
  39: memset(tensor->data.raw, value, tensor->bytes);
  41: char* raw_ptr = tensor->data.raw;
  42: for (size_t i = 0; i < tensor->bytes; ++i) {
tensor_utils.h
  24: TfLiteStatus ResetVariableTensor(TfLiteTensor* tensor);
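
Pieced together, the ResetVariableTensor() hits describe a reset-to-zero-point routine. Below is a hedged sketch of that logic, not the actual implementation: the helper name is invented, and the early-return value for non-variable tensors is an assumption.

```cpp
// Sketch of the reset logic suggested by the hits above; not the real function.
// Assumes the tensor's buffer is already allocated.
#include <cstring>
#include "tensorflow/lite/c/common.h"

TfLiteStatus ResetVariableTensorSketch(TfLiteTensor* tensor) {
  if (!tensor->is_variable) {
    return kTfLiteOk;  // assumption: non-variable tensors are left untouched
  }
  int value = 0;
  if (tensor->type == kTfLiteInt8) {
    value = tensor->params.zero_point;  // quantized "zero" is the zero point
  }
  std::memset(tensor->data.raw, value, tensor->bytes);
  return kTfLiteOk;
}
```
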
/tflite-micro-3.4.0-2.7.6/tensorflow/lite/c/
common.c
  163: TfLiteTensor* tensor) {  [in TfLiteTensorReset()]
  164: TfLiteTensorFree(tensor);
  165: tensor->type = type;
  166: tensor->name = name;
  167: tensor->dims = dims;
  168: tensor->params = quantization;
  169: tensor->data.raw = buffer;
  170: tensor->bytes = size;
  171: tensor->allocation_type = allocation_type;
  172: tensor->allocation = allocation;
[all …]
common.h
  615: TfLiteTensor* tensor);
  619: void TfLiteTensorRealloc(size_t num_bytes, TfLiteTensor* tensor);
  689: TfLiteStatus (*ResizeTensor)(struct TfLiteContext*, TfLiteTensor* tensor,
  768: TfLiteTensor* tensor, int dims,
  936: TfLiteTensor* tensor);
  943: TfLiteTensor* tensor);
/tflite-micro-3.4.0-2.7.6/tensorflow/lite/micro/kernels/
kernel_util.cc
  30: const RuntimeShape GetTensorShape(const TfLiteEvalTensor* tensor) {
  31: if (tensor == nullptr || tensor->dims == nullptr) {
  34: TfLiteIntArray* dims = tensor->dims;
  57: TfLiteTensor* tensor,  [in CreateWritableTensorDimsWithCopy()]
  59: TF_LITE_ENSURE(context, tensor != nullptr);
  62: int ranks = tensor->dims->size;
  66: TfLiteIntArray* old_dims = tensor->dims;
  68: tensor->dims = new_dims;
fill.cc
  31: const TfLiteTensor* tensor) {  [in EnsureEqImpl()]
  33: TF_LITE_ENSURE_EQ(context, array->data[i], GetTensorData<T>(tensor)[i]);
  41: const TfLiteTensor* tensor) {  [in EnsureEq()]
  42: TF_LITE_ENSURE_EQ(context, NumDimensions(tensor), 1);
  43: const auto tensor_len = tensor->dims->data[0];
  46: switch (tensor->type) {
  48: return EnsureEqImpl<int8_t>(context, array, tensor);
  50: return EnsureEqImpl<uint8_t>(context, array, tensor);
  52: return EnsureEqImpl<int16_t>(context, array, tensor);
  54: return EnsureEqImpl<int32_t>(context, array, tensor);
[all …]
kernel_util.h
  55: T* GetTensorData(TfLiteEvalTensor* tensor) {
  56: return tensor != nullptr ? reinterpret_cast<T*>(tensor->data.raw) : nullptr;
  61: const T* GetTensorData(const TfLiteEvalTensor* tensor) {
  62: TFLITE_DCHECK(tensor != nullptr);
  63: return reinterpret_cast<const T*>(tensor->data.raw);
  67: const RuntimeShape GetTensorShape(const TfLiteEvalTensor* tensor);
  80: TfLiteTensor* tensor,
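
For the micro flavour, kernels read TfLiteEvalTensor data through these tflite::micro helpers. A minimal Eval() sketch follows, assuming a single int8 input and output at index 0 and the GetEvalInput()/GetEvalOutput() helpers from the same header (neither appears in the hits above, so treat them as assumptions); the pass-through computation is a placeholder.

```cpp
// Sketch only: pass-through Eval() using the TfLiteEvalTensor accessors.
// Assumes one int8 input at index 0 and one int8 output at index 0.
#include <cstdint>
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/micro/kernels/kernel_util.h"

TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteEvalTensor* input = tflite::micro::GetEvalInput(context, node, 0);
  TfLiteEvalTensor* output = tflite::micro::GetEvalOutput(context, node, 0);
  const int8_t* in = tflite::micro::GetTensorData<int8_t>(input);
  int8_t* out = tflite::micro::GetTensorData<int8_t>(output);
  const int flat_size = tflite::micro::GetTensorShape(input).FlatSize();
  for (int i = 0; i < flat_size; ++i) {
    out[i] = in[i];  // placeholder computation: copy input to output
  }
  return kTfLiteOk;
}
```
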
l2norm_test.cc
  45: TfLiteTensor tensor;  [in CreateL2NormTensor()]
  47: tensor = CreateQuantizedTensor(data, dims, kInputScale, kInputZeroPoint);
  49: tensor = CreateQuantizedTensor(data, dims, kOutputScale, kOutputZeroPoint);
  52: tensor.quantization.type = kTfLiteAffineQuantization;
  53: return tensor;
detection_postprocess.cc
  256: T ReInterpretTensor(const TfLiteEvalTensor* tensor) {
  257: const float* tensor_base = tflite::micro::GetTensorData<float>(tensor);
  262: T ReInterpretTensor(TfLiteEvalTensor* tensor) {
  263: float* tensor_base = tflite::micro::GetTensorData<float>(tensor);
/tflite-micro-3.4.0-2.7.6/tensorflow/lite/kernels/
kernel_util.h
  52: int index, const TfLiteTensor** tensor);
  83: int index, TfLiteTensor** tensor);
  120: TfLiteTensor** tensor);
  144: TfLiteTensor** tensor);
  178: inline bool IsConstantTensor(const TfLiteTensor* tensor) {
  179: return tensor->allocation_type == kTfLiteMmapRo;
  184: inline bool IsDynamicTensor(const TfLiteTensor* tensor) {
  185: return tensor->allocation_type == kTfLiteDynamic;
  189: inline void SetTensorToDynamic(TfLiteTensor* tensor) {
  190: if (tensor->allocation_type != kTfLiteDynamic) {
[all …]
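
IsConstantTensor(), IsDynamicTensor(), and SetTensorToDynamic() are the allocation-type checks kernels lean on when an output shape is only known at Eval() time. A hedged sketch of that pattern; the helper name and the decision structure are invented for illustration.

```cpp
// Sketch only: choosing between static and dynamic output allocation.
// "PlanOutputAllocation" and its arguments are hypothetical.
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/kernels/kernel_util.h"

void PlanOutputAllocation(const TfLiteTensor* shape_input, TfLiteTensor* output) {
  if (tflite::IsConstantTensor(shape_input)) {
    // Shape data is baked into the model (kTfLiteMmapRo), so the output
    // could be resized once, up front, during Prepare().
  } else if (!tflite::IsDynamicTensor(output)) {
    // Shape only becomes known at Eval() time: mark the output dynamic so
    // the runtime reallocates it when it is resized later.
    tflite::SetTensorToDynamic(output);
  }
}
```
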
kernel_util.cc
  98: const TfLiteTensor** tensor) {  [in GetMutableInputSafe()]
  103: *tensor = GetTensorAtIndex(context, tensor_index);
  115: int index, const TfLiteTensor** tensor) {  [in GetInputSafe()]
  116: return GetMutableInputSafe(context, node, index, tensor);
  121: TfLiteTensor* tensor = GetMutableInput(context, node, index);  [in GetVariableInput()]
  122: if (tensor == nullptr) return nullptr;
  123: return tensor->is_variable ? tensor : nullptr;
  137: int index, TfLiteTensor** tensor) {  [in GetOutputSafe()]
  142: *tensor = GetTensorAtIndex(context, tensor_index);
  164: TfLiteTensor** tensor) {  [in GetTemporarySafe()]
[all …]
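
GetInputSafe() and GetOutputSafe() return a status instead of a raw pointer, which is why they are usually wrapped in TF_LITE_ENSURE_OK. A minimal Prepare() sketch under that assumption; the tensor indices and the type check are placeholders.

```cpp
// Sketch only: fetching tensors with the "safe" accessors in Prepare().
// Index 0 for both input and output is an assumption.
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/kernels/kernel_util.h"

TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* input = nullptr;
  TF_LITE_ENSURE_OK(context, tflite::GetInputSafe(context, node, 0, &input));
  TfLiteTensor* output = nullptr;
  TF_LITE_ENSURE_OK(context, tflite::GetOutputSafe(context, node, 0, &output));
  // Placeholder consistency check; real kernels validate shapes/types here.
  TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type);
  return kTfLiteOk;
}
```
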
/tflite-micro-3.4.0-2.7.6/tensorflow/lite/micro/kernels/ethos_u/
ethosu.cc
  56: TfLiteTensor* tensor = context->GetTensor(context, node->inputs->data[0]);  [in Prepare()]
  57: data->cms_data_size = tensor->bytes;
  68: TfLiteEvalTensor* tensor;  [in Eval()]
  90: tensor = context->GetEvalTensor(context, node->inputs->data[0]);
  91: cms_data = reinterpret_cast<void*>(tensor->data.uint8);
  95: tensor = context->GetEvalTensor(context, node->inputs->data[i]);
  97: static_cast<uint64_t>(reinterpret_cast<uintptr_t>(tensor->data.uint8));
  99: for (int k = 0; k < tensor->dims->size; k++) {
  100: byte_size = byte_size * tensor->dims->data[k];
  108: tensor = context->GetEvalTensor(context, node->outputs->data[i]);
[all …]
/tflite-micro-3.4.0-2.7.6/tensorflow/lite/micro/
micro_allocator_test.cc
  40: void VerifyMockTfLiteTensor(TfLiteTensor* tensor, bool is_variable = false) {
  41: TF_LITE_MICRO_EXPECT_EQ(kTfLiteInt32, tensor->type);
  42: TF_LITE_MICRO_EXPECT_EQ(1, tensor->dims->size);
  43: TF_LITE_MICRO_EXPECT_EQ(1, tensor->dims->data[0]);
  44: TF_LITE_MICRO_EXPECT_EQ(is_variable, tensor->is_variable);
  45: TF_LITE_MICRO_EXPECT_EQ(static_cast<size_t>(4), tensor->bytes);
  46: TF_LITE_MICRO_EXPECT_NE(nullptr, tensor->data.raw);
  48: (reinterpret_cast<std::uintptr_t>(tensor->data.raw) %
  52: void VerifyMockWeightTfLiteTensor(TfLiteTensor* tensor) {
  53: TF_LITE_MICRO_EXPECT_EQ(kTfLiteUInt8, tensor->type);
[all …]
recording_micro_allocator_test.cc
  175: TfLiteTensor* tensor = micro_allocator->AllocatePersistentTfLiteTensor(  [in TF_LITE_MICRO_TEST()]
  177: TF_LITE_MICRO_EXPECT_NE(tensor, nullptr);
  178: if (tensor == nullptr) return 1;
  201: TfLiteTensor* tensor = micro_allocator->AllocatePersistentTfLiteTensor(  [in TF_LITE_MICRO_TEST()]
  203: TF_LITE_MICRO_EXPECT_NE(tensor, nullptr);
  204: if (tensor == nullptr) return 1;
micro_allocator.cc
  772: TfLiteTensor* tensor = AllocatePersistentTfLiteTensorInternal();  [in AllocatePersistentTfLiteTensor()]
  778: model, tensor, tensor_index, subgraph_index,
  791: tensor->data.data =
  795: tensor->dims =
  798: return tensor;
  810: TfLiteTensor* tensor =  [in AllocateTempTfLiteTensor()]
  817: if (PopulateTfLiteTensorFromFlatbuffer(model, tensor, tensor_index,
  831: tensor->data.data =
  835: tensor->dims =
  838: return tensor;
[all …]
micro_graph.cc
  197: auto* tensor = subgraph->tensors()->Get(i);  [in ResetVariableTensors()]
  198: if (tensor->is_variable()) {
  204: if (tensor->type() == tflite::TensorType_INT8) {
  205: value = tensor->quantization()->zero_point()->Get(0);
recording_micro_allocator.cc
  202: const Model* model, TfLiteTensor* tensor, int tensor_index,  [in PopulateTfLiteTensorFromFlatbuffer()]
  207: model, tensor, tensor_index, subgraph_index, allocate_temp);
recording_micro_allocator.h
  88: TfLiteTensor* tensor,
micro_allocator.h
  236: TfLiteTensor* tensor,
BUILD
  47: "//tensorflow/lite/kernels/internal:tensor",
  152: "//tensorflow/lite/kernels/internal:tensor",
/tflite-micro-3.4.0-2.7.6/tensorflow/lite/micro/kernels/arc_mli/
mli_interface.h
  36: MliTensorInterface(mli_tensor* tensor) : tensor_(tensor){};
/tflite-micro-3.4.0-2.7.6/tensorflow/lite/micro/kernels/vexriscv/doc/
DepthwiseConv2D_int8.md
  69: `NHWC` format `(n, height, width, channel)` and flattened to an 1-d tensor, the
  70: index of `(n, h, w, c)` in the tensor can then be calculated with `((n * H + h)
  90: access order then becomes sequential on the 1-d tensor because the layout of
  106: calculation to access the elements sequentially in the tensor, namely, `(0, 1,
  125: channels (see the colored cells in the output tensor in the figure below) the
  134: Ideally, we can use the output tensor directly as an accumulator, no extra space
  135: is needed at runtime. Yet, since the output tensor is limited (8 bits) in an
  136: integer model, accumulating intermediate values at the output tensor will cause
  141: overflow when accumulating at output tensor and provide better memory access
  145: array with size equals to the output tensor to accumulate the values.
[all …]
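
The excerpt at line 70 cuts off the index formula mid-expression; the standard row-major NHWC flattening it is describing is `((n * H + h) * W + w) * C + c`. A small worked example, with the shape (N=1, H=4, W=4, C=8) chosen arbitrarily:

```cpp
// Worked example of the row-major NHWC flattening described above.
// The shape (N=1, H=4, W=4, C=8) is an arbitrary illustration.
#include <cstdio>

int FlatIndexNHWC(int n, int h, int w, int c, int H, int W, int C) {
  return ((n * H + h) * W + w) * C + c;  // standard row-major NHWC index
}

int main() {
  // Element (0, 1, 2, 3) of a (1, 4, 4, 8) tensor:
  // ((0 * 4 + 1) * 4 + 2) * 8 + 3 = 51
  std::printf("%d\n", FlatIndexNHWC(0, 1, 2, 3, /*H=*/4, /*W=*/4, /*C=*/8));
  return 0;
}
```
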
/tflite-micro-3.4.0-2.7.6/tensorflow/lite/micro/examples/micro_speech/arc_emsdp/
emsdp.lcf
  19: # - move BSS from PSRAM to XCCM (includes tensor arena)
