1 /*
2  * SPDX-FileCopyrightText: Copyright 2020-2023 Arm Limited and/or its affiliates <open-source-office@arm.com>
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  *
6  * Licensed under the Apache License, Version 2.0 (the License); you may
7  * not use this file except in compliance with the License.
8  * You may obtain a copy of the License at
9  *
10  * www.apache.org/licenses/LICENSE-2.0
11  *
12  * Unless required by applicable law or agreed to in writing, software
13  * distributed under the License is distributed on an AS IS BASIS, WITHOUT
14  * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15  * See the License for the specific language governing permissions and
16  * limitations under the License.
17  */
18 
19 /* ----------------------------------------------------------------------
20  * Project:      CMSIS NN Library
21  * Title:        arm_nn_types.h
22  * Description:  Public header file to contain the CMSIS-NN structs for the
23  *               TensorFlowLite micro compliant functions
24  *
25  * $Date:        8 March 2023
26  * $Revision:    V.2.5.0
27  *
28  * Target :  Arm(R) M-Profile Architecture
29  * -------------------------------------------------------------------- */
30 
31 #ifndef _ARM_NN_TYPES_H
32 #define _ARM_NN_TYPES_H
33 
34 #include <stdint.h>
35 
/** Enum for specifying activation function types used e.g. by LSTM gate computations */
typedef enum
{
    ARM_SIGMOID = 0, /**< Sigmoid activation function */
    ARM_TANH = 1,    /**< Tanh activation function */
} arm_nn_activation_type;
42 
/** Function return codes. Success is 0; all error codes are negative. */
typedef enum
{
    ARM_CMSIS_NN_SUCCESS = 0,        /**< No error */
    ARM_CMSIS_NN_ARG_ERROR = -1,     /**< One or more arguments are incorrect */
    ARM_CMSIS_NN_NO_IMPL_ERROR = -2, /**<  No implementation available */
    ARM_CMSIS_NN_FAILURE = -3,       /**<  Logical error */
} arm_cmsis_nn_status;
51 
/** CMSIS-NN object to contain the width and height of a tile (e.g. a stride, padding or dilation pair) */
typedef struct
{
    int32_t w; /**< Width */
    int32_t h; /**< Height */
} cmsis_nn_tile;
58 
/** CMSIS-NN object used for the function context. Carries a caller-provided scratch buffer;
    ownership of the buffer remains with the caller. */
typedef struct
{
    void *buf;    /**< Pointer to a buffer needed for the optimization */
    int32_t size; /**< Buffer size in bytes */
} cmsis_nn_context;
65 
/** CMSIS-NN object to contain the dimensions of a tensor (NHWC ordering of the fields) */
typedef struct
{
    int32_t n; /**< Generic dimension to contain either the batch size or output channels.
                     Please refer to the function documentation for more information */
    int32_t h; /**< Height */
    int32_t w; /**< Width */
    int32_t c; /**< Input channels */
} cmsis_nn_dims;
75 
/** CMSIS-NN object to contain LSTM specific input parameters related to dimensions */
typedef struct
{
    int32_t max_time;    /**< Number of time steps in the input sequence */
    int32_t num_inputs;  /**< Number of input features per time step */
    int32_t num_batches; /**< Number of batches processed in one call */
    int32_t num_outputs; /**< Number of output units (size of the output/hidden state) */
} cmsis_nn_lstm_dims;
84 
/** CMSIS-NN object for the per-channel quantization parameters.
    Arrays hold one fixed-point multiplier/shift pair per output channel. */
typedef struct
{
    int32_t *multiplier; /**< Multiplier values */
    int32_t *shift;      /**< Shift values */
} cmsis_nn_per_channel_quant_params;
91 
/** CMSIS-NN object for the per-tensor quantization parameters (single multiplier/shift pair) */
typedef struct
{
    int32_t multiplier; /**< Multiplier value */
    int32_t shift;      /**< Shift value */
} cmsis_nn_per_tensor_quant_params;
98 
/** CMSIS-NN object for the quantized Relu activation: output values are clamped to [min, max] */
typedef struct
{
    int32_t min; /**< Min value used to clamp the result */
    int32_t max; /**< Max value used to clamp the result */
} cmsis_nn_activation;
105 
/** CMSIS-NN object for the convolution layer parameters */
typedef struct
{
    int32_t input_offset;           /**< Zero value for the input tensor */
    int32_t output_offset;          /**< Zero value for the output tensor */
    cmsis_nn_tile stride;           /**< Stride in w and h direction */
    cmsis_nn_tile padding;          /**< Padding in w and h direction */
    cmsis_nn_tile dilation;         /**< Dilation in w and h direction */
    cmsis_nn_activation activation; /**< Min/max clamp applied to the output */
} cmsis_nn_conv_params;
116 
/** CMSIS-NN object for Depthwise convolution layer parameters */
typedef struct
{
    int32_t input_offset;           /**< Zero value for the input tensor */
    int32_t output_offset;          /**< Zero value for the output tensor */
    int32_t ch_mult;                /**< Channel Multiplier. ch_mult * in_ch = out_ch */
    cmsis_nn_tile stride;           /**< Stride in w and h direction */
    cmsis_nn_tile padding;          /**< Padding in w and h direction */
    cmsis_nn_tile dilation;         /**< Dilation in w and h direction */
    cmsis_nn_activation activation; /**< Min/max clamp applied to the output */
} cmsis_nn_dw_conv_params;
/** CMSIS-NN object for pooling layer parameters */
typedef struct
{
    cmsis_nn_tile stride;           /**< Stride in w and h direction */
    cmsis_nn_tile padding;          /**< Padding in w and h direction */
    cmsis_nn_activation activation; /**< Min/max clamp applied to the output */
} cmsis_nn_pool_params;
135 
/** CMSIS-NN object for Fully Connected layer parameters */
typedef struct
{
    int32_t input_offset;           /**< Zero value for the input tensor */
    int32_t filter_offset;          /**< Zero value for the filter tensor. Not used */
    int32_t output_offset;          /**< Zero value for the output tensor */
    cmsis_nn_activation activation; /**< Min/max clamp applied to the output */
} cmsis_nn_fc_params;
144 
/** CMSIS-NN object for SVDF layer parameters */
typedef struct
{
    int32_t rank;                          /**< SVDF rank (number of rank-1 feature/time factorizations) */
    int32_t input_offset;                  /**< Zero value for the input tensor */
    int32_t output_offset;                 /**< Zero value for the output tensor */
    cmsis_nn_activation input_activation;  /**< Min/max clamp applied to the intermediate state */
    cmsis_nn_activation output_activation; /**< Min/max clamp applied to the output */
} cmsis_nn_svdf_params;
154 
/** CMSIS-NN object for Softmax s16 layer parameters.
    NOTE(review): both tables appear to be precomputed lookup tables for the s16 softmax —
    exp_lut for the exponential and one_by_one_lut presumably for 1/(1+x); confirm against
    the softmax s16 function documentation. */
typedef struct
{
    const int16_t *exp_lut;        /**< Lookup table for the exponential */
    const int16_t *one_by_one_lut; /**< Lookup table for the reciprocal term — TODO confirm exact mapping */
} cmsis_nn_softmax_lut_s16;
161 
/** LSTM guard parameters.
    NOTE(review): field names suggest per-gate variance guard thresholds used by the
    layer-normalized LSTM path; exact semantics are not visible here — verify against
    the LSTM kernel implementation. */
typedef struct
{
    int32_t input_variance;  /**< Guard value for the input gate */
    int32_t forget_variance; /**< Guard value for the forget gate */
    int32_t cell_variance;   /**< Guard value for the cell gate */
    int32_t output_variance; /**< Guard value for the output gate */
} cmsis_nn_lstm_guard_params;
170 
/** LSTM scratch buffer container: one caller-provided scratch buffer per gate */
typedef struct
{
    int16_t *input_gate;  /**< Scratch buffer for the input gate */
    int16_t *forget_gate; /**< Scratch buffer for the forget gate */
    int16_t *cell_gate;   /**< Scratch buffer for the cell gate */
    int16_t *output_gate; /**< Scratch buffer for the output gate */
} cmsis_nn_lstm_context;
179 
/** Quantized clip value for cell and projection of LSTM input. Zero value means no clipping. */
typedef struct
{
    int16_t cell;      /**< Clip value for the cell state (0 = no clipping) */
    int8_t projection; /**< Clip value for the projection (0 = no clipping) */
} cmsis_nn_lstm_clip_params;
186 
/** CMSIS-NN object for quantization parameters (fixed-point multiplier/shift pair) */
typedef struct
{
    int32_t multiplier; /**< Multiplier value */
    int32_t shift;      /**< Shift value */
} cmsis_nn_scaling;
193 
/** CMSIS-NN norm layer coefficients: per-gate layer normalization weight vectors for LSTM */
typedef struct
{
    int16_t *input_weight;  /**< Layer norm coefficients for the input gate */
    int16_t *forget_weight; /**< Layer norm coefficients for the forget gate */
    int16_t *cell_weight;   /**< Layer norm coefficients for the cell gate */
    int16_t *output_weight; /**< Layer norm coefficients for the output gate */
} cmsis_nn_layer_norm;
202 
/** Parameters for integer LSTM, as defined in TFLM.
    Groups the quantization scalings, effective biases, clipping/guard values and
    layer-norm coefficients needed by the quantized LSTM kernels. */
typedef struct
{
    int32_t time_major; /**< Nonzero (true) if first row of data is timestamps for input */
    cmsis_nn_scaling input_to_input_scaling;      /**< Scaling for input-to-input-gate weights */
    cmsis_nn_scaling input_to_forget_scaling;     /**< Scaling for input-to-forget-gate weights */
    cmsis_nn_scaling input_to_cell_scaling;       /**< Scaling for input-to-cell-gate weights */
    cmsis_nn_scaling input_to_output_scaling;     /**< Scaling for input-to-output-gate weights */
    cmsis_nn_scaling recurrent_to_input_scaling;  /**< Scaling for recurrent-to-input-gate weights */
    cmsis_nn_scaling recurrent_to_forget_scaling; /**< Scaling for recurrent-to-forget-gate weights */
    cmsis_nn_scaling recurrent_to_cell_scaling;   /**< Scaling for recurrent-to-cell-gate weights */
    cmsis_nn_scaling recurrent_to_output_scaling; /**< Scaling for recurrent-to-output-gate weights */
    cmsis_nn_scaling cell_to_input_scaling;       /**< Scaling for cell-to-input (peephole) weights */
    cmsis_nn_scaling cell_to_forget_scaling;      /**< Scaling for cell-to-forget (peephole) weights */
    cmsis_nn_scaling cell_to_output_scaling;      /**< Scaling for cell-to-output (peephole) weights */
    cmsis_nn_scaling projection_scaling;          /**< Scaling for the projection weights */
    cmsis_nn_scaling hidden_scaling;              /**< Scaling for the hidden state */
    cmsis_nn_scaling layer_norm_input_scaling;  /**< layer normalization for input layer */
    cmsis_nn_scaling layer_norm_forget_scaling; /**< layer normalization for forget gate */
    cmsis_nn_scaling layer_norm_cell_scaling;   /**< layer normalization for cell */
    cmsis_nn_scaling layer_norm_output_scaling; /**< layer normalization for output layer */

    int32_t cell_state_shift;    /**< Shift applied to the cell state — see kernel documentation */
    int32_t hidden_offset;       /**< Zero point of the hidden state */
    int32_t output_state_offset; /**< Zero point of the output state */

    cmsis_nn_lstm_clip_params clip;    /**< Clip values for cell state and projection */
    cmsis_nn_lstm_guard_params guard;  /**< Per-gate guard values */
    cmsis_nn_layer_norm layer_norm;    /**< Per-gate layer normalization coefficients */

    /* Effective bias is precalculated as bias + zero_point * weight.
    Only applicable to when input/output are s8 and weights are s16 */
    const int32_t *i2i_effective_bias; /**< input to input effective bias */
    const int32_t *i2f_effective_bias; /**< input to forget gate effective bias */
    const int32_t *i2c_effective_bias; /**< input to cell effective bias */
    const int32_t *i2o_effective_bias; /**< input to output effective bias */

    const int32_t *r2i_effective_bias; /**< recurrent gate to input effective bias */
    const int32_t *r2f_effective_bias; /**< recurrent gate to forget gate effective bias */
    const int32_t *r2c_effective_bias; /**< recurrent gate to cell effective bias */
    const int32_t *r2o_effective_bias; /**< recurrent gate to output effective bias */

    const int32_t *projection_effective_bias; /**< projection effective bias */

    /* Not precalculated bias (plain per-gate bias vectors) */
    const int32_t *input_gate_bias;  /**< input gate bias */
    const int32_t *forget_gate_bias; /**< forget gate bias */
    const int32_t *cell_gate_bias;   /**< cell gate bias */
    const int32_t *output_gate_bias; /**< output gate bias */

    /* Activation min and max */
    cmsis_nn_activation activation; /**< Min/max clamp applied to the output */

} cmsis_nn_lstm_params;
257 
258 #endif // _ARM_NN_TYPES_H
259