/*
 * SPDX-FileCopyrightText: Copyright 2010-2024 Arm Limited and/or its affiliates <open-source-office@arm.com>
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the License); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an AS IS BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/* ----------------------------------------------------------------------
 * Project:      CMSIS NN Library
 * Title:        arm_convolve_s8.c
 * Description:  s8 version of convolution using symmetric quantization.
 *
 * $Date:        27 February 2024
 * $Revision:    V.3.7.0
 *
 * Target :  Arm(R) M-Profile Architecture
 *
 * -------------------------------------------------------------------- */

#include "arm_nnfunctions.h"
#include "arm_nnsupportfunctions.h"

/**
 * @ingroup Public
 */

/**
 * @addtogroup NNConv
 * @{
 */

/*
 * Basic s8 convolution function.
 *
 * Refer to the header file for details. The optimal use case for the
 * DSP/MVE implementation is when input and output channels are multiples
 * of 4, or at least greater than 4.
 *
 */
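/*
 * Illustrative usage sketch (not part of the library). Buffer sizing via
 * arm_convolve_s8_get_buffer_size() reflects the public CMSIS-NN API; the
 * dimension and parameter structs are assumed to be filled in by the caller.
 *
 *     cmsis_nn_context ctx;
 *     ctx.size = arm_convolve_s8_get_buffer_size(&input_dims, &filter_dims);
 *     ctx.buf = malloc(ctx.size);
 *
 *     arm_cmsis_nn_status status = arm_convolve_s8(&ctx, &conv_params, &quant_params,
 *                                                  &input_dims, input_data,
 *                                                  &filter_dims, filter_data,
 *                                                  &bias_dims, bias_data,
 *                                                  &output_dims, output_data);
 *     free(ctx.buf);
 */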
arm_cmsis_nn_status arm_convolve_s8(const cmsis_nn_context *ctx,
                                    const cmsis_nn_conv_params *conv_params,
                                    const cmsis_nn_per_channel_quant_params *quant_params,
                                    const cmsis_nn_dims *input_dims,
                                    const int8_t *input_data,
                                    const cmsis_nn_dims *filter_dims,
                                    const int8_t *filter_data,
                                    const cmsis_nn_dims *bias_dims,
                                    const int32_t *bias_data,
                                    const cmsis_nn_dims *output_dims,
                                    int8_t *output_data)
{
    (void)bias_dims;

    if (ctx->buf == NULL)
    {
        return ARM_CMSIS_NN_ARG_ERROR;
    }
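
    /* Caller-provided scratch buffer, used below as the im2col staging area
     * (s8 data on the MVE path, s16 data otherwise) */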
    int16_t *buffer_a = (int16_t *)ctx->buf;

    const int32_t input_batches = input_dims->n;
    const uint16_t input_x = input_dims->w;
    const uint16_t input_y = input_dims->h;
    const uint16_t input_ch = input_dims->c;
    const uint16_t kernel_x = filter_dims->w;
    const uint16_t kernel_y = filter_dims->h;
    const uint16_t kernel_ch = filter_dims->c;
    const uint16_t output_x = output_dims->w;
    const uint16_t output_y = output_dims->h;
    const uint16_t output_ch = output_dims->c;

    const uint16_t pad_x = conv_params->padding.w;
    const uint16_t pad_y = conv_params->padding.h;
    const uint16_t stride_x = conv_params->stride.w;
    const uint16_t stride_y = conv_params->stride.h;
    const int32_t dilation_x = conv_params->dilation.w;
    const int32_t dilation_y = conv_params->dilation.h;
    const int32_t out_offset = conv_params->output_offset;
    const int32_t out_activation_min = conv_params->activation.min;
    const int32_t out_activation_max = conv_params->activation.max;
    const int32_t input_offset = conv_params->input_offset;

    const int32_t groups = input_ch / kernel_ch;
    const int32_t rhs_cols = kernel_x * kernel_y * kernel_ch;
    const int32_t output_ch_per_group = output_ch / groups;
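    /* Grouped convolution: each group convolves kernel_ch input channels into
     * output_ch_per_group output channels. Illustrative example: input_ch = 8,
     * kernel_ch = 4 -> groups = 2; with output_ch = 6, each group produces 3
     * output channels from rhs_cols = kernel_x * kernel_y * 4 weights per filter */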

    int32_t *output_mult = quant_params->multiplier;
    int32_t *output_shift = quant_params->shift;

    if (input_ch % groups != 0 || output_ch % groups != 0)
    {
        return ARM_CMSIS_NN_ARG_ERROR;
    }

    const int32_t remainder = rhs_cols % 4;
    const int32_t aligned_rhs_cols = remainder != 0 ? rhs_cols + 4 - remainder : rhs_cols;
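    /* Example of the rounding arithmetic above: rhs_cols = 18 gives
     * remainder = 2 and aligned_rhs_cols = 20, so each buffered im2col row
     * starts on a 4-element boundary */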

    for (int i_batch = 0; i_batch < input_batches; i_batch++)
    {

#if defined(ARM_MATH_MVEI)
        const int32_t aligned_rhs_cols_offset = aligned_rhs_cols - rhs_cols;

        /* Generate up to four columns from the input tensor for a GEMM computation */
        int8_t *im2col_buf = (int8_t *)buffer_a;
#else
        /* Use as a ping-pong buffer for unordered elements */
        int8_t *im2col_buf = (int8_t *)buffer_a + aligned_rhs_cols * 2;
        int16_t *im2col_buf_start_s16 = buffer_a;
#endif
        int32_t lhs_rows = 0;

        const int8_t *filter_data_ptr = &filter_data[0];
        const int32_t *bias_data_ptr = &bias_data[0];
        const int32_t *output_mult_ptr = &output_mult[0];
        const int32_t *output_shift_ptr = &output_shift[0];

        /* This part implements the im2col function */
        for (int32_t i_group = 0; i_group < groups; i_group++)
        {
            int8_t *out = output_data + i_group * output_ch_per_group;
            for (int i_out_y = 0; i_out_y < output_y; i_out_y++)
            {
                for (int i_out_x = 0; i_out_x < output_x; i_out_x++)
                {
                    const int32_t base_idx_x = stride_x * i_out_x - pad_x;
                    const int32_t base_idx_y = stride_y * i_out_y - pad_y;

                    for (int32_t i_ker_y = 0; i_ker_y < kernel_y; i_ker_y++)
                    {
                        for (int32_t i_ker_x = 0; i_ker_x < kernel_x; i_ker_x++)
                        {
                            const int32_t k_y = base_idx_y + dilation_y * i_ker_y;
                            const int32_t k_x = base_idx_x + dilation_x * i_ker_x;

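                            /* Out-of-bounds taps are filled with -input_offset so
                             * they contribute zero once input_offset is added back
                             * in the matrix-multiplication stage */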
                            if (k_y < 0 || k_y >= input_y || k_x < 0 || k_x >= input_x)
                            {
                                arm_memset_s8(im2col_buf, (int8_t)-input_offset, sizeof(int8_t) * kernel_ch);
                            }
                            else
                            {
                                arm_memcpy_s8(im2col_buf,
                                              input_data + (k_y * input_x + k_x) * input_ch + i_group * kernel_ch,
                                              sizeof(int8_t) * kernel_ch);
                            }
                            im2col_buf += kernel_ch;
                        }
                    }
                    lhs_rows++;

#if defined(ARM_MATH_MVEI)
                    im2col_buf += aligned_rhs_cols_offset;

                    /* Computation is performed for every 4 columns */
                    if (lhs_rows == 4)
                    {
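                        /* GEMM: the four buffered im2col rows (LHS) are multiplied
                         * with the transposed filter matrix (RHS); results for this
                         * group are written with an output_ch stride */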
                        arm_nn_mat_mult_nt_t_s8((int8_t *)buffer_a,
                                                filter_data_ptr,
                                                bias_data_ptr,
                                                out,
                                                output_mult_ptr,
                                                output_shift_ptr,
                                                lhs_rows,
                                                output_ch_per_group,
                                                rhs_cols,
                                                input_offset,
                                                out_offset,
                                                out_activation_min,
                                                out_activation_max,
                                                output_ch,
                                                aligned_rhs_cols);

                        out += lhs_rows * output_ch;

                        lhs_rows = 0;
                        im2col_buf = (int8_t *)buffer_a;
                    }
#else
    #if defined(ARM_MATH_DSP)
                    /* Copy one column with input offset and no ordering */
                    arm_s8_to_s16_unordered_with_offset(
                        im2col_buf - rhs_cols, im2col_buf_start_s16, rhs_cols, (int16_t)input_offset);
    #else
                    arm_q7_to_q15_with_offset(
                        im2col_buf - rhs_cols, im2col_buf_start_s16, rhs_cols, (int16_t)input_offset);
    #endif
                    im2col_buf_start_s16 += aligned_rhs_cols;

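                    /* The s8/s16 matrix-multiplication kernels below consume two
                     * im2col columns per invocation */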
                    if (lhs_rows == 2)
                    {
                        if (groups > 1)
                        {
                            out = arm_nn_mat_mult_kernel_row_offset_s8_s16(filter_data_ptr,
                                                                           buffer_a,
                                                                           output_ch_per_group,
                                                                           output_shift_ptr,
                                                                           output_mult_ptr,
                                                                           out_offset,
                                                                           out_activation_min,
                                                                           out_activation_max,
                                                                           rhs_cols,
                                                                           aligned_rhs_cols,
                                                                           bias_data_ptr,
                                                                           output_ch,
                                                                           out);
                        }
                        else
                        {
                            out = arm_nn_mat_mult_kernel_s8_s16(filter_data_ptr,
                                                                buffer_a,
                                                                output_ch_per_group,
                                                                output_shift_ptr,
                                                                output_mult_ptr,
                                                                out_offset,
                                                                out_activation_min,
                                                                out_activation_max,
                                                                rhs_cols,
                                                                aligned_rhs_cols,
                                                                bias_data_ptr,
                                                                out);
                        }

                        /* counter reset */
                        im2col_buf_start_s16 = buffer_a;
                        im2col_buf = (int8_t *)buffer_a + aligned_rhs_cols * 2;
                        lhs_rows = 0;
                    }
#endif
                }
            }

            if (out == NULL)
            {
                return ARM_CMSIS_NN_NO_IMPL_ERROR;
            }

            /* Handle left over columns */
            if (lhs_rows != 0)
            {
#if defined(ARM_MATH_MVEI)
                arm_nn_mat_mult_nt_t_s8((int8_t *)buffer_a,
                                        filter_data_ptr,
                                        bias_data_ptr,
                                        out,
                                        output_mult_ptr,
                                        output_shift_ptr,
                                        lhs_rows,
                                        output_ch_per_group,
                                        rhs_cols,
                                        input_offset,
                                        out_offset,
                                        out_activation_min,
                                        out_activation_max,
                                        output_ch,
                                        aligned_rhs_cols);

                out += lhs_rows * output_ch;
                lhs_rows = 0;
                im2col_buf = (int8_t *)buffer_a;
#else // #if defined(ARM_MATH_MVEI)

                const int8_t *ker_a = filter_data_ptr;
                int i;

                for (i = 0; i < output_ch_per_group; i++)
                {
                    /* Load the accumulator with bias first */
                    int32_t sum = 0;
                    if (bias_data_ptr)
                    {
                        sum = bias_data_ptr[i];
                    }

                    const int16_t *ip_as_col = buffer_a;

    #if defined(ARM_MATH_DSP)
                    /* 4 multiply and accumulates are done in one loop. */
                    uint16_t col_count = rhs_cols / 4;
                    while (col_count)
                    {
                        int32_t ker_a1, ker_a2;
                        int32_t ip_b1, ip_b2;

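                        /* read_and_pad_reordered() expands four s8 weights into two
                         * packed s16 pairs, matching the unordered s16 input layout
                         * produced above */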
                        ker_a = read_and_pad_reordered(ker_a, &ker_a1, &ker_a2);

                        ip_b1 = arm_nn_read_q15x2_ia(&ip_as_col);
                        sum = SMLAD(ker_a1, ip_b1, sum);
                        ip_b2 = arm_nn_read_q15x2_ia(&ip_as_col);
                        sum = SMLAD(ker_a2, ip_b2, sum);

                        col_count--;
                    }
                    /* Handle left over mac */
                    col_count = rhs_cols & 0x3;
    #else
                    uint16_t col_count = rhs_cols;
    #endif
                    while (col_count)
                    {
                        int8_t ker_a1 = *ker_a++;
                        int16_t ip_b1 = *ip_as_col++;

                        sum += ker_a1 * ip_b1;
                        col_count--;
                    }

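                    /* Scale the s32 accumulator back to the s8 range with the
                     * per-channel multiplier and shift (fixed-point requantization),
                     * then apply the output offset and activation clamping */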
                    sum = arm_nn_requantize(sum, output_mult_ptr[i], output_shift_ptr[i]);
                    sum += out_offset;
                    sum = MAX(sum, out_activation_min);
                    sum = MIN(sum, out_activation_max);
                    *out++ = (int8_t)sum;
                }

                im2col_buf_start_s16 = buffer_a;
                im2col_buf = (int8_t *)buffer_a + aligned_rhs_cols * 2;
                lhs_rows = 0;
#endif // #if defined(ARM_MATH_MVEI)
            }
            filter_data_ptr += output_ch_per_group * rhs_cols;
            bias_data_ptr += output_ch_per_group;
            output_mult_ptr += output_ch_per_group;
            output_shift_ptr += output_ch_per_group;
        }
        /* Advance to the next batch */
        input_data += (input_x * input_y * input_ch);
        output_data += (output_x * output_y * output_ch);
    }

    /* Return to application */
    return ARM_CMSIS_NN_SUCCESS;
}

/**
 * @} end of NNConv group
 */