/*
 * SPDX-FileCopyrightText: Copyright 2010-2024 Arm Limited and/or its affiliates <open-source-office@arm.com>
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the License); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an AS IS BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/* ----------------------------------------------------------------------
 * Project:      CMSIS NN Library
 * Title:        arm_convolve_s8.c
 * Description:  s8 version of convolution using symmetric quantization.
 *
 * $Date:        04 November 2024
 * $Revision:    V.4.0.0
 *
 * Target :  Arm(R) M-Profile Architecture
 *
 * -------------------------------------------------------------------- */

#include "arm_nnfunctions.h"
#include "arm_nnsupportfunctions.h"
/**
 *  @ingroup Public
 */

/**
 * @addtogroup NNConv
 * @{
 */

/*
 * Basic s8 convolution function.
 *
 * Refer to the header file for details. The optimal use case for the DSP/MVE implementation is when the input and
 * output channels are multiples of 4, or at least greater than 4.
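 * The minimum required size of the scratch buffer (ctx->buf) can be queried with
 * arm_convolve_s8_get_buffer_size().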
 *
 */
arm_cmsis_nn_status arm_convolve_s8(const cmsis_nn_context *ctx,
                                    const cmsis_nn_conv_params *conv_params,
                                    const cmsis_nn_per_channel_quant_params *quant_params,
                                    const cmsis_nn_dims *input_dims,
                                    const int8_t *input_data,
                                    const cmsis_nn_dims *filter_dims,
                                    const int8_t *filter_data,
                                    const cmsis_nn_dims *bias_dims,
                                    const int32_t *bias_data,
                                    const cmsis_nn_dims *upscale_dims,
                                    const cmsis_nn_dims *output_dims,
                                    int8_t *output_data)
{
    (void)bias_dims;

    if (ctx->buf == NULL)
    {
        return ARM_CMSIS_NN_ARG_ERROR;
    }
    int16_t *buffer_a = (int16_t *)ctx->buf;

    const int32_t input_batches = input_dims->n;
    const uint16_t input_x = input_dims->w;
    const uint16_t input_y = input_dims->h;
    const uint16_t input_ch = input_dims->c;
    const uint16_t kernel_x = filter_dims->w;
    const uint16_t kernel_y = filter_dims->h;
    const uint16_t kernel_ch = filter_dims->c;
    const uint16_t output_x = output_dims->w;
    const uint16_t output_y = output_dims->h;
    const uint16_t output_ch = output_dims->c;

    const uint16_t pad_x = conv_params->padding.w;
    const uint16_t pad_y = conv_params->padding.h;
    const uint16_t stride_x = conv_params->stride.w;
    const uint16_t stride_y = conv_params->stride.h;
    const int32_t dilation_x = conv_params->dilation.w;
    const int32_t dilation_y = conv_params->dilation.h;
    const int32_t out_offset = conv_params->output_offset;
    const int32_t out_activation_min = conv_params->activation.min;
    const int32_t out_activation_max = conv_params->activation.max;
    const int32_t input_offset = conv_params->input_offset;

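    /* Grouped convolution: each of the 'groups' filter groups convolves kernel_ch input channels into
     * output_ch_per_group output channels. For example, input_ch = 8 with kernel_ch = 4 gives groups = 2,
     * and each generated im2col column then holds kernel_x * kernel_y * kernel_ch (= rhs_cols) elements. */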
    const int32_t groups = input_ch / kernel_ch;
    const int32_t rhs_cols = kernel_x * kernel_y * kernel_ch;
    const int32_t output_ch_per_group = output_ch / groups;

    const int32_t *output_mult = quant_params->multiplier;
    const int32_t *output_shift = quant_params->shift;

    if (input_ch % groups != 0 || output_ch % groups != 0)
    {
        return ARM_CMSIS_NN_ARG_ERROR;
    }

    // For an upscale factor of 2 in a dimension, the actual index of the input data is the index into the upscaled
    // input divided by two. In the ordinary case there is no difference. The division is implemented as a right
    // shift for optimization purposes.
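    // For example, with upscale_dims->w == 2 an upscaled row of width 8 is backed by only 4 stored input
    // elements: upscaled index k_x = 6 maps to stored index 6 >> 1 = 3, while odd upscaled indices correspond
    // to the inserted zero elements and are never read.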
    uint32_t y_rshift = 0;
    uint32_t x_rshift = 0;

    if (upscale_dims)
    {
        y_rshift = upscale_dims->h == 2 ? 1 : 0;
        x_rshift = upscale_dims->w == 2 ? 1 : 0;
    }

    const int32_t input_x_rshifted = input_x >> x_rshift;
    const int32_t input_y_rshifted = input_y >> y_rshift;

    const int32_t remainder = rhs_cols % 4;
    const int32_t aligned_rhs_cols = remainder != 0 ? rhs_cols + 4 - remainder : rhs_cols;
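    /* Each im2col column is padded up to the next multiple of 4 so the SIMD kernels can consume it in
     * 4-element chunks; e.g. rhs_cols = 18 gives aligned_rhs_cols = 20. */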

    for (int i_batch = 0; i_batch < input_batches; i_batch++)
    {

#if defined(ARM_MATH_MVEI)
        const int32_t aligned_rhs_cols_offset = aligned_rhs_cols - rhs_cols;

        /* Generate up to four columns from the input tensor for a GEMM computation */
        int8_t *im2col_buf = (int8_t *)buffer_a;
#else
        /* Use as a ping-pong buffer for unordered elements */
        int8_t *im2col_buf = (int8_t *)buffer_a + aligned_rhs_cols * 2;
        int16_t *im2col_buf_start_s16 = buffer_a;
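        /* The s8 columns are staged one s16 column's worth of bytes into buffer_a; the widening copy to s16
         * below writes from the start of the buffer with its write position trailing the s8 read position,
         * so staged bytes are consumed before they are overwritten. */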
#endif
        int32_t lhs_rows = 0;

        const int8_t *filter_data_ptr = &filter_data[0];
        const int32_t *bias_data_ptr = &bias_data[0];
        const int32_t *output_mult_ptr = &output_mult[0];
        const int32_t *output_shift_ptr = &output_shift[0];

        /* This part implements the im2col function */
        for (int32_t i_group = 0; i_group < groups; i_group++)
        {
            int8_t *out = output_data + i_group * output_ch_per_group;
            for (int i_out_y = 0; i_out_y < output_y; i_out_y++)
            {
                for (int i_out_x = 0; i_out_x < output_x; i_out_x++)
                {
                    const int32_t base_idx_x = stride_x * i_out_x - pad_x;
                    const int32_t base_idx_y = stride_y * i_out_y - pad_y;

                    if (y_rshift == 1 || x_rshift == 1)
                    {
                        // Fill the complete buffer with -input_offset; once the kernel adds input_offset back,
                        // these elements become the zeros of the upscaled input
                        arm_memset_s8(
                            im2col_buf, (int8_t)-input_offset, sizeof(int8_t) * kernel_ch * kernel_x * kernel_y);
                        for (int32_t i_ker_y = 0; i_ker_y < kernel_y; i_ker_y++)
                        {
                            const int32_t k_y = base_idx_y + dilation_y * i_ker_y;

                            // Don't copy data when padding, or for every second row when the input is upscaled in y
                            if ((k_y < 0 || k_y >= input_y) || (k_y % 2 && y_rshift == 1))
                            {
                                im2col_buf += kernel_ch * kernel_x;
                            }
                            else
                            {
                                const int32_t k_y_rshifted = k_y >> y_rshift;
                                for (int32_t i_ker_x = 0; i_ker_x < kernel_x; i_ker_x++)
                                {
                                    const int32_t k_x = base_idx_x + dilation_x * i_ker_x;

                                    // Don't copy data when padding, or for every second element when the input is
                                    // upscaled in x
                                    if ((k_x >= 0 && k_x < input_x) && ((k_x % 2 == 0) || x_rshift == 0))
                                    {
                                        const int32_t k_x_rshifted = k_x >> x_rshift;
                                        arm_memcpy_s8(im2col_buf,
                                                      input_data +
                                                          (k_y_rshifted * input_x_rshifted + k_x_rshifted) * input_ch,
                                                      sizeof(int8_t) * kernel_ch);
                                    }
                                    im2col_buf += kernel_ch;
                                }
                            }
                        }
                    }
                    else
                    {
                        for (int32_t i_ker_y = 0; i_ker_y < kernel_y; i_ker_y++)
                        {
                            for (int32_t i_ker_x = 0; i_ker_x < kernel_x; i_ker_x++)
                            {
                                const int32_t k_y = base_idx_y + dilation_y * i_ker_y;
                                const int32_t k_x = base_idx_x + dilation_x * i_ker_x;

                                if (k_y < 0 || k_y >= input_y || k_x < 0 || k_x >= input_x)
                                {
                                    arm_memset_s8(im2col_buf, (int8_t)-input_offset, sizeof(int8_t) * kernel_ch);
                                }
                                else
                                {
                                    arm_memcpy_s8(im2col_buf,
                                                  input_data + (k_y * input_x + k_x) * input_ch + i_group * kernel_ch,
                                                  sizeof(int8_t) * kernel_ch);
                                }
                                im2col_buf += kernel_ch;
                            }
                        }
                    }
                    lhs_rows++;

#if defined(ARM_MATH_MVEI)
                    im2col_buf += aligned_rhs_cols_offset; /* Skip the alignment padding of this column */

                    /* The matrix multiplication is performed for every 4 columns gathered */
                    if (lhs_rows == 4)
                    {
                        arm_nn_mat_mult_nt_t_s8((int8_t *)buffer_a,
                                                filter_data_ptr,
                                                bias_data_ptr,
                                                out,
                                                output_mult_ptr,
                                                output_shift_ptr,
                                                lhs_rows,
                                                output_ch_per_group,
                                                rhs_cols,
                                                input_offset,
                                                out_offset,
                                                out_activation_min,
                                                out_activation_max,
                                                output_ch,
                                                aligned_rhs_cols);

                        out += lhs_rows * output_ch;

                        lhs_rows = 0;
                        im2col_buf = (int8_t *)buffer_a;
                    }
#else
#if defined(ARM_MATH_DSP)
                    /* Widen one column to s16 with the input offset added; elements are left in the interleaved
                     * order produced by the SIMD widening, which the DSP kernel expects */
                    arm_s8_to_s16_unordered_with_offset(
                        im2col_buf - rhs_cols, im2col_buf_start_s16, rhs_cols, (int16_t)input_offset);
#else
                    /* Widen one column to s16 with the input offset added, preserving element order */
                    arm_q7_to_q15_with_offset(
                        im2col_buf - rhs_cols, im2col_buf_start_s16, rhs_cols, (int16_t)input_offset);
#endif
                    im2col_buf_start_s16 += aligned_rhs_cols;

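                    /* Once two columns have been gathered, multiply them against the filter matrix two rows at a
                     * time. With more than one group, the row-offset variant is used so that each group's results
                     * are written with a stride of output_ch. */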
                    if (lhs_rows == 2)
                    {
                        if (groups > 1)
                        {
                            out = arm_nn_mat_mult_kernel_row_offset_s8_s16(filter_data_ptr,
                                                                           buffer_a,
                                                                           output_ch_per_group,
                                                                           output_shift_ptr,
                                                                           output_mult_ptr,
                                                                           out_offset,
                                                                           out_activation_min,
                                                                           out_activation_max,
                                                                           rhs_cols,
                                                                           aligned_rhs_cols,
                                                                           bias_data_ptr,
                                                                           output_ch,
                                                                           out);
                        }
                        else
                        {
                            out = arm_nn_mat_mult_kernel_s8_s16(filter_data_ptr,
                                                                buffer_a,
                                                                output_ch_per_group,
                                                                output_shift_ptr,
                                                                output_mult_ptr,
                                                                out_offset,
                                                                out_activation_min,
                                                                out_activation_max,
                                                                rhs_cols,
                                                                aligned_rhs_cols,
                                                                bias_data_ptr,
                                                                out);
                        }

                        /* counter reset */
                        im2col_buf_start_s16 = buffer_a;
                        im2col_buf = (int8_t *)buffer_a + aligned_rhs_cols * 2;
                        lhs_rows = 0;
                    }
#endif
                }
            }

            if (out == NULL)
            {
                return ARM_CMSIS_NN_NO_IMPL_ERROR;
            }

            /* Handle any left-over columns */
            if (lhs_rows != 0)
            {
#if defined(ARM_MATH_MVEI)
                arm_nn_mat_mult_nt_t_s8((int8_t *)buffer_a,
                                        filter_data_ptr,
                                        bias_data_ptr,
                                        out,
                                        output_mult_ptr,
                                        output_shift_ptr,
                                        lhs_rows,
                                        output_ch_per_group,
                                        rhs_cols,
                                        input_offset,
                                        out_offset,
                                        out_activation_min,
                                        out_activation_max,
                                        output_ch,
                                        aligned_rhs_cols);

                out += lhs_rows * output_ch;
                lhs_rows = 0;
                im2col_buf = (int8_t *)buffer_a;
#else // #if defined(ARM_MATH_MVEI)

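                /* A single left-over column remains: reduce it with a plain dot product per output channel */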
                const int8_t *ker_a = filter_data_ptr;
                int i;

                for (i = 0; i < output_ch_per_group; i++)
                {
                    /* Load the accumulator with bias first */
                    int32_t sum = 0;
                    if (bias_data_ptr)
                    {
                        sum = bias_data_ptr[i];
                    }

                    const int16_t *ip_as_col = buffer_a;

#if defined(ARM_MATH_DSP)
                    /* Four multiply-accumulates are done in one loop iteration. */
                    uint16_t col_count = rhs_cols / 4;
                    while (col_count)
                    {
                        int32_t ker_a1, ker_a2;
                        int32_t ip_b1, ip_b2;

                        ker_a = read_and_pad_reordered(ker_a, &ker_a1, &ker_a2);

                        ip_b1 = arm_nn_read_q15x2_ia(&ip_as_col);
                        sum = SMLAD(ker_a1, ip_b1, sum);
                        ip_b2 = arm_nn_read_q15x2_ia(&ip_as_col);
                        sum = SMLAD(ker_a2, ip_b2, sum);

                        col_count--;
                    }
                    /* Handle left-over MACs */
                    col_count = rhs_cols & 0x3;
#else
                    uint16_t col_count = rhs_cols;

#endif
                    while (col_count)
                    {
                        int8_t ker_a1 = *ker_a++;
                        int16_t ip_b1 = *ip_as_col++;

                        sum += ker_a1 * ip_b1;
                        col_count--;
                    }

                    /* Requantize the accumulator, add the output offset and clamp to the activation range */
                    sum = arm_nn_requantize(sum, output_mult_ptr[i], output_shift_ptr[i]);
                    sum += out_offset;
                    sum = MAX(sum, out_activation_min);
                    sum = MIN(sum, out_activation_max);
                    *out++ = (int8_t)sum;
                }

                im2col_buf_start_s16 = buffer_a;
                im2col_buf = (int8_t *)buffer_a + aligned_rhs_cols * 2;
                lhs_rows = 0;
#endif // #if defined(ARM_MATH_MVEI)
            }
            filter_data_ptr += output_ch_per_group * rhs_cols;
            bias_data_ptr += output_ch_per_group;
            output_mult_ptr += output_ch_per_group;
            output_shift_ptr += output_ch_per_group;
        }
        /* Advance to the next batch */
        input_data += (input_x_rshifted * input_y_rshifted * input_ch);
        output_data += (output_x * output_y * output_ch);
    }

    /* Return to application */
    return ARM_CMSIS_NN_SUCCESS;
}
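
/*
 * Usage sketch (illustrative only, not part of the library). Field names follow arm_nn_types.h; quant_params,
 * bias, bias_dims and the scratch buffer are assumed to be set up by the caller, with the scratch size taken
 * from arm_convolve_s8_get_buffer_size(). Passing NULL for upscale_dims selects the ordinary, non-upscaled path:
 *
 *   cmsis_nn_conv_params conv_params = {.input_offset = 128,
 *                                       .output_offset = -128,
 *                                       .stride = {1, 1},
 *                                       .padding = {0, 0},
 *                                       .dilation = {1, 1},
 *                                       .activation = {-128, 127}};
 *   cmsis_nn_dims input_dims = {.n = 1, .h = 16, .w = 16, .c = 8};
 *   cmsis_nn_dims filter_dims = {.h = 3, .w = 3, .c = 8};
 *   cmsis_nn_dims output_dims = {.n = 1, .h = 14, .w = 14, .c = 16};
 *   cmsis_nn_context ctx = {.buf = scratch, .size = scratch_size};
 *
 *   arm_cmsis_nn_status status = arm_convolve_s8(&ctx, &conv_params, &quant_params, &input_dims, input,
 *                                                &filter_dims, filter, &bias_dims, bias, NULL, &output_dims, output);
 */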

/**
 * @} end of NNConv group
 */