/* ----------------------------------------------------------------------
 * Project:      CMSIS DSP Library
 * Title:        arm_helium_utils.h
 * Description:  Utility functions for Helium development
 *
 * @version  V1.9.0
 * @date     23 April 2021
 *
 * Target Processor: Cortex-M and Cortex-A cores
 * -------------------------------------------------------------------- */
/*
 * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved.
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the License); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an AS IS BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef _ARM_UTILS_HELIUM_H_
#define _ARM_UTILS_HELIUM_H_


#ifdef __cplusplus
extern "C"
{
#endif
/***************************************

Definitions available for MVEF and MVEI

***************************************/
#if (defined (ARM_MATH_HELIUM) || defined(ARM_MATH_MVEF) || defined(ARM_MATH_MVEI)) && !defined(ARM_MATH_AUTOVECTORIZE)

#define INACTIVELANE            0 /* inactive lane content */


#endif /* defined (ARM_MATH_HELIUM) || defined(ARM_MATH_MVEF) || defined(ARM_MATH_MVEI) */

/***************************************

Definitions available for MVEF only

***************************************/
#if (defined (ARM_MATH_HELIUM) || defined(ARM_MATH_MVEF)) && !defined(ARM_MATH_AUTOVECTORIZE)

__STATIC_FORCEINLINE float32_t vecAddAcrossF32Mve(float32x4_t in)
{
    float32_t acc;

    acc = vgetq_lane(in, 0) + vgetq_lane(in, 1) +
          vgetq_lane(in, 2) + vgetq_lane(in, 3);

    return acc;
}



/* newton initial guess */
#define INVSQRT_MAGIC_F32           0x5f3759df
#define INV_NEWTON_INIT_F32         0x7EF127EA


#define INVSQRT_NEWTON_MVE_F32(invSqrt, xHalf, xStart)      \
{                                                           \
    float32x4_t tmp;                                        \
                                                            \
    /* tmp = xhalf * x * x */                               \
    tmp = vmulq(xStart, xStart);                            \
    tmp = vmulq(tmp, xHalf);                                \
    /* (1.5f - xhalf * x * x) */                            \
    tmp = vsubq(vdupq_n_f32(1.5f), tmp);                    \
    /* x = x*(1.5f-xhalf*x*x); */                           \
    invSqrt = vmulq(tmp, xStart);                           \
}
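
/*
 * Illustrative sketch only (not part of the CMSIS-DSP API): one way the
 * pieces above are typically combined into a full vector reciprocal square
 * root. The helper name example_visqrt_f32 and the choice of two
 * Newton-Raphson iterations are assumptions made for this example.
 */
__STATIC_FORCEINLINE float32x4_t example_visqrt_f32(float32x4_t vecIn)
{
    float32x4_t vecHalf, vecStart, vecInvSqrt;

    /* xhalf = 0.5f * x */
    vecHalf = vmulq_n_f32(vecIn, 0.5f);
    /* bit-level initial guess: i = MAGIC - (i >> 1), reinterpreted as float */
    vecStart = vreinterpretq_f32_s32(
                   vsubq_s32(vdupq_n_s32(INVSQRT_MAGIC_F32),
                             vshrq_n_s32(vreinterpretq_s32_f32(vecIn), 1)));
    /* refine the guess with two Newton-Raphson steps */
    INVSQRT_NEWTON_MVE_F32(vecInvSqrt, vecHalf, vecStart);
    INVSQRT_NEWTON_MVE_F32(vecInvSqrt, vecHalf, vecInvSqrt);

    return vecInvSqrt;
}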
#endif /* defined (ARM_MATH_HELIUM) || defined(ARM_MATH_MVEF) */


/***************************************

Definitions available for f16 datatype with HW acceleration only

***************************************/
#if defined(ARM_FLOAT16_SUPPORTED)
#if defined (ARM_MATH_MVE_FLOAT16) && !defined(ARM_MATH_AUTOVECTORIZE)

__STATIC_FORCEINLINE float16_t vecAddAcrossF16Mve(float16x8_t in)
{
    float16x8_t tmpVec;
    _Float16 acc;

    tmpVec = (float16x8_t) vrev32q_s16((int16x8_t) in);
    in = vaddq_f16(tmpVec, in);
    tmpVec = (float16x8_t) vrev64q_s32((int32x4_t) in);
    in = vaddq_f16(tmpVec, in);
    acc = (_Float16)vgetq_lane_f16(in, 0) + (_Float16)vgetq_lane_f16(in, 4);

    return acc;
}

__STATIC_FORCEINLINE float16x8_t __mve_cmplx_sum_intra_vec_f16(
    float16x8_t   vecIn)
{
    float16x8_t   vecTmp, vecOut;
    uint32_t      tmp = 0;

    vecTmp = (float16x8_t) vrev64q_s32((int32x4_t) vecIn);
    // TO TRACK: using canonical addition leads to inefficient code generation for f16
    // vecTmp = vecTmp + vecAccCpx0;
    /*
     * Compute
     *  re0+re1 | im0+im1 | re0+re1 | im0+im1
     *  re2+re3 | im2+im3 | re2+re3 | im2+im3
     */
    vecTmp = vaddq_f16(vecTmp, vecIn);
    vecOut = vecTmp;
    /*
     * shift left, random tmp insertion in bottom
     */
    vecOut = vreinterpretq_f16_s32(vshlcq_s32(vreinterpretq_s32_f16(vecOut), &tmp, 32));
    /*
     * Compute:
     *    DONTCARE     |    DONTCARE     | re0+re1+re0+re1 | im0+im1+im0+im1
     * re0+re1+re2+re3 | im0+im1+im2+im3 | re2+re3+re2+re3 | im2+im3+im2+im3
     */
    vecOut = vaddq_f16(vecOut, vecTmp);
    /*
     * Complex sum is in f16 elements 4 & 5
     * return full vector
     */
    return vecOut;
}


#define mve_cmplx_sum_intra_r_i_f16(vec, Re, Im)                 \
{                                                                \
    float16x8_t   vecOut = __mve_cmplx_sum_intra_vec_f16(vec);   \
    Re = vgetq_lane(vecOut, 4);                                  \
    Im = vgetq_lane(vecOut, 5);                                  \
}

__STATIC_FORCEINLINE void mve_cmplx_sum_intra_vec_f16(
    float16x8_t   vecIn,
    float16_t    *pOut)
{
    float16x8_t vecOut = __mve_cmplx_sum_intra_vec_f16(vecIn);
    /*
     * Complex sum is in f16 elements 4 & 5
     * use 32-bit extraction
     */
    *(float32_t *) pOut = ((float32x4_t) vecOut)[2];
}
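
/*
 * Usage sketch (illustration only): reducing one vector of interleaved
 * complex f16 samples (re0, im0, re1, im1, ...) to a single complex sum.
 * The helper name and buffer layout below are assumptions for this example.
 */
__STATIC_FORCEINLINE void example_cmplx_sum_f16(float16_t const *pSrc, float16_t *pSum)
{
    /* load 4 complex samples (8 f16 values) */
    float16x8_t vecIn = vldrhq_f16(pSrc);

    /* pSum[0] receives the summed real parts, pSum[1] the summed imaginary parts */
    mve_cmplx_sum_intra_vec_f16(vecIn, pSum);
}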


#define INVSQRT_MAGIC_F16           0x59ba      /*  ( 0x1ba = 0x3759df >> 13) */

/* canonical version of INVSQRT_NEWTON_MVE_F16 leads to bad performance */
#define INVSQRT_NEWTON_MVE_F16(invSqrt, xHalf, xStart)      \
{                                                           \
    float16x8_t tmp;                                        \
                                                            \
    /* tmp = xhalf * x * x */                               \
    tmp = vmulq(xStart, xStart);                            \
    tmp = vmulq(tmp, xHalf);                                \
    /* (1.5f - xhalf * x * x) */                            \
    tmp = vsubq(vdupq_n_f16((float16_t)1.5), tmp);          \
    /* x = x*(1.5f-xhalf*x*x); */                           \
    invSqrt = vmulq(tmp, xStart);                           \
}
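
/*
 * Illustrative sketch only (not part of the CMSIS-DSP API): the f16
 * counterpart of the f32 example above, chaining INVSQRT_MAGIC_F16 with two
 * INVSQRT_NEWTON_MVE_F16 refinement steps. The helper name and the iteration
 * count are assumptions made for this example.
 */
__STATIC_FORCEINLINE float16x8_t example_visqrt_f16(float16x8_t vecIn)
{
    float16x8_t vecHalf, vecStart, vecInvSqrt;

    /* xhalf = 0.5 * x */
    vecHalf = vmulq_n_f16(vecIn, (float16_t)0.5);
    /* bit-level initial guess: i = MAGIC - (i >> 1), reinterpreted as f16 */
    vecStart = vreinterpretq_f16_s16(
                   vsubq_s16(vdupq_n_s16(INVSQRT_MAGIC_F16),
                             vshrq_n_s16(vreinterpretq_s16_f16(vecIn), 1)));
    /* refine the guess with two Newton-Raphson steps */
    INVSQRT_NEWTON_MVE_F16(vecInvSqrt, vecHalf, vecStart);
    INVSQRT_NEWTON_MVE_F16(vecInvSqrt, vecHalf, vecInvSqrt);

    return vecInvSqrt;
}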

#endif /* defined (ARM_MATH_MVE_FLOAT16) && !defined(ARM_MATH_AUTOVECTORIZE) */
#endif /* defined(ARM_FLOAT16_SUPPORTED) */

/***************************************

Definitions available for MVEI and MVEF only

***************************************/
#if (defined (ARM_MATH_HELIUM) || defined(ARM_MATH_MVEF) || defined(ARM_MATH_MVEI)) && !defined(ARM_MATH_AUTOVECTORIZE)
/* The following functions are used to transpose matrices in the f32 and q31 cases */
__STATIC_INLINE arm_status arm_mat_trans_32bit_2x2_mve(
    uint32_t * pDataSrc,
    uint32_t * pDataDest)
{
    static const uint32x4_t vecOffs = { 0, 2, 1, 3 };
    /*
     *
     * | 0   1 |   =>  |  0   2 |
     * | 2   3 |       |  1   3 |
     *
     */
    uint32x4_t vecIn = vldrwq_u32((uint32_t const *)pDataSrc);
    vstrwq_scatter_shifted_offset_u32(pDataDest, vecOffs, vecIn);

    return (ARM_MATH_SUCCESS);
}

__STATIC_INLINE arm_status arm_mat_trans_32bit_3x3_mve(
    uint32_t * pDataSrc,
    uint32_t * pDataDest)
{
    const uint32x4_t vecOffs1 = { 0, 3, 6, 1};
    const uint32x4_t vecOffs2 = { 4, 7, 2, 5};
    /*
     *
     * | 0   1   2 |       | 0   3   6 |  4 x 32 flattened version | 0  3  6  1 |
     * | 3   4   5 | =>    | 1   4   7 |            =>             | 4  7  2  5 |
     * | 6   7   8 |       | 2   5   8 |       (row major)         | 8  .  .  . |
     *
     */
    uint32x4_t vecIn1 = vldrwq_u32((uint32_t const *) pDataSrc);
    uint32x4_t vecIn2 = vldrwq_u32((uint32_t const *) &pDataSrc[4]);

    vstrwq_scatter_shifted_offset_u32(pDataDest, vecOffs1, vecIn1);
    vstrwq_scatter_shifted_offset_u32(pDataDest, vecOffs2, vecIn2);

    pDataDest[8] = pDataSrc[8];

    return (ARM_MATH_SUCCESS);
}

__STATIC_INLINE arm_status arm_mat_trans_32bit_4x4_mve(uint32_t * pDataSrc, uint32_t * pDataDest)
{
    /*
     * 4x4 Matrix transposition
     * is 4 x de-interleave operation
     *
     * 0   1   2   3         0   4   8   12
     * 4   5   6   7    =>   1   5   9   13
     * 8   9   10  11        2   6   10  14
     * 12  13  14  15        3   7   11  15
     */

    uint32x4x4_t vecIn;

    vecIn = vld4q((uint32_t const *) pDataSrc);
    vstrwq(pDataDest, vecIn.val[0]);
    pDataDest += 4;
    vstrwq(pDataDest, vecIn.val[1]);
    pDataDest += 4;
    vstrwq(pDataDest, vecIn.val[2]);
    pDataDest += 4;
    vstrwq(pDataDest, vecIn.val[3]);

    return (ARM_MATH_SUCCESS);
}
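
/*
 * Usage sketch (illustration only): the 32-bit transpose helpers move raw
 * words, so the same entry points serve both f32 and q31 matrices and callers
 * simply cast their data pointers. The wrapper name below is hypothetical.
 */
__STATIC_INLINE void example_trans_f32_4x4(float32_t *pSrc, float32_t *pDst)
{
    (void) arm_mat_trans_32bit_4x4_mve((uint32_t *) pSrc, (uint32_t *) pDst);
}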


__STATIC_INLINE arm_status arm_mat_trans_32bit_generic_mve(
    uint16_t    srcRows,
    uint16_t    srcCols,
    uint32_t  * pDataSrc,
    uint32_t  * pDataDest)
{
    uint32x4_t      vecOffs;
    uint32_t        i;
    uint32_t        blkCnt;
    uint32_t const *pDataC;
    uint32_t       *pDataDestR;
    uint32x4_t      vecIn;

    vecOffs = vidupq_u32((uint32_t)0, 1);
    vecOffs = vecOffs * srcCols;

    i = srcCols;
    do
    {
        pDataC = (uint32_t const *) pDataSrc;
        pDataDestR = pDataDest;

        blkCnt = srcRows >> 2;
        while (blkCnt > 0U)
        {
            vecIn = vldrwq_gather_shifted_offset_u32(pDataC, vecOffs);
            vstrwq(pDataDestR, vecIn);
            pDataDestR += 4;
            pDataC = pDataC + srcCols * 4;
            /*
             * Decrement the blockSize loop counter
             */
            blkCnt--;
        }

        /*
         * tail
         */
        blkCnt = srcRows & 3;
        if (blkCnt > 0U)
        {
            mve_pred16_t p0 = vctp32q(blkCnt);
            vecIn = vldrwq_gather_shifted_offset_u32(pDataC, vecOffs);
            vstrwq_p(pDataDestR, vecIn, p0);
        }

        pDataSrc += 1;
        pDataDest += srcRows;
    }
    while (--i);

    return (ARM_MATH_SUCCESS);
}
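
/*
 * Usage sketch (illustration only): transposing an arbitrary rows x cols
 * matrix of q31 values with the generic helper above. The wrapper name is
 * hypothetical; the cast follows the same raw 32-bit word convention as the
 * fixed-size helpers.
 */
__STATIC_INLINE void example_trans_q31_generic(uint16_t rows, uint16_t cols,
                                               q31_t *pSrc, q31_t *pDst)
{
    (void) arm_mat_trans_32bit_generic_mve(rows, cols,
                                           (uint32_t *) pSrc,
                                           (uint32_t *) pDst);
}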

__STATIC_INLINE arm_status arm_mat_cmplx_trans_32bit(
    uint16_t    srcRows,
    uint16_t    srcCols,
    uint32_t   *pDataSrc,
    uint16_t    dstRows,
    uint16_t    dstCols,
    uint32_t   *pDataDest)
{
    uint32_t        i;
    uint32_t const *pDataC;
    uint32_t       *pDataRow;
    uint32_t       *pDataDestR, *pDataDestRow;
    uint32x4_t      vecOffsRef, vecOffsCur;
    uint32_t        blkCnt;
    uint32x4_t      vecIn;

#ifdef ARM_MATH_MATRIX_CHECK
    /*
     * Check for matrix mismatch condition
     */
    if ((srcRows != dstCols) || (srcCols != dstRows))
    {
        /*
         * Set status as ARM_MATH_SIZE_MISMATCH
         */
        return ARM_MATH_SIZE_MISMATCH;
    }
#else
    (void)dstRows;
    (void)dstCols;
#endif

    /* 2x2, 3x3 and 4x4 specialization to be added */

    vecOffsRef[0] = 0;
    vecOffsRef[1] = 1;
    vecOffsRef[2] = srcCols << 1;
    vecOffsRef[3] = (srcCols << 1) + 1;

    pDataRow = pDataSrc;
    pDataDestRow = pDataDest;
    i = srcCols;
    do
    {
        pDataC = (uint32_t const *) pDataRow;
        pDataDestR = pDataDestRow;
        vecOffsCur = vecOffsRef;

        blkCnt = (srcRows * CMPLX_DIM) >> 2;
        while (blkCnt > 0U)
        {
            vecIn = vldrwq_gather_shifted_offset(pDataC, vecOffsCur);
            vstrwq(pDataDestR, vecIn);
            pDataDestR += 4;
            vecOffsCur = vaddq(vecOffsCur, (srcCols << 2));
            /*
             * Decrement the blockSize loop counter
             */
            blkCnt--;
        }
        /*
         * tail
         * (will be merged through tail predication)
         */
        blkCnt = (srcRows * CMPLX_DIM) & 3;
        if (blkCnt > 0U)
        {
            mve_pred16_t p0 = vctp32q(blkCnt);
            vecIn = vldrwq_gather_shifted_offset(pDataC, vecOffsCur);
            vstrwq_p(pDataDestR, vecIn, p0);
        }

        pDataRow += CMPLX_DIM;
        pDataDestRow += (srcRows * CMPLX_DIM);
    }
    while (--i);

    return (ARM_MATH_SUCCESS);
}

__STATIC_INLINE arm_status arm_mat_trans_16bit_2x2(uint16_t * pDataSrc, uint16_t * pDataDest)
{
    pDataDest[0] = pDataSrc[0];
    pDataDest[3] = pDataSrc[3];
    pDataDest[2] = pDataSrc[1];
    pDataDest[1] = pDataSrc[2];

    return (ARM_MATH_SUCCESS);
}

__STATIC_INLINE arm_status arm_mat_trans_16bit_3x3_mve(uint16_t * pDataSrc, uint16_t * pDataDest)
{
    static const uint16_t stridesTr33[8] = { 0, 3, 6, 1, 4, 7, 2, 5 };
    uint16x8_t    vecOffs1;
    uint16x8_t    vecIn1;
    /*
     *
     * | 0   1   2 |       | 0   3   6 |  8 x 16 flattened version | 0  3  6  1  4  7  2  5 |
     * | 3   4   5 | =>    | 1   4   7 |            =>             | 8  .  .  .  .  .  .  . |
     * | 6   7   8 |       | 2   5   8 |       (row major)
     *
     */
    vecOffs1 = vldrhq_u16((uint16_t const *) stridesTr33);
    vecIn1 = vldrhq_u16((uint16_t const *) pDataSrc);

    vstrhq_scatter_shifted_offset_u16(pDataDest, vecOffs1, vecIn1);

    pDataDest[8] = pDataSrc[8];

    return (ARM_MATH_SUCCESS);
}


__STATIC_INLINE arm_status arm_mat_trans_16bit_4x4_mve(uint16_t * pDataSrc, uint16_t * pDataDest)
{
    static const uint16_t stridesTr44_1[8] = { 0, 4, 8, 12, 1, 5, 9, 13 };
    static const uint16_t stridesTr44_2[8] = { 2, 6, 10, 14, 3, 7, 11, 15 };
    uint16x8_t    vecOffs1, vecOffs2;
    uint16x8_t    vecIn1, vecIn2;
    uint16_t const *pDataSrcVec = (uint16_t const *) pDataSrc;

    /*
     * 4x4 Matrix transposition
     *
     * | 0   1   2   3  |       | 0   4   8   12 |   8 x 16 flattened version
     * | 4   5   6   7  |  =>   | 1   5   9   13 |   =>  [0  4  8 12  1  5  9 13]
     * | 8   9   10  11 |       | 2   6   10  14 |       [2  6 10 14  3  7 11 15]
     * | 12  13  14  15 |       | 3   7   11  15 |
     */

    vecOffs1 = vldrhq_u16((uint16_t const *) stridesTr44_1);
    vecOffs2 = vldrhq_u16((uint16_t const *) stridesTr44_2);
    vecIn1 = vldrhq_u16(pDataSrcVec);
    pDataSrcVec += 8;
    vecIn2 = vldrhq_u16(pDataSrcVec);

    vstrhq_scatter_shifted_offset_u16(pDataDest, vecOffs1, vecIn1);
    vstrhq_scatter_shifted_offset_u16(pDataDest, vecOffs2, vecIn2);


    return (ARM_MATH_SUCCESS);
}



__STATIC_INLINE arm_status arm_mat_trans_16bit_generic(
    uint16_t    srcRows,
    uint16_t    srcCols,
    uint16_t  * pDataSrc,
    uint16_t  * pDataDest)
{
    uint16x8_t      vecOffs;
    uint32_t        i;
    uint32_t        blkCnt;
    uint16_t const *pDataC;
    uint16_t       *pDataDestR;
    uint16x8_t      vecIn;

    vecOffs = vidupq_u16((uint32_t)0, 1);
    vecOffs = vecOffs * srcCols;

    i = srcCols;
    while (i > 0U)
    {
        pDataC = (uint16_t const *) pDataSrc;
        pDataDestR = pDataDest;

        blkCnt = srcRows >> 3;
        while (blkCnt > 0U)
        {
            vecIn = vldrhq_gather_shifted_offset_u16(pDataC, vecOffs);
            vstrhq_u16(pDataDestR, vecIn);
            pDataDestR += 8;
            pDataC = pDataC + srcCols * 8;
            /*
             * Decrement the blockSize loop counter
             */
            blkCnt--;
        }

        /*
         * tail
         */
        blkCnt = srcRows & 7;
        if (blkCnt > 0U)
        {
            mve_pred16_t p0 = vctp16q(blkCnt);
            vecIn = vldrhq_gather_shifted_offset_u16(pDataC, vecOffs);
            vstrhq_p_u16(pDataDestR, vecIn, p0);
        }
        pDataSrc += 1;
        pDataDest += srcRows;
        i--;
    }

    return (ARM_MATH_SUCCESS);
}


__STATIC_INLINE arm_status arm_mat_cmplx_trans_16bit(
    uint16_t    srcRows,
    uint16_t    srcCols,
    uint16_t   *pDataSrc,
    uint16_t    dstRows,
    uint16_t    dstCols,
    uint16_t   *pDataDest)
{
    static const uint16_t loadCmplxCol[8] = { 0, 0, 1, 1, 2, 2, 3, 3 };
    int             i;
    uint16x8_t      vecOffsRef, vecOffsCur;
    uint16_t const *pDataC;
    uint16_t       *pDataRow;
    uint16_t       *pDataDestR, *pDataDestRow;
    uint32_t        blkCnt;
    uint16x8_t      vecIn;

#ifdef ARM_MATH_MATRIX_CHECK
    /*
     * Check for matrix mismatch condition
     */
    if ((srcRows != dstCols) || (srcCols != dstRows))
    {
        /*
         * Set status as ARM_MATH_SIZE_MISMATCH
         */
        return ARM_MATH_SIZE_MISMATCH;
    }
#else
    (void)dstRows;
    (void)dstCols;
#endif

    /*
     * 2x2, 3x3 and 4x4 specialization to be added
     */


    /*
     * build [0, 1, 2xcol, 2xcol+1, 4xcol, 4xcol+1, 6xcol, 6xcol+1]
     */
    vecOffsRef = vldrhq_u16((uint16_t const *) loadCmplxCol);
    vecOffsRef = vmulq(vecOffsRef, (uint16_t) (srcCols * CMPLX_DIM))
                 + viwdupq_u16((uint32_t)0, (uint16_t) 2, 1);

    pDataRow = pDataSrc;
    pDataDestRow = pDataDest;
    i = srcCols;
    do
    {
        pDataC = (uint16_t const *) pDataRow;
        pDataDestR = pDataDestRow;
        vecOffsCur = vecOffsRef;

        blkCnt = (srcRows * CMPLX_DIM) >> 3;
        while (blkCnt > 0U)
        {
            vecIn = vldrhq_gather_shifted_offset(pDataC, vecOffsCur);
            vstrhq(pDataDestR, vecIn);
            pDataDestR += 8;    // VEC_LANES_U16
            vecOffsCur = vaddq(vecOffsCur, (srcCols << 3));
            /*
             * Decrement the blockSize loop counter
             */
            blkCnt--;
        }
        /*
         * tail
         * (will be merged through tail predication)
         */
        blkCnt = (srcRows * CMPLX_DIM) & 0x7;
        if (blkCnt > 0U)
        {
            mve_pred16_t p0 = vctp16q(blkCnt);
            vecIn = vldrhq_gather_shifted_offset(pDataC, vecOffsCur);
            vstrhq_p(pDataDestR, vecIn, p0);
        }

        pDataRow += CMPLX_DIM;
        pDataDestRow += (srcRows * CMPLX_DIM);
    }
    while (--i);

    return (ARM_MATH_SUCCESS);
}
#endif /* MVEF and MVEI */

/***************************************

Definitions available for MVEI only

***************************************/
#if (defined (ARM_MATH_HELIUM) || defined(ARM_MATH_MVEI)) && !defined(ARM_MATH_AUTOVECTORIZE)

#include "arm_common_tables.h"

#define MVE_ASRL_SAT16(acc, shift)          ((sqrshrl_sat48(acc, -(32-shift)) >> 32) & 0xffffffff)
#define MVE_ASRL_SAT32(acc, shift)          ((sqrshrl(acc, -(32-shift)) >> 32) & 0xffffffff)
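
/*
 * Usage sketch (illustration only): the macros above rescale a 64-bit
 * accumulator with saturation and keep the upper 32-bit word, ready to be
 * narrowed to a q31 (or q15) result. The wrapper name and the way the
 * accumulator is produced are assumptions made for this example.
 */
__STATIC_INLINE q31_t example_narrow_acc_sat32(q63_t acc, uint32_t shift)
{
    return (q31_t) MVE_ASRL_SAT32(acc, shift);
}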


#if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FAST_TABLES) || defined(ARM_TABLE_FAST_SQRT_Q31_MVE)
__STATIC_INLINE q31x4_t FAST_VSQRT_Q31(q31x4_t vecIn)
{
    q63x2_t vecTmpLL;
    q31x4_t vecTmp0, vecTmp1;
    q31_t   scale;
    q63_t   tmp64;
    q31x4_t vecNrm, vecDst, vecIdx, vecSignBits;


    vecSignBits = vclsq(vecIn);
    vecSignBits = vbicq_n_s32(vecSignBits, 1);
    /*
     * in = in << no_of_sign_bits;
     */
    vecNrm = vshlq(vecIn, vecSignBits);
    /*
     * index = in >> 24;
     */
    vecIdx = vecNrm >> 24;
    vecIdx = vecIdx << 1;

    vecTmp0 = vldrwq_gather_shifted_offset_s32(sqrtTable_Q31, (uint32x4_t)vecIdx);

    vecIdx = vecIdx + 1;

    vecTmp1 = vldrwq_gather_shifted_offset_s32(sqrtTable_Q31, (uint32x4_t)vecIdx);

    vecTmp1 = vqrdmulhq(vecTmp1, vecNrm);
    vecTmp0 = vecTmp0 - vecTmp1;
    vecTmp1 = vqrdmulhq(vecTmp0, vecTmp0);
    vecTmp1 = vqrdmulhq(vecNrm, vecTmp1);
    vecTmp1 = vdupq_n_s32(0x18000000) - vecTmp1;
    vecTmp0 = vqrdmulhq(vecTmp0, vecTmp1);
    vecTmpLL = vmullbq_int(vecNrm, vecTmp0);

    /*
     * scale elements 0, 2
     */
    scale = 26 + (vecSignBits[0] >> 1);
    tmp64 = asrl(vecTmpLL[0], scale);
    vecDst[0] = (q31_t) tmp64;

    scale = 26 + (vecSignBits[2] >> 1);
    tmp64 = asrl(vecTmpLL[1], scale);
    vecDst[2] = (q31_t) tmp64;

    vecTmpLL = vmulltq_int(vecNrm, vecTmp0);

    /*
     * scale elements 1, 3
     */
    scale = 26 + (vecSignBits[1] >> 1);
    tmp64 = asrl(vecTmpLL[0], scale);
    vecDst[1] = (q31_t) tmp64;

    scale = 26 + (vecSignBits[3] >> 1);
    tmp64 = asrl(vecTmpLL[1], scale);
    vecDst[3] = (q31_t) tmp64;
    /*
     * set negative values to 0
     */
    vecDst = vdupq_m(vecDst, 0, vcmpltq_n_s32(vecIn, 0));

    return vecDst;
}
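
/*
 * Usage sketch (illustration only): vector square root of four q31 samples
 * using FAST_VSQRT_Q31. Buffer names are hypothetical; negative inputs come
 * back as 0, as enforced by the final predicated vdupq_m above.
 */
__STATIC_INLINE void example_vsqrt_q31(q31_t const *pSrc, q31_t *pDst)
{
    q31x4_t vecIn  = vld1q(pSrc);
    q31x4_t vecOut = FAST_VSQRT_Q31(vecIn);

    vst1q(pDst, vecOut);
}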
#endif

#if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FAST_TABLES) || defined(ARM_TABLE_FAST_SQRT_Q15_MVE)
__STATIC_INLINE q15x8_t FAST_VSQRT_Q15(q15x8_t vecIn)
{
    q31x4_t vecTmpLev, vecTmpLodd, vecSignL;
    q15x8_t vecTmp0, vecTmp1;
    q15x8_t vecNrm, vecDst, vecIdx, vecSignBits;

    vecDst = vuninitializedq_s16();

    vecSignBits = vclsq(vecIn);
    vecSignBits = vbicq_n_s16(vecSignBits, 1);
    /*
     * in = in << no_of_sign_bits;
     */
    vecNrm = vshlq(vecIn, vecSignBits);

    vecIdx = vecNrm >> 8;
    vecIdx = vecIdx << 1;

    vecTmp0 = vldrhq_gather_shifted_offset_s16(sqrtTable_Q15, (uint16x8_t)vecIdx);

    vecIdx = vecIdx + 1;

    vecTmp1 = vldrhq_gather_shifted_offset_s16(sqrtTable_Q15, (uint16x8_t)vecIdx);

    vecTmp1 = vqrdmulhq(vecTmp1, vecNrm);
    vecTmp0 = vecTmp0 - vecTmp1;
    vecTmp1 = vqrdmulhq(vecTmp0, vecTmp0);
    vecTmp1 = vqrdmulhq(vecNrm, vecTmp1);
    vecTmp1 = vdupq_n_s16(0x1800) - vecTmp1;
    vecTmp0 = vqrdmulhq(vecTmp0, vecTmp1);

    vecSignBits = vecSignBits >> 1;

    vecTmpLev = vmullbq_int(vecNrm, vecTmp0);
    vecTmpLodd = vmulltq_int(vecNrm, vecTmp0);

    vecTmp0 = vecSignBits + 10;
    /*
     * negate sign to apply register based vshl
     */
    vecTmp0 = -vecTmp0;

    /*
     * shift even elements
     */
    vecSignL = vmovlbq(vecTmp0);
    vecTmpLev = vshlq(vecTmpLev, vecSignL);
    /*
     * shift odd elements
     */
    vecSignL = vmovltq(vecTmp0);
    vecTmpLodd = vshlq(vecTmpLodd, vecSignL);
    /*
     * merge and narrow odd and even parts
     */
    vecDst = vmovnbq_s32(vecDst, vecTmpLev);
    vecDst = vmovntq_s32(vecDst, vecTmpLodd);
    /*
     * set negative values to 0
     */
    vecDst = vdupq_m(vecDst, 0, vcmpltq_n_s16(vecIn, 0));

    return vecDst;
}
#endif

#endif /* defined (ARM_MATH_HELIUM) || defined(ARM_MATH_MVEI) */

#ifdef   __cplusplus
}
#endif

#endif /* _ARM_UTILS_HELIUM_H_ */