| /hal_nxp-latest/mcux/mcux-sdk/CMSIS/DSP/Source/FilteringFunctions/ |
| D | arm_biquad_cascade_df2T_f64.c |
|   148  float64_t acc1;   /* Accumulator */   in arm_biquad_cascade_df2T_f64() local
|   184  acc1 = b0 * Xn1 + d1;   in arm_biquad_cascade_df2T_f64()
|   187  d1 += a1 * acc1;   in arm_biquad_cascade_df2T_f64()
|   190  d2 += a2 * acc1;   in arm_biquad_cascade_df2T_f64()
|   192  *pOut++ = acc1;   in arm_biquad_cascade_df2T_f64()
|   198  acc1 = b0 * Xn1 + d1;   in arm_biquad_cascade_df2T_f64()
|   201  d1 += a1 * acc1;   in arm_biquad_cascade_df2T_f64()
|   204  d2 += a2 * acc1;   in arm_biquad_cascade_df2T_f64()
|   206  *pOut++ = acc1;   in arm_biquad_cascade_df2T_f64()
|   211  acc1 = b0 * Xn1 + d1;   in arm_biquad_cascade_df2T_f64()
|   [all …]
|
| D | arm_biquad_cascade_df2T_f16.c |
|   59   float16_t acc0, acc1;   in arm_biquad_cascade_df2T_f16() local
|   134  acc1 = vgetq_lane(state, 1);   in arm_biquad_cascade_df2T_f16()
|   135  state = vfmaq(state, a1Coeffs, acc1);   in arm_biquad_cascade_df2T_f16()
|   146  *pOut++ = acc1;   in arm_biquad_cascade_df2T_f16()
|   200  _Float16 acc1;   /* Accumulator */   in arm_biquad_cascade_df2T_f16() local
|   235  acc1 = b0 * Xn1 + d1;   in arm_biquad_cascade_df2T_f16()
|   238  d1 += a1 * acc1;   in arm_biquad_cascade_df2T_f16()
|   241  d2 += a2 * acc1;   in arm_biquad_cascade_df2T_f16()
|   243  *pOut++ = acc1;   in arm_biquad_cascade_df2T_f16()
|   248  acc1 = b0 * Xn1 + d1;   in arm_biquad_cascade_df2T_f16()
|   [all …]
|
| D | arm_biquad_cascade_df2T_f32.c |
|   59   float32_t acc0, acc1;   in arm_biquad_cascade_df2T_f32() local
|   129  acc1 = vgetq_lane(state, 1);   in arm_biquad_cascade_df2T_f32()
|   130  state = vfmaq(state, a1Coeffs, acc1);   in arm_biquad_cascade_df2T_f32()
|   141  *pOut++ = acc1;   in arm_biquad_cascade_df2T_f32()
|   195  float32_t acc1;   /* accumulator */   in arm_biquad_cascade_df2T_f32() local
|   315  acc1 = (b0 * Xn1) + d1;   in arm_biquad_cascade_df2T_f32()
|   318  *pOut++ = acc1;   in arm_biquad_cascade_df2T_f32()
|   322  d1 = ((b1 * Xn1) + (a1 * acc1)) + d2;   in arm_biquad_cascade_df2T_f32()
|   325  d2 = (b2 * Xn1) + (a2 * acc1);   in arm_biquad_cascade_df2T_f32()
|   357  float32_t acc1;   /* Accumulator */   in arm_biquad_cascade_df2T_f32() local
|   [all …]
|
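The df2T snippets above all follow the same two-state recurrence. For reference, here is a minimal scalar sketch of one Direct Form II Transposed stage (an illustration, not the CMSIS source); CMSIS-DSP expects the feedback coefficients a1/a2 to be supplied pre-negated, which is why the quoted lines add rather than subtract them.

```c
/* One sample through one DF2T biquad stage, mirroring the acc1/d1/d2
 * updates quoted above. Illustrative sketch only. */
static float biquad_df2T_step(float x,
                              const float b[3],  /* b0, b1, b2 */
                              const float a[2],  /* a1, a2 (pre-negated, CMSIS convention) */
                              float *d1, float *d2)
{
    float y = b[0] * x + *d1;           /* acc1 = (b0 * Xn1) + d1                */
    *d1 = b[1] * x + a[0] * y + *d2;    /* d1 = ((b1 * Xn1) + (a1 * acc1)) + d2  */
    *d2 = b[2] * x + a[1] * y;          /* d2 = (b2 * Xn1) + (a2 * acc1)         */
    return y;                           /* *pOut++ = acc1                        */
}
```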
| D | arm_fir_q31.c |
|   235  q63_t acc0=0, acc1=0, acc2=0, acc3=0;   in arm_fir_q31_1_4_mve() local
|   257  acc1 = vrmlaldavhq(vecIn0, vecCoeffs);   in arm_fir_q31_1_4_mve()
|   266  acc1 = asrl(acc1, 23);   in arm_fir_q31_1_4_mve()
|   271  *pOutput++ = (q31_t) acc1;   in arm_fir_q31_1_4_mve()
|   298  acc1 = vrmlaldavhq(vecIn0, vecCoeffs);   in arm_fir_q31_1_4_mve()
|   304  acc1 = asrl(acc1, 23);   in arm_fir_q31_1_4_mve()
|   308  *pOutput++ = (q31_t) acc1;   in arm_fir_q31_1_4_mve()
|   326  acc1 = vrmlaldavhq(vecIn0, vecCoeffs);   in arm_fir_q31_1_4_mve()
|   329  acc1 = asrl(acc1, 23);   in arm_fir_q31_1_4_mve()
|   332  *pOutput++ = (q31_t) acc1;   in arm_fir_q31_1_4_mve()
|   [all …]
|
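A recurring pattern in the Q31 MVE kernels above (and in arm_mat_mult_q31.c further down) is vrmlaldavhq followed by asrl(acc1, 23), spelled asrl(acc1, 31 - 8) in arm_fir_interpolate_q31.c. My reading, sketched below as a scalar model rather than the library's code, is that the instruction retains only the top 64 bits of the product sum (the low 8 bits are rounded away), so a further 23-bit shift completes the return to 1.31 format.

```c
#include <stdint.h>

typedef int32_t q31_t;
typedef int64_t q63_t;

/* Scalar model of the vrmlaldavhq + asrl(acc, 23) pair (rounding of the
 * dropped low bits omitted). Hypothetical helper, not a CMSIS API. */
static q31_t fir_q31_tap_sum_model(const q31_t *x, const q31_t *c, int taps)
{
    q63_t acc = 0;
    for (int i = 0; i < taps; i++)
        acc += ((q63_t) x[i] * c[i]) >> 8;  /* low 8 bits already discarded */
    return (q31_t) (acc >> 23);             /* 8 + 23 = 31 -> back to 1.31  */
}
```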
| D | arm_fir_interpolate_q15.c |
|   119  q63_t acc1 = 0LL;   in arm_fir_interpolate_q15() local
|   132  acc1 = vmlaldavaq(acc1, vecState, vecCoef);   in arm_fir_interpolate_q15()
|   155  acc1 = vmlaldavaq(acc1, vecState, vecCoef);   in arm_fir_interpolate_q15()
|   165  acc1 = asrl(acc1, 15);   in arm_fir_interpolate_q15()
|   170  *pDst++ = (q15_t) __SSAT(acc1, 16);   in arm_fir_interpolate_q15()
|   183  q63_t acc1 = 0LL;   in arm_fir_interpolate_q15() local
|   195  acc1 = vmlaldavaq(acc1, vecState, vecCoef);   in arm_fir_interpolate_q15()
|   215  acc1 = vmlaldavaq(acc1, vecState, vecCoef);   in arm_fir_interpolate_q15()
|   222  acc1 = asrl(acc1, 15);   in arm_fir_interpolate_q15()
|   226  *pDst++ = (q15_t) __SSAT(acc1, 16);;   in arm_fir_interpolate_q15()
|   [all …]
|
| D | arm_fir_interpolate_q31.c |
|   116  q63_t acc1 = 0LL;   in arm_fir_interpolate_q31() local
|   129  acc1 = vrmlaldavhaq(acc1, vecState, vecCoef);   in arm_fir_interpolate_q31()
|   152  acc1 = vrmlaldavhaq(acc1, vecState, vecCoef);   in arm_fir_interpolate_q31()
|   162  acc1 = asrl(acc1, 31 - 8);   in arm_fir_interpolate_q31()
|   167  *pDst++ = (q31_t) acc1;   in arm_fir_interpolate_q31()
|   180  q63_t acc1 = 0LL;   in arm_fir_interpolate_q31() local
|   192  acc1 = vrmlaldavhaq(acc1, vecState, vecCoef);   in arm_fir_interpolate_q31()
|   212  acc1 = vrmlaldavhaq(acc1, vecState, vecCoef);   in arm_fir_interpolate_q31()
|   219  acc1 = asrl(acc1, 31 - 8);   in arm_fir_interpolate_q31()
|   223  *pDst++ = (q31_t) acc1;   in arm_fir_interpolate_q31()
|   [all …]
|
| D | arm_conv_q15.c |
|   117  int64_t acc1 = 0LL;   in arm_conv_q15() local
|   122  MVE_INTR_CONV_DUAL_INC_Y_INC_SIZE_Q15(acc0, acc1, pX, pY, count);   in arm_conv_q15()
|   124  *pDst++ = (q15_t) acc1;   in arm_conv_q15()
|   144  int64_t acc1 = 0LL;   in arm_conv_q15() local
|   155  MVE_INTR_CONV_QUAD_INC_X_FIXED_SIZE_Q15(acc0, acc1, acc2, acc3, pX, pY, count);   in arm_conv_q15()
|   157  *pDst++ = (q15_t) acc1;   in arm_conv_q15()
|   167  int64_t acc1 = 0LL;   in arm_conv_q15() local
|   176  MVE_INTR_CONV_DUAL_INC_X_FIXED_SIZE_Q15(acc0, acc1, pX, pY, count);   in arm_conv_q15()
|   178  *pDst++ = (q15_t) acc1;   in arm_conv_q15()
|   199  int64_t acc1 = 0LL;   in arm_conv_q15() local
|   [all …]
|
| D | arm_correlate_q15.c |
|   137  int64_t acc1 = 0LL;   in arm_correlate_q15() local
|   146  MVE_INTR_CORR_DUAL_DEC_Y_INC_SIZE_Q15(acc0, acc1, pX, pY, count);   in arm_correlate_q15()
|   150  *pDst = (q15_t) acc1;   in arm_correlate_q15()
|   171  int64_t acc1 = 0LL;   in arm_correlate_q15() local
|   182  MVE_INTR_CORR_QUAD_INC_X_FIXED_SIZE_Q15(acc0, acc1, acc2, acc3, pX, pY, srcBLen);   in arm_correlate_q15()
|   186  *pDst = (q15_t) acc1;   in arm_correlate_q15()
|   198  int64_t acc1 = 0LL;   in arm_correlate_q15() local
|   207  MVE_INTR_CORR_DUAL_INC_X_FIXED_SIZE_Q15(acc0, acc1, pX, pY, srcBLen);   in arm_correlate_q15()
|   211  *pDst = (q15_t) acc1;   in arm_correlate_q15()
|   234  int64_t acc1 = 0LL;   in arm_correlate_q15() local
|   [all …]
|
| D | arm_fir_q15.c |
|   216  q63_t acc0, acc1, acc2, acc3;   in arm_fir_q15() local
|   252  acc1 = 0LL;   in arm_fir_q15()
|   275  acc1 = vmlaldavaq(acc1, vecIn0, vecCoeffs);   in arm_fir_q15()
|   292  *pOutput++ = (q15_t) MVE_ASRL_SAT16(acc1, 15);   in arm_fir_q15()
|   312  acc1 = 0LL;   in arm_fir_q15()
|   334  acc1 = vmlaldavaq(acc1, vecIn0, vecCoeffs);   in arm_fir_q15()
|   348  acc1 = asrl(acc1, 15);   in arm_fir_q15()
|   352  *pOutput++ = (q15_t) MVE_ASRL_SAT16(acc1, 15);   in arm_fir_q15()
|   363  acc1 = 0LL;   in arm_fir_q15()
|   383  acc1 = vmlaldavaq(acc1, vecIn0, vecCoeffs);   in arm_fir_q15()
|   [all …]
|
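The Q15 kernels (arm_fir_q15.c, arm_fir_interpolate_q15.c, and the q15 conv/correlate files above) share a simpler pattern: accumulate 1.15 x 1.15 products into a 64-bit accumulator with vmlaldavaq, then shift back by 15 and saturate to 16 bits. A scalar sketch of that step, using a hypothetical helper name, is shown below.

```c
#include <stdint.h>

typedef int16_t q15_t;
typedef int64_t q63_t;

/* Scalar equivalent of the vmlaldavaq ... __SSAT(acc >> 15, 16) pattern. */
static q15_t fir_q15_tap_sum_model(const q15_t *x, const q15_t *c, int taps)
{
    q63_t acc = 0;
    for (int i = 0; i < taps; i++)
        acc += (q63_t) x[i] * c[i];        /* 1.15 * 1.15 -> 2.30, summed at 64 bits */

    acc >>= 15;                            /* back to 1.15                           */
    if (acc > INT16_MAX) acc = INT16_MAX;  /* saturate as __SSAT(acc, 16) would      */
    if (acc < INT16_MIN) acc = INT16_MIN;
    return (q15_t) acc;
}
```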
| D | arm_fir_q7.c |
|   207  q31_t acc0, acc1, acc2, acc3;   in arm_fir_q7() local
|   262  acc1 = 0;   in arm_fir_q7()
|   287  acc1 = vmladavaq(acc1, vecIn0, vecCoeffs);   in arm_fir_q7()
|   306  *pOutput++ = (q7_t) __SSAT((acc1 >> 7U), 8);   in arm_fir_q7()
|   326  acc1 = 0;   in arm_fir_q7()
|   344  acc1 = vmladavaq(acc1, vecIn0, vecCoeffs);   in arm_fir_q7()
|   355  *pOutput++ = (q7_t) __SSAT((acc1 >> 7U), 8);   in arm_fir_q7()
|   366  acc1 = 0;   in arm_fir_q7()
|   383  acc1 = vmladavaq(acc1, vecIn0, vecCoeffs);   in arm_fir_q7()
|   391  *pOutput++ = (q7_t) __SSAT((acc1 >> 7U), 8);   in arm_fir_q7()
|   [all …]
|
| D | arm_correlate_f16.c |
|   102  #define MVE_INTR_CORR_DUAL_DEC_Y_INC_SIZE_F16(acc0, acc1, pX, pY, count) …   argument
|   138  … acc1 = vecAddAcrossF16Mve(acc1Vec); \
|   173  #define MVE_INTR_CORR_QUAD_INC_X_FIXED_SIZE_F16(acc0, acc1, acc2, acc3, pX, pY, count) …   argument
|   218  … acc1 = vecAddAcrossF16Mve(acc1Vec); \
|   223  #define MVE_INTR_CORR_DUAL_INC_X_FIXED_SIZE_F16(acc0, acc1, pX, pY, count) …   argument
|   258  … acc1 = vecAddAcrossF16Mve(acc1Vec); \
|   261  #define MVE_INTR_CORR_DUAL_INC_X_DEC_SIZE_F16(acc0, acc1, pX, pY, count) …   argument
|   298  … acc1 = vecAddAcrossF16Mve(acc1Vec); \
|   374  _Float16 acc1;   in arm_correlate_f16() local
|   382  MVE_INTR_CORR_DUAL_DEC_Y_INC_SIZE_F16(acc0, acc1, pX, pY, count);   in arm_correlate_f16()
|   [all …]
|
| D | arm_fir_decimate_fast_q15.c |
|   75   q31_t acc0, acc1;   in arm_fir_decimate_fast_q15() local
|   106  acc1 = 0;   in arm_fir_decimate_fast_q15()
|   131  acc1 = __SMLAD(x1, c0, acc1);   in arm_fir_decimate_fast_q15()
|   142  acc1 = __SMLAD(x1, c0, acc1);   in arm_fir_decimate_fast_q15()
|   169  acc1 = __SMLAD(x1, c0, acc1);   in arm_fir_decimate_fast_q15()
|   182  *pDst++ = (q15_t) (__SSAT((acc1 >> 15), 16));   in arm_fir_decimate_fast_q15()
|   321  q31_t acc0, acc1;   in arm_fir_decimate_fast_q15() local
|   349  acc1 = 0;   in arm_fir_decimate_fast_q15()
|   374  acc1 += x1 * c0;   in arm_fir_decimate_fast_q15()
|   385  acc1 += x1 * c0;   in arm_fir_decimate_fast_q15()
|   [all …]
|
| D | arm_conv_q31.c |
|   116  int64_t acc1 = 0LL;   in arm_conv_q31() local
|   120  MVE_INTR_CONV_DUAL_INC_Y_INC_SIZE_Q31(acc0, acc1, pX, pY, count);   in arm_conv_q31()
|   123  *pDst++ = (q31_t) acc1;   in arm_conv_q31()
|   143  int64_t acc1 = 0LL;   in arm_conv_q31() local
|   154  MVE_INTR_CONV_QUAD_INC_X_FIXED_SIZE_Q31(acc0, acc1, acc2, acc3, pX, pY, count);   in arm_conv_q31()
|   156  *pDst++ = (q31_t) acc1;   in arm_conv_q31()
|   167  int64_t acc1 = 0LL;   in arm_conv_q31() local
|   176  MVE_INTR_CONV_DUAL_INC_X_FIXED_SIZE_Q31(acc0, acc1, pX, pY, count);   in arm_conv_q31()
|   178  *pDst++ = (q31_t) acc1;   in arm_conv_q31()
|   199  int64_t acc1 = 0LL;   in arm_conv_q31() local
|   [all …]
|
| D | arm_correlate_q31.c |
|   137  int64_t acc1 = 0LL;   in arm_correlate_q31() local
|   144  MVE_INTR_CORR_DUAL_DEC_Y_INC_SIZE_Q31(acc0, acc1, pX, pY, count);   in arm_correlate_q31()
|   148  *pDst = (q31_t) acc1;   in arm_correlate_q31()
|   169  int64_t acc1 = 0LL;   in arm_correlate_q31() local
|   178  MVE_INTR_CORR_QUAD_INC_X_FIXED_SIZE_Q31(acc0, acc1, acc2, acc3, pX, pY, srcBLen);   in arm_correlate_q31()
|   182  *pDst = (q31_t) acc1;   in arm_correlate_q31()
|   194  int64_t acc1 = 0LL;   in arm_correlate_q31() local
|   201  MVE_INTR_CORR_DUAL_INC_X_FIXED_SIZE_Q31(acc0, acc1, pX, pY, srcBLen);   in arm_correlate_q31()
|   205  *pDst = (q31_t) acc1;   in arm_correlate_q31()
|   228  int64_t acc1 = 0LL;   in arm_correlate_q31() local
|   [all …]
|
| D | arm_conv_fast_q15.c |
|   73   q31_t sum, acc0, acc1, acc2, acc3;   /* Accumulators */   in arm_conv_fast_q15() local
|   280  acc1 = 0;   in arm_conv_fast_q15()
|   305  acc1 = __SMLADX(x1, c0, acc1);   in arm_conv_fast_q15()
|   326  acc1 = __SMLADX(x3, c0, acc1);   in arm_conv_fast_q15()
|   367  acc1 = __SMLAD(x1, c0, acc1);   in arm_conv_fast_q15()
|   386  acc1 = __SMLADX(x1, c0, acc1);   in arm_conv_fast_q15()
|   404  acc1 = __SMLADX(x1, c0, acc1);   in arm_conv_fast_q15()
|   422  acc1 = __SMLAD(x2, c0, acc1);   in arm_conv_fast_q15()
|   429  write_q15x2_ia (&pOut, __PKHBT((acc0 >> 15), (acc1 >> 15), 16));   in arm_conv_fast_q15()
|   432  write_q15x2_ia (&pOut, __PKHBT((acc1 >> 15), (acc0 >> 15), 16));   in arm_conv_fast_q15()
|
| D | arm_correlate_f32.c |
|   170  float32_t acc1;   in arm_correlate_f32() local
|   179  MVE_INTR_CORR_DUAL_DEC_Y_INC_SIZE_F32(acc0, acc1, pX, pY, count);   in arm_correlate_f32()
|   183  *pDst = acc1;   in arm_correlate_f32()
|   204  float32_t acc1;   in arm_correlate_f32() local
|   215  MVE_INTR_CORR_QUAD_INC_X_FIXED_SIZE_F32(acc0, acc1, acc2, acc3, pX, pY, srcBLen);   in arm_correlate_f32()
|   219  *pDst = acc1;   in arm_correlate_f32()
|   231  float32_t acc1;   in arm_correlate_f32() local
|   240  MVE_INTR_CORR_DUAL_INC_X_FIXED_SIZE_F32(acc0, acc1, pX, pY, srcBLen);   in arm_correlate_f32()
|   244  *pDst = acc1;   in arm_correlate_f32()
|   267  float32_t acc1;   in arm_correlate_f32() local
|   [all …]
|
| /hal_nxp-latest/mcux/mcux-sdk/CMSIS/DSP/PrivateInclude/ |
| D | arm_vec_filtering.h |
|   38   #define MVE_INTR_CORR_QUAD_INC_X_FIXED_SIZE_F32(acc0, acc1, acc2, acc3, pX, pY, count)\   argument
|   87   acc1 = vecAddAcrossF32Mve(acc1Vec); \
|   127  #define MVE_INTR_CORR_DUAL_INC_X_DEC_SIZE_F32(acc0, acc1, pX, pY, count)\   argument
|   168  acc1 = vecAddAcrossF32Mve(acc1Vec); \
|   171  #define MVE_INTR_CORR_DUAL_INC_X_FIXED_SIZE_F32(acc0, acc1, pX, pY, count)\   argument
|   210  acc1 = vecAddAcrossF32Mve(acc1Vec); \
|   213  #define MVE_INTR_CORR_DUAL_DEC_Y_INC_SIZE_F32(acc0, acc1, pX, pY, count)\   argument
|   253  acc1 = vecAddAcrossF32Mve(acc1Vec); \
|   256  #define MVE_INTR_CONV_DUAL_INC_X_DEC_SIZE_F32(acc0, acc1, pX, pY, count) …   argument
|   289  … acc1 = vecAddAcrossF32Mve(acc1Vec); \
|   [all …]
|
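The MVE_INTR_CORR_* / MVE_INTR_CONV_* macros above unroll the inner product across vector lanes and, going by their DUAL/QUAD naming and the paired acc0/acc1 (or acc0..acc3) arguments, appear to compute two or four output points per invocation before reducing each accumulator vector with vecAddAcrossF32Mve. As a point of reference for what a single accumulator ends up holding, a plain scalar tap sum (a hedged sketch, not the macro expansion):

```c
/* One correlation/convolution output: acc = sum of pX[i] * pY[i], with pY
 * walked forward or backward depending on the INC/DEC variant. Sketch only. */
static float corr_tap_sum_model_f32(const float *pX, const float *pY, int count)
{
    float acc = 0.0f;
    for (int i = 0; i < count; i++)
        acc += pX[i] * pY[i];
    return acc;
}
```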
| /hal_nxp-latest/mcux/mcux-sdk/CMSIS/DSP/Source/MatrixFunctions/ |
| D | arm_mat_cmplx_mult_f32.c |
|   82   f32x4_t acc0, acc1;   in arm_mat_cmplx_mult_f32_2x2_mve() local
|   100  acc1 = vcmulq(vecA, vecB);   in arm_mat_cmplx_mult_f32_2x2_mve()
|   101  acc1 = vcmlaq_rot90(acc1, vecA, vecB);   in arm_mat_cmplx_mult_f32_2x2_mve()
|   105  pOut[1 * CMPLX_DIM * MATRIX_DIM2 + 0] = acc1[0] + acc1[2];   in arm_mat_cmplx_mult_f32_2x2_mve()
|   106  pOut[1 * CMPLX_DIM * MATRIX_DIM2 + 1] = acc1[1] + acc1[3];   in arm_mat_cmplx_mult_f32_2x2_mve()
|   121  acc1 = vcmulq(vecA, vecB);   in arm_mat_cmplx_mult_f32_2x2_mve()
|   122  acc1 = vcmlaq_rot90(acc1, vecA, vecB);   in arm_mat_cmplx_mult_f32_2x2_mve()
|   126  pOut[1 * CMPLX_DIM * MATRIX_DIM2 + 0] = acc1[0] + acc1[2];   in arm_mat_cmplx_mult_f32_2x2_mve()
|   127  pOut[1 * CMPLX_DIM * MATRIX_DIM2 + 1] = acc1[1] + acc1[3];   in arm_mat_cmplx_mult_f32_2x2_mve()
|   147  f32x4_t acc0, acc1, acc2;   in arm_mat_cmplx_mult_f32_3x3_mve() local
|   [all …]
|
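The vcmulq / vcmlaq_rot90 pair that shows up throughout the complex matrix kernels is the usual Helium idiom for a full complex multiply on interleaved {real, imag} lanes; the trailing acc1[0] + acc1[2] and acc1[1] + acc1[3] stores then fold the two complex products held in one 128-bit vector. Per lane pair, a scalar sketch of what the idiom contributes (an illustration under that reading, not the library code):

```c
/* Complex multiply-accumulate on interleaved data: out += a * b.
 * vcmulq supplies the a.re * b terms, vcmlaq_rot90 adds the j * a.im * b terms. */
static void cmplx_mac_model_f32(const float a[2], const float b[2], float out[2])
{
    out[0] += a[0] * b[0] - a[1] * b[1];  /* real part      */
    out[1] += a[0] * b[1] + a[1] * b[0];  /* imaginary part */
}
```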
| D | arm_mat_cmplx_mult_q31.c |
|   78   q63_t acc0, acc1, acc2, acc3;   in arm_mat_cmplx_mult_q31_2x2_mve() local
|   93   acc1 = vmlaldavxq_s32(vecA, vecB);   in arm_mat_cmplx_mult_q31_2x2_mve()
|   100  pOut[0 * CMPLX_DIM * MATRIX_DIM2 + 1] = (q31_t) asrl(acc1, 31);   in arm_mat_cmplx_mult_q31_2x2_mve()
|   111  acc1 = vmlaldavxq_s32(vecA, vecB);   in arm_mat_cmplx_mult_q31_2x2_mve()
|   120  pOut[0 * CMPLX_DIM * MATRIX_DIM2 + 1] = (q31_t) asrl(acc1, 31);   in arm_mat_cmplx_mult_q31_2x2_mve()
|   141  q63_t acc0, acc1, acc2, acc3;   in arm_mat_cmplx_mult_q31_3x3_mve() local
|   167  acc1 = vmlaldavxq_s32(vecA, vecB);   in arm_mat_cmplx_mult_q31_3x3_mve()
|   175  acc1 = vmlaldavaxq_s32(acc1, vecA, vecB1);   in arm_mat_cmplx_mult_q31_3x3_mve()
|   182  pOut[0 * CMPLX_DIM * MATRIX_DIM3 + 1] = (q31_t) asrl(acc1, 31);   in arm_mat_cmplx_mult_q31_3x3_mve()
|   188  acc1 = vmlaldavxq_s32(vecA, vecB);   in arm_mat_cmplx_mult_q31_3x3_mve()
|   [all …]
|
| D | arm_mat_mult_q31.c |
|   78   q63_t acc0, acc1;   in arm_mat_mult_q31_2x2_mve() local
|   96   acc1 = vrmlaldavhq(vecA1, vecB);   in arm_mat_mult_q31_2x2_mve()
|   99   acc1 = asrl(acc1, 23);   in arm_mat_mult_q31_2x2_mve()
|   102  pOut[1 * MATRIX_DIM2] = (q31_t) acc1;   in arm_mat_mult_q31_2x2_mve()
|   111  acc1 = vrmlaldavhq(vecA1, vecB);   in arm_mat_mult_q31_2x2_mve()
|   114  acc1 = asrl(acc1, 23);   in arm_mat_mult_q31_2x2_mve()
|   117  pOut[1 * MATRIX_DIM2] = (q31_t) acc1;   in arm_mat_mult_q31_2x2_mve()
|   138  q63_t acc0, acc1, acc2;   in arm_mat_mult_q31_3x3_mve() local
|   153  acc1 = vrmlaldavhq(vecA, vecB);   in arm_mat_mult_q31_3x3_mve()
|   158  acc1 = asrl(acc1, 23);   in arm_mat_mult_q31_3x3_mve()
|   [all …]
|
| D | arm_mat_mult_q15.c |
|   79   q63_t acc0, acc1;   in arm_mat_mult_q15_2x2_mve() local
|   93   acc1 = vmlaldavq(vecA1, vecB);   in arm_mat_mult_q15_2x2_mve()
|   96   acc1 = asrl(acc1, 15);   in arm_mat_mult_q15_2x2_mve()
|   99   pOut[1 * MATRIX_DIM2] = (q15_t) __SSAT(acc1, 16);   in arm_mat_mult_q15_2x2_mve()
|   108  acc1 = vmlaldavq(vecA1, vecB);   in arm_mat_mult_q15_2x2_mve()
|   111  acc1 = asrl(acc1, 15);   in arm_mat_mult_q15_2x2_mve()
|   114  pOut[1 * MATRIX_DIM2] = (q15_t) __SSAT(acc1, 16);   in arm_mat_mult_q15_2x2_mve()
|   136  q63_t acc0, acc1, acc2;   in arm_mat_mult_q15_3x3_mve() local
|   152  acc1 = vmlaldavq(vecA1, vecB);   in arm_mat_mult_q15_3x3_mve()
|   156  acc1 = asrl(acc1, 15);   in arm_mat_mult_q15_3x3_mve()
|   [all …]
|
| D | arm_mat_cmplx_mult_f16.c |
|   76   f16x8_t acc0, acc1;   in arm_mat_cmplx_mult_f16_2x2_mve() local
|   107  acc1 = vcmulq(vecA1, vecB);   in arm_mat_cmplx_mult_f16_2x2_mve()
|   108  acc1 = vcmlaq_rot90(acc1, vecA1, vecB);   in arm_mat_cmplx_mult_f16_2x2_mve()
|   124  vecTmp = (f16x8_t) vrev64q_s32((int32x4_t) acc1);   in arm_mat_cmplx_mult_f16_2x2_mve()
|   125  vecTmp = vaddq(vecTmp, acc1);   in arm_mat_cmplx_mult_f16_2x2_mve()
|   151  f16x8_t acc0, acc1, acc2;   in arm_mat_cmplx_mult_f16_3x3_mve() local
|   176  acc1 = vcmulq(vecA1, vecB);   in arm_mat_cmplx_mult_f16_3x3_mve()
|   177  acc1 = vcmlaq_rot90(acc1, vecA1, vecB);   in arm_mat_cmplx_mult_f16_3x3_mve()
|   183  mve_cmplx_sum_intra_vec_f16(acc1, &pOut[1 * CMPLX_DIM * MATRIX_DIM]);   in arm_mat_cmplx_mult_f16_3x3_mve()
|   196  acc1 = vcmulq(vecA1, vecB);   in arm_mat_cmplx_mult_f16_3x3_mve()
|   [all …]
|
| D | arm_mat_mult_q7.c |
|   71   q31_t acc0, acc1;   in arm_mat_mult_q7_2x2_mve() local
|   85   acc1 = vmladavq_s8(vecA1, vecB);   in arm_mat_mult_q7_2x2_mve()
|   88   pOut[1 * MATRIX_DIM] = (q7_t) __SSAT(acc1 >> 7, 8);   in arm_mat_mult_q7_2x2_mve()
|   97   acc1 = vmladavq_s8(vecA1, vecB);   in arm_mat_mult_q7_2x2_mve()
|   100  pOut[1 * MATRIX_DIM] = (q7_t) __SSAT(acc1 >> 7, 8);   in arm_mat_mult_q7_2x2_mve()
|   121  q31_t acc0, acc1, acc2;   in arm_mat_mult_q7_3x3_mve() local
|   137  acc1 = vmladavq_s8(vecA1, vecB);   in arm_mat_mult_q7_3x3_mve()
|   141  pOut[1 * MATRIX_DIM] = (q7_t) __SSAT(acc1 >> 7, 8);   in arm_mat_mult_q7_3x3_mve()
|   151  acc1 = vmladavq_s8(vecA1, vecB);   in arm_mat_mult_q7_3x3_mve()
|   155  pOut[1 * MATRIX_DIM] = (q7_t) __SSAT(acc1 >> 7, 8);   in arm_mat_mult_q7_3x3_mve()
|   [all …]
|
| /hal_nxp-latest/mcux/mcux-sdk/CMSIS/DSP/Source/ComplexMathFunctions/ |
| D | arm_cmplx_mag_q31.c |
|   68   q31_t acc0, acc1;   /* Accumulators */   in arm_cmplx_mag_q31() local
|   111  acc1 = (q31_t) (((q63_t) imag * imag) >> 33);   in arm_cmplx_mag_q31()
|   114  arm_sqrt_q31(acc0 + acc1, pDst++);   in arm_cmplx_mag_q31()
|   129  q31_t acc0, acc1;   /* Accumulators */   in arm_cmplx_mag_q31() local
|   143  acc1 = (q31_t) (((q63_t) imag * imag) >> 33);   in arm_cmplx_mag_q31()
|   146  arm_sqrt_q31(acc0 + acc1, pDst++);   in arm_cmplx_mag_q31()
|   151  acc1 = (q31_t) (((q63_t) imag * imag) >> 33);   in arm_cmplx_mag_q31()
|   152  arm_sqrt_q31(acc0 + acc1, pDst++);   in arm_cmplx_mag_q31()
|   157  acc1 = (q31_t) (((q63_t) imag * imag) >> 33);   in arm_cmplx_mag_q31()
|   158  arm_sqrt_q31(acc0 + acc1, pDst++);   in arm_cmplx_mag_q31()
|   [all …]
|
| D | arm_cmplx_mag_squared_q31.c |
|   65   q31_t acc0, acc1;   /* Accumulators */   in arm_cmplx_mag_squared_q31() local
|   97   acc1 = (q31_t) (((q63_t) imag * imag) >> 33);   in arm_cmplx_mag_squared_q31()
|   100  *pDst++ = acc0 + acc1;   in arm_cmplx_mag_squared_q31()
|   115  q31_t acc0, acc1;   /* Accumulators */   in arm_cmplx_mag_squared_q31() local
|   129  acc1 = (q31_t) (((q63_t) imag * imag) >> 33);   in arm_cmplx_mag_squared_q31()
|   131  *pDst++ = acc0 + acc1;   in arm_cmplx_mag_squared_q31()
|   136  acc1 = (q31_t) (((q63_t) imag * imag) >> 33);   in arm_cmplx_mag_squared_q31()
|   137  *pDst++ = acc0 + acc1;   in arm_cmplx_mag_squared_q31()
|   142  acc1 = (q31_t) (((q63_t) imag * imag) >> 33);   in arm_cmplx_mag_squared_q31()
|   143  *pDst++ = acc0 + acc1;   in arm_cmplx_mag_squared_q31()
|   [all …]
|
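Both complex-magnitude files above shift each squared term right by 33 before summing. A scalar sketch of why that works (an assumption based on the quoted lines, not the CMSIS source): a 1.31 x 1.31 square is a 2.62 value in 64 bits, and dropping 33 bits leaves enough headroom for real^2 + imag^2 to fit a 32-bit result (roughly a 3.29 fixed-point format), which arm_cmplx_mag_q31 then passes to arm_sqrt_q31.

```c
#include <stdint.h>

typedef int32_t q31_t;
typedef int64_t q63_t;

/* Scalar model of the magnitude-squared step quoted from
 * arm_cmplx_mag_squared_q31.c. Illustrative only. */
static q31_t cmplx_mag_squared_q31_model(q31_t re, q31_t im)
{
    q31_t acc0 = (q31_t) (((q63_t) re * re) >> 33);  /* real * real, downscaled */
    q31_t acc1 = (q31_t) (((q63_t) im * im) >> 33);  /* imag * imag, downscaled */
    return acc0 + acc1;                              /* *pDst++ = acc0 + acc1   */
}
```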