/hal_nxp-3.5.0/mcux/mcux-sdk/CMSIS/DSP/Source/FilteringFunctions/ |
D | arm_biquad_cascade_stereo_df2T_f32.c |
      61   float32_t b0, b1, b2, a1, a2; /* Filter coefficients */   in arm_biquad_cascade_stereo_df2T_f32() local
      90   a1 = *pCoeffs++;                                          in arm_biquad_cascade_stereo_df2T_f32()
      96   aCoeffs = vdupq_n_f32(a1);                                in arm_biquad_cascade_stereo_df2T_f32()
     194   float32_t b0, b1, b2, a1, a2; /* Filter coefficients */   in arm_biquad_cascade_stereo_df2T_f32() local
     205   a1 = pCoeffs[3];                                          in arm_biquad_cascade_stereo_df2T_f32()
     236   d1a = ((b1 * Xn1a) + (a1 * acc1a)) + d2a;                 in arm_biquad_cascade_stereo_df2T_f32()
     237   d1b = ((b1 * Xn1b) + (a1 * acc1b)) + d2b;                 in arm_biquad_cascade_stereo_df2T_f32()
     252   d1a = ((b1 * Xn1a) + (a1 * acc1a)) + d2a;                 in arm_biquad_cascade_stereo_df2T_f32()
     253   d1b = ((b1 * Xn1b) + (a1 * acc1b)) + d2b;                 in arm_biquad_cascade_stereo_df2T_f32()
     268   d1a = ((b1 * Xn1a) + (a1 * acc1a)) + d2a;                 in arm_biquad_cascade_stereo_df2T_f32()
     [all …]
|
D | arm_biquad_cascade_df2T_f64.c |
     149   float64_t b0, b1, b2, a1, a2; /* Filter coefficients */   in arm_biquad_cascade_df2T_f64() local
     161   a1 = pCoeffs[3];                                          in arm_biquad_cascade_df2T_f64()
     187   d1 += a1 * acc1;                                          in arm_biquad_cascade_df2T_f64()
     201   d1 += a1 * acc1;                                          in arm_biquad_cascade_df2T_f64()
     214   d1 += a1 * acc1;                                          in arm_biquad_cascade_df2T_f64()
     227   d1 += a1 * acc1;                                          in arm_biquad_cascade_df2T_f64()
     240   d1 += a1 * acc1;                                          in arm_biquad_cascade_df2T_f64()
     253   d1 += a1 * acc1;                                          in arm_biquad_cascade_df2T_f64()
     266   d1 += a1 * acc1;                                          in arm_biquad_cascade_df2T_f64()
     279   d1 += a1 * acc1;                                          in arm_biquad_cascade_df2T_f64()
     [all …]
|
D | arm_biquad_cascade_stereo_df2T_f16.c |
      65   float16_t b0, b1, b2, a1, a2; /* Filter coefficients */   in arm_biquad_cascade_stereo_df2T_f16() local
      97   a1 = *pCoeffs++;                                          in arm_biquad_cascade_stereo_df2T_f16()
     101   aCoeffs = vdupq_n_f16(a1);                                in arm_biquad_cascade_stereo_df2T_f16()
     207   _Float16 b0, b1, b2, a1, a2; /* Filter coefficients */    in arm_biquad_cascade_stereo_df2T_f16() local
     218   a1 = pCoeffs[3];                                          in arm_biquad_cascade_stereo_df2T_f16()
     249   d1a = ((b1 * Xn1a) + (a1 * acc1a)) + d2a;                 in arm_biquad_cascade_stereo_df2T_f16()
     250   d1b = ((b1 * Xn1b) + (a1 * acc1b)) + d2b;                 in arm_biquad_cascade_stereo_df2T_f16()
     265   d1a = ((b1 * Xn1a) + (a1 * acc1a)) + d2a;                 in arm_biquad_cascade_stereo_df2T_f16()
     266   d1b = ((b1 * Xn1b) + (a1 * acc1b)) + d2b;                 in arm_biquad_cascade_stereo_df2T_f16()
     281   d1a = ((b1 * Xn1a) + (a1 * acc1a)) + d2a;                 in arm_biquad_cascade_stereo_df2T_f16()
     [all …]
|
D | arm_biquad_cascade_df2T_f16.c |
     201   _Float16 b0, b1, b2, a1, a2; /* Filter coefficients */    in arm_biquad_cascade_df2T_f16() local
     212   a1 = pCoeffs[3];                                          in arm_biquad_cascade_df2T_f16()
     238   d1 += a1 * acc1;                                          in arm_biquad_cascade_df2T_f16()
     251   d1 += a1 * acc1;                                          in arm_biquad_cascade_df2T_f16()
     264   d1 += a1 * acc1;                                          in arm_biquad_cascade_df2T_f16()
     277   d1 += a1 * acc1;                                          in arm_biquad_cascade_df2T_f16()
     290   d1 += a1 * acc1;                                          in arm_biquad_cascade_df2T_f16()
     303   d1 += a1 * acc1;                                          in arm_biquad_cascade_df2T_f16()
     316   d1 += a1 * acc1;                                          in arm_biquad_cascade_df2T_f16()
     329   d1 += a1 * acc1;                                          in arm_biquad_cascade_df2T_f16()
     [all …]
|
D | arm_biquad_cascade_df2T_f32.c |
     196   float32_t b0, b1, b2, a1, a2; /* Filter coefficients */   in arm_biquad_cascade_df2T_f32() local
     300   a1 = *pCoeffs++;                                          in arm_biquad_cascade_df2T_f32()
     322   d1 = ((b1 * Xn1) + (a1 * acc1)) + d2;                     in arm_biquad_cascade_df2T_f32()
     358   float32_t b0, b1, b2, a1, a2; /* Filter coefficients */   in arm_biquad_cascade_df2T_f32() local
     369   a1 = pCoeffs[3];                                          in arm_biquad_cascade_df2T_f32()
     395   d1 += a1 * acc1;                                          in arm_biquad_cascade_df2T_f32()
     408   d1 += a1 * acc1;                                          in arm_biquad_cascade_df2T_f32()
     421   d1 += a1 * acc1;                                          in arm_biquad_cascade_df2T_f32()
     434   d1 += a1 * acc1;                                          in arm_biquad_cascade_df2T_f32()
     447   d1 += a1 * acc1;                                          in arm_biquad_cascade_df2T_f32()
     [all …]
|
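The d1 = ((b1 * Xn1) + (a1 * acc1)) + d2 and d1 += a1 * acc1 lines above are the per-sample state update of a transposed direct-form II (DF2T) biquad stage. Below is a minimal scalar sketch of one stage, assuming the CMSIS coefficient order {b0, b1, b2, a1, a2} with the feedback coefficients stored already negated (so they are added, not subtracted); the shipped kernels unroll this loop and use vector intrinsics such as vdupq_n_f32(). It is an illustration, not the library code.

#include <stddef.h>

/* Minimal DF2T biquad stage (illustration only, not the CMSIS kernel). */
static void biquad_df2t_f32_sketch(const float coeffs[5], float state[2],
                                   const float *pSrc, float *pDst, size_t blockSize)
{
    float b0 = coeffs[0], b1 = coeffs[1], b2 = coeffs[2];
    float a1 = coeffs[3], a2 = coeffs[4];     /* pre-negated feedback coefficients */
    float d1 = state[0],  d2 = state[1];      /* two state words per stage */

    for (size_t n = 0; n < blockSize; n++) {
        float xn = pSrc[n];
        float yn = b0 * xn + d1;              /* stage output                        */
        d1 = (b1 * xn + a1 * yn) + d2;        /* cf. "d1 = ((b1*Xn1)+(a1*acc1))+d2"  */
        d2 =  b2 * xn + a2 * yn;
        pDst[n] = yn;
    }
    state[0] = d1;                            /* persist state for the next block    */
    state[1] = d2;
}
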
D | arm_biquad_cascade_df1_32x64_q31.c |
     191   q31_t b0, b1, b2, a1, a2; /* Filter coefficients */   in arm_biquad_cas_df1_32x64_q31_scalar() local
     205   a1 = *pCoeffs++;                                      in arm_biquad_cas_df1_32x64_q31_scalar()
     231   acc += mult32x64(Yn1, a1);                            in arm_biquad_cas_df1_32x64_q31_scalar()
     296   q31_t b0, b1, b2, a1, a2; /* Filter coefficients */   in arm_biquad_cas_df1_32x64_q31() local
     317   a1 = *pCoeffs++;                                      in arm_biquad_cas_df1_32x64_q31()
     351   acc = mult32x64(Yn1, a1);                             in arm_biquad_cas_df1_32x64_q31()
     373   acc = mult32x64(Yn1, a1);                             in arm_biquad_cas_df1_32x64_q31()
     386   acc = mult32x64(Yn1, a1);                             in arm_biquad_cas_df1_32x64_q31()
     396   acc = mult32x64(Yn1, a1);                             in arm_biquad_cas_df1_32x64_q31()
     406   acc = mult32x64(Yn1, a1);                             in arm_biquad_cas_df1_32x64_q31()
     [all …]
|
D | arm_biquad_cascade_df1_init_f16.c |
      99   static void generateCoefsFastBiquadF16(float16_t b0, float16_t b1, float16_t b2, float16_t a1, floa…   in generateCoefsFastBiquadF16() argument
     103   {0, 0, 0, 0, 0, 0, 0, b0, b1, b2, a1, a2},                in generateCoefsFastBiquadF16()
     115   coeffs[1][i] += (a1 * coeffs[0][i]);                      in generateCoefsFastBiquadF16()
     116   coeffs[2][i] += (a1 * coeffs[1][i]) + (a2 * coeffs[0][i]);    in generateCoefsFastBiquadF16()
     117   coeffs[3][i] += (a1 * coeffs[2][i]) + (a2 * coeffs[1][i]);    in generateCoefsFastBiquadF16()
     118   coeffs[4][i] += (a1 * coeffs[3][i]) + (a2 * coeffs[2][i]);    in generateCoefsFastBiquadF16()
     119   coeffs[5][i] += (a1 * coeffs[4][i]) + (a2 * coeffs[3][i]);    in generateCoefsFastBiquadF16()
     120   coeffs[6][i] += (a1 * coeffs[5][i]) + (a2 * coeffs[4][i]);    in generateCoefsFastBiquadF16()
     121   coeffs[7][i] += (a1 * coeffs[6][i]) + (a2 * coeffs[5][i]);    in generateCoefsFastBiquadF16()
|
D | arm_biquad_cascade_df2T_init_f32.c |
     108   float32_t b0[4],b1[4],b2[4],a1[4],a2[4];   in arm_biquad_cascade_df2T_compute_coefs_f32() local
     120   a1[i] = pCoeffs[3];                        in arm_biquad_cascade_df2T_compute_coefs_f32()
     162   *pDstCoeffs++ = a1[0];                     in arm_biquad_cascade_df2T_compute_coefs_f32()
     163   *pDstCoeffs++ = a1[1];                     in arm_biquad_cascade_df2T_compute_coefs_f32()
     164   *pDstCoeffs++ = a1[2];                     in arm_biquad_cascade_df2T_compute_coefs_f32()
     165   *pDstCoeffs++ = a1[3];                     in arm_biquad_cascade_df2T_compute_coefs_f32()
|
D | arm_biquad_cascade_df1_fast_q31.c |
      73   q31_t b0, b1, b2, a1, a2; /* Filter coefficients */   in arm_biquad_cascade_df1_fast_q31() local
      85   a1 = *pCoeffs++;                                      in arm_biquad_cascade_df1_fast_q31()
     122   multAcc_32x32_keep32_R(acc, a1, Yn1);                 in arm_biquad_cascade_df1_fast_q31()
     148   multAcc_32x32_keep32_R(acc, a1, Yn2);                 in arm_biquad_cascade_df1_fast_q31()
     174   multAcc_32x32_keep32_R(acc, a1, Yn1);                 in arm_biquad_cascade_df1_fast_q31()
     201   multAcc_32x32_keep32_R(acc, a1, Yn2);                 in arm_biquad_cascade_df1_fast_q31()
     252   multAcc_32x32_keep32_R(acc, a1, Yn1);                 in arm_biquad_cascade_df1_fast_q31()
|
D | arm_biquad_cascade_df1_init_f32.c |
      99   static void generateCoefsFastBiquadF32(float32_t b0, float32_t b1, float32_t b2, float32_t a1, floa…   in generateCoefsFastBiquadF32() argument
     103   {0, 0, 0, b0, b1, b2, a1, a2},                        in generateCoefsFastBiquadF32()
     111   coeffs[1][i] += a1 * coeffs[0][i];                    in generateCoefsFastBiquadF32()
     112   coeffs[2][i] += a1 * coeffs[1][i] + a2 * coeffs[0][i];    in generateCoefsFastBiquadF32()
     113   coeffs[3][i] += a1 * coeffs[2][i] + a2 * coeffs[1][i];    in generateCoefsFastBiquadF32()
|
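The generateCoefsFastBiquadF16/F32 rows above (and, for the vector DF2T kernel, arm_biquad_cascade_df2T_compute_coefs_f32) build expanded coefficient sets by repeatedly substituting the biquad recurrence into itself, so that several consecutive outputs can be computed in parallel from the inputs and the two saved outputs alone. Below is a small self-contained check of that substitution for the first expanded row, using made-up coefficient values; it illustrates the algebra behind coeffs[1][i] += a1 * coeffs[0][i], not the CMSIS code itself.

#include <stdio.h>

int main(void)
{
    /* made-up coefficients; a1, a2 already negated, CMSIS style */
    double b0 = 0.2, b1 = 0.3, b2 = 0.1, a1 = 0.5, a2 = -0.25;
    double x[4] = { 1.0, -0.5, 0.25, 0.75 };   /* x[n-2], x[n-1], x[n], x[n+1] */
    double ym1 = 0.4, ym2 = -0.1;              /* y[n-1], y[n-2] */

    /* direct recurrence: y[n], then y[n+1] using y[n] */
    double yn  = b0*x[2] + b1*x[1] + b2*x[0] + a1*ym1 + a2*ym2;
    double yn1 = b0*x[3] + b1*x[2] + b2*x[1] + a1*yn  + a2*ym1;

    /* expanded row: y[n+1] written without y[n], directly from the
     * inputs and the two saved outputs y[n-1], y[n-2]               */
    double yn1_expanded = b0*x[3] + (b1 + a1*b0)*x[2] + (b2 + a1*b1)*x[1] + (a1*b2)*x[0]
                        + (a2 + a1*a1)*ym1 + (a1*a2)*ym2;

    printf("%.12f %.12f\n", yn1, yn1_expanded);   /* both print the same value */
    return 0;
}
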
D | arm_biquad_cascade_df1_q15.c |
      83   q15_t a1 = pCoeffs[4];                                in arm_biquad_cascade_df1_q15() local
     101   bCoeffs0[7] = a1;                                     in arm_biquad_cascade_df1_q15()
     103   bCoeffs1[6] = a1;                                     in arm_biquad_cascade_df1_q15()
     328   q31_t b1, a1; /* Filter coefficients */               in arm_biquad_cascade_df1_q15() local
     347   a1 = read_q15x2_ia ((q15_t **) &pCoeffs);             in arm_biquad_cascade_df1_q15()
     377   acc = __SMLALD(a1, state_out, acc);                   in arm_biquad_cascade_df1_q15()
     413   acc = __SMLALD(a1, state_out, acc);                   in arm_biquad_cascade_df1_q15()
     472   acc = __SMLALD(a1, state_out, acc);                   in arm_biquad_cascade_df1_q15()
     526   q15_t b0, b1, b2, a1, a2; /* Filter coefficients */   in arm_biquad_cascade_df1_q15()
     542   a1 = *pCoeffs++;                                      in arm_biquad_cascade_df1_q15()
     [all …]
|
D | arm_biquad_cascade_df1_fast_q15.c |
      74   q31_t b1, a1; /* Filter coefficients */               in arm_biquad_cascade_df1_fast_q15() local
      88   a1 = read_q15x2_ia ((q15_t **) &pCoeffs);             in arm_biquad_cascade_df1_fast_q15()
     119   acc = __SMLAD(a1, state_out, acc);                    in arm_biquad_cascade_df1_fast_q15()
     146   acc = __SMLAD(a1, state_out, acc);                    in arm_biquad_cascade_df1_fast_q15()
     203   acc = __SMLAD(a1, state_out, acc);                    in arm_biquad_cascade_df1_fast_q15()
|
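In the q15 kernels above, read_q15x2_ia() loads two packed 16-bit coefficients into one 32-bit word (so a1 here carries a coefficient pair rather than a single coefficient), and __SMLAD / __SMLALD multiply the corresponding signed halfwords of their two operands and add both products to a 32-bit or 64-bit accumulator. The following is a portable behavioural model of that dual multiply-accumulate, shown only to make the arithmetic explicit; on Cortex-M it is a single instruction, and which halfword holds which value is an implementation detail of the kernel.

#include <stdint.h>
#include <stdio.h>

/* Behavioural model of __SMLAD: acc + lo16(x)*lo16(y) + hi16(x)*hi16(y). */
static int32_t smlad_model(uint32_t x, uint32_t y, int32_t acc)
{
    int16_t xl = (int16_t)(x & 0xFFFFu), xh = (int16_t)(x >> 16);
    int16_t yl = (int16_t)(y & 0xFFFFu), yh = (int16_t)(y >> 16);
    return acc + (int32_t)xl * yl + (int32_t)xh * yh;
}

int main(void)
{
    /* e.g. a packed coefficient pair and a packed pair of previous outputs */
    uint32_t packed_coeffs = ((uint32_t)(uint16_t)-8192 << 16) | (uint16_t)16384;
    uint32_t packed_state  = ((uint32_t)(uint16_t) 1000 << 16) | (uint16_t) 2000;

    printf("%ld\n", (long)smlad_model(packed_coeffs, packed_state, 0));
    return 0;
}
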
D | arm_biquad_cascade_df1_q31.c |
     332   q31_t b0, b1, b2, a1, a2; /* Filter coefficients */   in arm_biquad_cascade_df1_q31() local
     349   a1 = *pCoeffs++;                                      in arm_biquad_cascade_df1_q31()
     375   acc = ((q63_t) b0 * Xn) + ((q63_t) b1 * Xn1) + ((q63_t) b2 * Xn2) + ((q63_t) a1 * Yn1) + ((q63_t) …   in arm_biquad_cascade_df1_q31()
     391   acc = ((q63_t) b0 * Xn2) + ((q63_t) b1 * Xn) + ((q63_t) b2 * Xn1) + ((q63_t) a1 * Yn2) + ((q63_t) …   in arm_biquad_cascade_df1_q31()
     407   acc = ((q63_t) b0 * Xn1) + ((q63_t) b1 * Xn2) + ((q63_t) b2 * Xn) + ((q63_t) a1 * Yn1) + ((q63_t) …   in arm_biquad_cascade_df1_q31()
     423   acc = ((q63_t) b0 * Xn) + ((q63_t) b1 * Xn1) + ((q63_t) b2 * Xn2) + ((q63_t) a1 * Yn2) + ((q63_t) …   in arm_biquad_cascade_df1_q31()
     464   acc = ((q63_t) b0 * Xn) + ((q63_t) b1 * Xn1) + ((q63_t) b2 * Xn2) + ((q63_t) a1 * Yn1) + ((q63_t) …   in arm_biquad_cascade_df1_q31()
|
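The q31 direct-form I kernel above widens every product to q63_t so the five multiply-accumulates of line 375 cannot overflow, then narrows the accumulator back to q31_t; the 32x64 and fast variants do the same accumulation through the mult32x64() and multAcc_32x32_keep32_R() helpers. Below is a sketch of the shape of one output sample, assuming postShift == 0 so the 2.62 accumulator is renormalised with a plain 31-bit shift; the shipped code derives the shift from the instance's postShift field, and any saturation handling is omitted here.

#include <stdint.h>

typedef int32_t q31_t;
typedef int64_t q63_t;

/* One DF1 output sample in Q31 (illustration only). a1, a2 are stored
 * negated, so the feedback terms are added, as in the listed line 375. */
static q31_t biquad_df1_q31_step(q31_t b0, q31_t b1, q31_t b2, q31_t a1, q31_t a2,
                                 q31_t Xn, q31_t Xn1, q31_t Xn2,
                                 q31_t Yn1, q31_t Yn2)
{
    q63_t acc = (q63_t) b0 * Xn
              + (q63_t) b1 * Xn1
              + (q63_t) b2 * Xn2
              + (q63_t) a1 * Yn1
              + (q63_t) a2 * Yn2;

    return (q31_t) (acc >> 31);   /* 2.62 accumulator back to 1.31 (postShift == 0) */
}
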
D | arm_biquad_cascade_df1_f16.c |
     324   _Float16 b0, b1, b2, a1, a2; /* Filter coefficients */   in arm_biquad_cascade_df1_f16() local
     335   a1 = *pCoeffs++;                                         in arm_biquad_cascade_df1_f16()
     364   Yn2 = (b0 * Xn) + (b1 * Xn1) + (b2 * Xn2) + (a1 * Yn1) + (a2 * Yn2);   in arm_biquad_cascade_df1_f16()
     380   Yn1 = (b0 * Xn2) + (b1 * Xn) + (b2 * Xn1) + (a1 * Yn2) + (a2 * Yn1);   in arm_biquad_cascade_df1_f16()
     396   Yn2 = (b0 * Xn1) + (b1 * Xn2) + (b2 * Xn) + (a1 * Yn1) + (a2 * Yn2);   in arm_biquad_cascade_df1_f16()
     412   Yn1 = (b0 * Xn) + (b1 * Xn1) + (b2 * Xn2) + (a1 * Yn2) + (a2 * Yn1);   in arm_biquad_cascade_df1_f16()
     446   acc = (b0 * Xn) + (b1 * Xn1) + (b2 * Xn2) + (a1 * Yn1) + (a2 * Yn2);   in arm_biquad_cascade_df1_f16()
|
D | arm_biquad_cascade_df1_f32.c |
     517   float32_t b0, b1, b2, a1, a2; /* Filter coefficients */   in arm_biquad_cascade_df1_f32() local
     528   a1 = *pCoeffs++;                                          in arm_biquad_cascade_df1_f32()
     557   Yn2 = (b0 * Xn) + (b1 * Xn1) + (b2 * Xn2) + (a1 * Yn1) + (a2 * Yn2);   in arm_biquad_cascade_df1_f32()
     573   Yn1 = (b0 * Xn2) + (b1 * Xn) + (b2 * Xn1) + (a1 * Yn2) + (a2 * Yn1);   in arm_biquad_cascade_df1_f32()
     589   Yn2 = (b0 * Xn1) + (b1 * Xn2) + (b2 * Xn) + (a1 * Yn1) + (a2 * Yn2);   in arm_biquad_cascade_df1_f32()
     605   Yn1 = (b0 * Xn) + (b1 * Xn1) + (b2 * Xn2) + (a1 * Yn2) + (a2 * Yn1);   in arm_biquad_cascade_df1_f32()
     639   acc = (b0 * Xn) + (b1 * Xn1) + (b2 * Xn2) + (a1 * Yn1) + (a2 * Yn2);   in arm_biquad_cascade_df1_f32()
|
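The Yn1/Yn2 assignments listed for arm_biquad_cascade_df1_f16() and arm_biquad_cascade_df1_f32() are the plain direct-form I recurrence; the rotation between Xn/Xn1/Xn2 and Yn1/Yn2 across lines 557-605 is four-way loop unrolling that avoids shifting the delay line on every sample. A minimal, un-unrolled sketch of one stage follows, again assuming the CMSIS convention of pre-negated a1, a2; it is an illustration, not the library kernel.

#include <stddef.h>

/* Minimal DF1 biquad stage (illustration only). State per stage is
 * {x[n-1], x[n-2], y[n-1], y[n-2]}. */
static void biquad_df1_f32_sketch(const float coeffs[5], float state[4],
                                  const float *pSrc, float *pDst, size_t blockSize)
{
    float b0 = coeffs[0], b1 = coeffs[1], b2 = coeffs[2];
    float a1 = coeffs[3], a2 = coeffs[4];     /* pre-negated feedback coefficients */
    float xn1 = state[0], xn2 = state[1], yn1 = state[2], yn2 = state[3];

    for (size_t n = 0; n < blockSize; n++) {
        float xn = pSrc[n];
        /* same expression as the listed lines 364 / 557 */
        float yn = (b0 * xn) + (b1 * xn1) + (b2 * xn2) + (a1 * yn1) + (a2 * yn2);
        pDst[n] = yn;
        xn2 = xn1; xn1 = xn;                  /* shift the delay line */
        yn2 = yn1; yn1 = yn;
    }
    state[0] = xn1; state[1] = xn2; state[2] = yn1; state[3] = yn2;
}
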
/hal_nxp-3.5.0/mcux/mcux-sdk/CMSIS/DSP/Source/TransformFunctions/ |
D | arm_cfft_radix2_f16.c |
     126   float16_t a0, a1;                           in arm_radix2_butterfly_f16() local
     153   a1 = pSrc[2 * l + 1] + pSrc[2 * i + 1];     in arm_radix2_butterfly_f16()
     161   pSrc[2 * i + 1] = a1;                       in arm_radix2_butterfly_f16()
     195   a1 = pSrc[2 * l + 1] + pSrc[2 * i + 1];     in arm_radix2_butterfly_f16()
     203   pSrc[2 * i + 1] = a1;                       in arm_radix2_butterfly_f16()
     222   a1 = pSrc[2 * i + 3] + pSrc[2 * i + 1];     in arm_radix2_butterfly_f16()
     225   pSrc[2 * i + 1] = a1;                       in arm_radix2_butterfly_f16()
     258   a1 = pSrc[2 * l + 1] + pSrc[2 * i + 1];     in arm_radix2_butterfly_f16()
     266   pSrc[2 * i + 1] = a1;                       in arm_radix2_butterfly_f16()
     295   float16_t a0, a1;                           in arm_radix2_butterfly_inverse_f16() local
     [all …]
|
D | arm_cfft_radix2_f32.c |
     124   float32_t a0, a1;                           in arm_radix2_butterfly_f32() local
     151   a1 = pSrc[2 * l + 1] + pSrc[2 * i + 1];     in arm_radix2_butterfly_f32()
     159   pSrc[2 * i + 1] = a1;                       in arm_radix2_butterfly_f32()
     193   a1 = pSrc[2 * l + 1] + pSrc[2 * i + 1];     in arm_radix2_butterfly_f32()
     201   pSrc[2 * i + 1] = a1;                       in arm_radix2_butterfly_f32()
     220   a1 = pSrc[2 * i + 3] + pSrc[2 * i + 1];     in arm_radix2_butterfly_f32()
     223   pSrc[2 * i + 1] = a1;                       in arm_radix2_butterfly_f32()
     256   a1 = pSrc[2 * l + 1] + pSrc[2 * i + 1];     in arm_radix2_butterfly_f32()
     264   pSrc[2 * i + 1] = a1;                       in arm_radix2_butterfly_f32()
     293   float32_t a0, a1;                           in arm_radix2_butterfly_inverse_f32() local
     [all …]
|
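In the radix-2 kernels, a0 and a1 pick up the real and imaginary parts of the butterfly sum, written back in place at index i (cf. a1 = pSrc[2*l+1] + pSrc[2*i+1] followed by pSrc[2*i+1] = a1), while the difference of the two elements is multiplied by the twiddle factor and stored at index l. The sketch below is a textbook decimation-in-frequency butterfly on interleaved {re, im} data, for orientation only; the CMSIS loops differ in index generation, final-stage handling, and the inverse-transform twiddle sign.

#include <stddef.h>

/* One radix-2 DIF butterfly (illustration). Indices i and l are the two
 * elements a stage combines; (c, s) represents the twiddle cos - j*sin. */
static void radix2_dif_butterfly(float *pSrc, size_t i, size_t l, float c, float s)
{
    float xr = pSrc[2 * i], xi = pSrc[2 * i + 1];
    float yr = pSrc[2 * l], yi = pSrc[2 * l + 1];

    float a0 = xr + yr;                 /* sum, kept at index i                      */
    float a1 = xi + yi;
    float dr = xr - yr;                 /* difference, twiddled and stored at index l */
    float di = xi - yi;

    pSrc[2 * i]     = a0;
    pSrc[2 * i + 1] = a1;
    pSrc[2 * l]     = dr * c + di * s;  /* (dr + j*di) * (c - j*s) */
    pSrc[2 * l + 1] = di * c - dr * s;
}
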
D | arm_cfft_radix4_f16.c |
      85   float16_t p0, p1,p2,p3,a0,a1;               in arm_cfft_radix4by2_f16() local
     102   a1 = pSrc[2 * l + 1] + pSrc[2 * i + 1];     in arm_cfft_radix4by2_f16()
     110   pSrc[2 * i + 1] = a1;                       in arm_cfft_radix4by2_f16()
     196   float16_t a0,a1,a2,a3,a4,a5,a6,a7;          in arm_radix4_butterfly_f16() local
     521   a1 = (Yaplusc + Ybplusd);                   in arm_radix4_butterfly_f16()
     536   ptr1[1] = a1;                               in arm_radix4_butterfly_f16()
     698   float16_t a0,a1,a2,a3,a4,a5,a6,a7;          in arm_radix4_butterfly_inverse_f16() local
    1027   a1 = (Yaplusc + Ybplusd);                   in arm_radix4_butterfly_inverse_f16()
    1042   p1 = a1 * onebyfftLen;                      in arm_radix4_butterfly_inverse_f16()
|
D | arm_cfft_f64.c |
     203   float64_t p0, p1,p2,p3,a0,a1;               in arm_cfft_radix4by2_f64() local
     220   a1 = pSrc[2 * l + 1] + pSrc[2 * i + 1];     in arm_cfft_radix4by2_f64()
     228   pSrc[2 * i + 1] = a1;                       in arm_cfft_radix4by2_f64()
|
D | arm_cfft_radix4_f32.c |
     131   float32_t a0,a1,a2,a3,a4,a5,a6,a7;          in arm_radix4_butterfly_f32() local
     456   a1 = (Yaplusc + Ybplusd);                   in arm_radix4_butterfly_f32()
     471   ptr1[1] = a1;                               in arm_radix4_butterfly_f32()
     631   float32_t a0,a1,a2,a3,a4,a5,a6,a7;          in arm_radix4_butterfly_inverse_f32() local
     960   a1 = (Yaplusc + Ybplusd);                   in arm_radix4_butterfly_inverse_f32()
     975   p1 = a1 * onebyfftLen;                      in arm_radix4_butterfly_inverse_f32()
|
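In the radix-4 kernels, a0 … a7 collect the real and imaginary sums and differences of the four inputs of a butterfly; a1 = (Yaplusc + Ybplusd), for instance, is the imaginary part of the all-sum output, and the inverse kernels additionally scale by onebyfftLen (1/fftLen), as in p1 = a1 * onebyfftLen. For orientation, here is a textbook radix-4 decimation-in-frequency combine step; the CMSIS code fuses the subsequent twiddle multiplies and uses a different variable layout, so this is only an illustration of the underlying butterfly.

#include <complex.h>

/* One radix-4 DIF combine step (illustration). With W = exp(-j*2*pi/N),
 * y[1], y[2], y[3] are multiplied by W^k, W^2k, W^3k afterwards. */
static void radix4_dif_combine(double complex x0, double complex x1,
                               double complex x2, double complex x3,
                               double complex y[4])
{
    double complex s02 = x0 + x2, d02 = x0 - x2;
    double complex s13 = x1 + x3, d13 = x1 - x3;

    y[0] = s02 + s13;          /* all-sum output; its imaginary part plays the role of "a1" above */
    y[1] = d02 - I * d13;
    y[2] = s02 - s13;
    y[3] = d02 + I * d13;
}
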
/hal_nxp-3.5.0/mcux/mcux-sdk/CMSIS/RTOS2/RTX/Source/ |
D | rtx_core_ca.h |
     224   __STATIC_INLINE t __svc##f (t1 a1) { \
     225     svc##f(svcRtx##f,a1); \
     231   __STATIC_INLINE t __svc##f (t1 a1) { \
     232     return svc##f(svcRtx##f,a1); \
     238   __STATIC_INLINE t __svc##f (t1 a1, t2 a2) { \
     239     return svc##f(svcRtx##f,a1,a2); \
     245   __STATIC_INLINE t __svc##f (t1 a1, t2 a2, t3 a3) { \
     246     return svc##f(svcRtx##f,a1,a2,a3); \
     252   __STATIC_INLINE t __svc##f (t1 a1, t2 a2, t3 a3, t4 a4) { \
     253     return svc##f(svcRtx##f,a1,a2,a3,a4); \
     [all …]
|
D | rtx_core_cm.h |
     233   __STATIC_INLINE t __svc##f (t1 a1) { \
     234     svc##f(svcRtx##f,a1); \
     240   __STATIC_INLINE t __svc##f (t1 a1) { \
     241     return svc##f(svcRtx##f,a1); \
     247   __STATIC_INLINE t __svc##f (t1 a1, t2 a2) { \
     248     return svc##f(svcRtx##f,a1,a2); \
     254   __STATIC_INLINE t __svc##f (t1 a1, t2 a2, t3 a3) { \
     255     return svc##f(svcRtx##f,a1,a2,a3); \
     261   __STATIC_INLINE t __svc##f (t1 a1, t2 a2, t3 a3, t4 a4) { \
     262     return svc##f(svcRtx##f,a1,a2,a3,a4); \
     [all …]
|
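The rtx_core_ca.h / rtx_core_cm.h entries are the wrapper-generating macros of the RTX kernel: for each service call they emit a __STATIC_INLINE __svc<Name>() function whose a1 … a4 parameters are forwarded, together with the kernel-side handler svcRtx<Name>, into an SVC trampoline svc<Name>. Below is a small self-contained imitation of that token-pasting pattern with made-up names (Demo, demo_trampoline, SVC_WRAP_1); in the real headers the trampoline is itself generated per function and issues the SVC instruction with the handler address in a register.

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the RTX pieces: a "kernel" handler and a trampoline that
 * would normally raise the SVC exception with the handler's address. */
static uint32_t svcRtxDemo(uint32_t arg) { return arg + 1u; }
static uint32_t demo_trampoline(uint32_t (*handler)(uint32_t), uint32_t a1) {
    return handler(a1);                  /* real code: load the address, execute SVC */
}

/* One-argument wrapper generator, same shape as the listed macro:
 * __STATIC_INLINE t __svc##f (t1 a1) { return svc##f(svcRtx##f, a1); } */
#define SVC_WRAP_1(f, t, t1)                              \
    static inline t __svc##f (t1 a1) {                    \
        return demo_trampoline(svcRtx##f, a1);            \
    }

SVC_WRAP_1(Demo, uint32_t, uint32_t)     /* generates __svcDemo() */

int main(void) {
    printf("%u\n", (unsigned)__svcDemo(41u));   /* prints 42 */
    return 0;
}
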
/hal_nxp-3.5.0/mcux/mcux-sdk/CMSIS/DSP/Source/MatrixFunctions/ |
D | arm_mat_cmplx_mult_f32.c |
     853   float32_t a1, a1B,b1, b1B, c1, d1;          in arm_mat_cmplx_mult_f32() local
     991   a1 = *pIn1;                                 in arm_mat_cmplx_mult_f32()
    1001   sumReal1 += a1 * c1;                        in arm_mat_cmplx_mult_f32()
    1012   sumImag2 += a1 * d1;                        in arm_mat_cmplx_mult_f32()
    1129   a1 = *pIn1;                                 in arm_mat_cmplx_mult_f32()
    1135   sumReal1 += a1 * c1;                        in arm_mat_cmplx_mult_f32()
    1142   sumImag2 += a1 * d1;                        in arm_mat_cmplx_mult_f32()
    1195   float32_t a1, b1, c1, d1;                   in arm_mat_cmplx_mult_f32() local
    1280   a1 = *(pIn1 );                              in arm_mat_cmplx_mult_f32()
    1286   sumReal += a1 * c1;                         in arm_mat_cmplx_mult_f32()
     [all …]
|
D | arm_mat_cmplx_mult_f16.c |
     718   _Float16 a1, b1, c1, d1;                    in arm_mat_cmplx_mult_f16() local
     803   a1 = *(pIn1 );                              in arm_mat_cmplx_mult_f16()
     809   sumReal += a1 * c1;                         in arm_mat_cmplx_mult_f16()
     818   sumImag += a1 * d1;                         in arm_mat_cmplx_mult_f16()
     839   a1 = *(pIn1 );                              in arm_mat_cmplx_mult_f16()
     845   sumReal += a1 * c1;                         in arm_mat_cmplx_mult_f16()
     854   sumImag += a1 * d1;                         in arm_mat_cmplx_mult_f16()
     874   a1 = *(pIn1 );                              in arm_mat_cmplx_mult_f16()
     880   sumReal += a1 * c1;                         in arm_mat_cmplx_mult_f16()
     889   sumImag += a1 * d1;                         in arm_mat_cmplx_mult_f16()
|
D | arm_mat_cmplx_mult_q31.c |
     851   q31_t a1, b1, c1, d1;                       in arm_mat_cmplx_mult_q31() local
     936   a1 = *(pIn1 );                              in arm_mat_cmplx_mult_q31()
     942   sumReal += (q63_t) a1 * c1;                 in arm_mat_cmplx_mult_q31()
     951   sumImag += (q63_t) a1 * d1;                 in arm_mat_cmplx_mult_q31()
     972   a1 = *(pIn1 );                              in arm_mat_cmplx_mult_q31()
     978   sumReal += (q63_t) a1 * c1;                 in arm_mat_cmplx_mult_q31()
     987   sumImag += (q63_t) a1 * d1;                 in arm_mat_cmplx_mult_q31()
    1007   a1 = *(pIn1 );                              in arm_mat_cmplx_mult_q31()
    1013   sumReal += (q63_t) a1 * c1;                 in arm_mat_cmplx_mult_q31()
    1022   sumImag += (q63_t) a1 * d1;                 in arm_mat_cmplx_mult_q31()
|
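In the complex matrix multiply kernels, a1/b1 hold the real and imaginary parts of an element of the first matrix and c1/d1 those of the second; each inner-loop step accumulates the real part of the product as a1*c1 - b1*d1 and the imaginary part as a1*d1 + b1*c1 (the q31 variant widens to q63_t first, and the shipped kernels unroll and interleave the loads). A minimal sketch of that inner accumulation, with a hypothetical helper name and parameters (not the CMSIS API):

#include <stddef.h>

/* Complex dot product of one row of A with one column of B, both stored as
 * interleaved {re, im} pairs (illustration only). strideB is the distance,
 * in floats, between consecutive elements of the column (2 * B's columns). */
static void cmplx_dot_f32_sketch(const float *rowA, const float *colB,
                                 size_t numCols, size_t strideB,
                                 float *outRe, float *outIm)
{
    float sumReal = 0.0f, sumImag = 0.0f;

    for (size_t k = 0; k < numCols; k++) {
        float a1 = rowA[2 * k];             /* Re(A[i][k]) */
        float b1 = rowA[2 * k + 1];         /* Im(A[i][k]) */
        float c1 = colB[k * strideB];       /* Re(B[k][j]) */
        float d1 = colB[k * strideB + 1];   /* Im(B[k][j]) */

        sumReal += a1 * c1 - b1 * d1;       /* cf. "sumReal += a1 * c1" above */
        sumImag += a1 * d1 + b1 * c1;       /* cf. "sumImag += a1 * d1" above */
    }
    *outRe = sumReal;
    *outIm = sumImag;
}
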