/hal_nxp-3.6.0/mcux/mcux-sdk/CMSIS/DSP/Source/FilteringFunctions/
arm_biquad_cascade_stereo_df2T_f16.c  (all hits in arm_biquad_cascade_stereo_df2T_f16)
     65  float16_t b0, b1, b2, a1, a2;         /* Filter coefficients */   (local)
     98  a2 = *pCoeffs++;
    102  aCoeffs = vsetq_lane(a2, aCoeffs, 2);
    103  aCoeffs = vsetq_lane(a2, aCoeffs, 3);
    104  aCoeffs = vsetq_lane(a2, aCoeffs, 6);
    105  aCoeffs = vsetq_lane(a2, aCoeffs, 7);
    207  _Float16 b0, b1, b2, a1, a2;          /* Filter coefficients */   (local)
    219  a2 = pCoeffs[4];
    252  d2a = (b2 * Xn1a) + (a2 * acc1a);
    253  d2b = (b2 * Xn1b) + (a2 * acc1b);
    [all …]

arm_biquad_cascade_stereo_df2T_f32.c  (all hits in arm_biquad_cascade_stereo_df2T_f32)
     61  float32_t b0, b1, b2, a1, a2;         /* Filter coefficients */   (local)
     91  a2 = *pCoeffs++;
     97  aCoeffs = vsetq_lane(a2, aCoeffs, 2);
     98  aCoeffs = vsetq_lane(a2, aCoeffs, 3);
    194  float32_t b0, b1, b2, a1, a2;         /* Filter coefficients */   (local)
    206  a2 = pCoeffs[4];
    239  d2a = (b2 * Xn1a) + (a2 * acc1a);
    240  d2b = (b2 * Xn1b) + (a2 * acc1b);
    255  d2a = (b2 * Xn1a) + (a2 * acc1a);
    256  d2b = (b2 * Xn1b) + (a2 * acc1b);
    [all …]

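Both stereo kernels apply one coefficient set per stage to the left and right channels while keeping a separate pair of state values per channel; in the Helium builds, the vsetq_lane(a2, ...) hits broadcast a2 into several lanes of a coefficient vector so both channels are updated in one vector operation. A minimal scalar sketch of one stage is given below, assuming the CMSIS-DSP convention that a1/a2 are stored pre-negated (which is why the kernels add the feedback terms); all names are illustrative, not taken from the library.

    /* One stereo DF2T biquad stage; a1/a2 assumed pre-negated (CMSIS-DSP convention). */
    static void biquad_df2t_stereo_stage(const float coef[5],  /* {b0, b1, b2, a1, a2}    */
                                         float state[4],       /* {d1L, d2L, d1R, d2R}    */
                                         const float *src,     /* interleaved L/R samples */
                                         float *dst,
                                         unsigned nFrames)
    {
        const float b0 = coef[0], b1 = coef[1], b2 = coef[2], a1 = coef[3], a2 = coef[4];
        float d1a = state[0], d2a = state[1];   /* left-channel state  */
        float d1b = state[2], d2b = state[3];   /* right-channel state */

        while (nFrames-- > 0U) {
            float xa = *src++, xb = *src++;
            float ya = b0 * xa + d1a;            /* y  = b0*x + d1         */
            float yb = b0 * xb + d1b;
            d1a = b1 * xa + a1 * ya + d2a;       /* d1 = b1*x + a1*y + d2  */
            d1b = b1 * xb + a1 * yb + d2b;
            d2a = b2 * xa + a2 * ya;             /* d2 = b2*x + a2*y       */
            d2b = b2 * xb + a2 * yb;
            *dst++ = ya;
            *dst++ = yb;
        }
        state[0] = d1a;  state[1] = d2a;  state[2] = d1b;  state[3] = d2b;
    }

The d2a/d2b assignments in the hits above correspond to the last two state updates of this loop.
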
arm_biquad_cascade_df2T_f64.c  (all hits in arm_biquad_cascade_df2T_f64)
    149  float64_t b0, b1, b2, a1, a2;         /* Filter coefficients */   (local)
    162  a2 = pCoeffs[4];
    190  d2 += a2 * acc1;
    204  d2 += a2 * acc1;
    217  d2 += a2 * acc1;
    230  d2 += a2 * acc1;
    243  d2 += a2 * acc1;
    256  d2 += a2 * acc1;
    269  d2 += a2 * acc1;
    282  d2 += a2 * acc1;
    [all …]

arm_biquad_cascade_df2T_f16.c  (all hits in arm_biquad_cascade_df2T_f16)
    201  _Float16 b0, b1, b2, a1, a2;          /* Filter coefficients */   (local)
    213  a2 = pCoeffs[4];
    241  d2 += a2 * acc1;
    254  d2 += a2 * acc1;
    267  d2 += a2 * acc1;
    280  d2 += a2 * acc1;
    293  d2 += a2 * acc1;
    306  d2 += a2 * acc1;
    319  d2 += a2 * acc1;
    332  d2 += a2 * acc1;
    [all …]

arm_biquad_cascade_df2T_f32.c  (all hits in arm_biquad_cascade_df2T_f32)
    196  float32_t b0, b1, b2, a1, a2;         /* Filter coefficients */   (local)
    301  a2 = *pCoeffs++;
    325  d2 = (b2 * Xn1) + (a2 * acc1);
    358  float32_t b0, b1, b2, a1, a2;         /* Filter coefficients */   (local)
    370  a2 = pCoeffs[4];
    398  d2 += a2 * acc1;
    411  d2 += a2 * acc1;
    424  d2 += a2 * acc1;
    437  d2 += a2 * acc1;
    450  d2 += a2 * acc1;
    [all …]

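The mono DF2T kernels (f64/f16/f32) walk a cascade of second-order stages: each stage consumes five coefficients {b0, b1, b2, a1, a2} and two state words, and the output of one stage feeds the next. The regularly spaced "d2 += a2 * acc1" hits come from an unrolled sample loop in which the d2 update is split into two statements. A stripped-down, un-unrolled sketch of that structure (a1/a2 again assumed pre-negated; names are illustrative):

    /* Cascade of numStages DF2T biquads, filtering buf in place stage by stage. */
    static void biquad_cascade_df2t(const double *coef,  /* 5 * numStages coefficients */
                                    double *state,       /* 2 * numStages state words  */
                                    double *buf,         /* blockSize samples          */
                                    unsigned blockSize,
                                    unsigned numStages)
    {
        for (unsigned s = 0; s < numStages; s++) {
            const double b0 = coef[0], b1 = coef[1], b2 = coef[2];
            const double a1 = coef[3], a2 = coef[4];
            double d1 = state[0], d2 = state[1];

            for (unsigned n = 0; n < blockSize; n++) {
                double x    = buf[n];
                double acc1 = b0 * x + d1;   /* stage output                    */
                d1  = b1 * x + d2;
                d1 += a1 * acc1;
                d2  = b2 * x;
                d2 += a2 * acc1;             /* matches "d2 += a2 * acc1" above */
                buf[n] = acc1;
            }
            state[0] = d1;
            state[1] = d2;
            coef  += 5;
            state += 2;
        }
    }

Splitting the d1/d2 updates mirrors the two-statement form in the hits: the b1*x and b2*x products can be issued before the stage output acc1 is available.
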
arm_biquad_cascade_df1_32x64_q31.c
  in arm_biquad_cas_df1_32x64_q31_scalar:
    191  q31_t b0, b1, b2, a1, a2;             /* Filter coefficients */   (local)
    206  a2 = *pCoeffs++;
    233  acc += mult32x64(Yn2, a2);
  in arm_biquad_cas_df1_32x64_q31:
    296  q31_t b0, b1, b2, a1, a2;             /* Filter coefficients */   (local)
    318  a2 = *pCoeffs++;
    352  acc += mult32x64(Yn2, a2);
    374  acc += mult32x64(Yn2, a2);
    387  acc += mult32x64(Yn2, a2);
    397  acc += mult32x64(Yn2, a2);
    407  acc += mult32x64(Yn2, a2);
    [all …]

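In the 32x64 variant the recursive outputs Yn1/Yn2 are held at 64-bit precision while the coefficients stay q31, so the feedback terms need a 32x64-bit multiply; mult32x64 is an internal helper of this file. The idea can be sketched by splitting the 64-bit operand into two halves (the helper name, scaling and truncation behaviour below are illustrative, not the library's exact arithmetic; arithmetic right shifts are assumed):

    #include <stdint.h>

    typedef int32_t q31_t;
    typedef int64_t q63_t;

    /* Illustrative 32x64 multiply: (Q1.63 state) * (Q1.31 coefficient) -> Q1.63.
       The 64-bit operand is split into a signed high word and an unsigned low
       word, and the two partial products are rescaled and recombined. */
    static q63_t mul_q63_q31(q63_t state, q31_t coef)
    {
        q63_t    hi = state >> 32;                         /* signed upper 32 bits   */
        uint32_t lo = (uint32_t) (state & 0xFFFFFFFFu);    /* unsigned lower 32 bits */

        q63_t p_hi = hi * (q63_t) coef;                    /* carries weight 2^32    */
        q63_t p_lo = ((q63_t) coef * lo) >> 31;            /* low part, rescaled     */

        return (p_hi << 1) + p_lo;                         /* overall >> 31 scaling  */
    }
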
arm_biquad_cascade_df1_init_f16.c  (all hits in generateCoefsFastBiquadF16)
     99  …id generateCoefsFastBiquadF16(float16_t b0, float16_t b1, float16_t b2, float16_t a1, float16_t a2,   (argument)
    103  {0, 0, 0, 0, 0, 0, 0, b0, b1, b2, a1, a2},
    104  {0, 0, 0, 0, 0, 0, b0, b1, b2, 0, a2, 0},
    116  coeffs[2][i] += (a1 * coeffs[1][i]) + (a2 * coeffs[0][i]);
    117  coeffs[3][i] += (a1 * coeffs[2][i]) + (a2 * coeffs[1][i]);
    118  coeffs[4][i] += (a1 * coeffs[3][i]) + (a2 * coeffs[2][i]);
    119  coeffs[5][i] += (a1 * coeffs[4][i]) + (a2 * coeffs[3][i]);
    120  coeffs[6][i] += (a1 * coeffs[5][i]) + (a2 * coeffs[4][i]);
    121  coeffs[7][i] += (a1 * coeffs[6][i]) + (a2 * coeffs[5][i]);

arm_biquad_cascade_df1_fast_q31.c  (all hits in arm_biquad_cascade_df1_fast_q31)
     73  q31_t b0, b1, b2, a1, a2;             /* Filter coefficients */   (local)
     86  a2 = *pCoeffs++;
    125  multAcc_32x32_keep32_R(acc, a2, Yn2);
    151  multAcc_32x32_keep32_R(acc, a2, Yn1);
    177  multAcc_32x32_keep32_R(acc, a2, Yn2);
    204  multAcc_32x32_keep32_R(acc, a2, Yn1);
    255  multAcc_32x32_keep32_R(acc, a2, Yn2);

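The "fast" q31 kernel keeps its accumulator at 32-bit width: each multAcc_32x32_keep32_R hit folds the rounded upper half of a 32x32-bit product into acc, trading precision for speed compared with the 64-bit accumulation in arm_biquad_cascade_df1_q31. The operation can be approximated as below; the exact rounding and overflow behaviour of the CMSIS-DSP macro may differ, so treat this only as a model of its shape:

    #include <stdint.h>

    typedef int32_t q31_t;
    typedef int64_t q63_t;

    /* Approximate model of multAcc_32x32_keep32_R: multiply two q31 values,
       keep the rounded top 32 bits of the product, accumulate into acc.
       Wrap-around handling of the final addition is omitted here. */
    static inline q31_t mult_acc_keep32_r(q31_t acc, q31_t x, q31_t y)
    {
        q63_t prod = (q63_t) x * y;                          /* full 64-bit product  */
        q31_t high = (q31_t) ((prod + (1LL << 31)) >> 32);   /* round, keep top word */
        return acc + high;
    }
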
arm_biquad_cascade_df2T_init_f32.c  (all hits in arm_biquad_cascade_df2T_compute_coefs_f32)
    108  float32_t b0[4],b1[4],b2[4],a1[4],a2[4];   (local)
    121  a2[i] = pCoeffs[4];
    168  *pDstCoeffs++ = a2[0];
    169  *pDstCoeffs++ = a2[1];
    170  *pDstCoeffs++ = a2[2];
    171  *pDstCoeffs++ = a2[3];

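arm_biquad_cascade_df2T_compute_coefs_f32 reorders the per-stage {b0, b1, b2, a1, a2} sets into the interleaved layout the Helium kernel expects; applications normally call only the public init/process pair. A typical usage sketch follows; the stage count, block size and coefficient values are placeholders, and a1/a2 must already be negated relative to the textbook difference equation. On Helium-enabled builds the init path expands the coefficients through the helper above, so the coefficient-buffer size requirements documented for that configuration should be checked.

    #include "arm_math.h"

    #define NUM_STAGES  2U
    #define BLOCK_SIZE  128U

    /* 5 coefficients per stage: {b0, b1, b2, a1, a2} with a1/a2 already negated
       - placeholder values only. */
    static float32_t coeffs[5U * NUM_STAGES] = {
        0.1f, 0.2f, 0.1f,  1.2f, -0.5f,
        0.3f, 0.0f, 0.3f,  0.9f, -0.4f,
    };

    static float32_t state[2U * NUM_STAGES];   /* 2 state values per stage */
    static arm_biquad_cascade_df2T_instance_f32 S;

    void filter_setup(void)
    {
        arm_biquad_cascade_df2T_init_f32(&S, NUM_STAGES, coeffs, state);
    }

    void filter_block(const float32_t *in, float32_t *out)
    {
        arm_biquad_cascade_df2T_f32(&S, in, out, BLOCK_SIZE);
    }
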
arm_biquad_cascade_df1_init_f32.c  (all hits in generateCoefsFastBiquadF32)
     99  …id generateCoefsFastBiquadF32(float32_t b0, float32_t b1, float32_t b2, float32_t a1, float32_t a2,   (argument)
    103  {0, 0, 0, b0, b1, b2, a1, a2},
    104  {0, 0, b0, b1, b2, 0, a2, 0},
    112  coeffs[2][i] += a1 * coeffs[1][i] + a2 * coeffs[0][i];
    113  coeffs[3][i] += a1 * coeffs[2][i] + a2 * coeffs[1][i];

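generateCoefsFastBiquadF16/F32 build "modified" coefficient rows that let the vector DF1 kernels compute several outputs per loop iteration: each new row expresses a later output in terms of the inputs and of outputs that are already two or more samples old, by substituting the recurrence into itself (the coeffs[k][i] += a1*coeffs[k-1][i] + a2*coeffs[k-2][i] lines). A small worked expansion for just two outputs, using the CMSIS sign convention y[n] = b0*x[n] + b1*x[n-1] + b2*x[n-2] + a1*y[n-1] + a2*y[n-2]; the row layout below is illustrative and not the library's actual column ordering:

    /* Expand the DF1 recurrence so y[n] and y[n+1] depend only on x[] and on
       y[n-1], y[n-2], removing the serial dependency between the two outputs.
       Substituting y[n] into y[n+1]:
         y[n+1] = b0*x[n+1] + (b1 + a1*b0)*x[n] + (b2 + a1*b1)*x[n-1]
                + (a1*b2)*x[n-2] + (a1*a1 + a2)*y[n-1] + (a1*a2)*y[n-2]        */
    static void expand_two_outputs(float b0, float b1, float b2,
                                   float a1, float a2, float row1[6])
    {
        row1[0] = b0;              /* weight of x[n+1] */
        row1[1] = b1 + a1 * b0;    /* weight of x[n]   */
        row1[2] = b2 + a1 * b1;    /* weight of x[n-1] */
        row1[3] = a1 * b2;         /* weight of x[n-2] */
        row1[4] = a1 * a1 + a2;    /* weight of y[n-1] */
        row1[5] = a1 * a2;         /* weight of y[n-2] */
    }
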
arm_biquad_cascade_df1_f16.c  (all hits in arm_biquad_cascade_df1_f16)
    324  _Float16 b0, b1, b2, a1, a2;          /* Filter coefficients */   (local)
    336  a2 = *pCoeffs++;
    364  Yn2 = (b0 * Xn) + (b1 * Xn1) + (b2 * Xn2) + (a1 * Yn1) + (a2 * Yn2);
    380  Yn1 = (b0 * Xn2) + (b1 * Xn) + (b2 * Xn1) + (a1 * Yn2) + (a2 * Yn1);
    396  Yn2 = (b0 * Xn1) + (b1 * Xn2) + (b2 * Xn) + (a1 * Yn1) + (a2 * Yn2);
    412  Yn1 = (b0 * Xn) + (b1 * Xn1) + (b2 * Xn2) + (a1 * Yn2) + (a2 * Yn1);
    446  acc = (b0 * Xn) + (b1 * Xn1) + (b2 * Xn2) + (a1 * Yn1) + (a2 * Yn2);

arm_biquad_cascade_df1_q31.c  (all hits in arm_biquad_cascade_df1_q31)
    332  q31_t b0, b1, b2, a1, a2;             /* Filter coefficients */   (local)
    350  a2 = *pCoeffs++;
    375  …63_t) b0 * Xn) + ((q63_t) b1 * Xn1) + ((q63_t) b2 * Xn2) + ((q63_t) a1 * Yn1) + ((q63_t) a2 * Yn2);
    391  …63_t) b0 * Xn2) + ((q63_t) b1 * Xn) + ((q63_t) b2 * Xn1) + ((q63_t) a1 * Yn2) + ((q63_t) a2 * Yn1);
    407  …63_t) b0 * Xn1) + ((q63_t) b1 * Xn2) + ((q63_t) b2 * Xn) + ((q63_t) a1 * Yn1) + ((q63_t) a2 * Yn2);
    423  …63_t) b0 * Xn) + ((q63_t) b1 * Xn1) + ((q63_t) b2 * Xn2) + ((q63_t) a1 * Yn2) + ((q63_t) a2 * Yn1);
    464  …63_t) b0 * Xn) + ((q63_t) b1 * Xn1) + ((q63_t) b2 * Xn2) + ((q63_t) a1 * Yn1) + ((q63_t) a2 * Yn2);

arm_biquad_cascade_df1_f32.c  (all hits in arm_biquad_cascade_df1_f32)
    517  float32_t b0, b1, b2, a1, a2;         /* Filter coefficients */   (local)
    529  a2 = *pCoeffs++;
    557  Yn2 = (b0 * Xn) + (b1 * Xn1) + (b2 * Xn2) + (a1 * Yn1) + (a2 * Yn2);
    573  Yn1 = (b0 * Xn2) + (b1 * Xn) + (b2 * Xn1) + (a1 * Yn2) + (a2 * Yn1);
    589  Yn2 = (b0 * Xn1) + (b1 * Xn2) + (b2 * Xn) + (a1 * Yn1) + (a2 * Yn2);
    605  Yn1 = (b0 * Xn) + (b1 * Xn1) + (b2 * Xn2) + (a1 * Yn2) + (a2 * Yn1);
    639  acc = (b0 * Xn) + (b1 * Xn1) + (b2 * Xn2) + (a1 * Yn1) + (a2 * Yn2);

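The scalar DF1 kernels (f16, q31, f32) unroll the recurrence four samples per iteration and rotate which variable receives the newest input and output, which is why the roles of Xn/Xn1/Xn2 and Yn1/Yn2 shift from one hit to the next. The plain, un-unrolled form of one stage looks like this (a1/a2 pre-negated; names illustrative):

    /* One DF1 biquad stage, scalar form; the library kernels unroll this by 4. */
    static void biquad_df1_stage(const float coef[5],  /* {b0, b1, b2, a1, a2}             */
                                 float state[4],       /* {x[n-1], x[n-2], y[n-1], y[n-2]} */
                                 const float *src, float *dst, unsigned blockSize)
    {
        const float b0 = coef[0], b1 = coef[1], b2 = coef[2], a1 = coef[3], a2 = coef[4];
        float Xn1 = state[0], Xn2 = state[1], Yn1 = state[2], Yn2 = state[3];

        for (unsigned n = 0; n < blockSize; n++) {
            float Xn  = src[n];
            float acc = (b0 * Xn) + (b1 * Xn1) + (b2 * Xn2) + (a1 * Yn1) + (a2 * Yn2);
            dst[n] = acc;
            Xn2 = Xn1;  Xn1 = Xn;     /* shift the input delay line  */
            Yn2 = Yn1;  Yn1 = acc;    /* shift the output delay line */
        }
        state[0] = Xn1;  state[1] = Xn2;  state[2] = Yn1;  state[3] = Yn2;
    }

The q31 and q15 kernels form the same sum in a wider accumulator and then scale the result back to the output format according to the instance's postShift setting.
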
arm_biquad_cascade_df1_q15.c  (all hits in arm_biquad_cascade_df1_q15)
     82  q15_t a2 = pCoeffs[5];   (local)
    100  bCoeffs0[6] = a2;
    102  bCoeffs1[7] = a2;
    526  q15_t b0, b1, b2, a1, a2;             /* Filter coefficients */
    543  a2 = *pCoeffs++;
    573  acc += (q31_t) a2 *Yn2;

/hal_nxp-3.6.0/mcux/mcux-sdk/CMSIS/RTOS2/RTX/Source/
rtx_core_ca.h
    238  __STATIC_INLINE t __svc##f (t1 a1, t2 a2) { \
    239  return svc##f(svcRtx##f,a1,a2); \
    245  __STATIC_INLINE t __svc##f (t1 a1, t2 a2, t3 a3) { \
    246  return svc##f(svcRtx##f,a1,a2,a3); \
    252  __STATIC_INLINE t __svc##f (t1 a1, t2 a2, t3 a3, t4 a4) { \
    253  return svc##f(svcRtx##f,a1,a2,a3,a4); \
    300  __SVC_INDIRECT(0) t svc##f (t1 a1, t2 a2); \
    302  __STATIC_INLINE t __svc##f (t1 a1, t2 a2) { \
    304  return svc##f(a1,a2); \
    308  __SVC_INDIRECT(0) t svc##f (t1 a1, t2 a2, t3 a3); \
    [all …]

rtx_core_cm.h
    247  __STATIC_INLINE t __svc##f (t1 a1, t2 a2) { \
    248  return svc##f(svcRtx##f,a1,a2); \
    254  __STATIC_INLINE t __svc##f (t1 a1, t2 a2, t3 a3) { \
    255  return svc##f(svcRtx##f,a1,a2,a3); \
    261  __STATIC_INLINE t __svc##f (t1 a1, t2 a2, t3 a3, t4 a4) { \
    262  return svc##f(svcRtx##f,a1,a2,a3,a4); \
    321  SVC_INDIRECT(0) t svc##f (t1 a1, t2 a2); \
    323  __STATIC_INLINE t __svc##f (t1 a1, t2 a2) { \
    325  return svc##f(a1,a2); \
    329  SVC_INDIRECT(0) t svc##f (t1 a1, t2 a2, t3 a3); \
    [all …]

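In both RTX port headers a1..a4 are simply the formal arguments of generated SVC wrappers: each SVCn_m macro emits an __svcFoo inline function that forwards its arguments, together with the address of the kernel-side handler svcRtxFoo, through an SVC call into the RTOS. A trimmed-down re-creation of the two-argument pattern is sketched below; the real macros add compiler-specific attributes (__SVC_INDIRECT / SVC_INDIRECT) and register bindings that are omitted here, and the macro name is made up:

    /* Illustrative two-argument SVC wrapper generator, shaped like the RTX
       SVC0_2 pattern seen above. Not functional without the port-specific
       __SVC_INDIRECT attribute that routes the call through the SVC handler. */
    #define MY_SVC_2(f, t, t1, t2)                                          \
      t svc##f (t (*func)(t1, t2), t1 a1, t2 a2);   /* indirect SVC stub */ \
      static inline t __svc##f (t1 a1, t2 a2) {                             \
        return svc##f(svcRtx##f, a1, a2);           /* handler + args    */ \
      }
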
/hal_nxp-3.6.0/mcux/mcux-sdk/CMSIS/DSP/Source/TransformFunctions/
arm_cfft_radix4_f32.c
  in arm_radix4_butterfly_f32:
    131  float32_t a0,a1,a2,a3,a4,a5,a6,a7;   (local)
    458  a2 = (Xaplusc - Xbplusd);
    472  ptr1[2] = a2;
  in arm_radix4_butterfly_inverse_f32:
    631  float32_t a0,a1,a2,a3,a4,a5,a6,a7;   (local)
    962  a2 = (Xaplusc - Xbplusd);
    976  p2 = a2 * onebyfftLen;

arm_cfft_radix4_f16.c
  in arm_radix4_butterfly_f16:
    196  float16_t a0,a1,a2,a3,a4,a5,a6,a7;   (local)
    523  a2 = (Xaplusc - Xbplusd);
    537  ptr1[2] = a2;
  in arm_radix4_butterfly_inverse_f16:
    698  float16_t a0,a1,a2,a3,a4,a5,a6,a7;   (local)
   1029  a2 = (Xaplusc - Xbplusd);
   1043  p2 = a2 * onebyfftLen;

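In the radix-4 butterflies a0..a7 hold the intermediate sums and differences of four complex inputs; a2 = (Xaplusc - Xbplusd) is the (a + c) - (b + d) combination that forms the real part of one of the four outputs, and the inverse kernels additionally scale by onebyfftLen (1/N). The bare 4-point core of a forward butterfly, with twiddle multiplication and the library's in-place indexing left out (a minimal sketch only):

    /* 4-point DFT core of a radix-4 butterfly (forward direction, no twiddles).
       Inputs and outputs are interleaved re/im pairs: {ar, ai, br, bi, cr, ci, dr, di}. */
    static void radix4_core(const float in[8], float out[8])
    {
        float ar = in[0], ai = in[1], br = in[2], bi = in[3];
        float cr = in[4], ci = in[5], dr = in[6], di = in[7];

        float Xaplusc  = ar + cr, Yaplusc  = ai + ci;   /* a + c */
        float Xaminusc = ar - cr, Yaminusc = ai - ci;   /* a - c */
        float Xbplusd  = br + dr, Ybplusd  = bi + di;   /* b + d */
        float Xbminusd = br - dr, Ybminusd = bi - di;   /* b - d */

        out[0] = Xaplusc + Xbplusd;    out[1] = Yaplusc + Ybplusd;    /* (a+c)+(b+d)                  */
        out[2] = Xaminusc + Ybminusd;  out[3] = Yaminusc - Xbminusd;  /* (a-c)-j(b-d)                 */
        out[4] = Xaplusc - Xbplusd;    out[5] = Yaplusc - Ybplusd;    /* (a+c)-(b+d); a2 is real part */
        out[6] = Xaminusc - Ybminusd;  out[7] = Yaminusc + Xbminusd;  /* (a-c)+j(b-d)                 */
    }
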
/hal_nxp-3.6.0/mcux/mcux-sdk/components/codec/tfa9xxx/vas_tfa_drv/
tfa9xxx_parameters.h
    108  int a2;      (struct member)
    127  float a2;    (struct member)