/cmsis-dsp-latest/Source/TransformFunctions/
D | arm_cfft_f16.c |
    130  f16x8_t vecW;                                          in _arm_radix4_butterfly_f16_mve() local
    162  vecW = vld1q(pW2);                                     in _arm_radix4_butterfly_f16_mve()
    164  vecTmp1 = MVE_CMPLX_MULT_FLT_Conj_AxB(vecW, vecTmp0);  in _arm_radix4_butterfly_f16_mve()
    175  vecW = vld1q(pW1);                                     in _arm_radix4_butterfly_f16_mve()
    177  vecTmp1 = MVE_CMPLX_MULT_FLT_Conj_AxB(vecW, vecTmp0);  in _arm_radix4_butterfly_f16_mve()
    188  vecW = vld1q(pW3);                                     in _arm_radix4_butterfly_f16_mve()
    190  vecTmp1 = MVE_CMPLX_MULT_FLT_Conj_AxB(vecW, vecTmp0);  in _arm_radix4_butterfly_f16_mve()
    338  f16x8_t vecW;                                          in _arm_radix4_butterfly_inverse_f16_mve() local
    369  vecW = vld1q(pW2);                                     in _arm_radix4_butterfly_inverse_f16_mve()
    371  vecTmp1 = MVE_CMPLX_MULT_FLT_AxB(vecW, vecTmp0);       in _arm_radix4_butterfly_inverse_f16_mve()
    [all …]
D | arm_cfft_q31.c |
    85   q31x4_t vecW;                                                 in _arm_radix4_butterfly_q31_mve() local
    117  vecW = vld1q(pW2);                                            in _arm_radix4_butterfly_q31_mve()
    119  vecTmp1 = MVE_CMPLX_MULT_FX_AxB(vecW, vecTmp0, q31x4_t);      in _arm_radix4_butterfly_q31_mve()
    130  vecW = vld1q(pW1);                                            in _arm_radix4_butterfly_q31_mve()
    132  vecTmp1 = MVE_CMPLX_MULT_FX_AxB(vecW, vecTmp0, q31x4_t);      in _arm_radix4_butterfly_q31_mve()
    142  vecW = vld1q(pW3);                                            in _arm_radix4_butterfly_q31_mve()
    144  vecTmp1 = MVE_CMPLX_MULT_FX_AxB(vecW, vecTmp0, q31x4_t);      in _arm_radix4_butterfly_q31_mve()
    336  q31x4_t vecW;                                                 in _arm_radix4_butterfly_inverse_q31_mve() local
    367  vecW = vld1q(pW2);                                            in _arm_radix4_butterfly_inverse_q31_mve()
    369  vecTmp1 = MVE_CMPLX_MULT_FX_AxConjB(vecTmp0, vecW, q31x4_t);  in _arm_radix4_butterfly_inverse_q31_mve()
    [all …]
D | arm_cfft_q15.c |
    82   q15x8_t vecW;                                                 in _arm_radix4_butterfly_q15_mve() local
    113  vecW = vld1q(pW2);                                            in _arm_radix4_butterfly_q15_mve()
    115  vecTmp1 = MVE_CMPLX_MULT_FX_AxB(vecW, vecTmp0, q15x8_t);      in _arm_radix4_butterfly_q15_mve()
    126  vecW = vld1q(pW1);                                            in _arm_radix4_butterfly_q15_mve()
    128  vecTmp1 = MVE_CMPLX_MULT_FX_AxB(vecW, vecTmp0, q15x8_t);      in _arm_radix4_butterfly_q15_mve()
    139  vecW = vld1q(pW3);                                            in _arm_radix4_butterfly_q15_mve()
    141  vecTmp1 = MVE_CMPLX_MULT_FX_AxB(vecW, vecTmp0, q15x8_t);      in _arm_radix4_butterfly_q15_mve()
    319  q15x8_t vecW;                                                 in _arm_radix4_butterfly_inverse_q15_mve() local
    351  vecW = vld1q(pW2);                                            in _arm_radix4_butterfly_inverse_q15_mve()
    353  vecTmp1 = MVE_CMPLX_MULT_FX_AxConjB(vecTmp0, vecW, q15x8_t);  in _arm_radix4_butterfly_inverse_q15_mve()
    [all …]
D | arm_cfft_f32.c |
    133  f32x4_t vecW;                                          in _arm_radix4_butterfly_f32_mve() local
    165  vecW = vld1q(pW2);                                     in _arm_radix4_butterfly_f32_mve()
    167  vecTmp1 = MVE_CMPLX_MULT_FLT_Conj_AxB(vecW, vecTmp0);  in _arm_radix4_butterfly_f32_mve()
    178  vecW = vld1q(pW1);                                     in _arm_radix4_butterfly_f32_mve()
    180  vecTmp1 = MVE_CMPLX_MULT_FLT_Conj_AxB(vecW, vecTmp0);  in _arm_radix4_butterfly_f32_mve()
    191  vecW = vld1q(pW3);                                     in _arm_radix4_butterfly_f32_mve()
    193  vecTmp1 = MVE_CMPLX_MULT_FLT_Conj_AxB(vecW, vecTmp0);  in _arm_radix4_butterfly_f32_mve()
    341  f32x4_t vecW;                                          in _arm_radix4_butterfly_inverse_f32_mve() local
    372  vecW = vld1q(pW2);                                     in _arm_radix4_butterfly_inverse_f32_mve()
    374  vecTmp1 = MVE_CMPLX_MULT_FLT_AxB(vecW, vecTmp0);       in _arm_radix4_butterfly_inverse_f32_mve()
    [all …]
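In all four butterfly kernels above, vecW holds a vector of complex twiddle factors loaded with vld1q() from the coefficient pointers pW1/pW2/pW3 and is combined with the intermediate vector vecTmp0 through a complex-multiply macro: the forward float paths use the conjugating flavour (MVE_CMPLX_MULT_FLT_Conj_AxB), the inverse float paths the plain MVE_CMPLX_MULT_FLT_AxB, and the fixed-point paths the MVE_CMPLX_MULT_FX_AxB / MVE_CMPLX_MULT_FX_AxConjB pair. The sketch below is a plain scalar C illustration of that twiddle application, assuming interleaved {re, im} storage; the struct and helper names are hypothetical, not CMSIS-DSP APIs.

    /* Scalar sketch of the twiddle multiply that the MVE_CMPLX_MULT_* macros
     * vectorize.  Complex values are assumed interleaved as {re, im}; the
     * helper names are illustrative only. */
    #include <stdio.h>

    typedef struct { float re, im; } cplx_f32;

    /* conj(w) * x -- the Conj_AxB flavour used in the forward float kernels */
    static cplx_f32 cmul_conj_a(cplx_f32 w, cplx_f32 x)
    {
        cplx_f32 y;
        y.re = w.re * x.re + w.im * x.im;
        y.im = w.re * x.im - w.im * x.re;
        return y;
    }

    /* w * x -- the plain AxB flavour used in the inverse float kernels */
    static cplx_f32 cmul(cplx_f32 w, cplx_f32 x)
    {
        cplx_f32 y;
        y.re = w.re * x.re - w.im * x.im;
        y.im = w.re * x.im + w.im * x.re;
        return y;
    }

    int main(void)
    {
        cplx_f32 w = { 0.70710678f, -0.70710678f };  /* example twiddle e^(-j*pi/4) */
        cplx_f32 t = { 1.0f, 2.0f };                 /* example butterfly intermediate */
        cplx_f32 fwd = cmul_conj_a(w, t);
        cplx_f32 inv = cmul(w, t);
        printf("fwd: %f %+fi\n", fwd.re, fwd.im);
        printf("inv: %f %+fi\n", inv.re, inv.im);
        return 0;
    }

Per the macro names, each MVE variant applies this same arithmetic across a whole vector of interleaved complex pairs at once.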
/cmsis-dsp-latest/Include/
D | arm_vec_math_f16.h |
    257  f16x8_t vecSx, vecW, vecTmp;                    in vrecip_f16() local
    265  vecW = vmulq(vecSx, v.f);                       in vrecip_f16()
    268  vecTmp = vsubq(vdupq_n_f16(8.0f16), vecW);      in vrecip_f16()
    269  vecTmp = vfmasq_n_f16(vecW, vecTmp, -28.0f16);  in vrecip_f16()
    270  vecTmp = vfmasq_n_f16(vecW, vecTmp, 56.0f16);   in vrecip_f16()
    271  vecTmp = vfmasq_n_f16(vecW, vecTmp, -70.0f16);  in vrecip_f16()
    272  vecTmp = vfmasq_n_f16(vecW, vecTmp, 56.0f16);   in vrecip_f16()
    273  vecTmp = vfmasq_n_f16(vecW, vecTmp, -28.0f16);  in vrecip_f16()
    274  vecTmp = vfmasq_n_f16(vecW, vecTmp, 8.0f16);    in vrecip_f16()
D | arm_vec_math.h |
    250  f32x4_t vecSx, vecW, vecTmp;              in vrecip_f32() local
    258  vecW = vmulq(vecSx, v.f);                 in vrecip_f32()
    261  vecTmp = vsubq(vdupq_n_f32(8.0f), vecW);  in vrecip_f32()
    262  vecTmp = vfmasq(vecW, vecTmp, -28.0f);    in vrecip_f32()
    263  vecTmp = vfmasq(vecW, vecTmp, 56.0f);     in vrecip_f32()
    264  vecTmp = vfmasq(vecW, vecTmp, -70.0f);    in vrecip_f32()
    265  vecTmp = vfmasq(vecW, vecTmp, 56.0f);     in vrecip_f32()
    266  vecTmp = vfmasq(vecW, vecTmp, -28.0f);    in vrecip_f32()
    267  vecTmp = vfmasq(vecW, vecTmp, 8.0f);      in vrecip_f32()
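In both vrecip_f32() and vrecip_f16(), vecW = vmulq(vecSx, v.f) multiplies the (scaled) input by an initial reciprocal estimate, so vecW sits close to 1, and the chain of vfmasq() calls then evaluates the degree-7 polynomial 8 - 28w + 56w^2 - 70w^3 + 56w^4 - 28w^5 + 8w^6 - w^7 = (1 - (1 - w)^8) / w by Horner's rule; multiplying the initial estimate by this value drives a relative error of e down to e^8. The scalar sketch below assumes vfmasq(a, b, c) computes a*b + c per lane and uses a hypothetical seed argument in place of v.f; how the library obtains that seed is not shown in the fragments above.

    /* Scalar sketch of the reciprocal refinement seen in vrecip_f32()/vrecip_f16().
     * 'seed' stands in for the initial estimate (v.f in the listing). */
    #include <stdio.h>

    static float refine_recip(float x, float seed)
    {
        float w   = x * seed;      /* vecW = vmulq(vecSx, v.f): w ~ 1 for a good seed */
        float tmp = 8.0f - w;      /* Horner chain mirroring the vfmasq() sequence */
        tmp = w * tmp - 28.0f;
        tmp = w * tmp + 56.0f;
        tmp = w * tmp - 70.0f;
        tmp = w * tmp + 56.0f;
        tmp = w * tmp - 28.0f;
        tmp = w * tmp + 8.0f;      /* tmp == (1 - (1 - w)^8) / w */
        return seed * tmp;         /* relative error e becomes e^8 */
    }

    int main(void)
    {
        float x = 3.0f;
        float seed = 0.3f;         /* deliberately rough estimate of 1/3 */
        printf("refined = %.9f, exact = %.9f\n", refine_recip(x, seed), 1.0f / x);
        return 0;
    }

With the rough seed 0.3 for 1/3, w = 0.9, so the refined value is off by only about (0.1)^8 = 1e-8 in relative terms, already comparable to single-precision rounding error.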
/cmsis-dsp-latest/Source/MatrixFunctions/
D | arm_mat_ldlt_f32.c |
    204  f32x4_t vecA,vecX,vecW;                in arm_mat_ldlt_f32() local
    209  vecW = vdupq_n_f32(pA[w*n+k]);         in arm_mat_ldlt_f32()
    222  vecA = vfmsq_m(vecA, vecW, vecX, p0);  in arm_mat_ldlt_f32()
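In arm_mat_ldlt_f32(), vecW broadcasts the single scalar pA[w*n+k] across all lanes (vdupq_n_f32), and the predicated vfmsq_m() then subtracts vecW * vecX from vecA only on the lanes selected by p0, which covers the loop tail. The scalar sketch below shows just that predicated multiply-subtract, assuming vfmsq_m(a, b, c, p) computes a - b*c on active lanes and leaves inactive lanes untouched; the function name and indexing are illustrative, and the exact elimination loop lives in arm_mat_ldlt_f32.c.

    /* Scalar sketch of vecA = vfmsq_m(vecA, vecW, vecX, p0): subtract a broadcast
     * scalar times a vector slice from an accumulator slice, touching only the
     * first 'active' lanes (the role of the tail predicate p0). */
    #include <stdio.h>

    static void fms_predicated(float *acc, float scalar, const float *vec, int active)
    {
        for (int i = 0; i < active; i++) {
            acc[i] -= scalar * vec[i];   /* per lane: acc - scalar * vec */
        }                                /* lanes >= active are left unchanged */
    }

    int main(void)
    {
        float acc[4] = { 10.f, 10.f, 10.f, 10.f };
        float vec[4] = {  1.f,  2.f,  3.f,  4.f };
        fms_predicated(acc, 2.0f, vec, 3);                    /* 3 of 4 lanes active, as in a loop tail */
        for (int i = 0; i < 4; i++) printf("%g ", acc[i]);    /* prints: 8 6 4 10 */
        printf("\n");
        return 0;
    }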