Searched refs:vecSum0 (Results 1 – 4 of 4) sorted by relevance
/cmsis-dsp-latest/Source/TransformFunctions/
arm_cfft_f16.c
    in _arm_radix4_butterfly_f16_mve():
       94   f16x8_t vecSum0, vecDiff0, vecSum1, vecDiff1;        (local declaration)
      143   vecSum0 = vecA + vecC;   /* vecSum0 = vaddq(vecA, vecC) */
      151   vecTmp0 = vecSum0 + vecSum1;
      158   vecTmp0 = vecSum0 - vecSum1;
      220   vecSum0 = vecA + vecC;   /* vecSum0 = vaddq(vecA, vecC) */
      233   vecTmp0 = vecSum0 + vecSum1;
      236   vecTmp0 = vecSum0 - vecSum1;
    in _arm_radix4_butterfly_inverse_f16_mve():
      300   f16x8_t vecSum0, vecDiff0, vecSum1, vecDiff1;        (local declaration)
      351   vecSum0 = vecA + vecC;   /* vecSum0 = vaddq(vecA, vecC) */
      359   vecTmp0 = vecSum0 + vecSum1;
    [all …]
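The f16 hits above show the first radix-4 stage forming vecSum0/vecDiff0 directly with vector operators (the commented vaddq equivalents). Below is a minimal stand-alone sketch of that sum/difference step, assuming an MVE/Helium-capable target compiled with arm_mve.h available; the helper name and the vecB/vecD operands are illustrative, not CMSIS-DSP API.

    #include <arm_mve.h>                   /* MVE/Helium intrinsics and vector types */

    typedef float16x8_t f16x8_t;           /* matches the CMSIS-DSP typedef */

    /* Illustrative helper (not part of CMSIS-DSP): the first radix-4
       sum/difference step on half-precision vectors, mirroring the
       pattern visible in the hits above. */
    static inline void radix4_sums_f16(f16x8_t vecA, f16x8_t vecB,
                                       f16x8_t vecC, f16x8_t vecD,
                                       f16x8_t *vecSum0, f16x8_t *vecDiff0,
                                       f16x8_t *vecSum1, f16x8_t *vecDiff1)
    {
        *vecSum0  = vaddq(vecA, vecC);     /* same as vecA + vecC in the source */
        *vecDiff0 = vsubq(vecA, vecC);
        *vecSum1  = vaddq(vecB, vecD);
        *vecDiff1 = vsubq(vecB, vecD);
    }

The later hits (lines 151/158 and 233/236) then combine the two partial sums, vecTmp0 = vecSum0 ± vecSum1, to form the butterfly outputs.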
arm_cfft_q31.c
    in _arm_radix4_butterfly_q31_mve():
       44   q31x4_t vecSum0, vecDiff0, vecSum1, vecDiff1;        (local declaration)
       99   vecSum0 = vhaddq(vecA, vecC);
      107   vecTmp0 = vhaddq(vecSum0, vecSum1);
      113   vecTmp0 = vhsubq(vecSum0, vecSum1);
      184   vecSum0 = vhaddq(vecA, vecC);
      198   vecTmp0 = vhaddq(vecSum0, vecSum1);
      201   vecTmp0 = vhsubq(vecSum0, vecSum1);
    in _arm_radix4_butterfly_inverse_q31_mve():
      296   q31x4_t vecSum0, vecDiff0, vecSum1, vecDiff1;        (local declaration)
      349   vecSum0 = vhaddq(vecA, vecC);
      357   vecTmp0 = vhaddq(vecSum0, vecSum1);
    [all …]
arm_cfft_q15.c
    in _arm_radix4_butterfly_q15_mve():
       42   q15x8_t vecSum0, vecDiff0, vecSum1, vecDiff1;        (local declaration)
       95   vecSum0 = vhaddq(vecA, vecC);
      103   vecTmp0 = vhaddq(vecSum0, vecSum1);
      109   vecTmp0 = vhsubq(vecSum0, vecSum1);
      173   vecSum0 = vhaddq(vecA, vecC);
      187   vecTmp0 = vhaddq(vecSum0, vecSum1);
      190   vecTmp0 = vhsubq(vecSum0, vecSum1);
    in _arm_radix4_butterfly_inverse_q15_mve():
      278   q15x8_t vecSum0, vecDiff0, vecSum1, vecDiff1;        (local declaration)
      333   vecSum0 = vhaddq(vecA, vecC);
      341   vecTmp0 = vhaddq(vecSum0, vecSum1);
    [all …]
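Unlike the float kernels, the q31 and q15 hits above build vecSum0 with the halving intrinsics vhaddq/vhsubq, which compute (a ± b) >> 1 per lane and so scale each fixed-point butterfly stage down by one bit to guard against overflow. Below is a minimal sketch of that halving pattern, shown for Q31 and assuming an MVE-capable target with arm_mve.h; the helper name is illustrative, not CMSIS-DSP API.

    #include <arm_mve.h>                   /* MVE/Helium intrinsics and vector types */

    typedef int32x4_t q31x4_t;             /* matches the CMSIS-DSP typedef */

    /* Illustrative helper (not part of CMSIS-DSP): halving sum/difference of
       two Q31 vectors, as used for vecSum0/vecDiff0 in the fixed-point hits. */
    static inline void radix4_sums_q31(q31x4_t vecA, q31x4_t vecC,
                                       q31x4_t *vecSum0, q31x4_t *vecDiff0)
    {
        *vecSum0  = vhaddq(vecA, vecC);    /* (vecA + vecC) >> 1, element-wise */
        *vecDiff0 = vhsubq(vecA, vecC);    /* (vecA - vecC) >> 1, element-wise */
    }

The same intrinsics reappear when the partial sums are combined (vecTmp0 = vhaddq/vhsubq of vecSum0 and vecSum1), so the scaling accumulates across stages; the q15 kernel follows the identical pattern on q15x8_t vectors.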
arm_cfft_f32.c
    in _arm_radix4_butterfly_f32_mve():
       95   f32x4_t vecSum0, vecDiff0, vecSum1, vecDiff1;        (local declaration)
      146   vecSum0 = vecA + vecC;   /* vecSum0 = vaddq(vecA, vecC) */
      154   vecTmp0 = vecSum0 + vecSum1;
      161   vecTmp0 = vecSum0 - vecSum1;
      223   vecSum0 = vecA + vecC;   /* vecSum0 = vaddq(vecA, vecC) */
      236   vecTmp0 = vecSum0 + vecSum1;
      239   vecTmp0 = vecSum0 - vecSum1;
    in _arm_radix4_butterfly_inverse_f32_mve():
      303   f32x4_t vecSum0, vecDiff0, vecSum1, vecDiff1;        (local declaration)
      354   vecSum0 = vecA + vecC;   /* vecSum0 = vaddq(vecA, vecC) */
      362   vecTmp0 = vecSum0 + vecSum1;
    [all …]