Searched refs:inA (Results 1 – 5 of 5) sorted by relevance

/cmsis-3.4.0/CMSIS/DSP/Source/TransformFunctions/
arm_cfft_f16.c
123 float16_t *inA = pSrc + CMPLX_DIM * i * n1; in _arm_radix4_butterfly_f16_mve() local
124 float16_t *inB = inA + n2 * CMPLX_DIM; in _arm_radix4_butterfly_f16_mve()
138 vecA = vldrhq_f16(inA); in _arm_radix4_butterfly_f16_mve()
154 vst1q(inA, vecTmp0); in _arm_radix4_butterfly_f16_mve()
155 inA += 8; in _arm_radix4_butterfly_f16_mve()
196 vecA = vldrhq_f16(inA); in _arm_radix4_butterfly_f16_mve()
332 float16_t *inA = pSrc + CMPLX_DIM * i * n1; in _arm_radix4_butterfly_inverse_f16_mve() local
333 float16_t *inB = inA + n2 * CMPLX_DIM; in _arm_radix4_butterfly_inverse_f16_mve()
345 vecA = vldrhq_f16(inA); in _arm_radix4_butterfly_inverse_f16_mve()
361 vst1q(inA, vecTmp0); in _arm_radix4_butterfly_inverse_f16_mve()
[all …]
arm_cfft_q31.c
78 q31_t *inA = pSrc + CMPLX_DIM * i * n1; in _arm_radix4_butterfly_q31_mve() local
79 q31_t *inB = inA + n2 * CMPLX_DIM; in _arm_radix4_butterfly_q31_mve()
91 vecA = vldrwq_s32(inA); in _arm_radix4_butterfly_q31_mve()
107 vst1q(inA, vecTmp0); in _arm_radix4_butterfly_q31_mve()
108 inA += 4; in _arm_radix4_butterfly_q31_mve()
147 vecA = vldrwq_s32(inA); in _arm_radix4_butterfly_q31_mve()
328 q31_t *inA = pSrc + CMPLX_DIM * i * n1; in _arm_radix4_butterfly_inverse_q31_mve() local
329 q31_t *inB = inA + n2 * CMPLX_DIM; in _arm_radix4_butterfly_inverse_q31_mve()
341 vecA = vldrwq_s32(inA); in _arm_radix4_butterfly_inverse_q31_mve()
357 vst1q(inA, vecTmp0); in _arm_radix4_butterfly_inverse_q31_mve()
[all …]
arm_cfft_q15.c
75 q15_t *inA = pSrc + CMPLX_DIM * i * n1; in _arm_radix4_butterfly_q15_mve() local
76 q15_t *inB = inA + n2 * CMPLX_DIM; in _arm_radix4_butterfly_q15_mve()
88 vecA = vldrhq_s16(inA); in _arm_radix4_butterfly_q15_mve()
104 vst1q(inA, vecTmp0); in _arm_radix4_butterfly_q15_mve()
105 inA += 8; in _arm_radix4_butterfly_q15_mve()
145 vecA = vldrhq_s16(inA); in _arm_radix4_butterfly_q15_mve()
311 q15_t *inA = pSrc + CMPLX_DIM * i * n1; in _arm_radix4_butterfly_inverse_q15_mve() local
312 q15_t *inB = inA + n2 * CMPLX_DIM; in _arm_radix4_butterfly_inverse_q15_mve()
324 vecA = vldrhq_s16(inA); in _arm_radix4_butterfly_inverse_q15_mve()
340 vst1q(inA, vecTmp0); in _arm_radix4_butterfly_inverse_q15_mve()
[all …]
arm_cfft_f32.c
125 float32_t *inA = pSrc + CMPLX_DIM * i * n1; in _arm_radix4_butterfly_f32_mve() local
126 float32_t *inB = inA + n2 * CMPLX_DIM; in _arm_radix4_butterfly_f32_mve()
140 vecA = vldrwq_f32(inA); in _arm_radix4_butterfly_f32_mve()
156 vst1q(inA, vecTmp0); in _arm_radix4_butterfly_f32_mve()
157 inA += 4; in _arm_radix4_butterfly_f32_mve()
198 vecA = vldrwq_f32(inA); in _arm_radix4_butterfly_f32_mve()
334 float32_t *inA = pSrc + CMPLX_DIM * i * n1; in _arm_radix4_butterfly_inverse_f32_mve() local
335 float32_t *inB = inA + n2 * CMPLX_DIM; in _arm_radix4_butterfly_inverse_f32_mve()
347 vecA = vldrwq_f32(inA); in _arm_radix4_butterfly_inverse_f32_mve()
363 vst1q(inA, vecTmp0); in _arm_radix4_butterfly_inverse_f32_mve()
[all …]
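
All four CMSIS-DSP hits above use inA the same way inside the radix-4 MVE butterflies: inA points at the first leg of butterfly group i, inB sits n2 complex samples (n2 * CMPLX_DIM elements) further on, each loop pass loads a 128-bit vector from inA, stores the recombined result back in place, and advances by one vector of elements (4 for f32/q31, 8 for f16/q15). Below is a minimal plain-C sketch of just that addressing pattern; butterfly_leg_walk and the sizes in main are invented for illustration, the scalar add stands in for the MVE load/store and twiddle math, and inB advancing in step is an assumption (only the inA increments appear in these hits).

    #include <stddef.h>
    #include <stdio.h>

    #define CMPLX_DIM 2   /* interleaved real/imaginary pairs, as in CMSIS-DSP */

    /* Sketch of the pointer setup shared by the hits above: inA addresses the
     * start of butterfly group i, inB sits n2 complex samples further on, and
     * each pass steps forward by one vector's worth of elements (4 here, as in
     * the f32/q31 kernels; the f16/q15 kernels step by 8).  The scalar add is
     * only a stand-in for the vldrwq/vst1q vector work and the twiddle math. */
    static void butterfly_leg_walk(float *pSrc, size_t i, size_t n1, size_t n2)
    {
        float *inA = pSrc + CMPLX_DIM * i * n1;   /* first leg of group i */
        float *inB = inA + n2 * CMPLX_DIM;        /* second leg           */

        for (size_t blk = 0; blk < n2 * CMPLX_DIM; blk += 4)
        {
            for (int k = 0; k < 4; k++)
                inA[k] += inB[k];                 /* stand-in for the vector op */
            inA += 4;                             /* advance one vector width   */
            inB += 4;                             /* assumed to advance in step */
        }
    }

    int main(void)
    {
        float buf[64];
        for (int k = 0; k < 64; k++) buf[k] = (float)k;
        butterfly_leg_walk(buf, 0, 4, 4);         /* hypothetical sizes */
        printf("buf[0] = %f\n", (double)buf[0]);
        return 0;
    }
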
/cmsis-3.4.0/CMSIS/NN/Include/
arm_nnsupportfunctions.h
573 q31_t inA = arm_nn_read_q7x4_ia(&source); in read_and_pad() local
574 q31_t inAbuf1 = __SXTB16(__ROR((uint32_t)inA, 8)); in read_and_pad()
575 q31_t inAbuf2 = __SXTB16(inA); in read_and_pad()
594 q31_t inA = arm_nn_read_q7x4_ia(&source); in read_and_pad_reordered() local
596 *out2 = __SXTB16(__ROR((uint32_t)inA, 8)); in read_and_pad_reordered()
597 *out1 = __SXTB16(inA); in read_and_pad_reordered()
599 *out1 = __SXTB16(__ROR((uint32_t)inA, 8)); in read_and_pad_reordered()
600 *out2 = __SXTB16(inA); in read_and_pad_reordered()
612 q31_t inA = arm_nn_read_q7x4_ia(&source); in read_and_pad_reordered_with_offset() local
615 *out2 = __SXTB16(__ROR((uint32_t)inA, 8)); in read_and_pad_reordered_with_offset()
[all …]
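
The CMSIS-NN hits all begin with the same unpacking step in read_and_pad() and its variants: arm_nn_read_q7x4_ia() fetches four packed q7 values as one 32-bit word, __SXTB16(inA) sign-extends bytes 0 and 2 into two 16-bit lanes, and __SXTB16(__ROR(inA, 8)) does the same for bytes 1 and 3. A portable sketch of that unpacking is below; ror32() and sxtb16() are plain-C stand-ins for the Cortex-M intrinsics, the sample values are made up, and a little-endian host (as on Cortex-M) is assumed.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Plain-C stand-ins for the intrinsics used by read_and_pad():
     * __ROR rotates a 32-bit word right, __SXTB16 sign-extends bytes 0 and 2
     * of its argument into the two 16-bit halves of the result. */
    static uint32_t ror32(uint32_t x, unsigned n)
    {
        return (x >> n) | (x << (32u - n));
    }

    static int32_t sxtb16(uint32_t x)
    {
        int16_t lo = (int8_t)(x & 0xFFu);          /* sign-extend byte 0 */
        int16_t hi = (int8_t)((x >> 16) & 0xFFu);  /* sign-extend byte 2 */
        return (int32_t)(((uint32_t)(uint16_t)hi << 16) | (uint16_t)lo);
    }

    int main(void)
    {
        /* Four packed q7 values, standing in for what arm_nn_read_q7x4_ia()
         * would deliver in a single q31_t word (little-endian host assumed). */
        int8_t source[4] = { -1, 2, -3, 4 };
        uint32_t inA;
        memcpy(&inA, source, sizeof inA);

        int32_t buf2 = sxtb16(inA);            /* lanes {source[0], source[2]} */
        int32_t buf1 = sxtb16(ror32(inA, 8));  /* lanes {source[1], source[3]} */

        printf("buf1 = 0x%08x  buf2 = 0x%08x\n", (unsigned)buf1, (unsigned)buf2);
        return 0;
    }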