Searched refs:vecTmp (Results 1 – 7 of 7) sorted by relevance

/cmsis-dsp-3.7.0-3.6.0/Include/
arm_vec_math_f16.h
  257  f16x8_t vecSx, vecW, vecTmp;  in vrecip_f16() local
  268  vecTmp = vsubq(vdupq_n_f16(8.0f16), vecW);  in vrecip_f16()
  269  vecTmp = vfmasq_n_f16(vecW, vecTmp, -28.0f16);  in vrecip_f16()
  270  vecTmp = vfmasq_n_f16(vecW, vecTmp, 56.0f16);  in vrecip_f16()
  271  vecTmp = vfmasq_n_f16(vecW, vecTmp, -70.0f16);  in vrecip_f16()
  272  vecTmp = vfmasq_n_f16(vecW, vecTmp, 56.0f16);  in vrecip_f16()
  273  vecTmp = vfmasq_n_f16(vecW, vecTmp, -28.0f16);  in vrecip_f16()
  274  vecTmp = vfmasq_n_f16(vecW, vecTmp, 8.0f16);  in vrecip_f16()
  275  v.f = vmulq(v.f, vecTmp);  in vrecip_f16()
arm_vec_math.h
  250  f32x4_t vecSx, vecW, vecTmp;  in vrecip_f32() local
  261  vecTmp = vsubq(vdupq_n_f32(8.0f), vecW);  in vrecip_f32()
  262  vecTmp = vfmasq(vecW, vecTmp, -28.0f);  in vrecip_f32()
  263  vecTmp = vfmasq(vecW, vecTmp, 56.0f);  in vrecip_f32()
  264  vecTmp = vfmasq(vecW, vecTmp, -70.0f);  in vrecip_f32()
  265  vecTmp = vfmasq(vecW, vecTmp, 56.0f);  in vrecip_f32()
  266  vecTmp = vfmasq(vecW, vecTmp, -28.0f);  in vrecip_f32()
  267  vecTmp = vfmasq(vecW, vecTmp, 8.0f);  in vrecip_f32()
  268  v.f = vmulq(v.f, vecTmp);  in vrecip_f32()
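
Both vrecip_f16() and vrecip_f32() above evaluate the same degree-7 polynomial in Horner form with vfmasq (vector * vector + scalar): 8 - 28w + 56w^2 - 70w^3 + 56w^4 - 28w^5 + 8w^6 - w^7, which is exactly the truncated geometric series (1 - (1-w)^8)/w = sum_{k=0..7} (1-w)^k for 1/w, hence the binomial-looking coefficients. A minimal scalar C sketch of that refinement step, assuming w is already the mantissa scaled close to 1.0 (the exponent handling via vecSx and the surrounding code are out of view in these hits):

#include <stdio.h>

/* Hypothetical scalar counterpart of the vfmasq chain; recip_poly is
 * an illustrative name, not a CMSIS-DSP function. */
static float recip_poly(float w)
{
    float tmp = 8.0f - w;      /* vsubq(vdupq_n(8.0f), vecW)        */
    tmp = w * tmp - 28.0f;     /* vfmasq(vecW, vecTmp, -28.0f) ...  */
    tmp = w * tmp + 56.0f;
    tmp = w * tmp - 70.0f;
    tmp = w * tmp + 56.0f;
    tmp = w * tmp - 28.0f;
    tmp = w * tmp + 8.0f;      /* ... = sum_{k=0..7} (1-w)^k        */
    return tmp;                /* the kernel then multiplies this into v.f */
}

int main(void)
{
    for (float w = 0.5f; w <= 1.5f; w += 0.125f)
        printf("w=%.3f  poly=%.7f  1/w=%.7f\n", w, recip_poly(w), 1.0f / w);
    return 0;
}

Compiled with any C compiler, this prints the polynomial next to 1/w; the approximation is tightest near w = 1 and loosens as (1-w)^8 grows toward the ends of the range.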
arm_helium_utils.h
  114  float16x8_t vecTmp, vecOut;  in __mve_cmplx_sum_intra_vec_f16() local
  117  vecTmp = (float16x8_t) vrev64q_s32((int32x4_t) vecIn);  in __mve_cmplx_sum_intra_vec_f16()
  125  vecTmp = vaddq_f16(vecTmp, vecIn);  in __mve_cmplx_sum_intra_vec_f16()
  126  vecOut = vecTmp;  in __mve_cmplx_sum_intra_vec_f16()
  136  vecOut = vaddq_f16(vecOut, vecTmp);  in __mve_cmplx_sum_intra_vec_f16()
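
In __mve_cmplx_sum_intra_vec_f16() the cast plus vrev64q_s32 treats the eight f16 lanes as four 32-bit words, one per interleaved (re, im) pair, and swaps the two words inside each 64-bit group; the vaddq_f16 that follows folds neighbouring complex numbers in a single add, and the hit at line 136 combines the remaining partial sums with another vaddq_f16. A plain-C model of that lane shuffle, using float instead of float16 and hypothetical buffer names:

#include <stdio.h>

int main(void)
{
    /* 8 interleaved lanes = 4 complex numbers {re0,im0, ..., re3,im3}. */
    float in[8]  = { 1, 2, 3, 4, 5, 6, 7, 8 };
    float rev[8], sum[8];

    /* Model of vrev64q_s32 on the reinterpreted vector: swap the two
     * 32-bit complex words inside each 64-bit group, i.e. lanes
     * {0,1}<->{2,3} and {4,5}<->{6,7}. */
    for (int g = 0; g < 8; g += 4) {
        rev[g + 0] = in[g + 2];  rev[g + 1] = in[g + 3];
        rev[g + 2] = in[g + 0];  rev[g + 3] = in[g + 1];
    }

    /* Model of vaddq_f16: every complex word now holds the sum of its pair. */
    for (int i = 0; i < 8; i++)
        sum[i] = in[i] + rev[i];

    /* (c0+c1) sits in lanes 0/1, (c2+c3) in lanes 4/5; the real helper
     * folds these remaining partial sums in the later vaddq_f16. */
    printf("re=%g im=%g\n", sum[0] + sum[4], sum[1] + sum[5]);
    return 0;
}

The same reverse-and-add idiom shows up again in the 2x2 complex matrix kernel below.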
/cmsis-dsp-3.7.0-3.6.0/Source/MatrixFunctions/
arm_mat_cmplx_mult_f16.c
  78   f16x8_t vecTmp;  in arm_mat_cmplx_mult_f16_2x2_mve() local
  117  vecTmp = (f16x8_t) vrev64q_s32((int32x4_t) acc0);  in arm_mat_cmplx_mult_f16_2x2_mve()
  118  vecTmp = vaddq(vecTmp, acc0);  in arm_mat_cmplx_mult_f16_2x2_mve()
  121  *(float32_t *)(&pOut[0 * CMPLX_DIM * MATRIX_DIM]) = ((f32x4_t)vecTmp)[0];  in arm_mat_cmplx_mult_f16_2x2_mve()
  122  *(float32_t *)(&pOut[0 * CMPLX_DIM * MATRIX_DIM + CMPLX_DIM]) = ((f32x4_t)vecTmp)[2];  in arm_mat_cmplx_mult_f16_2x2_mve()
  124  vecTmp = (f16x8_t) vrev64q_s32((int32x4_t) acc1);  in arm_mat_cmplx_mult_f16_2x2_mve()
  125  vecTmp = vaddq(vecTmp, acc1);  in arm_mat_cmplx_mult_f16_2x2_mve()
  127  *(float32_t *)(&pOut[1 * CMPLX_DIM * MATRIX_DIM]) = ((f32x4_t)vecTmp)[0];  in arm_mat_cmplx_mult_f16_2x2_mve()
  128  *(float32_t *)(&pOut[1 * CMPLX_DIM * MATRIX_DIM + CMPLX_DIM]) = ((f32x4_t)vecTmp)[2];  in arm_mat_cmplx_mult_f16_2x2_mve()
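
arm_mat_cmplx_mult_f16_2x2_mve() applies the same vrev64q/vaddq fold to its two accumulators and then writes each (re, im) result pair with one 32-bit store, which is why vecTmp is recast to f32x4_t and only lanes 0 and 2 are picked out. Leaving the MVE details aside, here is a hedged plain-C reference for the 2x2 complex product and the interleaved output layout those stores produce (cmplx_mat_mult_2x2_ref is an illustrative name, not the library routine, and plain float stands in for float16_t):

#include <stdio.h>

#define MATRIX_DIM 2
#define CMPLX_DIM  2

static void cmplx_mat_mult_2x2_ref(const float *a, const float *b, float *c)
{
    for (int i = 0; i < MATRIX_DIM; i++) {
        for (int j = 0; j < MATRIX_DIM; j++) {
            float re = 0.0f, im = 0.0f;
            for (int k = 0; k < MATRIX_DIM; k++) {
                const float *pa = &a[(i * MATRIX_DIM + k) * CMPLX_DIM];
                const float *pb = &b[(k * MATRIX_DIM + j) * CMPLX_DIM];
                re += pa[0] * pb[0] - pa[1] * pb[1];   /* complex MAC */
                im += pa[0] * pb[1] + pa[1] * pb[0];
            }
            /* The MVE kernel emits each (re, im) pair as one packed
             * 32-bit store; here they are two adjacent floats. */
            c[(i * MATRIX_DIM + j) * CMPLX_DIM + 0] = re;
            c[(i * MATRIX_DIM + j) * CMPLX_DIM + 1] = im;
        }
    }
}

int main(void)
{
    const float a[8] = { 1, 1,  2, 0,  0, 1,  1, -1 };  /* 2x2 complex A */
    const float b[8] = { 1, 0,  0, 1,  1, 1,  2, 2 };   /* 2x2 complex B */
    float c[8];
    cmplx_mat_mult_2x2_ref(a, b, c);
    for (int i = 0; i < 4; i++)
        printf("C[%d][%d] = %g + %gi\n", i / 2, i % 2, c[2 * i], c[2 * i + 1]);
    return 0;
}

With MATRIX_DIM = 2 and CMPLX_DIM = 2, the output indices 0, 2, 4, 6 used in the stores above correspond to C[0][0], C[0][1], C[1][0], C[1][1].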
/cmsis-dsp-3.7.0-3.6.0/Source/FilteringFunctions/
arm_levinson_durbin_f16.c
  131  uint16x8_t offset,offsetInc,vecTmp;  in arm_levinson_durbin_f16() local
  135  vecTmp = vdupq_n_u16(p);  in arm_levinson_durbin_f16()
  137  offset = vaddq_m_u16(offset,offset,vecTmp,LANE4567_MASK);  in arm_levinson_durbin_f16()
arm_levinson_durbin_f32.c
  134  uint32x4_t offset,offsetInc,vecTmp;  in arm_levinson_durbin_f32() local
  138  vecTmp = vdupq_n_u32(p);  in arm_levinson_durbin_f32()
  140  offset = vaddq_m_u32(offset,offset,vecTmp,LANE23_MASK);  in arm_levinson_durbin_f32()
arm_levinson_durbin_q31.c
  198  uint32x4_t offset,offsetInc,vecTmp;  in arm_levinson_durbin_q31() local
  202  vecTmp = vdupq_n_u32(p);  in arm_levinson_durbin_q31()
  204  offset = vaddq_m_u32(offset,offset,vecTmp,LANE23_MASK);  in arm_levinson_durbin_q31()
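
All three Levinson-Durbin variants build their per-lane offsets the same way: vdupq_n broadcasts the scalar p into vecTmp, and the merging-predicated vaddq_m adds it only in the lanes enabled by the mask (LANE4567_MASK over eight u16 lanes in the f16 version, LANE23_MASK over four u32 lanes in f32/q31); the other lanes keep their old value because the first ("inactive") operand is offset itself. A scalar model of that predicated add for the four-lane case, with an assumed upper-half mask (the actual mask constants are defined in the arm_levinson_durbin_*.c files):

#include <stdio.h>
#include <stdbool.h>

int main(void)
{
    unsigned offset[4] = { 0, 1, 2, 3 };   /* initial per-lane offsets        */
    const unsigned p = 10;                 /* scalar broadcast by vdupq_n     */

    /* Assumed predicate: only the upper two lanes are enabled, which is
     * what a name like LANE23_MASK suggests for a 4-lane vector. */
    const bool lane_enabled[4] = { false, false, true, true };

    /* Model of offset = vaddq_m(offset, offset, vecTmp, MASK): enabled
     * lanes get offset[i] + p, disabled lanes are left untouched. */
    for (int i = 0; i < 4; i++)
        if (lane_enabled[i])
            offset[i] += p;

    for (int i = 0; i < 4; i++)
        printf("offset[%d] = %u\n", i, offset[i]);
    return 0;
}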