
Searched refs:CMPLX_DIM (Results 1 – 25 of 30) sorted by relevance
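
CMPLX_DIM is the per-element stride used throughout CMSIS-DSP for interleaved complex data: each complex value is stored as one real scalar followed by one imaginary scalar, and the DSP sources define CMPLX_DIM as 2. A minimal sketch of that layout and the indexing it implies (the buffer and names below are illustrative, not taken from the library):

    /* Minimal sketch (not library code): complex data stored as interleaved
     * real/imaginary pairs, with CMPLX_DIM as the stride between elements.
     * The value 2 is assumed here.                                          */
    #include <stdio.h>

    #define CMPLX_DIM 2   /* one real value + one imaginary value */

    int main(void)
    {
        /* two complex samples, (1 + 2i) and (3 + 4i), stored interleaved */
        float buf[2 * CMPLX_DIM] = { 1.0f, 2.0f, 3.0f, 4.0f };

        for (int i = 0; i < 2; i++) {
            float re = buf[i * CMPLX_DIM];       /* real part of sample i */
            float im = buf[i * CMPLX_DIM + 1];   /* imaginary part        */
            printf("sample %d = %.1f %+.1fi\n", i, re, im);
        }
        return 0;
    }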


/cmsis-3.4.0/CMSIS/DSP/Source/MatrixFunctions/
arm_mat_cmplx_mult_f16.c
81 MATRIX_DIM * CMPLX_DIM, MATRIX_DIM * CMPLX_DIM + 1, in arm_mat_cmplx_mult_f16_2x2_mve()
83 MATRIX_DIM * CMPLX_DIM + 2 , MATRIX_DIM * CMPLX_DIM + 3, in arm_mat_cmplx_mult_f16_2x2_mve()
92 tmp = (CMPLX_DIM * MATRIX_DIM); in arm_mat_cmplx_mult_f16_2x2_mve()
93 vecColAOffs1 = vecColAOffs0 + (uint16_t)(CMPLX_DIM * MATRIX_DIM); in arm_mat_cmplx_mult_f16_2x2_mve()
121 *(float32_t *)(&pOut[0 * CMPLX_DIM * MATRIX_DIM]) = ((f32x4_t)vecTmp)[0]; in arm_mat_cmplx_mult_f16_2x2_mve()
122 *(float32_t *)(&pOut[0 * CMPLX_DIM * MATRIX_DIM + CMPLX_DIM]) = ((f32x4_t)vecTmp)[2]; in arm_mat_cmplx_mult_f16_2x2_mve()
127 *(float32_t *)(&pOut[1 * CMPLX_DIM * MATRIX_DIM]) = ((f32x4_t)vecTmp)[0]; in arm_mat_cmplx_mult_f16_2x2_mve()
128 *(float32_t *)(&pOut[1 * CMPLX_DIM * MATRIX_DIM + CMPLX_DIM]) = ((f32x4_t)vecTmp)[2]; in arm_mat_cmplx_mult_f16_2x2_mve()
149 float16_t *pInA1 = pInA0 + CMPLX_DIM * MATRIX_DIM; in arm_mat_cmplx_mult_f16_3x3_mve()
150 float16_t *pInA2 = pInA1 + CMPLX_DIM * MATRIX_DIM; in arm_mat_cmplx_mult_f16_3x3_mve()
[all …]
arm_mat_cmplx_mult_q31.c
77 q31_t const *pInA1 = pInA0 + CMPLX_DIM * MATRIX_DIM2; in arm_mat_cmplx_mult_q31_2x2_mve()
83 MATRIX_DIM2 * CMPLX_DIM, MATRIX_DIM2 * CMPLX_DIM + 1 in arm_mat_cmplx_mult_q31_2x2_mve()
99 pOut[0 * CMPLX_DIM * MATRIX_DIM2 + 0] = (q31_t) asrl(acc0, 31); in arm_mat_cmplx_mult_q31_2x2_mve()
100 pOut[0 * CMPLX_DIM * MATRIX_DIM2 + 1] = (q31_t) asrl(acc1, 31); in arm_mat_cmplx_mult_q31_2x2_mve()
101 pOut[1 * CMPLX_DIM * MATRIX_DIM2 + 0] = (q31_t) asrl(acc2, 31); in arm_mat_cmplx_mult_q31_2x2_mve()
102 pOut[1 * CMPLX_DIM * MATRIX_DIM2 + 1] = (q31_t) asrl(acc3, 31); in arm_mat_cmplx_mult_q31_2x2_mve()
106 pInB = pInB + CMPLX_DIM; in arm_mat_cmplx_mult_q31_2x2_mve()
117 pOut += CMPLX_DIM; in arm_mat_cmplx_mult_q31_2x2_mve()
119 pOut[0 * CMPLX_DIM * MATRIX_DIM2 + 0] = (q31_t) asrl(acc0, 31); in arm_mat_cmplx_mult_q31_2x2_mve()
120 pOut[0 * CMPLX_DIM * MATRIX_DIM2 + 1] = (q31_t) asrl(acc1, 31); in arm_mat_cmplx_mult_q31_2x2_mve()
[all …]
arm_mat_cmplx_mult_f32.c
81 float32_t *pInA1 = pInA0 + CMPLX_DIM * MATRIX_DIM2; in arm_mat_cmplx_mult_f32_2x2_mve()
86 MATRIX_DIM2 * CMPLX_DIM, MATRIX_DIM2 * CMPLX_DIM + 1 in arm_mat_cmplx_mult_f32_2x2_mve()
103 pOut[0 * CMPLX_DIM * MATRIX_DIM2 + 0] = acc0[0] + acc0[2]; in arm_mat_cmplx_mult_f32_2x2_mve()
104 pOut[0 * CMPLX_DIM * MATRIX_DIM2 + 1] = acc0[1] + acc0[3]; in arm_mat_cmplx_mult_f32_2x2_mve()
105 pOut[1 * CMPLX_DIM * MATRIX_DIM2 + 0] = acc1[0] + acc1[2]; in arm_mat_cmplx_mult_f32_2x2_mve()
106 pOut[1 * CMPLX_DIM * MATRIX_DIM2 + 1] = acc1[1] + acc1[3]; in arm_mat_cmplx_mult_f32_2x2_mve()
107 pOut += CMPLX_DIM; in arm_mat_cmplx_mult_f32_2x2_mve()
112 pInB = pInB + CMPLX_DIM; in arm_mat_cmplx_mult_f32_2x2_mve()
124 pOut[0 * CMPLX_DIM * MATRIX_DIM2 + 0] = acc0[0] + acc0[2]; in arm_mat_cmplx_mult_f32_2x2_mve()
125 pOut[0 * CMPLX_DIM * MATRIX_DIM2 + 1] = acc0[1] + acc0[3]; in arm_mat_cmplx_mult_f32_2x2_mve()
[all …]
arm_mat_cmplx_mult_q15.c
101 vecColBOffs[2] = numColsB * CMPLX_DIM; in arm_mat_cmplx_mult_q15()
102 vecColBOffs[3] = (numColsB * CMPLX_DIM) + 1; in arm_mat_cmplx_mult_q15()
103 vecColBOffs[4] = 2 * numColsB * CMPLX_DIM; in arm_mat_cmplx_mult_q15()
104 vecColBOffs[5] = 2 * (numColsB * CMPLX_DIM) + 1; in arm_mat_cmplx_mult_q15()
105 vecColBOffs[6] = 3 * numColsB * CMPLX_DIM; in arm_mat_cmplx_mult_q15()
106 vecColBOffs[7] = 3 * (numColsB * CMPLX_DIM) + 1; in arm_mat_cmplx_mult_q15()
144 pInB2 = pInB + CMPLX_DIM; in arm_mat_cmplx_mult_q15()
146 j += 2 * CMPLX_DIM; in arm_mat_cmplx_mult_q15()
167 blkCnt = (numColsA * CMPLX_DIM) >> 3; in arm_mat_cmplx_mult_q15()
180 vecOffs = vaddq_n_u16(vecOffs, (uint16_t) (numColsB * 4 * CMPLX_DIM)); in arm_mat_cmplx_mult_q15()
[all …]
arm_mat_cmplx_trans_f16.c
93 px = pOut + CMPLX_DIM * i; in arm_mat_cmplx_trans_f16()
105 px += CMPLX_DIM * nRows; in arm_mat_cmplx_trans_f16()
arm_mat_cmplx_trans_f32.c
98 px = pOut + CMPLX_DIM * i; in arm_mat_cmplx_trans_f32()
110 px += CMPLX_DIM * nRows; in arm_mat_cmplx_trans_f32()
arm_mat_cmplx_trans_q15.c
89 pOut = pDst->pData + CMPLX_DIM * i; in arm_mat_cmplx_trans_q15()
101 pOut += CMPLX_DIM *nRows; in arm_mat_cmplx_trans_q15()
arm_mat_cmplx_trans_q31.c
93 px = pOut + CMPLX_DIM * i; in arm_mat_cmplx_trans_q31()
105 px += CMPLX_DIM * nRows; in arm_mat_cmplx_trans_q31()
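
The matrix-function hits above share a single addressing pattern: in a row-major, interleaved complex matrix with numCols columns, element (row, col) starts at offset (row * numCols + col) * CMPLX_DIM, which is why row strides appear as CMPLX_DIM * MATRIX_DIM or CMPLX_DIM * nRows. A hedged sketch of that addressing (the helper name and types are illustrative, not a CMSIS API):

    /* Illustrative only: element addressing in a row-major interleaved
     * complex matrix, matching the strides seen in the hits above.
     * CMPLX_DIM == 2 is an assumption.                                   */
    #define CMPLX_DIM 2

    static float *cmplx_mat_elem(float *pData, unsigned numCols,
                                 unsigned row, unsigned col)
    {
        /* each complex element occupies CMPLX_DIM scalars */
        return pData + (row * numCols + col) * CMPLX_DIM;
    }
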
/cmsis-3.4.0/CMSIS/DSP/Include/
arm_helium_utils.h
361 blkCnt = (srcRows * CMPLX_DIM) >> 2; in arm_mat_cmplx_trans_32bit()
377 blkCnt = (srcRows * CMPLX_DIM) & 3; in arm_mat_cmplx_trans_32bit()
385 pDataRow += CMPLX_DIM; in arm_mat_cmplx_trans_32bit()
386 pDataDestRow += (srcRows * CMPLX_DIM); in arm_mat_cmplx_trans_32bit()
554 vecOffsRef = vmulq(vecOffsRef, (uint16_t) (srcCols * CMPLX_DIM)) in arm_mat_cmplx_trans_16bit()
566 blkCnt = (srcRows * CMPLX_DIM) >> 3; in arm_mat_cmplx_trans_16bit()
582 blkCnt = (srcRows * CMPLX_DIM) & 0x7; in arm_mat_cmplx_trans_16bit()
590 pDataRow += CMPLX_DIM; in arm_mat_cmplx_trans_16bit()
591 pDataDestRow += (srcRows * CMPLX_DIM); in arm_mat_cmplx_trans_16bit()
/cmsis-3.4.0/CMSIS/DSP/Source/TransformFunctions/
arm_cfft_f16.c
123 float16_t *inA = pSrc + CMPLX_DIM * i * n1; in _arm_radix4_butterfly_f16_mve()
124 float16_t *inB = inA + n2 * CMPLX_DIM; in _arm_radix4_butterfly_f16_mve()
125 float16_t *inC = inB + n2 * CMPLX_DIM; in _arm_radix4_butterfly_f16_mve()
126 float16_t *inD = inC + n2 * CMPLX_DIM; in _arm_radix4_butterfly_f16_mve()
332 float16_t *inA = pSrc + CMPLX_DIM * i * n1; in _arm_radix4_butterfly_inverse_f16_mve()
333 float16_t *inB = inA + n2 * CMPLX_DIM; in _arm_radix4_butterfly_inverse_f16_mve()
334 float16_t *inC = inB + n2 * CMPLX_DIM; in _arm_radix4_butterfly_inverse_f16_mve()
335 float16_t *inD = inC + n2 * CMPLX_DIM; in _arm_radix4_butterfly_inverse_f16_mve()
arm_cfft_q31.c
78 q31_t *inA = pSrc + CMPLX_DIM * i * n1; in _arm_radix4_butterfly_q31_mve()
79 q31_t *inB = inA + n2 * CMPLX_DIM; in _arm_radix4_butterfly_q31_mve()
80 q31_t *inC = inB + n2 * CMPLX_DIM; in _arm_radix4_butterfly_q31_mve()
81 q31_t *inD = inC + n2 * CMPLX_DIM; in _arm_radix4_butterfly_q31_mve()
328 q31_t *inA = pSrc + CMPLX_DIM * i * n1; in _arm_radix4_butterfly_inverse_q31_mve()
329 q31_t *inB = inA + n2 * CMPLX_DIM; in _arm_radix4_butterfly_inverse_q31_mve()
330 q31_t *inC = inB + n2 * CMPLX_DIM; in _arm_radix4_butterfly_inverse_q31_mve()
331 q31_t *inD = inC + n2 * CMPLX_DIM; in _arm_radix4_butterfly_inverse_q31_mve()
arm_cfft_q15.c
75 q15_t *inA = pSrc + CMPLX_DIM * i * n1; in _arm_radix4_butterfly_q15_mve()
76 q15_t *inB = inA + n2 * CMPLX_DIM; in _arm_radix4_butterfly_q15_mve()
77 q15_t *inC = inB + n2 * CMPLX_DIM; in _arm_radix4_butterfly_q15_mve()
78 q15_t *inD = inC + n2 * CMPLX_DIM; in _arm_radix4_butterfly_q15_mve()
311 q15_t *inA = pSrc + CMPLX_DIM * i * n1; in _arm_radix4_butterfly_inverse_q15_mve()
312 q15_t *inB = inA + n2 * CMPLX_DIM; in _arm_radix4_butterfly_inverse_q15_mve()
313 q15_t *inC = inB + n2 * CMPLX_DIM; in _arm_radix4_butterfly_inverse_q15_mve()
314 q15_t *inD = inC + n2 * CMPLX_DIM; in _arm_radix4_butterfly_inverse_q15_mve()
arm_cfft_f32.c
125 float32_t *inA = pSrc + CMPLX_DIM * i * n1; in _arm_radix4_butterfly_f32_mve()
126 float32_t *inB = inA + n2 * CMPLX_DIM; in _arm_radix4_butterfly_f32_mve()
127 float32_t *inC = inB + n2 * CMPLX_DIM; in _arm_radix4_butterfly_f32_mve()
128 float32_t *inD = inC + n2 * CMPLX_DIM; in _arm_radix4_butterfly_f32_mve()
334 float32_t *inA = pSrc + CMPLX_DIM * i * n1; in _arm_radix4_butterfly_inverse_f32_mve()
335 float32_t *inB = inA + n2 * CMPLX_DIM; in _arm_radix4_butterfly_inverse_f32_mve()
336 float32_t *inC = inB + n2 * CMPLX_DIM; in _arm_radix4_butterfly_inverse_f32_mve()
337 float32_t *inD = inC + n2 * CMPLX_DIM; in _arm_radix4_butterfly_inverse_f32_mve()
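
The transform-function hits above all perform the same pointer setup: each radix-4 stage takes the block that starts at complex index i * n1 and derives four sub-block pointers spaced n2 complex elements apart. A hedged sketch of that spacing (the function is illustrative; only the arithmetic mirrors the hits above):

    /* Illustrative only: pointer setup for one radix-4 block over an
     * interleaved complex buffer, following the inA/inB/inC/inD pattern
     * in the hits above. CMPLX_DIM == 2 is an assumption.                */
    #define CMPLX_DIM 2

    static void radix4_block_pointers(float *pSrc, int i, int n1, int n2,
                                      float **inA, float **inB,
                                      float **inC, float **inD)
    {
        *inA = pSrc + CMPLX_DIM * i * n1;   /* start of block i               */
        *inB = *inA + n2 * CMPLX_DIM;       /* second quarter, n2 elements on */
        *inC = *inB + n2 * CMPLX_DIM;       /* third quarter                  */
        *inD = *inC + n2 * CMPLX_DIM;       /* fourth quarter                 */
    }
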
/cmsis-3.4.0/CMSIS/DSP/Source/ComplexMathFunctions/
arm_cmplx_mult_cmplx_f16.c
138 blkCnt = CMPLX_DIM * (numSamples & 7); in arm_cmplx_mult_cmplx_f16()
156 blkCnt = numSamples * CMPLX_DIM; in arm_cmplx_mult_cmplx_f16()
arm_cmplx_dot_prod_f16.c
141 blkCnt = CMPLX_DIM * (numSamples & 7); in arm_cmplx_dot_prod_f16()
156 blkCnt = numSamples * CMPLX_DIM; in arm_cmplx_dot_prod_f16()
arm_cmplx_dot_prod_q15.c
119 blkCnt = CMPLX_DIM * (numSamples & 7); in arm_cmplx_dot_prod_q15()
136 blkCnt = numSamples * CMPLX_DIM; in arm_cmplx_dot_prod_q15()
arm_cmplx_dot_prod_q31.c
121 blkCnt = CMPLX_DIM * (numSamples & 3); in arm_cmplx_dot_prod_q31()
138 blkCnt = numSamples * CMPLX_DIM; in arm_cmplx_dot_prod_q31()
arm_cmplx_mult_cmplx_f32.c
136 blkCnt = CMPLX_DIM * (numSamples & 3); in arm_cmplx_mult_cmplx_f32()
154 blkCnt = numSamples * CMPLX_DIM; in arm_cmplx_mult_cmplx_f32()
arm_cmplx_mult_cmplx_q15.c
127 blkCnt = CMPLX_DIM * (numSamples & 7); in arm_cmplx_mult_cmplx_q15()
151 blkCnt = numSamples * CMPLX_DIM; in arm_cmplx_mult_cmplx_q15()
arm_cmplx_mult_cmplx_q31.c
125 blkCnt = CMPLX_DIM * (numSamples & 3); in arm_cmplx_mult_cmplx_q31()
146 blkCnt = numSamples * CMPLX_DIM; in arm_cmplx_mult_cmplx_q31()
arm_cmplx_conj_f16.c
80 uint32_t blockSize = numSamples * CMPLX_DIM; /* loop counters */ in arm_cmplx_conj_f16()
arm_cmplx_dot_prod_f32.c
136 blkCnt = CMPLX_DIM * (numSamples & 3); in arm_cmplx_dot_prod_f32()
149 blkCnt = numSamples * CMPLX_DIM; in arm_cmplx_dot_prod_f32()
arm_cmplx_conj_f32.c
79 uint32_t blockSize = numSamples * CMPLX_DIM; /* loop counters */ in arm_cmplx_conj_f32()
arm_cmplx_mult_real_f16.c
85 uint32_t blockSizeC = numSamples * CMPLX_DIM; /* loop counters */ in arm_cmplx_mult_real_f16()
arm_cmplx_conj_q31.c
60 uint32_t blockSize = numSamples * CMPLX_DIM; /* loop counters */ in arm_cmplx_conj_q31()
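
In the complex-math hits above, CMPLX_DIM converts a count of complex samples into a count of scalar values: the vectorized paths compute the scalar tail as CMPLX_DIM * (numSamples & 7) (or & 3 for 32-bit data) after whole vector blocks, while the plain paths simply walk numSamples * CMPLX_DIM scalars. A hedged sketch of those two counts (the helpers are illustrative, not CMSIS functions):

    /* Illustrative only: loop-count arithmetic matching the hits above.
     * CMPLX_DIM == 2 is an assumption.                                    */
    #include <stdint.h>

    #define CMPLX_DIM 2

    /* scalars left over after processing whole blocks of 8 complex samples */
    static uint32_t tail_scalar_count(uint32_t numSamples)
    {
        return CMPLX_DIM * (numSamples & 7U);
    }

    /* total scalar count for a plain, non-vectorized pass */
    static uint32_t total_scalar_count(uint32_t numSamples)
    {
        return numSamples * CMPLX_DIM;
    }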
