Searched refs:hn (Results 1 – 3 of 3) sorted by relevance
/liblc3-3.5.0-3.4.0/src/
ltpf_arm.h, in arm_resample_x64k_12k8() (local declaration of hn):

    56  const int16x2_t *hn = h + (i % (2*p)) * (48 / p);
    59  int32_t un = __smlad(*(xn++), *(hn++), 0);
    62  un = __smlad(*(xn++), *(hn++), un);
    63  un = __smlad(*(xn++), *(hn++), un);
    64  un = __smlad(*(xn++), *(hn++), un);
    65  un = __smlad(*(xn++), *(hn++), un);
    66  un = __smlad(*(xn++), *(hn++), un);

ltpf_arm.h, in arm_resample_x192k_12k8() (local declaration of hn):

    87  const int16x2_t *hn = h + (i % (2*p)) * (128 / p);
    90  int32_t un = __smlad(*(xn++), *(hn++), 0);
    93  un = __smlad(*(xn++), *(hn++), un);
    [all …]
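Each __smlad call here is the Arm DSP-extension dual 16-bit multiply-accumulate: it multiplies both int16 halves of two packed 32-bit words and adds both products to the accumulator, so every instruction retires two filter taps of the polyphase branch selected by i % (2*p). A minimal portable reference of one step, assuming int16x2_t is a 32-bit word holding two int16 lanes (the packed typedef and the smlad_ref name are illustrative, not taken from liblc3):

    #include <stdint.h>

    typedef uint32_t int16x2_t;   /* assumed: two int16 lanes packed in 32 bits */

    static int32_t smlad_ref(int16x2_t x, int16x2_t h, int32_t acc)
    {
        int16_t x0 = (int16_t)(x & 0xffff), x1 = (int16_t)(x >> 16);
        int16_t h0 = (int16_t)(h & 0xffff), h1 = (int16_t)(h >> 16);
        /* SMLAD semantics: acc + low*low + high*high, two MACs per step */
        return acc + x0 * h0 + x1 * h1;
    }

The (48 / p) and (128 / p) strides visible above are the per-branch coefficient counts, in int16x2 pairs, for the x64k and x192k rate families respectively.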
ltpf_neon.h, in neon_resample_16k_12k8() (local declaration of hn):

    61  const int16_t *hn = h[i & 3];
    65  un = vmull_s16( vld1_s16(xn), vld1_s16(hn)), xn += 4, hn += 4;
    66  un = vmlal_s16(un, vld1_s16(xn), vld1_s16(hn)), xn += 4, hn += 4;
    67  un = vmlal_s16(un, vld1_s16(xn), vld1_s16(hn)), xn += 4, hn += 4;
    68  un = vmlal_s16(un, vld1_s16(xn), vld1_s16(hn)), xn += 4, hn += 4;
    69  un = vmlal_s16(un, vld1_s16(xn), vld1_s16(hn)), xn += 4, hn += 4;

ltpf_neon.h, in neon_resample_32k_12k8() (local declaration of hn):

    107  const int16_t *hn = h[i & 1];
    110  int32x4_t un = vmull_s16(vld1_s16(xn), vld1_s16(hn));
    111  xn += 4, hn += 4;
    114  un = vmlal_s16(un, vld1_s16(xn), vld1_s16(hn)), xn += 4, hn += 4;
    [all …]
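The NEON variants run the same dot product four taps at a time: vld1_s16 loads four int16 samples or coefficients, vmull_s16 widens the first four products into an int32x4_t, and each vmlal_s16 folds four more taps into the lane-wise accumulator, which is then reduced horizontally. A hedged sketch of that pattern (the function name and the AArch64-only vaddvq_s32 reduction are my choices, not from liblc3):

    #include <arm_neon.h>
    #include <stdint.h>

    /* 4-lane dot product in the style of neon_resample_16k_12k8();
     * n is assumed to be a multiple of 4 */
    static int32_t dot16_neon(const int16_t *x, const int16_t *h, int n)
    {
        int32x4_t un = vmull_s16(vld1_s16(x), vld1_s16(h));   /* first 4 taps */
        for (int i = 4; i < n; i += 4)
            un = vmlal_s16(un, vld1_s16(x + i), vld1_s16(h + i));
        return vaddvq_s32(un);   /* AArch64: fold the 4 partial sums */
    }

Note that hn = h[i & 3] indexes a table of four per-branch coefficient pointers (i & 3 is i mod 4), while the 32 kHz variant cycles through only two with h[i & 1], consistent with the number of polyphase branches each input rate needs to reach 12.8 kHz.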
ltpf.c, in resample_x64k_12k8() (local declaration of hn):

    191  const int16_t *hn = h + (i % p) * w;
    196  un += *(xn++) * *(hn++);
    197  un += *(xn++) * *(hn++);
    198  un += *(xn++) * *(hn++);
    199  un += *(xn++) * *(hn++);
    200  un += *(xn++) * *(hn++);
    201  un += *(xn++) * *(hn++);
    202  un += *(xn++) * *(hn++);
    203  un += *(xn++) * *(hn++);
    204  un += *(xn++) * *(hn++);
    [all …]
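The generic C path in ltpf.c spells out the same structure without intrinsics: hn = h + (i % p) * w picks the w-tap coefficient row for the current polyphase branch, and the unrolled un += *(xn++) * *(hn++) lines form the dot product. A compact loop-rolled sketch of that shape (the function name, the xn argument, and the accumulator width are assumptions for illustration):

    #include <stdint.h>

    /* One output sample of a generic polyphase resampler, equivalent to
     * the unrolled MAC run at ltpf.c lines 196-204 (illustrative names) */
    static int32_t polyphase_dot(const int16_t *x, const int16_t *h,
                                 int i, int p, int w)
    {
        const int16_t *hn = h + (i % p) * w;   /* coefficients for branch i % p */
        const int16_t *xn = x;                 /* input window for sample i */
        int32_t un = 0;
        for (int k = 0; k < w; k++)
            un += *(xn++) * *(hn++);           /* multiply-accumulate */
        return un;
    }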