/cmsis-nn-latest/Source/NNSupportFunctions/
arm_nn_depthwise_conv_nt_t_padded_s8.c
    128  out_0 = arm_requantize_mve_32x4(out_0, mult, shift);    in arm_nn_depthwise_conv_nt_t_padded_s8()
    135  out_1 = arm_requantize_mve_32x4(out_1, mult, shift);    in arm_nn_depthwise_conv_nt_t_padded_s8()
    141  out_2 = arm_requantize_mve_32x4(out_2, mult, shift);    in arm_nn_depthwise_conv_nt_t_padded_s8()
    147  out_3 = arm_requantize_mve_32x4(out_3, mult, shift);    in arm_nn_depthwise_conv_nt_t_padded_s8()

arm_nn_depthwise_conv_nt_t_s8.c
    123  out_0 = arm_requantize_mve_32x4(out_0, mult, shift);    in arm_nn_depthwise_conv_nt_t_s8()
    129  out_1 = arm_requantize_mve_32x4(out_1, mult, shift);    in arm_nn_depthwise_conv_nt_t_s8()
    135  out_2 = arm_requantize_mve_32x4(out_2, mult, shift);    in arm_nn_depthwise_conv_nt_t_s8()
    141  out_3 = arm_requantize_mve_32x4(out_3, mult, shift);    in arm_nn_depthwise_conv_nt_t_s8()

arm_nn_depthwise_conv_nt_t_s4.c
    181  out_0 = arm_requantize_mve_32x4(out_0, mult, shift);    in arm_nn_depthwise_conv_nt_t_s4()
    187  out_1 = arm_requantize_mve_32x4(out_1, mult, shift);    in arm_nn_depthwise_conv_nt_t_s4()
    193  out_2 = arm_requantize_mve_32x4(out_2, mult, shift);    in arm_nn_depthwise_conv_nt_t_s4()
    199  out_3 = arm_requantize_mve_32x4(out_3, mult, shift);    in arm_nn_depthwise_conv_nt_t_s4()

arm_nn_mat_mul_core_1x_s8.c
    110  res = arm_requantize_mve_32x4(res, vldrwq_s32(output_mult), vldrwq_s32(output_shift));    in arm_nn_mat_mul_core_1x_s8()

arm_nn_mat_mul_core_1x_s4.c
    105  res = arm_requantize_mve_32x4(res, vldrwq_s32(output_mult), vldrwq_s32(output_shift));    in arm_nn_mat_mul_core_1x_s4()

arm_nn_mat_mult_nt_t_s16.c
    253  res = arm_requantize_mve_32x4(res, vldrwq_s32(multipliers), vldrwq_s32(shifts));    in arm_nn_mat_mult_nt_t_s16()

arm_nn_mat_mult_nt_t_s8.c
    201  res = arm_requantize_mve_32x4(res, vldrwq_s32(multipliers), vldrwq_s32(shifts));    in arm_nn_mat_mult_nt_t_s8()

arm_nn_mat_mult_nt_t_s4.c
    340  res = arm_requantize_mve_32x4(res, vldrwq_s32(multipliers), vldrwq_s32(shifts));    in arm_nn_mat_mult_nt_t_s4()

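The matrix-multiplication kernels above all share one call-site shape: the int32 accumulators for four adjacent output channels are requantized in a single call, with the per-channel multipliers and shifts loaded as vectors via vldrwq_s32. A minimal sketch of that pattern follows; it assumes an MVE-enabled target (Armv8.1-M Helium) and the CMSIS-NN support header, and the helper name, the zero-point add, the clamping and the narrowing store are illustrative assumptions modelled on typical CMSIS-NN s8 post-processing, not code copied from these files.

#include <arm_mve.h>                  /* MVE/Helium intrinsics */
#include "arm_nnsupportfunctions.h"   /* declares arm_requantize_mve_32x4() */

/* Hypothetical helper: requantize four adjacent output channels. */
static void requantize_four_channels(int32x4_t acc,
                                     const int32_t *output_mult,   /* per-channel multipliers */
                                     const int32_t *output_shift,  /* per-channel shifts */
                                     int32_t out_offset,
                                     int8_t *out)
{
    /* Load four per-channel quantization parameters and rescale the four
     * accumulators in one call, as in arm_nn_mat_mul_core_1x_s8.c:110. */
    int32x4_t res = arm_requantize_mve_32x4(acc, vldrwq_s32(output_mult), vldrwq_s32(output_shift));

    /* Assumed s8 post-processing: add the output zero point, clamp to the
     * int8 range and narrow-store one byte per lane. */
    res = vaddq_n_s32(res, out_offset);
    res = vmaxq_s32(res, vdupq_n_s32(-128));
    res = vminq_s32(res, vdupq_n_s32(127));
    vstrbq_s32(out, res);
}
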
/cmsis-nn-latest/Source/ConvolutionFunctions/
arm_nn_depthwise_conv_s8_core.c
    138  out_0 = arm_requantize_mve_32x4(out_0, mult, shift);    in arm_nn_depthwise_conv_s8_core()
    139  out_1 = arm_requantize_mve_32x4(out_1, mult, shift);    in arm_nn_depthwise_conv_s8_core()
    187  col_0_sum = arm_requantize_mve_32x4(col_0_sum, mult, shift);    in arm_nn_depthwise_conv_s8_core()
    188  col_1_sum = arm_requantize_mve_32x4(col_1_sum, mult, shift);    in arm_nn_depthwise_conv_s8_core()

arm_transpose_conv_s8.c
    185  result = arm_requantize_mve_32x4(result,    in arm_transpose_conv_s8()

arm_depthwise_conv_s8.c
    117  … res = arm_requantize_mve_32x4(res, vldrwq_s32(output_mult), vldrwq_s32(output_shift));    in depthwise_conv_s8_mult_4()

arm_depthwise_conv_s8_opt.c
    194  out_0 = arm_requantize_mve_32x4(out_0, mult, shift);    in arm_depthwise_conv_s8_opt()

arm_depthwise_conv_s4_opt.c
    236  out_0 = arm_requantize_mve_32x4(out_0, mult, shift);    in arm_depthwise_conv_s4_opt()

/cmsis-nn-latest/Source/BasicMathFunctions/
arm_elementwise_mul_s16_s8.c
    77  res_0 = arm_requantize_mve_32x4(res_0, vdupq_n_s32(out_mult), vdupq_n_s32(out_shift));    in arm_elementwise_mul_s16_s8()

arm_elementwise_mul_s16.c
    79  res_0 = arm_requantize_mve_32x4(res_0, vdupq_n_s32(out_mult), vdupq_n_s32(out_shift));    in arm_elementwise_mul_s16()

arm_elementwise_mul_acc_s16.c
    82  res_0 = arm_requantize_mve_32x4(res_0, vdupq_n_s32(out_mult), vdupq_n_s32(out_shift));    in arm_elementwise_mul_acc_s16()

arm_elementwise_mul_s16_batch_offset.c
    82  res_0 = arm_requantize_mve_32x4(res_0, vdupq_n_s32(out_mult), vdupq_n_s32(out_shift));    in arm_elementwise_mul_s16_batch_offset()

arm_elementwise_mul_s8.c
    81  res_0 = arm_requantize_mve_32x4(res_0, vdupq_n_s32(out_mult), vdupq_n_s32(out_shift));    in arm_elementwise_mul_s8()

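In contrast to the per-channel pattern above, the elementwise-multiply kernels in this directory use per-tensor quantization parameters: a single out_mult/out_shift pair is broadcast to all four lanes with vdupq_n_s32. A short sketch of that shape is below; the variable names mirror the snippets above, while the wrapper function itself is illustrative.

#include <arm_mve.h>
#include "arm_nnsupportfunctions.h"

/* Illustrative per-tensor requantization of one vector of products,
 * mirroring the call at arm_elementwise_mul_s8.c:81. */
static int32x4_t requantize_per_tensor(int32x4_t res_0, int32_t out_mult, int32_t out_shift)
{
    /* One scalar multiplier/shift pair, splatted across the four lanes. */
    return arm_requantize_mve_32x4(res_0, vdupq_n_s32(out_mult), vdupq_n_s32(out_shift));
}
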
/cmsis-nn-latest/Include/
arm_nnsupportfunctions.h
    1619  __STATIC_FORCEINLINE int32x4_t arm_requantize_mve_32x4(const int32x4_t val,    in arm_requantize_mve_32x4()  (function definition)

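The definition entry above is truncated after the first parameter. Judging from the three-argument call sites listed in this report, the full prototype presumably takes the value vector plus a multiplier vector and a shift vector, i.e. int32x4_t arm_requantize_mve_32x4(const int32x4_t val, const int32x4_t multiplier, const int32x4_t shift). The scalar model below is an approximation for illustration only, not library code; it ignores the saturation and rounding corner cases the real vector helper has to handle. Per lane the result is roughly val * multiplier * 2^shift / 2^31.

#include <stdint.h>

/* Approximate scalar model of one lane (an assumption, not library code). */
static int32_t requantize_lane_model(int32_t val, int32_t multiplier, int32_t shift)
{
    /* A positive shift is applied before the multiply, a negative shift
     * becomes a rounding right shift afterwards. */
    const int32_t left_shift  = shift > 0 ? shift : 0;
    const int32_t right_shift = shift > 0 ? 0 : -shift;

    /* High half of the doubling multiply: round(product / 2^31). */
    const int64_t product = ((int64_t)val << left_shift) * (int64_t)multiplier;
    int32_t high = (int32_t)((product + (1LL << 30)) >> 31);

    /* Rounding divide by 2^right_shift (round half up). */
    if (right_shift > 0)
    {
        high = (int32_t)(((int64_t)high + (1LL << (right_shift - 1))) >> right_shift);
    }
    return high;
}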