// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2019 Mellanox Technologies. */

#include "en/params.h"

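/* RX is considered to be driven by XDP if either an XDP program is attached
 * or the queue is an AF_XDP (XSK) queue.
 */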
static inline bool mlx5e_rx_is_xdp(struct mlx5e_params *params,
				   struct mlx5e_xsk_param *xsk)
{
	return params->xdp_prog || xsk;
}

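/* Headroom reserved at the start of a linear RX buffer. XDP needs the full
 * XDP_PACKET_HEADROOM so that programs can prepend data, and AF_XDP adds
 * whatever extra headroom userspace requested on top of that; the regular
 * path only needs MLX5_RX_HEADROOM. NET_IP_ALIGN keeps the IP header
 * aligned on platforms that define it as non-zero.
 */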
u16 mlx5e_get_linear_rq_headroom(struct mlx5e_params *params,
				 struct mlx5e_xsk_param *xsk)
{
	u16 headroom = NET_IP_ALIGN;

	if (mlx5e_rx_is_xdp(params, xsk)) {
		headroom += XDP_PACKET_HEADROOM;
		if (xsk)
			headroom += xsk->headroom;
	} else {
		headroom += MLX5_RX_HEADROOM;
	}

	return headroom;
}

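/* The smallest usable size of a linear RX fragment: the headroom plus the
 * hardware MTU derived from the configured software MTU.
 */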
u32 mlx5e_rx_get_min_frag_sz(struct mlx5e_params *params,
			     struct mlx5e_xsk_param *xsk)
{
	u32 hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
	u16 linear_rq_headroom = mlx5e_get_linear_rq_headroom(params, xsk);

	return linear_rq_headroom + hw_mtu;
}

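/* Actual size of a linear RX fragment. The regular path builds SKBs in
 * place, so it must also leave room for the skb_shared_info at the end of
 * the buffer (MLX5_SKB_FRAG_SZ); XDP and AF_XDP round up further, as
 * explained in the comments below.
 */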
u32 mlx5e_rx_get_linear_frag_sz(struct mlx5e_params *params,
				struct mlx5e_xsk_param *xsk)
{
	u32 frag_sz = mlx5e_rx_get_min_frag_sz(params, xsk);

	/* AF_XDP doesn't build SKBs in place. */
	if (!xsk)
		frag_sz = MLX5_SKB_FRAG_SZ(frag_sz);

	/* XDP in mlx5e doesn't support multiple packets per page. AF_XDP is a
	 * special case: it can run with frames smaller than a page, as it
	 * doesn't allocate pages dynamically. However, here we pretend that
	 * fragments are page-sized: it lets us treat XSK frames like pages
	 * by redirecting alloc and free operations to XSK rings and by relying
	 * on each "page" (which is a frame) holding at most one packet. The
	 * latter is important, because frames may arrive in a random order,
	 * and we would have trouble assembling a real page out of multiple
	 * frames.
	 */
	if (mlx5e_rx_is_xdp(params, xsk))
		frag_sz = max_t(u32, frag_sz, PAGE_SIZE);

	/* Even if we can go with a smaller fragment size, we must not put
	 * multiple packets into a single frame.
	 */
	if (xsk)
		frag_sz = max_t(u32, frag_sz, xsk->chunk_size);

	return frag_sz;
}

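/* log2 of the number of packets that fit into a single multi-packet WQE,
 * given that each packet takes one linear fragment:
 * log2(WQE size) - log2(fragment size).
 */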
u8 mlx5e_mpwqe_log_pkts_per_wqe(struct mlx5e_params *params,
				struct mlx5e_xsk_param *xsk)
{
	u32 linear_frag_sz = mlx5e_rx_get_linear_frag_sz(params, xsk);

	return MLX5_MPWRQ_LOG_WQE_SZ - order_base_2(linear_frag_sz);
}

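/* Whether received packets can be handed to the stack as linear SKBs built
 * directly in the RX buffer: LRO must be off, and one fragment must fit
 * into a single page.
 */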
bool mlx5e_rx_is_linear_skb(struct mlx5e_params *params,
			    struct mlx5e_xsk_param *xsk)
{
	/* AF_XDP allocates SKBs on XDP_PASS - ensure they don't occupy more
	 * than one page. For this, check both with and without xsk.
	 */
	u32 linear_frag_sz = max(mlx5e_rx_get_linear_frag_sz(params, xsk),
				 mlx5e_rx_get_linear_frag_sz(params, NULL));

	return !params->lro_en && linear_frag_sz <= PAGE_SIZE;
}

#define MLX5_MAX_MPWQE_LOG_WQE_STRIDE_SZ ((BIT(__mlx5_bit_sz(wq, log_wqe_stride_size)) - 1) + \
					  MLX5_MPWQE_LOG_STRIDE_SZ_BASE)
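/* Whether the striding RQ (MPWQE) can use one stride per packet, which is
 * what allows building linear SKBs in place. On top of the generic
 * linear-SKB check, the stride size must be representable in the
 * log_wqe_stride_size field, and, unless the device advertises
 * ext_stride_num_range, the resulting number of strides per WQE must not
 * drop below the legacy minimum.
 */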
bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev,
				  struct mlx5e_params *params,
				  struct mlx5e_xsk_param *xsk)
{
	u32 linear_frag_sz = mlx5e_rx_get_linear_frag_sz(params, xsk);
	s8 signed_log_num_strides_param;
	u8 log_num_strides;

	if (!mlx5e_rx_is_linear_skb(params, xsk))
		return false;

	if (order_base_2(linear_frag_sz) > MLX5_MAX_MPWQE_LOG_WQE_STRIDE_SZ)
		return false;

	if (MLX5_CAP_GEN(mdev, ext_stride_num_range))
		return true;

	log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ - order_base_2(linear_frag_sz);
	signed_log_num_strides_param =
		(s8)log_num_strides - MLX5_MPWQE_LOG_NUM_STRIDES_BASE;

	return signed_log_num_strides_param >= 0;
}

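/* log2 of the number of WQEs in a striding RQ, chosen so that the ring
 * still holds about 2^log_rq_mtu_frames packets, but never fewer WQEs than
 * the minimum MPW ring size.
 */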
u8 mlx5e_mpwqe_get_log_rq_size(struct mlx5e_params *params,
			       struct mlx5e_xsk_param *xsk)
{
	u8 log_pkts_per_wqe = mlx5e_mpwqe_log_pkts_per_wqe(params, xsk);

	/* Numbers are unsigned, don't subtract to avoid underflow. */
	if (params->log_rq_mtu_frames <
	    log_pkts_per_wqe + MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW)
		return MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW;

	return params->log_rq_mtu_frames - log_pkts_per_wqe;
}

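/* log2 of the stride size: one stride per linear fragment when linear SKBs
 * are possible, otherwise the device default.
 */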
u8 mlx5e_mpwqe_get_log_stride_size(struct mlx5_core_dev *mdev,
				   struct mlx5e_params *params,
				   struct mlx5e_xsk_param *xsk)
{
	if (mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk))
		return order_base_2(mlx5e_rx_get_linear_frag_sz(params, xsk));

	return MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev);
}

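/* log2 of the number of strides per multi-packet WQE, following directly
 * from the WQE size and the stride size.
 */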
u8 mlx5e_mpwqe_get_log_num_strides(struct mlx5_core_dev *mdev,
				   struct mlx5e_params *params,
				   struct mlx5e_xsk_param *xsk)
{
	return MLX5_MPWRQ_LOG_WQE_SZ -
	       mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
}

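/* Headroom to reserve in the RX buffer: only needed when SKBs are built
 * linearly in place, for either WQ type; the non-linear paths return 0.
 */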
u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev,
			  struct mlx5e_params *params,
			  struct mlx5e_xsk_param *xsk)
{
	bool is_linear_skb = (params->rq_wq_type == MLX5_WQ_TYPE_CYCLIC) ?
		mlx5e_rx_is_linear_skb(params, xsk) :
		mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk);

	return is_linear_skb ? mlx5e_get_linear_rq_headroom(params, xsk) : 0;
}