/*
 * Copyright (c) 2018, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __MLX5_EN_XDP_H__
#define __MLX5_EN_XDP_H__

#include "en.h"
#include "en/txrx.h"

#define MLX5E_XDP_MIN_INLINE (ETH_HLEN + VLAN_HLEN)
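/* DS (16-byte) units occupied by an empty TX WQE; a basic XDP_TX WQE needs
 * one extra DS for its single scatter/gather data segment.
 */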
#define MLX5E_XDP_TX_EMPTY_DS_COUNT \
	(sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS)
#define MLX5E_XDP_TX_DS_COUNT (MLX5E_XDP_TX_EMPTY_DS_COUNT + 1 /* SG DS */)

#define MLX5E_XDPSQ_STOP_ROOM (MLX5E_SQ_STOP_ROOM)

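/* Largest payload that may be copied inline into an MPWQE data segment, and
 * the worst-case number of DS units such an inlined frame occupies (inline
 * segment header included).
 */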
#define MLX5E_XDP_INLINE_WQE_SZ_THRSD (256 - sizeof(struct mlx5_wqe_inline_seg))
#define MLX5E_XDP_INLINE_WQE_MAX_DS_CNT \
	DIV_ROUND_UP(MLX5E_XDP_INLINE_WQE_SZ_THRSD, MLX5_SEND_WQE_DS)

/* The product MLX5_SEND_WQE_MAX_WQEBBS * MLX5_SEND_WQEBB_NUM_DS
 * (16 * 4 == 64) does not fit in the 6-bit DS field of the Ctrl Segment.
 * We use a bound lower than MLX5_SEND_WQE_MAX_WQEBBS to let a
 * full-session WQE be cache-aligned.
 */
#if L1_CACHE_BYTES < 128
#define MLX5E_XDP_MPW_MAX_WQEBBS (MLX5_SEND_WQE_MAX_WQEBBS - 1)
#else
#define MLX5E_XDP_MPW_MAX_WQEBBS (MLX5_SEND_WQE_MAX_WQEBBS - 2)
#endif

#define MLX5E_XDP_MPW_MAX_NUM_DS \
	(MLX5E_XDP_MPW_MAX_WQEBBS * MLX5_SEND_WQEBB_NUM_DS)

struct mlx5e_xsk_param;
int mlx5e_xdp_max_mtu(struct mlx5e_params *params, struct mlx5e_xsk_param *xsk);
bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di,
		      void *va, u16 *rx_headroom, u32 *len, bool xsk);
void mlx5e_xdp_mpwqe_complete(struct mlx5e_xdpsq *sq);
bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq);
void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq);
void mlx5e_set_xmit_fp(struct mlx5e_xdpsq *sq, bool is_mpw);
void mlx5e_xdp_rx_poll_complete(struct mlx5e_rq *rq);
int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
		   u32 flags);

static inline void mlx5e_xdp_tx_enable(struct mlx5e_priv *priv)
{
	set_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state);
}

static inline void mlx5e_xdp_tx_disable(struct mlx5e_priv *priv)
{
	clear_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state);
	/* let other device's napi(s) see our new state */
	synchronize_rcu();
}

static inline bool mlx5e_xdp_tx_is_enabled(struct mlx5e_priv *priv)
{
	return test_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state);
}

static inline void mlx5e_xdp_set_open(struct mlx5e_priv *priv)
{
	set_bit(MLX5E_STATE_XDP_OPEN, &priv->state);
}

static inline void mlx5e_xdp_set_closed(struct mlx5e_priv *priv)
{
	clear_bit(MLX5E_STATE_XDP_OPEN, &priv->state);
}

static inline bool mlx5e_xdp_is_open(struct mlx5e_priv *priv)
{
	return test_bit(MLX5E_STATE_XDP_OPEN, &priv->state);
}

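/* Ring the SQ doorbell only if a WQE is pending (its control segment was
 * saved in doorbell_cseg by the xmit path), then clear the pending state.
 */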
static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_xdpsq *sq)
{
	if (sq->doorbell_cseg) {
		mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, sq->doorbell_cseg);
		sq->doorbell_cseg = NULL;
	}
}

/* Enable inline WQEs to shift some load from a congested HCA (HW) to
 * a less congested cpu (SW). Inlining is turned on once the number of
 * outstanding descriptors crosses the high watermark, and turned off
 * again when the queue drains below the low watermark.
 */
static inline void mlx5e_xdp_update_inline_state(struct mlx5e_xdpsq *sq)
{
	u16 outstanding = sq->xdpi_fifo_pc - sq->xdpi_fifo_cc;
	struct mlx5e_xdp_mpwqe *session = &sq->mpwqe;

#define MLX5E_XDP_INLINE_WATERMARK_LOW	10
#define MLX5E_XDP_INLINE_WATERMARK_HIGH 128

	if (session->inline_on) {
		if (outstanding <= MLX5E_XDP_INLINE_WATERMARK_LOW)
			session->inline_on = 0;
		return;
	}

	/* inline is off */
	if (outstanding >= MLX5E_XDP_INLINE_WATERMARK_HIGH)
		session->inline_on = 1;
}

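/* With inlining enabled, check whether a worst-case inlined packet would
 * exceed the DS budget of the current MPWQE session.
 */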
static inline bool
mlx5e_xdp_no_room_for_inline_pkt(struct mlx5e_xdp_mpwqe *session)
{
	return session->inline_on &&
	       session->ds_count + MLX5E_XDP_INLINE_WQE_MAX_DS_CNT > MLX5E_XDP_MPW_MAX_NUM_DS;
}

static inline void
mlx5e_fill_xdpsq_frag_edge(struct mlx5e_xdpsq *sq, struct mlx5_wq_cyc *wq,
			   u16 pi, u16 nnops)
{
	struct mlx5e_xdp_wqe_info *edge_wi, *wi = &sq->db.wqe_info[pi];

	edge_wi = wi + nnops;
	/* fill the sq frag edge with nops to avoid a wqe wrapping across two pages */
	for (; wi < edge_wi; wi++) {
		wi->num_wqebbs = 1;
		wi->num_pkts   = 0;
		mlx5e_post_nop(wq, sq->sqn, &sq->pc);
	}

	sq->stats->nops += nnops;
}

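/* Add one frame to the open MPWQE session: small frames are copied inline
 * when inlining is enabled, anything else is attached as a regular data
 * segment pointing at its DMA address.
 */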
static inline void
mlx5e_xdp_mpwqe_add_dseg(struct mlx5e_xdpsq *sq,
			 struct mlx5e_xdp_xmit_data *xdptxd,
			 struct mlx5e_xdpsq_stats *stats)
{
	struct mlx5e_xdp_mpwqe *session = &sq->mpwqe;
	struct mlx5_wqe_data_seg *dseg =
		(struct mlx5_wqe_data_seg *)session->wqe + session->ds_count;
	u32 dma_len = xdptxd->len;

	session->pkt_count++;

	if (session->inline_on && dma_len <= MLX5E_XDP_INLINE_WQE_SZ_THRSD) {
		struct mlx5_wqe_inline_seg *inline_dseg =
			(struct mlx5_wqe_inline_seg *)dseg;
		u16 ds_len = sizeof(*inline_dseg) + dma_len;
		u16 ds_cnt = DIV_ROUND_UP(ds_len, MLX5_SEND_WQE_DS);

		inline_dseg->byte_count = cpu_to_be32(dma_len | MLX5_INLINE_SEG);
		memcpy(inline_dseg->data, xdptxd->data, dma_len);

		session->ds_count += ds_cnt;
		stats->inlnw++;
		return;
	}

	dseg->addr       = cpu_to_be64(xdptxd->dma_addr);
	dseg->byte_count = cpu_to_be32(dma_len);
	dseg->lkey       = sq->mkey_be;
	session->ds_count++;
}

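/* Fetch and zero the WQE at the current producer counter; *pi returns its
 * index within the cyclic work queue.
 */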
static inline struct mlx5e_tx_wqe *
mlx5e_xdpsq_fetch_wqe(struct mlx5e_xdpsq *sq, u16 *pi)
{
	struct mlx5_wq_cyc *wq = &sq->wq;
	struct mlx5e_tx_wqe *wqe;

	*pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
	wqe = mlx5_wq_cyc_get_wqe(wq, *pi);
	memset(wqe, 0, sizeof(*wqe));

	return wqe;
}

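/* The xdpi FIFO tracks per-frame metadata (struct mlx5e_xdp_info) for frames
 * posted to the XDPSQ: push advances the producer counter, pop the consumer
 * counter, both masked to the (power-of-two) FIFO size.
 */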
static inline void
mlx5e_xdpi_fifo_push(struct mlx5e_xdp_info_fifo *fifo,
		     struct mlx5e_xdp_info *xi)
{
	u32 i = (*fifo->pc)++ & fifo->mask;

	fifo->xi[i] = *xi;
}

static inline struct mlx5e_xdp_info
mlx5e_xdpi_fifo_pop(struct mlx5e_xdp_info_fifo *fifo)
{
	return fifo->xi[(*fifo->cc)++ & fifo->mask];
}
#endif