/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#ifndef _I40E_TXRX_H_
#define _I40E_TXRX_H_

#include <net/xdp.h>

/* Interrupt Throttling and Rate Limiting Goodies */
#define I40E_DEFAULT_IRQ_WORK	256

/* The datasheet for the X710 and XL710 indicates that the maximum value for
 * the ITR is 8160usec which is then called out as 0xFF0 with a 2usec
 * resolution. 8160 is 0x1FE0 when written out in hex. So instead of storing
 * the register value, which is divided by 2, let's use the actual values and
 * avoid an excessive amount of translation.
 */
#define I40E_ITR_DYNAMIC	0x8000	/* use top bit as a flag */
#define I40E_ITR_MASK		0x1FFE	/* mask for ITR register value */
#define I40E_MIN_ITR		2	/* reg uses 2 usec resolution */
#define I40E_ITR_20K		50
#define I40E_ITR_8K		122
#define I40E_MAX_ITR		8160	/* maximum value as per datasheet */
#define ITR_TO_REG(setting)	((setting) & ~I40E_ITR_DYNAMIC)
#define ITR_REG_ALIGN(setting)	__ALIGN_MASK(setting, ~I40E_ITR_MASK)
#define ITR_IS_DYNAMIC(setting)	(!!((setting) & I40E_ITR_DYNAMIC))

#define I40E_ITR_RX_DEF		(I40E_ITR_20K | I40E_ITR_DYNAMIC)
#define I40E_ITR_TX_DEF		(I40E_ITR_20K | I40E_ITR_DYNAMIC)
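/* Example (sketch): the default of (I40E_ITR_20K | I40E_ITR_DYNAMIC) stores
 * 50 usecs plus the dynamic flag; ITR_TO_REG() strips the flag and returns
 * 50, while the divide-by-2 register scaling described above is applied
 * only when the value is actually programmed into hardware.
 */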

/* 0x40 is the enable bit for interrupt rate limiting, and must be set if
 * the value of the rate limit is non-zero
 */
#define INTRL_ENA		BIT(6)
#define I40E_MAX_INTRL		0x3B	/* reg uses 4 usec resolution */
#define INTRL_REG_TO_USEC(intrl) ((intrl & ~INTRL_ENA) << 2)

/**
 * i40e_intrl_usec_to_reg - convert interrupt rate limit to register
 * @intrl: interrupt rate limit to convert
 *
 * This function converts a decimal interrupt rate limit in usecs to the
 * appropriate register format expected by the firmware when setting
 * interrupt rate limit.
 */
static inline u16 i40e_intrl_usec_to_reg(int intrl)
{
	if (intrl >> 2)
		return ((intrl >> 2) | INTRL_ENA);
	else
		return 0;
}
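/* Worked example: i40e_intrl_usec_to_reg(40) returns (40 >> 2) | INTRL_ENA,
 * i.e. 0xA | 0x40 == 0x4A, and INTRL_REG_TO_USEC(0x4A) converts that back
 * to 40 usecs. A requested limit below 4 usecs collapses to 0, leaving rate
 * limiting disabled.
 */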

#define I40E_QUEUE_END_OF_LIST 0x7FF

/* This enum matches hardware bits and is meant to be used by DYN_CTLN
 * registers and QINT registers, or more generally anywhere in the manual
 * that mentions ITR_INDX. ITR_NONE cannot be used as an index 'n' into any
 * register; instead it is a special value meaning "don't update" ITR0/1/2.
 */
enum i40e_dyn_idx_t {
	I40E_IDX_ITR0 = 0,
	I40E_IDX_ITR1 = 1,
	I40E_IDX_ITR2 = 2,
	I40E_ITR_NONE = 3	/* ITR_NONE must not be used as an index */
};

/* these are indexes into ITRN registers */
#define I40E_RX_ITR	I40E_IDX_ITR0
#define I40E_TX_ITR	I40E_IDX_ITR1

/* Supported RSS offloads */
#define I40E_DEFAULT_RSS_HENA ( \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
	BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
	BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6) | \
	BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD))

#define I40E_DEFAULT_RSS_HENA_EXPANDED (I40E_DEFAULT_RSS_HENA | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))

#define i40e_pf_get_default_rss_hena(pf) \
	(((pf)->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) ? \
	  I40E_DEFAULT_RSS_HENA_EXPANDED : I40E_DEFAULT_RSS_HENA)

/* Supported Rx Buffer Sizes (a multiple of 128) */
#define I40E_RXBUFFER_256	256
#define I40E_RXBUFFER_1536	1536	/* 128B aligned standard Ethernet frame */
#define I40E_RXBUFFER_2048	2048
#define I40E_RXBUFFER_3072	3072	/* Used for large frames w/ padding */
#define I40E_MAX_RXBUFFER	9728	/* largest size for single descriptor */

/* NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we
 * reserve 2 more, and skb_shared_info adds an additional 384 bytes more;
 * this adds up to 512 bytes of extra data, meaning the smallest allocation
 * we could have is 1K.
 * i.e. RXBUFFER_256 --> 960 byte skb (size-1024 slab)
 * i.e. RXBUFFER_512 --> 1216 byte skb (size-2048 slab)
 */
#define I40E_RX_HDR_SIZE	I40E_RXBUFFER_256
#define I40E_PACKET_HDR_PAD	(ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2))
#define i40e_rx_desc i40e_16byte_rx_desc

#define I40E_RX_DMA_ATTR \
	(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)

/* Attempt to maximize the headroom available for incoming frames. We
 * use a 2K buffer for receives and need 1536/1534 to store the data for
 * the frame. This leaves us with 512 bytes of room. From that we need
 * to deduct the space needed for the shared info and the padding needed
 * to IP align the frame.
 *
 * Note: For cache line sizes 256 or larger this value is going to end
 *	 up negative. In these cases we should fall back to the legacy
 *	 receive path.
 */
#if (PAGE_SIZE < 8192)
#define I40E_2K_TOO_SMALL_WITH_PADDING \
((NET_SKB_PAD + I40E_RXBUFFER_1536) > SKB_WITH_OVERHEAD(I40E_RXBUFFER_2048))

static inline int i40e_compute_pad(int rx_buf_len)
{
	int page_size, pad_size;

	page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);
	pad_size = SKB_WITH_OVERHEAD(page_size) - rx_buf_len;

	return pad_size;
}

static inline int i40e_skb_pad(void)
{
	int rx_buf_len;

	/* If a 2K buffer cannot handle a standard Ethernet frame then
	 * optimize padding for a 3K buffer instead of a 1.5K buffer.
	 *
	 * For a 3K buffer we need to add enough padding to allow for
	 * tailroom due to NET_IP_ALIGN possibly shifting us out of
	 * cache-line alignment.
	 */
	if (I40E_2K_TOO_SMALL_WITH_PADDING)
		rx_buf_len = I40E_RXBUFFER_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN);
	else
		rx_buf_len = I40E_RXBUFFER_1536;

	/* if needed make room for NET_IP_ALIGN */
	rx_buf_len -= NET_IP_ALIGN;

	return i40e_compute_pad(rx_buf_len);
}
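/* Rough example (assumes 4K pages, NET_IP_ALIGN == 2, 64 byte cache lines
 * and a 320 byte skb_shared_info, so the 2K buffer is big enough):
 * i40e_skb_pad() asks for i40e_compute_pad(1536 - 2), which aligns 1534 up
 * to 2048 and returns 2048 - 320 - 1534 = 194 bytes of headroom.
 */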

#define I40E_SKB_PAD i40e_skb_pad()
#else
#define I40E_2K_TOO_SMALL_WITH_PADDING false
#define I40E_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
#endif

/**
 * i40e_test_staterr - tests bits in Rx descriptor status and error fields
 * @rx_desc: pointer to receive descriptor (in le64 format)
 * @stat_err_bits: value to mask
 *
 * This function does some fast chicanery in order to return the
 * value of the mask which is really only used for boolean tests.
 * The status_error_len doesn't need to be shifted because it begins
 * at offset zero.
 */
static inline bool i40e_test_staterr(union i40e_rx_desc *rx_desc,
				     const u64 stat_err_bits)
{
	return !!(rx_desc->wb.qword1.status_error_len &
		  cpu_to_le64(stat_err_bits));
}
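/* Usage sketch -- check whether a descriptor has been written back by
 * testing its DD ("descriptor done") status bit; the bit position is
 * assumed to come from i40e_type.h and the handler is illustrative only:
 *
 *	if (i40e_test_staterr(rx_desc, BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
 *		process_descriptor(rx_desc);
 */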

/* How many Rx Buffers do we bundle into one write to the hardware ? */
#define I40E_RX_BUFFER_WRITE	32	/* Must be power of 2 */

#define I40E_RX_NEXT_DESC(r, i, n)		\
	do {					\
		(i)++;				\
		if ((i) == (r)->count)		\
			i = 0;			\
		(n) = I40E_RX_DESC((r), (i));	\
	} while (0)

#define I40E_MAX_BUFFER_TXD	8
#define I40E_MIN_TX_LEN		17

/* The size limit for a transmit buffer in a descriptor is (16K - 1).
 * In order to align with the read requests we will align the value to
 * the nearest 4K which represents our maximum read request size.
 */
#define I40E_MAX_READ_REQ_SIZE		4096
#define I40E_MAX_DATA_PER_TXD		(16 * 1024 - 1)
#define I40E_MAX_DATA_PER_TXD_ALIGNED \
	(I40E_MAX_DATA_PER_TXD & ~(I40E_MAX_READ_REQ_SIZE - 1))

/**
 * i40e_txd_use_count - estimate the number of descriptors needed for Tx
 * @size: transmit request size in bytes
 *
 * Due to hardware alignment restrictions (4K alignment), we need to
 * assume that we can have no more than 12K of data per descriptor, even
 * though each descriptor can take up to 16K - 1 bytes of aligned memory.
 * Thus, we need to divide by 12K. But division is slow! Instead,
 * we decompose the operation into shifts and one relatively cheap
 * multiply operation.
 *
 * To divide by 12K, we first divide by 4K, then divide by 3:
 *     To divide by 4K, shift right by 12 bits
 *     To divide by 3, multiply by 85, then divide by 256
 *     (Divide by 256 is done by shifting right by 8 bits)
 * Finally, we add one to round up. Because 256 isn't an exact multiple of
 * 3, we'll underestimate near each multiple of 12K. This is actually more
 * accurate as we have 4K - 1 of wiggle room that we can fit into the last
 * segment. For our purposes this is accurate out to 1M which is orders of
 * magnitude greater than our largest possible GSO size.
 *
 * This would then be implemented as:
 *     return (((size >> 12) * 85) >> 8) + 1;
 *
 * Since multiplication and division are commutative, we can reorder
 * operations into:
 *     return ((size * 85) >> 20) + 1;
 */
static inline unsigned int i40e_txd_use_count(unsigned int size)
{
	return ((size * 85) >> 20) + 1;
}
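/* Worked example: a 32768 byte fragment gives ((32768 * 85) >> 20) + 1 == 3
 * descriptors, the same result as dividing by 12K and rounding up.
 */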

/* Tx Descriptors needed, worst case */
#define DESC_NEEDED (MAX_SKB_FRAGS + 6)

#define I40E_TX_FLAGS_HW_VLAN		BIT(1)
#define I40E_TX_FLAGS_SW_VLAN		BIT(2)
#define I40E_TX_FLAGS_TSO		BIT(3)
#define I40E_TX_FLAGS_IPV4		BIT(4)
#define I40E_TX_FLAGS_IPV6		BIT(5)
#define I40E_TX_FLAGS_TSYN		BIT(8)
#define I40E_TX_FLAGS_FD_SB		BIT(9)
#define I40E_TX_FLAGS_UDP_TUNNEL	BIT(10)
#define I40E_TX_FLAGS_VLAN_MASK		0xffff0000
#define I40E_TX_FLAGS_VLAN_PRIO_MASK	0xe0000000
#define I40E_TX_FLAGS_VLAN_PRIO_SHIFT	29
#define I40E_TX_FLAGS_VLAN_SHIFT	16
struct i40e_tx_buffer {
	struct i40e_tx_desc *next_to_watch;
	union {
		struct xdp_frame *xdpf;
		struct sk_buff *skb;
		void *raw_buf;
	};
	unsigned int bytecount;
	unsigned short gso_segs;

	DEFINE_DMA_UNMAP_ADDR(dma);
	DEFINE_DMA_UNMAP_LEN(len);
	u32 tx_flags;
};

struct i40e_rx_buffer {
	dma_addr_t dma;
	struct page *page;
	__u32 page_offset;
	__u16 pagecnt_bias;
	__u32 page_count;
};

struct i40e_queue_stats {
	u64 packets;
	u64 bytes;
};

struct i40e_tx_queue_stats {
	u64 restart_queue;
	u64 tx_busy;
	u64 tx_done_old;
	u64 tx_linearize;
	u64 tx_force_wb;
	u64 tx_stopped;
	int prev_pkt_ctr;
};

struct i40e_rx_queue_stats {
	u64 non_eop_descs;
	u64 alloc_page_failed;
	u64 alloc_buff_failed;
	u64 page_reuse_count;
	u64 page_alloc_count;
	u64 page_waive_count;
	u64 page_busy_count;
};

enum i40e_ring_state_t {
	__I40E_TX_FDIR_INIT_DONE,
	__I40E_TX_XPS_INIT_DONE,
	__I40E_RING_STATE_NBITS	/* must be last */
};

/* some useful defines for virtchannel interface, which
 * is the only remaining user of header split
 */
#define I40E_RX_DTYPE_HEADER_SPLIT	1
#define I40E_RX_SPLIT_L2	0x1
#define I40E_RX_SPLIT_IP	0x2
#define I40E_RX_SPLIT_TCP_UDP	0x4
#define I40E_RX_SPLIT_SCTP	0x8

/* struct that defines a descriptor ring, associated with a VSI */
struct i40e_ring {
	struct i40e_ring *next;		/* pointer to next ring in q_vector */
	void *desc;			/* Descriptor ring memory */
	struct device *dev;		/* Used for DMA mapping */
	struct net_device *netdev;	/* netdev ring maps to */
	struct bpf_prog *xdp_prog;
	union {
		struct i40e_tx_buffer *tx_bi;
		struct i40e_rx_buffer *rx_bi;
		struct xdp_buff **rx_bi_zc;
	};
	DECLARE_BITMAP(state, __I40E_RING_STATE_NBITS);
	u16 queue_index;		/* Queue number of ring */
	u8 dcb_tc;			/* Traffic class of ring */
	u8 __iomem *tail;

	/* Storing the xdp_buff on the ring saves the state of a partially
	 * built packet when i40e_clean_rx_ring_irq() must return before it
	 * sees EOP, so that packet building can resume for this ring in the
	 * next call to i40e_clean_rx_ring_irq().
	 */
	struct xdp_buff xdp;

	/* Next descriptor to be processed; next_to_clean is updated only on
	 * processing EOP descriptor
	 */
	u16 next_to_process;
	/* high bit set means dynamic, use accessor routines to read/write.
	 * hardware only supports 2us resolution for the ITR registers.
	 * these values always store the USER setting, and must be converted
	 * before programming to a register.
	 */
	u16 itr_setting;

	u16 count;			/* Number of descriptors */
	u16 reg_idx;			/* HW register index of the ring */
	u16 rx_buf_len;

	/* used in interrupt processing */
	u16 next_to_use;
	u16 next_to_clean;
	u16 xdp_tx_active;

	u8 atr_sample_rate;
	u8 atr_count;

	bool ring_active;		/* is ring online or not */
	bool arm_wb;			/* do something to arm write back */
	u8 packet_stride;

	u16 flags;
#define I40E_TXR_FLAGS_WB_ON_ITR		BIT(0)
#define I40E_RXR_FLAGS_BUILD_SKB_ENABLED	BIT(1)
#define I40E_TXR_FLAGS_XDP			BIT(2)

	/* stats structs */
	struct i40e_queue_stats	stats;
	struct u64_stats_sync syncp;
	union {
		struct i40e_tx_queue_stats tx_stats;
		struct i40e_rx_queue_stats rx_stats;
	};

	unsigned int size;		/* length of descriptor ring in bytes */
	dma_addr_t dma;			/* physical address of ring */

	struct i40e_vsi *vsi;		/* Backreference to associated VSI */
	struct i40e_q_vector *q_vector;	/* Backreference to associated vector */

	struct rcu_head rcu;		/* to avoid race on free */
	u16 next_to_alloc;

	struct i40e_channel *ch;
	u16 rx_offset;
	struct xdp_rxq_info xdp_rxq;
	struct xsk_buff_pool *xsk_pool;
} ____cacheline_internodealigned_in_smp;

static inline bool ring_uses_build_skb(struct i40e_ring *ring)
{
	return !!(ring->flags & I40E_RXR_FLAGS_BUILD_SKB_ENABLED);
}

static inline void set_ring_build_skb_enabled(struct i40e_ring *ring)
{
	ring->flags |= I40E_RXR_FLAGS_BUILD_SKB_ENABLED;
}

static inline void clear_ring_build_skb_enabled(struct i40e_ring *ring)
{
	ring->flags &= ~I40E_RXR_FLAGS_BUILD_SKB_ENABLED;
}

static inline bool ring_is_xdp(struct i40e_ring *ring)
{
	return !!(ring->flags & I40E_TXR_FLAGS_XDP);
}

static inline void set_ring_xdp(struct i40e_ring *ring)
{
	ring->flags |= I40E_TXR_FLAGS_XDP;
}

#define I40E_ITR_ADAPTIVE_MIN_INC	0x0002
#define I40E_ITR_ADAPTIVE_MIN_USECS	0x0002
#define I40E_ITR_ADAPTIVE_MAX_USECS	0x007e
#define I40E_ITR_ADAPTIVE_LATENCY	0x8000
#define I40E_ITR_ADAPTIVE_BULK		0x0000

struct i40e_ring_container {
	struct i40e_ring *ring;		/* pointer to linked list of ring(s) */
	unsigned long next_update;	/* jiffies value of next update */
	unsigned int total_bytes;	/* total bytes processed this int */
	unsigned int total_packets;	/* total packets processed this int */
	u16 count;
	u16 target_itr;			/* target ITR setting for ring(s) */
	u16 current_itr;		/* current ITR setting for ring(s) */
};

/* iterator for handling rings in ring container */
#define i40e_for_each_ring(pos, head) \
	for (pos = (head).ring; pos != NULL; pos = pos->next)
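/* Usage sketch (the handler name below is illustrative only):
 *
 *	struct i40e_ring *ring;
 *
 *	i40e_for_each_ring(ring, q_vector->tx)
 *		clean_one_tx_ring(ring);
 */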

static inline unsigned int i40e_rx_pg_order(struct i40e_ring *ring)
{
#if (PAGE_SIZE < 8192)
	if (ring->rx_buf_len > (PAGE_SIZE / 2))
		return 1;
#endif
	return 0;
}

#define i40e_rx_pg_size(_ring) (PAGE_SIZE << i40e_rx_pg_order(_ring))
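/* Example: with 4K pages a 3K (I40E_RXBUFFER_3072) buffer cannot share a
 * page with a second buffer, so i40e_rx_pg_order() returns 1 and
 * i40e_rx_pg_size() becomes 8K; a 2K buffer ring stays on order-0 pages.
 */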

bool i40e_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count);
netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
u16 i40e_lan_select_queue(struct net_device *netdev, struct sk_buff *skb,
			  struct net_device *sb_dev);
void i40e_clean_tx_ring(struct i40e_ring *tx_ring);
void i40e_clean_rx_ring(struct i40e_ring *rx_ring);
int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring);
int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring);
void i40e_free_tx_resources(struct i40e_ring *tx_ring);
void i40e_free_rx_resources(struct i40e_ring *rx_ring);
int i40e_napi_poll(struct napi_struct *napi, int budget);
void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector);
u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw);
void i40e_detect_recover_hung(struct i40e_vsi *vsi);
int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size);
bool __i40e_chk_linearize(struct sk_buff *skb);
int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
		  u32 flags);
bool i40e_is_non_eop(struct i40e_ring *rx_ring,
		     union i40e_rx_desc *rx_desc);

/**
 * i40e_get_head - Retrieve head from head writeback
 * @tx_ring: tx ring to fetch head of
 *
 * Returns value of Tx ring head based on value stored
 * in head write-back location
 **/
static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
{
	void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;

	return le32_to_cpu(*(volatile __le32 *)head);
}

/**
 * i40e_xmit_descriptor_count - calculate number of Tx descriptors needed
 * @skb: send buffer
 *
 * Returns number of data descriptors needed for this skb. Returns 0 to
 * indicate there are not enough descriptors available in this ring since
 * we need at least one descriptor.
 **/
static inline int i40e_xmit_descriptor_count(struct sk_buff *skb)
{
	const skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	int count = 0, size = skb_headlen(skb);

	for (;;) {
		count += i40e_txd_use_count(size);

		if (!nr_frags--)
			break;

		size = skb_frag_size(frag++);
	}

	return count;
}
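/* Example: an skb with a 1500 byte linear area and two 4K fragments needs
 * i40e_txd_use_count() == 1 descriptor for each piece, so this returns 3.
 */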

/**
 * i40e_maybe_stop_tx - 1st level check for Tx stop conditions
 * @tx_ring: the ring to be checked
 * @size: the size buffer we want to assure is available
 *
 * Returns 0 if stop is not needed
 **/
static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
{
	if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
		return 0;
	return __i40e_maybe_stop_tx(tx_ring, size);
}
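/* Example: as long as the ring still has at least 'size' free descriptors
 * the fast path above returns 0 without taking the slow path; only when
 * free descriptors drop below 'size' does __i40e_maybe_stop_tx() run and
 * possibly stop the queue.
 */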

/**
 * i40e_chk_linearize - Check if there are more than 8 fragments per packet
 * @skb: send buffer
 * @count: number of buffers used
 *
 * Note: Our HW can't scatter-gather more than 8 fragments to build
 * a packet on the wire and so we need to figure out the cases where we
 * need to linearize the skb.
 **/
static inline bool i40e_chk_linearize(struct sk_buff *skb, int count)
{
	/* Both TSO and single send will work if count is less than 8 */
	if (likely(count < I40E_MAX_BUFFER_TXD))
		return false;

	if (skb_is_gso(skb))
		return __i40e_chk_linearize(skb);

	/* we can support up to 8 data buffers for a single send */
	return count != I40E_MAX_BUFFER_TXD;
}
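/* Example: a non-GSO skb needing 9 descriptors exceeds the 8 buffer limit,
 * so this returns true and the caller must linearize; at exactly 8 buffers
 * a single non-GSO send still fits and this returns false.
 */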

/**
 * txring_txq - Find the netdev Tx ring based on the i40e Tx ring
 * @ring: Tx ring to find the netdev equivalent of
 **/
static inline struct netdev_queue *txring_txq(const struct i40e_ring *ring)
{
	return netdev_get_tx_queue(ring->netdev, ring->queue_index);
}
#endif /* _I40E_TXRX_H_ */