1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2018, Intel Corporation. */
3
4 /* The driver transmit and receive code */
5
6 #include <linux/prefetch.h>
7 #include <linux/mm.h>
8 #include <linux/bpf_trace.h>
9 #include <net/xdp.h>
10 #include "ice_txrx_lib.h"
11 #include "ice_lib.h"
12 #include "ice.h"
13 #include "ice_dcb_lib.h"
14 #include "ice_xsk.h"
15
16 #define ICE_RX_HDR_SIZE 256
17
18 #define FDIR_DESC_RXDID 0x40
19 #define ICE_FDIR_CLEAN_DELAY 10
20
21 /**
22 * ice_prgm_fdir_fltr - Program a Flow Director filter
23 * @vsi: VSI to send dummy packet
24 * @fdir_desc: flow director descriptor
25 * @raw_packet: allocated buffer for flow director
26 */
27 int
28 ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc,
29 u8 *raw_packet)
30 {
31 struct ice_tx_buf *tx_buf, *first;
32 struct ice_fltr_desc *f_desc;
33 struct ice_tx_desc *tx_desc;
34 struct ice_ring *tx_ring;
35 struct device *dev;
36 dma_addr_t dma;
37 u32 td_cmd;
38 u16 i;
39
40 /* VSI and Tx ring */
41 if (!vsi)
42 return -ENOENT;
43 tx_ring = vsi->tx_rings[0];
44 if (!tx_ring || !tx_ring->desc)
45 return -ENOENT;
46 dev = tx_ring->dev;
47
48 /* we are using two descriptors to add/del a filter and we can wait */
49 for (i = ICE_FDIR_CLEAN_DELAY; ICE_DESC_UNUSED(tx_ring) < 2; i--) {
50 if (!i)
51 return -EAGAIN;
52 msleep_interruptible(1);
53 }
54
55 dma = dma_map_single(dev, raw_packet, ICE_FDIR_MAX_RAW_PKT_SIZE,
56 DMA_TO_DEVICE);
57
58 if (dma_mapping_error(dev, dma))
59 return -EINVAL;
60
61 /* grab the next descriptor */
62 i = tx_ring->next_to_use;
63 first = &tx_ring->tx_buf[i];
64 f_desc = ICE_TX_FDIRDESC(tx_ring, i);
65 memcpy(f_desc, fdir_desc, sizeof(*f_desc));
66
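/* the second descriptor is a data descriptor that points at the dummy
 * packet used to trigger the filter programming
 */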
67 i++;
68 i = (i < tx_ring->count) ? i : 0;
69 tx_desc = ICE_TX_DESC(tx_ring, i);
70 tx_buf = &tx_ring->tx_buf[i];
71
72 i++;
73 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
74
75 memset(tx_buf, 0, sizeof(*tx_buf));
76 dma_unmap_len_set(tx_buf, len, ICE_FDIR_MAX_RAW_PKT_SIZE);
77 dma_unmap_addr_set(tx_buf, dma, dma);
78
79 tx_desc->buf_addr = cpu_to_le64(dma);
80 td_cmd = ICE_TXD_LAST_DESC_CMD | ICE_TX_DESC_CMD_DUMMY |
81 ICE_TX_DESC_CMD_RE;
82
83 tx_buf->tx_flags = ICE_TX_FLAGS_DUMMY_PKT;
84 tx_buf->raw_buf = raw_packet;
85
86 tx_desc->cmd_type_offset_bsz =
87 ice_build_ctob(td_cmd, 0, ICE_FDIR_MAX_RAW_PKT_SIZE, 0);
88
89 /* Force memory write to complete before letting h/w know
90 * there are new descriptors to fetch.
91 */
92 wmb();
93
94 /* mark the data descriptor to be watched */
95 first->next_to_watch = tx_desc;
96
97 writel(tx_ring->next_to_use, tx_ring->tail);
98
99 return 0;
100 }
101
102 /**
103 * ice_unmap_and_free_tx_buf - Release a Tx buffer
104 * @ring: the ring that owns the buffer
105 * @tx_buf: the buffer to free
106 */
107 static void
108 ice_unmap_and_free_tx_buf(struct ice_ring *ring, struct ice_tx_buf *tx_buf)
109 {
110 if (tx_buf->skb) {
111 if (tx_buf->tx_flags & ICE_TX_FLAGS_DUMMY_PKT)
112 devm_kfree(ring->dev, tx_buf->raw_buf);
113 else if (ice_ring_is_xdp(ring))
114 page_frag_free(tx_buf->raw_buf);
115 else
116 dev_kfree_skb_any(tx_buf->skb);
117 if (dma_unmap_len(tx_buf, len))
118 dma_unmap_single(ring->dev,
119 dma_unmap_addr(tx_buf, dma),
120 dma_unmap_len(tx_buf, len),
121 DMA_TO_DEVICE);
122 } else if (dma_unmap_len(tx_buf, len)) {
123 dma_unmap_page(ring->dev,
124 dma_unmap_addr(tx_buf, dma),
125 dma_unmap_len(tx_buf, len),
126 DMA_TO_DEVICE);
127 }
128
129 tx_buf->next_to_watch = NULL;
130 tx_buf->skb = NULL;
131 dma_unmap_len_set(tx_buf, len, 0);
132 /* tx_buf must be completely set up in the transmit path */
133 }
134
135 static struct netdev_queue *txring_txq(const struct ice_ring *ring)
136 {
137 return netdev_get_tx_queue(ring->netdev, ring->q_index);
138 }
139
140 /**
141 * ice_clean_tx_ring - Free any empty Tx buffers
142 * @tx_ring: ring to be cleaned
143 */
144 void ice_clean_tx_ring(struct ice_ring *tx_ring)
145 {
146 u16 i;
147
148 if (ice_ring_is_xdp(tx_ring) && tx_ring->xsk_pool) {
149 ice_xsk_clean_xdp_ring(tx_ring);
150 goto tx_skip_free;
151 }
152
153 /* ring already cleared, nothing to do */
154 if (!tx_ring->tx_buf)
155 return;
156
157 /* Free all the Tx ring sk_buffs */
158 for (i = 0; i < tx_ring->count; i++)
159 ice_unmap_and_free_tx_buf(tx_ring, &tx_ring->tx_buf[i]);
160
161 tx_skip_free:
162 memset(tx_ring->tx_buf, 0, sizeof(*tx_ring->tx_buf) * tx_ring->count);
163
164 /* Zero out the descriptor ring */
165 memset(tx_ring->desc, 0, tx_ring->size);
166
167 tx_ring->next_to_use = 0;
168 tx_ring->next_to_clean = 0;
169
170 if (!tx_ring->netdev)
171 return;
172
173 /* cleanup Tx queue statistics */
174 netdev_tx_reset_queue(txring_txq(tx_ring));
175 }
176
177 /**
178 * ice_free_tx_ring - Free Tx resources per queue
179 * @tx_ring: Tx descriptor ring for a specific queue
180 *
181 * Free all transmit software resources
182 */
183 void ice_free_tx_ring(struct ice_ring *tx_ring)
184 {
185 ice_clean_tx_ring(tx_ring);
186 devm_kfree(tx_ring->dev, tx_ring->tx_buf);
187 tx_ring->tx_buf = NULL;
188
189 if (tx_ring->desc) {
190 dmam_free_coherent(tx_ring->dev, tx_ring->size,
191 tx_ring->desc, tx_ring->dma);
192 tx_ring->desc = NULL;
193 }
194 }
195
196 /**
197 * ice_clean_tx_irq - Reclaim resources after transmit completes
198 * @tx_ring: Tx ring to clean
199 * @napi_budget: Used to determine if we are in netpoll
200 *
201 * Returns true if there's any budget left (e.g. the clean is finished)
202 */
203 static bool ice_clean_tx_irq(struct ice_ring *tx_ring, int napi_budget)
204 {
205 unsigned int total_bytes = 0, total_pkts = 0;
206 unsigned int budget = ICE_DFLT_IRQ_WORK;
207 struct ice_vsi *vsi = tx_ring->vsi;
208 s16 i = tx_ring->next_to_clean;
209 struct ice_tx_desc *tx_desc;
210 struct ice_tx_buf *tx_buf;
211
212 tx_buf = &tx_ring->tx_buf[i];
213 tx_desc = ICE_TX_DESC(tx_ring, i);
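/* offset the index by the ring size so the wrap point can be detected
 * below with a simple !i check instead of comparing against count
 */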
214 i -= tx_ring->count;
215
216 prefetch(&vsi->state);
217
218 do {
219 struct ice_tx_desc *eop_desc = tx_buf->next_to_watch;
220
221 /* if next_to_watch is not set then there is no work pending */
222 if (!eop_desc)
223 break;
224
225 smp_rmb(); /* prevent any other reads prior to eop_desc */
226
227 /* if the descriptor isn't done, no work yet to do */
228 if (!(eop_desc->cmd_type_offset_bsz &
229 cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
230 break;
231
232 /* clear next_to_watch to prevent false hangs */
233 tx_buf->next_to_watch = NULL;
234
235 /* update the statistics for this packet */
236 total_bytes += tx_buf->bytecount;
237 total_pkts += tx_buf->gso_segs;
238
239 if (ice_ring_is_xdp(tx_ring))
240 page_frag_free(tx_buf->raw_buf);
241 else
242 /* free the skb */
243 napi_consume_skb(tx_buf->skb, napi_budget);
244
245 /* unmap skb header data */
246 dma_unmap_single(tx_ring->dev,
247 dma_unmap_addr(tx_buf, dma),
248 dma_unmap_len(tx_buf, len),
249 DMA_TO_DEVICE);
250
251 /* clear tx_buf data */
252 tx_buf->skb = NULL;
253 dma_unmap_len_set(tx_buf, len, 0);
254
255 /* unmap remaining buffers */
256 while (tx_desc != eop_desc) {
257 tx_buf++;
258 tx_desc++;
259 i++;
260 if (unlikely(!i)) {
261 i -= tx_ring->count;
262 tx_buf = tx_ring->tx_buf;
263 tx_desc = ICE_TX_DESC(tx_ring, 0);
264 }
265
266 /* unmap any remaining paged data */
267 if (dma_unmap_len(tx_buf, len)) {
268 dma_unmap_page(tx_ring->dev,
269 dma_unmap_addr(tx_buf, dma),
270 dma_unmap_len(tx_buf, len),
271 DMA_TO_DEVICE);
272 dma_unmap_len_set(tx_buf, len, 0);
273 }
274 }
275
276 /* move us one more past the eop_desc for start of next pkt */
277 tx_buf++;
278 tx_desc++;
279 i++;
280 if (unlikely(!i)) {
281 i -= tx_ring->count;
282 tx_buf = tx_ring->tx_buf;
283 tx_desc = ICE_TX_DESC(tx_ring, 0);
284 }
285
286 prefetch(tx_desc);
287
288 /* update budget accounting */
289 budget--;
290 } while (likely(budget));
291
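/* undo the ring-size offset applied before the loop */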
292 i += tx_ring->count;
293 tx_ring->next_to_clean = i;
294
295 ice_update_tx_ring_stats(tx_ring, total_pkts, total_bytes);
296
297 if (ice_ring_is_xdp(tx_ring))
298 return !!budget;
299
300 netdev_tx_completed_queue(txring_txq(tx_ring), total_pkts,
301 total_bytes);
302
303 #define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
304 if (unlikely(total_pkts && netif_carrier_ok(tx_ring->netdev) &&
305 (ICE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
306 /* Make sure that anybody stopping the queue after this
307 * sees the new next_to_clean.
308 */
309 smp_mb();
310 if (__netif_subqueue_stopped(tx_ring->netdev,
311 tx_ring->q_index) &&
312 !test_bit(__ICE_DOWN, vsi->state)) {
313 netif_wake_subqueue(tx_ring->netdev,
314 tx_ring->q_index);
315 ++tx_ring->tx_stats.restart_q;
316 }
317 }
318
319 return !!budget;
320 }
321
322 /**
323 * ice_setup_tx_ring - Allocate the Tx descriptors
324 * @tx_ring: the Tx ring to set up
325 *
326 * Return 0 on success, negative on error
327 */
328 int ice_setup_tx_ring(struct ice_ring *tx_ring)
329 {
330 struct device *dev = tx_ring->dev;
331
332 if (!dev)
333 return -ENOMEM;
334
335 /* warn if we are about to overwrite the pointer */
336 WARN_ON(tx_ring->tx_buf);
337 tx_ring->tx_buf =
338 devm_kzalloc(dev, sizeof(*tx_ring->tx_buf) * tx_ring->count,
339 GFP_KERNEL);
340 if (!tx_ring->tx_buf)
341 return -ENOMEM;
342
343 /* round up to nearest page */
344 tx_ring->size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
345 PAGE_SIZE);
346 tx_ring->desc = dmam_alloc_coherent(dev, tx_ring->size, &tx_ring->dma,
347 GFP_KERNEL);
348 if (!tx_ring->desc) {
349 dev_err(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
350 tx_ring->size);
351 goto err;
352 }
353
354 tx_ring->next_to_use = 0;
355 tx_ring->next_to_clean = 0;
356 tx_ring->tx_stats.prev_pkt = -1;
357 return 0;
358
359 err:
360 devm_kfree(dev, tx_ring->tx_buf);
361 tx_ring->tx_buf = NULL;
362 return -ENOMEM;
363 }
364
365 /**
366 * ice_clean_rx_ring - Free Rx buffers
367 * @rx_ring: ring to be cleaned
368 */
369 void ice_clean_rx_ring(struct ice_ring *rx_ring)
370 {
371 struct device *dev = rx_ring->dev;
372 u16 i;
373
374 /* ring already cleared, nothing to do */
375 if (!rx_ring->rx_buf)
376 return;
377
378 if (rx_ring->xsk_pool) {
379 ice_xsk_clean_rx_ring(rx_ring);
380 goto rx_skip_free;
381 }
382
383 /* Free all the Rx ring sk_buffs */
384 for (i = 0; i < rx_ring->count; i++) {
385 struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i];
386
387 if (rx_buf->skb) {
388 dev_kfree_skb(rx_buf->skb);
389 rx_buf->skb = NULL;
390 }
391 if (!rx_buf->page)
392 continue;
393
394 /* Invalidate cache lines that may have been written to by
395 * device so that we avoid corrupting memory.
396 */
397 dma_sync_single_range_for_cpu(dev, rx_buf->dma,
398 rx_buf->page_offset,
399 rx_ring->rx_buf_len,
400 DMA_FROM_DEVICE);
401
402 /* free resources associated with mapping */
403 dma_unmap_page_attrs(dev, rx_buf->dma, ice_rx_pg_size(rx_ring),
404 DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
405 __page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
406
407 rx_buf->page = NULL;
408 rx_buf->page_offset = 0;
409 }
410
411 rx_skip_free:
412 memset(rx_ring->rx_buf, 0, sizeof(*rx_ring->rx_buf) * rx_ring->count);
413
414 /* Zero out the descriptor ring */
415 memset(rx_ring->desc, 0, rx_ring->size);
416
417 rx_ring->next_to_alloc = 0;
418 rx_ring->next_to_clean = 0;
419 rx_ring->next_to_use = 0;
420 }
421
422 /**
423 * ice_free_rx_ring - Free Rx resources
424 * @rx_ring: ring to clean the resources from
425 *
426 * Free all receive software resources
427 */
428 void ice_free_rx_ring(struct ice_ring *rx_ring)
429 {
430 ice_clean_rx_ring(rx_ring);
431 if (rx_ring->vsi->type == ICE_VSI_PF)
432 if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
433 xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
434 rx_ring->xdp_prog = NULL;
435 devm_kfree(rx_ring->dev, rx_ring->rx_buf);
436 rx_ring->rx_buf = NULL;
437
438 if (rx_ring->desc) {
439 dmam_free_coherent(rx_ring->dev, rx_ring->size,
440 rx_ring->desc, rx_ring->dma);
441 rx_ring->desc = NULL;
442 }
443 }
444
445 /**
446 * ice_setup_rx_ring - Allocate the Rx descriptors
447 * @rx_ring: the Rx ring to set up
448 *
449 * Return 0 on success, negative on error
450 */
451 int ice_setup_rx_ring(struct ice_ring *rx_ring)
452 {
453 struct device *dev = rx_ring->dev;
454
455 if (!dev)
456 return -ENOMEM;
457
458 /* warn if we are about to overwrite the pointer */
459 WARN_ON(rx_ring->rx_buf);
460 rx_ring->rx_buf =
461 devm_kzalloc(dev, sizeof(*rx_ring->rx_buf) * rx_ring->count,
462 GFP_KERNEL);
463 if (!rx_ring->rx_buf)
464 return -ENOMEM;
465
466 /* round up to nearest page */
467 rx_ring->size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
468 PAGE_SIZE);
469 rx_ring->desc = dmam_alloc_coherent(dev, rx_ring->size, &rx_ring->dma,
470 GFP_KERNEL);
471 if (!rx_ring->desc) {
472 dev_err(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
473 rx_ring->size);
474 goto err;
475 }
476
477 rx_ring->next_to_use = 0;
478 rx_ring->next_to_clean = 0;
479
480 if (ice_is_xdp_ena_vsi(rx_ring->vsi))
481 WRITE_ONCE(rx_ring->xdp_prog, rx_ring->vsi->xdp_prog);
482
483 if (rx_ring->vsi->type == ICE_VSI_PF &&
484 !xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
485 if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
486 rx_ring->q_index))
487 goto err;
488 return 0;
489
490 err:
491 devm_kfree(dev, rx_ring->rx_buf);
492 rx_ring->rx_buf = NULL;
493 return -ENOMEM;
494 }
495
496 /**
497 * ice_rx_offset - Return expected offset into page to access data
498 * @rx_ring: Ring we are requesting offset of
499 *
500 * Returns the offset value for ring into the data buffer.
501 */
502 static unsigned int ice_rx_offset(struct ice_ring *rx_ring)
503 {
504 if (ice_ring_uses_build_skb(rx_ring))
505 return ICE_SKB_PAD;
506 else if (ice_is_xdp_ena_vsi(rx_ring->vsi))
507 return XDP_PACKET_HEADROOM;
508
509 return 0;
510 }
511
512 static unsigned int
513 ice_rx_frame_truesize(struct ice_ring *rx_ring, unsigned int __maybe_unused size)
514 {
515 unsigned int truesize;
516
517 #if (PAGE_SIZE < 8192)
518 truesize = ice_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */
519 #else
520 truesize = ice_rx_offset(rx_ring) ?
521 SKB_DATA_ALIGN(ice_rx_offset(rx_ring) + size) +
522 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
523 SKB_DATA_ALIGN(size);
524 #endif
525 return truesize;
526 }
527
528 /**
529 * ice_run_xdp - Executes an XDP program on initialized xdp_buff
530 * @rx_ring: Rx ring
531 * @xdp: xdp_buff used as input to the XDP program
532 * @xdp_prog: XDP program to run
533 *
534 * Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR}
535 */
536 static int
537 ice_run_xdp(struct ice_ring *rx_ring, struct xdp_buff *xdp,
538 struct bpf_prog *xdp_prog)
539 {
540 int err, result = ICE_XDP_PASS;
541 struct ice_ring *xdp_ring;
542 u32 act;
543
544 act = bpf_prog_run_xdp(xdp_prog, xdp);
545 switch (act) {
546 case XDP_PASS:
547 break;
548 case XDP_TX:
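/* XDP Tx rings are provisioned one per CPU, so the current CPU id
 * selects the ring to transmit on
 */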
549 xdp_ring = rx_ring->vsi->xdp_rings[smp_processor_id()];
550 result = ice_xmit_xdp_buff(xdp, xdp_ring);
551 break;
552 case XDP_REDIRECT:
553 err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
554 result = !err ? ICE_XDP_REDIR : ICE_XDP_CONSUMED;
555 break;
556 default:
557 bpf_warn_invalid_xdp_action(act);
558 fallthrough;
559 case XDP_ABORTED:
560 trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
561 fallthrough;
562 case XDP_DROP:
563 result = ICE_XDP_CONSUMED;
564 break;
565 }
566
567 return result;
568 }
569
570 /**
571 * ice_xdp_xmit - submit packets to XDP ring for transmission
572 * @dev: netdev
573 * @n: number of XDP frames to be transmitted
574 * @frames: XDP frames to be transmitted
575 * @flags: transmit flags
576 *
577 * Returns number of frames successfully sent. Frames that fail are
578 * freed via the XDP return API.
579 * For error cases, a negative errno code is returned and no frames
580 * are transmitted (caller must handle freeing frames).
581 */
582 int
583 ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
584 u32 flags)
585 {
586 struct ice_netdev_priv *np = netdev_priv(dev);
587 unsigned int queue_index = smp_processor_id();
588 struct ice_vsi *vsi = np->vsi;
589 struct ice_ring *xdp_ring;
590 int drops = 0, i;
591
592 if (test_bit(__ICE_DOWN, vsi->state))
593 return -ENETDOWN;
594
595 if (!ice_is_xdp_ena_vsi(vsi) || queue_index >= vsi->num_xdp_txq)
596 return -ENXIO;
597
598 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
599 return -EINVAL;
600
601 xdp_ring = vsi->xdp_rings[queue_index];
602 for (i = 0; i < n; i++) {
603 struct xdp_frame *xdpf = frames[i];
604 int err;
605
606 err = ice_xmit_xdp_ring(xdpf->data, xdpf->len, xdp_ring);
607 if (err != ICE_XDP_TX) {
608 xdp_return_frame_rx_napi(xdpf);
609 drops++;
610 }
611 }
612
613 if (unlikely(flags & XDP_XMIT_FLUSH))
614 ice_xdp_ring_update_tail(xdp_ring);
615
616 return n - drops;
617 }
618
619 /**
620 * ice_alloc_mapped_page - recycle or make a new page
621 * @rx_ring: ring to use
622 * @bi: rx_buf struct to modify
623 *
624 * Returns true if the page was successfully allocated or
625 * reused.
626 */
627 static bool
628 ice_alloc_mapped_page(struct ice_ring *rx_ring, struct ice_rx_buf *bi)
629 {
630 struct page *page = bi->page;
631 dma_addr_t dma;
632
633 /* since we are recycling buffers we should seldom need to alloc */
634 if (likely(page))
635 return true;
636
637 /* alloc new page for storage */
638 page = dev_alloc_pages(ice_rx_pg_order(rx_ring));
639 if (unlikely(!page)) {
640 rx_ring->rx_stats.alloc_page_failed++;
641 return false;
642 }
643
644 /* map page for use */
645 dma = dma_map_page_attrs(rx_ring->dev, page, 0, ice_rx_pg_size(rx_ring),
646 DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
647
648 /* if mapping failed free memory back to system since
649 * there isn't much point in holding memory we can't use
650 */
651 if (dma_mapping_error(rx_ring->dev, dma)) {
652 __free_pages(page, ice_rx_pg_order(rx_ring));
653 rx_ring->rx_stats.alloc_page_failed++;
654 return false;
655 }
656
657 bi->dma = dma;
658 bi->page = page;
659 bi->page_offset = ice_rx_offset(rx_ring);
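/* take a large reference up front so the hot path can track page usage
 * via pagecnt_bias instead of touching the atomic page refcount per frame
 */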
660 page_ref_add(page, USHRT_MAX - 1);
661 bi->pagecnt_bias = USHRT_MAX;
662
663 return true;
664 }
665
666 /**
667 * ice_alloc_rx_bufs - Replace used receive buffers
668 * @rx_ring: ring to place buffers on
669 * @cleaned_count: number of buffers to replace
670 *
671 * Returns false if all allocations were successful, true if any fail. Returning
672 * true signals to the caller that we didn't replace cleaned_count buffers and
673 * there is more work to do.
674 *
675 * First, try to clean "cleaned_count" Rx buffers. Then refill the cleaned Rx
676 * buffers. Then bump tail at most one time. Grouping like this lets us avoid
677 * multiple tail writes per call.
678 */
679 bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count)
680 {
681 union ice_32b_rx_flex_desc *rx_desc;
682 u16 ntu = rx_ring->next_to_use;
683 struct ice_rx_buf *bi;
684
685 /* do nothing if no valid netdev defined */
686 if ((!rx_ring->netdev && rx_ring->vsi->type != ICE_VSI_CTRL) ||
687 !cleaned_count)
688 return false;
689
690 /* get the Rx descriptor and buffer based on next_to_use */
691 rx_desc = ICE_RX_DESC(rx_ring, ntu);
692 bi = &rx_ring->rx_buf[ntu];
693
694 do {
695 /* if we fail here, we have work remaining */
696 if (!ice_alloc_mapped_page(rx_ring, bi))
697 break;
698
699 /* sync the buffer for use by the device */
700 dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
701 bi->page_offset,
702 rx_ring->rx_buf_len,
703 DMA_FROM_DEVICE);
704
705 /* Refresh the desc even if buffer_addrs didn't change
706 * because each write-back erases this info.
707 */
708 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
709
710 rx_desc++;
711 bi++;
712 ntu++;
713 if (unlikely(ntu == rx_ring->count)) {
714 rx_desc = ICE_RX_DESC(rx_ring, 0);
715 bi = rx_ring->rx_buf;
716 ntu = 0;
717 }
718
719 /* clear the status bits for the next_to_use descriptor */
720 rx_desc->wb.status_error0 = 0;
721
722 cleaned_count--;
723 } while (cleaned_count);
724
725 if (rx_ring->next_to_use != ntu)
726 ice_release_rx_desc(rx_ring, ntu);
727
728 return !!cleaned_count;
729 }
730
731 /**
732 * ice_page_is_reserved - check if reuse is possible
733 * @page: page struct to check
734 */
735 static bool ice_page_is_reserved(struct page *page)
736 {
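/* pages from a remote NUMA node or from pfmemalloc emergency reserves
 * are not worth recycling
 */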
737 return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
738 }
739
740 /**
741 * ice_rx_buf_adjust_pg_offset - Prepare Rx buffer for reuse
742 * @rx_buf: Rx buffer to adjust
743 * @size: Size of adjustment
744 *
745 * Update the offset within page so that Rx buf will be ready to be reused.
746 * For systems with PAGE_SIZE < 8192 this function will flip the page offset
747 * so the second half of page assigned to Rx buffer will be used, otherwise
748 * the offset is moved by "size" bytes
749 */
750 static void
751 ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size)
752 {
753 #if (PAGE_SIZE < 8192)
754 /* flip page offset to other buffer */
755 rx_buf->page_offset ^= size;
756 #else
757 /* move offset up to the next cache line */
758 rx_buf->page_offset += size;
759 #endif
760 }
761
762 /**
763 * ice_can_reuse_rx_page - Determine if page can be reused for another Rx
764 * @rx_buf: buffer containing the page
765 * @rx_buf_pgcnt: rx_buf page refcount pre xdp_do_redirect() call
766 *
767 * If page is reusable, we have a green light for calling ice_reuse_rx_page,
768 * which will assign the current buffer to the buffer that next_to_alloc is
769 * pointing to; otherwise, the DMA mapping needs to be destroyed and
770 * page freed
771 */
772 static bool
773 ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf, int rx_buf_pgcnt)
774 {
775 unsigned int pagecnt_bias = rx_buf->pagecnt_bias;
776 struct page *page = rx_buf->page;
777
778 /* avoid re-using remote pages */
779 if (unlikely(ice_page_is_reserved(page)))
780 return false;
781
782 #if (PAGE_SIZE < 8192)
783 /* if we are only owner of page we can reuse it */
784 if (unlikely((rx_buf_pgcnt - pagecnt_bias) > 1))
785 return false;
786 #else
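/* on larger pages, keep reusing while at least one more 2 KB buffer
 * still fits behind the current offset
 */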
787 #define ICE_LAST_OFFSET \
788 (SKB_WITH_OVERHEAD(PAGE_SIZE) - ICE_RXBUF_2048)
789 if (rx_buf->page_offset > ICE_LAST_OFFSET)
790 return false;
791 #endif /* PAGE_SIZE < 8192) */
792
793 /* If we have drained the page fragment pool we need to update
794 * the pagecnt_bias and page count so that we fully restock the
795 * number of references the driver holds.
796 */
797 if (unlikely(pagecnt_bias == 1)) {
798 page_ref_add(page, USHRT_MAX - 1);
799 rx_buf->pagecnt_bias = USHRT_MAX;
800 }
801
802 return true;
803 }
804
805 /**
806 * ice_add_rx_frag - Add contents of Rx buffer to sk_buff as a frag
807 * @rx_ring: Rx descriptor ring to transact packets on
808 * @rx_buf: buffer containing page to add
809 * @skb: sk_buff to place the data into
810 * @size: packet length from rx_desc
811 *
812 * This function will add the data contained in rx_buf->page to the skb.
813 * It will just attach the page as a frag to the skb.
814 * The function will then update the page offset.
815 */
816 static void
817 ice_add_rx_frag(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
818 struct sk_buff *skb, unsigned int size)
819 {
820 #if (PAGE_SIZE >= 8192)
821 unsigned int truesize = SKB_DATA_ALIGN(size + ice_rx_offset(rx_ring));
822 #else
823 unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
824 #endif
825
826 if (!size)
827 return;
828 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page,
829 rx_buf->page_offset, size, truesize);
830
831 /* page is being used so we must update the page offset */
832 ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
833 }
834
835 /**
836 * ice_reuse_rx_page - page flip buffer and store it back on the ring
837 * @rx_ring: Rx descriptor ring to store buffers on
838 * @old_buf: donor buffer to have page reused
839 *
840 * Synchronizes page for reuse by the adapter
841 */
842 static void
843 ice_reuse_rx_page(struct ice_ring *rx_ring, struct ice_rx_buf *old_buf)
844 {
845 u16 nta = rx_ring->next_to_alloc;
846 struct ice_rx_buf *new_buf;
847
848 new_buf = &rx_ring->rx_buf[nta];
849
850 /* update, and store next to alloc */
851 nta++;
852 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
853
854 /* Transfer page from old buffer to new buffer.
855 * Move each member individually to avoid possible store
856 * forwarding stalls and unnecessary copy of skb.
857 */
858 new_buf->dma = old_buf->dma;
859 new_buf->page = old_buf->page;
860 new_buf->page_offset = old_buf->page_offset;
861 new_buf->pagecnt_bias = old_buf->pagecnt_bias;
862 }
863
864 /**
865 * ice_get_rx_buf - Fetch Rx buffer and synchronize data for use
866 * @rx_ring: Rx descriptor ring to transact packets on
867 * @skb: skb to be used
868 * @size: size of buffer to add to skb
869 * @rx_buf_pgcnt: rx_buf page refcount
870 *
871 * This function will pull an Rx buffer from the ring and synchronize it
872 * for use by the CPU.
873 */
874 static struct ice_rx_buf *
875 ice_get_rx_buf(struct ice_ring *rx_ring, struct sk_buff **skb,
876 const unsigned int size, int *rx_buf_pgcnt)
877 {
878 struct ice_rx_buf *rx_buf;
879
880 rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
881 *rx_buf_pgcnt =
882 #if (PAGE_SIZE < 8192)
883 page_count(rx_buf->page);
884 #else
885 0;
886 #endif
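/* the page refcount snapshot is only needed by the reuse heuristic,
 * which is active when PAGE_SIZE < 8192
 */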
887 prefetchw(rx_buf->page);
888 *skb = rx_buf->skb;
889
890 if (!size)
891 return rx_buf;
892 /* we are reusing so sync this buffer for CPU use */
893 dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma,
894 rx_buf->page_offset, size,
895 DMA_FROM_DEVICE);
896
897 /* We have pulled a buffer for use, so decrement pagecnt_bias */
898 rx_buf->pagecnt_bias--;
899
900 return rx_buf;
901 }
902
903 /**
904 * ice_build_skb - Build skb around an existing buffer
905 * @rx_ring: Rx descriptor ring to transact packets on
906 * @rx_buf: Rx buffer to pull data from
907 * @xdp: xdp_buff pointing to the data
908 *
909 * This function builds an skb around an existing Rx buffer, taking care
910 * to set up the skb correctly and avoid any memcpy overhead.
911 */
912 static struct sk_buff *
913 ice_build_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
914 struct xdp_buff *xdp)
915 {
916 u8 metasize = xdp->data - xdp->data_meta;
917 #if (PAGE_SIZE < 8192)
918 unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
919 #else
920 unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
921 SKB_DATA_ALIGN(xdp->data_end -
922 xdp->data_hard_start);
923 #endif
924 struct sk_buff *skb;
925
926 /* Prefetch first cache line of first page. If xdp->data_meta
927 * is unused, this points exactly at xdp->data, otherwise we
928 * likely have a consumer accessing first few bytes of meta
929 * data, and then actual data.
930 */
931 net_prefetch(xdp->data_meta);
932 /* build an skb around the page buffer */
933 skb = build_skb(xdp->data_hard_start, truesize);
934 if (unlikely(!skb))
935 return NULL;
936
937 /* must record the Rx queue, otherwise OS features such as
938 * symmetric queues won't work
939 */
940 skb_record_rx_queue(skb, rx_ring->q_index);
941
942 /* update pointers within the skb to store the data */
943 skb_reserve(skb, xdp->data - xdp->data_hard_start);
944 __skb_put(skb, xdp->data_end - xdp->data);
945 if (metasize)
946 skb_metadata_set(skb, metasize);
947
948 /* buffer is used by skb, update page_offset */
949 ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
950
951 return skb;
952 }
953
954 /**
955 * ice_construct_skb - Allocate skb and populate it
956 * @rx_ring: Rx descriptor ring to transact packets on
957 * @rx_buf: Rx buffer to pull data from
958 * @xdp: xdp_buff pointing to the data
959 *
960 * This function allocates an skb. It then populates it with the page
961 * data from the current receive descriptor, taking care to set up the
962 * skb correctly.
963 */
964 static struct sk_buff *
965 ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
966 struct xdp_buff *xdp)
967 {
968 unsigned int size = xdp->data_end - xdp->data;
969 unsigned int headlen;
970 struct sk_buff *skb;
971
972 /* prefetch first cache line of first page */
973 net_prefetch(xdp->data);
974
975 /* allocate a skb to store the frags */
976 skb = __napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE,
977 GFP_ATOMIC | __GFP_NOWARN);
978 if (unlikely(!skb))
979 return NULL;
980
981 skb_record_rx_queue(skb, rx_ring->q_index);
982 /* Determine available headroom for copy */
983 headlen = size;
984 if (headlen > ICE_RX_HDR_SIZE)
985 headlen = eth_get_headlen(skb->dev, xdp->data, ICE_RX_HDR_SIZE);
986
987 /* align pull length to size of long to optimize memcpy performance */
988 memcpy(__skb_put(skb, headlen), xdp->data, ALIGN(headlen,
989 sizeof(long)));
990
991 /* if we exhaust the linear part then add what is left as a frag */
992 size -= headlen;
993 if (size) {
994 #if (PAGE_SIZE >= 8192)
995 unsigned int truesize = SKB_DATA_ALIGN(size);
996 #else
997 unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
998 #endif
999 skb_add_rx_frag(skb, 0, rx_buf->page,
1000 rx_buf->page_offset + headlen, size, truesize);
1001 /* buffer is used by skb, update page_offset */
1002 ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
1003 } else {
1004 /* buffer is unused, reset bias back to rx_buf; data was copied
1005 * onto skb's linear part so there's no need for adjusting
1006 * page offset and we can reuse this buffer as-is
1007 */
1008 rx_buf->pagecnt_bias++;
1009 }
1010
1011 return skb;
1012 }
1013
1014 /**
1015 * ice_put_rx_buf - Clean up used buffer and either recycle or free
1016 * @rx_ring: Rx descriptor ring to transact packets on
1017 * @rx_buf: Rx buffer to pull data from
1018 * @rx_buf_pgcnt: Rx buffer page count pre xdp_do_redirect()
1019 *
1020 * This function will update next_to_clean and then clean up the contents
1021 * of the rx_buf. It will either recycle the buffer or unmap it and free
1022 * the associated resources.
1023 */
1024 static void
1025 ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
1026 int rx_buf_pgcnt)
1027 {
1028 u16 ntc = rx_ring->next_to_clean + 1;
1029
1030 /* fetch, update, and store next to clean */
1031 ntc = (ntc < rx_ring->count) ? ntc : 0;
1032 rx_ring->next_to_clean = ntc;
1033
1034 if (!rx_buf)
1035 return;
1036
1037 if (ice_can_reuse_rx_page(rx_buf, rx_buf_pgcnt)) {
1038 /* hand second half of page back to the ring */
1039 ice_reuse_rx_page(rx_ring, rx_buf);
1040 } else {
1041 /* we are not reusing the buffer so unmap it */
1042 dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma,
1043 ice_rx_pg_size(rx_ring), DMA_FROM_DEVICE,
1044 ICE_RX_DMA_ATTR);
1045 __page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
1046 }
1047
1048 /* clear contents of buffer_info */
1049 rx_buf->page = NULL;
1050 rx_buf->skb = NULL;
1051 }
1052
1053 /**
1054 * ice_is_non_eop - process handling of non-EOP buffers
1055 * @rx_ring: Rx ring being processed
1056 * @rx_desc: Rx descriptor for current buffer
1057 * @skb: Current socket buffer containing buffer in progress
1058 *
1059 * If the buffer is an EOP buffer, this function exits returning false,
1060 * otherwise return true indicating that this is in fact a non-EOP buffer.
1061 */
1062 static bool
1063 ice_is_non_eop(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
1064 struct sk_buff *skb)
1065 {
1066 /* if we are the last buffer then there is nothing else to do */
1067 #define ICE_RXD_EOF BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S)
1068 if (likely(ice_test_staterr(rx_desc, ICE_RXD_EOF)))
1069 return false;
1070
1071 /* place skb in next buffer to be received */
1072 rx_ring->rx_buf[rx_ring->next_to_clean].skb = skb;
1073 rx_ring->rx_stats.non_eop_descs++;
1074
1075 return true;
1076 }
1077
1078 /**
1079 * ice_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
1080 * @rx_ring: Rx descriptor ring to transact packets on
1081 * @budget: Total limit on number of packets to process
1082 *
1083 * This function provides a "bounce buffer" approach to Rx interrupt
1084 * processing. The advantage to this is that on systems that have
1085 * expensive overhead for IOMMU access this provides a means of avoiding
1086 * it by maintaining the mapping of the page to the system.
1087 *
1088 * Returns amount of work completed
1089 */
1090 int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
1091 {
1092 unsigned int total_rx_bytes = 0, total_rx_pkts = 0;
1093 u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
1094 unsigned int xdp_res, xdp_xmit = 0;
1095 struct bpf_prog *xdp_prog = NULL;
1096 struct xdp_buff xdp;
1097 bool failure;
1098
1099 xdp.rxq = &rx_ring->xdp_rxq;
1100 /* Frame size depends on rx_ring setup when PAGE_SIZE=4K */
1101 #if (PAGE_SIZE < 8192)
1102 xdp.frame_sz = ice_rx_frame_truesize(rx_ring, 0);
1103 #endif
1104
1105 /* start the loop to process Rx packets bounded by 'budget' */
1106 while (likely(total_rx_pkts < (unsigned int)budget)) {
1107 union ice_32b_rx_flex_desc *rx_desc;
1108 struct ice_rx_buf *rx_buf;
1109 struct sk_buff *skb;
1110 unsigned int size;
1111 u16 stat_err_bits;
1112 int rx_buf_pgcnt;
1113 u16 vlan_tag = 0;
1114 u8 rx_ptype;
1115
1116 /* get the Rx desc from Rx ring based on 'next_to_clean' */
1117 rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean);
1118
1119 /* status_error_len will always be zero for unused descriptors
1120 * because it's cleared in cleanup, and overlaps with hdr_addr
1121 * which is always zero because packet split isn't used. If the
1122 * hardware wrote DD then it will be non-zero.
1123 */
1124 stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S);
1125 if (!ice_test_staterr(rx_desc, stat_err_bits))
1126 break;
1127
1128 /* This memory barrier is needed to keep us from reading
1129 * any other fields out of the rx_desc until we know the
1130 * DD bit is set.
1131 */
1132 dma_rmb();
1133
1134 if (rx_desc->wb.rxdid == FDIR_DESC_RXDID || !rx_ring->netdev) {
1135 ice_put_rx_buf(rx_ring, NULL, 0);
1136 cleaned_count++;
1137 continue;
1138 }
1139
1140 size = le16_to_cpu(rx_desc->wb.pkt_len) &
1141 ICE_RX_FLX_DESC_PKT_LEN_M;
1142
1143 /* retrieve a buffer from the ring */
1144 rx_buf = ice_get_rx_buf(rx_ring, &skb, size, &rx_buf_pgcnt);
1145
1146 if (!size) {
1147 xdp.data = NULL;
1148 xdp.data_end = NULL;
1149 xdp.data_hard_start = NULL;
1150 xdp.data_meta = NULL;
1151 goto construct_skb;
1152 }
1153
1154 xdp.data = page_address(rx_buf->page) + rx_buf->page_offset;
1155 xdp.data_hard_start = xdp.data - ice_rx_offset(rx_ring);
1156 xdp.data_meta = xdp.data;
1157 xdp.data_end = xdp.data + size;
1158 #if (PAGE_SIZE > 4096)
1159 /* At larger PAGE_SIZE, frame_sz depends on the frame length */
1160 xdp.frame_sz = ice_rx_frame_truesize(rx_ring, size);
1161 #endif
1162
1163 rcu_read_lock();
1164 xdp_prog = READ_ONCE(rx_ring->xdp_prog);
1165 if (!xdp_prog) {
1166 rcu_read_unlock();
1167 goto construct_skb;
1168 }
1169
1170 xdp_res = ice_run_xdp(rx_ring, &xdp, xdp_prog);
1171 rcu_read_unlock();
1172 if (!xdp_res)
1173 goto construct_skb;
1174 if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR)) {
1175 xdp_xmit |= xdp_res;
1176 ice_rx_buf_adjust_pg_offset(rx_buf, xdp.frame_sz);
1177 } else {
1178 rx_buf->pagecnt_bias++;
1179 }
1180 total_rx_bytes += size;
1181 total_rx_pkts++;
1182
1183 cleaned_count++;
1184 ice_put_rx_buf(rx_ring, rx_buf, rx_buf_pgcnt);
1185 continue;
1186 construct_skb:
1187 if (skb) {
1188 ice_add_rx_frag(rx_ring, rx_buf, skb, size);
1189 } else if (likely(xdp.data)) {
1190 if (ice_ring_uses_build_skb(rx_ring))
1191 skb = ice_build_skb(rx_ring, rx_buf, &xdp);
1192 else
1193 skb = ice_construct_skb(rx_ring, rx_buf, &xdp);
1194 }
1195 /* exit if we failed to retrieve a buffer */
1196 if (!skb) {
1197 rx_ring->rx_stats.alloc_buf_failed++;
1198 if (rx_buf)
1199 rx_buf->pagecnt_bias++;
1200 break;
1201 }
1202
1203 ice_put_rx_buf(rx_ring, rx_buf, rx_buf_pgcnt);
1204 cleaned_count++;
1205
1206 /* skip if it is NOP desc */
1207 if (ice_is_non_eop(rx_ring, rx_desc, skb))
1208 continue;
1209
1210 stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S);
1211 if (unlikely(ice_test_staterr(rx_desc, stat_err_bits))) {
1212 dev_kfree_skb_any(skb);
1213 continue;
1214 }
1215
1216 stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S);
1217 if (ice_test_staterr(rx_desc, stat_err_bits))
1218 vlan_tag = le16_to_cpu(rx_desc->wb.l2tag1);
1219
1220 /* pad the skb if needed, to make a valid ethernet frame */
1221 if (eth_skb_pad(skb)) {
1222 skb = NULL;
1223 continue;
1224 }
1225
1226 /* probably a little skewed due to removing CRC */
1227 total_rx_bytes += skb->len;
1228
1229 /* populate checksum, VLAN, and protocol */
1230 rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) &
1231 ICE_RX_FLEX_DESC_PTYPE_M;
1232
1233 ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
1234
1235 /* send completed skb up the stack */
1236 ice_receive_skb(rx_ring, skb, vlan_tag);
1237
1238 /* update budget accounting */
1239 total_rx_pkts++;
1240 }
1241
1242 /* return up to cleaned_count buffers to hardware */
1243 failure = ice_alloc_rx_bufs(rx_ring, cleaned_count);
1244
1245 if (xdp_prog)
1246 ice_finalize_xdp_rx(rx_ring, xdp_xmit);
1247
1248 ice_update_rx_ring_stats(rx_ring, total_rx_pkts, total_rx_bytes);
1249
1250 /* guarantee a trip back through this routine if there was a failure */
1251 return failure ? budget : (int)total_rx_pkts;
1252 }
1253
1254 /**
1255 * ice_adjust_itr_by_size_and_speed - Adjust ITR based on current traffic
1256 * @port_info: port_info structure containing the current link speed
1257 * @avg_pkt_size: average size of Tx or Rx packets based on clean routine
1258 * @itr: ITR value to update
1259 *
1260 * Calculate how big of an increment should be applied to the ITR value passed
1261 * in based on wmem_default, SKB overhead, ethernet overhead, and the current
1262 * link speed.
1263 *
1264 * The following is a calculation derived from:
1265 * wmem_default / (size + overhead) = desired_pkts_per_int
1266 * rate / bits_per_byte / (size + ethernet overhead) = pkt_rate
1267 * (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value
1268 *
1269 * Assuming wmem_default is 212992 and overhead is 640 bytes per
1270 * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the
1271 * formula down to:
1272 *
1273 * wmem_default * bits_per_byte * usecs_per_sec pkt_size + 24
1274 * ITR = -------------------------------------------- * --------------
1275 * rate pkt_size + 640
1276 */
1277 static unsigned int
1278 ice_adjust_itr_by_size_and_speed(struct ice_port_info *port_info,
1279 unsigned int avg_pkt_size,
1280 unsigned int itr)
1281 {
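/* The per-speed constants below follow from the formula above:
 * wmem_default * 8 * 10^6 / link_rate, e.g. roughly
 * 212992 * 8 * 10^6 / 10 Gbps ~= 170 for the 10GB (and default) case.
 */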
1282 switch (port_info->phy.link_info.link_speed) {
1283 case ICE_AQ_LINK_SPEED_100GB:
1284 itr += DIV_ROUND_UP(17 * (avg_pkt_size + 24),
1285 avg_pkt_size + 640);
1286 break;
1287 case ICE_AQ_LINK_SPEED_50GB:
1288 itr += DIV_ROUND_UP(34 * (avg_pkt_size + 24),
1289 avg_pkt_size + 640);
1290 break;
1291 case ICE_AQ_LINK_SPEED_40GB:
1292 itr += DIV_ROUND_UP(43 * (avg_pkt_size + 24),
1293 avg_pkt_size + 640);
1294 break;
1295 case ICE_AQ_LINK_SPEED_25GB:
1296 itr += DIV_ROUND_UP(68 * (avg_pkt_size + 24),
1297 avg_pkt_size + 640);
1298 break;
1299 case ICE_AQ_LINK_SPEED_20GB:
1300 itr += DIV_ROUND_UP(85 * (avg_pkt_size + 24),
1301 avg_pkt_size + 640);
1302 break;
1303 case ICE_AQ_LINK_SPEED_10GB:
1304 default:
1305 itr += DIV_ROUND_UP(170 * (avg_pkt_size + 24),
1306 avg_pkt_size + 640);
1307 break;
1308 }
1309
1310 if ((itr & ICE_ITR_MASK) > ICE_ITR_ADAPTIVE_MAX_USECS) {
1311 itr &= ICE_ITR_ADAPTIVE_LATENCY;
1312 itr += ICE_ITR_ADAPTIVE_MAX_USECS;
1313 }
1314
1315 return itr;
1316 }
1317
1318 /**
1319 * ice_update_itr - update the adaptive ITR value based on statistics
1320 * @q_vector: structure containing interrupt and ring information
1321 * @rc: structure containing ring performance data
1322 *
1323 * Stores a new ITR value based on packets and byte
1324 * counts during the last interrupt. The advantage of per interrupt
1325 * computation is faster updates and more accurate ITR for the current
1326 * traffic pattern. Constants in this function were computed
1327 * based on theoretical maximum wire speed and thresholds were set based
1328 * on testing data as well as attempting to minimize response time
1329 * while increasing bulk throughput.
1330 */
1331 static void
1332 ice_update_itr(struct ice_q_vector *q_vector, struct ice_ring_container *rc)
1333 {
1334 unsigned long next_update = jiffies;
1335 unsigned int packets, bytes, itr;
1336 bool container_is_rx;
1337
1338 if (!rc->ring || !ITR_IS_DYNAMIC(rc->itr_setting))
1339 return;
1340
1341 /* If itr_countdown is set it means we programmed an ITR within
1342 * the last 4 interrupt cycles. This has a side effect of us
1343 * potentially firing an early interrupt. In order to work around
1344 * this we need to throw out any data received for a few
1345 * interrupts following the update.
1346 */
1347 if (q_vector->itr_countdown) {
1348 itr = rc->target_itr;
1349 goto clear_counts;
1350 }
1351
1352 container_is_rx = (&q_vector->rx == rc);
1353 /* For Rx we want to push the delay up and default to low latency.
1354 * for Tx we want to pull the delay down and default to high latency.
1355 */
1356 itr = container_is_rx ?
1357 ICE_ITR_ADAPTIVE_MIN_USECS | ICE_ITR_ADAPTIVE_LATENCY :
1358 ICE_ITR_ADAPTIVE_MAX_USECS | ICE_ITR_ADAPTIVE_LATENCY;
1359
1360 /* If we didn't update within up to 1 - 2 jiffies we can assume
1361 * that either packets are coming in so slow there hasn't been
1362 * any work, or that there is so much work that NAPI is dealing
1363 * with interrupt moderation and we don't need to do anything.
1364 */
1365 if (time_after(next_update, rc->next_update))
1366 goto clear_counts;
1367
1368 prefetch(q_vector->vsi->port_info);
1369
1370 packets = rc->total_pkts;
1371 bytes = rc->total_bytes;
1372
1373 if (container_is_rx) {
1374 /* If Rx there are 1 to 4 packets and bytes are less than
1375 * 9000 assume insufficient data to use bulk rate limiting
1376 * approach unless Tx is already in bulk rate limiting. We
1377 * are likely latency driven.
1378 */
1379 if (packets && packets < 4 && bytes < 9000 &&
1380 (q_vector->tx.target_itr & ICE_ITR_ADAPTIVE_LATENCY)) {
1381 itr = ICE_ITR_ADAPTIVE_LATENCY;
1382 goto adjust_by_size_and_speed;
1383 }
1384 } else if (packets < 4) {
1385 /* If we have Tx and Rx ITR maxed and Tx ITR is running in
1386 * bulk mode and we are receiving 4 or fewer packets just
1387 * reset the ITR_ADAPTIVE_LATENCY bit for latency mode so
1388 * that the Rx can relax.
1389 */
1390 if (rc->target_itr == ICE_ITR_ADAPTIVE_MAX_USECS &&
1391 (q_vector->rx.target_itr & ICE_ITR_MASK) ==
1392 ICE_ITR_ADAPTIVE_MAX_USECS)
1393 goto clear_counts;
1394 } else if (packets > 32) {
1395 /* If we have processed over 32 packets in a single interrupt
1396 * for Tx assume we need to switch over to "bulk" mode.
1397 */
1398 rc->target_itr &= ~ICE_ITR_ADAPTIVE_LATENCY;
1399 }
1400
1401 /* We have no packets to actually measure against. This means
1402 * either one of the other queues on this vector is active or
1403 * we are a Tx queue doing TSO with too high of an interrupt rate.
1404 *
1405 * Between 4 and 56 we can assume that our current interrupt delay
1406 * is only slightly too low. As such we should increase it by a small
1407 * fixed amount.
1408 */
1409 if (packets < 56) {
1410 itr = rc->target_itr + ICE_ITR_ADAPTIVE_MIN_INC;
1411 if ((itr & ICE_ITR_MASK) > ICE_ITR_ADAPTIVE_MAX_USECS) {
1412 itr &= ICE_ITR_ADAPTIVE_LATENCY;
1413 itr += ICE_ITR_ADAPTIVE_MAX_USECS;
1414 }
1415 goto clear_counts;
1416 }
1417
1418 if (packets <= 256) {
1419 itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr);
1420 itr &= ICE_ITR_MASK;
1421
1422 /* Between 56 and 112 is our "goldilocks" zone where we are
1423 * working out "just right". Just report that our current
1424 * ITR is good for us.
1425 */
1426 if (packets <= 112)
1427 goto clear_counts;
1428
1429 /* If packet count is 128 or greater we are likely looking
1430 * at a slight overrun of the delay we want. Try halving
1431 * our delay to see if that will cut the number of packets
1432 * in half per interrupt.
1433 */
1434 itr >>= 1;
1435 itr &= ICE_ITR_MASK;
1436 if (itr < ICE_ITR_ADAPTIVE_MIN_USECS)
1437 itr = ICE_ITR_ADAPTIVE_MIN_USECS;
1438
1439 goto clear_counts;
1440 }
1441
1442 /* The paths below assume we are dealing with a bulk ITR since
1443 * number of packets is greater than 256. We are just going to have
1444 * to compute a value and try to bring the count under control,
1445 * though for smaller packet sizes there isn't much we can do as
1446 * NAPI polling will likely be kicking in sooner rather than later.
1447 */
1448 itr = ICE_ITR_ADAPTIVE_BULK;
1449
1450 adjust_by_size_and_speed:
1451
1452 /* based on checks above packets cannot be 0 so division is safe */
1453 itr = ice_adjust_itr_by_size_and_speed(q_vector->vsi->port_info,
1454 bytes / packets, itr);
1455
1456 clear_counts:
1457 /* write back value */
1458 rc->target_itr = itr;
1459
1460 /* next update should occur within next jiffy */
1461 rc->next_update = next_update + 1;
1462
1463 rc->total_bytes = 0;
1464 rc->total_pkts = 0;
1465 }
1466
1467 /**
1468 * ice_buildreg_itr - build value for writing to the GLINT_DYN_CTL register
1469 * @itr_idx: interrupt throttling index
1470 * @itr: interrupt throttling value in usecs
1471 */
1472 static u32 ice_buildreg_itr(u16 itr_idx, u16 itr)
1473 {
1474 /* The ITR value is reported in microseconds, and the register value is
1475 * recorded in 2 microsecond units. For this reason we only need to
1476 * shift by the GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S to apply this
1477 * granularity as a shift instead of division. The mask makes sure the
1478 * ITR value is never odd so we don't accidentally write into the field
1479 * prior to the ITR field.
1480 */
1481 itr &= ICE_ITR_MASK;
1482
1483 return GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
1484 (itr_idx << GLINT_DYN_CTL_ITR_INDX_S) |
1485 (itr << (GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S));
1486 }
1487
1488 /* The act of updating the ITR will cause it to immediately trigger. In order
1489 * to prevent this from throwing off adaptive update statistics we defer the
1490 * update so that it can only happen so often. So after either Tx or Rx are
1491 * updated we make the adaptive scheme wait until either the ITR completely
1492 * expires via the next_update expiration or we have been through at least
1493 * 3 interrupts.
1494 */
1495 #define ITR_COUNTDOWN_START 3
1496
1497 /**
1498 * ice_update_ena_itr - Update ITR and re-enable MSIX interrupt
1499 * @q_vector: q_vector for which ITR is being updated and interrupt enabled
1500 */
1501 static void ice_update_ena_itr(struct ice_q_vector *q_vector)
1502 {
1503 struct ice_ring_container *tx = &q_vector->tx;
1504 struct ice_ring_container *rx = &q_vector->rx;
1505 struct ice_vsi *vsi = q_vector->vsi;
1506 u32 itr_val;
1507
1508 /* when exiting WB_ON_ITR, let's set a low ITR value and trigger
1509 * interrupts to expire right away in case we have more work ready to go
1510 * already
1511 */
1512 if (q_vector->itr_countdown == ICE_IN_WB_ON_ITR_MODE) {
1513 itr_val = ice_buildreg_itr(rx->itr_idx, ICE_WB_ON_ITR_USECS);
1514 wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), itr_val);
1515 /* set target back to last user set value */
1516 rx->target_itr = rx->itr_setting;
1517 /* set current to what we just wrote and dynamic if needed */
1518 rx->current_itr = ICE_WB_ON_ITR_USECS |
1519 (rx->itr_setting & ICE_ITR_DYNAMIC);
1520 /* allow normal interrupt flow to start */
1521 q_vector->itr_countdown = 0;
1522 return;
1523 }
1524
1525 /* This will do nothing if dynamic updates are not enabled */
1526 ice_update_itr(q_vector, tx);
1527 ice_update_itr(q_vector, rx);
1528
1529 /* This block of logic allows us to get away with only updating
1530 * one ITR value with each interrupt. The idea is to perform a
1531 * pseudo-lazy update with the following criteria.
1532 *
1533 * 1. Rx is given higher priority than Tx if both are in same state
1534 * 2. If we must reduce an ITR, that reduction is given highest priority.
1535 * 3. We then give priority to increasing ITR based on amount.
1536 */
1537 if (rx->target_itr < rx->current_itr) {
1538 /* Rx ITR needs to be reduced, this is highest priority */
1539 itr_val = ice_buildreg_itr(rx->itr_idx, rx->target_itr);
1540 rx->current_itr = rx->target_itr;
1541 q_vector->itr_countdown = ITR_COUNTDOWN_START;
1542 } else if ((tx->target_itr < tx->current_itr) ||
1543 ((rx->target_itr - rx->current_itr) <
1544 (tx->target_itr - tx->current_itr))) {
1545 /* Tx ITR needs to be reduced, this is second priority
1546 * Tx ITR needs to be increased more than Rx, fourth priority
1547 */
1548 itr_val = ice_buildreg_itr(tx->itr_idx, tx->target_itr);
1549 tx->current_itr = tx->target_itr;
1550 q_vector->itr_countdown = ITR_COUNTDOWN_START;
1551 } else if (rx->current_itr != rx->target_itr) {
1552 /* Rx ITR needs to be increased, third priority */
1553 itr_val = ice_buildreg_itr(rx->itr_idx, rx->target_itr);
1554 rx->current_itr = rx->target_itr;
1555 q_vector->itr_countdown = ITR_COUNTDOWN_START;
1556 } else {
1557 /* Still have to re-enable the interrupts */
1558 itr_val = ice_buildreg_itr(ICE_ITR_NONE, 0);
1559 if (q_vector->itr_countdown)
1560 q_vector->itr_countdown--;
1561 }
1562
1563 if (!test_bit(__ICE_DOWN, q_vector->vsi->state))
1564 wr32(&q_vector->vsi->back->hw,
1565 GLINT_DYN_CTL(q_vector->reg_idx),
1566 itr_val);
1567 }
1568
1569 /**
1570 * ice_set_wb_on_itr - set WB_ON_ITR for this q_vector
1571 * @q_vector: q_vector to set WB_ON_ITR on
1572 *
1573 * We need to tell hardware to write-back completed descriptors even when
1574 * interrupts are disabled. Descriptors will be written back on cache line
1575 * boundaries without WB_ON_ITR enabled, but if we don't enable WB_ON_ITR
1576 * descriptors may not be written back if they don't fill a cache line until the
1577 * next interrupt.
1578 *
1579 * This sets the write-back frequency to 2 microseconds as that is the minimum
1580 * value that's not 0 due to ITR granularity. Also, set the INTENA_MSK bit to
1581 * make sure hardware knows we aren't meddling with the INTENA_M bit.
1582 */
1583 static void ice_set_wb_on_itr(struct ice_q_vector *q_vector)
1584 {
1585 struct ice_vsi *vsi = q_vector->vsi;
1586
1587 /* already in WB_ON_ITR mode no need to change it */
1588 if (q_vector->itr_countdown == ICE_IN_WB_ON_ITR_MODE)
1589 return;
1590
1591 if (q_vector->num_ring_rx)
1592 wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx),
1593 ICE_GLINT_DYN_CTL_WB_ON_ITR(ICE_WB_ON_ITR_USECS,
1594 ICE_RX_ITR));
1595
1596 if (q_vector->num_ring_tx)
1597 wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx),
1598 ICE_GLINT_DYN_CTL_WB_ON_ITR(ICE_WB_ON_ITR_USECS,
1599 ICE_TX_ITR));
1600
1601 q_vector->itr_countdown = ICE_IN_WB_ON_ITR_MODE;
1602 }
1603
1604 /**
1605 * ice_napi_poll - NAPI polling Rx/Tx cleanup routine
1606 * @napi: napi struct with our devices info in it
1607 * @budget: amount of work driver is allowed to do this pass, in packets
1608 *
1609 * This function will clean all queues associated with a q_vector.
1610 *
1611 * Returns the amount of work done
1612 */
1613 int ice_napi_poll(struct napi_struct *napi, int budget)
1614 {
1615 struct ice_q_vector *q_vector =
1616 container_of(napi, struct ice_q_vector, napi);
1617 bool clean_complete = true;
1618 struct ice_ring *ring;
1619 int budget_per_ring;
1620 int work_done = 0;
1621
1622 /* Since the actual Tx work is minimal, we can give the Tx a larger
1623 * budget and be more aggressive about cleaning up the Tx descriptors.
1624 */
1625 ice_for_each_ring(ring, q_vector->tx) {
1626 bool wd = ring->xsk_pool ?
1627 ice_clean_tx_irq_zc(ring, budget) :
1628 ice_clean_tx_irq(ring, budget);
1629
1630 if (!wd)
1631 clean_complete = false;
1632 }
1633
1634 /* Handle case where we are called by netpoll with a budget of 0 */
1635 if (unlikely(budget <= 0))
1636 return budget;
1637
1638 /* normally we have 1 Rx ring per q_vector */
1639 if (unlikely(q_vector->num_ring_rx > 1))
1640 /* We attempt to distribute budget to each Rx queue fairly, but
1641 * don't allow the budget to go below 1 because that would exit
1642 * polling early.
1643 */
1644 budget_per_ring = max_t(int, budget / q_vector->num_ring_rx, 1);
1645 else
1646 /* Max of 1 Rx ring in this q_vector so give it the budget */
1647 budget_per_ring = budget;
1648
1649 ice_for_each_ring(ring, q_vector->rx) {
1650 int cleaned;
1651
1652 /* A dedicated path for zero-copy allows making a single
1653 * comparison in the irq context instead of many inside the
1654 * ice_clean_rx_irq function and makes the codebase cleaner.
1655 */
1656 cleaned = ring->xsk_pool ?
1657 ice_clean_rx_irq_zc(ring, budget_per_ring) :
1658 ice_clean_rx_irq(ring, budget_per_ring);
1659 work_done += cleaned;
1660 /* if we clean as many as budgeted, we must not be done */
1661 if (cleaned >= budget_per_ring)
1662 clean_complete = false;
1663 }
1664
1665 /* If work not completed, return budget and polling will return */
1666 if (!clean_complete)
1667 return budget;
1668
1669 /* Exit the polling mode, but don't re-enable interrupts if stack might
1670 * poll us due to busy-polling
1671 */
1672 if (likely(napi_complete_done(napi, work_done)))
1673 ice_update_ena_itr(q_vector);
1674 else
1675 ice_set_wb_on_itr(q_vector);
1676
1677 return min_t(int, work_done, budget - 1);
1678 }
1679
1680 /**
1681 * __ice_maybe_stop_tx - 2nd level check for Tx stop conditions
1682 * @tx_ring: the ring to be checked
1683 * @size: the size buffer we want to assure is available
1684 *
1685 * Returns -EBUSY if a stop is needed, else 0
1686 */
1687 static int __ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
1688 {
1689 netif_stop_subqueue(tx_ring->netdev, tx_ring->q_index);
1690 /* Memory barrier before checking head and tail */
1691 smp_mb();
1692
1693 /* Check again in a case another CPU has just made room available. */
1694 if (likely(ICE_DESC_UNUSED(tx_ring) < size))
1695 return -EBUSY;
1696
1697 /* A reprieve! - use start_subqueue because it doesn't call schedule */
1698 netif_start_subqueue(tx_ring->netdev, tx_ring->q_index);
1699 ++tx_ring->tx_stats.restart_q;
1700 return 0;
1701 }
1702
1703 /**
1704 * ice_maybe_stop_tx - 1st level check for Tx stop conditions
1705 * @tx_ring: the ring to be checked
1706 * @size: the size buffer we want to assure is available
1707 *
1708 * Returns 0 if stop is not needed
1709 */
1710 static int ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
1711 {
1712 if (likely(ICE_DESC_UNUSED(tx_ring) >= size))
1713 return 0;
1714
1715 return __ice_maybe_stop_tx(tx_ring, size);
1716 }
1717
1718 /**
1719 * ice_tx_map - Build the Tx descriptor
1720 * @tx_ring: ring to send buffer on
1721 * @first: first buffer info buffer to use
1722 * @off: pointer to struct that holds offload parameters
1723 *
1724 * This function loops over the skb data pointed to by *first
1725 * and gets a physical address for each memory location and programs
1726 * it and the length into the transmit descriptor.
1727 */
1728 static void
1729 ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first,
1730 struct ice_tx_offload_params *off)
1731 {
1732 u64 td_offset, td_tag, td_cmd;
1733 u16 i = tx_ring->next_to_use;
1734 unsigned int data_len, size;
1735 struct ice_tx_desc *tx_desc;
1736 struct ice_tx_buf *tx_buf;
1737 struct sk_buff *skb;
1738 skb_frag_t *frag;
1739 dma_addr_t dma;
1740
1741 td_tag = off->td_l2tag1;
1742 td_cmd = off->td_cmd;
1743 td_offset = off->td_offset;
1744 skb = first->skb;
1745
1746 data_len = skb->data_len;
1747 size = skb_headlen(skb);
1748
1749 tx_desc = ICE_TX_DESC(tx_ring, i);
1750
1751 if (first->tx_flags & ICE_TX_FLAGS_HW_VLAN) {
1752 td_cmd |= (u64)ICE_TX_DESC_CMD_IL2TAG1;
1753 td_tag = (first->tx_flags & ICE_TX_FLAGS_VLAN_M) >>
1754 ICE_TX_FLAGS_VLAN_S;
1755 }
1756
1757 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
1758
1759 tx_buf = first;
1760
1761 for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
1762 unsigned int max_data = ICE_MAX_DATA_PER_TXD_ALIGNED;
1763
1764 if (dma_mapping_error(tx_ring->dev, dma))
1765 goto dma_error;
1766
1767 /* record length, and DMA address */
1768 dma_unmap_len_set(tx_buf, len, size);
1769 dma_unmap_addr_set(tx_buf, dma, dma);
1770
1771 /* align size to end of page */
1772 max_data += -dma & (ICE_MAX_READ_REQ_SIZE - 1);
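/* e.g. if the mapping starts 0x800 bytes past a 4KB boundary, max_data
 * grows from 12KB to 14KB (still under the 16KB - 1 HW limit) so that
 * every following chunk of this buffer starts 4KB aligned
 */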
1773 tx_desc->buf_addr = cpu_to_le64(dma);
1774
1775 /* account for data chunks larger than the hardware
1776 * can handle
1777 */
1778 while (unlikely(size > ICE_MAX_DATA_PER_TXD)) {
1779 tx_desc->cmd_type_offset_bsz =
1780 ice_build_ctob(td_cmd, td_offset, max_data,
1781 td_tag);
1782
1783 tx_desc++;
1784 i++;
1785
1786 if (i == tx_ring->count) {
1787 tx_desc = ICE_TX_DESC(tx_ring, 0);
1788 i = 0;
1789 }
1790
1791 dma += max_data;
1792 size -= max_data;
1793
1794 max_data = ICE_MAX_DATA_PER_TXD_ALIGNED;
1795 tx_desc->buf_addr = cpu_to_le64(dma);
1796 }
1797
1798 if (likely(!data_len))
1799 break;
1800
1801 tx_desc->cmd_type_offset_bsz = ice_build_ctob(td_cmd, td_offset,
1802 size, td_tag);
1803
1804 tx_desc++;
1805 i++;
1806
1807 if (i == tx_ring->count) {
1808 tx_desc = ICE_TX_DESC(tx_ring, 0);
1809 i = 0;
1810 }
1811
1812 size = skb_frag_size(frag);
1813 data_len -= size;
1814
1815 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
1816 DMA_TO_DEVICE);
1817
1818 tx_buf = &tx_ring->tx_buf[i];
1819 }
1820
1821 /* record bytecount for BQL */
1822 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
1823
1824 /* record SW timestamp if HW timestamp is not available */
1825 skb_tx_timestamp(first->skb);
1826
1827 i++;
1828 if (i == tx_ring->count)
1829 i = 0;
1830
1831 /* write last descriptor with RS and EOP bits */
1832 td_cmd |= (u64)ICE_TXD_LAST_DESC_CMD;
1833 tx_desc->cmd_type_offset_bsz =
1834 ice_build_ctob(td_cmd, td_offset, size, td_tag);
1835
1836 /* Force memory writes to complete before letting h/w know there
1837 * are new descriptors to fetch.
1838 *
1839 * We also use this memory barrier to make certain all of the
1840 * status bits have been updated before next_to_watch is written.
1841 */
1842 wmb();
1843
1844 /* set next_to_watch value indicating a packet is present */
1845 first->next_to_watch = tx_desc;
1846
1847 tx_ring->next_to_use = i;
1848
1849 ice_maybe_stop_tx(tx_ring, DESC_NEEDED);
1850
1851 /* notify HW of packet */
1852 if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more())
1853 writel(i, tx_ring->tail);
1854
1855 return;
1856
1857 dma_error:
1858 /* clear DMA mappings for failed tx_buf map */
1859 for (;;) {
1860 tx_buf = &tx_ring->tx_buf[i];
1861 ice_unmap_and_free_tx_buf(tx_ring, tx_buf);
1862 if (tx_buf == first)
1863 break;
1864 if (i == 0)
1865 i = tx_ring->count;
1866 i--;
1867 }
1868
1869 tx_ring->next_to_use = i;
1870 }
1871
1872 /**
1873 * ice_tx_csum - Enable Tx checksum offloads
1874 * @first: pointer to the first descriptor
1875 * @off: pointer to struct that holds offload parameters
1876 *
1877 * Returns 0 or error (negative) if checksum offload can't happen, 1 otherwise.
1878 */
1879 static
1880 int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
1881 {
1882 u32 l4_len = 0, l3_len = 0, l2_len = 0;
1883 struct sk_buff *skb = first->skb;
1884 union {
1885 struct iphdr *v4;
1886 struct ipv6hdr *v6;
1887 unsigned char *hdr;
1888 } ip;
1889 union {
1890 struct tcphdr *tcp;
1891 unsigned char *hdr;
1892 } l4;
1893 __be16 frag_off, protocol;
1894 unsigned char *exthdr;
1895 u32 offset, cmd = 0;
1896 u8 l4_proto = 0;
1897
1898 if (skb->ip_summed != CHECKSUM_PARTIAL)
1899 return 0;
1900
1901 ip.hdr = skb_network_header(skb);
1902 l4.hdr = skb_transport_header(skb);
1903
1904 /* compute outer L2 header size */
1905 l2_len = ip.hdr - skb->data;
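/* MACLEN in the descriptor is expressed in 2-byte words, e.g. a plain
 * 14-byte Ethernet header encodes as 7
 */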
1906 offset = (l2_len / 2) << ICE_TX_DESC_LEN_MACLEN_S;
1907
1908 protocol = vlan_get_protocol(skb);
1909
1910 if (protocol == htons(ETH_P_IP))
1911 first->tx_flags |= ICE_TX_FLAGS_IPV4;
1912 else if (protocol == htons(ETH_P_IPV6))
1913 first->tx_flags |= ICE_TX_FLAGS_IPV6;
1914
1915 if (skb->encapsulation) {
1916 bool gso_ena = false;
1917 u32 tunnel = 0;
1918
1919 /* define outer network header type */
1920 if (first->tx_flags & ICE_TX_FLAGS_IPV4) {
1921 tunnel |= (first->tx_flags & ICE_TX_FLAGS_TSO) ?
1922 ICE_TX_CTX_EIPT_IPV4 :
1923 ICE_TX_CTX_EIPT_IPV4_NO_CSUM;
1924 l4_proto = ip.v4->protocol;
1925 } else if (first->tx_flags & ICE_TX_FLAGS_IPV6) {
1926 tunnel |= ICE_TX_CTX_EIPT_IPV6;
1927 exthdr = ip.hdr + sizeof(*ip.v6);
1928 l4_proto = ip.v6->nexthdr;
1929 if (l4.hdr != exthdr)
1930 ipv6_skip_exthdr(skb, exthdr - skb->data,
1931 &l4_proto, &frag_off);
1932 }
1933
1934 /* define outer transport */
1935 switch (l4_proto) {
1936 case IPPROTO_UDP:
1937 tunnel |= ICE_TXD_CTX_UDP_TUNNELING;
1938 first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
1939 break;
1940 case IPPROTO_GRE:
1941 tunnel |= ICE_TXD_CTX_GRE_TUNNELING;
1942 first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
1943 break;
1944 case IPPROTO_IPIP:
1945 case IPPROTO_IPV6:
1946 first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
1947 l4.hdr = skb_inner_network_header(skb);
1948 break;
1949 default:
1950 if (first->tx_flags & ICE_TX_FLAGS_TSO)
1951 return -1;
1952
1953 skb_checksum_help(skb);
1954 return 0;
1955 }
1956
1957 /* compute outer L3 header size */
1958 tunnel |= ((l4.hdr - ip.hdr) / 4) <<
1959 ICE_TXD_CTX_QW0_EIPLEN_S;
1960
1961 /* switch IP header pointer from outer to inner header */
1962 ip.hdr = skb_inner_network_header(skb);
1963
1964 /* compute tunnel header size */
1965 tunnel |= ((ip.hdr - l4.hdr) / 2) <<
1966 ICE_TXD_CTX_QW0_NATLEN_S;
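/* e.g. a 20-byte outer IPv4 header encodes as EIPLEN = 5 (4-byte words),
 * and for a VXLAN frame the outer UDP (8) + VXLAN (8) + inner Ethernet
 * (14) headers encode as NATLEN = 15 (2-byte words)
 */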
1967
1968 gso_ena = skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL;
1969 /* indicate if we need to offload outer UDP header */
1970 if ((first->tx_flags & ICE_TX_FLAGS_TSO) && !gso_ena &&
1971 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
1972 tunnel |= ICE_TXD_CTX_QW0_L4T_CS_M;
1973
1974 /* record tunnel offload values */
1975 off->cd_tunnel_params |= tunnel;
1976
1977 /* set DTYP=1 to indicate that it's a Tx context descriptor
1978 * in IPsec tunnel mode with Tx offloads in Quad word 1
1979 */
1980 off->cd_qw1 |= (u64)ICE_TX_DESC_DTYPE_CTX;
1981
1982 /* switch L4 header pointer from outer to inner */
1983 l4.hdr = skb_inner_transport_header(skb);
1984 l4_proto = 0;
1985
1986 /* reset type as we transition from outer to inner headers */
1987 first->tx_flags &= ~(ICE_TX_FLAGS_IPV4 | ICE_TX_FLAGS_IPV6);
1988 if (ip.v4->version == 4)
1989 first->tx_flags |= ICE_TX_FLAGS_IPV4;
1990 if (ip.v6->version == 6)
1991 first->tx_flags |= ICE_TX_FLAGS_IPV6;
1992 }
1993
1994 /* Enable IP checksum offloads */
1995 if (first->tx_flags & ICE_TX_FLAGS_IPV4) {
1996 l4_proto = ip.v4->protocol;
1997 /* the stack computes the IP header already, the only time we
1998 * need the hardware to recompute it is in the case of TSO.
1999 */
2000 if (first->tx_flags & ICE_TX_FLAGS_TSO)
2001 cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;
2002 else
2003 cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;
2004
2005 } else if (first->tx_flags & ICE_TX_FLAGS_IPV6) {
2006 cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
2007 exthdr = ip.hdr + sizeof(*ip.v6);
2008 l4_proto = ip.v6->nexthdr;
2009 if (l4.hdr != exthdr)
2010 ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_proto,
2011 &frag_off);
2012 } else {
2013 return -1;
2014 }
2015
2016 /* compute inner L3 header size */
2017 l3_len = l4.hdr - ip.hdr;
2018 offset |= (l3_len / 4) << ICE_TX_DESC_LEN_IPLEN_S;
2019
2020 /* Enable L4 checksum offloads */
2021 switch (l4_proto) {
2022 case IPPROTO_TCP:
2023 /* enable checksum offloads */
2024 cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
2025 l4_len = l4.tcp->doff;
2026 offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
2027 break;
2028 case IPPROTO_UDP:
2029 /* enable UDP checksum offload */
2030 cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
2031 l4_len = (sizeof(struct udphdr) >> 2);
2032 offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
2033 break;
2034 case IPPROTO_SCTP:
2035 /* enable SCTP checksum offload */
2036 cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP;
2037 l4_len = sizeof(struct sctphdr) >> 2;
2038 offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
2039 break;
2040
2041 default:
2042 if (first->tx_flags & ICE_TX_FLAGS_TSO)
2043 return -1;
2044 skb_checksum_help(skb);
2045 return 0;
2046 }
2047
2048 off->td_cmd |= cmd;
2049 off->td_offset |= offset;
2050 return 1;
2051 }
2052
2053 /**
2054 * ice_tx_prepare_vlan_flags - prepare generic Tx VLAN tagging flags for HW
2055 * @tx_ring: ring to send buffer on
2056 * @first: pointer to struct ice_tx_buf
2057 *
2058 * Checks the skb and sets up the corresponding generic transmit flags
2059 * related to VLAN tagging for the HW, such as VLAN and DCB.
2060 */
2061 static void
2062 ice_tx_prepare_vlan_flags(struct ice_ring *tx_ring, struct ice_tx_buf *first)
2063 {
2064 struct sk_buff *skb = first->skb;
2065
2066 /* nothing left to do, software offloaded VLAN */
2067 if (!skb_vlan_tag_present(skb) && eth_type_vlan(skb->protocol))
2068 return;
2069
2070 /* currently, we always assume 802.1Q for VLAN insertion as VLAN
2071 * insertion for 802.1AD is not supported
2072 */
2073 if (skb_vlan_tag_present(skb)) {
2074 first->tx_flags |= skb_vlan_tag_get(skb) << ICE_TX_FLAGS_VLAN_S;
2075 first->tx_flags |= ICE_TX_FLAGS_HW_VLAN;
2076 }
2077
2078 ice_tx_prepare_vlan_flags_dcb(tx_ring, first);
2079 }
2080
2081 /**
2082 * ice_tso - computes mss and TSO length to prepare for TSO
2083 * @first: pointer to struct ice_tx_buf
2084 * @off: pointer to struct that holds offload parameters
2085 *
2086 * Returns 0 or error (negative) if TSO can't happen, 1 otherwise.
2087 */
2088 static
2089 int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
2090 {
2091 struct sk_buff *skb = first->skb;
2092 union {
2093 struct iphdr *v4;
2094 struct ipv6hdr *v6;
2095 unsigned char *hdr;
2096 } ip;
2097 union {
2098 struct tcphdr *tcp;
2099 struct udphdr *udp;
2100 unsigned char *hdr;
2101 } l4;
2102 u64 cd_mss, cd_tso_len;
2103 u32 paylen;
2104 u8 l4_start;
2105 int err;
2106
2107 if (skb->ip_summed != CHECKSUM_PARTIAL)
2108 return 0;
2109
2110 if (!skb_is_gso(skb))
2111 return 0;
2112
2113 err = skb_cow_head(skb, 0);
2114 if (err < 0)
2115 return err;
2116
2117 /* cppcheck-suppress unreadVariable */
2118 ip.hdr = skb_network_header(skb);
2119 l4.hdr = skb_transport_header(skb);
2120
2121 /* initialize outer IP header fields */
2122 if (ip.v4->version == 4) {
2123 ip.v4->tot_len = 0;
2124 ip.v4->check = 0;
2125 } else {
2126 ip.v6->payload_len = 0;
2127 }
2128
2129 if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
2130 SKB_GSO_GRE_CSUM |
2131 SKB_GSO_IPXIP4 |
2132 SKB_GSO_IPXIP6 |
2133 SKB_GSO_UDP_TUNNEL |
2134 SKB_GSO_UDP_TUNNEL_CSUM)) {
2135 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
2136 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) {
2137 l4.udp->len = 0;
2138
2139 /* determine offset of outer transport header */
2140 l4_start = (u8)(l4.hdr - skb->data);
2141
2142 /* remove payload length from outer checksum */
2143 paylen = skb->len - l4_start;
2144 csum_replace_by_diff(&l4.udp->check,
2145 (__force __wsum)htonl(paylen));
2146 }
2147
2148 /* reset pointers to inner headers */
2149
2150 /* cppcheck-suppress unreadVariable */
2151 ip.hdr = skb_inner_network_header(skb);
2152 l4.hdr = skb_inner_transport_header(skb);
2153
2154 /* initialize inner IP header fields */
2155 if (ip.v4->version == 4) {
2156 ip.v4->tot_len = 0;
2157 ip.v4->check = 0;
2158 } else {
2159 ip.v6->payload_len = 0;
2160 }
2161 }
2162
2163 /* determine offset of transport header */
2164 l4_start = (u8)(l4.hdr - skb->data);
2165
2166 /* remove payload length from checksum */
2167 paylen = skb->len - l4_start;
2168
2169 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
2170 csum_replace_by_diff(&l4.udp->check,
2171 (__force __wsum)htonl(paylen));
2172 /* compute length of UDP segmentation header */
2173 off->header_len = (u8)sizeof(struct udphdr) + l4_start;
2174 } else {
2175 csum_replace_by_diff(&l4.tcp->check,
2176 (__force __wsum)htonl(paylen));
2177 /* compute length of TCP segmentation header */
2178 off->header_len = (u8)((l4.tcp->doff * 4) + l4_start);
2179 }
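/* e.g. for a plain TCP/IPv4 TSO frame with no options this yields
 * header_len = 14 + 20 + 20 = 54 bytes
 */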
2180
2181 /* update gso_segs and bytecount */
2182 first->gso_segs = skb_shinfo(skb)->gso_segs;
2183 first->bytecount += (first->gso_segs - 1) * off->header_len;
2184
2185 cd_tso_len = skb->len - off->header_len;
2186 cd_mss = skb_shinfo(skb)->gso_size;
2187
2188 /* record cdesc_qw1 with TSO parameters */
2189 off->cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
2190 (ICE_TX_CTX_DESC_TSO << ICE_TXD_CTX_QW1_CMD_S) |
2191 (cd_tso_len << ICE_TXD_CTX_QW1_TSO_LEN_S) |
2192 (cd_mss << ICE_TXD_CTX_QW1_MSS_S));
2193 first->tx_flags |= ICE_TX_FLAGS_TSO;
2194 return 1;
2195 }
2196
2197 /**
2198 * ice_txd_use_count - estimate the number of descriptors needed for Tx
2199 * @size: transmit request size in bytes
2200 *
2201 * Due to hardware alignment restrictions (4K alignment), we need to
2202 * assume that we can have no more than 12K of data per descriptor, even
2203 * though each descriptor can take up to 16K - 1 bytes of aligned memory.
2204 * Thus, we need to divide by 12K. But division is slow! Instead,
2205 * we decompose the operation into shifts and one relatively cheap
2206 * multiply operation.
2207 *
2208 * To divide by 12K, we first divide by 4K, then divide by 3:
2209 * To divide by 4K, shift right by 12 bits
2210 * To divide by 3, multiply by 85, then divide by 256
2211 * (Divide by 256 is done by shifting right by 8 bits)
2212 * Finally, we add one to round up. Because 256 isn't an exact multiple of
2213 * 3, we'll underestimate near each multiple of 12K. This is actually more
2214 * accurate as we have 4K - 1 of wiggle room that we can fit into the last
2215 * segment. For our purposes this is accurate out to 1M which is orders of
2216 * magnitude greater than our largest possible GSO size.
2217 *
2218 * This would then be implemented as:
2219 * return (((size >> 12) * 85) >> 8) + ICE_DESCS_FOR_SKB_DATA_PTR;
2220 *
2221 * Since multiplication and division are commutative, we can reorder
2222 * operations into:
2223 * return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR;
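*
* For example, assuming ICE_DESCS_FOR_SKB_DATA_PTR is 1, a 64KB chunk
* yields ((65536 * 85) >> 20) + 1 = 6 descriptors, which matches
* DIV_ROUND_UP(65536, 12288) = 6.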
2224 */
2225 static unsigned int ice_txd_use_count(unsigned int size)
2226 {
2227 return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR;
2228 }
2229
2230 /**
2231 * ice_xmit_desc_count - calculate number of Tx descriptors needed
2232 * @skb: send buffer
2233 *
2234 * Returns number of data descriptors needed for this skb.
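*
* For example, a 1400 byte linear area plus three 4KB page fragments
* counts as 1 + 3 * 1 = 4 descriptors.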
2235 */
2236 static unsigned int ice_xmit_desc_count(struct sk_buff *skb)
2237 {
2238 const skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
2239 unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
2240 unsigned int count = 0, size = skb_headlen(skb);
2241
2242 for (;;) {
2243 count += ice_txd_use_count(size);
2244
2245 if (!nr_frags--)
2246 break;
2247
2248 size = skb_frag_size(frag++);
2249 }
2250
2251 return count;
2252 }
2253
2254 /**
2255 * __ice_chk_linearize - Check if there are more than 8 buffers per packet
2256 * @skb: send buffer
2257 *
2258 * Note: This HW can't DMA more than 8 buffers to build a packet on the wire
2259 * and so we need to figure out the cases where we need to linearize the skb.
2260 *
2261 * For TSO we need to count the TSO header and segment payload separately.
2262 * As such we need to check cases where we have 7 fragments or more as we
2263 * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
2264 * the segment payload in the first descriptor, and another 7 for the
2265 * fragments.
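*
* As a rough illustration with hypothetical sizes: for gso_size = 9000
* and seven 1400-byte fragments, any six consecutive fragments cover only
* 8400 bytes of a segment, so the skb must be linearized; with seven
* 1500-byte fragments, each group of six covers a full segment and no
* linearization is needed.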
2266 */
2267 static bool __ice_chk_linearize(struct sk_buff *skb)
2268 {
2269 const skb_frag_t *frag, *stale;
2270 int nr_frags, sum;
2271
2272 /* no need to check if number of frags is less than 7 */
2273 nr_frags = skb_shinfo(skb)->nr_frags;
2274 if (nr_frags < (ICE_MAX_BUF_TXD - 1))
2275 return false;
2276
2277 /* We need to walk through the list and validate that each group
2278 * of 6 fragments totals at least gso_size.
2279 */
2280 nr_frags -= ICE_MAX_BUF_TXD - 2;
2281 frag = &skb_shinfo(skb)->frags[0];
2282
2283 /* Initialize sum to 1 - gso_size (i.e. -(gso_size - 1)). We
2284 * use this as the worst case scenario in which the frag ahead
2285 * of us only provides one byte which is why we are limited to 6
2286 * descriptors for a single transmit as the header and previous
2287 * fragment are already consuming 2 descriptors.
2288 */
2289 sum = 1 - skb_shinfo(skb)->gso_size;
2290
2291 /* Add size of frags 0 through 4 to create our initial sum */
2292 sum += skb_frag_size(frag++);
2293 sum += skb_frag_size(frag++);
2294 sum += skb_frag_size(frag++);
2295 sum += skb_frag_size(frag++);
2296 sum += skb_frag_size(frag++);
2297
2298 /* Walk through fragments adding latest fragment, testing it, and
2299 * then removing stale fragments from the sum.
2300 */
2301 for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
2302 int stale_size = skb_frag_size(stale);
2303
2304 sum += skb_frag_size(frag++);
2305
2306 /* The stale fragment may present us with a smaller
2307 * descriptor than the actual fragment size. To account
2308 * for that we need to remove all the data on the front and
2309 * figure out what the remainder would be in the last
2310 * descriptor associated with the fragment.
2311 */
2312 if (stale_size > ICE_MAX_DATA_PER_TXD) {
2313 int align_pad = -(skb_frag_off(stale)) &
2314 (ICE_MAX_READ_REQ_SIZE - 1);
2315
2316 sum -= align_pad;
2317 stale_size -= align_pad;
2318
2319 do {
2320 sum -= ICE_MAX_DATA_PER_TXD_ALIGNED;
2321 stale_size -= ICE_MAX_DATA_PER_TXD_ALIGNED;
2322 } while (stale_size > ICE_MAX_DATA_PER_TXD);
2323 }
2324
2325 /* if sum is negative we failed to make sufficient progress */
2326 if (sum < 0)
2327 return true;
2328
2329 if (!nr_frags--)
2330 break;
2331
2332 sum -= stale_size;
2333 }
2334
2335 return false;
2336 }
2337
2338 /**
2339 * ice_chk_linearize - Check if there are more than 8 fragments per packet
2340 * @skb: send buffer
2341 * @count: number of buffers used
2342 *
2343 * Note: Our HW can't scatter-gather more than 8 fragments to build
2344 * a packet on the wire and so we need to figure out the cases where we
2345 * need to linearize the skb.
2346 */
2347 static bool ice_chk_linearize(struct sk_buff *skb, unsigned int count)
2348 {
2349 /* Both TSO and single send will work if count is less than 8 */
2350 if (likely(count < ICE_MAX_BUF_TXD))
2351 return false;
2352
2353 if (skb_is_gso(skb))
2354 return __ice_chk_linearize(skb);
2355
2356 /* we can support up to 8 data buffers for a single send */
2357 return count != ICE_MAX_BUF_TXD;
2358 }
2359
2360 /**
2361 * ice_xmit_frame_ring - Sends buffer on Tx ring
2362 * @skb: send buffer
2363 * @tx_ring: ring to send buffer on
2364 *
2365 * Returns NETDEV_TX_OK if sent, else an error code
2366 */
2367 static netdev_tx_t
2368 ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
2369 {
2370 struct ice_tx_offload_params offload = { 0 };
2371 struct ice_vsi *vsi = tx_ring->vsi;
2372 struct ice_tx_buf *first;
2373 unsigned int count;
2374 int tso, csum;
2375
2376 count = ice_xmit_desc_count(skb);
2377 if (ice_chk_linearize(skb, count)) {
2378 if (__skb_linearize(skb))
2379 goto out_drop;
2380 count = ice_txd_use_count(skb->len);
2381 tx_ring->tx_stats.tx_linearize++;
2382 }
2383
2384 /* need: 1 descriptor per page * PAGE_SIZE/ICE_MAX_DATA_PER_TXD,
2385 * + 1 desc for skb_head_len/ICE_MAX_DATA_PER_TXD,
2386 * + 4 desc gap to avoid the cache line where head is,
2387 * + 1 desc for context descriptor,
2388 * otherwise try next time
2389 */
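/* e.g. a small linear frame needs one data descriptor, so roughly
 * 1 + 4 + 1 = 6 free entries must be available before it is queued
 */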
2390 if (ice_maybe_stop_tx(tx_ring, count + ICE_DESCS_PER_CACHE_LINE +
2391 ICE_DESCS_FOR_CTX_DESC)) {
2392 tx_ring->tx_stats.tx_busy++;
2393 return NETDEV_TX_BUSY;
2394 }
2395
2396 offload.tx_ring = tx_ring;
2397
2398 /* record the location of the first descriptor for this packet */
2399 first = &tx_ring->tx_buf[tx_ring->next_to_use];
2400 first->skb = skb;
2401 first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN);
2402 first->gso_segs = 1;
2403 first->tx_flags = 0;
2404
2405 /* prepare the VLAN tagging flags for Tx */
2406 ice_tx_prepare_vlan_flags(tx_ring, first);
2407
2408 /* set up TSO offload */
2409 tso = ice_tso(first, &offload);
2410 if (tso < 0)
2411 goto out_drop;
2412
2413 /* always set up Tx checksum offload */
2414 csum = ice_tx_csum(first, &offload);
2415 if (csum < 0)
2416 goto out_drop;
2417
2418 /* allow CONTROL frames egress from main VSI if FW LLDP disabled */
2419 if (unlikely(skb->priority == TC_PRIO_CONTROL &&
2420 vsi->type == ICE_VSI_PF &&
2421 vsi->port_info->is_sw_lldp))
2422 offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
2423 ICE_TX_CTX_DESC_SWTCH_UPLINK <<
2424 ICE_TXD_CTX_QW1_CMD_S);
2425
2426 if (offload.cd_qw1 & ICE_TX_DESC_DTYPE_CTX) {
2427 struct ice_tx_ctx_desc *cdesc;
2428 u16 i = tx_ring->next_to_use;
2429
2430 /* grab the next descriptor */
2431 cdesc = ICE_TX_CTX_DESC(tx_ring, i);
2432 i++;
2433 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
2434
2435 /* setup context descriptor */
2436 cdesc->tunneling_params = cpu_to_le32(offload.cd_tunnel_params);
2437 cdesc->l2tag2 = cpu_to_le16(offload.cd_l2tag2);
2438 cdesc->rsvd = cpu_to_le16(0);
2439 cdesc->qw1 = cpu_to_le64(offload.cd_qw1);
2440 }
2441
2442 ice_tx_map(tx_ring, first, &offload);
2443 return NETDEV_TX_OK;
2444
2445 out_drop:
2446 dev_kfree_skb_any(skb);
2447 return NETDEV_TX_OK;
2448 }
2449
2450 /**
2451 * ice_start_xmit - Selects the correct VSI and Tx queue to send buffer
2452 * @skb: send buffer
2453 * @netdev: network interface device structure
2454 *
2455 * Returns NETDEV_TX_OK if sent, else an error code
2456 */
2457 netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2458 {
2459 struct ice_netdev_priv *np = netdev_priv(netdev);
2460 struct ice_vsi *vsi = np->vsi;
2461 struct ice_ring *tx_ring;
2462
2463 tx_ring = vsi->tx_rings[skb->queue_mapping];
2464
2465 /* hardware can't handle really short frames; hardware padding works
2466 * beyond this point
2467 */
2468 if (skb_put_padto(skb, ICE_MIN_TX_LEN))
2469 return NETDEV_TX_OK;
2470
2471 return ice_xmit_frame_ring(skb, tx_ring);
2472 }
2473
2474 /**
2475 * ice_clean_ctrl_tx_irq - interrupt handler for flow director Tx queue
2476 * @tx_ring: tx_ring to clean
2477 */
2478 void ice_clean_ctrl_tx_irq(struct ice_ring *tx_ring)
2479 {
2480 struct ice_vsi *vsi = tx_ring->vsi;
2481 s16 i = tx_ring->next_to_clean;
2482 int budget = ICE_DFLT_IRQ_WORK;
2483 struct ice_tx_desc *tx_desc;
2484 struct ice_tx_buf *tx_buf;
2485
2486 tx_buf = &tx_ring->tx_buf[i];
2487 tx_desc = ICE_TX_DESC(tx_ring, i);
2488 i -= tx_ring->count;
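/* the subtraction above biases i negative so the wrap checks in the
 * loop below reduce to a simple test against zero
 */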
2489
2490 do {
2491 struct ice_tx_desc *eop_desc = tx_buf->next_to_watch;
2492
2493 /* if next_to_watch is not set then there is no pending work */
2494 if (!eop_desc)
2495 break;
2496
2497 /* prevent any other reads prior to eop_desc */
2498 smp_rmb();
2499
2500 /* if the descriptor isn't done, no work to do */
2501 if (!(eop_desc->cmd_type_offset_bsz &
2502 cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
2503 break;
2504
2505 /* clear next_to_watch to prevent false hangs */
2506 tx_buf->next_to_watch = NULL;
2507 tx_desc->buf_addr = 0;
2508 tx_desc->cmd_type_offset_bsz = 0;
2509
2510 /* move past filter desc */
2511 tx_buf++;
2512 tx_desc++;
2513 i++;
2514 if (unlikely(!i)) {
2515 i -= tx_ring->count;
2516 tx_buf = tx_ring->tx_buf;
2517 tx_desc = ICE_TX_DESC(tx_ring, 0);
2518 }
2519
2520 /* unmap the data header */
2521 if (dma_unmap_len(tx_buf, len))
2522 dma_unmap_single(tx_ring->dev,
2523 dma_unmap_addr(tx_buf, dma),
2524 dma_unmap_len(tx_buf, len),
2525 DMA_TO_DEVICE);
2526 if (tx_buf->tx_flags & ICE_TX_FLAGS_DUMMY_PKT)
2527 devm_kfree(tx_ring->dev, tx_buf->raw_buf);
2528
2529 /* reset the Tx buffer and clear next_to_watch to prevent false hangs */
2530 tx_buf->raw_buf = NULL;
2531 tx_buf->tx_flags = 0;
2532 tx_buf->next_to_watch = NULL;
2533 dma_unmap_len_set(tx_buf, len, 0);
2534 tx_desc->buf_addr = 0;
2535 tx_desc->cmd_type_offset_bsz = 0;
2536
2537 /* move past eop_desc for start of next FD desc */
2538 tx_buf++;
2539 tx_desc++;
2540 i++;
2541 if (unlikely(!i)) {
2542 i -= tx_ring->count;
2543 tx_buf = tx_ring->tx_buf;
2544 tx_desc = ICE_TX_DESC(tx_ring, 0);
2545 }
2546
2547 budget--;
2548 } while (likely(budget));
2549
2550 i += tx_ring->count;
2551 tx_ring->next_to_clean = i;
2552
2553 /* re-enable interrupt if needed */
2554 ice_irq_dynamic_ena(&vsi->back->hw, vsi, vsi->q_vectors[0]);
2555 }
2556