// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#include <linux/prefetch.h>
#include <net/busy_poll.h>

#include "i40evf.h"
#include "i40e_trace.h"
#include "i40e_prototype.h"

static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
				u32 td_tag)
{
	return cpu_to_le64(I40E_TX_DESC_DTYPE_DATA |
			   ((u64)td_cmd << I40E_TXD_QW1_CMD_SHIFT) |
			   ((u64)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
			   ((u64)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
			   ((u64)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT));
}

#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
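
/* For example, the final descriptor of a frame with no offloads is
 * typically written with build_ctob(I40E_TXD_CMD, 0, size, 0), packing
 * the DATA descriptor type, the EOP and RS commands, and the buffer
 * length into the descriptor's cmd_type_offset_bsz quadword.
 */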

/**
 * i40e_unmap_and_free_tx_resource - Release a Tx buffer
 * @ring: the ring that owns the buffer
 * @tx_buffer: the buffer to free
 **/
static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
					    struct i40e_tx_buffer *tx_buffer)
{
	if (tx_buffer->skb) {
		if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
			kfree(tx_buffer->raw_buf);
		else
			dev_kfree_skb_any(tx_buffer->skb);
		if (dma_unmap_len(tx_buffer, len))
			dma_unmap_single(ring->dev,
					 dma_unmap_addr(tx_buffer, dma),
					 dma_unmap_len(tx_buffer, len),
					 DMA_TO_DEVICE);
	} else if (dma_unmap_len(tx_buffer, len)) {
		dma_unmap_page(ring->dev,
			       dma_unmap_addr(tx_buffer, dma),
			       dma_unmap_len(tx_buffer, len),
			       DMA_TO_DEVICE);
	}

	tx_buffer->next_to_watch = NULL;
	tx_buffer->skb = NULL;
	dma_unmap_len_set(tx_buffer, len, 0);
	/* tx_buffer must be completely set up in the transmit path */
}

/**
 * i40evf_clean_tx_ring - Free any empty Tx buffers
 * @tx_ring: ring to be cleaned
 **/
void i40evf_clean_tx_ring(struct i40e_ring *tx_ring)
{
	unsigned long bi_size;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!tx_ring->tx_bi)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++)
		i40e_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]);

	bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
	memset(tx_ring->tx_bi, 0, bi_size);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	if (!tx_ring->netdev)
		return;

	/* cleanup Tx queue statistics */
	netdev_tx_reset_queue(txring_txq(tx_ring));
}

/**
 * i40evf_free_tx_resources - Free Tx resources per queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
void i40evf_free_tx_resources(struct i40e_ring *tx_ring)
{
	i40evf_clean_tx_ring(tx_ring);
	kfree(tx_ring->tx_bi);
	tx_ring->tx_bi = NULL;

	if (tx_ring->desc) {
		dma_free_coherent(tx_ring->dev, tx_ring->size,
				  tx_ring->desc, tx_ring->dma);
		tx_ring->desc = NULL;
	}
}

/**
 * i40evf_get_tx_pending - how many Tx descriptors not processed
 * @ring: the ring of descriptors
 * @in_sw: is tx_pending being checked in SW or HW
 *
 * Since there is no access to the ring head register
 * in XL710, we need to use our local copies
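 *
 * For example, on a 512-descriptor ring with head == 500 and
 * tail == 10, the wrapped count is 10 + 512 - 500 = 22 pending
 * descriptors.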
 **/
u32 i40evf_get_tx_pending(struct i40e_ring *ring, bool in_sw)
{
	u32 head, tail;

	head = ring->next_to_clean;
	tail = readl(ring->tail);

	if (head != tail)
		return (head < tail) ?
			tail - head : (tail + ring->count - head);

	return 0;
}

/**
 * i40evf_detect_recover_hung - Function to detect and recover hung queues
 * @vsi: pointer to vsi struct with tx queues
 *
 * VSI has netdev and netdev has TX queues. This function checks each of
 * those TX queues; if a queue appears hung, it triggers recovery by
 * issuing a SW interrupt.
 **/
void i40evf_detect_recover_hung(struct i40e_vsi *vsi)
{
	struct i40e_ring *tx_ring = NULL;
	struct net_device *netdev;
	unsigned int i;
	int packets;

	if (!vsi)
		return;

	if (test_bit(__I40E_VSI_DOWN, vsi->state))
		return;

	netdev = vsi->netdev;
	if (!netdev)
		return;

	if (!netif_carrier_ok(netdev))
		return;

	for (i = 0; i < vsi->back->num_active_queues; i++) {
		tx_ring = &vsi->back->tx_rings[i];
		if (tx_ring && tx_ring->desc) {
			/* If packet counter has not changed the queue is
			 * likely stalled, so force an interrupt for this
			 * queue.
			 *
			 * prev_pkt_ctr would be negative if there was no
			 * pending work.
			 */
			packets = tx_ring->stats.packets & INT_MAX;
			if (tx_ring->tx_stats.prev_pkt_ctr == packets) {
				i40evf_force_wb(vsi, tx_ring->q_vector);
				continue;
			}

			/* Memory barrier between read of packet count and call
			 * to i40evf_get_tx_pending()
			 */
			smp_rmb();
			tx_ring->tx_stats.prev_pkt_ctr =
				i40evf_get_tx_pending(tx_ring, true) ? packets : -1;
		}
	}
}

#define WB_STRIDE 4

/**
 * i40e_clean_tx_irq - Reclaim resources after transmit completes
 * @vsi: the VSI we care about
 * @tx_ring: Tx ring to clean
 * @napi_budget: Used to determine if we are in netpoll
 *
 * Returns true if there's any budget left (e.g. the clean is finished)
 **/
static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
			      struct i40e_ring *tx_ring, int napi_budget)
{
	u16 i = tx_ring->next_to_clean;
	struct i40e_tx_buffer *tx_buf;
	struct i40e_tx_desc *tx_desc;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int budget = vsi->work_limit;

	tx_buf = &tx_ring->tx_bi[i];
	tx_desc = I40E_TX_DESC(tx_ring, i);
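
	/* i is tracked as a negative offset from the ring size below so
	 * the wrap check in the loop is a cheap "!i" test rather than a
	 * compare against tx_ring->count.
	 */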
	i -= tx_ring->count;

	do {
		struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		smp_rmb();

		i40e_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
		/* if the descriptor isn't done, no work yet to do */
		if (!(eop_desc->cmd_type_offset_bsz &
		      cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buf->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buf->bytecount;
		total_packets += tx_buf->gso_segs;

		/* free the skb */
		napi_consume_skb(tx_buf->skb, napi_budget);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buf, dma),
				 dma_unmap_len(tx_buf, len),
				 DMA_TO_DEVICE);

		/* clear tx_buffer data */
		tx_buf->skb = NULL;
		dma_unmap_len_set(tx_buf, len, 0);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			i40e_trace(clean_tx_irq_unmap,
				   tx_ring, tx_desc, tx_buf);

			tx_buf++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buf = tx_ring->tx_bi;
				tx_desc = I40E_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buf, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buf, dma),
					       dma_unmap_len(tx_buf, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buf, len, 0);
			}
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_bi;
			tx_desc = I40E_TX_DESC(tx_ring, 0);
		}

		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;
	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->stats.bytes += total_bytes;
	tx_ring->stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);
	tx_ring->q_vector->tx.total_bytes += total_bytes;
	tx_ring->q_vector->tx.total_packets += total_packets;

	if (tx_ring->flags & I40E_TXR_FLAGS_WB_ON_ITR) {
		/* check to see if there are < 4 descriptors
		 * waiting to be written back, then kick the hardware to force
		 * them to be written back in case we stay in NAPI.
		 * In this mode on X722 we do not enable Interrupt.
		 */
		unsigned int j = i40evf_get_tx_pending(tx_ring, false);

		if (budget &&
		    ((j / WB_STRIDE) == 0) && (j > 0) &&
		    !test_bit(__I40E_VSI_DOWN, vsi->state) &&
		    (I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
			tx_ring->arm_wb = true;
	}

	/* notify netdev of completed buffers */
	netdev_tx_completed_queue(txring_txq(tx_ring),
				  total_packets, total_bytes);

#define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
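	/* DESC_NEEDED approximates the worst-case descriptor count for a
	 * single frame, so the queue is only woken once at least two
	 * frames' worth of descriptors are free; this avoids waking the
	 * queue just to stop it again immediately.
	 */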
	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
		     (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index) &&
		    !test_bit(__I40E_VSI_DOWN, vsi->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);
			++tx_ring->tx_stats.restart_queue;
		}
	}

	return !!budget;
}

/**
 * i40e_enable_wb_on_itr - Arm hardware to do a wb, interrupts are not enabled
 * @vsi: the VSI we care about
 * @q_vector: the vector on which to enable writeback
 *
 **/
static void i40e_enable_wb_on_itr(struct i40e_vsi *vsi,
				  struct i40e_q_vector *q_vector)
{
	u16 flags = q_vector->tx.ring[0].flags;
	u32 val;

	if (!(flags & I40E_TXR_FLAGS_WB_ON_ITR))
		return;

	if (q_vector->arm_wb_state)
		return;

	val = I40E_VFINT_DYN_CTLN1_WB_ON_ITR_MASK |
	      I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK; /* set noitr */

	wr32(&vsi->back->hw,
	     I40E_VFINT_DYN_CTLN1(q_vector->reg_idx), val);
	q_vector->arm_wb_state = true;
}

/**
 * i40evf_force_wb - Issue SW Interrupt so HW does a wb
 * @vsi: the VSI we care about
 * @q_vector: the vector on which to force writeback
 *
 **/
void i40evf_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
{
	u32 val = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
		  I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK | /* set noitr */
		  I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK |
		  I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK
		  /* allow 00 to be written to the index */;

	wr32(&vsi->back->hw,
	     I40E_VFINT_DYN_CTLN1(q_vector->reg_idx),
	     val);
}

static inline bool i40e_container_is_rx(struct i40e_q_vector *q_vector,
					struct i40e_ring_container *rc)
{
	return &q_vector->rx == rc;
}

static inline unsigned int i40e_itr_divisor(struct i40e_q_vector *q_vector)
{
	unsigned int divisor;

	switch (q_vector->adapter->link_speed) {
	case I40E_LINK_SPEED_40GB:
		divisor = I40E_ITR_ADAPTIVE_MIN_INC * 1024;
		break;
	case I40E_LINK_SPEED_25GB:
	case I40E_LINK_SPEED_20GB:
		divisor = I40E_ITR_ADAPTIVE_MIN_INC * 512;
		break;
	default:
	case I40E_LINK_SPEED_10GB:
		divisor = I40E_ITR_ADAPTIVE_MIN_INC * 256;
		break;
	case I40E_LINK_SPEED_1GB:
	case I40E_LINK_SPEED_100MB:
		divisor = I40E_ITR_ADAPTIVE_MIN_INC * 32;
		break;
	}

	return divisor;
}

/**
 * i40e_update_itr - update the dynamic ITR value based on statistics
 * @q_vector: structure containing interrupt and ring information
 * @rc: structure containing ring performance data
 *
 * Stores a new ITR value based on packets and byte
 * counts during the last interrupt. The advantage of per interrupt
 * computation is faster updates and more accurate ITR for the current
 * traffic pattern. Constants in this function were computed
 * based on theoretical maximum wire speed and thresholds were set based
 * on testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 **/
static void i40e_update_itr(struct i40e_q_vector *q_vector,
			    struct i40e_ring_container *rc)
{
	unsigned int avg_wire_size, packets, bytes, itr;
	unsigned long next_update = jiffies;

	/* If we don't have any rings just leave ourselves set for maximum
	 * possible latency so we take ourselves out of the equation.
	 */
	if (!rc->ring || !ITR_IS_DYNAMIC(rc->ring->itr_setting))
		return;

	/* For Rx we want to push the delay up and default to low latency.
	 * for Tx we want to pull the delay down and default to high latency.
	 */
	itr = i40e_container_is_rx(q_vector, rc) ?
	      I40E_ITR_ADAPTIVE_MIN_USECS | I40E_ITR_ADAPTIVE_LATENCY :
	      I40E_ITR_ADAPTIVE_MAX_USECS | I40E_ITR_ADAPTIVE_LATENCY;

	/* If we didn't update within up to 1 - 2 jiffies we can assume
	 * that either packets are coming in so slow there hasn't been
	 * any work, or that there is so much work that NAPI is dealing
	 * with interrupt moderation and we don't need to do anything.
	 */
	if (time_after(next_update, rc->next_update))
		goto clear_counts;

	/* If itr_countdown is set it means we programmed an ITR within
	 * the last 4 interrupt cycles. This has a side effect of us
	 * potentially firing an early interrupt. In order to work around
	 * this we need to throw out any data received for a few
	 * interrupts following the update.
	 */
	if (q_vector->itr_countdown) {
		itr = rc->target_itr;
		goto clear_counts;
	}

	packets = rc->total_packets;
	bytes = rc->total_bytes;

	if (i40e_container_is_rx(q_vector, rc)) {
		/* If Rx has only 1 to 4 packets and fewer than 9000 bytes,
		 * assume there is insufficient data to use the bulk rate
		 * limiting approach unless Tx is already in bulk rate
		 * limiting. We are likely latency driven.
		 */
		if (packets && packets < 4 && bytes < 9000 &&
		    (q_vector->tx.target_itr & I40E_ITR_ADAPTIVE_LATENCY)) {
			itr = I40E_ITR_ADAPTIVE_LATENCY;
			goto adjust_by_size;
		}
	} else if (packets < 4) {
		/* If we have Tx and Rx ITR maxed and Tx ITR is running in
		 * bulk mode and we are receiving 4 or fewer packets just
		 * reset the ITR_ADAPTIVE_LATENCY bit for latency mode so
		 * that the Rx can relax.
		 */
		if (rc->target_itr == I40E_ITR_ADAPTIVE_MAX_USECS &&
		    (q_vector->rx.target_itr & I40E_ITR_MASK) ==
		     I40E_ITR_ADAPTIVE_MAX_USECS)
			goto clear_counts;
	} else if (packets > 32) {
		/* If we have processed over 32 packets in a single interrupt
		 * for Tx assume we need to switch over to "bulk" mode.
		 */
		rc->target_itr &= ~I40E_ITR_ADAPTIVE_LATENCY;
	}

	/* We have no packets to actually measure against. This means
	 * either one of the other queues on this vector is active or
	 * we are a Tx queue doing TSO with too high of an interrupt rate.
	 *
	 * Between 4 and 56 we can assume that our current interrupt delay
	 * is only slightly too low. As such we should increase it by a small
	 * fixed amount.
	 */
	if (packets < 56) {
		itr = rc->target_itr + I40E_ITR_ADAPTIVE_MIN_INC;
		if ((itr & I40E_ITR_MASK) > I40E_ITR_ADAPTIVE_MAX_USECS) {
			itr &= I40E_ITR_ADAPTIVE_LATENCY;
			itr += I40E_ITR_ADAPTIVE_MAX_USECS;
		}
		goto clear_counts;
	}

	if (packets <= 256) {
		itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr);
		itr &= I40E_ITR_MASK;

		/* Between 56 and 112 is our "goldilocks" zone where we are
		 * working out "just right". Just report that our current
		 * ITR is good for us.
		 */
		if (packets <= 112)
			goto clear_counts;

		/* If the packet count is above 112 we are likely looking
		 * at a slight overrun of the delay we want. Try halving
		 * our delay to see if that will cut the number of packets
		 * in half per interrupt.
		 */
		itr /= 2;
		itr &= I40E_ITR_MASK;
		if (itr < I40E_ITR_ADAPTIVE_MIN_USECS)
			itr = I40E_ITR_ADAPTIVE_MIN_USECS;

		goto clear_counts;
	}

	/* The paths below assume we are dealing with a bulk ITR since
	 * number of packets is greater than 256. We are just going to have
	 * to compute a value and try to bring the count under control,
	 * though for smaller packet sizes there isn't much we can do as
	 * NAPI polling will likely be kicking in sooner rather than later.
	 */
	itr = I40E_ITR_ADAPTIVE_BULK;

adjust_by_size:
	/* If packet counts are 256 or greater we can assume we have a gross
	 * overestimation of what the rate should be. Instead of trying to fine
	 * tune it just use the formula below to try and dial in an exact value
	 * given the current packet size of the frame.
	 */
	avg_wire_size = bytes / packets;

	/* The following is a crude approximation of:
	 *  wmem_default / (size + overhead) = desired_pkts_per_int
	 *  rate / bits_per_byte / (size + ethernet overhead) = pkt_rate
	 *  (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value
	 *
	 * Assuming wmem_default is 212992 and overhead is 640 bytes per
	 * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the
	 * formula down to
	 *
	 *  (170 * (size + 24)) / (size + 640) = ITR
	 *
	 * We first do some math on the packet size and then finally bitshift
	 * by 8 after rounding up. We also have to account for PCIe link speed
	 * difference as ITR scales based on this.
	 */
	if (avg_wire_size <= 60) {
		/* Start at 250k ints/sec */
		avg_wire_size = 4096;
	} else if (avg_wire_size <= 380) {
		/* 250K ints/sec to 60K ints/sec */
		avg_wire_size *= 40;
		avg_wire_size += 1696;
	} else if (avg_wire_size <= 1084) {
		/* 60K ints/sec to 36K ints/sec */
		avg_wire_size *= 15;
		avg_wire_size += 11452;
	} else if (avg_wire_size <= 1980) {
		/* 36K ints/sec to 30K ints/sec */
		avg_wire_size *= 5;
		avg_wire_size += 22420;
	} else {
		/* plateau at a limit of 30K ints/sec */
		avg_wire_size = 32256;
	}

	/* If we are in low latency mode halve our delay which doubles the
	 * rate to somewhere between 100K to 16K ints/sec
	 */
	if (itr & I40E_ITR_ADAPTIVE_LATENCY)
		avg_wire_size /= 2;

	/* Resultant value is 256 times larger than it needs to be. This
	 * gives us room to adjust the value as needed to either increase
	 * or decrease the value based on link speeds of 10G, 2.5G, 1G, etc.
	 *
	 * Use addition as we have already recorded the new latency flag
	 * for the ITR value.
	 */
	itr += DIV_ROUND_UP(avg_wire_size, i40e_itr_divisor(q_vector)) *
	       I40E_ITR_ADAPTIVE_MIN_INC;
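
	/* Worked example, assuming I40E_ITR_ADAPTIVE_MIN_INC is 2 usecs:
	 * at 40Gb link speed the divisor is 2 * 1024 = 2048, so an
	 * avg_wire_size of 4096 adds DIV_ROUND_UP(4096, 2048) * 2 = 4 usecs
	 * (~250K ints/sec), while at 10Gb the divisor is 512 and the same
	 * value adds 16 usecs (~62K ints/sec).
	 */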

	if ((itr & I40E_ITR_MASK) > I40E_ITR_ADAPTIVE_MAX_USECS) {
		itr &= I40E_ITR_ADAPTIVE_LATENCY;
		itr += I40E_ITR_ADAPTIVE_MAX_USECS;
	}

clear_counts:
	/* write back value */
	rc->target_itr = itr;

	/* next update should occur within next jiffy */
	rc->next_update = next_update + 1;

	rc->total_bytes = 0;
	rc->total_packets = 0;
}

/**
 * i40evf_setup_tx_descriptors - Allocate the Tx descriptors
 * @tx_ring: the tx ring to set up
 *
 * Return 0 on success, negative on error
 **/
int i40evf_setup_tx_descriptors(struct i40e_ring *tx_ring)
{
	struct device *dev = tx_ring->dev;
	int bi_size;

	if (!dev)
		return -ENOMEM;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(tx_ring->tx_bi);
	bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
	tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
	if (!tx_ring->tx_bi)
		goto err;

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);
	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);
	if (!tx_ring->desc) {
		dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
			 tx_ring->size);
		goto err;
	}

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->tx_stats.prev_pkt_ctr = -1;
	return 0;

err:
	kfree(tx_ring->tx_bi);
	tx_ring->tx_bi = NULL;
	return -ENOMEM;
}

/**
 * i40evf_clean_rx_ring - Free Rx buffers
 * @rx_ring: ring to be cleaned
 **/
void i40evf_clean_rx_ring(struct i40e_ring *rx_ring)
{
	unsigned long bi_size;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!rx_ring->rx_bi)
		return;

	if (rx_ring->skb) {
		dev_kfree_skb(rx_ring->skb);
		rx_ring->skb = NULL;
	}

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		struct i40e_rx_buffer *rx_bi = &rx_ring->rx_bi[i];

		if (!rx_bi->page)
			continue;

		/* Invalidate cache lines that may have been written to by
		 * device so that we avoid corrupting memory.
		 */
		dma_sync_single_range_for_cpu(rx_ring->dev,
					      rx_bi->dma,
					      rx_bi->page_offset,
					      rx_ring->rx_buf_len,
					      DMA_FROM_DEVICE);

		/* free resources associated with mapping */
		dma_unmap_page_attrs(rx_ring->dev, rx_bi->dma,
				     i40e_rx_pg_size(rx_ring),
				     DMA_FROM_DEVICE,
				     I40E_RX_DMA_ATTR);

		__page_frag_cache_drain(rx_bi->page, rx_bi->pagecnt_bias);

		rx_bi->page = NULL;
		rx_bi->page_offset = 0;
	}

	bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
	memset(rx_ring->rx_bi, 0, bi_size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
}

/**
 * i40evf_free_rx_resources - Free Rx resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void i40evf_free_rx_resources(struct i40e_ring *rx_ring)
{
	i40evf_clean_rx_ring(rx_ring);
	kfree(rx_ring->rx_bi);
	rx_ring->rx_bi = NULL;

	if (rx_ring->desc) {
		dma_free_coherent(rx_ring->dev, rx_ring->size,
				  rx_ring->desc, rx_ring->dma);
		rx_ring->desc = NULL;
	}
}

/**
 * i40evf_setup_rx_descriptors - Allocate Rx descriptors
 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int i40evf_setup_rx_descriptors(struct i40e_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	int bi_size;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(rx_ring->rx_bi);
	bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
	rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL);
	if (!rx_ring->rx_bi)
		goto err;

	u64_stats_init(&rx_ring->syncp);

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * sizeof(union i40e_32byte_rx_desc);
	rx_ring->size = ALIGN(rx_ring->size, 4096);
	rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);

	if (!rx_ring->desc) {
		dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
			 rx_ring->size);
		goto err;
	}

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	return 0;
err:
	kfree(rx_ring->rx_bi);
	rx_ring->rx_bi = NULL;
	return -ENOMEM;
}

/**
 * i40e_release_rx_desc - Store the new tail value and bump next_to_use
 * @rx_ring: ring to bump
 * @val: new tail index
 **/
static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
{
	rx_ring->next_to_use = val;

	/* update next to alloc since we have filled the ring */
	rx_ring->next_to_alloc = val;

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch. (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	writel(val, rx_ring->tail);
}

/**
 * i40e_rx_offset - Return expected offset into page to access data
 * @rx_ring: Ring we are requesting offset of
 *
 * Returns the offset value for ring into the data buffer.
 */
static inline unsigned int i40e_rx_offset(struct i40e_ring *rx_ring)
{
	return ring_uses_build_skb(rx_ring) ? I40E_SKB_PAD : 0;
}

/**
 * i40e_alloc_mapped_page - recycle or make a new page
 * @rx_ring: ring to use
 * @bi: rx_buffer struct to modify
 *
 * Returns true if the page was successfully allocated or
 * reused.
 **/
static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
				   struct i40e_rx_buffer *bi)
{
	struct page *page = bi->page;
	dma_addr_t dma;

	/* since we are recycling buffers we should seldom need to alloc */
	if (likely(page)) {
		rx_ring->rx_stats.page_reuse_count++;
		return true;
	}

	/* alloc new page for storage */
	page = dev_alloc_pages(i40e_rx_pg_order(rx_ring));
	if (unlikely(!page)) {
		rx_ring->rx_stats.alloc_page_failed++;
		return false;
	}

	/* map page for use */
	dma = dma_map_page_attrs(rx_ring->dev, page, 0,
				 i40e_rx_pg_size(rx_ring),
				 DMA_FROM_DEVICE,
				 I40E_RX_DMA_ATTR);

	/* if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
	 */
	if (dma_mapping_error(rx_ring->dev, dma)) {
		__free_pages(page, i40e_rx_pg_order(rx_ring));
		rx_ring->rx_stats.alloc_page_failed++;
		return false;
	}

	bi->dma = dma;
	bi->page = page;
	bi->page_offset = i40e_rx_offset(rx_ring);

	/* initialize pagecnt_bias to 1 representing we fully own page */
	bi->pagecnt_bias = 1;

	return true;
}

/**
 * i40e_receive_skb - Send a completed packet up the stack
 * @rx_ring: rx ring in play
 * @skb: packet to send up
 * @vlan_tag: vlan tag for packet
 **/
static void i40e_receive_skb(struct i40e_ring *rx_ring,
			     struct sk_buff *skb, u16 vlan_tag)
{
	struct i40e_q_vector *q_vector = rx_ring->q_vector;

	if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
	    (vlan_tag & VLAN_VID_MASK))
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);

	napi_gro_receive(&q_vector->napi, skb);
}

/**
 * i40evf_alloc_rx_buffers - Replace used receive buffers
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 *
 * Returns false if all allocations were successful, true if any fail
 **/
bool i40evf_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
{
	u16 ntu = rx_ring->next_to_use;
	union i40e_rx_desc *rx_desc;
	struct i40e_rx_buffer *bi;

	/* do nothing if no valid netdev defined */
	if (!rx_ring->netdev || !cleaned_count)
		return false;

	rx_desc = I40E_RX_DESC(rx_ring, ntu);
	bi = &rx_ring->rx_bi[ntu];

	do {
		if (!i40e_alloc_mapped_page(rx_ring, bi))
			goto no_buffers;

		/* sync the buffer for use by the device */
		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
						 bi->page_offset,
						 rx_ring->rx_buf_len,
						 DMA_FROM_DEVICE);

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

		rx_desc++;
		bi++;
		ntu++;
		if (unlikely(ntu == rx_ring->count)) {
			rx_desc = I40E_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_bi;
			ntu = 0;
		}

		/* clear the status bits for the next_to_use descriptor */
		rx_desc->wb.qword1.status_error_len = 0;

		cleaned_count--;
	} while (cleaned_count);

	if (rx_ring->next_to_use != ntu)
		i40e_release_rx_desc(rx_ring, ntu);

	return false;

no_buffers:
	if (rx_ring->next_to_use != ntu)
		i40e_release_rx_desc(rx_ring, ntu);

	/* make sure to come back via polling to try again after
	 * allocation failure
	 */
	return true;
}

/**
 * i40e_rx_checksum - Indicate in skb if hw indicated a good cksum
 * @vsi: the VSI we care about
 * @skb: skb currently being received and modified
 * @rx_desc: the receive descriptor
 **/
static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
				    struct sk_buff *skb,
				    union i40e_rx_desc *rx_desc)
{
	struct i40e_rx_ptype_decoded decoded;
	u32 rx_error, rx_status;
	bool ipv4, ipv6;
	u8 ptype;
	u64 qword;

	qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
	ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT;
	rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
		   I40E_RXD_QW1_ERROR_SHIFT;
	rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
		    I40E_RXD_QW1_STATUS_SHIFT;
	decoded = decode_rx_desc_ptype(ptype);

	skb->ip_summed = CHECKSUM_NONE;

	skb_checksum_none_assert(skb);

	/* Rx csum enabled and ip headers found? */
	if (!(vsi->netdev->features & NETIF_F_RXCSUM))
		return;

	/* did the hardware decode the packet and checksum? */
	if (!(rx_status & BIT(I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
		return;

	/* both known and outer_ip must be set for the below code to work */
	if (!(decoded.known && decoded.outer_ip))
		return;

	ipv4 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
	       (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4);
	ipv6 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
	       (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6);

	if (ipv4 &&
	    (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) |
			 BIT(I40E_RX_DESC_ERROR_EIPE_SHIFT))))
		goto checksum_fail;

	/* likely incorrect csum if alternate IP extension headers found */
	if (ipv6 &&
	    rx_status & BIT(I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
		/* don't increment checksum err here, non-fatal err */
		return;

	/* there was some L4 error, count error and punt packet to the stack */
	if (rx_error & BIT(I40E_RX_DESC_ERROR_L4E_SHIFT))
		goto checksum_fail;

	/* handle packets that were not able to be checksummed due
	 * to arrival speed, in this case the stack can compute
	 * the csum.
	 */
	if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
		return;

	/* Only report checksum unnecessary for TCP, UDP, or SCTP */
	switch (decoded.inner_prot) {
	case I40E_RX_PTYPE_INNER_PROT_TCP:
	case I40E_RX_PTYPE_INNER_PROT_UDP:
	case I40E_RX_PTYPE_INNER_PROT_SCTP:
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		/* fall through */
	default:
		break;
	}

	return;

checksum_fail:
	vsi->back->hw_csum_rx_error++;
}

/**
 * i40e_ptype_to_htype - get a hash type
 * @ptype: the ptype value from the descriptor
 *
 * Returns a hash type to be used by skb_set_hash
 **/
static inline int i40e_ptype_to_htype(u8 ptype)
{
	struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);

	if (!decoded.known)
		return PKT_HASH_TYPE_NONE;

	if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
	    decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4)
		return PKT_HASH_TYPE_L4;
	else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
		 decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3)
		return PKT_HASH_TYPE_L3;
	else
		return PKT_HASH_TYPE_L2;
}

/**
 * i40e_rx_hash - set the hash value in the skb
 * @ring: descriptor ring
 * @rx_desc: specific descriptor
 * @skb: skb currently being received and modified
 * @rx_ptype: Rx packet type
 **/
static inline void i40e_rx_hash(struct i40e_ring *ring,
				union i40e_rx_desc *rx_desc,
				struct sk_buff *skb,
				u8 rx_ptype)
{
	u32 hash;
	const __le64 rss_mask =
		cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
			    I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);

	if (!(ring->netdev->features & NETIF_F_RXHASH))
		return;

	if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
		hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
		skb_set_hash(skb, hash, i40e_ptype_to_htype(rx_ptype));
	}
}

/**
 * i40evf_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 * @rx_ptype: the packet type decoded by hardware
 *
 * This function checks the ring, descriptor, and packet information in
 * order to populate the hash, checksum, VLAN, protocol, and
 * other fields within the skb.
 **/
static inline
void i40evf_process_skb_fields(struct i40e_ring *rx_ring,
			       union i40e_rx_desc *rx_desc, struct sk_buff *skb,
			       u8 rx_ptype)
{
	i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);

	i40e_rx_checksum(rx_ring->vsi, skb, rx_desc);

	skb_record_rx_queue(skb, rx_ring->queue_index);

	/* modifies the skb - consumes the enet header */
	skb->protocol = eth_type_trans(skb, rx_ring->netdev);
}

/**
 * i40e_cleanup_headers - Correct empty headers
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @skb: pointer to current skb being fixed
 *
 * Also address the case where we are pulling data in on pages only
 * and as such no data is present in the skb header.
 *
 * In addition if skb is not at least 60 bytes we need to pad it so that
 * it is large enough to qualify as a valid Ethernet frame.
 *
 * Returns true if an error was encountered and skb was freed.
 **/
static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb)
{
	/* if eth_skb_pad returns an error the skb was freed */
	if (eth_skb_pad(skb))
		return true;

	return false;
}

/**
 * i40e_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: rx descriptor ring to store buffers on
 * @old_buff: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the adapter
 **/
static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
			       struct i40e_rx_buffer *old_buff)
{
	struct i40e_rx_buffer *new_buff;
	u16 nta = rx_ring->next_to_alloc;

	new_buff = &rx_ring->rx_bi[nta];

	/* update, and store next to alloc */
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	/* transfer page from old buffer to new buffer */
	new_buff->dma = old_buff->dma;
	new_buff->page = old_buff->page;
	new_buff->page_offset = old_buff->page_offset;
	new_buff->pagecnt_bias = old_buff->pagecnt_bias;
}

/**
 * i40e_page_is_reusable - check if any reuse is possible
 * @page: page struct to check
 *
 * A page is not reusable if it was allocated under low memory
 * conditions, or it's not in the same NUMA node as this CPU.
 */
static inline bool i40e_page_is_reusable(struct page *page)
{
	return (page_to_nid(page) == numa_mem_id()) &&
		!page_is_pfmemalloc(page);
}

/**
 * i40e_can_reuse_rx_page - Determine if this page can be reused by
 * the adapter for another receive
 *
 * @rx_buffer: buffer containing the page
 *
 * If page is reusable, rx_buffer->page_offset is adjusted to point to
 * an unused region in the page.
 *
 * For small pages, @truesize will be a constant value, half the size
 * of the memory at page. We'll attempt to alternate between high and
 * low halves of the page, with one half ready for use by the hardware
 * and the other half being consumed by the stack. We use the page
 * ref count to determine whether the stack has finished consuming the
 * portion of this page that was passed up with a previous packet. If
 * the page ref count is >1, we'll assume the "other" half page is
 * still busy, and this page cannot be reused.
 *
 * For larger pages, @truesize will be the actual space used by the
 * received packet (adjusted upward to an even multiple of the cache
 * line size). This will advance through the page by the amount
 * actually consumed by the received packets while there is still
 * space for a buffer. Each region of larger pages will be used at
 * most once, after which the page will not be reused.
 *
 * In either case, if the page is reusable its refcount is increased.
 **/
static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer)
{
	unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
	struct page *page = rx_buffer->page;

	/* Is any reuse possible? */
	if (unlikely(!i40e_page_is_reusable(page)))
		return false;

#if (PAGE_SIZE < 8192)
	/* if we are only owner of page we can reuse it */
	if (unlikely((page_count(page) - pagecnt_bias) > 1))
		return false;
#else
#define I40E_LAST_OFFSET \
	(SKB_WITH_OVERHEAD(PAGE_SIZE) - I40E_RXBUFFER_2048)
	if (rx_buffer->page_offset > I40E_LAST_OFFSET)
		return false;
#endif

	/* If we have drained the page fragment pool we need to update
	 * the pagecnt_bias and page count so that we fully restock the
	 * number of references the driver holds.
	 */
	if (unlikely(!pagecnt_bias)) {
		page_ref_add(page, USHRT_MAX);
		rx_buffer->pagecnt_bias = USHRT_MAX;
	}

	return true;
}

/**
 * i40e_add_rx_frag - Add contents of Rx buffer to sk_buff
 * @rx_ring: rx descriptor ring to transact packets on
 * @rx_buffer: buffer containing page to add
 * @skb: sk_buff to place the data into
 * @size: packet length from rx_desc
 *
 * This function will add the data contained in rx_buffer->page to the skb.
 * It will just attach the page as a frag to the skb.
 *
 * The function will then update the page offset.
 **/
static void i40e_add_rx_frag(struct i40e_ring *rx_ring,
			     struct i40e_rx_buffer *rx_buffer,
			     struct sk_buff *skb,
			     unsigned int size)
{
#if (PAGE_SIZE < 8192)
	unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
#else
	unsigned int truesize = SKB_DATA_ALIGN(size + i40e_rx_offset(rx_ring));
#endif

	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
			rx_buffer->page_offset, size, truesize);

	/* page is being used so we must update the page offset */
#if (PAGE_SIZE < 8192)
	rx_buffer->page_offset ^= truesize;
#else
	rx_buffer->page_offset += truesize;
#endif
}

/**
 * i40e_get_rx_buffer - Fetch Rx buffer and synchronize data for use
 * @rx_ring: rx descriptor ring to transact packets on
 * @size: size of buffer to add to skb
 *
 * This function will pull an Rx buffer from the ring and synchronize it
 * for use by the CPU.
 */
static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring,
						 const unsigned int size)
{
	struct i40e_rx_buffer *rx_buffer;

	rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
	prefetchw(rx_buffer->page);

	/* we are reusing so sync this buffer for CPU use */
	dma_sync_single_range_for_cpu(rx_ring->dev,
				      rx_buffer->dma,
				      rx_buffer->page_offset,
				      size,
				      DMA_FROM_DEVICE);

	/* We have pulled a buffer for use, so decrement pagecnt_bias */
	rx_buffer->pagecnt_bias--;

	return rx_buffer;
}

/**
 * i40e_construct_skb - Allocate skb and populate it
 * @rx_ring: rx descriptor ring to transact packets on
 * @rx_buffer: rx buffer to pull data from
 * @size: size of buffer to add to skb
 *
 * This function allocates an skb. It then populates it with the page
 * data from the current receive descriptor, taking care to set up the
 * skb correctly.
 */
static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
					  struct i40e_rx_buffer *rx_buffer,
					  unsigned int size)
{
	void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
#if (PAGE_SIZE < 8192)
	unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
#else
	unsigned int truesize = SKB_DATA_ALIGN(size);
#endif
	unsigned int headlen;
	struct sk_buff *skb;

	/* prefetch first cache line of first page */
	prefetch(va);
#if L1_CACHE_BYTES < 128
	prefetch(va + L1_CACHE_BYTES);
#endif

	/* allocate a skb to store the frags */
	skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
			       I40E_RX_HDR_SIZE,
			       GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!skb))
		return NULL;

	/* Determine available headroom for copy */
	headlen = size;
	if (headlen > I40E_RX_HDR_SIZE)
		headlen = eth_get_headlen(va, I40E_RX_HDR_SIZE);

	/* align pull length to size of long to optimize memcpy performance */
	memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));

	/* update all of the pointers */
	size -= headlen;
	if (size) {
		skb_add_rx_frag(skb, 0, rx_buffer->page,
				rx_buffer->page_offset + headlen,
				size, truesize);

		/* buffer is used by skb, update page_offset */
#if (PAGE_SIZE < 8192)
		rx_buffer->page_offset ^= truesize;
#else
		rx_buffer->page_offset += truesize;
#endif
	} else {
		/* buffer is unused, reset bias back to rx_buffer */
		rx_buffer->pagecnt_bias++;
	}

	return skb;
}

/**
 * i40e_build_skb - Build skb around an existing buffer
 * @rx_ring: Rx descriptor ring to transact packets on
 * @rx_buffer: Rx buffer to pull data from
 * @size: size of buffer to add to skb
 *
 * This function builds an skb around an existing Rx buffer, taking care
 * to set up the skb correctly and avoid any memcpy overhead.
 */
static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
				      struct i40e_rx_buffer *rx_buffer,
				      unsigned int size)
{
	void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
#if (PAGE_SIZE < 8192)
	unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
#else
	unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
				SKB_DATA_ALIGN(I40E_SKB_PAD + size);
#endif
	struct sk_buff *skb;

	/* prefetch first cache line of first page */
	prefetch(va);
#if L1_CACHE_BYTES < 128
	prefetch(va + L1_CACHE_BYTES);
#endif
	/* build an skb around the page buffer */
	skb = build_skb(va - I40E_SKB_PAD, truesize);
	if (unlikely(!skb))
		return NULL;

	/* update pointers within the skb to store the data */
	skb_reserve(skb, I40E_SKB_PAD);
	__skb_put(skb, size);

	/* buffer is used by skb, update page_offset */
#if (PAGE_SIZE < 8192)
	rx_buffer->page_offset ^= truesize;
#else
	rx_buffer->page_offset += truesize;
#endif

	return skb;
}

/**
 * i40e_put_rx_buffer - Clean up used buffer and either recycle or free
 * @rx_ring: rx descriptor ring to transact packets on
 * @rx_buffer: rx buffer to pull data from
 *
 * This function will clean up the contents of the rx_buffer. It will
 * either recycle the buffer or unmap it and free the associated resources.
 */
static void i40e_put_rx_buffer(struct i40e_ring *rx_ring,
			       struct i40e_rx_buffer *rx_buffer)
{
	if (i40e_can_reuse_rx_page(rx_buffer)) {
		/* hand second half of page back to the ring */
		i40e_reuse_rx_page(rx_ring, rx_buffer);
		rx_ring->rx_stats.page_reuse_count++;
	} else {
		/* we are not reusing the buffer so unmap it */
		dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
				     i40e_rx_pg_size(rx_ring),
				     DMA_FROM_DEVICE, I40E_RX_DMA_ATTR);
		__page_frag_cache_drain(rx_buffer->page,
					rx_buffer->pagecnt_bias);
	}

	/* clear contents of buffer_info */
	rx_buffer->page = NULL;
}

/**
 * i40e_is_non_eop - process handling of non-EOP buffers
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for current buffer
 * @skb: Current socket buffer containing buffer in progress
 *
 * This function updates next to clean. If the buffer is an EOP buffer
 * this function exits returning false, otherwise it will place the
 * sk_buff in the next buffer to be chained and return true indicating
 * that this is in fact a non-EOP buffer.
 **/
static bool i40e_is_non_eop(struct i40e_ring *rx_ring,
			    union i40e_rx_desc *rx_desc,
			    struct sk_buff *skb)
{
	u32 ntc = rx_ring->next_to_clean + 1;

	/* fetch, update, and store next to clean */
	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;

	prefetch(I40E_RX_DESC(rx_ring, ntc));

	/* if we are the last buffer then there is nothing else to do */
#define I40E_RXD_EOF BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)
	if (likely(i40e_test_staterr(rx_desc, I40E_RXD_EOF)))
		return false;

	rx_ring->rx_stats.non_eop_descs++;

	return true;
}

/**
 * i40e_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
 * @rx_ring: rx descriptor ring to transact packets on
 * @budget: Total limit on number of packets to process
 *
 * This function provides a "bounce buffer" approach to Rx interrupt
 * processing. The advantage to this is that on systems that have
 * expensive overhead for IOMMU access this provides a means of avoiding
 * it by maintaining the mapping of the page to the system.
 *
 * Returns amount of work completed
 **/
static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
	struct sk_buff *skb = rx_ring->skb;
	u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
	bool failure = false;

	while (likely(total_rx_packets < (unsigned int)budget)) {
		struct i40e_rx_buffer *rx_buffer;
		union i40e_rx_desc *rx_desc;
		unsigned int size;
		u16 vlan_tag;
		u8 rx_ptype;
		u64 qword;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
			failure = failure ||
				  i40evf_alloc_rx_buffers(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean);

		/* status_error_len will always be zero for unused descriptors
		 * because it's cleared in cleanup, and overlaps with hdr_addr
		 * which is always zero because packet split isn't used, if the
		 * hardware wrote DD then the length will be non-zero
		 */
		qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we have
		 * verified the descriptor has been written back.
		 */
		dma_rmb();

		size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
		       I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
		if (!size)
			break;

		i40e_trace(clean_rx_irq, rx_ring, rx_desc, skb);
		rx_buffer = i40e_get_rx_buffer(rx_ring, size);

		/* retrieve a buffer from the ring */
		if (skb)
			i40e_add_rx_frag(rx_ring, rx_buffer, skb, size);
		else if (ring_uses_build_skb(rx_ring))
			skb = i40e_build_skb(rx_ring, rx_buffer, size);
		else
			skb = i40e_construct_skb(rx_ring, rx_buffer, size);

		/* exit if we failed to retrieve a buffer */
		if (!skb) {
			rx_ring->rx_stats.alloc_buff_failed++;
			rx_buffer->pagecnt_bias++;
			break;
		}

		i40e_put_rx_buffer(rx_ring, rx_buffer);
		cleaned_count++;

		if (i40e_is_non_eop(rx_ring, rx_desc, skb))
			continue;

		/* ERR_MASK will only have valid bits if EOP set, and
		 * what we are doing here is actually checking
		 * I40E_RX_DESC_ERROR_RXE_SHIFT, since it is the zeroth bit in
		 * the error field
		 */
		if (unlikely(i40e_test_staterr(rx_desc, BIT(I40E_RXD_QW1_ERROR_SHIFT)))) {
			dev_kfree_skb_any(skb);
			skb = NULL;
			continue;
		}

		if (i40e_cleanup_headers(rx_ring, skb)) {
			skb = NULL;
			continue;
		}

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;

		qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
		rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
			   I40E_RXD_QW1_PTYPE_SHIFT;

		/* populate checksum, VLAN, and protocol */
		i40evf_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);

		vlan_tag = (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) ?
			   le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0;

		i40e_trace(clean_rx_irq_rx, rx_ring, rx_desc, skb);
		i40e_receive_skb(rx_ring, skb, vlan_tag);
		skb = NULL;

		/* update budget accounting */
		total_rx_packets++;
	}

	rx_ring->skb = skb;

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->stats.packets += total_rx_packets;
	rx_ring->stats.bytes += total_rx_bytes;
	u64_stats_update_end(&rx_ring->syncp);
	rx_ring->q_vector->rx.total_packets += total_rx_packets;
	rx_ring->q_vector->rx.total_bytes += total_rx_bytes;

	/* guarantee a trip back through this routine if there was a failure */
	return failure ? budget : (int)total_rx_packets;
}

static inline u32 i40e_buildreg_itr(const int type, u16 itr)
{
	u32 val;

	/* We don't bother with setting the CLEARPBA bit as the data sheet
	 * points out doing so is "meaningless since it was already
	 * auto-cleared". The auto-clearing happens when the interrupt is
	 * asserted.
	 *
	 * Hardware errata 28 also indicates that writing to a
	 * xxINT_DYN_CTLx CSR with INTENA_MSK (bit 31) set to 0 will clear
	 * an event in the PBA anyway so we need to rely on the automask
	 * to hold pending events for us until the interrupt is re-enabled.
	 *
	 * The itr value is reported in microseconds, and the register
	 * value is recorded in 2 microsecond units. For this reason we
	 * only need to shift by the interval shift - 1 instead of the
	 * full value.
	 */
	itr &= I40E_ITR_MASK;

	val = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
	      (type << I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) |
	      (itr << (I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT - 1));
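
	/* e.g. an even ITR of 50 usecs lands in the 2-usec INTERVAL field
	 * as 25, since 50 << (INTERVAL_SHIFT - 1) == 25 << INTERVAL_SHIFT.
	 */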
1605
1606 return val;
1607 }
1608
1609 /* a small macro to shorten up some long lines */
1610 #define INTREG I40E_VFINT_DYN_CTLN1
1611
1612 /* The act of updating the ITR will cause it to immediately trigger. In order
1613 * to prevent this from throwing off adaptive update statistics we defer the
1614 * update so that it can only happen so often. So after either Tx or Rx are
1615 * updated we make the adaptive scheme wait until either the ITR completely
1616 * expires via the next_update expiration or we have been through at least
1617 * 3 interrupts.
1618 */
1619 #define ITR_COUNTDOWN_START 3
1620
1621 /**
1622 * i40e_update_enable_itr - Update itr and re-enable MSIX interrupt
1623 * @vsi: the VSI we care about
1624 * @q_vector: q_vector for which itr is being updated and interrupt enabled
1625 *
1626 **/
i40e_update_enable_itr(struct i40e_vsi * vsi,struct i40e_q_vector * q_vector)1627 static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
1628 struct i40e_q_vector *q_vector)
1629 {
1630 struct i40e_hw *hw = &vsi->back->hw;
1631 u32 intval;
1632
1633 /* These will do nothing if dynamic updates are not enabled */
1634 i40e_update_itr(q_vector, &q_vector->tx);
1635 i40e_update_itr(q_vector, &q_vector->rx);
1636
1637 /* This block of logic allows us to get away with only updating
1638 * one ITR value with each interrupt. The idea is to perform a
1639 * pseudo-lazy update with the following criteria.
1640 *
1641 * 1. Rx is given higher priority than Tx if both are in same state
1642 * 2. If we must reduce an ITR that is given highest priority.
1643 * 3. We then give priority to increasing ITR based on amount.
1644 */
	if (q_vector->rx.target_itr < q_vector->rx.current_itr) {
		/* Rx ITR needs to be reduced, this is highest priority */
		intval = i40e_buildreg_itr(I40E_RX_ITR,
					   q_vector->rx.target_itr);
		q_vector->rx.current_itr = q_vector->rx.target_itr;
		q_vector->itr_countdown = ITR_COUNTDOWN_START;
	} else if ((q_vector->tx.target_itr < q_vector->tx.current_itr) ||
		   ((q_vector->rx.target_itr - q_vector->rx.current_itr) <
		    (q_vector->tx.target_itr - q_vector->tx.current_itr))) {
		/* Tx ITR needs to be reduced, this is second priority
		 * Tx ITR needs to be increased more than Rx, fourth priority
		 */
		intval = i40e_buildreg_itr(I40E_TX_ITR,
					   q_vector->tx.target_itr);
		q_vector->tx.current_itr = q_vector->tx.target_itr;
		q_vector->itr_countdown = ITR_COUNTDOWN_START;
	} else if (q_vector->rx.current_itr != q_vector->rx.target_itr) {
		/* Rx ITR needs to be increased, third priority */
		intval = i40e_buildreg_itr(I40E_RX_ITR,
					   q_vector->rx.target_itr);
		q_vector->rx.current_itr = q_vector->rx.target_itr;
		q_vector->itr_countdown = ITR_COUNTDOWN_START;
	} else {
		/* No ITR update, lowest priority */
		intval = i40e_buildreg_itr(I40E_ITR_NONE, 0);
		if (q_vector->itr_countdown)
			q_vector->itr_countdown--;
	}

	if (!test_bit(__I40E_VSI_DOWN, vsi->state))
		wr32(hw, INTREG(q_vector->reg_idx), intval);
}

/**
 * i40evf_napi_poll - NAPI polling Rx/Tx cleanup routine
 * @napi: napi struct with our device's info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean all queues associated with a q_vector.
 *
 * Returns the amount of work done
 **/
int i40evf_napi_poll(struct napi_struct *napi, int budget)
{
	struct i40e_q_vector *q_vector =
			       container_of(napi, struct i40e_q_vector, napi);
	struct i40e_vsi *vsi = q_vector->vsi;
	struct i40e_ring *ring;
	bool clean_complete = true;
	bool arm_wb = false;
	int budget_per_ring;
	int work_done = 0;

	if (test_bit(__I40E_VSI_DOWN, vsi->state)) {
		napi_complete(napi);
		return 0;
	}

	/* Since the actual Tx work is minimal, we can give the Tx a larger
	 * budget and be more aggressive about cleaning up the Tx descriptors.
	 */
	i40e_for_each_ring(ring, q_vector->tx) {
		if (!i40e_clean_tx_irq(vsi, ring, budget)) {
			clean_complete = false;
			continue;
		}
		arm_wb |= ring->arm_wb;
		ring->arm_wb = false;
	}

	/* Handle case where we are called by netpoll with a budget of 0 */
	if (budget <= 0)
		goto tx_only;

	/* We attempt to distribute budget to each Rx queue fairly, but don't
	 * allow the budget to go below 1 because that would exit polling early.
	 */
	budget_per_ring = max(budget / q_vector->num_ringpairs, 1);
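	/* Illustrative example (not part of the driver): with the common
	 * NAPI budget of 64 packets and two ring pairs on this vector,
	 * each Rx ring is polled with budget_per_ring = max(64 / 2, 1) = 32.
	 */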

	i40e_for_each_ring(ring, q_vector->rx) {
		int cleaned = i40e_clean_rx_irq(ring, budget_per_ring);

		work_done += cleaned;
		/* if we clean as many as budgeted, we must not be done */
		if (cleaned >= budget_per_ring)
			clean_complete = false;
	}

	/* If work not completed, return budget and polling will return */
	if (!clean_complete) {
		int cpu_id = smp_processor_id();

		/* It is possible that the interrupt affinity has changed but,
		 * if the cpu is pegged at 100%, polling will never exit while
		 * traffic continues and the interrupt will be stuck on this
		 * cpu.  We check to make sure affinity is correct before we
		 * continue to poll, otherwise we must stop polling so the
		 * interrupt can move to the correct cpu.
		 */
		if (!cpumask_test_cpu(cpu_id, &q_vector->affinity_mask)) {
			/* Tell napi that we are done polling */
			napi_complete_done(napi, work_done);

			/* Force an interrupt */
			i40evf_force_wb(vsi, q_vector);

			/* Return budget-1 so that polling stops */
			return budget - 1;
		}
tx_only:
		if (arm_wb) {
			q_vector->tx.ring[0].tx_stats.tx_force_wb++;
			i40e_enable_wb_on_itr(vsi, q_vector);
		}
		return budget;
	}

	if (vsi->back->flags & I40E_TXR_FLAGS_WB_ON_ITR)
		q_vector->arm_wb_state = false;

	/* Work is done so exit the polling mode and re-enable the interrupt */
	napi_complete_done(napi, work_done);

	i40e_update_enable_itr(vsi, q_vector);

	return min(work_done, budget - 1);
}

/**
 * i40evf_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
 * @skb: send buffer
 * @tx_ring: ring to send buffer on
 * @flags: the tx flags to be set
 *
 * Checks the skb and sets up the generic transmit flags related to VLAN
 * tagging for the HW, such as VLAN, DCB, etc.
 *
 * Returns an error code to indicate the frame should be dropped upon error,
 * and otherwise returns 0 to indicate the flags have been set properly.
 **/
static inline int i40evf_tx_prepare_vlan_flags(struct sk_buff *skb,
					       struct i40e_ring *tx_ring,
					       u32 *flags)
{
	__be16 protocol = skb->protocol;
	u32 tx_flags = 0;

	if (protocol == htons(ETH_P_8021Q) &&
	    !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
		/* When HW VLAN acceleration is turned off by the user the
		 * stack sets the protocol to 8021q so that the driver
		 * can take any steps required to support the SW only
		 * VLAN handling.  In our case the driver doesn't need
		 * to take any further steps so just set the protocol
		 * to the encapsulated ethertype.
		 */
		skb->protocol = vlan_get_protocol(skb);
		goto out;
	}

	/* if we have a HW VLAN tag being added, default to the HW one */
	if (skb_vlan_tag_present(skb)) {
		tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
		tx_flags |= I40E_TX_FLAGS_HW_VLAN;
	/* else if it is a SW VLAN, check the next protocol and store the tag */
	} else if (protocol == htons(ETH_P_8021Q)) {
		struct vlan_hdr *vhdr, _vhdr;

		vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
		if (!vhdr)
			return -EINVAL;

		protocol = vhdr->h_vlan_encapsulated_proto;
		tx_flags |= ntohs(vhdr->h_vlan_TCI) << I40E_TX_FLAGS_VLAN_SHIFT;
		tx_flags |= I40E_TX_FLAGS_SW_VLAN;
	}

out:
	*flags = tx_flags;
	return 0;
}

/**
 * i40e_tso - set up the tso context descriptor
 * @first: pointer to first Tx buffer for xmit
 * @hdr_len: ptr to the size of the packet header
 * @cd_type_cmd_tso_mss: Quad Word 1
 *
 * Returns 0 if no TSO can happen, 1 if tso is going, or error
 **/
static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len,
		    u64 *cd_type_cmd_tso_mss)
{
	struct sk_buff *skb = first->skb;
	u64 cd_cmd, cd_tso_len, cd_mss;
	union {
		struct iphdr *v4;
		struct ipv6hdr *v6;
		unsigned char *hdr;
	} ip;
	union {
		struct tcphdr *tcp;
		struct udphdr *udp;
		unsigned char *hdr;
	} l4;
	u32 paylen, l4_offset;
	u16 gso_segs, gso_size;
	int err;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (!skb_is_gso(skb))
		return 0;

	err = skb_cow_head(skb, 0);
	if (err < 0)
		return err;

	ip.hdr = skb_network_header(skb);
	l4.hdr = skb_transport_header(skb);

	/* initialize outer IP header fields */
	if (ip.v4->version == 4) {
		ip.v4->tot_len = 0;
		ip.v4->check = 0;
	} else {
		ip.v6->payload_len = 0;
	}

	if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
					 SKB_GSO_GRE_CSUM |
					 SKB_GSO_IPXIP4 |
					 SKB_GSO_IPXIP6 |
					 SKB_GSO_UDP_TUNNEL |
					 SKB_GSO_UDP_TUNNEL_CSUM)) {
		if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
		    (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) {
			l4.udp->len = 0;

			/* determine offset of outer transport header */
			l4_offset = l4.hdr - skb->data;

			/* remove payload length from outer checksum */
			paylen = skb->len - l4_offset;
			csum_replace_by_diff(&l4.udp->check,
					     (__force __wsum)htonl(paylen));
		}

		/* reset pointers to inner headers */
		ip.hdr = skb_inner_network_header(skb);
		l4.hdr = skb_inner_transport_header(skb);

		/* initialize inner IP header fields */
		if (ip.v4->version == 4) {
			ip.v4->tot_len = 0;
			ip.v4->check = 0;
		} else {
			ip.v6->payload_len = 0;
		}
	}

	/* determine offset of inner transport header */
	l4_offset = l4.hdr - skb->data;

	/* remove payload length from inner checksum */
	paylen = skb->len - l4_offset;
	csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen));

	/* compute length of segmentation header */
	*hdr_len = (l4.tcp->doff * 4) + l4_offset;
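	/* Illustrative example (not from the driver): for a plain TCP
	 * segment with a 14-byte Ethernet header, a 20-byte IPv4 header,
	 * and a 20-byte TCP header (doff = 5), l4_offset is 34 and
	 * *hdr_len becomes (5 * 4) + 34 = 54 bytes.
	 */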

	/* pull values out of skb_shinfo */
	gso_size = skb_shinfo(skb)->gso_size;
	gso_segs = skb_shinfo(skb)->gso_segs;

	/* update GSO size and bytecount with header size */
	first->gso_segs = gso_segs;
	first->bytecount += (first->gso_segs - 1) * *hdr_len;
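	/* Illustrative example (hypothetical numbers): a 2950-byte TSO skb
	 * with gso_size = 1448 and *hdr_len = 54 is cut into 2 segments on
	 * the wire; the header is replicated once, so bytecount grows by
	 * (2 - 1) * 54 = 54 bytes for accurate byte accounting.
	 */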

	/* find the field values */
	cd_cmd = I40E_TX_CTX_DESC_TSO;
	cd_tso_len = skb->len - *hdr_len;
	cd_mss = gso_size;
	*cd_type_cmd_tso_mss |= (cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
				(cd_tso_len << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
				(cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
	return 1;
}

/**
 * i40e_tx_enable_csum - Enable Tx checksum offloads
 * @skb: send buffer
 * @tx_flags: pointer to Tx flags currently set
 * @td_cmd: Tx descriptor command bits to set
 * @td_offset: Tx descriptor header offsets to set
 * @tx_ring: Tx descriptor ring
 * @cd_tunneling: ptr to context desc bits
 **/
static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
			       u32 *td_cmd, u32 *td_offset,
			       struct i40e_ring *tx_ring,
			       u32 *cd_tunneling)
{
	union {
		struct iphdr *v4;
		struct ipv6hdr *v6;
		unsigned char *hdr;
	} ip;
	union {
		struct tcphdr *tcp;
		struct udphdr *udp;
		unsigned char *hdr;
	} l4;
	unsigned char *exthdr;
	u32 offset, cmd = 0;
	__be16 frag_off;
	u8 l4_proto = 0;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	ip.hdr = skb_network_header(skb);
	l4.hdr = skb_transport_header(skb);

	/* compute outer L2 header size */
	offset = ((ip.hdr - skb->data) / 2) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
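	/* Illustrative example (not from the driver): the MACLEN field is
	 * expressed in 2-byte words, so an untagged 14-byte Ethernet
	 * header yields 14 / 2 = 7, shifted into position in the offset.
	 */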

	if (skb->encapsulation) {
		u32 tunnel = 0;
		/* define outer network header type */
		if (*tx_flags & I40E_TX_FLAGS_IPV4) {
			tunnel |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
				  I40E_TX_CTX_EXT_IP_IPV4 :
				  I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;

			l4_proto = ip.v4->protocol;
		} else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
			tunnel |= I40E_TX_CTX_EXT_IP_IPV6;

			exthdr = ip.hdr + sizeof(*ip.v6);
			l4_proto = ip.v6->nexthdr;
			if (l4.hdr != exthdr)
				ipv6_skip_exthdr(skb, exthdr - skb->data,
						 &l4_proto, &frag_off);
		}

		/* define outer transport */
		switch (l4_proto) {
		case IPPROTO_UDP:
			tunnel |= I40E_TXD_CTX_UDP_TUNNELING;
			*tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL;
			break;
		case IPPROTO_GRE:
			tunnel |= I40E_TXD_CTX_GRE_TUNNELING;
			*tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL;
			break;
		case IPPROTO_IPIP:
		case IPPROTO_IPV6:
			*tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL;
			l4.hdr = skb_inner_network_header(skb);
			break;
		default:
			if (*tx_flags & I40E_TX_FLAGS_TSO)
				return -1;

			skb_checksum_help(skb);
			return 0;
		}

		/* compute outer L3 header size */
		tunnel |= ((l4.hdr - ip.hdr) / 4) <<
			  I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT;

		/* switch IP header pointer from outer to inner header */
		ip.hdr = skb_inner_network_header(skb);

		/* compute tunnel header size */
		tunnel |= ((ip.hdr - l4.hdr) / 2) <<
			  I40E_TXD_CTX_QW0_NATLEN_SHIFT;
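		/* Illustrative example (hypothetical VXLAN frame): a 20-byte
		 * outer IPv4 header gives EXT_IPLEN = 20 / 4 = 5 (in 4-byte
		 * words), and the 8-byte UDP + 8-byte VXLAN + 14-byte inner
		 * Ethernet span gives NATLEN = 30 / 2 = 15 (in 2-byte words).
		 */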

		/* indicate if we need to offload outer UDP header */
		if ((*tx_flags & I40E_TX_FLAGS_TSO) &&
		    !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
		    (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
			tunnel |= I40E_TXD_CTX_QW0_L4T_CS_MASK;

		/* record tunnel offload values */
		*cd_tunneling |= tunnel;

		/* switch L4 header pointer from outer to inner */
		l4.hdr = skb_inner_transport_header(skb);
		l4_proto = 0;

		/* reset type as we transition from outer to inner headers */
		*tx_flags &= ~(I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6);
		if (ip.v4->version == 4)
			*tx_flags |= I40E_TX_FLAGS_IPV4;
		if (ip.v6->version == 6)
			*tx_flags |= I40E_TX_FLAGS_IPV6;
	}

	/* Enable IP checksum offloads */
	if (*tx_flags & I40E_TX_FLAGS_IPV4) {
		l4_proto = ip.v4->protocol;
		/* the stack computes the IP header already, the only time we
		 * need the hardware to recompute it is in the case of TSO.
		 */
		cmd |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
		       I40E_TX_DESC_CMD_IIPT_IPV4_CSUM :
		       I40E_TX_DESC_CMD_IIPT_IPV4;
	} else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
		cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;

		exthdr = ip.hdr + sizeof(*ip.v6);
		l4_proto = ip.v6->nexthdr;
		if (l4.hdr != exthdr)
			ipv6_skip_exthdr(skb, exthdr - skb->data,
					 &l4_proto, &frag_off);
	}

	/* compute inner L3 header size */
	offset |= ((l4.hdr - ip.hdr) / 4) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;

	/* Enable L4 checksum offloads */
	switch (l4_proto) {
	case IPPROTO_TCP:
		/* enable checksum offloads */
		cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
		offset |= l4.tcp->doff << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
		break;
	case IPPROTO_SCTP:
		/* enable SCTP checksum offload */
		cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
		offset |= (sizeof(struct sctphdr) >> 2) <<
			  I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
		break;
	case IPPROTO_UDP:
		/* enable UDP checksum offload */
		cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
		offset |= (sizeof(struct udphdr) >> 2) <<
			  I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
		break;
	default:
		if (*tx_flags & I40E_TX_FLAGS_TSO)
			return -1;
		skb_checksum_help(skb);
		return 0;
	}

	*td_cmd |= cmd;
	*td_offset |= offset;

	return 1;
}

/**
 * i40e_create_tx_ctx - Build the Tx context descriptor
 * @tx_ring: ring to create the descriptor on
 * @cd_type_cmd_tso_mss: Quad Word 1
 * @cd_tunneling: Quad Word 0 - bits 0-31
 * @cd_l2tag2: Quad Word 0 - bits 32-63
 **/
static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
			       const u64 cd_type_cmd_tso_mss,
			       const u32 cd_tunneling, const u32 cd_l2tag2)
{
	struct i40e_tx_context_desc *context_desc;
	int i = tx_ring->next_to_use;

	if ((cd_type_cmd_tso_mss == I40E_TX_DESC_DTYPE_CONTEXT) &&
	    !cd_tunneling && !cd_l2tag2)
		return;

	/* grab the next descriptor */
	context_desc = I40E_TX_CTXTDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	/* cpu_to_le32 and assign to struct fields */
	context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
	context_desc->l2tag2 = cpu_to_le16(cd_l2tag2);
	context_desc->rsvd = cpu_to_le16(0);
	context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
}

/**
 * __i40evf_chk_linearize - Check if there are more than 8 buffers per packet
 * @skb: send buffer
 *
 * Note: Our HW can't DMA more than 8 buffers to build a packet on the wire
 * and so we need to figure out the cases where we need to linearize the skb.
 *
 * For TSO we need to count the TSO header and segment payload separately.
 * As such we need to check cases where we have 7 fragments or more as we
 * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
 * the segment payload in the first descriptor, and another 7 for the
 * fragments.
 **/
bool __i40evf_chk_linearize(struct sk_buff *skb)
{
	const struct skb_frag_struct *frag, *stale;
	int nr_frags, sum;

	/* no need to check if number of frags is less than 7 */
	nr_frags = skb_shinfo(skb)->nr_frags;
	if (nr_frags < (I40E_MAX_BUFFER_TXD - 1))
		return false;

	/* We need to walk through the list and validate that each group
	 * of 6 fragments totals at least gso_size.
	 */
	nr_frags -= I40E_MAX_BUFFER_TXD - 2;
	frag = &skb_shinfo(skb)->frags[0];

	/* Initialize size to the negative value of gso_size minus 1.  We
	 * use this as the worst case scenario in which the frag ahead
	 * of us only provides one byte which is why we are limited to 6
	 * descriptors for a single transmit as the header and previous
	 * fragment are already consuming 2 descriptors.
	 */
	sum = 1 - skb_shinfo(skb)->gso_size;
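	/* Illustrative example (hypothetical sizes): with gso_size = 9000
	 * and six leading frags of 1500 bytes each, the running sum reaches
	 * 1 - 9000 + 6 * 1500 = 1 after the first window of frags, so the
	 * check barely passes; were any of those frags smaller, sum would
	 * go negative below and the skb would have to be linearized.
	 */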

	/* Add size of frags 0 through 4 to create our initial sum */
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);

	/* Walk through fragments adding latest fragment, testing it, and
	 * then removing stale fragments from the sum.
	 */
	for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
		int stale_size = skb_frag_size(stale);

		sum += skb_frag_size(frag++);

		/* The stale fragment may present us with a smaller
		 * descriptor than the actual fragment size.  To account
		 * for that we need to remove all the data on the front and
		 * figure out what the remainder would be in the last
		 * descriptor associated with the fragment.
		 */
		if (stale_size > I40E_MAX_DATA_PER_TXD) {
			int align_pad = -(stale->page_offset) &
					(I40E_MAX_READ_REQ_SIZE - 1);

			sum -= align_pad;
			stale_size -= align_pad;

			do {
				sum -= I40E_MAX_DATA_PER_TXD_ALIGNED;
				stale_size -= I40E_MAX_DATA_PER_TXD_ALIGNED;
			} while (stale_size > I40E_MAX_DATA_PER_TXD);
		}

		/* if sum is negative we failed to make sufficient progress */
		if (sum < 0)
			return true;

		if (!nr_frags--)
			break;

		sum -= stale_size;
	}

	return false;
}

/**
 * __i40evf_maybe_stop_tx - 2nd level check for tx stop conditions
 * @tx_ring: the ring to be checked
 * @size: how many descriptors we want to assure are available
 *
 * Returns -EBUSY if a stop is needed, else 0
 **/
int __i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
{
	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
	/* Memory barrier before checking head and tail */
	smp_mb();

	/* Check again in case another CPU has just made room available. */
	if (likely(I40E_DESC_UNUSED(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! - use start_queue because it doesn't call schedule */
	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
	++tx_ring->tx_stats.restart_queue;
	return 0;
}

/**
 * i40evf_tx_map - Build the Tx descriptor
 * @tx_ring: ring to send buffer on
 * @skb: send buffer
 * @first: first buffer info buffer to use
 * @tx_flags: collected send information
 * @hdr_len: size of the packet header
 * @td_cmd: the command field in the descriptor
 * @td_offset: offset for checksum or crc
 **/
static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
				 struct i40e_tx_buffer *first, u32 tx_flags,
				 const u8 hdr_len, u32 td_cmd, u32 td_offset)
{
	unsigned int data_len = skb->data_len;
	unsigned int size = skb_headlen(skb);
	struct skb_frag_struct *frag;
	struct i40e_tx_buffer *tx_bi;
	struct i40e_tx_desc *tx_desc;
	u16 i = tx_ring->next_to_use;
	u32 td_tag = 0;
	dma_addr_t dma;

	if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
		td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
		td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >>
			 I40E_TX_FLAGS_VLAN_SHIFT;
	}

	first->tx_flags = tx_flags;

	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);

	tx_desc = I40E_TX_DESC(tx_ring, i);
	tx_bi = first;

	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
		unsigned int max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;

		if (dma_mapping_error(tx_ring->dev, dma))
			goto dma_error;

		/* record length, and DMA address */
		dma_unmap_len_set(tx_bi, len, size);
		dma_unmap_addr_set(tx_bi, dma, dma);

		/* align size to end of page */
		max_data += -dma & (I40E_MAX_READ_REQ_SIZE - 1);
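		/* Illustrative example (hypothetical address, assuming the
		 * driver's 4096-byte I40E_MAX_READ_REQ_SIZE): for a mapping
		 * ending in 0xf400, -dma & 4095 = 0xc00 (3072), so max_data
		 * is extended until the first chunk ends exactly on a
		 * read-request boundary.
		 */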
		tx_desc->buffer_addr = cpu_to_le64(dma);

		while (unlikely(size > I40E_MAX_DATA_PER_TXD)) {
			tx_desc->cmd_type_offset_bsz =
				build_ctob(td_cmd, td_offset,
					   max_data, td_tag);

			tx_desc++;
			i++;

			if (i == tx_ring->count) {
				tx_desc = I40E_TX_DESC(tx_ring, 0);
				i = 0;
			}

			dma += max_data;
			size -= max_data;

			max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;
			tx_desc->buffer_addr = cpu_to_le64(dma);
		}

		if (likely(!data_len))
			break;

		tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
							  size, td_tag);

		tx_desc++;
		i++;

		if (i == tx_ring->count) {
			tx_desc = I40E_TX_DESC(tx_ring, 0);
			i = 0;
		}

		size = skb_frag_size(frag);
		data_len -= size;

		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
				       DMA_TO_DEVICE);

		tx_bi = &tx_ring->tx_bi[i];
	}

	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);

	i++;
	if (i == tx_ring->count)
		i = 0;

	tx_ring->next_to_use = i;

	i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);

	/* write last descriptor with RS and EOP bits */
	td_cmd |= I40E_TXD_CMD;
	tx_desc->cmd_type_offset_bsz =
			build_ctob(td_cmd, td_offset, size, td_tag);

	/* Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch.
	 *
	 * We also use this memory barrier to make certain all of the
	 * status bits have been updated before next_to_watch is written.
	 */
	wmb();

	/* set next_to_watch value indicating a packet is present */
	first->next_to_watch = tx_desc;

	/* notify HW of packet */
	if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
		writel(i, tx_ring->tail);

		/* We need this if more than one processor can write to our
		 * tail at a time; it synchronizes IO on IA64/Altix systems.
		 */
		mmiowb();
	}

	return;

dma_error:
	dev_info(tx_ring->dev, "TX DMA map failed\n");

	/* clear dma mappings for failed tx_bi map */
	for (;;) {
		tx_bi = &tx_ring->tx_bi[i];
		i40e_unmap_and_free_tx_resource(tx_ring, tx_bi);
		if (tx_bi == first)
			break;
		if (i == 0)
			i = tx_ring->count;
		i--;
	}

	tx_ring->next_to_use = i;
}

/**
 * i40e_xmit_frame_ring - Sends buffer on Tx ring
 * @skb: send buffer
 * @tx_ring: ring to send buffer on
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 **/
static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
					struct i40e_ring *tx_ring)
{
	u64 cd_type_cmd_tso_mss = I40E_TX_DESC_DTYPE_CONTEXT;
	u32 cd_tunneling = 0, cd_l2tag2 = 0;
	struct i40e_tx_buffer *first;
	u32 td_offset = 0;
	u32 tx_flags = 0;
	__be16 protocol;
	u32 td_cmd = 0;
	u8 hdr_len = 0;
	int tso, count;

	/* prefetch the data, we'll need it later */
	prefetch(skb->data);

	i40e_trace(xmit_frame_ring, skb, tx_ring);

	count = i40e_xmit_descriptor_count(skb);
	if (i40e_chk_linearize(skb, count)) {
		if (__skb_linearize(skb)) {
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
		count = i40e_txd_use_count(skb->len);
		tx_ring->tx_stats.tx_linearize++;
	}

	/* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
	 *       + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
	 *       + 4 desc gap to avoid the cache line where head is,
	 *       + 1 desc for context descriptor,
	 * otherwise try next time
	 */
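	/* Illustrative example (hypothetical skb): a linear 3000-byte frame
	 * fits in a single data descriptor, so count = 1 and the check
	 * below requires 1 + 4 + 1 = 6 free descriptors before the frame
	 * is accepted for transmit.
	 */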
	if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
		tx_ring->tx_stats.tx_busy++;
		return NETDEV_TX_BUSY;
	}

	/* record the location of the first descriptor for this packet */
	first = &tx_ring->tx_bi[tx_ring->next_to_use];
	first->skb = skb;
	first->bytecount = skb->len;
	first->gso_segs = 1;

	/* prepare the xmit flags */
	if (i40evf_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
		goto out_drop;

	/* obtain protocol of skb */
	protocol = vlan_get_protocol(skb);

	/* setup IPv4/IPv6 offloads */
	if (protocol == htons(ETH_P_IP))
		tx_flags |= I40E_TX_FLAGS_IPV4;
	else if (protocol == htons(ETH_P_IPV6))
		tx_flags |= I40E_TX_FLAGS_IPV6;

	tso = i40e_tso(first, &hdr_len, &cd_type_cmd_tso_mss);

	if (tso < 0)
		goto out_drop;
	else if (tso)
		tx_flags |= I40E_TX_FLAGS_TSO;

	/* Always offload the checksum, since it's in the data descriptor */
	tso = i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
				  tx_ring, &cd_tunneling);
	if (tso < 0)
		goto out_drop;

	skb_tx_timestamp(skb);

	/* always enable CRC insertion offload */
	td_cmd |= I40E_TX_DESC_CMD_ICRC;

	i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
			   cd_tunneling, cd_l2tag2);

	i40evf_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
		      td_cmd, td_offset);

	return NETDEV_TX_OK;

out_drop:
	i40e_trace(xmit_frame_ring_drop, first->skb, tx_ring);
	dev_kfree_skb_any(first->skb);
	first->skb = NULL;
	return NETDEV_TX_OK;
}

/**
 * i40evf_xmit_frame - Selects the correct VSI and Tx queue to send buffer
 * @skb: send buffer
 * @netdev: network interface device structure
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 **/
netdev_tx_t i40evf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct i40evf_adapter *adapter = netdev_priv(netdev);
	struct i40e_ring *tx_ring = &adapter->tx_rings[skb->queue_mapping];

	/* hardware can't handle really short frames, hardware padding works
	 * beyond this point
	 */
	if (unlikely(skb->len < I40E_MIN_TX_LEN)) {
		if (skb_pad(skb, I40E_MIN_TX_LEN - skb->len))
			return NETDEV_TX_OK;
		skb->len = I40E_MIN_TX_LEN;
		skb_set_tail_pointer(skb, I40E_MIN_TX_LEN);
	}
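	/* Illustrative example (hypothetical frame, assuming the driver's
	 * 17-byte I40E_MIN_TX_LEN): a 14-byte frame is zero-padded by
	 * skb_pad() and its length and tail pointer are bumped to 17
	 * before the frame is handed to the ring.
	 */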

	return i40e_xmit_frame_ring(skb, tx_ring);
}