// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2013 Solarflare Communications Inc.
 */

#include <linux/pci.h>
#include <linux/tcp.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/ipv6.h>
#include <linux/if_ether.h>
#include <linux/highmem.h>
#include <linux/cache.h>
#include "net_driver.h"
#include "efx.h"
#include "io.h"
#include "nic.h"
#include "tx.h"
#include "workarounds.h"
#include "ef10_regs.h"

#ifdef EFX_USE_PIO

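/* Maximum packet size eligible for PIO transmission (copied to the NIC's
 * PIO buffer with write-combined MMIO writes), rounded up to a whole
 * number of cache lines.
 */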
#define EFX_PIOBUF_SIZE_DEF ALIGN(256, L1_CACHE_BYTES)
unsigned int efx_piobuf_size __read_mostly = EFX_PIOBUF_SIZE_DEF;

#endif /* EFX_USE_PIO */

static inline u8 *efx_tx_get_copy_buffer(struct efx_tx_queue *tx_queue,
					 struct efx_tx_buffer *buffer)
{
	unsigned int index = efx_tx_queue_get_insert_index(tx_queue);
	struct efx_buffer *page_buf =
		&tx_queue->cb_page[index >> (PAGE_SHIFT - EFX_TX_CB_ORDER)];
	unsigned int offset =
		((index << EFX_TX_CB_ORDER) + NET_IP_ALIGN) & (PAGE_SIZE - 1);

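	/* Copy-buffer pages are allocated lazily, on first use, with
	 * GFP_ATOMIC since this runs on the transmit path.
	 */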
	if (unlikely(!page_buf->addr) &&
	    efx_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE,
				 GFP_ATOMIC))
		return NULL;
	buffer->dma_addr = page_buf->dma_addr + offset;
	buffer->unmap_len = 0;
	return (u8 *)page_buf->addr + offset;
}

u8 *efx_tx_get_copy_buffer_limited(struct efx_tx_queue *tx_queue,
				   struct efx_tx_buffer *buffer, size_t len)
{
	if (len > EFX_TX_CB_SIZE)
		return NULL;
	return efx_tx_get_copy_buffer(tx_queue, buffer);
}

static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
			       struct efx_tx_buffer *buffer,
			       unsigned int *pkts_compl,
			       unsigned int *bytes_compl)
{
	if (buffer->unmap_len) {
		struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
		dma_addr_t unmap_addr = buffer->dma_addr - buffer->dma_offset;

		if (buffer->flags & EFX_TX_BUF_MAP_SINGLE)
			dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dma_dev, unmap_addr, buffer->unmap_len,
				       DMA_TO_DEVICE);
		buffer->unmap_len = 0;
	}

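	/* For the last buffer of a packet, account the completion and
	 * deliver any pending hardware TX timestamp before freeing the skb.
	 */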
	if (buffer->flags & EFX_TX_BUF_SKB) {
		struct sk_buff *skb = (struct sk_buff *)buffer->skb;

		EFX_WARN_ON_PARANOID(!pkts_compl || !bytes_compl);
		(*pkts_compl)++;
		(*bytes_compl) += skb->len;
		if (tx_queue->timestamping &&
		    (tx_queue->completed_timestamp_major ||
		     tx_queue->completed_timestamp_minor)) {
			struct skb_shared_hwtstamps hwtstamp;

			hwtstamp.hwtstamp =
				efx_ptp_nic_to_kernel_time(tx_queue);
			skb_tstamp_tx(skb, &hwtstamp);

			tx_queue->completed_timestamp_major = 0;
			tx_queue->completed_timestamp_minor = 0;
		}
		dev_consume_skb_any((struct sk_buff *)buffer->skb);
		netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
			   "TX queue %d transmission id %x complete\n",
			   tx_queue->queue, tx_queue->read_count);
	}

	buffer->len = 0;
	buffer->flags = 0;
}

unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
{
	/* Header and payload descriptor for each output segment, plus
	 * one for every input fragment boundary within a segment
	 */
	unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS;

	/* Possibly one more per segment for option descriptors */
	if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
		max_descs += EFX_TSO_MAX_SEGS;

	/* Possibly more for PCIe page boundaries within input fragments */
	if (PAGE_SIZE > EFX_PAGE_SIZE)
		max_descs += max_t(unsigned int, MAX_SKB_FRAGS,
				   DIV_ROUND_UP(GSO_MAX_SIZE, EFX_PAGE_SIZE));

	return max_descs;
}

static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1)
{
	/* We need to consider both queues that the net core sees as one */
	struct efx_tx_queue *txq2 = efx_tx_queue_partner(txq1);
	struct efx_nic *efx = txq1->efx;
	unsigned int fill_level;

	fill_level = max(txq1->insert_count - txq1->old_read_count,
			 txq2->insert_count - txq2->old_read_count);
	if (likely(fill_level < efx->txq_stop_thresh))
		return;

	/* We used the stale old_read_count above, which gives us a
	 * pessimistic estimate of the fill level (which may even
	 * validly be >= efx->txq_entries). Now try again using
	 * read_count (more likely to be a cache miss).
	 *
	 * If we read read_count and then conditionally stop the
	 * queue, it is possible for the completion path to race with
	 * us and complete all outstanding descriptors in the middle,
	 * after which there will be no more completions to wake it.
	 * Therefore we stop the queue first, then read read_count
	 * (with a memory barrier to ensure the ordering), then
	 * restart the queue if the fill level turns out to be low
	 * enough.
	 */
	netif_tx_stop_queue(txq1->core_txq);
	smp_mb();
	txq1->old_read_count = READ_ONCE(txq1->read_count);
	txq2->old_read_count = READ_ONCE(txq2->read_count);

	fill_level = max(txq1->insert_count - txq1->old_read_count,
			 txq2->insert_count - txq2->old_read_count);
	EFX_WARN_ON_ONCE_PARANOID(fill_level >= efx->txq_entries);
	if (likely(fill_level < efx->txq_stop_thresh)) {
		smp_mb();
		if (likely(!efx->loopback_selftest))
			netif_tx_start_queue(txq1->core_txq);
	}
}

static int efx_enqueue_skb_copy(struct efx_tx_queue *tx_queue,
				struct sk_buff *skb)
{
	unsigned int copy_len = skb->len;
	struct efx_tx_buffer *buffer;
	u8 *copy_buffer;
	int rc;

	EFX_WARN_ON_ONCE_PARANOID(copy_len > EFX_TX_CB_SIZE);

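	/* The whole packet (header and any fragments) is copied into a
	 * queue-local copy buffer, so no DMA mapping is needed for this skb.
	 */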
	buffer = efx_tx_queue_get_insert_buffer(tx_queue);

	copy_buffer = efx_tx_get_copy_buffer(tx_queue, buffer);
	if (unlikely(!copy_buffer))
		return -ENOMEM;

	rc = skb_copy_bits(skb, 0, copy_buffer, copy_len);
	EFX_WARN_ON_PARANOID(rc);
	buffer->len = copy_len;

	buffer->skb = skb;
	buffer->flags = EFX_TX_BUF_SKB;

	++tx_queue->insert_count;
	return rc;
}

#ifdef EFX_USE_PIO

struct efx_short_copy_buffer {
	int used;
	u8 buf[L1_CACHE_BYTES];
};

/* Copy to PIO, respecting that writes to PIO buffers must be dword aligned.
 * Advances piobuf pointer. Leaves additional data in the copy buffer.
 */
static void efx_memcpy_toio_aligned(struct efx_nic *efx, u8 __iomem **piobuf,
				    u8 *data, int len,
				    struct efx_short_copy_buffer *copy_buf)
{
	int block_len = len & ~(sizeof(copy_buf->buf) - 1);

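	/* block_len is len rounded down to a whole number of copy-buffer
	 * (cache-line-sized) blocks; any tail is staged in copy_buf.
	 */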
	__iowrite64_copy(*piobuf, data, block_len >> 3);
	*piobuf += block_len;
	len -= block_len;

	if (len) {
		data += block_len;
		BUG_ON(copy_buf->used);
		BUG_ON(len > sizeof(copy_buf->buf));
		memcpy(copy_buf->buf, data, len);
		copy_buf->used = len;
	}
}

/* Copy to PIO, respecting dword alignment, popping data from copy buffer first.
 * Advances piobuf pointer. Leaves additional data in the copy buffer.
 */
static void efx_memcpy_toio_aligned_cb(struct efx_nic *efx, u8 __iomem **piobuf,
				       u8 *data, int len,
				       struct efx_short_copy_buffer *copy_buf)
{
	if (copy_buf->used) {
		/* if the copy buffer is partially full, fill it up and write */
		int copy_to_buf =
			min_t(int, sizeof(copy_buf->buf) - copy_buf->used, len);

		memcpy(copy_buf->buf + copy_buf->used, data, copy_to_buf);
		copy_buf->used += copy_to_buf;

		/* if we didn't fill it up then we're done for now */
		if (copy_buf->used < sizeof(copy_buf->buf))
			return;

		__iowrite64_copy(*piobuf, copy_buf->buf,
				 sizeof(copy_buf->buf) >> 3);
		*piobuf += sizeof(copy_buf->buf);
		data += copy_to_buf;
		len -= copy_to_buf;
		copy_buf->used = 0;
	}

	efx_memcpy_toio_aligned(efx, piobuf, data, len, copy_buf);
}

static void efx_flush_copy_buffer(struct efx_nic *efx, u8 __iomem *piobuf,
				  struct efx_short_copy_buffer *copy_buf)
{
	/* if there's anything in it, write the whole buffer, including junk */
	if (copy_buf->used)
		__iowrite64_copy(piobuf, copy_buf->buf,
				 sizeof(copy_buf->buf) >> 3);
}

/* Traverse skb structure and copy fragments into the PIO buffer.
 * Advances piobuf pointer.
 */
static void efx_skb_copy_bits_to_pio(struct efx_nic *efx, struct sk_buff *skb,
				     u8 __iomem **piobuf,
				     struct efx_short_copy_buffer *copy_buf)
{
	int i;

	efx_memcpy_toio_aligned(efx, piobuf, skb->data, skb_headlen(skb),
				copy_buf);

	for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
		skb_frag_t *f = &skb_shinfo(skb)->frags[i];
		u8 *vaddr;

		vaddr = kmap_atomic(skb_frag_page(f));

		efx_memcpy_toio_aligned_cb(efx, piobuf, vaddr + skb_frag_off(f),
					   skb_frag_size(f), copy_buf);
		kunmap_atomic(vaddr);
	}

	EFX_WARN_ON_ONCE_PARANOID(skb_shinfo(skb)->frag_list);
}

static int efx_enqueue_skb_pio(struct efx_tx_queue *tx_queue,
			       struct sk_buff *skb)
{
	struct efx_tx_buffer *buffer =
		efx_tx_queue_get_insert_buffer(tx_queue);
	u8 __iomem *piobuf = tx_queue->piobuf;

	/* Copy to PIO buffer. Ensure the writes are padded to the end
	 * of a cache line, as this is required for write-combining to be
	 * effective on at least x86.
	 */

	if (skb_shinfo(skb)->nr_frags) {
		/* The size of the copy buffer will ensure all writes
		 * are the size of a cache line.
		 */
		struct efx_short_copy_buffer copy_buf;

		copy_buf.used = 0;

		efx_skb_copy_bits_to_pio(tx_queue->efx, skb,
					 &piobuf, &copy_buf);
		efx_flush_copy_buffer(tx_queue->efx, piobuf, &copy_buf);
	} else {
		/* Pad the write to the size of a cache line.
		 * We can do this because we know the skb_shared_info struct is
		 * after the source, and the destination buffer is big enough.
		 */
		BUILD_BUG_ON(L1_CACHE_BYTES >
			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
		__iowrite64_copy(tx_queue->piobuf, skb->data,
				 ALIGN(skb->len, L1_CACHE_BYTES) >> 3);
	}

	buffer->skb = skb;
	buffer->flags = EFX_TX_BUF_SKB | EFX_TX_BUF_OPTION;

	EFX_POPULATE_QWORD_5(buffer->option,
			     ESF_DZ_TX_DESC_IS_OPT, 1,
			     ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_PIO,
			     ESF_DZ_TX_PIO_CONT, 0,
			     ESF_DZ_TX_PIO_BYTE_CNT, skb->len,
			     ESF_DZ_TX_PIO_BUF_ADDR,
			     tx_queue->piobuf_offset);
	++tx_queue->insert_count;
	return 0;
}
#endif /* EFX_USE_PIO */

static struct efx_tx_buffer *efx_tx_map_chunk(struct efx_tx_queue *tx_queue,
					      dma_addr_t dma_addr,
					      size_t len)
{
	const struct efx_nic_type *nic_type = tx_queue->efx->type;
	struct efx_tx_buffer *buffer;
	unsigned int dma_len;

	/* Map the fragment taking account of NIC-dependent DMA limits. */
	do {
		buffer = efx_tx_queue_get_insert_buffer(tx_queue);
		dma_len = nic_type->tx_limit_len(tx_queue, dma_addr, len);

		buffer->len = dma_len;
		buffer->dma_addr = dma_addr;
		buffer->flags = EFX_TX_BUF_CONT;
		len -= dma_len;
		dma_addr += dma_len;
		++tx_queue->insert_count;
	} while (len);

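	/* Return the buffer used for the final descriptor; the caller
	 * attaches unmap information (and, for the last fragment, the skb).
	 */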
	return buffer;
}

/* Map all data from an SKB for DMA and create descriptors on the queue.
 */
static int efx_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
			   unsigned int segment_count)
{
	struct efx_nic *efx = tx_queue->efx;
	struct device *dma_dev = &efx->pci_dev->dev;
	unsigned int frag_index, nr_frags;
	dma_addr_t dma_addr, unmap_addr;
	unsigned short dma_flags;
	size_t len, unmap_len;

	nr_frags = skb_shinfo(skb)->nr_frags;
	frag_index = 0;

	/* Map header data. */
	len = skb_headlen(skb);
	dma_addr = dma_map_single(dma_dev, skb->data, len, DMA_TO_DEVICE);
	dma_flags = EFX_TX_BUF_MAP_SINGLE;
	unmap_len = len;
	unmap_addr = dma_addr;

	if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
		return -EIO;

	if (segment_count) {
		/* For TSO we need to put the header into a separate
		 * descriptor. Map this separately if necessary.
		 */
		size_t header_len = skb_transport_header(skb) - skb->data +
				    (tcp_hdr(skb)->doff << 2u);

		if (header_len != len) {
			tx_queue->tso_long_headers++;
			efx_tx_map_chunk(tx_queue, dma_addr, header_len);
			len -= header_len;
			dma_addr += header_len;
		}
	}

	/* Add descriptors for each fragment. */
	do {
		struct efx_tx_buffer *buffer;
		skb_frag_t *fragment;

		buffer = efx_tx_map_chunk(tx_queue, dma_addr, len);

		/* The final descriptor for a fragment is responsible for
		 * unmapping the whole fragment.
		 */
		buffer->flags = EFX_TX_BUF_CONT | dma_flags;
		buffer->unmap_len = unmap_len;
		buffer->dma_offset = buffer->dma_addr - unmap_addr;

		if (frag_index >= nr_frags) {
			/* Store SKB details with the final buffer for
			 * the completion.
			 */
			buffer->skb = skb;
			buffer->flags = EFX_TX_BUF_SKB | dma_flags;
			return 0;
		}

		/* Move on to the next fragment. */
		fragment = &skb_shinfo(skb)->frags[frag_index++];
		len = skb_frag_size(fragment);
		dma_addr = skb_frag_dma_map(dma_dev, fragment,
					    0, len, DMA_TO_DEVICE);
		dma_flags = 0;
		unmap_len = len;
		unmap_addr = dma_addr;

		if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
			return -EIO;
	} while (1);
}

/* Remove buffers put into a tx_queue for the current packet.
 * None of the buffers may have an skb attached.
 */
static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue,
			       unsigned int insert_count)
{
	struct efx_tx_buffer *buffer;
	unsigned int bytes_compl = 0;
	unsigned int pkts_compl = 0;

	/* Work backwards until we hit the original insert pointer value */
	while (tx_queue->insert_count != insert_count) {
		--tx_queue->insert_count;
		buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
		efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
	}
}

/*
 * Fallback to software TSO.
 *
 * This is used if we are unable to send a GSO packet through hardware TSO.
 * This should only ever happen due to per-queue restrictions - unsupported
 * packets should first be filtered by the feature flags.
 *
 * Returns 0 on success, error code otherwise.
 */
static int efx_tx_tso_fallback(struct efx_tx_queue *tx_queue,
			       struct sk_buff *skb)
{
	struct sk_buff *segments, *next;

	segments = skb_gso_segment(skb, 0);
	if (IS_ERR(segments))
		return PTR_ERR(segments);

	dev_consume_skb_any(skb);
	skb = segments;

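	/* Submit each software-generated segment as an ordinary packet. */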
	while (skb) {
		next = skb->next;
		skb->next = NULL;

		efx_enqueue_skb(tx_queue, skb);
		skb = next;
	}

	return 0;
}

/*
 * Add a socket buffer to a TX queue
 *
 * This maps all fragments of a socket buffer for DMA and adds them to
 * the TX queue. The queue's insert pointer will be incremented by
 * the number of fragments in the socket buffer.
 *
 * If any DMA mapping fails, any mapped fragments will be unmapped,
 * and the queue's insert pointer will be restored to its original value.
 *
 * This function is split out from efx_hard_start_xmit to allow the
 * loopback test to direct packets via specific TX queues.
 *
 * Returns NETDEV_TX_OK.
 * You must hold netif_tx_lock() to call this function.
 */
netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
{
	unsigned int old_insert_count = tx_queue->insert_count;
	bool xmit_more = netdev_xmit_more();
	bool data_mapped = false;
	unsigned int segments;
	unsigned int skb_len;
	int rc;

	skb_len = skb->len;
	segments = skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 0;
	if (segments == 1)
		segments = 0; /* Don't use TSO for a single segment. */

	/* Handle TSO first - it's *possible* (although unlikely) that we might
	 * be passed a packet to segment that's smaller than the copybreak/PIO
	 * size limit.
	 */
	if (segments) {
		EFX_WARN_ON_ONCE_PARANOID(!tx_queue->handle_tso);
		rc = tx_queue->handle_tso(tx_queue, skb, &data_mapped);
		if (rc == -EINVAL) {
			rc = efx_tx_tso_fallback(tx_queue, skb);
			tx_queue->tso_fallbacks++;
			if (rc == 0)
				return 0;
		}
		if (rc)
			goto err;
#ifdef EFX_USE_PIO
	} else if (skb_len <= efx_piobuf_size && !xmit_more &&
		   efx_nic_may_tx_pio(tx_queue)) {
		/* Use PIO for short packets with an empty queue. */
		if (efx_enqueue_skb_pio(tx_queue, skb))
			goto err;
		tx_queue->pio_packets++;
		data_mapped = true;
#endif
	} else if (skb->data_len && skb_len <= EFX_TX_CB_SIZE) {
		/* Pad short packets or coalesce short fragmented packets. */
		if (efx_enqueue_skb_copy(tx_queue, skb))
			goto err;
		tx_queue->cb_packets++;
		data_mapped = true;
	}

	/* Map for DMA and create descriptors if we haven't done so already. */
	if (!data_mapped && (efx_tx_map_data(tx_queue, skb, segments)))
		goto err;

	efx_tx_maybe_stop_queue(tx_queue);

	/* Pass off to hardware */
	if (__netdev_tx_sent_queue(tx_queue->core_txq, skb_len, xmit_more)) {
		struct efx_tx_queue *txq2 = efx_tx_queue_partner(tx_queue);

		/* There could be packets left on the partner queue if
		 * xmit_more was set. If we do not push those they
		 * could be left for a long time and cause a netdev watchdog
		 * timeout.
		 */
		if (txq2->xmit_more_available)
			efx_nic_push_buffers(txq2);

		efx_nic_push_buffers(tx_queue);
	} else {
		tx_queue->xmit_more_available = xmit_more;
	}

	if (segments) {
		tx_queue->tso_bursts++;
		tx_queue->tso_packets += segments;
		tx_queue->tx_packets += segments;
	} else {
		tx_queue->tx_packets++;
	}

	return NETDEV_TX_OK;

err:
	efx_enqueue_unwind(tx_queue, old_insert_count);
	dev_kfree_skb_any(skb);

	/* If we're not expecting another transmit and we had something to push
	 * on this queue or a partner queue then we need to push here to get the
	 * previous packets out.
	 */
	if (!xmit_more) {
		struct efx_tx_queue *txq2 = efx_tx_queue_partner(tx_queue);

		if (txq2->xmit_more_available)
			efx_nic_push_buffers(txq2);

		efx_nic_push_buffers(tx_queue);
	}

	return NETDEV_TX_OK;
}

/* Remove packets from the TX queue
 *
 * This removes packets from the TX queue, up to and including the
 * specified index.
 */
static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
				unsigned int index,
				unsigned int *pkts_compl,
				unsigned int *bytes_compl)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned int stop_index, read_ptr;

	stop_index = (index + 1) & tx_queue->ptr_mask;
	read_ptr = tx_queue->read_count & tx_queue->ptr_mask;

	while (read_ptr != stop_index) {
		struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];

		if (!(buffer->flags & EFX_TX_BUF_OPTION) &&
		    unlikely(buffer->len == 0)) {
			netif_err(efx, tx_err, efx->net_dev,
				  "TX queue %d spurious TX completion id %x\n",
				  tx_queue->queue, read_ptr);
			efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
			return;
		}

		efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl);

		++tx_queue->read_count;
		read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
	}
}

/* Initiate a packet transmission. We use one channel per CPU
 * (sharing when we have more CPUs than channels). On Falcon, the TX
 * completion events will be directed back to the CPU that transmitted
 * the packet, which should be cache-efficient.
 *
 * Context: non-blocking.
 * Note that returning anything other than NETDEV_TX_OK will cause the
 * OS to free the skb.
 */
netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
				struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_tx_queue *tx_queue;
	unsigned index, type;

	EFX_WARN_ON_PARANOID(!netif_device_present(net_dev));

	/* PTP "event" packet */
	if (unlikely(efx_xmit_with_hwtstamp(skb)) &&
	    unlikely(efx_ptp_is_ptp_tx(efx, skb))) {
		return efx_ptp_tx(efx, skb);
	}

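	/* Map the core TX queue number to a hardware queue: one
	 * checksum-offload and one no-offload queue per channel, with
	 * high-priority queues numbered after the normal ones (this must
	 * stay the inverse of efx_init_tx_queue_core_txq()).
	 */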
	index = skb_get_queue_mapping(skb);
	type = skb->ip_summed == CHECKSUM_PARTIAL ? EFX_TXQ_TYPE_OFFLOAD : 0;
	if (index >= efx->n_tx_channels) {
		index -= efx->n_tx_channels;
		type |= EFX_TXQ_TYPE_HIGHPRI;
	}
	tx_queue = efx_get_tx_queue(efx, index, type);

	return efx_enqueue_skb(tx_queue, skb);
}

void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;

	/* Must be inverse of queue lookup in efx_hard_start_xmit() */
	tx_queue->core_txq =
		netdev_get_tx_queue(efx->net_dev,
				    tx_queue->queue / EFX_TXQ_TYPES +
				    ((tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
				     efx->n_tx_channels : 0));
}

int efx_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
		 void *type_data)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct tc_mqprio_qopt *mqprio = type_data;
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	unsigned tc, num_tc;
	int rc;

	if (type != TC_SETUP_QDISC_MQPRIO)
		return -EOPNOTSUPP;

	num_tc = mqprio->num_tc;

	if (num_tc > EFX_MAX_TX_TC)
		return -EINVAL;

	mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;

	if (num_tc == net_dev->num_tc)
		return 0;

	for (tc = 0; tc < num_tc; tc++) {
		net_dev->tc_to_txq[tc].offset = tc * efx->n_tx_channels;
		net_dev->tc_to_txq[tc].count = efx->n_tx_channels;
	}

	if (num_tc > net_dev->num_tc) {
		/* Initialise high-priority queues as necessary */
		efx_for_each_channel(channel, efx) {
			efx_for_each_possible_channel_tx_queue(tx_queue,
							       channel) {
				if (!(tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI))
					continue;
				if (!tx_queue->buffer) {
					rc = efx_probe_tx_queue(tx_queue);
					if (rc)
						return rc;
				}
				if (!tx_queue->initialised)
					efx_init_tx_queue(tx_queue);
				efx_init_tx_queue_core_txq(tx_queue);
			}
		}
	} else {
		/* Reduce number of classes before number of queues */
		net_dev->num_tc = num_tc;
	}

	rc = netif_set_real_num_tx_queues(net_dev,
					  max_t(int, num_tc, 1) *
					  efx->n_tx_channels);
	if (rc)
		return rc;

	/* Do not destroy high-priority queues when they become
	 * unused. We would have to flush them first, and it is
	 * fairly difficult to flush a subset of TX queues. Leave
	 * it to efx_fini_channels().
	 */

	net_dev->num_tc = num_tc;
	return 0;
}

void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
{
	unsigned fill_level;
	struct efx_nic *efx = tx_queue->efx;
	struct efx_tx_queue *txq2;
	unsigned int pkts_compl = 0, bytes_compl = 0;

	EFX_WARN_ON_ONCE_PARANOID(index > tx_queue->ptr_mask);

	efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
	tx_queue->pkts_compl += pkts_compl;
	tx_queue->bytes_compl += bytes_compl;

	if (pkts_compl > 1)
		++tx_queue->merge_events;

	/* See if we need to restart the netif queue. This memory
	 * barrier ensures that we write read_count (inside
	 * efx_dequeue_buffers()) before reading the queue status.
	 */
	smp_mb();
	if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
	    likely(efx->port_enabled) &&
	    likely(netif_device_present(efx->net_dev))) {
		txq2 = efx_tx_queue_partner(tx_queue);
		fill_level = max(tx_queue->insert_count - tx_queue->read_count,
				 txq2->insert_count - txq2->read_count);
		if (fill_level <= efx->txq_wake_thresh)
			netif_tx_wake_queue(tx_queue->core_txq);
	}

	/* Check whether the hardware queue is now empty */
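	/* An empty queue (EFX_EMPTY_COUNT_VALID set in empty_read_count)
	 * lets the next transmit take the TX push/PIO fast path.
	 */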
	if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
		tx_queue->old_write_count = READ_ONCE(tx_queue->write_count);
		if (tx_queue->read_count == tx_queue->old_write_count) {
			smp_mb();
			tx_queue->empty_read_count =
				tx_queue->read_count | EFX_EMPTY_COUNT_VALID;
		}
	}
}

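/* Number of copy-buffer pages needed to give each descriptor slot its own
 * EFX_TX_CB_SIZE chunk.
 */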
static unsigned int efx_tx_cb_page_count(struct efx_tx_queue *tx_queue)
{
	return DIV_ROUND_UP(tx_queue->ptr_mask + 1, PAGE_SIZE >> EFX_TX_CB_ORDER);
}

int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned int entries;
	int rc;

	/* Create the smallest power-of-two aligned ring */
	entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE);
	EFX_WARN_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
	tx_queue->ptr_mask = entries - 1;

	netif_dbg(efx, probe, efx->net_dev,
		  "creating TX queue %d size %#x mask %#x\n",
		  tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);

	/* Allocate software ring */
	tx_queue->buffer = kcalloc(entries, sizeof(*tx_queue->buffer),
				   GFP_KERNEL);
	if (!tx_queue->buffer)
		return -ENOMEM;

	tx_queue->cb_page = kcalloc(efx_tx_cb_page_count(tx_queue),
				    sizeof(tx_queue->cb_page[0]), GFP_KERNEL);
	if (!tx_queue->cb_page) {
		rc = -ENOMEM;
		goto fail1;
	}

	/* Allocate hardware ring */
	rc = efx_nic_probe_tx(tx_queue);
	if (rc)
		goto fail2;

	return 0;

fail2:
	kfree(tx_queue->cb_page);
	tx_queue->cb_page = NULL;
fail1:
	kfree(tx_queue->buffer);
	tx_queue->buffer = NULL;
	return rc;
}

void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;

	netif_dbg(efx, drv, efx->net_dev,
		  "initialising TX queue %d\n", tx_queue->queue);

	tx_queue->insert_count = 0;
	tx_queue->write_count = 0;
	tx_queue->packet_write_count = 0;
	tx_queue->old_write_count = 0;
	tx_queue->read_count = 0;
	tx_queue->old_read_count = 0;
	tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID;
	tx_queue->xmit_more_available = false;
	tx_queue->timestamping = (efx_ptp_use_mac_tx_timestamps(efx) &&
				  tx_queue->channel == efx_ptp_channel(efx));
	tx_queue->completed_desc_ptr = tx_queue->ptr_mask;
	tx_queue->completed_timestamp_major = 0;
	tx_queue->completed_timestamp_minor = 0;

	/* Set up default function pointers. These may get replaced by
	 * efx_nic_init_tx() based on NIC/queue capabilities.
	 */
	tx_queue->handle_tso = efx_enqueue_skb_tso;

	/* Set up TX descriptor ring */
	efx_nic_init_tx(tx_queue);

	tx_queue->initialised = true;
}

void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;

	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "shutting down TX queue %d\n", tx_queue->queue);

	if (!tx_queue->buffer)
		return;

	/* Free any buffers left in the ring */
	while (tx_queue->read_count != tx_queue->write_count) {
		unsigned int pkts_compl = 0, bytes_compl = 0;

		buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
		efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);

		++tx_queue->read_count;
	}
	tx_queue->xmit_more_available = false;
	netdev_tx_reset_queue(tx_queue->core_txq);
}

void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
{
	int i;

	if (!tx_queue->buffer)
		return;

	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "destroying TX queue %d\n", tx_queue->queue);
	efx_nic_remove_tx(tx_queue);

	if (tx_queue->cb_page) {
		for (i = 0; i < efx_tx_cb_page_count(tx_queue); i++)
			efx_nic_free_buffer(tx_queue->efx,
					    &tx_queue->cb_page[i]);
		kfree(tx_queue->cb_page);
		tx_queue->cb_page = NULL;
	}

	kfree(tx_queue->buffer);
	tx_queue->buffer = NULL;
}
915