/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2013 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/pci.h>
#include <linux/tcp.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/ipv6.h>
#include <linux/if_ether.h>
#include <linux/highmem.h>
#include <linux/cache.h>
#include "net_driver.h"
#include "efx.h"
#include "io.h"
#include "nic.h"
#include "tx.h"
#include "workarounds.h"
#include "ef10_regs.h"

#ifdef EFX_USE_PIO

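/* Copybreak for the PIO fast path: efx_enqueue_skb() only takes the PIO
 * route for packets no longer than efx_piobuf_size.  The default below is
 * 256 bytes rounded up to a cache line.
 */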
#define EFX_PIOBUF_SIZE_DEF ALIGN(256, L1_CACHE_BYTES)
unsigned int efx_piobuf_size __read_mostly = EFX_PIOBUF_SIZE_DEF;

#endif /* EFX_USE_PIO */

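/* Return a copy buffer for the descriptor at the current insert index.
 * Copy buffers are carved out of the DMA-coherent pages in
 * tx_queue->cb_page: each buffer is 1 << EFX_TX_CB_ORDER bytes, so the
 * insert index selects a page and an offset within it, and NET_IP_ALIGN
 * keeps the copied headers naturally aligned.  Pages are allocated lazily
 * (GFP_ATOMIC) the first time a buffer within them is used.  As a worked
 * example, assuming 4K pages and EFX_TX_CB_ORDER of 7, each page holds
 * 32 copy buffers of 128 bytes.
 */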
static inline u8 *efx_tx_get_copy_buffer(struct efx_tx_queue *tx_queue,
                                         struct efx_tx_buffer *buffer)
{
        unsigned int index = efx_tx_queue_get_insert_index(tx_queue);
        struct efx_buffer *page_buf =
                &tx_queue->cb_page[index >> (PAGE_SHIFT - EFX_TX_CB_ORDER)];
        unsigned int offset =
                ((index << EFX_TX_CB_ORDER) + NET_IP_ALIGN) & (PAGE_SIZE - 1);

        if (unlikely(!page_buf->addr) &&
            efx_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE,
                                 GFP_ATOMIC))
                return NULL;
        buffer->dma_addr = page_buf->dma_addr + offset;
        buffer->unmap_len = 0;
        return (u8 *)page_buf->addr + offset;
}

u8 *efx_tx_get_copy_buffer_limited(struct efx_tx_queue *tx_queue,
                                   struct efx_tx_buffer *buffer, size_t len)
{
        if (len > EFX_TX_CB_SIZE)
                return NULL;
        return efx_tx_get_copy_buffer(tx_queue, buffer);
}

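/* Release one software descriptor: unmap its DMA region if this buffer owns
 * one (single vs. page mapping), deliver a hardware TX timestamp to the
 * stack when the queue is timestamping, free the skb on the final buffer of
 * a packet, and account the completion in *pkts_compl / *bytes_compl.
 */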
static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
                               struct efx_tx_buffer *buffer,
                               unsigned int *pkts_compl,
                               unsigned int *bytes_compl)
{
        if (buffer->unmap_len) {
                struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
                dma_addr_t unmap_addr = buffer->dma_addr - buffer->dma_offset;
                if (buffer->flags & EFX_TX_BUF_MAP_SINGLE)
                        dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len,
                                         DMA_TO_DEVICE);
                else
                        dma_unmap_page(dma_dev, unmap_addr, buffer->unmap_len,
                                       DMA_TO_DEVICE);
                buffer->unmap_len = 0;
        }

        if (buffer->flags & EFX_TX_BUF_SKB) {
                struct sk_buff *skb = (struct sk_buff *)buffer->skb;

                EFX_WARN_ON_PARANOID(!pkts_compl || !bytes_compl);
                (*pkts_compl)++;
                (*bytes_compl) += skb->len;
                if (tx_queue->timestamping &&
                    (tx_queue->completed_timestamp_major ||
                     tx_queue->completed_timestamp_minor)) {
                        struct skb_shared_hwtstamps hwtstamp;

                        hwtstamp.hwtstamp =
                                efx_ptp_nic_to_kernel_time(tx_queue);
                        skb_tstamp_tx(skb, &hwtstamp);

                        tx_queue->completed_timestamp_major = 0;
                        tx_queue->completed_timestamp_minor = 0;
                }
                dev_consume_skb_any((struct sk_buff *)buffer->skb);
                netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
                           "TX queue %d transmission id %x complete\n",
                           tx_queue->queue, tx_queue->read_count);
        }

        buffer->len = 0;
        buffer->flags = 0;
}

unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
{
        /* Header and payload descriptor for each output segment, plus
         * one for every input fragment boundary within a segment
         */
        unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS;
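        /* Assuming the driver's usual EFX_TSO_MAX_SEGS of 100 and
         * MAX_SKB_FRAGS of 17 (4K pages), this starting point works out to
         * 2 * 100 + 17 = 217 descriptors before the adjustments below.
         */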

        /* Possibly one more per segment for option descriptors */
        if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
                max_descs += EFX_TSO_MAX_SEGS;

        /* Possibly more for PCIe page boundaries within input fragments */
        if (PAGE_SIZE > EFX_PAGE_SIZE)
                max_descs += max_t(unsigned int, MAX_SKB_FRAGS,
                                   DIV_ROUND_UP(GSO_MAX_SIZE, EFX_PAGE_SIZE));

        return max_descs;
}

static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1)
{
        /* We need to consider both queues that the net core sees as one */
        struct efx_tx_queue *txq2 = efx_tx_queue_partner(txq1);
        struct efx_nic *efx = txq1->efx;
        unsigned int fill_level;

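        /* The counters below are free-running; unsigned subtraction still
         * yields the number of outstanding descriptors after they wrap.
         */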
        fill_level = max(txq1->insert_count - txq1->old_read_count,
                         txq2->insert_count - txq2->old_read_count);
        if (likely(fill_level < efx->txq_stop_thresh))
                return;

        /* We used the stale old_read_count above, which gives us a
         * pessimistic estimate of the fill level (which may even
         * validly be >= efx->txq_entries).  Now try again using
         * read_count (more likely to be a cache miss).
         *
         * If we read read_count and then conditionally stop the
         * queue, it is possible for the completion path to race with
         * us and complete all outstanding descriptors in the middle,
         * after which there will be no more completions to wake it.
         * Therefore we stop the queue first, then read read_count
         * (with a memory barrier to ensure the ordering), then
         * restart the queue if the fill level turns out to be low
         * enough.
         */
        netif_tx_stop_queue(txq1->core_txq);
        smp_mb();
        txq1->old_read_count = READ_ONCE(txq1->read_count);
        txq2->old_read_count = READ_ONCE(txq2->read_count);

        fill_level = max(txq1->insert_count - txq1->old_read_count,
                         txq2->insert_count - txq2->old_read_count);
        EFX_WARN_ON_ONCE_PARANOID(fill_level >= efx->txq_entries);
        if (likely(fill_level < efx->txq_stop_thresh)) {
                smp_mb();
                if (likely(!efx->loopback_selftest))
                        netif_tx_start_queue(txq1->core_txq);
        }
}

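/* Copy an entire short skb into a copy buffer so that it can be sent
 * straight from DMA-coherent memory with no per-skb DMA mapping; see the
 * copybreak path in efx_enqueue_skb().
 */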
static int efx_enqueue_skb_copy(struct efx_tx_queue *tx_queue,
                                struct sk_buff *skb)
{
        unsigned int copy_len = skb->len;
        struct efx_tx_buffer *buffer;
        u8 *copy_buffer;
        int rc;

        EFX_WARN_ON_ONCE_PARANOID(copy_len > EFX_TX_CB_SIZE);

        buffer = efx_tx_queue_get_insert_buffer(tx_queue);

        copy_buffer = efx_tx_get_copy_buffer(tx_queue, buffer);
        if (unlikely(!copy_buffer))
                return -ENOMEM;

        rc = skb_copy_bits(skb, 0, copy_buffer, copy_len);
        EFX_WARN_ON_PARANOID(rc);
        buffer->len = copy_len;

        buffer->skb = skb;
        buffer->flags = EFX_TX_BUF_SKB;

        ++tx_queue->insert_count;
        return rc;
}

#ifdef EFX_USE_PIO

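/* Staging buffer for the PIO copy routines below: writes to a PIO buffer
 * must be dword aligned, and padding them out to a whole cache line keeps
 * write-combining effective, so sub-line leftovers are accumulated here
 * until a full line can be written.
 */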
struct efx_short_copy_buffer {
        int used;
        u8 buf[L1_CACHE_BYTES];
};

/* Copy to PIO, respecting that writes to PIO buffers must be dword aligned.
 * Advances piobuf pointer. Leaves additional data in the copy buffer.
 */
static void efx_memcpy_toio_aligned(struct efx_nic *efx, u8 __iomem **piobuf,
                                    u8 *data, int len,
                                    struct efx_short_copy_buffer *copy_buf)
{
        int block_len = len & ~(sizeof(copy_buf->buf) - 1);

        __iowrite64_copy(*piobuf, data, block_len >> 3);
        *piobuf += block_len;
        len -= block_len;

        if (len) {
                data += block_len;
                BUG_ON(copy_buf->used);
                BUG_ON(len > sizeof(copy_buf->buf));
                memcpy(copy_buf->buf, data, len);
                copy_buf->used = len;
        }
}

/* Copy to PIO, respecting dword alignment, popping data from copy buffer first.
 * Advances piobuf pointer. Leaves additional data in the copy buffer.
 */
static void efx_memcpy_toio_aligned_cb(struct efx_nic *efx, u8 __iomem **piobuf,
                                       u8 *data, int len,
                                       struct efx_short_copy_buffer *copy_buf)
{
        if (copy_buf->used) {
                /* if the copy buffer is partially full, fill it up and write */
                int copy_to_buf =
                        min_t(int, sizeof(copy_buf->buf) - copy_buf->used, len);

                memcpy(copy_buf->buf + copy_buf->used, data, copy_to_buf);
                copy_buf->used += copy_to_buf;

                /* if we didn't fill it up then we're done for now */
                if (copy_buf->used < sizeof(copy_buf->buf))
                        return;

                __iowrite64_copy(*piobuf, copy_buf->buf,
                                 sizeof(copy_buf->buf) >> 3);
                *piobuf += sizeof(copy_buf->buf);
                data += copy_to_buf;
                len -= copy_to_buf;
                copy_buf->used = 0;
        }

        efx_memcpy_toio_aligned(efx, piobuf, data, len, copy_buf);
}

static void efx_flush_copy_buffer(struct efx_nic *efx, u8 __iomem *piobuf,
                                  struct efx_short_copy_buffer *copy_buf)
{
        /* if there's anything in it, write the whole buffer, including junk */
        if (copy_buf->used)
                __iowrite64_copy(piobuf, copy_buf->buf,
                                 sizeof(copy_buf->buf) >> 3);
}

/* Traverse skb structure and copy fragments in to PIO buffer.
 * Advances piobuf pointer.
 */
static void efx_skb_copy_bits_to_pio(struct efx_nic *efx, struct sk_buff *skb,
                                     u8 __iomem **piobuf,
                                     struct efx_short_copy_buffer *copy_buf)
{
        int i;

        efx_memcpy_toio_aligned(efx, piobuf, skb->data, skb_headlen(skb),
                                copy_buf);

        for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
                skb_frag_t *f = &skb_shinfo(skb)->frags[i];
                u8 *vaddr;

                vaddr = kmap_atomic(skb_frag_page(f));

                efx_memcpy_toio_aligned_cb(efx, piobuf, vaddr + f->page_offset,
                                           skb_frag_size(f), copy_buf);
                kunmap_atomic(vaddr);
        }

        EFX_WARN_ON_ONCE_PARANOID(skb_shinfo(skb)->frag_list);
}

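/* PIO transmit path: the packet is copied into the queue's PIO aperture
 * with MMIO writes, and a single PIO option descriptor (no DMA mapping)
 * describing the length and buffer offset is queued in its place.
 */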
static int efx_enqueue_skb_pio(struct efx_tx_queue *tx_queue,
                               struct sk_buff *skb)
{
        struct efx_tx_buffer *buffer =
                efx_tx_queue_get_insert_buffer(tx_queue);
        u8 __iomem *piobuf = tx_queue->piobuf;

        /* Copy to PIO buffer. Ensure the writes are padded to the end
         * of a cache line, as this is required for write-combining to be
         * effective on at least x86.
         */

        if (skb_shinfo(skb)->nr_frags) {
                /* The size of the copy buffer will ensure all writes
                 * are the size of a cache line.
                 */
                struct efx_short_copy_buffer copy_buf;

                copy_buf.used = 0;

                efx_skb_copy_bits_to_pio(tx_queue->efx, skb,
                                         &piobuf, &copy_buf);
                efx_flush_copy_buffer(tx_queue->efx, piobuf, &copy_buf);
        } else {
                /* Pad the write to the size of a cache line.
                 * We can do this because we know the skb_shared_info struct is
                 * after the source, and the destination buffer is big enough.
                 */
                BUILD_BUG_ON(L1_CACHE_BYTES >
                             SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
                __iowrite64_copy(tx_queue->piobuf, skb->data,
                                 ALIGN(skb->len, L1_CACHE_BYTES) >> 3);
        }

        buffer->skb = skb;
        buffer->flags = EFX_TX_BUF_SKB | EFX_TX_BUF_OPTION;

        EFX_POPULATE_QWORD_5(buffer->option,
                             ESF_DZ_TX_DESC_IS_OPT, 1,
                             ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_PIO,
                             ESF_DZ_TX_PIO_CONT, 0,
                             ESF_DZ_TX_PIO_BYTE_CNT, skb->len,
                             ESF_DZ_TX_PIO_BUF_ADDR,
                             tx_queue->piobuf_offset);
        ++tx_queue->insert_count;
        return 0;
}
#endif /* EFX_USE_PIO */

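/* Post descriptors for one DMA-contiguous region, splitting it into as many
 * chunks as the NIC's per-descriptor length limit (tx_limit_len) requires.
 * The buffer for the final chunk is returned so the caller can attach unmap
 * and skb state to it.
 */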
static struct efx_tx_buffer *efx_tx_map_chunk(struct efx_tx_queue *tx_queue,
                                              dma_addr_t dma_addr,
                                              size_t len)
{
        const struct efx_nic_type *nic_type = tx_queue->efx->type;
        struct efx_tx_buffer *buffer;
        unsigned int dma_len;

        /* Map the fragment taking account of NIC-dependent DMA limits. */
        do {
                buffer = efx_tx_queue_get_insert_buffer(tx_queue);
                dma_len = nic_type->tx_limit_len(tx_queue, dma_addr, len);

                buffer->len = dma_len;
                buffer->dma_addr = dma_addr;
                buffer->flags = EFX_TX_BUF_CONT;
                len -= dma_len;
                dma_addr += dma_len;
                ++tx_queue->insert_count;
        } while (len);

        return buffer;
}

/* Map all data from an SKB for DMA and create descriptors on the queue.
 */
static int efx_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
                           unsigned int segment_count)
{
        struct efx_nic *efx = tx_queue->efx;
        struct device *dma_dev = &efx->pci_dev->dev;
        unsigned int frag_index, nr_frags;
        dma_addr_t dma_addr, unmap_addr;
        unsigned short dma_flags;
        size_t len, unmap_len;

        nr_frags = skb_shinfo(skb)->nr_frags;
        frag_index = 0;

        /* Map header data. */
        len = skb_headlen(skb);
        dma_addr = dma_map_single(dma_dev, skb->data, len, DMA_TO_DEVICE);
        dma_flags = EFX_TX_BUF_MAP_SINGLE;
        unmap_len = len;
        unmap_addr = dma_addr;

        if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
                return -EIO;

        if (segment_count) {
                /* For TSO we need to put the header in to a separate
                 * descriptor. Map this separately if necessary.
                 */
                size_t header_len = skb_transport_header(skb) - skb->data +
                                (tcp_hdr(skb)->doff << 2u);

                if (header_len != len) {
                        tx_queue->tso_long_headers++;
                        efx_tx_map_chunk(tx_queue, dma_addr, header_len);
                        len -= header_len;
                        dma_addr += header_len;
                }
        }

        /* Add descriptors for each fragment. */
        do {
                struct efx_tx_buffer *buffer;
                skb_frag_t *fragment;

                buffer = efx_tx_map_chunk(tx_queue, dma_addr, len);

                /* The final descriptor for a fragment is responsible for
                 * unmapping the whole fragment.
                 */
                buffer->flags = EFX_TX_BUF_CONT | dma_flags;
                buffer->unmap_len = unmap_len;
                buffer->dma_offset = buffer->dma_addr - unmap_addr;

                if (frag_index >= nr_frags) {
                        /* Store SKB details with the final buffer for
                         * the completion.
                         */
                        buffer->skb = skb;
                        buffer->flags = EFX_TX_BUF_SKB | dma_flags;
                        return 0;
                }

                /* Move on to the next fragment. */
                fragment = &skb_shinfo(skb)->frags[frag_index++];
                len = skb_frag_size(fragment);
                dma_addr = skb_frag_dma_map(dma_dev, fragment,
                                            0, len, DMA_TO_DEVICE);
                dma_flags = 0;
                unmap_len = len;
                unmap_addr = dma_addr;

                if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
                        return -EIO;
        } while (1);
}

/* Remove buffers put into a tx_queue for the current packet.
 * None of the buffers must have an skb attached.
 */
static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue,
                               unsigned int insert_count)
{
        struct efx_tx_buffer *buffer;
        unsigned int bytes_compl = 0;
        unsigned int pkts_compl = 0;

        /* Work backwards until we hit the original insert pointer value */
        while (tx_queue->insert_count != insert_count) {
                --tx_queue->insert_count;
                buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
                efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
        }
}

/*
 * Fallback to software TSO.
 *
 * This is used if we are unable to send a GSO packet through hardware TSO.
 * This should only ever happen due to per-queue restrictions - unsupported
 * packets should first be filtered by the feature flags.
 *
 * Returns 0 on success, error code otherwise.
 */
static int efx_tx_tso_fallback(struct efx_tx_queue *tx_queue,
                               struct sk_buff *skb)
{
        struct sk_buff *segments, *next;

        segments = skb_gso_segment(skb, 0);
        if (IS_ERR(segments))
                return PTR_ERR(segments);

        dev_kfree_skb_any(skb);
        skb = segments;

        while (skb) {
                next = skb->next;
                skb->next = NULL;

                if (next)
                        skb->xmit_more = true;
                efx_enqueue_skb(tx_queue, skb);
                skb = next;
        }

        return 0;
}

/*
 * Add a socket buffer to a TX queue
 *
 * This maps all fragments of a socket buffer for DMA and adds them to
 * the TX queue.  The queue's insert pointer will be incremented by
 * the number of fragments in the socket buffer.
 *
 * If any DMA mapping fails, any mapped fragments will be unmapped and
 * the queue's insert pointer will be restored to its original value.
 *
 * This function is split out from efx_hard_start_xmit to allow the
 * loopback test to direct packets via specific TX queues.
 *
 * Returns NETDEV_TX_OK.
 * You must hold netif_tx_lock() to call this function.
 */
netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
{
        unsigned int old_insert_count = tx_queue->insert_count;
        bool xmit_more = skb->xmit_more;
        bool data_mapped = false;
        unsigned int segments;
        unsigned int skb_len;
        int rc;

        skb_len = skb->len;
        segments = skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 0;
        if (segments == 1)
                segments = 0; /* Don't use TSO for a single segment. */

        /* Handle TSO first - it's *possible* (although unlikely) that we might
         * be passed a packet to segment that's smaller than the copybreak/PIO
         * size limit.
         */
        if (segments) {
                EFX_WARN_ON_ONCE_PARANOID(!tx_queue->handle_tso);
                rc = tx_queue->handle_tso(tx_queue, skb, &data_mapped);
                if (rc == -EINVAL) {
                        rc = efx_tx_tso_fallback(tx_queue, skb);
                        tx_queue->tso_fallbacks++;
                        if (rc == 0)
                                return 0;
                }
                if (rc)
                        goto err;
#ifdef EFX_USE_PIO
        } else if (skb_len <= efx_piobuf_size && !skb->xmit_more &&
                   efx_nic_may_tx_pio(tx_queue)) {
                /* Use PIO for short packets with an empty queue. */
                if (efx_enqueue_skb_pio(tx_queue, skb))
                        goto err;
                tx_queue->pio_packets++;
                data_mapped = true;
#endif
        } else if (skb->data_len && skb_len <= EFX_TX_CB_SIZE) {
                /* Pad short packets or coalesce short fragmented packets. */
                if (efx_enqueue_skb_copy(tx_queue, skb))
                        goto err;
                tx_queue->cb_packets++;
                data_mapped = true;
        }

        /* Map for DMA and create descriptors if we haven't done so already. */
        if (!data_mapped && (efx_tx_map_data(tx_queue, skb, segments)))
                goto err;

        /* Update BQL */
        netdev_tx_sent_queue(tx_queue->core_txq, skb_len);

        efx_tx_maybe_stop_queue(tx_queue);

        /* Pass off to hardware */
        if (!xmit_more || netif_xmit_stopped(tx_queue->core_txq)) {
                struct efx_tx_queue *txq2 = efx_tx_queue_partner(tx_queue);

                /* There could be packets left on the partner queue if those
                 * SKBs had skb->xmit_more set. If we do not push those they
                 * could be left for a long time and cause a netdev watchdog.
                 */
                if (txq2->xmit_more_available)
                        efx_nic_push_buffers(txq2);

                efx_nic_push_buffers(tx_queue);
        } else {
                tx_queue->xmit_more_available = skb->xmit_more;
        }

        if (segments) {
                tx_queue->tso_bursts++;
                tx_queue->tso_packets += segments;
                tx_queue->tx_packets += segments;
        } else {
                tx_queue->tx_packets++;
        }

        return NETDEV_TX_OK;


err:
        efx_enqueue_unwind(tx_queue, old_insert_count);
        dev_kfree_skb_any(skb);

        /* If we're not expecting another transmit and we had something to push
         * on this queue or a partner queue then we need to push here to get the
         * previous packets out.
         */
        if (!xmit_more) {
                struct efx_tx_queue *txq2 = efx_tx_queue_partner(tx_queue);

                if (txq2->xmit_more_available)
                        efx_nic_push_buffers(txq2);

                efx_nic_push_buffers(tx_queue);
        }

        return NETDEV_TX_OK;
}

/* Remove packets from the TX queue
 *
 * This removes packets from the TX queue, up to and including the
 * specified index.
 */
static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
                                unsigned int index,
                                unsigned int *pkts_compl,
                                unsigned int *bytes_compl)
{
        struct efx_nic *efx = tx_queue->efx;
        unsigned int stop_index, read_ptr;

        stop_index = (index + 1) & tx_queue->ptr_mask;
        read_ptr = tx_queue->read_count & tx_queue->ptr_mask;

        while (read_ptr != stop_index) {
                struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];

                if (!(buffer->flags & EFX_TX_BUF_OPTION) &&
                    unlikely(buffer->len == 0)) {
                        netif_err(efx, tx_err, efx->net_dev,
                                  "TX queue %d spurious TX completion id %x\n",
                                  tx_queue->queue, read_ptr);
                        efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
                        return;
                }

                efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl);

                ++tx_queue->read_count;
                read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
        }
}

/* Initiate a packet transmission.  We use one channel per CPU
 * (sharing when we have more CPUs than channels).  On Falcon, the TX
 * completion events will be directed back to the CPU that transmitted
 * the packet, which should be cache-efficient.
 *
 * Context: non-blocking.
 * Note that returning anything other than NETDEV_TX_OK will cause the
 * OS to free the skb.
 */
netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
                                struct net_device *net_dev)
{
        struct efx_nic *efx = netdev_priv(net_dev);
        struct efx_tx_queue *tx_queue;
        unsigned index, type;

        EFX_WARN_ON_PARANOID(!netif_device_present(net_dev));

        /* PTP "event" packet */
        if (unlikely(efx_xmit_with_hwtstamp(skb)) &&
            unlikely(efx_ptp_is_ptp_tx(efx, skb))) {
                return efx_ptp_tx(efx, skb);
        }

        index = skb_get_queue_mapping(skb);
        type = skb->ip_summed == CHECKSUM_PARTIAL ? EFX_TXQ_TYPE_OFFLOAD : 0;
        if (index >= efx->n_tx_channels) {
                index -= efx->n_tx_channels;
                type |= EFX_TXQ_TYPE_HIGHPRI;
        }
        tx_queue = efx_get_tx_queue(efx, index, type);

        return efx_enqueue_skb(tx_queue, skb);
}

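/* Point tx_queue->core_txq at the matching netdev TX queue.  With
 * EFX_TXQ_TYPES hardware queues per channel, tx_queue->queue / EFX_TXQ_TYPES
 * recovers the channel index, and high-priority queues map into a second
 * block of n_tx_channels core queues, mirroring the index/type split made
 * in efx_hard_start_xmit().
 */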
void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue)
{
        struct efx_nic *efx = tx_queue->efx;

        /* Must be inverse of queue lookup in efx_hard_start_xmit() */
        tx_queue->core_txq =
                netdev_get_tx_queue(efx->net_dev,
                                    tx_queue->queue / EFX_TXQ_TYPES +
                                    ((tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
                                     efx->n_tx_channels : 0));
}

int efx_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
                 void *type_data)
{
        struct efx_nic *efx = netdev_priv(net_dev);
        struct tc_mqprio_qopt *mqprio = type_data;
        struct efx_channel *channel;
        struct efx_tx_queue *tx_queue;
        unsigned tc, num_tc;
        int rc;

        if (type != TC_SETUP_QDISC_MQPRIO)
                return -EOPNOTSUPP;

        num_tc = mqprio->num_tc;

        if (num_tc > EFX_MAX_TX_TC)
                return -EINVAL;

        mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;

        if (num_tc == net_dev->num_tc)
                return 0;

        for (tc = 0; tc < num_tc; tc++) {
                net_dev->tc_to_txq[tc].offset = tc * efx->n_tx_channels;
                net_dev->tc_to_txq[tc].count = efx->n_tx_channels;
        }

        if (num_tc > net_dev->num_tc) {
                /* Initialise high-priority queues as necessary */
                efx_for_each_channel(channel, efx) {
                        efx_for_each_possible_channel_tx_queue(tx_queue,
                                                               channel) {
                                if (!(tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI))
                                        continue;
                                if (!tx_queue->buffer) {
                                        rc = efx_probe_tx_queue(tx_queue);
                                        if (rc)
                                                return rc;
                                }
                                if (!tx_queue->initialised)
                                        efx_init_tx_queue(tx_queue);
                                efx_init_tx_queue_core_txq(tx_queue);
                        }
                }
        } else {
                /* Reduce number of classes before number of queues */
                net_dev->num_tc = num_tc;
        }

        rc = netif_set_real_num_tx_queues(net_dev,
                                          max_t(int, num_tc, 1) *
                                          efx->n_tx_channels);
        if (rc)
                return rc;

        /* Do not destroy high-priority queues when they become
         * unused.  We would have to flush them first, and it is
         * fairly difficult to flush a subset of TX queues.  Leave
         * it to efx_fini_channels().
         */

        net_dev->num_tc = num_tc;
        return 0;
}

void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
{
        unsigned fill_level;
        struct efx_nic *efx = tx_queue->efx;
        struct efx_tx_queue *txq2;
        unsigned int pkts_compl = 0, bytes_compl = 0;

        EFX_WARN_ON_ONCE_PARANOID(index > tx_queue->ptr_mask);

        efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
        tx_queue->pkts_compl += pkts_compl;
        tx_queue->bytes_compl += bytes_compl;

        if (pkts_compl > 1)
                ++tx_queue->merge_events;

        /* See if we need to restart the netif queue.  This memory
         * barrier ensures that we write read_count (inside
         * efx_dequeue_buffers()) before reading the queue status.
         */
        smp_mb();
        if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
            likely(efx->port_enabled) &&
            likely(netif_device_present(efx->net_dev))) {
                txq2 = efx_tx_queue_partner(tx_queue);
                fill_level = max(tx_queue->insert_count - tx_queue->read_count,
                                 txq2->insert_count - txq2->read_count);
                if (fill_level <= efx->txq_wake_thresh)
                        netif_tx_wake_queue(tx_queue->core_txq);
        }

        /* Check whether the hardware queue is now empty */
        if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
                tx_queue->old_write_count = READ_ONCE(tx_queue->write_count);
                if (tx_queue->read_count == tx_queue->old_write_count) {
                        smp_mb();
                        tx_queue->empty_read_count =
                                tx_queue->read_count | EFX_EMPTY_COUNT_VALID;
                }
        }
}

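/* Number of copy-buffer pages needed to give every ring entry its own copy
 * buffer: each DMA-coherent page holds PAGE_SIZE >> EFX_TX_CB_ORDER buffers.
 */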
static unsigned int efx_tx_cb_page_count(struct efx_tx_queue *tx_queue)
{
        return DIV_ROUND_UP(tx_queue->ptr_mask + 1, PAGE_SIZE >> EFX_TX_CB_ORDER);
}

int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
{
        struct efx_nic *efx = tx_queue->efx;
        unsigned int entries;
        int rc;

        /* Create the smallest power-of-two aligned ring */
        entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE);
        EFX_WARN_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
        tx_queue->ptr_mask = entries - 1;

        netif_dbg(efx, probe, efx->net_dev,
                  "creating TX queue %d size %#x mask %#x\n",
                  tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);

        /* Allocate software ring */
        tx_queue->buffer = kcalloc(entries, sizeof(*tx_queue->buffer),
                                   GFP_KERNEL);
        if (!tx_queue->buffer)
                return -ENOMEM;

        tx_queue->cb_page = kcalloc(efx_tx_cb_page_count(tx_queue),
                                    sizeof(tx_queue->cb_page[0]), GFP_KERNEL);
        if (!tx_queue->cb_page) {
                rc = -ENOMEM;
                goto fail1;
        }

        /* Allocate hardware ring */
        rc = efx_nic_probe_tx(tx_queue);
        if (rc)
                goto fail2;

        return 0;

fail2:
        kfree(tx_queue->cb_page);
        tx_queue->cb_page = NULL;
fail1:
        kfree(tx_queue->buffer);
        tx_queue->buffer = NULL;
        return rc;
}

void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
{
        struct efx_nic *efx = tx_queue->efx;

        netif_dbg(efx, drv, efx->net_dev,
                  "initialising TX queue %d\n", tx_queue->queue);

        tx_queue->insert_count = 0;
        tx_queue->write_count = 0;
        tx_queue->packet_write_count = 0;
        tx_queue->old_write_count = 0;
        tx_queue->read_count = 0;
        tx_queue->old_read_count = 0;
        tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID;
        tx_queue->xmit_more_available = false;
        tx_queue->timestamping = (efx_ptp_use_mac_tx_timestamps(efx) &&
                                  tx_queue->channel == efx_ptp_channel(efx));
        tx_queue->completed_desc_ptr = tx_queue->ptr_mask;
        tx_queue->completed_timestamp_major = 0;
        tx_queue->completed_timestamp_minor = 0;

        /* Set up default function pointers.  These may get replaced by
         * efx_nic_init_tx() based off NIC/queue capabilities.
         */
        tx_queue->handle_tso = efx_enqueue_skb_tso;

        /* Set up TX descriptor ring */
        efx_nic_init_tx(tx_queue);

        tx_queue->initialised = true;
}

void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
{
        struct efx_tx_buffer *buffer;

        netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
                  "shutting down TX queue %d\n", tx_queue->queue);

        if (!tx_queue->buffer)
                return;

        /* Free any buffers left in the ring */
        while (tx_queue->read_count != tx_queue->write_count) {
                unsigned int pkts_compl = 0, bytes_compl = 0;
                buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
                efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);

                ++tx_queue->read_count;
        }
        tx_queue->xmit_more_available = false;
        netdev_tx_reset_queue(tx_queue->core_txq);
}

void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
{
        int i;

        if (!tx_queue->buffer)
                return;

        netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
                  "destroying TX queue %d\n", tx_queue->queue);
        efx_nic_remove_tx(tx_queue);

        if (tx_queue->cb_page) {
                for (i = 0; i < efx_tx_cb_page_count(tx_queue); i++)
                        efx_nic_free_buffer(tx_queue->efx,
                                            &tx_queue->cb_page[i]);
                kfree(tx_queue->cb_page);
                tx_queue->cb_page = NULL;
        }

        kfree(tx_queue->buffer);
        tx_queue->buffer = NULL;
}