// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2018 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include "net_driver.h"
#include <linux/module.h>
#include <linux/iommu.h>
#include "efx.h"
#include "nic.h"
#include "rx_common.h"

/* This is the percentage fill level below which new RX descriptors
 * will be added to the RX descriptor ring.
 */
static unsigned int rx_refill_threshold;
module_param(rx_refill_threshold, uint, 0444);
MODULE_PARM_DESC(rx_refill_threshold,
		 "RX descriptor ring refill threshold (%)");

/* Number of RX buffers to recycle pages for. When creating the RX page recycle
 * ring, this number is divided by the number of buffers per page to calculate
 * the number of pages to store in the RX page recycle ring.
 */
#define EFX_RECYCLE_RING_SIZE_IOMMU 4096
#define EFX_RECYCLE_RING_SIZE_NOIOMMU (2 * EFX_RX_PREFERRED_BATCH)
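
/* Worked example, assuming two RX buffers fit in each page: with an IOMMU
 * present the ring is sized for 4096 buffers, so efx_init_rx_recycle_ring()
 * below allocates roundup_pow_of_two(4096 / 2) = 2048 page slots; without an
 * IOMMU it allocates roundup_pow_of_two(2 * EFX_RX_PREFERRED_BATCH / 2) slots.
 * The two-buffers-per-page figure is only an assumption for the example.
 */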

/* RX maximum head room required.
 *
 * This must be at least 1 to prevent overflow, plus one packet-worth
 * to allow pipelined receives.
 */
#define EFX_RXD_HEAD_ROOM (1 + EFX_RX_MAX_FRAGS)
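
/* For example, if EFX_RX_MAX_FRAGS (defined elsewhere) happened to be 4,
 * EFX_RXD_HEAD_ROOM would be 5 descriptors and efx_init_rx_queue() would set
 * max_fill = rxq_entries - 5. The value 4 is assumed purely for illustration.
 */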

/* Check the RX page recycle ring for a page that can be reused. */
static struct page *efx_reuse_page(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	struct efx_rx_page_state *state;
	unsigned int index;
	struct page *page;

	index = rx_queue->page_remove & rx_queue->page_ptr_mask;
	page = rx_queue->page_ring[index];
	if (page == NULL)
		return NULL;

	rx_queue->page_ring[index] = NULL;
	/* page_remove cannot exceed page_add. */
	if (rx_queue->page_remove != rx_queue->page_add)
		++rx_queue->page_remove;

	/* If page_count is 1 then we hold the only reference to this page. */
	if (page_count(page) == 1) {
		++rx_queue->page_recycle_count;
		return page;
	} else {
		state = page_address(page);
		dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
			       PAGE_SIZE << efx->rx_buffer_order,
			       DMA_FROM_DEVICE);
		put_page(page);
		++rx_queue->page_recycle_failed;
	}

	return NULL;
}
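
/* Minimal sketch of the recycle-ring FIFO indexing used above, assuming a
 * page_ptr_mask of 3 (a four-slot ring):
 *
 *	index = page_remove & page_ptr_mask;	// e.g. 5 & 3 == 1
 *	page  = page_ring[index];
 *
 * page_add and page_remove are free-running counters; masking them selects a
 * slot, and page_remove only advances while it still lags page_add.
 */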

/* Attempt to recycle the page if there is an RX recycle ring; the page can
 * only be added if this is the final RX buffer, to prevent pages being used in
 * the descriptor ring and appearing in the recycle ring simultaneously.
 */
static void efx_recycle_rx_page(struct efx_channel *channel,
				struct efx_rx_buffer *rx_buf)
{
	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
	struct efx_nic *efx = rx_queue->efx;
	struct page *page = rx_buf->page;
	unsigned int index;

	/* Only recycle the page after processing the final buffer. */
	if (!(rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE))
		return;

	index = rx_queue->page_add & rx_queue->page_ptr_mask;
	if (rx_queue->page_ring[index] == NULL) {
		unsigned int read_index = rx_queue->page_remove &
			rx_queue->page_ptr_mask;

		/* The next slot in the recycle ring is available, but
		 * increment page_remove if the read pointer currently
		 * points here.
		 */
		if (read_index == index)
			++rx_queue->page_remove;
		rx_queue->page_ring[index] = page;
		++rx_queue->page_add;
		return;
	}
	++rx_queue->page_recycle_full;
	efx_unmap_rx_buffer(efx, rx_buf);
	put_page(rx_buf->page);
}

/* Recycle the pages that are used by buffers that have just been received. */
void efx_recycle_rx_pages(struct efx_channel *channel,
			  struct efx_rx_buffer *rx_buf,
			  unsigned int n_frags)
{
	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);

	do {
		efx_recycle_rx_page(channel, rx_buf);
		rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
	} while (--n_frags);
}

void efx_discard_rx_packet(struct efx_channel *channel,
			   struct efx_rx_buffer *rx_buf,
			   unsigned int n_frags)
{
	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);

	efx_recycle_rx_pages(channel, rx_buf, n_frags);

	efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
}

static void efx_init_rx_recycle_ring(struct efx_rx_queue *rx_queue)
{
	unsigned int bufs_in_recycle_ring, page_ring_size;
	struct efx_nic *efx = rx_queue->efx;

	/* Set the RX recycle ring size */
#ifdef CONFIG_PPC64
	bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU;
#else
	if (iommu_present(&pci_bus_type))
		bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU;
	else
		bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_NOIOMMU;
#endif /* CONFIG_PPC64 */

	page_ring_size = roundup_pow_of_two(bufs_in_recycle_ring /
					    efx->rx_bufs_per_page);
	rx_queue->page_ring = kcalloc(page_ring_size,
				      sizeof(*rx_queue->page_ring), GFP_KERNEL);
	rx_queue->page_ptr_mask = page_ring_size - 1;
}

static void efx_fini_rx_recycle_ring(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	int i;

	/* Unmap and release the pages in the recycle ring. Remove the ring. */
	for (i = 0; i <= rx_queue->page_ptr_mask; i++) {
		struct page *page = rx_queue->page_ring[i];
		struct efx_rx_page_state *state;

		if (page == NULL)
			continue;

		state = page_address(page);
		dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
			       PAGE_SIZE << efx->rx_buffer_order,
			       DMA_FROM_DEVICE);
		put_page(page);
	}
	kfree(rx_queue->page_ring);
	rx_queue->page_ring = NULL;
}

static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
			       struct efx_rx_buffer *rx_buf)
{
	/* Release the page reference we hold for the buffer. */
	if (rx_buf->page)
		put_page(rx_buf->page);

	/* If this is the last buffer in a page, unmap and free it. */
	if (rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE) {
		efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
		efx_free_rx_buffers(rx_queue, rx_buf, 1);
	}
	rx_buf->page = NULL;
}

int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned int entries;
	int rc;

	/* Create the smallest power-of-two aligned ring */
	entries = max(roundup_pow_of_two(efx->rxq_entries), EFX_MIN_DMAQ_SIZE);
	EFX_WARN_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
	rx_queue->ptr_mask = entries - 1;

	netif_dbg(efx, probe, efx->net_dev,
		  "creating RX queue %d size %#x mask %#x\n",
		  efx_rx_queue_index(rx_queue), efx->rxq_entries,
		  rx_queue->ptr_mask);

	/* Allocate RX buffers */
	rx_queue->buffer = kcalloc(entries, sizeof(*rx_queue->buffer),
				   GFP_KERNEL);
	if (!rx_queue->buffer)
		return -ENOMEM;

	rc = efx_nic_probe_rx(rx_queue);
	if (rc) {
		kfree(rx_queue->buffer);
		rx_queue->buffer = NULL;
	}

	return rc;
}
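
/* Worked example of the sizing above, using a made-up value: a requested
 * rxq_entries of 1000 rounds up to entries = 1024 (assuming EFX_MIN_DMAQ_SIZE
 * is no larger), giving ptr_mask = 0x3ff, so buffer indices can be computed
 * with "count & ptr_mask" instead of a modulo operation.
 */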

void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
{
	unsigned int max_fill, trigger, max_trigger;
	struct efx_nic *efx = rx_queue->efx;
	int rc = 0;

	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "initialising RX queue %d\n", efx_rx_queue_index(rx_queue));

	/* Initialise ptr fields */
	rx_queue->added_count = 0;
	rx_queue->notified_count = 0;
	rx_queue->removed_count = 0;
	rx_queue->min_fill = -1U;
	efx_init_rx_recycle_ring(rx_queue);

	rx_queue->page_remove = 0;
	rx_queue->page_add = rx_queue->page_ptr_mask + 1;
	rx_queue->page_recycle_count = 0;
	rx_queue->page_recycle_failed = 0;
	rx_queue->page_recycle_full = 0;

	/* Initialise limit fields */
	max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM;
	max_trigger =
		max_fill - efx->rx_pages_per_batch * efx->rx_bufs_per_page;
	if (rx_refill_threshold != 0) {
		trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
		if (trigger > max_trigger)
			trigger = max_trigger;
	} else {
		trigger = max_trigger;
	}

	rx_queue->max_fill = max_fill;
	rx_queue->fast_fill_trigger = trigger;
	rx_queue->refill_enabled = true;

	/* Initialise XDP queue information */
	rc = xdp_rxq_info_reg(&rx_queue->xdp_rxq_info, efx->net_dev,
			      rx_queue->core_index);

	if (rc) {
		netif_err(efx, rx_err, efx->net_dev,
			  "Failure to initialise XDP queue information rc=%d\n",
			  rc);
		efx->xdp_rxq_info_failed = true;
	} else {
		rx_queue->xdp_rxq_info_valid = true;
	}

	/* Set up RX descriptor ring */
	efx_nic_init_rx(rx_queue);
}

void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_rx_buffer *rx_buf;
	int i;

	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "shutting down RX queue %d\n", efx_rx_queue_index(rx_queue));

	del_timer_sync(&rx_queue->slow_fill);

	/* Release RX buffers from the current read ptr to the write ptr */
	if (rx_queue->buffer) {
		for (i = rx_queue->removed_count; i < rx_queue->added_count;
		     i++) {
			unsigned int index = i & rx_queue->ptr_mask;

			rx_buf = efx_rx_buffer(rx_queue, index);
			efx_fini_rx_buffer(rx_queue, rx_buf);
		}
	}

	efx_fini_rx_recycle_ring(rx_queue);

	if (rx_queue->xdp_rxq_info_valid)
		xdp_rxq_info_unreg(&rx_queue->xdp_rxq_info);

	rx_queue->xdp_rxq_info_valid = false;
}

void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
{
	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "destroying RX queue %d\n", efx_rx_queue_index(rx_queue));

	efx_nic_remove_rx(rx_queue);

	kfree(rx_queue->buffer);
	rx_queue->buffer = NULL;
}

/* Unmap a DMA-mapped page. This function is only called for the final RX
 * buffer in a page.
 */
void efx_unmap_rx_buffer(struct efx_nic *efx,
			 struct efx_rx_buffer *rx_buf)
{
	struct page *page = rx_buf->page;

	if (page) {
		struct efx_rx_page_state *state = page_address(page);

		dma_unmap_page(&efx->pci_dev->dev,
			       state->dma_addr,
			       PAGE_SIZE << efx->rx_buffer_order,
			       DMA_FROM_DEVICE);
	}
}

void efx_free_rx_buffers(struct efx_rx_queue *rx_queue,
			 struct efx_rx_buffer *rx_buf,
			 unsigned int num_bufs)
{
	do {
		if (rx_buf->page) {
			put_page(rx_buf->page);
			rx_buf->page = NULL;
		}
		rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
	} while (--num_bufs);
}

void efx_rx_slow_fill(struct timer_list *t)
{
	struct efx_rx_queue *rx_queue = from_timer(rx_queue, t, slow_fill);

	/* Post an event to cause NAPI to run and refill the queue */
	efx_nic_generate_fill_event(rx_queue);
	++rx_queue->slow_fill_count;
}

void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue)
{
	mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(10));
}

/* efx_init_rx_buffers - create EFX_RX_BATCH page-based RX buffers
 *
 * @rx_queue:	Efx RX queue
 *
 * This allocates a batch of pages, maps them for DMA, and populates
 * struct efx_rx_buffers for each one. Return a negative error code or
 * 0 on success. If a single page can be used for multiple buffers,
 * then the page will either be inserted fully, or not at all.
 */
static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue, bool atomic)
{
	unsigned int page_offset, index, count;
	struct efx_nic *efx = rx_queue->efx;
	struct efx_rx_page_state *state;
	struct efx_rx_buffer *rx_buf;
	dma_addr_t dma_addr;
	struct page *page;

	count = 0;
	do {
		page = efx_reuse_page(rx_queue);
		if (page == NULL) {
			page = alloc_pages(__GFP_COMP |
					   (atomic ? GFP_ATOMIC : GFP_KERNEL),
					   efx->rx_buffer_order);
			if (unlikely(page == NULL))
				return -ENOMEM;
			dma_addr =
				dma_map_page(&efx->pci_dev->dev, page, 0,
					     PAGE_SIZE << efx->rx_buffer_order,
					     DMA_FROM_DEVICE);
			if (unlikely(dma_mapping_error(&efx->pci_dev->dev,
						       dma_addr))) {
				__free_pages(page, efx->rx_buffer_order);
				return -EIO;
			}
			state = page_address(page);
			state->dma_addr = dma_addr;
		} else {
			state = page_address(page);
			dma_addr = state->dma_addr;
		}

		dma_addr += sizeof(struct efx_rx_page_state);
		page_offset = sizeof(struct efx_rx_page_state);

		do {
			index = rx_queue->added_count & rx_queue->ptr_mask;
			rx_buf = efx_rx_buffer(rx_queue, index);
			rx_buf->dma_addr = dma_addr + efx->rx_ip_align +
					   EFX_XDP_HEADROOM;
			rx_buf->page = page;
			rx_buf->page_offset = page_offset + efx->rx_ip_align +
					      EFX_XDP_HEADROOM;
			rx_buf->len = efx->rx_dma_len;
			rx_buf->flags = 0;
			++rx_queue->added_count;
			get_page(page);
			dma_addr += efx->rx_page_buf_step;
			page_offset += efx->rx_page_buf_step;
		} while (page_offset + efx->rx_page_buf_step <= PAGE_SIZE);

		rx_buf->flags = EFX_RX_BUF_LAST_IN_PAGE;
	} while (++count < efx->rx_pages_per_batch);

	return 0;
}

void efx_rx_config_page_split(struct efx_nic *efx)
{
	efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + efx->rx_ip_align +
				      EFX_XDP_HEADROOM + EFX_XDP_TAILROOM,
				      EFX_RX_BUF_ALIGNMENT);
	efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 :
		((PAGE_SIZE - sizeof(struct efx_rx_page_state)) /
		 efx->rx_page_buf_step);
	efx->rx_buffer_truesize = (PAGE_SIZE << efx->rx_buffer_order) /
		efx->rx_bufs_per_page;
	efx->rx_pages_per_batch = DIV_ROUND_UP(EFX_RX_PREFERRED_BATCH,
					       efx->rx_bufs_per_page);
}
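
/* Worked example of the page-split arithmetic above, using assumed values
 * only: with rx_dma_len = 1536, rx_ip_align = 2, EFX_XDP_HEADROOM +
 * EFX_XDP_TAILROOM totalling 320 and EFX_RX_BUF_ALIGNMENT = 64,
 *
 *	rx_page_buf_step   = ALIGN(1536 + 2 + 320, 64) = 1920
 *	rx_bufs_per_page   = (4096 - sizeof(struct efx_rx_page_state)) / 1920
 *			   = 2	(on a 4096-byte, order-0 page)
 *	rx_buffer_truesize = 4096 / 2 = 2048
 *
 * None of these numbers is guaranteed; they only illustrate how the fields
 * relate to one another.
 */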

/* efx_fast_push_rx_descriptors - push new RX descriptors quickly
 * @rx_queue: RX descriptor queue
 *
 * This will aim to fill the RX descriptor queue up to
 * @rx_queue->max_fill. If there is insufficient atomic
 * memory to do so, a slow fill will be scheduled.
 *
 * The caller must provide serialisation (none is used here). In practice,
 * this means this function must run from the NAPI handler, or be called
 * when NAPI is disabled.
 */
void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned int fill_level, batch_size;
	int space, rc = 0;

	if (!rx_queue->refill_enabled)
		return;

	/* Calculate current fill level, and exit if we don't need to fill */
	fill_level = (rx_queue->added_count - rx_queue->removed_count);
	EFX_WARN_ON_ONCE_PARANOID(fill_level > rx_queue->efx->rxq_entries);
	if (fill_level >= rx_queue->fast_fill_trigger)
		goto out;

	/* Record minimum fill level */
	if (unlikely(fill_level < rx_queue->min_fill)) {
		if (fill_level)
			rx_queue->min_fill = fill_level;
	}

	batch_size = efx->rx_pages_per_batch * efx->rx_bufs_per_page;
	space = rx_queue->max_fill - fill_level;
	EFX_WARN_ON_ONCE_PARANOID(space < batch_size);

	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
		   "RX queue %d fast-filling descriptor ring from"
		   " level %d to level %d\n",
		   efx_rx_queue_index(rx_queue), fill_level,
		   rx_queue->max_fill);

	do {
		rc = efx_init_rx_buffers(rx_queue, atomic);
		if (unlikely(rc)) {
			/* Ensure that we don't leave the rx queue empty */
			efx_schedule_slow_fill(rx_queue);
			goto out;
		}
	} while ((space -= batch_size) >= batch_size);

	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
		   "RX queue %d fast-filled descriptor ring "
		   "to level %d\n", efx_rx_queue_index(rx_queue),
		   rx_queue->added_count - rx_queue->removed_count);

out:
	if (rx_queue->notified_count != rx_queue->added_count)
		efx_nic_notify_rx_desc(rx_queue);
}
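
/* Sketch of the refill loop above with assumed numbers: if rx_bufs_per_page
 * is 2 and rx_pages_per_batch is 4, then batch_size is 8 buffers per call to
 * efx_init_rx_buffers(). With space = 20 the loop runs while
 * (space -= 8) >= 8, i.e. twice, adding 16 buffers and leaving the remainder
 * for a later refill.
 */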

/* Pass a received packet up through GRO. GRO can handle pages
 * regardless of checksum state and skbs with a good checksum.
 */
void
efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
		  unsigned int n_frags, u8 *eh, __wsum csum)
{
	struct napi_struct *napi = &channel->napi_str;
	struct efx_nic *efx = channel->efx;
	struct sk_buff *skb;

	skb = napi_get_frags(napi);
	if (unlikely(!skb)) {
		struct efx_rx_queue *rx_queue;

		rx_queue = efx_channel_get_rx_queue(channel);
		efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
		return;
	}

	if (efx->net_dev->features & NETIF_F_RXHASH &&
	    efx_rx_buf_hash_valid(efx, eh))
		skb_set_hash(skb, efx_rx_buf_hash(efx, eh),
			     PKT_HASH_TYPE_L3);
	if (csum) {
		skb->csum = csum;
		skb->ip_summed = CHECKSUM_COMPLETE;
	} else {
		skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ?
				  CHECKSUM_UNNECESSARY : CHECKSUM_NONE);
	}
	skb->csum_level = !!(rx_buf->flags & EFX_RX_PKT_CSUM_LEVEL);

	for (;;) {
		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
				   rx_buf->page, rx_buf->page_offset,
				   rx_buf->len);
		rx_buf->page = NULL;
		skb->len += rx_buf->len;
		if (skb_shinfo(skb)->nr_frags == n_frags)
			break;

		rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
	}

	skb->data_len = skb->len;
	skb->truesize += n_frags * efx->rx_buffer_truesize;

	skb_record_rx_queue(skb, channel->rx_queue.core_index);

	napi_gro_frags(napi);
}

/* RSS contexts. We're using linked lists and crappy O(n) algorithms, because
 * (a) this is an infrequent control-plane operation and (b) n is small (max 64)
 */
struct efx_rss_context *efx_alloc_rss_context_entry(struct efx_nic *efx)
{
	struct list_head *head = &efx->rss_context.list;
	struct efx_rss_context *ctx, *new;
	u32 id = 1; /* Don't use zero, that refers to the master RSS context */

	WARN_ON(!mutex_is_locked(&efx->rss_lock));

	/* Search for first gap in the numbering */
	list_for_each_entry(ctx, head, list) {
		if (ctx->user_id != id)
			break;
		id++;
		/* Check for wrap. If this happens, we have nearly 2^32
		 * allocated RSS contexts, which seems unlikely.
		 */
		if (WARN_ON_ONCE(!id))
			return NULL;
	}

	/* Create the new entry */
	new = kmalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return NULL;
	new->context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
	new->rx_hash_udp_4tuple = false;

	/* Insert the new entry into the gap */
	new->user_id = id;
	list_add_tail(&new->list, &ctx->list);
	return new;
}
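
/* Example of the gap search above: if the existing contexts have user_id 1, 2
 * and 4, the loop breaks at the entry with user_id 4 (id has reached 3) and
 * list_add_tail() inserts the new context just before it with user_id 3.
 * This is O(n), which is fine for the small n noted in the comment above.
 */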

struct efx_rss_context *efx_find_rss_context_entry(struct efx_nic *efx, u32 id)
{
	struct list_head *head = &efx->rss_context.list;
	struct efx_rss_context *ctx;

	WARN_ON(!mutex_is_locked(&efx->rss_lock));

	list_for_each_entry(ctx, head, list)
		if (ctx->user_id == id)
			return ctx;
	return NULL;
}

void efx_free_rss_context_entry(struct efx_rss_context *ctx)
{
	list_del(&ctx->list);
	kfree(ctx);
}

void efx_set_default_rx_indir_table(struct efx_nic *efx,
				    struct efx_rss_context *ctx)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(ctx->rx_indir_table); i++)
		ctx->rx_indir_table[i] =
			ethtool_rxfh_indir_default(i, efx->rss_spread);
}
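
/* ethtool_rxfh_indir_default() spreads entries round-robin (the index modulo
 * the number of RX rings), so with an assumed rss_spread of 4 the table above
 * becomes 0, 1, 2, 3, 0, 1, 2, 3, ... and flows are spread evenly across the
 * first four RX queues.
 */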

/**
 * efx_filter_is_mc_recipient - test whether spec is a multicast recipient
 * @spec: Specification to test
 *
 * Return: %true if the specification is a non-drop RX filter that
 * matches a local MAC address I/G bit value of 1 or matches a local
 * IPv4 or IPv6 address value in the respective multicast address
 * range. Otherwise %false.
 */
bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec)
{
	if (!(spec->flags & EFX_FILTER_FLAG_RX) ||
	    spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP)
		return false;

	if (spec->match_flags &
	    (EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG) &&
	    is_multicast_ether_addr(spec->loc_mac))
		return true;

	if ((spec->match_flags &
	     (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) ==
	    (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) {
		if (spec->ether_type == htons(ETH_P_IP) &&
		    ipv4_is_multicast(spec->loc_host[0]))
			return true;
		if (spec->ether_type == htons(ETH_P_IPV6) &&
		    ((const u8 *)spec->loc_host)[0] == 0xff)
			return true;
	}

	return false;
}

bool efx_filter_spec_equal(const struct efx_filter_spec *left,
			   const struct efx_filter_spec *right)
{
	if ((left->match_flags ^ right->match_flags) |
	    ((left->flags ^ right->flags) &
	     (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)))
		return false;

	return memcmp(&left->outer_vid, &right->outer_vid,
		      sizeof(struct efx_filter_spec) -
		      offsetof(struct efx_filter_spec, outer_vid)) == 0;
}

u32 efx_filter_spec_hash(const struct efx_filter_spec *spec)
{
	BUILD_BUG_ON(offsetof(struct efx_filter_spec, outer_vid) & 3);
	return jhash2((const u32 *)&spec->outer_vid,
		      (sizeof(struct efx_filter_spec) -
		       offsetof(struct efx_filter_spec, outer_vid)) / 4,
		      0);
}
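
/* efx_filter_spec_hash() hashes the spec from the outer_vid member to the end
 * of the structure; efx_filter_spec_equal() compares the same region plus
 * match_flags and the RX/TX flag bits, so two specs that compare equal always
 * hash to the same value. A minimal usage sketch, mirroring what
 * efx_rps_hash_bucket()/efx_rps_hash_find() below do:
 *
 *	u32 bucket = efx_filter_spec_hash(spec) % EFX_ARFS_HASH_TABLE_SIZE;
 *	// walk the hlist in that bucket, matching with efx_filter_spec_equal()
 */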

#ifdef CONFIG_RFS_ACCEL
bool efx_rps_check_rule(struct efx_arfs_rule *rule, unsigned int filter_idx,
			bool *force)
{
	if (rule->filter_id == EFX_ARFS_FILTER_ID_PENDING) {
		/* ARFS is currently updating this entry, leave it */
		return false;
	}
	if (rule->filter_id == EFX_ARFS_FILTER_ID_ERROR) {
		/* ARFS tried and failed to update this, so it's probably out
		 * of date. Remove the filter and the ARFS rule entry.
		 */
		rule->filter_id = EFX_ARFS_FILTER_ID_REMOVING;
		*force = true;
		return true;
	} else if (WARN_ON(rule->filter_id != filter_idx)) { /* can't happen */
		/* ARFS has moved on, so old filter is not needed. Since we did
		 * not mark the rule with EFX_ARFS_FILTER_ID_REMOVING, it will
		 * not be removed by efx_rps_hash_del() subsequently.
		 */
		*force = true;
		return true;
	}
	/* Remove it iff ARFS wants to. */
	return true;
}

static
struct hlist_head *efx_rps_hash_bucket(struct efx_nic *efx,
				       const struct efx_filter_spec *spec)
{
	u32 hash = efx_filter_spec_hash(spec);

	lockdep_assert_held(&efx->rps_hash_lock);
	if (!efx->rps_hash_table)
		return NULL;
	return &efx->rps_hash_table[hash % EFX_ARFS_HASH_TABLE_SIZE];
}

struct efx_arfs_rule *efx_rps_hash_find(struct efx_nic *efx,
					const struct efx_filter_spec *spec)
{
	struct efx_arfs_rule *rule;
	struct hlist_head *head;
	struct hlist_node *node;

	head = efx_rps_hash_bucket(efx, spec);
	if (!head)
		return NULL;
	hlist_for_each(node, head) {
		rule = container_of(node, struct efx_arfs_rule, node);
		if (efx_filter_spec_equal(spec, &rule->spec))
			return rule;
	}
	return NULL;
}

struct efx_arfs_rule *efx_rps_hash_add(struct efx_nic *efx,
				       const struct efx_filter_spec *spec,
				       bool *new)
{
	struct efx_arfs_rule *rule;
	struct hlist_head *head;
	struct hlist_node *node;

	head = efx_rps_hash_bucket(efx, spec);
	if (!head)
		return NULL;
	hlist_for_each(node, head) {
		rule = container_of(node, struct efx_arfs_rule, node);
		if (efx_filter_spec_equal(spec, &rule->spec)) {
			*new = false;
			return rule;
		}
	}
	rule = kmalloc(sizeof(*rule), GFP_ATOMIC);
	*new = true;
	if (rule) {
		memcpy(&rule->spec, spec, sizeof(rule->spec));
		hlist_add_head(&rule->node, head);
	}
	return rule;
}

void efx_rps_hash_del(struct efx_nic *efx, const struct efx_filter_spec *spec)
{
	struct efx_arfs_rule *rule;
	struct hlist_head *head;
	struct hlist_node *node;

	head = efx_rps_hash_bucket(efx, spec);
	if (WARN_ON(!head))
		return;
	hlist_for_each(node, head) {
		rule = container_of(node, struct efx_arfs_rule, node);
		if (efx_filter_spec_equal(spec, &rule->spec)) {
			/* Someone already reused the entry. We know that if
			 * this check doesn't fire (i.e. filter_id == REMOVING)
			 * then the REMOVING mark was put there by our caller,
			 * because caller is holding a lock on filter table and
			 * only holders of that lock set REMOVING.
			 */
			if (rule->filter_id != EFX_ARFS_FILTER_ID_REMOVING)
				return;
			hlist_del(node);
			kfree(rule);
			return;
		}
	}
	/* We didn't find it. */
	WARN_ON(1);
}
#endif

int efx_probe_filters(struct efx_nic *efx)
{
	int rc;

	mutex_lock(&efx->mac_lock);
	down_write(&efx->filter_sem);
	rc = efx->type->filter_table_probe(efx);
	if (rc)
		goto out_unlock;

#ifdef CONFIG_RFS_ACCEL
	if (efx->type->offload_features & NETIF_F_NTUPLE) {
		struct efx_channel *channel;
		int i, success = 1;

		efx_for_each_channel(channel, efx) {
			channel->rps_flow_id =
				kcalloc(efx->type->max_rx_ip_filters,
					sizeof(*channel->rps_flow_id),
					GFP_KERNEL);
			if (!channel->rps_flow_id)
				success = 0;
			else
				for (i = 0;
				     i < efx->type->max_rx_ip_filters;
				     ++i)
					channel->rps_flow_id[i] =
						RPS_FLOW_ID_INVALID;
			channel->rfs_expire_index = 0;
			channel->rfs_filter_count = 0;
		}

		if (!success) {
			efx_for_each_channel(channel, efx)
				kfree(channel->rps_flow_id);
			efx->type->filter_table_remove(efx);
			rc = -ENOMEM;
			goto out_unlock;
		}
	}
#endif
out_unlock:
	up_write(&efx->filter_sem);
	mutex_unlock(&efx->mac_lock);
	return rc;
}

void efx_remove_filters(struct efx_nic *efx)
{
#ifdef CONFIG_RFS_ACCEL
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx) {
		cancel_delayed_work_sync(&channel->filter_work);
		kfree(channel->rps_flow_id);
		channel->rps_flow_id = NULL;
	}
#endif
	down_write(&efx->filter_sem);
	efx->type->filter_table_remove(efx);
	up_write(&efx->filter_sem);
}

#ifdef CONFIG_RFS_ACCEL

static void efx_filter_rfs_work(struct work_struct *data)
{
	struct efx_async_filter_insertion *req = container_of(data, struct efx_async_filter_insertion,
							      work);
	struct efx_nic *efx = netdev_priv(req->net_dev);
	struct efx_channel *channel = efx_get_channel(efx, req->rxq_index);
	int slot_idx = req - efx->rps_slot;
	struct efx_arfs_rule *rule;
	u16 arfs_id = 0;
	int rc;

	rc = efx->type->filter_insert(efx, &req->spec, true);
	if (rc >= 0)
		/* Discard 'priority' part of EF10+ filter ID (mcdi_filters) */
		rc %= efx->type->max_rx_ip_filters;
	if (efx->rps_hash_table) {
		spin_lock_bh(&efx->rps_hash_lock);
		rule = efx_rps_hash_find(efx, &req->spec);
		/* The rule might have already gone, if someone else's request
		 * for the same spec was already worked and then expired before
		 * we got around to our work. In that case we have nothing
		 * tying us to an arfs_id, meaning that as soon as the filter
		 * is considered for expiry it will be removed.
		 */
		if (rule) {
			if (rc < 0)
				rule->filter_id = EFX_ARFS_FILTER_ID_ERROR;
			else
				rule->filter_id = rc;
			arfs_id = rule->arfs_id;
		}
		spin_unlock_bh(&efx->rps_hash_lock);
	}
	if (rc >= 0) {
		/* Remember this so we can check whether to expire the filter
		 * later.
		 */
		mutex_lock(&efx->rps_mutex);
		if (channel->rps_flow_id[rc] == RPS_FLOW_ID_INVALID)
			channel->rfs_filter_count++;
		channel->rps_flow_id[rc] = req->flow_id;
		mutex_unlock(&efx->rps_mutex);

		if (req->spec.ether_type == htons(ETH_P_IP))
			netif_info(efx, rx_status, efx->net_dev,
				   "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d id %u]\n",
				   (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
				   req->spec.rem_host, ntohs(req->spec.rem_port),
				   req->spec.loc_host, ntohs(req->spec.loc_port),
				   req->rxq_index, req->flow_id, rc, arfs_id);
		else
			netif_info(efx, rx_status, efx->net_dev,
				   "steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d id %u]\n",
				   (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
				   req->spec.rem_host, ntohs(req->spec.rem_port),
				   req->spec.loc_host, ntohs(req->spec.loc_port),
				   req->rxq_index, req->flow_id, rc, arfs_id);
		channel->n_rfs_succeeded++;
	} else {
		if (req->spec.ether_type == htons(ETH_P_IP))
			netif_dbg(efx, rx_status, efx->net_dev,
				  "failed to steer %s %pI4:%u:%pI4:%u to queue %u [flow %u rc %d id %u]\n",
				  (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
				  req->spec.rem_host, ntohs(req->spec.rem_port),
				  req->spec.loc_host, ntohs(req->spec.loc_port),
				  req->rxq_index, req->flow_id, rc, arfs_id);
		else
			netif_dbg(efx, rx_status, efx->net_dev,
				  "failed to steer %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u rc %d id %u]\n",
				  (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
				  req->spec.rem_host, ntohs(req->spec.rem_port),
				  req->spec.loc_host, ntohs(req->spec.loc_port),
				  req->rxq_index, req->flow_id, rc, arfs_id);
		channel->n_rfs_failed++;
		/* We're overloading the NIC's filter tables, so let's do a
		 * chunk of extra expiry work.
		 */
		__efx_filter_rfs_expire(channel, min(channel->rfs_filter_count,
						     100u));
	}

	/* Release references */
	clear_bit(slot_idx, &efx->rps_slot_map);
	dev_put(req->net_dev);
}

int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
		   u16 rxq_index, u32 flow_id)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_async_filter_insertion *req;
	struct efx_arfs_rule *rule;
	struct flow_keys fk;
	int slot_idx;
	bool new;
	int rc;

	/* find a free slot */
	for (slot_idx = 0; slot_idx < EFX_RPS_MAX_IN_FLIGHT; slot_idx++)
		if (!test_and_set_bit(slot_idx, &efx->rps_slot_map))
			break;
	if (slot_idx >= EFX_RPS_MAX_IN_FLIGHT)
		return -EBUSY;

	if (flow_id == RPS_FLOW_ID_INVALID) {
		rc = -EINVAL;
		goto out_clear;
	}

	if (!skb_flow_dissect_flow_keys(skb, &fk, 0)) {
		rc = -EPROTONOSUPPORT;
		goto out_clear;
	}

	if (fk.basic.n_proto != htons(ETH_P_IP) && fk.basic.n_proto != htons(ETH_P_IPV6)) {
		rc = -EPROTONOSUPPORT;
		goto out_clear;
	}
	if (fk.control.flags & FLOW_DIS_IS_FRAGMENT) {
		rc = -EPROTONOSUPPORT;
		goto out_clear;
	}

	req = efx->rps_slot + slot_idx;
	efx_filter_init_rx(&req->spec, EFX_FILTER_PRI_HINT,
			   efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0,
			   rxq_index);
	req->spec.match_flags =
		EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
		EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
		EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT;
	req->spec.ether_type = fk.basic.n_proto;
	req->spec.ip_proto = fk.basic.ip_proto;

	if (fk.basic.n_proto == htons(ETH_P_IP)) {
		req->spec.rem_host[0] = fk.addrs.v4addrs.src;
		req->spec.loc_host[0] = fk.addrs.v4addrs.dst;
	} else {
		memcpy(req->spec.rem_host, &fk.addrs.v6addrs.src,
		       sizeof(struct in6_addr));
		memcpy(req->spec.loc_host, &fk.addrs.v6addrs.dst,
		       sizeof(struct in6_addr));
	}

	req->spec.rem_port = fk.ports.src;
	req->spec.loc_port = fk.ports.dst;

	if (efx->rps_hash_table) {
		/* Add it to ARFS hash table */
		spin_lock(&efx->rps_hash_lock);
		rule = efx_rps_hash_add(efx, &req->spec, &new);
		if (!rule) {
			rc = -ENOMEM;
			goto out_unlock;
		}
		if (new)
			rule->arfs_id = efx->rps_next_id++ % RPS_NO_FILTER;
		rc = rule->arfs_id;
		/* Skip if existing or pending filter already does the right thing */
		if (!new && rule->rxq_index == rxq_index &&
		    rule->filter_id >= EFX_ARFS_FILTER_ID_PENDING)
			goto out_unlock;
		rule->rxq_index = rxq_index;
		rule->filter_id = EFX_ARFS_FILTER_ID_PENDING;
		spin_unlock(&efx->rps_hash_lock);
	} else {
		/* Without an ARFS hash table, we just use arfs_id 0 for all
		 * filters. This means if multiple flows hash to the same
		 * flow_id, all but the most recently touched will be eligible
		 * for expiry.
		 */
		rc = 0;
	}

	/* Queue the request */
	dev_hold(req->net_dev = net_dev);
	INIT_WORK(&req->work, efx_filter_rfs_work);
	req->rxq_index = rxq_index;
	req->flow_id = flow_id;
	schedule_work(&req->work);
	return rc;
out_unlock:
	spin_unlock(&efx->rps_hash_lock);
out_clear:
	clear_bit(slot_idx, &efx->rps_slot_map);
	return rc;
}
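
/* efx_filter_rfs() above is the driver's accelerated-RFS entry point; it is
 * wired up as the net_device's ndo_rx_flow_steer callback (the hook-up lives
 * outside this file). The value it returns (rule->arfs_id, or 0 when no hash
 * table is in use) is handed back to the RFS core, while the actual hardware
 * filter insertion happens asynchronously in efx_filter_rfs_work().
 */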

bool __efx_filter_rfs_expire(struct efx_channel *channel, unsigned int quota)
{
	bool (*expire_one)(struct efx_nic *efx, u32 flow_id, unsigned int index);
	struct efx_nic *efx = channel->efx;
	unsigned int index, size, start;
	u32 flow_id;

	if (!mutex_trylock(&efx->rps_mutex))
		return false;
	expire_one = efx->type->filter_rfs_expire_one;
	index = channel->rfs_expire_index;
	start = index;
	size = efx->type->max_rx_ip_filters;
	while (quota) {
		flow_id = channel->rps_flow_id[index];

		if (flow_id != RPS_FLOW_ID_INVALID) {
			quota--;
			if (expire_one(efx, flow_id, index)) {
				netif_info(efx, rx_status, efx->net_dev,
					   "expired filter %d [channel %u flow %u]\n",
					   index, channel->channel, flow_id);
				channel->rps_flow_id[index] = RPS_FLOW_ID_INVALID;
				channel->rfs_filter_count--;
			}
		}
		if (++index == size)
			index = 0;
		/* If we were called with a quota that exceeds the total number
		 * of filters in the table (which shouldn't happen, but could
		 * if two callers race), ensure that we don't loop forever -
		 * stop when we've examined every row of the table.
		 */
		if (index == start)
			break;
	}

	channel->rfs_expire_index = index;
	mutex_unlock(&efx->rps_mutex);
	return true;
}
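
/* Illustrative walk-through with assumed numbers: with quota = 100 and a
 * filter table of size = 512, the scan above starts at rfs_expire_index,
 * spends quota only on rows whose flow_id is valid, and stops either when 100
 * candidates have been examined or when it wraps back to the starting row.
 * The next call resumes from wherever rfs_expire_index was left.
 */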

#endif /* CONFIG_RFS_ACCEL */
