Lines matching the identifier "rx" (query: full:rx) in the gve driver Rx path (gve_rx.c)

22 static void gve_rx_unfill_pages(struct gve_priv *priv, struct gve_rx_ring *rx)  in gve_rx_unfill_pages()  argument
24 if (rx->data.raw_addressing) { in gve_rx_unfill_pages()
25 u32 slots = rx->mask + 1; in gve_rx_unfill_pages()
29 gve_rx_free_buffer(&priv->pdev->dev, &rx->data.page_info[i], in gve_rx_unfill_pages()
30 &rx->data.data_ring[i]); in gve_rx_unfill_pages()
32 gve_unassign_qpl(priv, rx->data.qpl->id); in gve_rx_unfill_pages()
33 rx->data.qpl = NULL; in gve_rx_unfill_pages()
35 kvfree(rx->data.page_info); in gve_rx_unfill_pages()
36 rx->data.page_info = NULL; in gve_rx_unfill_pages()
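These matching lines are easier to read in context. A sketch of how they plausibly fit together; only the quoted lines come from the listing, the surrounding loop and if/else structure are assumptions:

static void gve_rx_unfill_pages(struct gve_priv *priv, struct gve_rx_ring *rx)
{
        if (rx->data.raw_addressing) {
                u32 slots = rx->mask + 1;
                int i;

                /* Raw-addressing rings own one DMA-mapped page per slot:
                 * unmap and free each of them.
                 */
                for (i = 0; i < slots; i++)
                        gve_rx_free_buffer(&priv->pdev->dev,
                                           &rx->data.page_info[i],
                                           &rx->data.data_ring[i]);
        } else {
                /* QPL rings borrow pages from a pre-registered queue page
                 * list; release the list back to the pool instead.
                 */
                gve_unassign_qpl(priv, rx->data.qpl->id);
                rx->data.qpl = NULL;
        }
        kvfree(rx->data.page_info);
        rx->data.page_info = NULL;
}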
41 struct gve_rx_ring *rx = &priv->rx[idx]; in gve_rx_free_ring() local
43 u32 slots = rx->mask + 1; in gve_rx_free_ring()
49 dma_free_coherent(dev, bytes, rx->desc.desc_ring, rx->desc.bus); in gve_rx_free_ring()
50 rx->desc.desc_ring = NULL; in gve_rx_free_ring()
52 dma_free_coherent(dev, sizeof(*rx->q_resources), in gve_rx_free_ring()
53 rx->q_resources, rx->q_resources_bus); in gve_rx_free_ring()
54 rx->q_resources = NULL; in gve_rx_free_ring()
56 gve_rx_unfill_pages(priv, rx); in gve_rx_free_ring()
58 bytes = sizeof(*rx->data.data_ring) * slots; in gve_rx_free_ring()
59 dma_free_coherent(dev, bytes, rx->data.data_ring, in gve_rx_free_ring()
60 rx->data.data_bus); in gve_rx_free_ring()
61 rx->data.data_ring = NULL; in gve_rx_free_ring()
62 netif_dbg(priv, drv, priv->dev, "freed rx ring %d\n", idx); in gve_rx_free_ring()
90 static int gve_prefill_rx_pages(struct gve_rx_ring *rx) in gve_prefill_rx_pages() argument
92 struct gve_priv *priv = rx->gve; in gve_prefill_rx_pages()
97 /* Allocate one page per Rx queue slot. Each page is split into two in gve_prefill_rx_pages()
100 slots = rx->mask + 1; in gve_prefill_rx_pages()
102 rx->data.page_info = kvzalloc(slots * in gve_prefill_rx_pages()
103 sizeof(*rx->data.page_info), GFP_KERNEL); in gve_prefill_rx_pages()
104 if (!rx->data.page_info) in gve_prefill_rx_pages()
107 if (!rx->data.raw_addressing) { in gve_prefill_rx_pages()
108 rx->data.qpl = gve_assign_rx_qpl(priv); in gve_prefill_rx_pages()
109 if (!rx->data.qpl) { in gve_prefill_rx_pages()
110 kvfree(rx->data.page_info); in gve_prefill_rx_pages()
111 rx->data.page_info = NULL; in gve_prefill_rx_pages()
116 if (!rx->data.raw_addressing) { in gve_prefill_rx_pages()
117 struct page *page = rx->data.qpl->pages[i]; in gve_prefill_rx_pages()
120 gve_setup_rx_buffer(&rx->data.page_info[i], addr, page, in gve_prefill_rx_pages()
121 &rx->data.data_ring[i].qpl_offset); in gve_prefill_rx_pages()
124 err = gve_rx_alloc_buffer(priv, &priv->pdev->dev, &rx->data.page_info[i], in gve_prefill_rx_pages()
125 &rx->data.data_ring[i]); in gve_prefill_rx_pages()
134 &rx->data.page_info[i], in gve_prefill_rx_pages()
135 &rx->data.data_ring[i]); in gve_prefill_rx_pages()
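The fill loop these lines belong to is clearer in one piece. A sketch, with the loop structure, the error unwind, and the addr = i * PAGE_SIZE offset assumed rather than quoted:

        for (i = 0; i < slots; i++) {
                if (!rx->data.raw_addressing) {
                        /* QPL mode: slot i uses page i of the queue page
                         * list and the device addresses it by offset.
                         */
                        struct page *page = rx->data.qpl->pages[i];
                        dma_addr_t addr = i * PAGE_SIZE;

                        gve_setup_rx_buffer(&rx->data.page_info[i], addr, page,
                                            &rx->data.data_ring[i].qpl_offset);
                        continue;
                }
                /* Raw-addressing mode: allocate and DMA-map a fresh page. */
                err = gve_rx_alloc_buffer(priv, &priv->pdev->dev,
                                          &rx->data.page_info[i],
                                          &rx->data.data_ring[i]);
                if (err)
                        goto alloc_err;
        }

The unwind at lines 134-135 then walks back over slots 0..i-1, presumably with gve_rx_free_buffer(), before the error is returned.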
141 struct gve_rx_ring *rx = &priv->rx[idx]; in gve_rx_alloc_ring() local
148 netif_dbg(priv, drv, priv->dev, "allocating rx ring\n"); in gve_rx_alloc_ring()
150 memset(rx, 0, sizeof(*rx)); in gve_rx_alloc_ring()
152 rx->gve = priv; in gve_rx_alloc_ring()
153 rx->q_num = idx; in gve_rx_alloc_ring()
156 rx->mask = slots - 1; in gve_rx_alloc_ring()
157 rx->data.raw_addressing = priv->queue_format == GVE_GQI_RDA_FORMAT; in gve_rx_alloc_ring()
159 /* alloc rx data ring */ in gve_rx_alloc_ring()
160 bytes = sizeof(*rx->data.data_ring) * slots; in gve_rx_alloc_ring()
161 rx->data.data_ring = dma_alloc_coherent(hdev, bytes, in gve_rx_alloc_ring()
162 &rx->data.data_bus, in gve_rx_alloc_ring()
164 if (!rx->data.data_ring) in gve_rx_alloc_ring()
166 filled_pages = gve_prefill_rx_pages(rx); in gve_rx_alloc_ring()
171 rx->fill_cnt = filled_pages; in gve_rx_alloc_ring()
176 rx->q_resources = in gve_rx_alloc_ring()
178 sizeof(*rx->q_resources), in gve_rx_alloc_ring()
179 &rx->q_resources_bus, in gve_rx_alloc_ring()
181 if (!rx->q_resources) { in gve_rx_alloc_ring()
185 netif_dbg(priv, drv, priv->dev, "rx[%d]->data.data_bus=%lx\n", idx, in gve_rx_alloc_ring()
186 (unsigned long)rx->data.data_bus); in gve_rx_alloc_ring()
188 /* alloc rx desc ring */ in gve_rx_alloc_ring()
196 rx->desc.desc_ring = dma_alloc_coherent(hdev, bytes, &rx->desc.bus, in gve_rx_alloc_ring()
198 if (!rx->desc.desc_ring) { in gve_rx_alloc_ring()
202 rx->cnt = 0; in gve_rx_alloc_ring()
203 rx->db_threshold = priv->rx_desc_cnt / 2; in gve_rx_alloc_ring()
204 rx->desc.seqno = 1; in gve_rx_alloc_ring()
210 dma_free_coherent(hdev, sizeof(*rx->q_resources), in gve_rx_alloc_ring()
211 rx->q_resources, rx->q_resources_bus); in gve_rx_alloc_ring()
212 rx->q_resources = NULL; in gve_rx_alloc_ring()
214 gve_rx_unfill_pages(priv, rx); in gve_rx_alloc_ring()
216 bytes = sizeof(*rx->data.data_ring) * slots; in gve_rx_alloc_ring()
217 dma_free_coherent(hdev, bytes, rx->data.data_ring, rx->data.data_bus); in gve_rx_alloc_ring()
218 rx->data.data_ring = NULL; in gve_rx_alloc_ring()
232 "Failed to alloc rx ring=%d: err=%d\n", in gve_rx_alloc_rings()
255 void gve_rx_write_doorbell(struct gve_priv *priv, struct gve_rx_ring *rx) in gve_rx_write_doorbell() argument
257 u32 db_idx = be32_to_cpu(rx->q_resources->db_index); in gve_rx_write_doorbell()
259 iowrite32be(rx->fill_cnt, &priv->db_bar2[db_idx]); in gve_rx_write_doorbell()
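The doorbell publishes rx->fill_cnt, the producer counter, as a big-endian MMIO write to the queue's doorbell slot in db_bar2. The thresholds used elsewhere in this file all compare the two free-running ring counters; a tiny helper, purely illustrative and not part of the driver, makes the relationship explicit:

/* fill_cnt counts buffers posted to the device, cnt counts descriptors the
 * driver has consumed; both are free-running u32s, so unsigned subtraction
 * yields the number of outstanding buffers even across wraparound.
 * (Hypothetical helper, for illustration only.)
 */
static inline u32 gve_rx_bufs_posted(const struct gve_rx_ring *rx)
{
        return rx->fill_cnt - rx->cnt;
}

The comparisons at lines 480, 561, 574 and 584 below are all checks of this difference against refill_target or db_threshold.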
339 struct gve_rx_ring *rx, struct gve_rx_slot_page_info *page_info, in gve_rx_qpl() argument
361 u64_stats_update_begin(&rx->statss); in gve_rx_qpl()
362 rx->rx_copied_pkt++; in gve_rx_qpl()
363 u64_stats_update_end(&rx->statss); in gve_rx_qpl()
369 static bool gve_rx(struct gve_rx_ring *rx, struct gve_rx_desc *rx_desc, in gve_rx() argument
373 struct gve_priv *priv = rx->gve; in gve_rx()
374 struct napi_struct *napi = &priv->ntfy_blocks[rx->ntfy_id].napi; in gve_rx()
383 u64_stats_update_begin(&rx->statss); in gve_rx()
384 rx->rx_desc_err_dropped_pkt++; in gve_rx()
385 u64_stats_update_end(&rx->statss); in gve_rx()
390 page_info = &rx->data.page_info[idx]; in gve_rx()
392 data_slot = &rx->data.data_ring[idx]; in gve_rx()
393 page_bus = (rx->data.raw_addressing) ? in gve_rx()
395 rx->data.qpl->page_buses[idx]; in gve_rx()
402 u64_stats_update_begin(&rx->statss); in gve_rx()
403 rx->rx_copied_pkt++; in gve_rx()
404 rx->rx_copybreak_pkt++; in gve_rx()
405 u64_stats_update_end(&rx->statss); in gve_rx()
413 if (!rx->data.raw_addressing) in gve_rx()
420 if (rx->data.raw_addressing) { in gve_rx()
425 skb = gve_rx_qpl(&priv->pdev->dev, dev, rx, in gve_rx()
431 u64_stats_update_begin(&rx->statss); in gve_rx()
432 rx->rx_skb_alloc_fail++; in gve_rx()
433 u64_stats_update_end(&rx->statss); in gve_rx()
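Around these lines, gve_rx() picks between copying a small packet into a new skb and handing the receive page to the stack. A sketch of that decision under the usual copybreak pattern; apart from gve_rx_qpl(), which appears above, the helper names and signatures are assumptions:

        if (len <= priv->rx_copybreak) {
                /* Small packet: copy it so the ring buffer can be reposted
                 * immediately (stats at lines 402-404).
                 */
                skb = gve_rx_copy(dev, napi, page_info, len);   /* assumed helper */
        } else if (rx->data.raw_addressing) {
                /* Hand the DMA-mapped page to the stack as a fragment. */
                skb = gve_rx_raw_addressing(&priv->pdev->dev, dev, rx, page_info,
                                            len, napi, data_slot);  /* assumed helper */
        } else {
                /* QPL page: copy or page-flip depending on whether the other
                 * half of the page is still in use (rx_copied_pkt at line 362).
                 */
                skb = gve_rx_qpl(&priv->pdev->dev, dev, rx, page_info, len,
                                 napi, data_slot);
        }

        if (!skb) {
                u64_stats_update_begin(&rx->statss);
                rx->rx_skb_alloc_fail++;
                u64_stats_update_end(&rx->statss);
                return false;
        }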
459 static bool gve_rx_work_pending(struct gve_rx_ring *rx) in gve_rx_work_pending() argument
465 next_idx = rx->cnt & rx->mask; in gve_rx_work_pending()
466 desc = rx->desc.desc_ring + next_idx; in gve_rx_work_pending()
472 return (GVE_SEQNO(flags_seq) == rx->desc.seqno); in gve_rx_work_pending()
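Completion is detected by a sequence number the device writes into each descriptor rather than an ownership bit: the descriptor at rx->cnt & rx->mask is ready when its sequence field matches the value the ring expects next (line 557 below advances that expectation after each descriptor). A sketch with hypothetical names, assuming a 3-bit sequence field that cycles 1..7:

/* Assumed encoding: the low three bits of the big-endian flags_seq word
 * carry the sequence number. A read barrier between this check and the
 * rest of the descriptor is elided here.
 */
static bool rx_desc_is_ready(const struct gve_rx_ring *rx)
{
        const struct gve_rx_desc *desc = rx->desc.desc_ring + (rx->cnt & rx->mask);

        return (be16_to_cpu(READ_ONCE(desc->flags_seq)) & 0x7) == rx->desc.seqno;
}

/* Assumed wrap rule: the counter skips 0, so it runs 1, 2, ... 7, 1, ...
 * which is consistent with rx->desc.seqno being initialized to 1 at line 204.
 */
static u8 next_seqno(u8 seq)
{
        return (seq + 1) == 8 ? 1 : seq + 1;
}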
475 static bool gve_rx_refill_buffers(struct gve_priv *priv, struct gve_rx_ring *rx) in gve_rx_refill_buffers() argument
477 int refill_target = rx->mask + 1; in gve_rx_refill_buffers()
478 u32 fill_cnt = rx->fill_cnt; in gve_rx_refill_buffers()
480 while (fill_cnt - rx->cnt < refill_target) { in gve_rx_refill_buffers()
482 u32 idx = fill_cnt & rx->mask; in gve_rx_refill_buffers()
484 page_info = &rx->data.page_info[idx]; in gve_rx_refill_buffers()
490 &rx->data.data_ring[idx]; in gve_rx_refill_buffers()
505 if (!rx->data.raw_addressing) in gve_rx_refill_buffers()
512 &rx->data.data_ring[idx]; in gve_rx_refill_buffers()
523 rx->fill_cnt = fill_cnt; in gve_rx_refill_buffers()
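The refill loop advances a local fill_cnt until a full ring's worth of buffers (refill_target = rx->mask + 1) is outstanding, and only publishes the result back to rx->fill_cnt at the end. The per-slot body, which chooses between recycling the slot's page and mapping a new one, is not in the listing; the sketch below stands in for it with a hypothetical helper:

        while (fill_cnt - rx->cnt < refill_target) {
                u32 idx = fill_cnt & rx->mask;
                struct gve_rx_slot_page_info *page_info = &rx->data.page_info[idx];

                /* repost_or_realloc() is hypothetical: recycle the page if its
                 * other half is free, otherwise allocate and DMA-map a new one
                 * into data_ring[idx]; bail out on allocation failure.
                 */
                if (!repost_or_realloc(priv, rx, page_info,
                                       &rx->data.data_ring[idx]))
                        break;
                fill_cnt++;
        }
        rx->fill_cnt = fill_cnt;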
527 bool gve_clean_rx_done(struct gve_rx_ring *rx, int budget, in gve_clean_rx_done() argument
530 struct gve_priv *priv = rx->gve; in gve_clean_rx_done()
533 u32 cnt = rx->cnt; in gve_clean_rx_done()
534 u32 idx = cnt & rx->mask; in gve_clean_rx_done()
537 desc = rx->desc.desc_ring + idx; in gve_clean_rx_done()
538 while ((GVE_SEQNO(desc->flags_seq) == rx->desc.seqno) && in gve_clean_rx_done()
544 rx->q_num, idx, desc, desc->flags_seq); in gve_clean_rx_done()
546 "[%d] seqno=%d rx->desc.seqno=%d\n", in gve_clean_rx_done()
547 rx->q_num, GVE_SEQNO(desc->flags_seq), in gve_clean_rx_done()
548 rx->desc.seqno); in gve_clean_rx_done()
549 dropped = !gve_rx(rx, desc, feat, idx); in gve_clean_rx_done()
555 idx = cnt & rx->mask; in gve_clean_rx_done()
556 desc = rx->desc.desc_ring + idx; in gve_clean_rx_done()
557 rx->desc.seqno = gve_next_seqno(rx->desc.seqno); in gve_clean_rx_done()
561 if (!work_done && rx->fill_cnt - cnt > rx->db_threshold) in gve_clean_rx_done()
564 u64_stats_update_begin(&rx->statss); in gve_clean_rx_done()
565 rx->rpackets += packets; in gve_clean_rx_done()
566 rx->rbytes += bytes; in gve_clean_rx_done()
567 u64_stats_update_end(&rx->statss); in gve_clean_rx_done()
568 rx->cnt = cnt; in gve_clean_rx_done()
571 if (!rx->data.raw_addressing) { in gve_clean_rx_done()
573 rx->fill_cnt += work_done; in gve_clean_rx_done()
574 } else if (rx->fill_cnt - cnt <= rx->db_threshold) { in gve_clean_rx_done()
578 if (!gve_rx_refill_buffers(priv, rx)) in gve_clean_rx_done()
584 if (rx->fill_cnt - cnt <= rx->db_threshold) { in gve_clean_rx_done()
585 gve_rx_write_doorbell(priv, rx); in gve_clean_rx_done()
590 gve_rx_write_doorbell(priv, rx); in gve_clean_rx_done()
591 return gve_rx_work_pending(rx); in gve_clean_rx_done()
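The tail of gve_clean_rx_done() (lines 561-591 above) encodes the restock policy: QPL rings repost a buffer for every descriptor processed, while raw-addressing rings refill only once the number of outstanding buffers drops to the doorbell threshold, and request another poll pass if the refill could not complete. A condensed sketch of that branch; the quoted lines are from the listing, the returns and exact nesting are assumptions:

        rx->cnt = cnt;

        if (!rx->data.raw_addressing) {
                /* QPL mode: each processed descriptor frees its slot for reuse. */
                rx->fill_cnt += work_done;
        } else if (rx->fill_cnt - cnt <= rx->db_threshold) {
                /* Raw-addressing mode: refill only when running low. */
                if (!gve_rx_refill_buffers(priv, rx))
                        return false;

                /* Still low after refilling (e.g. allocation failure): ring
                 * the doorbell for what we have and ask to be polled again.
                 */
                if (rx->fill_cnt - cnt <= rx->db_threshold) {
                        gve_rx_write_doorbell(priv, rx);
                        return true;
                }
        }

        gve_rx_write_doorbell(priv, rx);
        return gve_rx_work_pending(rx);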
596 struct gve_rx_ring *rx = block->rx; in gve_rx_poll() local
607 repoll |= gve_clean_rx_done(rx, budget, feat); in gve_rx_poll()
609 repoll |= gve_rx_work_pending(rx); in gve_rx_poll()