Lines matching refs: rx_queue

Cross-reference listing for the RX path of the sfc (Solarflare efx) driver. Each entry gives the source line number, the matching source line, and the enclosing function, noting whether rx_queue is an argument of that function or a local variable in it. Only matching lines are shown, so function bodies appear with gaps.

76 efx_rx_buf_next(struct efx_rx_queue *rx_queue, struct efx_rx_buffer *rx_buf) in efx_rx_buf_next() argument
78 if (unlikely(rx_buf == efx_rx_buffer(rx_queue, rx_queue->ptr_mask))) in efx_rx_buf_next()
79 return efx_rx_buffer(rx_queue, 0); in efx_rx_buf_next()
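
efx_rx_buf_next() steps through the software descriptor ring: the ring size is a power of two, so the last valid slot is index ptr_mask, and stepping past it wraps to slot 0 (lines 78-79). A minimal standalone model of that wrap, with illustrative names and sizes (not driver code):

    /* Model of power-of-two ring traversal; "buf", RING_SIZE are illustrative. */
    #include <assert.h>
    #include <stdio.h>

    #define RING_SIZE 8                 /* must be a power of two */
    #define PTR_MASK  (RING_SIZE - 1)

    struct buf { int id; };
    static struct buf ring[RING_SIZE];

    /* Equivalent of efx_rx_buf_next(): advance one slot, wrapping
     * from the last slot (PTR_MASK) back to slot 0. */
    static struct buf *buf_next(struct buf *b)
    {
        if (b == &ring[PTR_MASK])       /* last slot? */
            return &ring[0];            /* wrap */
        return b + 1;
    }

    int main(void)
    {
        assert(buf_next(&ring[PTR_MASK]) == &ring[0]);  /* wraps */
        assert(buf_next(&ring[3]) == &ring[4]);         /* normal step */
        puts("wrap ok");
        return 0;
    }
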
106 static struct page *efx_reuse_page(struct efx_rx_queue *rx_queue) in efx_reuse_page() argument
108 struct efx_nic *efx = rx_queue->efx; in efx_reuse_page()
113 index = rx_queue->page_remove & rx_queue->page_ptr_mask; in efx_reuse_page()
114 page = rx_queue->page_ring[index]; in efx_reuse_page()
118 rx_queue->page_ring[index] = NULL; in efx_reuse_page()
120 if (rx_queue->page_remove != rx_queue->page_add) in efx_reuse_page()
121 ++rx_queue->page_remove; in efx_reuse_page()
125 ++rx_queue->page_recycle_count; in efx_reuse_page()
133 ++rx_queue->page_recycle_failed; in efx_reuse_page()
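
efx_reuse_page() is the consumer end of the page-recycle ring. The free-running page_remove counter is masked into a slot (line 113), the slot is emptied, and page_remove advances only while it trails page_add (lines 120-121); a hit bumps page_recycle_count, a miss bumps page_recycle_failed and the caller falls back to a fresh allocation. A standalone sketch of that logic; the driver also checks the page reference count before reuse, which is omitted here:

    /* Consumer side of a page-recycle ring, modelled in plain C. The
     * counters only ever advance; masking them yields the ring slot. */
    #include <stdio.h>

    #define PAGE_RING_SIZE 4                    /* power of two */
    #define PAGE_PTR_MASK  (PAGE_RING_SIZE - 1)

    static void *page_ring[PAGE_RING_SIZE];
    static unsigned int page_remove, page_add = PAGE_RING_SIZE;
    static unsigned int recycle_count, recycle_failed;

    static void *reuse_page(void)
    {
        unsigned int index = page_remove & PAGE_PTR_MASK;
        void *page = page_ring[index];

        page_ring[index] = NULL;                /* take the slot */
        if (page_remove != page_add)            /* don't run past producer */
            ++page_remove;

        if (page) {
            ++recycle_count;
            return page;
        }
        ++recycle_failed;                       /* caller allocates fresh */
        return NULL;
    }

    int main(void)
    {
        int dummy;
        page_ring[0] = &dummy;
        printf("got %p, failed=%u\n", reuse_page(), recycle_failed);
        printf("got %p, failed=%u\n", reuse_page(), recycle_failed);
        return 0;
    }
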
149 static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue, bool atomic) in efx_init_rx_buffers() argument
151 struct efx_nic *efx = rx_queue->efx; in efx_init_rx_buffers()
161 page = efx_reuse_page(rx_queue); in efx_init_rx_buffers()
188 index = rx_queue->added_count & rx_queue->ptr_mask; in efx_init_rx_buffers()
189 rx_buf = efx_rx_buffer(rx_queue, index); in efx_init_rx_buffers()
195 ++rx_queue->added_count; in efx_init_rx_buffers()
224 static void efx_free_rx_buffers(struct efx_rx_queue *rx_queue, in efx_free_rx_buffers() argument
233 rx_buf = efx_rx_buf_next(rx_queue, rx_buf); in efx_free_rx_buffers()
245 struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel); in efx_recycle_rx_page() local
246 struct efx_nic *efx = rx_queue->efx; in efx_recycle_rx_page()
253 index = rx_queue->page_add & rx_queue->page_ptr_mask; in efx_recycle_rx_page()
254 if (rx_queue->page_ring[index] == NULL) { in efx_recycle_rx_page()
255 unsigned read_index = rx_queue->page_remove & in efx_recycle_rx_page()
256 rx_queue->page_ptr_mask; in efx_recycle_rx_page()
263 ++rx_queue->page_remove; in efx_recycle_rx_page()
264 rx_queue->page_ring[index] = page; in efx_recycle_rx_page()
265 ++rx_queue->page_add; in efx_recycle_rx_page()
268 ++rx_queue->page_recycle_full; in efx_recycle_rx_page()
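
efx_recycle_rx_page() is the matching producer: if the slot at page_add & page_ptr_mask is free it stores the page and advances page_add, first nudging page_remove forward if the consumer currently points at that very slot (lines 253-265); otherwise page_recycle_full is bumped (line 268) and the caller releases the page normally. A sketch under the same illustrative sizes as above:

    /* Producer side of the page-recycle ring, modelled in plain C. */
    #include <stdio.h>

    #define PAGE_RING_SIZE 4
    #define PAGE_PTR_MASK  (PAGE_RING_SIZE - 1)

    static void *page_ring[PAGE_RING_SIZE];
    static unsigned int page_remove, page_add = PAGE_RING_SIZE;
    static unsigned int recycle_full;

    static void recycle_page(void *page)
    {
        unsigned int index = page_add & PAGE_PTR_MASK;

        if (page_ring[index] == NULL) {
            unsigned int read_index = page_remove & PAGE_PTR_MASK;

            /* if the consumer points at this slot, step it past us */
            if (read_index == index)
                ++page_remove;
            page_ring[index] = page;
            ++page_add;
            return;
        }
        ++recycle_full;         /* no room: caller frees the page instead */
    }

    int main(void)
    {
        int a, b, c, d, e;
        void *pages[] = { &a, &b, &c, &d, &e };
        for (int i = 0; i < 5; i++)             /* 5th insert finds ring full */
            recycle_page(pages[i]);
        printf("add=%u remove=%u full=%u\n", page_add, page_remove, recycle_full);
        return 0;
    }
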
273 static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue, in efx_fini_rx_buffer() argument
282 efx_unmap_rx_buffer(rx_queue->efx, rx_buf); in efx_fini_rx_buffer()
283 efx_free_rx_buffers(rx_queue, rx_buf, 1); in efx_fini_rx_buffer()
293 struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel); in efx_recycle_rx_pages() local
297 rx_buf = efx_rx_buf_next(rx_queue, rx_buf); in efx_recycle_rx_pages()
305 struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel); in efx_discard_rx_packet() local
309 efx_free_rx_buffers(rx_queue, rx_buf, n_frags); in efx_discard_rx_packet()
324 void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic) in efx_fast_push_rx_descriptors() argument
326 struct efx_nic *efx = rx_queue->efx; in efx_fast_push_rx_descriptors()
330 if (!rx_queue->refill_enabled) in efx_fast_push_rx_descriptors()
334 fill_level = (rx_queue->added_count - rx_queue->removed_count); in efx_fast_push_rx_descriptors()
335 EFX_WARN_ON_ONCE_PARANOID(fill_level > rx_queue->efx->rxq_entries); in efx_fast_push_rx_descriptors()
336 if (fill_level >= rx_queue->fast_fill_trigger) in efx_fast_push_rx_descriptors()
340 if (unlikely(fill_level < rx_queue->min_fill)) { in efx_fast_push_rx_descriptors()
342 rx_queue->min_fill = fill_level; in efx_fast_push_rx_descriptors()
346 space = rx_queue->max_fill - fill_level; in efx_fast_push_rx_descriptors()
349 netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev, in efx_fast_push_rx_descriptors()
352 efx_rx_queue_index(rx_queue), fill_level, in efx_fast_push_rx_descriptors()
353 rx_queue->max_fill); in efx_fast_push_rx_descriptors()
357 rc = efx_init_rx_buffers(rx_queue, atomic); in efx_fast_push_rx_descriptors()
360 efx_schedule_slow_fill(rx_queue); in efx_fast_push_rx_descriptors()
365 netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev, in efx_fast_push_rx_descriptors()
367 "to level %d\n", efx_rx_queue_index(rx_queue), in efx_fast_push_rx_descriptors()
368 rx_queue->added_count - rx_queue->removed_count); in efx_fast_push_rx_descriptors()
371 if (rx_queue->notified_count != rx_queue->added_count) in efx_fast_push_rx_descriptors()
372 efx_nic_notify_rx_desc(rx_queue); in efx_fast_push_rx_descriptors()
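
efx_fast_push_rx_descriptors() measures occupancy as added_count - removed_count (line 334). Both are free-running unsigned counters, so the subtraction stays correct even across wrap. Nothing happens while the level is at or above fast_fill_trigger (line 336); below it, space = max_fill - fill_level buffers are added (line 346), a failed allocation arms the slow-fill timer (line 360), and hardware is notified only if notified_count lags added_count (lines 371-372). A small model of the counter arithmetic, with illustrative values and assuming 32-bit unsigned int:

    /* Fill level from free-running counters; unsigned subtraction is
     * wrap-safe. The trigger/max_fill values here are illustrative. */
    #include <stdio.h>

    int main(void)
    {
        unsigned int max_fill = 480, trigger = 360;     /* illustrative */
        unsigned int added = 5, removed = 0xFFFFFFFBu;  /* counters wrapped */

        unsigned int fill_level = added - removed;      /* = 10, despite wrap */
        printf("fill_level = %u\n", fill_level);

        if (fill_level >= trigger) {
            puts("queue full enough; no refill");
        } else {
            unsigned int space = max_fill - fill_level; /* buffers to add */
            printf("refill %u buffers\n", space);
        }
        return 0;
    }
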
377 struct efx_rx_queue *rx_queue = from_timer(rx_queue, t, slow_fill); in efx_rx_slow_fill() local
380 efx_nic_generate_fill_event(rx_queue); in efx_rx_slow_fill()
381 ++rx_queue->slow_fill_count; in efx_rx_slow_fill()
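
efx_rx_slow_fill() recovers its rx_queue from the timer pointer with from_timer() (line 377), which is container_of() applied to the embedded timer field. A standalone model of that pattern with illustrative struct types:

    /* from_timer() is container_of() in disguise: recover the queue
     * from a pointer to its embedded timer. Types are illustrative. */
    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct timer { int dummy; };
    struct rxq { unsigned int slow_fill_count; struct timer slow_fill; };

    static void slow_fill_cb(struct timer *t)
    {
        /* equivalent of: rx_queue = from_timer(rx_queue, t, slow_fill) */
        struct rxq *q = container_of(t, struct rxq, slow_fill);
        ++q->slow_fill_count;   /* the driver also generates a fill event */
    }

    int main(void)
    {
        struct rxq q = { 0 };
        slow_fill_cb(&q.slow_fill);
        printf("slow_fill_count=%u\n", q.slow_fill_count);
        return 0;
    }
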
384 static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue, in efx_rx_packet__check_len() argument
388 struct efx_nic *efx = rx_queue->efx; in efx_rx_packet__check_len()
402 efx_rx_queue_index(rx_queue), len, max_len); in efx_rx_packet__check_len()
404 efx_rx_queue_channel(rx_queue)->n_rx_overlength++; in efx_rx_packet__check_len()
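
efx_rx_packet__check_len() validates the DMA length against what the buffer can hold and counts violations in the channel's n_rx_overlength statistic (line 404); in the driver the packet is also flagged for discard and a rate-limited error is logged (line 402). A sketch of the check, with an illustrative return convention:

    /* Overlength check sketch; names and the bool return are illustrative. */
    #include <stdbool.h>
    #include <stdio.h>

    struct rxq_stats { unsigned int n_rx_overlength; };

    static bool rx_len_ok(struct rxq_stats *st, unsigned int len,
                          unsigned int max_len)
    {
        if (len <= max_len)
            return true;
        /* driver: rate-limited log of queue index, len and max_len */
        ++st->n_rx_overlength;
        return false;
    }

    int main(void)
    {
        struct rxq_stats st = { 0 };
        printf("ok=%d over=%u\n", rx_len_ok(&st, 1514, 1536), st.n_rx_overlength);
        printf("ok=%d over=%u\n", rx_len_ok(&st, 9000, 1536), st.n_rx_overlength);
        return 0;
    }
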
420 struct efx_rx_queue *rx_queue; in efx_rx_packet_gro() local
422 rx_queue = efx_channel_get_rx_queue(channel); in efx_rx_packet_gro()
423 efx_free_rx_buffers(rx_queue, rx_buf, n_frags); in efx_rx_packet_gro()
443 rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf); in efx_rx_packet_gro()
449 skb_record_rx_queue(skb, channel->rx_queue.core_index); in efx_rx_packet_gro()
494 rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf); in efx_rx_mk_skb()
512 void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index, in efx_rx_packet() argument
515 struct efx_nic *efx = rx_queue->efx; in efx_rx_packet()
516 struct efx_channel *channel = efx_rx_queue_channel(rx_queue); in efx_rx_packet()
519 rx_queue->rx_packets++; in efx_rx_packet()
521 rx_buf = efx_rx_buffer(rx_queue, index); in efx_rx_packet()
527 efx_rx_packet__check_len(rx_queue, rx_buf, len); in efx_rx_packet()
541 efx_rx_queue_index(rx_queue), index, in efx_rx_packet()
542 (index + n_frags - 1) & rx_queue->ptr_mask, len, in efx_rx_packet()
578 rx_buf = efx_rx_buf_next(rx_queue, rx_buf); in efx_rx_packet()
588 rx_buf = efx_rx_buffer(rx_queue, index); in efx_rx_packet()
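
efx_rx_packet() handles scattered packets: a packet of n_frags occupies consecutive ring slots from index to (index + n_frags - 1) & ptr_mask (line 542), and the fragment walk uses efx_rx_buf_next() so it wraps correctly (line 578). A standalone illustration of the slot arithmetic:

    /* A scattered packet's fragments may wrap past the end of the ring;
     * masking each offset keeps the slot index valid. Values illustrative. */
    #include <stdio.h>

    #define RING_SIZE 8
    #define PTR_MASK  (RING_SIZE - 1)

    int main(void)
    {
        unsigned int index = 6, n_frags = 3;    /* occupies slots 6, 7, 0 */
        unsigned int last = (index + n_frags - 1) & PTR_MASK;

        for (unsigned int i = 0; i < n_frags; i++)
            printf("frag %u -> slot %u\n", i, (index + i) & PTR_MASK);
        printf("last slot = %u\n", last);
        return 0;
    }
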
608 struct efx_rx_queue *rx_queue; in efx_rx_deliver() local
610 rx_queue = efx_channel_get_rx_queue(channel); in efx_rx_deliver()
611 efx_free_rx_buffers(rx_queue, rx_buf, n_frags); in efx_rx_deliver()
614 skb_record_rx_queue(skb, channel->rx_queue.core_index); in efx_rx_deliver()
643 efx_rx_buffer(&channel->rx_queue, channel->rx_pkt_index); in __efx_rx_packet()
657 struct efx_rx_queue *rx_queue; in __efx_rx_packet() local
660 rx_queue = efx_channel_get_rx_queue(channel); in __efx_rx_packet()
661 efx_free_rx_buffers(rx_queue, rx_buf, in __efx_rx_packet()
677 int efx_probe_rx_queue(struct efx_rx_queue *rx_queue) in efx_probe_rx_queue() argument
679 struct efx_nic *efx = rx_queue->efx; in efx_probe_rx_queue()
686 rx_queue->ptr_mask = entries - 1; in efx_probe_rx_queue()
690 efx_rx_queue_index(rx_queue), efx->rxq_entries, in efx_probe_rx_queue()
691 rx_queue->ptr_mask); in efx_probe_rx_queue()
694 rx_queue->buffer = kcalloc(entries, sizeof(*rx_queue->buffer), in efx_probe_rx_queue()
696 if (!rx_queue->buffer) in efx_probe_rx_queue()
699 rc = efx_nic_probe_rx(rx_queue); in efx_probe_rx_queue()
701 kfree(rx_queue->buffer); in efx_probe_rx_queue()
702 rx_queue->buffer = NULL; in efx_probe_rx_queue()
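
efx_probe_rx_queue() sizes the descriptor ring to a power of two so that ptr_mask = entries - 1 (line 686), allocates the software buffer array with kcalloc (line 694), and unwinds the allocation if the NIC-level probe fails (lines 701-702). A userspace sketch of that pattern; the error handling and return values are illustrative:

    /* Probe sketch: power-of-two ring, mask derived from the size,
     * allocation unwound on a later failure. Not driver code. */
    #include <stdio.h>
    #include <stdlib.h>

    struct buffer { void *page; };
    struct rxq {
        unsigned int ptr_mask;
        struct buffer *buffer;
    };

    static int probe_rxq(struct rxq *q, unsigned int entries)
    {
        /* ring sizes must be powers of two for the mask trick to work */
        if (entries == 0 || (entries & (entries - 1)))
            return -1;
        q->ptr_mask = entries - 1;

        q->buffer = calloc(entries, sizeof(*q->buffer));
        if (!q->buffer)
            return -1;

        /* if a later hardware probe step failed, the driver unwinds:
         * free(q->buffer); q->buffer = NULL; */
        return 0;
    }

    int main(void)
    {
        struct rxq q;
        printf("probe 512 -> %d, mask=%#x\n", probe_rxq(&q, 512), q.ptr_mask);
        free(q.buffer);
        return 0;
    }
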
709 struct efx_rx_queue *rx_queue) in efx_init_rx_recycle_ring() argument
725 rx_queue->page_ring = kcalloc(page_ring_size, in efx_init_rx_recycle_ring()
726 sizeof(*rx_queue->page_ring), GFP_KERNEL); in efx_init_rx_recycle_ring()
727 rx_queue->page_ptr_mask = page_ring_size - 1; in efx_init_rx_recycle_ring()
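
efx_init_rx_recycle_ring() allocates the page ring and derives page_ptr_mask = page_ring_size - 1 (line 727), so the page ring must also be a power-of-two size; the kernel rounds a requested count up with roundup_pow_of_two(). A standalone model of that rounding (the driver's actual sizing policy is elided from this listing):

    /* Round a requested entry count up to a power of two, as
     * roundup_pow_of_two() does in the kernel. Counts illustrative. */
    #include <stdio.h>

    static unsigned int roundup_pow2(unsigned int v)
    {
        unsigned int r = 1;
        while (r < v)
            r <<= 1;
        return r;
    }

    int main(void)
    {
        unsigned int page_ring_size = roundup_pow2(10);     /* -> 16 */
        unsigned int page_ptr_mask = page_ring_size - 1;
        printf("size=%u mask=%#x\n", page_ring_size, page_ptr_mask);
        return 0;
    }
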
730 void efx_init_rx_queue(struct efx_rx_queue *rx_queue) in efx_init_rx_queue() argument
732 struct efx_nic *efx = rx_queue->efx; in efx_init_rx_queue()
735 netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev, in efx_init_rx_queue()
736 "initialising RX queue %d\n", efx_rx_queue_index(rx_queue)); in efx_init_rx_queue()
739 rx_queue->added_count = 0; in efx_init_rx_queue()
740 rx_queue->notified_count = 0; in efx_init_rx_queue()
741 rx_queue->removed_count = 0; in efx_init_rx_queue()
742 rx_queue->min_fill = -1U; in efx_init_rx_queue()
743 efx_init_rx_recycle_ring(efx, rx_queue); in efx_init_rx_queue()
745 rx_queue->page_remove = 0; in efx_init_rx_queue()
746 rx_queue->page_add = rx_queue->page_ptr_mask + 1; in efx_init_rx_queue()
747 rx_queue->page_recycle_count = 0; in efx_init_rx_queue()
748 rx_queue->page_recycle_failed = 0; in efx_init_rx_queue()
749 rx_queue->page_recycle_full = 0; in efx_init_rx_queue()
763 rx_queue->max_fill = max_fill; in efx_init_rx_queue()
764 rx_queue->fast_fill_trigger = trigger; in efx_init_rx_queue()
765 rx_queue->refill_enabled = true; in efx_init_rx_queue()
768 efx_nic_init_rx(rx_queue); in efx_init_rx_queue()
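
efx_init_rx_queue() zeroes the ring counters and seeds min_fill with -1U (line 742), i.e. UINT_MAX, so the first observed fill level becomes the low-water mark that lines 340-342 maintain. It also starts page_add one full ring ahead of page_remove (line 746), so the producer/consumer tests above behave correctly on an initially empty page ring. A tiny model of the low-water-mark idiom:

    /* min_fill starts at the maximum unsigned value so any observed
     * level replaces it. Sample values are illustrative. */
    #include <limits.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int min_fill = UINT_MAX;       /* the driver writes -1U */
        unsigned int samples[] = { 400, 120, 250 };

        for (int i = 0; i < 3; i++)
            if (samples[i] < min_fill)
                min_fill = samples[i];
        printf("low-water mark = %u\n", min_fill);  /* 120 */
        return 0;
    }
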
771 void efx_fini_rx_queue(struct efx_rx_queue *rx_queue) in efx_fini_rx_queue() argument
774 struct efx_nic *efx = rx_queue->efx; in efx_fini_rx_queue()
777 netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev, in efx_fini_rx_queue()
778 "shutting down RX queue %d\n", efx_rx_queue_index(rx_queue)); in efx_fini_rx_queue()
780 del_timer_sync(&rx_queue->slow_fill); in efx_fini_rx_queue()
783 if (rx_queue->buffer) { in efx_fini_rx_queue()
784 for (i = rx_queue->removed_count; i < rx_queue->added_count; in efx_fini_rx_queue()
786 unsigned index = i & rx_queue->ptr_mask; in efx_fini_rx_queue()
787 rx_buf = efx_rx_buffer(rx_queue, index); in efx_fini_rx_queue()
788 efx_fini_rx_buffer(rx_queue, rx_buf); in efx_fini_rx_queue()
793 for (i = 0; i <= rx_queue->page_ptr_mask; i++) { in efx_fini_rx_queue()
794 struct page *page = rx_queue->page_ring[i]; in efx_fini_rx_queue()
806 kfree(rx_queue->page_ring); in efx_fini_rx_queue()
807 rx_queue->page_ring = NULL; in efx_fini_rx_queue()
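
efx_fini_rx_queue() stops the slow-fill timer, then releases every buffer still on the ring: the half-open counter range [removed_count, added_count) is masked into ring slots (lines 784-788), after which the page ring is walked by plain index 0..page_ptr_mask (line 793) and freed. A sketch of the masked drain loop:

    /* Teardown sketch: in-flight buffers live in the counter range
     * [removed_count, added_count); masking maps each to its slot. */
    #include <stdio.h>

    #define RING_SIZE 8
    #define PTR_MASK  (RING_SIZE - 1)

    int main(void)
    {
        unsigned int removed_count = 13, added_count = 16;  /* 3 in flight */

        for (unsigned int i = removed_count; i < added_count; i++)
            printf("fini buffer in slot %u\n", i & PTR_MASK);
        return 0;
    }
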
810 void efx_remove_rx_queue(struct efx_rx_queue *rx_queue) in efx_remove_rx_queue() argument
812 netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev, in efx_remove_rx_queue()
813 "destroying RX queue %d\n", efx_rx_queue_index(rx_queue)); in efx_remove_rx_queue()
815 efx_nic_remove_rx(rx_queue); in efx_remove_rx_queue()
817 kfree(rx_queue->buffer); in efx_remove_rx_queue()
818 rx_queue->buffer = NULL; in efx_remove_rx_queue()