Lines matching references to efx in the ef4 RX path
62 static inline u32 ef4_rx_buf_hash(struct ef4_nic *efx, const u8 *eh) in ef4_rx_buf_hash() argument
65 return __le32_to_cpup((const __le32 *)(eh + efx->rx_packet_hash_offset)); in ef4_rx_buf_hash()
67 const u8 *data = eh + efx->rx_packet_hash_offset; in ef4_rx_buf_hash()
84 static inline void ef4_sync_rx_buffer(struct ef4_nic *efx, in ef4_sync_rx_buffer() argument
88 dma_sync_single_for_cpu(&efx->pci_dev->dev, rx_buf->dma_addr, len, in ef4_sync_rx_buffer()
92 void ef4_rx_config_page_split(struct ef4_nic *efx) in ef4_rx_config_page_split() argument
94 efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + efx->rx_ip_align, in ef4_rx_config_page_split()
96 efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 : in ef4_rx_config_page_split()
98 efx->rx_page_buf_step); in ef4_rx_config_page_split()
99 efx->rx_buffer_truesize = (PAGE_SIZE << efx->rx_buffer_order) / in ef4_rx_config_page_split()
100 efx->rx_bufs_per_page; in ef4_rx_config_page_split()
101 efx->rx_pages_per_batch = DIV_ROUND_UP(EF4_RX_PREFERRED_BATCH, in ef4_rx_config_page_split()
102 efx->rx_bufs_per_page); in ef4_rx_config_page_split()
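The four assignments above determine how each DMA page is carved into receive buffers. Below is a minimal, standalone sketch of that arithmetic, assuming a 4 KiB page and placeholder values for the alignment unit, the per-page recycle-state size, and EF4_RX_PREFERRED_BATCH; none of those values appear in the matched lines.

/*
 * Hypothetical sketch of the page-split arithmetic in
 * ef4_rx_config_page_split(); constants below are illustrative,
 * not the driver's.
 */
#include <stdio.h>

#define DEMO_PAGE_SIZE     4096u
#define DEMO_ALIGN_UNIT    64u    /* assumed buffer alignment */
#define DEMO_STATE_SIZE    16u    /* assumed per-page recycle-state size */
#define ALIGN_UP(x, a)     (((x) + (a) - 1) / (a) * (a))
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int rx_dma_len = 1536, rx_ip_align = 2, rx_buffer_order = 0;
	unsigned int preferred_batch = 8;  /* stands in for EF4_RX_PREFERRED_BATCH */

	/* One receive buffer per step, padded so the IP header stays aligned. */
	unsigned int step = ALIGN_UP(rx_dma_len + rx_ip_align, DEMO_ALIGN_UNIT);

	/* High-order pages hold a single buffer; order-0 pages are split. */
	unsigned int bufs_per_page = rx_buffer_order ?
		1 : (DEMO_PAGE_SIZE - DEMO_STATE_SIZE) / step;

	unsigned int truesize = (DEMO_PAGE_SIZE << rx_buffer_order) / bufs_per_page;
	unsigned int pages_per_batch = DIV_ROUND_UP(preferred_batch, bufs_per_page);

	printf("step=%u bufs/page=%u truesize=%u pages/batch=%u\n",
	       step, bufs_per_page, truesize, pages_per_batch);
	return 0;
}

With these illustrative numbers the step rounds up to 1600 bytes, so an order-0 page yields two buffers of truesize 2048 and a preferred batch of 8 buffers needs 4 pages.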
108 struct ef4_nic *efx = rx_queue->efx; in ef4_reuse_page() local
129 dma_unmap_page(&efx->pci_dev->dev, state->dma_addr, in ef4_reuse_page()
130 PAGE_SIZE << efx->rx_buffer_order, in ef4_reuse_page()
151 struct ef4_nic *efx = rx_queue->efx; in ef4_init_rx_buffers() local
165 efx->rx_buffer_order); in ef4_init_rx_buffers()
169 dma_map_page(&efx->pci_dev->dev, page, 0, in ef4_init_rx_buffers()
170 PAGE_SIZE << efx->rx_buffer_order, in ef4_init_rx_buffers()
172 if (unlikely(dma_mapping_error(&efx->pci_dev->dev, in ef4_init_rx_buffers()
174 __free_pages(page, efx->rx_buffer_order); in ef4_init_rx_buffers()
190 rx_buf->dma_addr = dma_addr + efx->rx_ip_align; in ef4_init_rx_buffers()
192 rx_buf->page_offset = page_offset + efx->rx_ip_align; in ef4_init_rx_buffers()
193 rx_buf->len = efx->rx_dma_len; in ef4_init_rx_buffers()
197 dma_addr += efx->rx_page_buf_step; in ef4_init_rx_buffers()
198 page_offset += efx->rx_page_buf_step; in ef4_init_rx_buffers()
199 } while (page_offset + efx->rx_page_buf_step <= PAGE_SIZE); in ef4_init_rx_buffers()
202 } while (++count < efx->rx_pages_per_batch); in ef4_init_rx_buffers()
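The ef4_init_rx_buffers() lines above advance dma_addr and page_offset by rx_page_buf_step until another full buffer would no longer fit in the page. A small self-contained sketch of that carving loop, using illustrative types and values rather than the driver's own, follows.

/*
 * Sketch of the page-carving loop suggested by the ef4_init_rx_buffers()
 * lines above: each page is split into buffers, every buffer offset by
 * rx_ip_align and advanced by rx_page_buf_step.  The struct and values
 * here are stand-ins, not the driver's.
 */
#include <stdio.h>

struct demo_rx_buf {
	unsigned long dma_addr;
	unsigned int  page_offset;
	unsigned int  len;
};

static void carve_page(unsigned long page_dma, unsigned int page_size,
		       unsigned int rx_ip_align, unsigned int rx_dma_len,
		       unsigned int rx_page_buf_step)
{
	unsigned long dma_addr = page_dma;
	unsigned int page_offset = 0;

	do {
		struct demo_rx_buf buf = {
			.dma_addr    = dma_addr + rx_ip_align,
			.page_offset = page_offset + rx_ip_align,
			.len         = rx_dma_len,
		};
		printf("buf @%#lx offset=%u len=%u\n",
		       buf.dma_addr, buf.page_offset, buf.len);

		dma_addr    += rx_page_buf_step;
		page_offset += rx_page_buf_step;
		/* Stop once another full step would run past the page. */
	} while (page_offset + rx_page_buf_step <= page_size);
}

int main(void)
{
	carve_page(0x100000, 4096, 2, 1536, 1600);
	return 0;
}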
210 static void ef4_unmap_rx_buffer(struct ef4_nic *efx, in ef4_unmap_rx_buffer() argument
217 dma_unmap_page(&efx->pci_dev->dev, in ef4_unmap_rx_buffer()
219 PAGE_SIZE << efx->rx_buffer_order, in ef4_unmap_rx_buffer()
246 struct ef4_nic *efx = rx_queue->efx; in ef4_recycle_rx_page() local
269 ef4_unmap_rx_buffer(efx, rx_buf); in ef4_recycle_rx_page()
282 ef4_unmap_rx_buffer(rx_queue->efx, rx_buf); in ef4_fini_rx_buffer()
326 struct ef4_nic *efx = rx_queue->efx; in ef4_fast_push_rx_descriptors() local
335 EF4_BUG_ON_PARANOID(fill_level > rx_queue->efx->rxq_entries); in ef4_fast_push_rx_descriptors()
345 batch_size = efx->rx_pages_per_batch * efx->rx_bufs_per_page; in ef4_fast_push_rx_descriptors()
349 netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev, in ef4_fast_push_rx_descriptors()
366 netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev, in ef4_fast_push_rx_descriptors()
389 struct ef4_nic *efx = rx_queue->efx; in ef4_rx_packet__check_len() local
390 unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding; in ef4_rx_packet__check_len()
400 if ((len > rx_buf->len) && EF4_WORKAROUND_8071(efx)) { in ef4_rx_packet__check_len()
402 netif_err(efx, rx_err, efx->net_dev, in ef4_rx_packet__check_len()
406 efx->type->rx_buffer_padding); in ef4_rx_packet__check_len()
407 ef4_schedule_reset(efx, RESET_TYPE_RX_RECOVERY); in ef4_rx_packet__check_len()
410 netif_err(efx, rx_err, efx->net_dev, in ef4_rx_packet__check_len()
427 struct ef4_nic *efx = channel->efx; in ef4_rx_packet_gro() local
439 if (efx->net_dev->features & NETIF_F_RXHASH) in ef4_rx_packet_gro()
440 skb_set_hash(skb, ef4_rx_buf_hash(efx, eh), in ef4_rx_packet_gro()
458 skb->truesize += n_frags * efx->rx_buffer_truesize; in ef4_rx_packet_gro()
471 struct ef4_nic *efx = channel->efx; in ef4_rx_mk_skb() local
475 skb = netdev_alloc_skb(efx->net_dev, in ef4_rx_mk_skb()
476 efx->rx_ip_align + efx->rx_prefix_size + in ef4_rx_mk_skb()
479 atomic_inc(&efx->n_rx_noskb_drops); in ef4_rx_mk_skb()
485 memcpy(skb->data + efx->rx_ip_align, eh - efx->rx_prefix_size, in ef4_rx_mk_skb()
486 efx->rx_prefix_size + hdr_len); in ef4_rx_mk_skb()
487 skb_reserve(skb, efx->rx_ip_align + efx->rx_prefix_size); in ef4_rx_mk_skb()
508 __free_pages(rx_buf->page, efx->rx_buffer_order); in ef4_rx_mk_skb()
513 skb->truesize += n_frags * efx->rx_buffer_truesize; in ef4_rx_mk_skb()
516 skb->protocol = eth_type_trans(skb, efx->net_dev); in ef4_rx_mk_skb()
526 struct ef4_nic *efx = rx_queue->efx; in ef4_rx_packet() local
540 unlikely(len <= (n_frags - 1) * efx->rx_dma_len) || in ef4_rx_packet()
541 unlikely(len > n_frags * efx->rx_dma_len) || in ef4_rx_packet()
542 unlikely(!efx->rx_scatter)) { in ef4_rx_packet()
550 netif_vdbg(efx, rx_status, efx->net_dev, in ef4_rx_packet()
572 ef4_sync_rx_buffer(efx, rx_buf, rx_buf->len); in ef4_rx_packet()
579 rx_buf->page_offset += efx->rx_prefix_size; in ef4_rx_packet()
580 rx_buf->len -= efx->rx_prefix_size; in ef4_rx_packet()
592 ef4_sync_rx_buffer(efx, rx_buf, efx->rx_dma_len); in ef4_rx_packet()
594 rx_buf->len = len - (n_frags - 1) * efx->rx_dma_len; in ef4_rx_packet()
595 ef4_sync_rx_buffer(efx, rx_buf, rx_buf->len); in ef4_rx_packet()
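For a scattered packet, the ef4_rx_packet() checks above require (n_frags - 1) * rx_dma_len < len <= n_frags * rx_dma_len, and only the last fragment is trimmed to the remainder. A short worked check with illustrative values:

/*
 * Worked example of the multi-fragment length checks in ef4_rx_packet()
 * above; the numbers are illustrative only.
 */
#include <stdbool.h>
#include <stdio.h>

static bool scatter_len_ok(unsigned int len, unsigned int n_frags,
			   unsigned int rx_dma_len)
{
	return len > (n_frags - 1) * rx_dma_len && len <= n_frags * rx_dma_len;
}

int main(void)
{
	unsigned int rx_dma_len = 1536, n_frags = 3, len = 4000;

	if (scatter_len_ok(len, n_frags, rx_dma_len)) {
		/* Last fragment carries whatever the full ones did not. */
		printf("last fragment length = %u\n",
		       len - (n_frags - 1) * rx_dma_len);
	}
	return 0;
}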
643 struct ef4_nic *efx = channel->efx; in __ef4_rx_packet() local
653 (eh + efx->rx_packet_len_offset)); in __ef4_rx_packet()
658 if (unlikely(efx->loopback_selftest)) { in __ef4_rx_packet()
661 ef4_loopback_rx_packet(efx, eh, rx_buf->len); in __ef4_rx_packet()
668 if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM))) in __ef4_rx_packet()
681 struct ef4_nic *efx = rx_queue->efx; in ef4_probe_rx_queue() local
686 entries = max(roundup_pow_of_two(efx->rxq_entries), EF4_MIN_DMAQ_SIZE); in ef4_probe_rx_queue()
690 netif_dbg(efx, probe, efx->net_dev, in ef4_probe_rx_queue()
692 ef4_rx_queue_index(rx_queue), efx->rxq_entries, in ef4_probe_rx_queue()
710 static void ef4_init_rx_recycle_ring(struct ef4_nic *efx, in ef4_init_rx_recycle_ring() argument
726 efx->rx_bufs_per_page); in ef4_init_rx_recycle_ring()
734 struct ef4_nic *efx = rx_queue->efx; in ef4_init_rx_queue() local
737 netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev, in ef4_init_rx_queue()
745 ef4_init_rx_recycle_ring(efx, rx_queue); in ef4_init_rx_queue()
754 max_fill = efx->rxq_entries - EF4_RXD_HEAD_ROOM; in ef4_init_rx_queue()
756 max_fill - efx->rx_pages_per_batch * efx->rx_bufs_per_page; in ef4_init_rx_queue()
776 struct ef4_nic *efx = rx_queue->efx; in ef4_fini_rx_queue() local
779 netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev, in ef4_fini_rx_queue()
803 dma_unmap_page(&efx->pci_dev->dev, state->dma_addr, in ef4_fini_rx_queue()
804 PAGE_SIZE << efx->rx_buffer_order, in ef4_fini_rx_queue()
814 netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev, in ef4_remove_rx_queue()
833 struct ef4_nic *efx = netdev_priv(net_dev); in ef4_filter_rfs() local
851 efx->rx_scatter ? EF4_FILTER_FLAG_RX_SCATTER : 0, in ef4_filter_rfs()
871 rc = efx->type->filter_rfs_insert(efx, &spec); in ef4_filter_rfs()
876 channel = ef4_get_channel(efx, rxq_index); in ef4_filter_rfs()
881 netif_info(efx, rx_status, efx->net_dev, in ef4_filter_rfs()
887 netif_info(efx, rx_status, efx->net_dev, in ef4_filter_rfs()
896 bool __ef4_filter_rfs_expire(struct ef4_nic *efx, unsigned int quota) in __ef4_filter_rfs_expire() argument
898 bool (*expire_one)(struct ef4_nic *efx, u32 flow_id, unsigned int index); in __ef4_filter_rfs_expire()
902 if (!spin_trylock_bh(&efx->filter_lock)) in __ef4_filter_rfs_expire()
905 expire_one = efx->type->filter_rfs_expire_one; in __ef4_filter_rfs_expire()
906 channel_idx = efx->rps_expire_channel; in __ef4_filter_rfs_expire()
907 index = efx->rps_expire_index; in __ef4_filter_rfs_expire()
908 size = efx->type->max_rx_ip_filters; in __ef4_filter_rfs_expire()
910 struct ef4_channel *channel = ef4_get_channel(efx, channel_idx); in __ef4_filter_rfs_expire()
914 expire_one(efx, flow_id, index)) { in __ef4_filter_rfs_expire()
915 netif_info(efx, rx_status, efx->net_dev, in __ef4_filter_rfs_expire()
921 if (++channel_idx == efx->n_channels) in __ef4_filter_rfs_expire()
926 efx->rps_expire_channel = channel_idx; in __ef4_filter_rfs_expire()
927 efx->rps_expire_index = index; in __ef4_filter_rfs_expire()
929 spin_unlock_bh(&efx->filter_lock); in __ef4_filter_rfs_expire()
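The __ef4_filter_rfs_expire() lines above amount to a round-robin scan: up to quota (channel, index) slots are tested, the index wraps at max_rx_ip_filters, the channel wraps at n_channels, and the cursor is stored back in rps_expire_channel/rps_expire_index for the next pass. A standalone sketch of that scan, with a stand-in flow table and expiry callback, is below.

/*
 * Sketch of the round-robin RFS expiry scan implied by
 * __ef4_filter_rfs_expire() above.  The sizes and the expire_one()
 * policy are stand-ins, not the driver's.
 */
#include <stdbool.h>
#include <stdio.h>

#define DEMO_N_CHANNELS         4u
#define DEMO_MAX_RX_IP_FILTERS  8u

static unsigned int rps_expire_channel, rps_expire_index;

/* Stand-in for efx->type->filter_rfs_expire_one(). */
static bool expire_one(unsigned int channel, unsigned int index)
{
	return (channel + index) % 5 == 0;  /* arbitrary demo policy */
}

static void rfs_expire(unsigned int quota)
{
	unsigned int channel = rps_expire_channel;
	unsigned int index = rps_expire_index;

	while (quota--) {
		if (expire_one(channel, index))
			printf("expired filter %u on channel %u\n",
			       index, channel);

		if (++index == DEMO_MAX_RX_IP_FILTERS) {
			index = 0;
			if (++channel == DEMO_N_CHANNELS)
				channel = 0;
		}
	}

	/* Remember where to resume on the next invocation. */
	rps_expire_channel = channel;
	rps_expire_index = index;
}

int main(void)
{
	rfs_expire(16);
	rfs_expire(16);
	return 0;
}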