Lines matching refs: efx
62 static inline u32 efx_rx_buf_hash(struct efx_nic *efx, const u8 *eh) in efx_rx_buf_hash() argument
65 return __le32_to_cpup((const __le32 *)(eh + efx->rx_packet_hash_offset)); in efx_rx_buf_hash()
67 const u8 *data = eh + efx->rx_packet_hash_offset; in efx_rx_buf_hash()
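
The efx_rx_buf_hash() matches above read the 32-bit RSS hash that the hardware places in the receive prefix, either as an aligned little-endian load or, on the fallback path, byte by byte. Below is a minimal userspace sketch of the byte-wise assembly only; the buffer layout, offset and hash value are invented for illustration.

#include <stdint.h>
#include <stdio.h>

/* Assemble a little-endian u32 from an arbitrarily aligned byte pointer,
 * mirroring the byte-wise fallback branch in the listing.  hash_offset is
 * relative to eh and may be negative (the hash usually sits in the hardware
 * prefix in front of the Ethernet header). */
static uint32_t rx_buf_hash_sketch(const uint8_t *eh, int hash_offset)
{
        const uint8_t *data = eh + hash_offset;

        return (uint32_t)data[0] |
               (uint32_t)data[1] << 8 |
               (uint32_t)data[2] << 16 |
               (uint32_t)data[3] << 24;
}

int main(void)
{
        /* Invented layout: a 4-byte hash (0xdeadbeef, little-endian) stored
         * immediately before the spot eh points at. */
        uint8_t buf[8] = { 0xef, 0xbe, 0xad, 0xde };
        const uint8_t *eh = buf + 4;

        printf("hash = 0x%08x\n", rx_buf_hash_sketch(eh, -4));
        return 0;
}
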
84 static inline void efx_sync_rx_buffer(struct efx_nic *efx, in efx_sync_rx_buffer() argument
88 dma_sync_single_for_cpu(&efx->pci_dev->dev, rx_buf->dma_addr, len, in efx_sync_rx_buffer()
92 void efx_rx_config_page_split(struct efx_nic *efx) in efx_rx_config_page_split() argument
94 efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + efx->rx_ip_align, in efx_rx_config_page_split()
96 efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 : in efx_rx_config_page_split()
98 efx->rx_page_buf_step); in efx_rx_config_page_split()
99 efx->rx_buffer_truesize = (PAGE_SIZE << efx->rx_buffer_order) / in efx_rx_config_page_split()
100 efx->rx_bufs_per_page; in efx_rx_config_page_split()
101 efx->rx_pages_per_batch = DIV_ROUND_UP(EFX_RX_PREFERRED_BATCH, in efx_rx_config_page_split()
102 efx->rx_bufs_per_page); in efx_rx_config_page_split()
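
The efx_rx_config_page_split() matches derive four values: the per-buffer stride on a page, how many buffers share a page, each buffer's truesize share, and how many pages one refill batch needs. A minimal userspace sketch of that arithmetic follows; PAGE_SIZE, the alignment, the batch target and the input sizes are stand-in values, and since the listing elides the exact divisor on the bufs-per-page line, the sketch assumes a plain PAGE_SIZE / step split with no per-page housekeeping reserved.

#include <stdio.h>

#define PAGE_SIZE               4096u   /* stand-in; arch dependent */
#define EFX_RX_BUF_ALIGNMENT    64u     /* stand-in for the driver's alignment */
#define EFX_RX_PREFERRED_BATCH  8u      /* stand-in refill batch target */

#define ALIGN_UP(x, a)          (((x) + (a) - 1) & ~((a) - 1))
#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

struct rx_split {
        unsigned int page_buf_step;     /* bytes consumed per buffer on the page */
        unsigned int bufs_per_page;     /* how many buffers share one page */
        unsigned int buffer_truesize;   /* per-buffer share of the page, for skb->truesize */
        unsigned int pages_per_batch;   /* pages needed for one refill batch */
};

static struct rx_split rx_config_page_split(unsigned int rx_dma_len,
                                            unsigned int rx_ip_align,
                                            unsigned int rx_buffer_order)
{
        struct rx_split s;

        s.page_buf_step = ALIGN_UP(rx_dma_len + rx_ip_align, EFX_RX_BUF_ALIGNMENT);
        /* Compound pages hold a single buffer; order-0 pages are split. */
        s.bufs_per_page = rx_buffer_order ? 1 : PAGE_SIZE / s.page_buf_step;
        s.buffer_truesize = (PAGE_SIZE << rx_buffer_order) / s.bufs_per_page;
        s.pages_per_batch = DIV_ROUND_UP(EFX_RX_PREFERRED_BATCH, s.bufs_per_page);
        return s;
}

int main(void)
{
        struct rx_split s = rx_config_page_split(1824, 2, 0);

        printf("step=%u bufs/page=%u truesize=%u pages/batch=%u\n",
               s.page_buf_step, s.bufs_per_page, s.buffer_truesize, s.pages_per_batch);
        return 0;
}
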
108 struct efx_nic *efx = rx_queue->efx; in efx_reuse_page() local
129 dma_unmap_page(&efx->pci_dev->dev, state->dma_addr, in efx_reuse_page()
130 PAGE_SIZE << efx->rx_buffer_order, in efx_reuse_page()
151 struct efx_nic *efx = rx_queue->efx; in efx_init_rx_buffers() local
165 efx->rx_buffer_order); in efx_init_rx_buffers()
169 dma_map_page(&efx->pci_dev->dev, page, 0, in efx_init_rx_buffers()
170 PAGE_SIZE << efx->rx_buffer_order, in efx_init_rx_buffers()
172 if (unlikely(dma_mapping_error(&efx->pci_dev->dev, in efx_init_rx_buffers()
174 __free_pages(page, efx->rx_buffer_order); in efx_init_rx_buffers()
190 rx_buf->dma_addr = dma_addr + efx->rx_ip_align; in efx_init_rx_buffers()
192 rx_buf->page_offset = page_offset + efx->rx_ip_align; in efx_init_rx_buffers()
193 rx_buf->len = efx->rx_dma_len; in efx_init_rx_buffers()
197 dma_addr += efx->rx_page_buf_step; in efx_init_rx_buffers()
198 page_offset += efx->rx_page_buf_step; in efx_init_rx_buffers()
199 } while (page_offset + efx->rx_page_buf_step <= PAGE_SIZE); in efx_init_rx_buffers()
202 } while (++count < efx->rx_pages_per_batch); in efx_init_rx_buffers()
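
The efx_init_rx_buffers() loop carves each DMA-mapped page into rx_bufs_per_page descriptors, offsetting every buffer by rx_ip_align so the packet lands where the driver wants it aligned. A simplified userspace model of the page-offset bookkeeping for a single page (no DMA mapping, no ring writes; sizes are the same stand-ins as above).

#include <stdio.h>

#define PAGE_SIZE 4096u         /* stand-in */

struct fake_rx_buf {
        unsigned long dma_addr; /* would be a dma_addr_t in the driver */
        unsigned int page_offset;
        unsigned int len;
};

/* Emit a buffer every rx_page_buf_step bytes, mirroring the
 * do { ... } while (page_offset + step <= PAGE_SIZE) loop in the listing. */
static unsigned int carve_page(unsigned long page_dma_base,
                               unsigned int rx_page_buf_step,
                               unsigned int rx_ip_align,
                               unsigned int rx_dma_len,
                               struct fake_rx_buf *bufs, unsigned int max_bufs)
{
        unsigned long dma_addr = page_dma_base;
        unsigned int page_offset = 0;
        unsigned int count = 0;

        do {
                if (count == max_bufs)
                        break;
                bufs[count].dma_addr = dma_addr + rx_ip_align;
                bufs[count].page_offset = page_offset + rx_ip_align;
                bufs[count].len = rx_dma_len;
                count++;

                dma_addr += rx_page_buf_step;
                page_offset += rx_page_buf_step;
        } while (page_offset + rx_page_buf_step <= PAGE_SIZE);

        return count;
}

int main(void)
{
        struct fake_rx_buf bufs[8];
        unsigned int n = carve_page(0x10000, 1856, 2, 1824, bufs, 8);

        for (unsigned int i = 0; i < n; i++)
                printf("buf %u: dma=0x%lx off=%u len=%u\n",
                       i, bufs[i].dma_addr, bufs[i].page_offset, bufs[i].len);
        return 0;
}
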
210 static void efx_unmap_rx_buffer(struct efx_nic *efx, in efx_unmap_rx_buffer() argument
217 dma_unmap_page(&efx->pci_dev->dev, in efx_unmap_rx_buffer()
219 PAGE_SIZE << efx->rx_buffer_order, in efx_unmap_rx_buffer()
246 struct efx_nic *efx = rx_queue->efx; in efx_recycle_rx_page() local
269 efx_unmap_rx_buffer(efx, rx_buf); in efx_recycle_rx_page()
282 efx_unmap_rx_buffer(rx_queue->efx, rx_buf); in efx_fini_rx_buffer()
326 struct efx_nic *efx = rx_queue->efx; in efx_fast_push_rx_descriptors() local
335 EFX_WARN_ON_ONCE_PARANOID(fill_level > rx_queue->efx->rxq_entries); in efx_fast_push_rx_descriptors()
345 batch_size = efx->rx_pages_per_batch * efx->rx_bufs_per_page; in efx_fast_push_rx_descriptors()
349 netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev, in efx_fast_push_rx_descriptors()
365 netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev, in efx_fast_push_rx_descriptors()
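
The efx_fast_push_rx_descriptors() matches show the refill quantum: batch_size = rx_pages_per_batch * rx_bufs_per_page, and the ring is topped up only in whole batches. A simplified sketch of that rounding with hypothetical fill levels; the real function additionally bails out early when the fill level is still above its fast-fill trigger.

#include <stdio.h>

/* How many buffers to push, given current and target fill levels, when the
 * queue is refilled only in multiples of a page-allocation batch. */
static unsigned int refill_space(unsigned int fill_level, unsigned int max_fill,
                                 unsigned int rx_pages_per_batch,
                                 unsigned int rx_bufs_per_page)
{
        unsigned int batch_size = rx_pages_per_batch * rx_bufs_per_page;
        unsigned int space = max_fill - fill_level;

        /* Skip the refill entirely until at least one whole batch fits. */
        if (space < batch_size)
                return 0;
        return space - space % batch_size;      /* round down to whole batches */
}

int main(void)
{
        /* Hypothetical: 4 pages x 2 buffers = batches of 8 descriptors. */
        printf("push %u buffers\n", refill_space(1000, 1019, 4, 2));
        return 0;
}
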
388 struct efx_nic *efx = rx_queue->efx; in efx_rx_packet__check_len() local
389 unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding; in efx_rx_packet__check_len()
400 netif_err(efx, rx_err, efx->net_dev, in efx_rx_packet__check_len()
415 struct efx_nic *efx = channel->efx; in efx_rx_packet_gro() local
427 if (efx->net_dev->features & NETIF_F_RXHASH) in efx_rx_packet_gro()
428 skb_set_hash(skb, efx_rx_buf_hash(efx, eh), in efx_rx_packet_gro()
447 skb->truesize += n_frags * efx->rx_buffer_truesize; in efx_rx_packet_gro()
460 struct efx_nic *efx = channel->efx; in efx_rx_mk_skb() local
464 skb = netdev_alloc_skb(efx->net_dev, in efx_rx_mk_skb()
465 efx->rx_ip_align + efx->rx_prefix_size + in efx_rx_mk_skb()
468 atomic_inc(&efx->n_rx_noskb_drops); in efx_rx_mk_skb()
474 memcpy(skb->data + efx->rx_ip_align, eh - efx->rx_prefix_size, in efx_rx_mk_skb()
475 efx->rx_prefix_size + hdr_len); in efx_rx_mk_skb()
476 skb_reserve(skb, efx->rx_ip_align + efx->rx_prefix_size); in efx_rx_mk_skb()
497 __free_pages(rx_buf->page, efx->rx_buffer_order); in efx_rx_mk_skb()
502 skb->truesize += n_frags * efx->rx_buffer_truesize; in efx_rx_mk_skb()
505 skb->protocol = eth_type_trans(skb, efx->net_dev); in efx_rx_mk_skb()
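
efx_rx_mk_skb() copies only the headers (hdr_len bytes plus the hardware RX prefix in front of them) into the skb's linear area and leaves the payload in page fragments; the reserve and copy offsets are arranged so that once the prefix is hidden again by skb_reserve(), the data pointer sits on the Ethernet header. A userspace sketch of just that offset arithmetic, with invented sizes and assuming the linear buffer itself starts word-aligned.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Model the linear-area layout built by the listing's memcpy() + skb_reserve():
 * [ rx_ip_align pad | hw prefix | copied headers ]
 * After skipping rx_ip_align + rx_prefix_size, the data pointer lands on the
 * Ethernet header. */
int main(void)
{
        /* Hypothetical sizes. */
        const unsigned int rx_ip_align = 2;
        const unsigned int rx_prefix_size = 16;
        const unsigned int hdr_len = 64;

        uint8_t frame[256];             /* prefix + packet as it sits in the RX buffer */
        uint8_t linear[128];            /* stand-in for the skb head room */
        const uint8_t *eh = frame + rx_prefix_size;     /* Ethernet header in the buffer */

        memset(frame, 0xab, sizeof(frame));

        memcpy(linear + rx_ip_align, eh - rx_prefix_size, rx_prefix_size + hdr_len);
        /* "skb_reserve(skb, rx_ip_align + rx_prefix_size)" */
        const uint8_t *data = linear + rx_ip_align + rx_prefix_size;

        printf("eth hdr at offset %u, ip hdr at offset %u (mod 4 = %u)\n",
               (unsigned int)(data - linear),
               (unsigned int)(data - linear) + 14,
               ((unsigned int)(data - linear) + 14) % 4);
        return 0;
}
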
515 struct efx_nic *efx = rx_queue->efx; in efx_rx_packet() local
529 unlikely(len <= (n_frags - 1) * efx->rx_dma_len) || in efx_rx_packet()
530 unlikely(len > n_frags * efx->rx_dma_len) || in efx_rx_packet()
531 unlikely(!efx->rx_scatter)) { in efx_rx_packet()
539 netif_vdbg(efx, rx_status, efx->net_dev, in efx_rx_packet()
561 efx_sync_rx_buffer(efx, rx_buf, rx_buf->len); in efx_rx_packet()
568 rx_buf->page_offset += efx->rx_prefix_size; in efx_rx_packet()
569 rx_buf->len -= efx->rx_prefix_size; in efx_rx_packet()
581 efx_sync_rx_buffer(efx, rx_buf, efx->rx_dma_len); in efx_rx_packet()
583 rx_buf->len = len - (n_frags - 1) * efx->rx_dma_len; in efx_rx_packet()
584 efx_sync_rx_buffer(efx, rx_buf, rx_buf->len); in efx_rx_packet()
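
In efx_rx_packet(), only the last fragment of a scattered packet is short: every earlier fragment is synced for the full rx_dma_len, and the final one gets len - (n_frags - 1) * rx_dma_len. The matched validity checks reject lengths that do not fit that shape. A tiny sketch of the per-fragment length computation, with invented numbers.

#include <stdbool.h>
#include <stdio.h>

/* Length of fragment i (0-based) of an n_frags-fragment scattered packet of
 * total length len, where every fragment but the last spans rx_dma_len. */
static unsigned int frag_len(unsigned int len, unsigned int n_frags,
                             unsigned int rx_dma_len, unsigned int i)
{
        return (i == n_frags - 1) ? len - (n_frags - 1) * rx_dma_len : rx_dma_len;
}

/* The checks from the listing: a scattered length must exceed what the first
 * n_frags - 1 buffers hold, and fit within what all n_frags hold. */
static bool scatter_len_ok(unsigned int len, unsigned int n_frags,
                           unsigned int rx_dma_len)
{
        return len > (n_frags - 1) * rx_dma_len && len <= n_frags * rx_dma_len;
}

int main(void)
{
        const unsigned int rx_dma_len = 1824, len = 4000, n_frags = 3;

        printf("valid=%d\n", scatter_len_ok(len, n_frags, rx_dma_len));
        for (unsigned int i = 0; i < n_frags; i++)
                printf("frag %u: %u bytes\n", i, frag_len(len, n_frags, rx_dma_len, i));
        return 0;
}
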
641 struct efx_nic *efx = channel->efx; in __efx_rx_packet() local
651 (eh + efx->rx_packet_len_offset)); in __efx_rx_packet()
656 if (unlikely(efx->loopback_selftest)) { in __efx_rx_packet()
659 efx_loopback_rx_packet(efx, eh, rx_buf->len); in __efx_rx_packet()
666 if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM))) in __efx_rx_packet()
679 struct efx_nic *efx = rx_queue->efx; in efx_probe_rx_queue() local
684 entries = max(roundup_pow_of_two(efx->rxq_entries), EFX_MIN_DMAQ_SIZE); in efx_probe_rx_queue()
688 netif_dbg(efx, probe, efx->net_dev, in efx_probe_rx_queue()
690 efx_rx_queue_index(rx_queue), efx->rxq_entries, in efx_probe_rx_queue()
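
efx_probe_rx_queue() sizes the descriptor ring by rounding the requested entry count up to a power of two and clamping it to a hardware minimum. A sketch of that rounding with a portable bit-twiddling stand-in for the kernel's roundup_pow_of_two(); the minimum below is a made-up value.

#include <stdio.h>

#define EFX_MIN_DMAQ_SIZE 512u  /* stand-in for the hardware minimum */

/* Smallest power of two >= x. */
static unsigned int roundup_pow_of_two_u32(unsigned int x)
{
        unsigned int p = 1;

        while (p < x)
                p <<= 1;
        return p;
}

static unsigned int rxq_hw_entries(unsigned int requested)
{
        unsigned int entries = roundup_pow_of_two_u32(requested);

        return entries > EFX_MIN_DMAQ_SIZE ? entries : EFX_MIN_DMAQ_SIZE;
}

int main(void)
{
        printf("%u -> %u descriptors\n", 1000u, rxq_hw_entries(1000));
        printf("%u -> %u descriptors\n", 100u, rxq_hw_entries(100));
        return 0;
}
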
708 static void efx_init_rx_recycle_ring(struct efx_nic *efx, in efx_init_rx_recycle_ring() argument
724 efx->rx_bufs_per_page); in efx_init_rx_recycle_ring()
732 struct efx_nic *efx = rx_queue->efx; in efx_init_rx_queue() local
735 netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev, in efx_init_rx_queue()
743 efx_init_rx_recycle_ring(efx, rx_queue); in efx_init_rx_queue()
752 max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM; in efx_init_rx_queue()
754 max_fill - efx->rx_pages_per_batch * efx->rx_bufs_per_page; in efx_init_rx_queue()
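
efx_init_rx_queue() leaves EFX_RXD_HEAD_ROOM descriptors unused at the top of the ring and arms the fast-fill trigger one allocation batch below that ceiling, so a refill starts while there is still guaranteed room for a whole batch. A sketch of the two thresholds with stand-in numbers.

#include <stdio.h>

#define EFX_RXD_HEAD_ROOM 2u    /* stand-in for the reserved descriptor slack */

struct rxq_fill_limits {
        unsigned int max_fill;          /* never fill beyond this */
        unsigned int fast_fill_trigger; /* refill once the fill level drops below this */
};

static struct rxq_fill_limits rxq_fill_limits(unsigned int rxq_entries,
                                              unsigned int rx_pages_per_batch,
                                              unsigned int rx_bufs_per_page)
{
        struct rxq_fill_limits l;

        l.max_fill = rxq_entries - EFX_RXD_HEAD_ROOM;
        l.fast_fill_trigger = l.max_fill - rx_pages_per_batch * rx_bufs_per_page;
        return l;
}

int main(void)
{
        struct rxq_fill_limits l = rxq_fill_limits(1024, 4, 2);

        printf("max_fill=%u fast_fill_trigger=%u\n", l.max_fill, l.fast_fill_trigger);
        return 0;
}
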
774 struct efx_nic *efx = rx_queue->efx; in efx_fini_rx_queue() local
777 netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev, in efx_fini_rx_queue()
801 dma_unmap_page(&efx->pci_dev->dev, state->dma_addr, in efx_fini_rx_queue()
802 PAGE_SIZE << efx->rx_buffer_order, in efx_fini_rx_queue()
812 netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev, in efx_remove_rx_queue()
832 struct efx_nic *efx = netdev_priv(req->net_dev); in efx_filter_rfs_work() local
833 struct efx_channel *channel = efx_get_channel(efx, req->rxq_index); in efx_filter_rfs_work()
834 int slot_idx = req - efx->rps_slot; in efx_filter_rfs_work()
839 rc = efx->type->filter_insert(efx, &req->spec, true); in efx_filter_rfs_work()
841 rc %= efx->type->max_rx_ip_filters; in efx_filter_rfs_work()
842 if (efx->rps_hash_table) { in efx_filter_rfs_work()
843 spin_lock_bh(&efx->rps_hash_lock); in efx_filter_rfs_work()
844 rule = efx_rps_hash_find(efx, &req->spec); in efx_filter_rfs_work()
858 spin_unlock_bh(&efx->rps_hash_lock); in efx_filter_rfs_work()
864 mutex_lock(&efx->rps_mutex); in efx_filter_rfs_work()
867 mutex_unlock(&efx->rps_mutex); in efx_filter_rfs_work()
870 netif_info(efx, rx_status, efx->net_dev, in efx_filter_rfs_work()
877 netif_info(efx, rx_status, efx->net_dev, in efx_filter_rfs_work()
886 clear_bit(slot_idx, &efx->rps_slot_map); in efx_filter_rfs_work()
893 struct efx_nic *efx = netdev_priv(net_dev); in efx_filter_rfs() local
903 if (!test_and_set_bit(slot_idx, &efx->rps_slot_map)) in efx_filter_rfs()
927 req = efx->rps_slot + slot_idx; in efx_filter_rfs()
929 efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0, in efx_filter_rfs()
951 if (efx->rps_hash_table) { in efx_filter_rfs()
953 spin_lock(&efx->rps_hash_lock); in efx_filter_rfs()
954 rule = efx_rps_hash_add(efx, &req->spec, &new); in efx_filter_rfs()
960 rule->arfs_id = efx->rps_next_id++ % RPS_NO_FILTER; in efx_filter_rfs()
968 spin_unlock(&efx->rps_hash_lock); in efx_filter_rfs()
986 spin_unlock(&efx->rps_hash_lock); in efx_filter_rfs()
988 clear_bit(slot_idx, &efx->rps_slot_map); in efx_filter_rfs()
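
efx_filter_rfs() is called from the packet receive path, so instead of allocating it claims one of a small fixed set of request slots with test_and_set_bit() on rps_slot_map and defers the filter insertion to a work item (efx_filter_rfs_work() above), which recovers the slot index as req - efx->rps_slot and clears the bit when done. A userspace sketch of the lock-free bitmap claim using C11 atomics; the slot count and names are made up.

#include <stdatomic.h>
#include <stdio.h>

#define RPS_SLOT_COUNT 4        /* hypothetical; mirrors a small fixed slot array */

static atomic_ulong rps_slot_map;       /* one bit per in-flight request slot */

/* Claim the first free slot, or return -1 if all are busy, in which case the
 * request is simply dropped. */
static int claim_slot(void)
{
        for (int slot = 0; slot < RPS_SLOT_COUNT; slot++) {
                unsigned long bit = 1UL << slot;

                if (!(atomic_fetch_or(&rps_slot_map, bit) & bit))
                        return slot;    /* bit was clear before we set it */
        }
        return -1;
}

static void release_slot(int slot)
{
        atomic_fetch_and(&rps_slot_map, ~(1UL << slot));
}

int main(void)
{
        int a = claim_slot(), b = claim_slot();

        printf("claimed %d and %d\n", a, b);
        release_slot(a);
        printf("reclaimed %d\n", claim_slot());
        return 0;
}
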
992 bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned int quota) in __efx_filter_rfs_expire() argument
994 bool (*expire_one)(struct efx_nic *efx, u32 flow_id, unsigned int index); in __efx_filter_rfs_expire()
998 if (!mutex_trylock(&efx->rps_mutex)) in __efx_filter_rfs_expire()
1000 expire_one = efx->type->filter_rfs_expire_one; in __efx_filter_rfs_expire()
1001 channel_idx = efx->rps_expire_channel; in __efx_filter_rfs_expire()
1002 index = efx->rps_expire_index; in __efx_filter_rfs_expire()
1003 size = efx->type->max_rx_ip_filters; in __efx_filter_rfs_expire()
1005 struct efx_channel *channel = efx_get_channel(efx, channel_idx); in __efx_filter_rfs_expire()
1009 expire_one(efx, flow_id, index)) { in __efx_filter_rfs_expire()
1010 netif_info(efx, rx_status, efx->net_dev, in __efx_filter_rfs_expire()
1016 if (++channel_idx == efx->n_channels) in __efx_filter_rfs_expire()
1021 efx->rps_expire_channel = channel_idx; in __efx_filter_rfs_expire()
1022 efx->rps_expire_index = index; in __efx_filter_rfs_expire()
1024 mutex_unlock(&efx->rps_mutex); in __efx_filter_rfs_expire()
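
__efx_filter_rfs_expire() resumes a round-robin scan where the previous call left off, walking (channel, filter index) pairs until its quota is spent, then saves the cursor back into rps_expire_channel / rps_expire_index; it also returns early rather than blocking if rps_mutex is contended. A simplified userspace model of the wrap-around cursor walk; the per-filter expiry decision itself is stubbed out.

#include <stdbool.h>
#include <stdio.h>

struct expire_cursor {
        unsigned int channel;   /* mirrors rps_expire_channel */
        unsigned int index;     /* mirrors rps_expire_index */
};

/* Visit up to 'quota' (channel, index) slots, resuming from and updating the
 * saved cursor, wrapping index at 'size' and channel at 'n_channels'.
 * try_expire stands in for the per-filter expiry check. */
static void expire_scan(struct expire_cursor *cur, unsigned int quota,
                        unsigned int n_channels, unsigned int size,
                        bool (*try_expire)(unsigned int channel, unsigned int index))
{
        unsigned int channel = cur->channel;
        unsigned int index = cur->index;

        while (quota--) {
                if (try_expire(channel, index))
                        printf("expired filter at channel %u, index %u\n", channel, index);
                if (++index == size) {
                        index = 0;
                        if (++channel == n_channels)
                                channel = 0;
                }
        }
        cur->channel = channel; /* remember where to resume next time */
        cur->index = index;
}

static bool log_visit(unsigned int channel, unsigned int index)
{
        printf("checked channel %u, filter %u\n", channel, index);
        return false;   /* pretend nothing expired */
}

int main(void)
{
        struct expire_cursor cur = { .channel = 1, .index = 6 };

        expire_scan(&cur, 4, 2, 8, log_visit); /* wraps from channel 1 back to 0 */
        printf("resume at channel %u, index %u\n", cur.channel, cur.index);
        return 0;
}
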