Lines matching refs: nvchan

329 struct netvsc_channel *nvchan = &net_device->chan_table[q_idx]; in netvsc_alloc_recv_comp_ring() local
330 int node = cpu_to_node(nvchan->channel->target_cpu); in netvsc_alloc_recv_comp_ring()
334 nvchan->mrc.slots = vzalloc_node(size, node); in netvsc_alloc_recv_comp_ring()
335 if (!nvchan->mrc.slots) in netvsc_alloc_recv_comp_ring()
336 nvchan->mrc.slots = vzalloc(size); in netvsc_alloc_recv_comp_ring()
338 return nvchan->mrc.slots ? 0 : -ENOMEM; in netvsc_alloc_recv_comp_ring()
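
The references at 329-338 make up the body of the receive-completion ring allocator. A minimal sketch of how they likely fit together, preferring memory on the channel's NUMA node and falling back to any node; the size calculation (recv_completion_cnt slots of struct recv_comp_data) is an assumption and is not shown in the listing:

static int netvsc_alloc_recv_comp_ring(struct netvsc_device *net_device,
                                       u32 q_idx)
{
        struct netvsc_channel *nvchan = &net_device->chan_table[q_idx];
        int node = cpu_to_node(nvchan->channel->target_cpu);
        size_t size;

        /* Assumption: one slot per outstanding receive completion. */
        size = net_device->recv_completion_cnt * sizeof(struct recv_comp_data);

        /* Prefer memory local to the CPU that services this channel... */
        nvchan->mrc.slots = vzalloc_node(size, node);
        if (!nvchan->mrc.slots)
                nvchan->mrc.slots = vzalloc(size);      /* ...else any node */

        return nvchan->mrc.slots ? 0 : -ENOMEM;
}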
456 struct netvsc_channel *nvchan = &net_device->chan_table[i]; in netvsc_init_buf() local
458 nvchan->recv_buf = kzalloc(net_device->recv_section_size, GFP_KERNEL); in netvsc_init_buf()
459 if (nvchan->recv_buf == NULL) { in netvsc_init_buf()
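
Lines 456-459 sit in the per-channel setup loop of netvsc_init_buf(), giving each channel a private staging buffer one receive section in size. A sketch of the surrounding loop; the loop bound and the error path are assumptions:

        for (i = 0; i < VRSS_CHANNEL_MAX; i++) {        /* assumed bound */
                struct netvsc_channel *nvchan = &net_device->chan_table[i];

                /* Per-channel copy buffer: incoming data is staged here
                 * before it is handed up the stack. */
                nvchan->recv_buf = kzalloc(net_device->recv_section_size,
                                           GFP_KERNEL);
                if (nvchan->recv_buf == NULL) {
                        ret = -ENOMEM;
                        goto cleanup;   /* assumed error label */
                }
        }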
1079 struct netvsc_channel * const nvchan = in netvsc_send_pkt() local
1081 struct vmbus_channel *out_channel = nvchan->channel; in netvsc_send_pkt()
1136 atomic_inc_return(&nvchan->queue_sends); in netvsc_send_pkt()
1153 atomic_read(&nvchan->queue_sends) < 1 && in netvsc_send_pkt()
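
In netvsc_send_pkt() (1079-1153), nvchan is the per-queue channel state and queue_sends counts packets posted to the host but not yet completed; that counter gates when a stopped TX queue may be woken. A hedged sketch of the pattern around those lines; the real function has additional conditions that are omitted here:

        struct netvsc_channel * const nvchan =
                &net_device->chan_table[packet->q_idx];
        struct vmbus_channel *out_channel = nvchan->channel;
        struct netdev_queue *txq = netdev_get_tx_queue(ndev, packet->q_idx);
        int ret;

        /* ret is the result of the vmbus_sendpacket*() call that posts the
         * RNDIS packet on out_channel (the call itself is elided here). */

        if (ret == 0) {
                /* One more packet in flight on this channel. */
                atomic_inc_return(&nvchan->queue_sends);
        } else if (ret == -EAGAIN) {
                netif_tx_stop_queue(txq);       /* host ring full, pause queue */
        }

        /* Wake a stopped queue once nothing is left in flight. */
        if (netif_tx_queue_stopped(txq) &&
            atomic_read(&nvchan->queue_sends) < 1)
                netif_tx_wake_queue(txq);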
1209 struct netvsc_channel *nvchan; in netvsc_send() local
1221 nvchan = &net_device->chan_table[packet->q_idx]; in netvsc_send()
1233 msdp = &nvchan->msd; in netvsc_send()
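
netvsc_send() (1209-1233) maps the packet's queue index to both the channel and the per-channel multi-send state used for RNDIS batching. A minimal fragment; struct multi_send_data is my assumption for the type behind nvchan->msd:

        struct netvsc_channel *nvchan;
        struct multi_send_data *msdp;   /* assumed type of nvchan->msd */

        /* One channel, and one batching context, per transmit queue. */
        nvchan = &net_device->chan_table[packet->q_idx];
        msdp = &nvchan->msd;            /* packet still being coalesced, if any */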
1326 struct netvsc_channel *nvchan) in send_recv_completions() argument
1328 struct multi_recv_comp *mrc = &nvchan->mrc; in send_recv_completions()
1343 ret = vmbus_sendpacket(nvchan->channel, &msg, sizeof(msg), in send_recv_completions()
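
send_recv_completions() (1326-1343) drains the per-channel mrc ring, posting one NVSP completion message per slot back over nvchan->channel. A sketch under the assumption that mrc keeps first/next indices into slots and each slot records a transaction id and status; those field names and the message type are not confirmed by the listing:

static int send_recv_completions(struct net_device *ndev,
                                 struct netvsc_device *nvdev,
                                 struct netvsc_channel *nvchan)
{
        struct multi_recv_comp *mrc = &nvchan->mrc;
        struct recv_comp_msg msg;       /* assumed NVSP completion message type */
        int ret;

        msg.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE;

        while (mrc->first != mrc->next) {
                const struct recv_comp_data *rcd = mrc->slots + mrc->first;

                msg.status = rcd->status;
                ret = vmbus_sendpacket(nvchan->channel, &msg, sizeof(msg),
                                       rcd->tid, VM_PKT_COMP, 0);
                if (ret)        /* host ring full: stop, NAPI retries later */
                        return ret;

                if (++mrc->first == nvdev->recv_completion_cnt)
                        mrc->first = 0;
        }

        return 0;
}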
1383 struct netvsc_channel *nvchan = &nvdev->chan_table[q_idx]; in enq_receive_complete() local
1384 struct multi_recv_comp *mrc = &nvchan->mrc; in enq_receive_complete()
1391 send_recv_completions(ndev, nvdev, nvchan); in enq_receive_complete()
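
enq_receive_complete() (1383-1391) queues one completion into the same mrc ring and, when the ring is filling up, flushes it inline by calling send_recv_completions(). A hedged sketch of that shape; mrc_ring_nearly_full() is a hypothetical placeholder for the real fill-level check, and the slot layout is assumed:

static void enq_receive_complete(struct net_device *ndev,
                                 struct netvsc_device *nvdev, u16 q_idx,
                                 u64 tid, u32 status)
{
        struct netvsc_channel *nvchan = &nvdev->chan_table[q_idx];
        struct multi_recv_comp *mrc = &nvchan->mrc;
        struct recv_comp_data *rcd;

        /* If the ring is getting full, push what we have to the host now.
         * mrc_ring_nearly_full() is hypothetical; the real code computes
         * the fill level from the ring indices. */
        if (unlikely(mrc_ring_nearly_full(nvdev, mrc)))
                send_recv_completions(ndev, nvdev, nvchan);

        /* Record the completion; it is sent later from the NAPI poll loop. */
        rcd = mrc->slots + mrc->next;
        rcd->tid = tid;
        rcd->status = status;

        if (++mrc->next == nvdev->recv_completion_cnt)
                mrc->next = 0;
}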
1411 struct netvsc_channel *nvchan, in netvsc_receive() argument
1415 struct vmbus_channel *channel = nvchan->channel; in netvsc_receive()
1477 nvchan->rsc.cnt = 0; in netvsc_receive()
1490 nvchan->rsc.cnt = 0; in netvsc_receive()
1501 nvchan->rsc.is_last = (i == count - 1); in netvsc_receive()
1507 nvchan, data, buflen); in netvsc_receive()
1511 nvchan->rsc.cnt = 0; in netvsc_receive()
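
Inside netvsc_receive() (1411-1511), nvchan carries the receive-side coalescing (rsc) state across the sub-packets of one host transfer: rsc.cnt is reset whenever a sub-packet has to be dropped, and rsc.is_last marks the final one before the buffer is handed up. A simplified sketch of the per-packet loop; the validation checks, the error handling, and the callee name rndis_filter_receive() for the call completed at 1507 are assumptions:

        for (i = 0; i < count; i++) {
                void *data = recv_buf + offset; /* offset/buflen from the host */

                if (unlikely(buflen > net_device->recv_section_size)) {
                        nvchan->rsc.cnt = 0;    /* drop any half-built RSC chain */
                        status = NVSP_STAT_FAIL;
                        continue;
                }

                nvchan->rsc.is_last = (i == count - 1);

                /* Hand the buffer to the RNDIS layer for this channel. */
                ret = rndis_filter_receive(ndev, net_device,
                                           nvchan, data, buflen);

                if (unlikely(ret != NVSP_STAT_SUCCESS)) {
                        nvchan->rsc.cnt = 0;
                        status = NVSP_STAT_FAIL;
                }
        }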
1620 struct netvsc_channel *nvchan, in netvsc_process_raw_pkt() argument
1626 struct vmbus_channel *channel = nvchan->channel; in netvsc_process_raw_pkt()
1637 return netvsc_receive(ndev, net_device, nvchan, desc); in netvsc_process_raw_pkt()
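
netvsc_process_raw_pkt() (1620-1637) dispatches on the VMBus descriptor type; only the data path needs the full netvsc_channel, which is why netvsc_receive() takes nvchan while the completion path only needs the bare vmbus_channel. A sketch of the dispatch; the completion and inband handler names and parameters are assumptions:

static int netvsc_process_raw_pkt(struct hv_device *device,
                                  struct netvsc_channel *nvchan,
                                  struct netvsc_device *net_device,
                                  struct net_device *ndev,
                                  const struct vmpacket_descriptor *desc,
                                  int budget)
{
        struct vmbus_channel *channel = nvchan->channel;

        switch (desc->type) {
        case VM_PKT_COMP:
                /* Transmit completion from the host (assumed handler). */
                netvsc_send_completion(ndev, net_device, channel, desc, budget);
                break;

        case VM_PKT_DATA_USING_XFER_PAGES:
                /* Received data: returns the number of packets processed. */
                return netvsc_receive(ndev, net_device, nvchan, desc);

        case VM_PKT_DATA_INBAND:
                netvsc_receive_inband(ndev, net_device, desc);  /* assumed */
                break;

        default:
                netdev_err(ndev, "unhandled packet type %d\n", desc->type);
                break;
        }

        return 0;
}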
1665 struct netvsc_channel *nvchan in netvsc_poll() local
1667 struct netvsc_device *net_device = nvchan->net_device; in netvsc_poll()
1668 struct vmbus_channel *channel = nvchan->channel; in netvsc_poll()
1675 if (!nvchan->desc) in netvsc_poll()
1676 nvchan->desc = hv_pkt_iter_first(channel); in netvsc_poll()
1678 nvchan->xdp_flush = false; in netvsc_poll()
1680 while (nvchan->desc && work_done < budget) { in netvsc_poll()
1681 work_done += netvsc_process_raw_pkt(device, nvchan, net_device, in netvsc_poll()
1682 ndev, nvchan->desc, budget); in netvsc_poll()
1683 nvchan->desc = hv_pkt_iter_next(channel, nvchan->desc); in netvsc_poll()
1686 if (nvchan->xdp_flush) in netvsc_poll()
1690 ret = send_recv_completions(ndev, net_device, nvchan); in netvsc_poll()
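
netvsc_poll() (1665-1690) is the NAPI handler. nvchan->desc remembers where the packet iterator stopped when the budget ran out, so the next poll resumes mid-ring; after the loop the driver flushes any XDP redirects and posts queued receive completions. A sketch of that flow, with the completion/interrupt re-arm logic at the end simplified; netvsc_channel_to_device() and the device lookup are assumptions:

static int netvsc_poll(struct napi_struct *napi, int budget)
{
        struct netvsc_channel *nvchan
                = container_of(napi, struct netvsc_channel, napi);
        struct netvsc_device *net_device = nvchan->net_device;
        struct vmbus_channel *channel = nvchan->channel;
        struct hv_device *device = netvsc_channel_to_device(channel);
        struct net_device *ndev = hv_get_drvdata(device);
        int work_done = 0;
        int ret;

        /* Resume where the previous poll left off, or start a fresh sweep. */
        if (!nvchan->desc)
                nvchan->desc = hv_pkt_iter_first(channel);

        nvchan->xdp_flush = false;

        while (nvchan->desc && work_done < budget) {
                work_done += netvsc_process_raw_pkt(device, nvchan, net_device,
                                                    ndev, nvchan->desc, budget);
                nvchan->desc = hv_pkt_iter_next(channel, nvchan->desc);
        }

        if (nvchan->xdp_flush)
                xdp_do_flush();

        /* Post any queued receive completions back to the host. */
        ret = send_recv_completions(ndev, net_device, nvchan);

        /* Simplified: if the budget was not exhausted and the completion
         * ring drained, finish NAPI and re-arm the channel interrupt. */
        if (work_done < budget && ret == 0 &&
            napi_complete_done(napi, work_done))
                hv_end_read(&channel->inbound);

        return min(work_done, budget);
}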
1715 struct netvsc_channel *nvchan = context; in netvsc_channel_cb() local
1716 struct vmbus_channel *channel = nvchan->channel; in netvsc_channel_cb()
1722 if (napi_schedule_prep(&nvchan->napi)) { in netvsc_channel_cb()
1726 __napi_schedule_irqoff(&nvchan->napi); in netvsc_channel_cb()
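
netvsc_channel_cb() (1715-1726) is the VMBus interrupt callback: the channel's context pointer is the netvsc_channel, and all the callback does is mask further host interrupts and hand the work to NAPI. A sketch; the hv_begin_read() masking step between prep and schedule is an assumption about the body:

void netvsc_channel_cb(void *context)
{
        struct netvsc_channel *nvchan = context;
        struct vmbus_channel *channel = nvchan->channel;

        if (napi_schedule_prep(&nvchan->napi)) {
                /* Mask host interrupts until the poll loop has drained
                 * the ring (assumed step, not shown in the listing). */
                hv_begin_read(&channel->inbound);

                __napi_schedule_irqoff(&nvchan->napi);
        }
}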
1762 struct netvsc_channel *nvchan = &net_device->chan_table[i]; in netvsc_device_add() local
1764 nvchan->channel = device->channel; in netvsc_device_add()
1765 nvchan->net_device = net_device; in netvsc_device_add()
1766 u64_stats_init(&nvchan->tx_stats.syncp); in netvsc_device_add()
1767 u64_stats_init(&nvchan->rx_stats.syncp); in netvsc_device_add()
1769 ret = xdp_rxq_info_reg(&nvchan->xdp_rxq, ndev, i, 0); in netvsc_device_add()
1776 ret = xdp_rxq_info_reg_mem_model(&nvchan->xdp_rxq, in netvsc_device_add()
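
Finally, netvsc_device_add() (1762-1776) pre-populates every chan_table slot with the primary channel and registers an XDP RX queue for each, so subchannels opened later only need to overwrite nvchan->channel. A sketch of that loop; the loop bound, error labels, and the MEM_TYPE_PAGE_SHARED memory model are assumptions:

        for (i = 0; i < VRSS_CHANNEL_MAX; i++) {        /* assumed bound */
                struct netvsc_channel *nvchan = &net_device->chan_table[i];

                /* Until subchannels are opened, every queue maps onto the
                 * primary VMBus channel. */
                nvchan->channel = device->channel;
                nvchan->net_device = net_device;
                u64_stats_init(&nvchan->tx_stats.syncp);
                u64_stats_init(&nvchan->rx_stats.syncp);

                ret = xdp_rxq_info_reg(&nvchan->xdp_rxq, ndev, i, 0);
                if (ret) {
                        netdev_err(ndev, "xdp_rxq_info_reg fail: %d\n", ret);
                        goto cleanup;   /* assumed error label */
                }

                ret = xdp_rxq_info_reg_mem_model(&nvchan->xdp_rxq,
                                                 MEM_TYPE_PAGE_SHARED, NULL);
                if (ret) {
                        netdev_err(ndev, "xdp reg_mem_model fail: %d\n", ret);
                        goto cleanup;
                }
        }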