// Excerpts from drivers/net/hyperv/netvsc.c (Linux Hyper-V netvsc driver),
// from a source search for "mixed", "-", "burst".
// The lines below are non-contiguous; "..." marks elided source code.
// SPDX-License-Identifier: GPL-2.0-only
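
/* netvsc_switch_datapath() - as the lines below show, this sends an
 * NVSP_MSG4_TYPE_SWITCH_DATA_PATH message so the host steers traffic to
 * either the SR-IOV VF or the synthetic NIC, then waits for the host's
 * completion before recording the active datapath.
 */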
/* in netvsc_switch_datapath() */
        struct hv_device *dev = net_device_ctx->device_ctx;
        struct netvsc_device *nv_dev = rtnl_dereference(net_device_ctx->nvdev);
        struct nvsp_message *init_pkt = &nv_dev->channel_init_pkt;
        ...
        net_device_ctx->data_path_is_vf = vf;
        ...
        init_pkt->hdr.msg_type = NVSP_MSG4_TYPE_SWITCH_DATA_PATH;
        if (vf)
                init_pkt->msg.v4_msg.active_dp.active_datapath =
                        NVSP_DATAPATH_VF;
        else
                init_pkt->msg.v4_msg.active_dp.active_datapath =
                        NVSP_DATAPATH_SYNTHETIC;
        ...
        ret = vmbus_sendpacket(dev->channel, init_pkt, ...);
        ...
        if (ret != -EAGAIN) {
        ...
        wait_for_completion(&nv_dev->channel_init_wait);
        net_device_ctx->data_path_is_vf = vf;

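/* netvsc_subchan_work() - finishes subchannel setup via
 * rndis_set_subchannel(); on failure it falls back to a single channel,
 * deleting the extra NAPI instances.
 */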
/* in netvsc_subchan_work() */
        rdev = nvdev->extension;
        if (rdev) {
                ret = rndis_set_subchannel(rdev->ndev, nvdev, NULL);
                if (ret == 0) {
                        netif_device_attach(rdev->ndev);
                } else {
                        /* fallback to only primary channel */
                        for (i = 1; i < nvdev->num_chn; i++)
                                netif_napi_del(&nvdev->chan_table[i].napi);

                        nvdev->max_chn = 1;
                        nvdev->num_chn = 1;
                }
        }

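/* alloc_net_device() - initial state for a new netvsc_device: TX starts
 * disabled, and the completion/waitqueues and the subchannel work item
 * used throughout this file are initialized here.
 */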
/* in alloc_net_device() */
        init_waitqueue_head(&net_device->wait_drain);
        net_device->destroy = false;
        net_device->tx_disable = true;

        net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
        net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;

        init_completion(&net_device->channel_init_wait);
        init_waitqueue_head(&net_device->subchan_open);
        INIT_WORK(&net_device->subchan_work, netvsc_subchan_work);

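/* free_netvsc_device() - releases the receive/send buffers (the
 * *_original_buf variants exist when the buffers were remapped for an
 * isolated VM) and the per-channel XDP/receive state;
 * free_netvsc_device_rcu() defers it via call_rcu().
 */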
/* in free_netvsc_device() */
        kfree(nvdev->extension);

        if (nvdev->recv_original_buf)
                vfree(nvdev->recv_original_buf);
        else
                vfree(nvdev->recv_buf);

        if (nvdev->send_original_buf)
                vfree(nvdev->send_original_buf);
        else
                vfree(nvdev->send_buf);

        bitmap_free(nvdev->send_section_map);

        for (i = 0; i < VRSS_CHANNEL_MAX; i++) {
                xdp_rxq_info_unreg(&nvdev->chan_table[i].xdp_rxq);
                kfree(nvdev->chan_table[i].recv_buf);
                vfree(nvdev->chan_table[i].mrc.slots);
        }

/* in free_netvsc_device_rcu() */
        call_rcu(&nvdev->rcu, free_netvsc_device);

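/* netvsc_revoke_recv_buf()/netvsc_revoke_send_buf() - ask the host to drop
 * its references to the receive/send buffers via NVSP_MSG1_TYPE_REVOKE_*
 * messages; a rescinded channel means the host is already gone, so the
 * send error is ignored in that case.
 */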
/* in netvsc_revoke_recv_buf() */
        if (net_device->recv_section_cnt) {
                revoke_packet = &net_device->revoke_packet;
                ...
                revoke_packet->hdr.msg_type =
                        NVSP_MSG1_TYPE_REVOKE_RECV_BUF;
                revoke_packet->msg.v1_msg.revoke_recv_buf.id =
                        NETVSC_RECEIVE_BUFFER_ID;
                ...
                ret = vmbus_sendpacket(device->channel,
                                       revoke_packet, ...);
                ...
                if (device->channel->rescind)
                        ret = 0;
                ...
                net_device->recv_section_cnt = 0;
        }

/* in netvsc_revoke_send_buf() */
        if (net_device->send_section_cnt) {
                revoke_packet = &net_device->revoke_packet;
                ...
                revoke_packet->hdr.msg_type =
                        NVSP_MSG1_TYPE_REVOKE_SEND_BUF;
                revoke_packet->msg.v1_msg.revoke_send_buf.id =
                        NETVSC_SEND_BUFFER_ID;
                ...
                ret = vmbus_sendpacket(device->channel,
                                       revoke_packet, ...);
                ...
                if (device->channel->rescind)
                        ret = 0;
                ...
                net_device->send_section_cnt = 0;
        }

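/* netvsc_teardown_recv_gpadl()/netvsc_teardown_send_gpadl() - release the
 * GPADL handles that made the buffers visible to the host; only done when
 * a handle is still outstanding.
 */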
/* in netvsc_teardown_recv_gpadl() */
        if (net_device->recv_buf_gpadl_handle.gpadl_handle) {
                ret = vmbus_teardown_gpadl(device->channel,
                                           &net_device->recv_buf_gpadl_handle);
                ...

/* in netvsc_teardown_send_gpadl() */
        if (net_device->send_buf_gpadl_handle.gpadl_handle) {
                ret = vmbus_teardown_gpadl(device->channel,
                                           &net_device->send_buf_gpadl_handle);
                ...

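/* netvsc_alloc_recv_comp_ring() - per-channel ring of receive-completion
 * slots; allocation is tried NUMA-local to the channel's target CPU first,
 * then anywhere.
 */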
/* in netvsc_alloc_recv_comp_ring() */
        struct netvsc_channel *nvchan = &net_device->chan_table[q_idx];
        int node = cpu_to_node(nvchan->channel->target_cpu);
        ...
        size = net_device->recv_completion_cnt * sizeof(struct recv_comp_data);
        nvchan->mrc.slots = vzalloc_node(size, node);
        if (!nvchan->mrc.slots)
                nvchan->mrc.slots = vzalloc(size);

        return nvchan->mrc.slots ? 0 : -ENOMEM;

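/* netvsc_init_buf() - the long sequence below allocates the receive and
 * send buffers, shares them with the host via GPADLs (remapping through
 * hv_map_memory() when the VM is isolated), announces them with
 * NVSP_MSG1_TYPE_SEND_RECV_BUF / NVSP_MSG1_TYPE_SEND_SEND_BUF, and
 * sanity-checks the section geometry the host reports before carving the
 * buffers into sections.
 */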
/* in netvsc_init_buf() */
        buf_size = device_info->recv_sections * device_info->recv_section_size;
        ...
        /* Legacy hosts only allow smaller receive buffer */
        if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_2)
                ...

        net_device->recv_buf = vzalloc(buf_size);
        if (!net_device->recv_buf) {
                ...
                ret = -ENOMEM;
                goto cleanup;
        }

        net_device->recv_buf_size = buf_size;

        ret = vmbus_establish_gpadl(device->channel, net_device->recv_buf,
                                    buf_size,
                                    &net_device->recv_buf_gpadl_handle);
        ...
        vaddr = hv_map_memory(net_device->recv_buf, buf_size);
        if (!vaddr) {
                ret = -ENOMEM;
                goto cleanup;
        }

        net_device->recv_original_buf = net_device->recv_buf;
        net_device->recv_buf = vaddr;
        ...
        /* Notify the NetVsp of the gpadl handle */
        init_packet = &net_device->channel_init_pkt;
        ...
        init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_RECV_BUF;
        init_packet->msg.v1_msg.send_recv_buf.gpadl_handle =
                net_device->recv_buf_gpadl_handle.gpadl_handle;
        init_packet->msg.v1_msg.send_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;
        ...
        ret = vmbus_sendpacket(device->channel, init_packet, ...);
        ...
        wait_for_completion(&net_device->channel_init_wait);

        /* Check the response */
        resp = &init_packet->msg.v1_msg.send_recv_buf_complete;
        if (resp->status != NVSP_STAT_SUCCESS) {
                netdev_err(ndev,
                           "Unable to complete receive buffer initialization with NetVsp - status %d\n",
                           resp->status);
                ret = -EINVAL;
                goto cleanup;
        }
        ...
        netdev_dbg(ndev, "Receive sections: %u sub_allocs: size %u count: %u\n",
                   resp->num_sections, resp->sections[0].sub_alloc_size,
                   resp->sections[0].num_sub_allocs);

        /* There should only be one section for the entire receive buffer */
        if (resp->num_sections != 1 || resp->sections[0].offset != 0) {
                ret = -EINVAL;
                goto cleanup;
        }

        net_device->recv_section_size = resp->sections[0].sub_alloc_size;
        net_device->recv_section_cnt = resp->sections[0].num_sub_allocs;

        /* Ensure buffer will not overflow */
        if (net_device->recv_section_size < NETVSC_MTU_MIN || (u64)net_device->recv_section_size *
            (u64)net_device->recv_section_cnt > (u64)buf_size) {
                netdev_err(ndev, "invalid recv_section_size %u\n",
                           net_device->recv_section_size);
                ret = -EINVAL;
                goto cleanup;
        }

        for (i = 0; i < VRSS_CHANNEL_MAX; i++) {
                struct netvsc_channel *nvchan = &net_device->chan_table[i];

                nvchan->recv_buf = kzalloc(net_device->recv_section_size, GFP_KERNEL);
                if (nvchan->recv_buf == NULL) {
                        ret = -ENOMEM;
                        goto cleanup;
                }
        }
        ...
        net_device->recv_completion_cnt = net_device->recv_section_cnt + 1;
        ...
        /* Now setup the send buffer. */
        buf_size = device_info->send_sections * device_info->send_section_size;
        ...
        net_device->send_buf = vzalloc(buf_size);
        if (!net_device->send_buf) {
                ...
                ret = -ENOMEM;
                goto cleanup;
        }
        net_device->send_buf_size = buf_size;
        ...
        ret = vmbus_establish_gpadl(device->channel, net_device->send_buf,
                                    buf_size,
                                    &net_device->send_buf_gpadl_handle);
        ...
        vaddr = hv_map_memory(net_device->send_buf, buf_size);
        if (!vaddr) {
                ret = -ENOMEM;
                goto cleanup;
        }

        net_device->send_original_buf = net_device->send_buf;
        net_device->send_buf = vaddr;
        ...
        /* Notify the NetVsp of the gpadl handle */
        init_packet = &net_device->channel_init_pkt;
        ...
        init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_SEND_BUF;
        init_packet->msg.v1_msg.send_send_buf.gpadl_handle =
                net_device->send_buf_gpadl_handle.gpadl_handle;
        init_packet->msg.v1_msg.send_send_buf.id = NETVSC_SEND_BUFFER_ID;
        ...
        ret = vmbus_sendpacket(device->channel, init_packet, ...);
        ...
        wait_for_completion(&net_device->channel_init_wait);

        /* Check the response */
        if (init_packet->msg.v1_msg.send_send_buf_complete.status != NVSP_STAT_SUCCESS) {
                netdev_err(ndev,
                           "Unable to complete send buffer initialization with NetVsp - status %d\n",
                           init_packet->msg.v1_msg.send_send_buf_complete.status);
                ret = -EINVAL;
                goto cleanup;
        }
        ...
        net_device->send_section_size =
                init_packet->msg.v1_msg.send_send_buf_complete.section_size;
        if (net_device->send_section_size < NETVSC_MTU_MIN) {
                netdev_err(ndev, "invalid send_section_size %u\n",
                           net_device->send_section_size);
                ret = -EINVAL;
                goto cleanup;
        }
        ...
        /* Section count is simply the size divided by the section size. */
        net_device->send_section_cnt = buf_size / net_device->send_section_size;

        netdev_dbg(ndev, "Send section size: %d, Section count:%d\n",
                   net_device->send_section_size, net_device->send_section_cnt);

        /* Setup state for managing the send buffer. */
        net_device->send_section_map = bitmap_zalloc(net_device->send_section_cnt,
                                                     GFP_KERNEL);
        if (!net_device->send_section_map) {
                ret = -ENOMEM;
                goto cleanup;
        }

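/* negotiate_nvsp_ver() - offers a single NVSP version to the host and, for
 * NVSPv2 and later, follows up with an NDIS config message advertising MTU
 * and the 802.1q/SR-IOV/teaming/RSC capabilities seen below. Note that
 * SR-IOV is deliberately not offered when the host supports isolation.
 */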
/* in negotiate_nvsp_ver() */
        init_packet->hdr.msg_type = NVSP_MSG_TYPE_INIT;
        init_packet->msg.init_msg.init.min_protocol_ver = nvsp_ver;
        init_packet->msg.init_msg.init.max_protocol_ver = nvsp_ver;
        ...
        ret = vmbus_sendpacket(device->channel, init_packet, ...);
        ...
        wait_for_completion(&net_device->channel_init_wait);

        if (init_packet->msg.init_msg.init_complete.status !=
            NVSP_STAT_SUCCESS)
                return -EINVAL;
        ...
        /* NVSPv2 or later: Send NDIS config */
        init_packet->hdr.msg_type = NVSP_MSG2_TYPE_SEND_NDIS_CONFIG;
        init_packet->msg.v2_msg.send_ndis_config.mtu = ndev->mtu + ETH_HLEN;
        init_packet->msg.v2_msg.send_ndis_config.capability.ieee8021q = 1;
        ...
        if (hv_is_isolation_supported())
                netdev_info(ndev, "SR-IOV not advertised by guests on the host supporting isolation\n");
        else
                init_packet->msg.v2_msg.send_ndis_config.capability.sriov = 1;

        /* Teaming bit is needed to receive link speed updates */
        init_packet->msg.v2_msg.send_ndis_config.capability.teaming = 1;
        ...
        if (nvsp_ver >= NVSP_PROTOCOL_VERSION_61)
                init_packet->msg.v2_msg.send_ndis_config.capability.rsc = 1;
        ...
        ret = vmbus_sendpacket(device->channel, init_packet, ...);

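/* netvsc_connect_vsp() - walks ver_list from newest to oldest until one
 * version negotiates, enforces NVSP >= 6.1 on isolated hosts, then reports
 * the guest's NDIS version to the host.
 */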
/* in netvsc_connect_vsp() */
        init_packet = &net_device->channel_init_pkt;

        /* Negotiate the latest NVSP protocol supported */
        for (i = ARRAY_SIZE(ver_list) - 1; i >= 0; i--)
                if (negotiate_nvsp_ver(device, net_device, init_packet,
                                       ver_list[i]) == 0) {
                        net_device->nvsp_version = ver_list[i];
                        break;
                }

        if (i < 0) {
                ret = -EPROTO;
                goto cleanup;
        }

        if (hv_is_isolation_supported() && net_device->nvsp_version < NVSP_PROTOCOL_VERSION_61) {
                netdev_err(ndev, "Invalid NVSP version 0x%x (expected >= 0x%x) from the host supporting isolation\n",
                           net_device->nvsp_version, NVSP_PROTOCOL_VERSION_61);
                ret = -EPROTO;
                goto cleanup;
        }

        pr_debug("Negotiated NVSP version:%x\n", net_device->nvsp_version);
        ...
        if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_4)
                ndis_version = 0x00060001;
        else
                ndis_version = 0x0006001e;

        init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_NDIS_VER;
        init_packet->msg.v1_msg.send_ndis_ver.ndis_major_ver =
                (ndis_version & 0xFFFF0000) >> 16;
        init_packet->msg.v1_msg.send_ndis_ver.ndis_minor_ver =
                ndis_version & 0xFFFF;
        ...
        ret = vmbus_sendpacket(device->channel, init_packet, ...);

/*
 * netvsc_device_remove - Callback when the root bus device is removed
 */
/* in netvsc_device_remove() */
        struct netvsc_device *net_device
                = rtnl_dereference(net_device_ctx->nvdev);
        ...
        /*
         * Revoke receive buffer. If host is pre-Win2016 then tear down
         * receive buffer GPADL. Do the same for send buffer.
         */
        ...
        RCU_INIT_POINTER(net_device_ctx->nvdev, NULL);
        ...
        for (i = 0; i < net_device->num_chn; i++) {
                ...
                napi_disable(&net_device->chan_table[i].napi);
                netif_napi_del(&net_device->chan_table[i].napi);
        }
        ...
        vmbus_close(device->channel);
        ...
        if (net_device->recv_original_buf)
                hv_unmap_memory(net_device->recv_buf);

        if (net_device->send_original_buf)
                hv_unmap_memory(net_device->send_buf);

/* in netvsc_free_send_slot() */
        sync_change_bit(index, net_device->send_section_map);

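/* netvsc_send_tx_complete() - runs for each host TX completion: it looks
 * the skb up by transaction ID, returns the send-buffer slot, updates the
 * per-queue stats seen below, and wakes a stopped TX queue once enough
 * ring space is available again.
 */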
/* in netvsc_send_tx_complete() */
        cmd_rqst = channel->request_addr_callback(channel, desc->trans_id);
        if (cmd_rqst == VMBUS_RQST_ERROR) {
                netdev_err(ndev, "Invalid transaction ID %llx\n", desc->trans_id);
                return;
        }
        ...
        /* Notify the layer above us */
        if (likely(skb)) {
                struct hv_netvsc_packet *packet
                        = (struct hv_netvsc_packet *)skb->cb;
                u32 send_index = packet->send_buf_index;
                ...
                q_idx = packet->q_idx;

                tx_stats = &net_device->chan_table[q_idx].tx_stats;

                u64_stats_update_begin(&tx_stats->syncp);
                tx_stats->packets += packet->total_packets;
                tx_stats->bytes += packet->total_bytes;
                u64_stats_update_end(&tx_stats->syncp);

                netvsc_dma_unmap(ndev_ctx->device_ctx, packet);
                ...
        }

        queue_sends =
                atomic_dec_return(&net_device->chan_table[q_idx].queue_sends);

        if (unlikely(net_device->destroy)) {
                if (queue_sends == 0)
                        wake_up(&net_device->wait_drain);
        } else {
                ...
                if (netif_tx_queue_stopped(txq) && !net_device->tx_disable &&
                    (hv_get_avail_to_write_percent(&channel->outbound) >
                     RING_AVAIL_PERCENT_HIWATER || queue_sends < 1)) {
                        netif_tx_wake_queue(txq);
                        ndev_ctx->eth_stats.wake_queue++;
                }
        }

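/* netvsc_send_completion() - dispatches host completion packets: a
 * datapath-switch completion just wakes the init waiter, while buffer and
 * init responses are copied back into channel_init_pkt before completing.
 */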
/* in netvsc_send_completion() */
        cmd_rqst = incoming_channel->request_addr_callback(incoming_channel,
                                                           desc->trans_id);
        if (cmd_rqst == VMBUS_RQST_ERROR) {
                netdev_err(ndev, "Invalid transaction ID %llx\n", desc->trans_id);
                return;
        }
        ...
        switch (pkt_rqst->hdr.msg_type) {
        case NVSP_MSG4_TYPE_SWITCH_DATA_PATH:
                complete(&net_device->channel_init_wait);
        ...
        switch (nvsp_packet->hdr.msg_type) {
        ...
                /* Copy the response back */
                memcpy(&net_device->channel_init_pkt, nvsp_packet,
                       sizeof(struct nvsp_message));
                complete(&net_device->channel_init_wait);
        ...
        default:
                netdev_err(ndev,
                           "Unknown send completion type %d received!!\n",
                           nvsp_packet->hdr.msg_type);

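/* netvsc_get_next_send_section() claims a free send-buffer section from the
 * send_section_map bitmap; netvsc_copy_to_send_buf() then copies a small
 * packet into that section, padding the tail so the next packet stays
 * pkt_align-aligned.
 */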
/* in netvsc_get_next_send_section() */
        unsigned long *map_addr = net_device->send_section_map;
        ...
        for_each_clear_bit(i, map_addr, net_device->send_section_cnt) {
                if (sync_test_and_set_bit(i, map_addr) == 0)
                        return i;
        }

/* in netvsc_copy_to_send_buf() */
        char *start = net_device->send_buf;
        char *dest = start + (section_index * net_device->send_section_size)
                     + pend_size;
        ...
        u32 page_count = packet->cp_partial ? packet->rmsg_pgcnt :
                packet->page_buf_cnt;
        ...
        /* Add padding */
        remain = packet->total_data_buflen & (net_device->pkt_align - 1);
        if (xmit_more && remain) {
                padding = net_device->pkt_align - remain;
                rndis_msg->msg_len += padding;
                packet->total_data_buflen += padding;
        }

/* in netvsc_dma_unmap() */
        u32 page_count = packet->cp_partial ?
                packet->page_buf_cnt - packet->rmsg_pgcnt :
                packet->page_buf_cnt;
        ...
        if (!packet->dma_range)
                return;

        for (i = 0; i < page_count; i++)
                dma_unmap_single(&hv_dev->device, packet->dma_range[i].dma,
                                 packet->dma_range[i].mapping_size,
                                 DMA_TO_DEVICE);

        kfree(packet->dma_range);

/* netvsc_dma_map - Map swiotlb bounce buffer with data page of
 * packet sent by vmbus_sendpacket_pagebuffer() in an Isolation VM.
 * ...
 * Each entry in the page buffer array has a separate offset and
 * len that may be non-zero, even for entries in the middle of the
 * array, so each entry must be individually mapped rather than as
 * a contiguous unit.
 */
/* in netvsc_dma_map() */
        u32 page_count = packet->cp_partial ?
                packet->page_buf_cnt - packet->rmsg_pgcnt :
                packet->page_buf_cnt;
        ...
        packet->dma_range = kcalloc(page_count,
                                    sizeof(*packet->dma_range),
                                    GFP_ATOMIC);
        if (!packet->dma_range)
                return -ENOMEM;

        for (i = 0; i < page_count; i++) {
                ...
                dma = dma_map_single(&hv_dev->device, src, len,
                                     DMA_TO_DEVICE);
                if (dma_mapping_error(&hv_dev->device, dma)) {
                        kfree(packet->dma_range);
                        return -ENOMEM;
                }

                packet->dma_range[i].dma = dma;
                packet->dma_range[i].mapping_size = len;
                ...
        }

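/* netvsc_send_pkt() - wraps one RNDIS message in an NVSP packet and puts it
 * on the VMBus ring, using vmbus_sendpacket_pagebuffer() when page buffers
 * are attached. The TX queue is stopped when the ring runs low and woken
 * again from the completion path; per the lines below, -EAGAIN from a
 * failed send becomes -ENOSPC once the queue has been stopped.
 */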
/* in netvsc_send_pkt() */
        struct netvsc_channel * const nvchan =
                &net_device->chan_table[packet->q_idx];
        struct vmbus_channel *out_channel = nvchan->channel;
        ...
        struct netdev_queue *txq = netdev_get_tx_queue(ndev, packet->q_idx);
        ...
        u32 ring_avail = hv_get_avail_to_write_percent(&out_channel->outbound);
        ...
        if (skb)
                rpkt->channel_type = 0;         /* 0 is RMC_DATA */
        else
                rpkt->channel_type = 1;         /* 1 is RMC_CONTROL */

        rpkt->send_buf_section_index = packet->send_buf_index;
        if (packet->send_buf_index == NETVSC_INVALID_INDEX)
                rpkt->send_buf_section_size = 0;
        else
                rpkt->send_buf_section_size = packet->total_data_buflen;
        ...
        if (out_channel->rescind)
                return -ENODEV;
        ...
        packet->dma_range = NULL;
        if (packet->page_buf_cnt) {
                if (packet->cp_partial)
                        pb += packet->rmsg_pgcnt;

                ret = netvsc_dma_map(ndev_ctx->device_ctx, packet, pb);
                if (ret) {
                        ret = -EAGAIN;
                        goto exit;
                }

                ret = vmbus_sendpacket_pagebuffer(out_channel,
                                                  pb, packet->page_buf_cnt,
                                                  &nvmsg, sizeof(nvmsg), req_id);

                if (ret)
                        netvsc_dma_unmap(ndev_ctx->device_ctx, packet);
        } else {
                ...
        }

        if (ret == 0) {
                atomic_inc_return(&nvchan->queue_sends);

                if (ring_avail < RING_AVAIL_PERCENT_LOWATER) {
                        netif_tx_stop_queue(txq);
                        ndev_ctx->eth_stats.stop_queue++;
                }
        } else if (ret == -EAGAIN) {
                netif_tx_stop_queue(txq);
                ndev_ctx->eth_stats.stop_queue++;
        } else {
                netdev_err(ndev,
                           "Unable to send packet pages %u len %u, ret %d\n",
                           packet->page_buf_cnt, packet->total_data_buflen,
                           ret);
        }

        if (netif_tx_queue_stopped(txq) &&
            atomic_read(&nvchan->queue_sends) < 1 &&
            !net_device->tx_disable) {
                netif_tx_wake_queue(txq);
                ndev_ctx->eth_stats.wake_queue++;
                if (ret == -EAGAIN)
                        ret = -ENOSPC;
        }

/* in move_pkt_msd() */
        *msd_skb = msdp->skb;
        *msd_send = msdp->pkt;
        msdp->skb = NULL;
        msdp->pkt = NULL;
        msdp->count = 0;

/* For small, non-LSO packets we copy the packet to a send buffer
 * which is pre-registered with the Hyper-V side. This enables the
 * ...
 * a burst of packets, keep on copying into the buffer until it is
 * full or we are done collecting a burst. If there is an existing
 * ...
 */
/* in netvsc_send() */
        struct netvsc_device *net_device
                = rcu_dereference_bh(ndev_ctx->nvdev);
        struct hv_device *device = ndev_ctx->device_ctx;
        ...
        u32 pktlen = packet->total_data_buflen, msd_len = 0;
        ...
        /* If device is rescinded, return error and packet will get dropped. */
        if (unlikely(!net_device || net_device->destroy))
                return -ENODEV;

        nvchan = &net_device->chan_table[packet->q_idx];
        packet->send_buf_index = NETVSC_INVALID_INDEX;
        packet->cp_partial = false;

        /* Send a control message or XDP packet directly without accessing
         * msd (Multi-Send Data) field which may be changed during data packet
         * processing.
         */
        ...
        /* batch packets in send buffer if possible */
        msdp = &nvchan->msd;
        if (msdp->pkt)
                msd_len = msdp->pkt->total_data_buflen;

        try_batch = msd_len > 0 && msdp->count < net_device->max_pkt;
        if (try_batch && msd_len + pktlen + net_device->pkt_align <
            net_device->send_section_size) {
                section_index = msdp->pkt->send_buf_index;

        } else if (try_batch && msd_len + packet->rmsg_size <
                   net_device->send_section_size) {
                section_index = msdp->pkt->send_buf_index;
                packet->cp_partial = true;

        } else if (pktlen + net_device->pkt_align <
                   net_device->send_section_size) {
                section_index = netvsc_get_next_send_section(net_device);
                if (unlikely(section_index == NETVSC_INVALID_INDEX)) {
                        ++ndev_ctx->eth_stats.tx_send_full;
                } else {
                        move_pkt_msd(&msd_send, &msd_skb, msdp);
                        msd_len = 0;
                }
        }

        /* Keep aggregating only if stack says more data is coming
         * and not doing mixed modes send and not flow blocked
         */
        xmit_more = netdev_xmit_more() &&
                !packet->cp_partial &&
                !netif_xmit_stopped(netdev_get_tx_queue(ndev, packet->q_idx));

        if (section_index != NETVSC_INVALID_INDEX) {
                ...
                packet->send_buf_index = section_index;

                if (packet->cp_partial) {
                        packet->page_buf_cnt -= packet->rmsg_pgcnt;
                        packet->total_data_buflen = msd_len + packet->rmsg_size;
                } else {
                        packet->page_buf_cnt = 0;
                        packet->total_data_buflen += msd_len;
                }

                if (msdp->pkt) {
                        packet->total_packets += msdp->pkt->total_packets;
                        packet->total_bytes += msdp->pkt->total_bytes;
                }

                if (msdp->skb)
                        dev_consume_skb_any(msdp->skb);

                if (xmit_more) {
                        msdp->skb = skb;
                        msdp->pkt = packet;
                        msdp->count++;
                } else {
                        cur_send = packet;
                        msdp->skb = NULL;
                        msdp->pkt = NULL;
                        msdp->count = 0;
                }
        }
        ...
        if (msd_send) {
                int m_ret = netvsc_send_pkt(device, msd_send, net_device,
                                            NULL, msd_skb);

                if (m_ret != 0) {
                        netvsc_free_send_slot(net_device,
                                              msd_send->send_buf_index);
                        dev_kfree_skb_any(msd_skb);
                }
        }

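/* Receive-completion machinery: send_recv_completions() drains the
 * per-channel mrc ring back to the host as VM_PKT_COMP packets,
 * recv_comp_slot_avail() computes the ring's fill level, and
 * enq_receive_complete() appends a new completion at mrc->next.
 */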
/* in send_recv_completions() */
        struct multi_recv_comp *mrc = &nvchan->mrc;
        ...
        while (mrc->first != mrc->next) {
                const struct recv_comp_data *rcd
                        = mrc->slots + mrc->first;

                msg.status = rcd->status;
                ret = vmbus_sendpacket(nvchan->channel, &msg, sizeof(msg),
                                       rcd->tid, VM_PKT_COMP, 0);
                if (unlikely(ret)) {
                        ...
                        ++ndev_ctx->eth_stats.rx_comp_busy;
                        return ret;
                }

                if (++mrc->first == nvdev->recv_completion_cnt)
                        mrc->first = 0;
        }

        /* receive completion ring has been emptied */
        if (unlikely(nvdev->destroy))
                wake_up(&nvdev->wait_drain);

/* in recv_comp_slot_avail() */
        u32 count = nvdev->recv_completion_cnt;

        if (mrc->next >= mrc->first)
                *filled = mrc->next - mrc->first;
        else
                *filled = (count - mrc->first) + mrc->next;

        *avail = count - *filled - 1;

/* in enq_receive_complete() */
        struct netvsc_channel *nvchan = &nvdev->chan_table[q_idx];
        struct multi_recv_comp *mrc = &nvchan->mrc;
        ...
        rcd = mrc->slots + mrc->next;
        rcd->tid = tid;
        rcd->status = status;

        if (++mrc->next == nvdev->recv_completion_cnt)
                mrc->next = 0;

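/* netvsc_receive() - validates every field of an incoming transfer-page
 * packet (message type, header sizes, buffer ID, range count, and each
 * range's offset/length against recv_buf_size and recv_section_size)
 * before handing the RNDIS data up, then queues one receive completion
 * for the whole packet.
 */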
/* in netvsc_receive() */
        struct vmbus_channel *channel = nvchan->channel;
        ...
        u16 q_idx = channel->offermsg.offer.sub_channel_index;
        char *recv_buf = net_device->recv_buf;
        ...
        /* Make sure this is a valid nvsp packet */
        if (unlikely(nvsp->hdr.msg_type != NVSP_MSG1_TYPE_SEND_RNDIS_PKT)) {
                ...
                          nvsp->hdr.msg_type);
                return 0;
        }

        /* Validate xfer page pkt header */
        if ((desc->offset8 << 3) < sizeof(struct vmtransfer_page_packet_header)) {
                ...
                          desc->offset8 << 3);
                return 0;
        }

        if (unlikely(vmxferpage_packet->xfer_pageset_id != NETVSC_RECEIVE_BUFFER_ID)) {
                netif_err(net_device_ctx, rx_err, ndev,
                          "Invalid xfer page set id - expecting %x got %x\n",
                          NETVSC_RECEIVE_BUFFER_ID,
                          vmxferpage_packet->xfer_pageset_id);
                return 0;
        }

        count = vmxferpage_packet->range_cnt;

        /* Check count for a valid value */
        if (NETVSC_XFER_HEADER_SIZE(count) > desc->offset8 << 3) {
                ...
        }

        for (i = 0; i < count; i++) {
                u32 offset = vmxferpage_packet->ranges[i].byte_offset;
                u32 buflen = vmxferpage_packet->ranges[i].byte_count;
                ...
                if (unlikely(offset > net_device->recv_buf_size ||
                             buflen > net_device->recv_buf_size - offset)) {
                        nvchan->rsc.cnt = 0;
                        ...
                }

                /* We're going to copy (sections of) the packet into nvchan->recv_buf;
                 * make sure that nvchan->recv_buf is large enough to hold the packet.
                 */
                if (unlikely(buflen > net_device->recv_section_size)) {
                        nvchan->rsc.cnt = 0;
                        ...
                                  buflen, net_device->recv_section_size);
                        ...
                }
                ...
                nvchan->rsc.is_last = (i == count - 1);
                ...
                if (unlikely(ret != NVSP_STAT_SUCCESS)) {
                        /* Drop incomplete packet */
                        nvchan->rsc.cnt = 0;
                        status = NVSP_STAT_FAIL;
                }
        }

        enq_receive_complete(ndev, net_device, q_idx,
                             vmxferpage_packet->d.trans_id, status);

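/* Inband control messages: netvsc_send_table() installs the send
 * indirection table published by the host (with an offset fixup for
 * NVSP <= 6 hosts and a bounds check), and netvsc_send_vf() records the
 * VF association the host advertises.
 */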
/* in netvsc_send_table() */
        count = nvmsg->msg.v5_msg.send_table.count;
        offset = nvmsg->msg.v5_msg.send_table.offset;

        if (count != VRSS_SEND_TAB_SIZE) {
                netdev_err(ndev, "Received wrong send-table size:%u\n", count);
                return;
        }

        /* If negotiated version <= NVSP_PROTOCOL_VERSION_6, the offset may be
         * wrong due to a host bug. So fix the offset here.
         */
        if (nvscdev->nvsp_version <= NVSP_PROTOCOL_VERSION_6 &&
            msglen >= sizeof(struct nvsp_message_header) +
            sizeof(union nvsp_6_message_uber) + count * sizeof(u32))
                offset = sizeof(struct nvsp_message_header) +
                         sizeof(union nvsp_6_message_uber);

        /* Boundary check for all versions */
        if (msglen < count * sizeof(u32) || offset > msglen - count * sizeof(u32)) {
                netdev_err(ndev, "Received send-table offset too big:%u\n",
                           offset);
                return;
        }
        ...
        for (i = 0; i < count; i++)
                net_device_ctx->tx_table[i] = tab[i];

/* in netvsc_send_vf() */
        net_device_ctx->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated;
        net_device_ctx->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial;
        ...
        if (net_device_ctx->vf_alloc)
                complete(&net_device_ctx->vf_add);
        ...
        netdev_info(ndev, "VF slot %u %s\n",
                    net_device_ctx->vf_serial,
                    net_device_ctx->vf_alloc ? "added" : "removed");

/* in netvsc_receive_inband() */
        switch (nvmsg->hdr.msg_type) {

/* in netvsc_process_raw_pkt() */
        struct vmbus_channel *channel = nvchan->channel;
        ...
        switch (desc->type) {
        ...
        default:
                netdev_err(ndev, "unhandled packet type %d, tid %llx\n",
                           desc->type, desc->trans_id);
        }

/* in netvsc_channel_to_device() */
        struct vmbus_channel *primary = channel->primary_channel;

        return primary ? primary->device_obj : channel->device_obj;

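/* netvsc_poll() - the NAPI handler: it walks ring descriptors with
 * hv_pkt_iter_first()/hv_pkt_iter_next(), flushes any pending XDP work and
 * receive completions, and only re-arms host interrupts when the budget
 * was not exhausted and the ring is verifiably empty.
 */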
/* in netvsc_poll() */
        struct netvsc_device *net_device = nvchan->net_device;
        struct vmbus_channel *channel = nvchan->channel;
        ...
        /* If starting a new interval */
        if (!nvchan->desc)
                nvchan->desc = hv_pkt_iter_first(channel);

        nvchan->xdp_flush = false;

        while (nvchan->desc && work_done < budget) {
                work_done += netvsc_process_raw_pkt(device, nvchan, net_device,
                                                    ndev, nvchan->desc, budget);
                nvchan->desc = hv_pkt_iter_next(channel, nvchan->desc);
        }

        if (nvchan->xdp_flush)
                xdp_do_flush();
        ...
        /* If it did not exhaust NAPI budget this time
         *    and not doing busy poll
         * then re-enable host interrupts
         *    and reschedule if ring is not empty
         *        or sending receive completion failed
         */
        if (work_done < budget &&
            napi_complete_done(napi, work_done) &&
            (ret || hv_end_read(&channel->inbound)) &&
            napi_schedule_prep(napi)) {
                hv_begin_read(&channel->inbound);
                __napi_schedule(napi);
        }

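/* netvsc_channel_cb() - the VMBus interrupt callback: it prefetches the
 * next ring descriptor, masks further host interrupts via hv_begin_read(),
 * and defers the real work to netvsc_poll() by scheduling NAPI.
 */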
/* in netvsc_channel_cb() */
        struct vmbus_channel *channel = nvchan->channel;
        struct hv_ring_buffer_info *rbi = &channel->inbound;

        /* preload first vmpacket descriptor */
        prefetch(hv_get_ring_buffer(rbi) + rbi->priv_read_index);

        if (napi_schedule_prep(&nvchan->napi)) {
                /* disable interrupts from host */
                hv_begin_read(rbi);

                __napi_schedule_irqoff(&nvchan->napi);
        }

/*
 * netvsc_device_add - Callback when the device belonging to this
 * driver is added
 */
/* in netvsc_device_add() */
        net_device = alloc_net_device();
        if (!net_device)
                return ERR_PTR(-ENOMEM);

        for (i = 0; i < VRSS_SEND_TAB_SIZE; i++)
                net_device_ctx->tx_table[i] = 0;

        /* Because the device uses NAPI, all the interrupt batching and
         * control is done via Net softirq, not the channel handling
         */
        set_channel_read_mode(device->channel, HV_CALL_ISR);
        ...
        for (i = 0; i < VRSS_CHANNEL_MAX; i++) {
                struct netvsc_channel *nvchan = &net_device->chan_table[i];

                nvchan->channel = device->channel;
                nvchan->net_device = net_device;
                u64_stats_init(&nvchan->tx_stats.syncp);
                u64_stats_init(&nvchan->rx_stats.syncp);

                ret = xdp_rxq_info_reg(&nvchan->xdp_rxq, ndev, i, 0);
                ...
                ret = xdp_rxq_info_reg_mem_model(&nvchan->xdp_rxq,
                                                 MEM_TYPE_PAGE_SHARED, NULL);
                ...
        }

        /* Enable NAPI handler before init callbacks */
        netif_napi_add(ndev, &net_device->chan_table[0].napi, netvsc_poll);

        /* Open the channel */
        device->channel->next_request_id_callback = vmbus_next_request_id;
        device->channel->request_addr_callback = vmbus_request_addr;
        device->channel->rqstor_size = netvsc_rqstor_size(netvsc_ring_bytes);
        device->channel->max_pkt_size = NETVSC_MAX_PKT_SIZE;

        ret = vmbus_open(device->channel, netvsc_ring_bytes,
                         netvsc_ring_bytes, NULL, 0,
                         netvsc_channel_cb, net_device->chan_table);
        ...
        napi_enable(&net_device->chan_table[0].napi);

        /* Connect with the NetVsp */
        ret = netvsc_connect_vsp(device, net_device, device_info);
        if (ret != 0) {
                netdev_err(ndev,
                           "unable to connect to NetVSP - %d\n", ret);
                goto close;
        }
        ...
        rcu_assign_pointer(net_device_ctx->nvdev, net_device);
        ...
close:
        RCU_INIT_POINTER(net_device_ctx->nvdev, NULL);
        napi_disable(&net_device->chan_table[0].napi);

        /* Now, we can close the channel safely */
        vmbus_close(device->channel);

cleanup:
        netif_napi_del(&net_device->chan_table[0].napi);

cleanup2:
        if (net_device->recv_original_buf)
                hv_unmap_memory(net_device->recv_buf);

        if (net_device->send_original_buf)
                hv_unmap_memory(net_device->send_buf);

        free_netvsc_device(&net_device->rcu);