Lines matching refs: vi — identifier cross-reference listing for the Linux kernel's virtio-net driver (drivers/net/virtio_net.c). Each entry gives the source line number, the matched line, and the enclosing function; "argument" and "local" mark whether vi is a parameter or a local variable at that site.
372 static void enable_delayed_refill(struct virtnet_info *vi) in enable_delayed_refill() argument
374 spin_lock_bh(&vi->refill_lock); in enable_delayed_refill()
375 vi->refill_enabled = true; in enable_delayed_refill()
376 spin_unlock_bh(&vi->refill_lock); in enable_delayed_refill()
379 static void disable_delayed_refill(struct virtnet_info *vi) in disable_delayed_refill() argument
381 spin_lock_bh(&vi->refill_lock); in disable_delayed_refill()
382 vi->refill_enabled = false; in disable_delayed_refill()
383 spin_unlock_bh(&vi->refill_lock); in disable_delayed_refill()
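The matches at 372-383 cover both refill helpers almost completely; filling in only the braces gives the following pair (a sketch, assuming nothing else sits between the lock and unlock):

static void enable_delayed_refill(struct virtnet_info *vi)
{
        spin_lock_bh(&vi->refill_lock);
        vi->refill_enabled = true;
        spin_unlock_bh(&vi->refill_lock);
}

static void disable_delayed_refill(struct virtnet_info *vi)
{
        spin_lock_bh(&vi->refill_lock);
        vi->refill_enabled = false;
        spin_unlock_bh(&vi->refill_lock);
}

The _bh variant matters: the flag is also read under the same lock from NAPI (softirq) context, see lines 1569-1572 below, so the writer must keep softirqs off its CPU while holding the lock.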
411 struct virtnet_info *vi = vq->vdev->priv; in skb_xmit_done() local
412 struct napi_struct *napi = &vi->sq[vq2txq(vq)].napi; in skb_xmit_done()
421 netif_wake_subqueue(vi->dev, vq2txq(vq)); in skb_xmit_done()
442 static struct sk_buff *page_to_skb(struct virtnet_info *vi, in page_to_skb() argument
459 hdr_len = vi->hdr_len; in page_to_skb()
460 if (vi->mergeable_rx_bufs) in page_to_skb()
515 if (vi->mergeable_rx_bufs) { in page_to_skb()
564 static int __virtnet_xdp_xmit_one(struct virtnet_info *vi, in __virtnet_xdp_xmit_one() argument
571 if (unlikely(xdpf->headroom < vi->hdr_len)) in __virtnet_xdp_xmit_one()
575 xdpf->data -= vi->hdr_len; in __virtnet_xdp_xmit_one()
578 memset(hdr, 0, vi->hdr_len); in __virtnet_xdp_xmit_one()
579 xdpf->len += vi->hdr_len; in __virtnet_xdp_xmit_one()
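Lines 564-579 show the interesting part of __virtnet_xdp_xmit_one(): the virtio-net header is prepended in place by stealing XDP headroom. A sketch of the full function under that reading; the submission tail (sg_init_one()/virtqueue_add_outbuf() and the driver-local xdp_to_ptr() pointer tagging) is reconstructed, not matched:

static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
                                  struct send_queue *sq,
                                  struct xdp_frame *xdpf)
{
        struct virtio_net_hdr_mrg_rxbuf *hdr;
        int err;

        /* Need headroom for the virtio-net header in front of the frame. */
        if (unlikely(xdpf->headroom < vi->hdr_len))
                return -EOVERFLOW;

        /* Prepend a zeroed header; checksum is left to the XDP layers. */
        xdpf->data -= vi->hdr_len;
        hdr = xdpf->data;
        memset(hdr, 0, vi->hdr_len);
        xdpf->len += vi->hdr_len;

        sg_init_one(sq->sg, xdpf->data, xdpf->len);

        err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdp_to_ptr(xdpf),
                                   GFP_ATOMIC);
        if (unlikely(err))
                return -ENOSPC; /* caller handles the free/refcount */

        return 0;
}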
599 #define virtnet_xdp_get_sq(vi) ({ \ argument
602 typeof(vi) v = (vi); \
618 #define virtnet_xdp_put_sq(vi, q) { \ argument
620 typeof(vi) v = (vi); \
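Only the opening lines of the two queue-selection macros match, but they show the standard single-evaluation idiom: a GCC statement expression captures the argument once via typeof so the body can use it repeatedly without re-evaluating side effects. A minimal illustration of the idiom, not the driver's full macros (the real ones also pick a per-CPU send queue and take/release the corresponding tx lock when CPUs outnumber queue pairs):

#define virtnet_xdp_get_sq(vi) ({                                       \
        typeof(vi) v = (vi);    /* evaluate the argument exactly once */ \
        /* hypothetical body; the real macro locks the tx queue too */  \
        &v->sq[raw_smp_processor_id() % v->curr_queue_pairs];           \
})

#define virtnet_xdp_put_sq(vi, q) {                                     \
        typeof(vi) v = (vi);                                            \
        /* the real macro releases the lock taken by get_sq */          \
        (void)v; (void)(q);                                             \
}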
632 struct virtnet_info *vi = netdev_priv(dev); in virtnet_xdp_xmit() local
633 struct receive_queue *rq = vi->rq; in virtnet_xdp_xmit()
652 sq = virtnet_xdp_get_sq(vi); in virtnet_xdp_xmit()
678 if (__virtnet_xdp_xmit_one(vi, sq, xdpf)) in virtnet_xdp_xmit()
697 virtnet_xdp_put_sq(vi, sq); in virtnet_xdp_xmit()
701 static unsigned int virtnet_get_headroom(struct virtnet_info *vi) in virtnet_get_headroom() argument
703 return vi->xdp_enabled ? VIRTIO_XDP_HEADROOM : 0; in virtnet_get_headroom()
771 struct virtnet_info *vi, in receive_small() argument
782 unsigned int headroom = vi->hdr_len + header_offset; in receive_small()
791 len -= vi->hdr_len; in receive_small()
801 if (likely(!vi->xdp_enabled)) { in receive_small()
818 if (unlikely(xdp_headroom < virtnet_get_headroom(vi))) { in receive_small()
820 unsigned int tlen = len + vi->hdr_len; in receive_small()
823 xdp_headroom = virtnet_get_headroom(vi); in receive_small()
825 headroom = vi->hdr_len + header_offset; in receive_small()
840 xdp_prepare_buff(&xdp, buf + VIRTNET_RX_PAD + vi->hdr_len, in receive_small()
862 trace_xdp_exception(vi->dev, xdp_prog, act); in receive_small()
877 bpf_warn_invalid_xdp_action(vi->dev, xdp_prog, act); in receive_small()
880 trace_xdp_exception(vi->dev, xdp_prog, act); in receive_small()
896 memcpy(skb_vnet_hdr(skb), buf, vi->hdr_len); in receive_small()
915 struct virtnet_info *vi, in receive_big() argument
923 page_to_skb(vi, rq, page, 0, len, PAGE_SIZE, true, 0, 0); in receive_big()
925 stats->bytes += len - vi->hdr_len; in receive_big()
938 struct virtnet_info *vi, in receive_mergeable() argument
947 u16 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers); in receive_mergeable()
959 stats->bytes += len - vi->hdr_len; in receive_mergeable()
968 if (likely(!vi->xdp_enabled)) { in receive_mergeable()
1001 headroom < virtnet_get_headroom(vi))) { in receive_mergeable()
1020 xdp_init_buff(&xdp, frame_sz - vi->hdr_len, &rq->xdp_rxq); in receive_mergeable()
1021 xdp_prepare_buff(&xdp, data - VIRTIO_XDP_HEADROOM + vi->hdr_len, in receive_mergeable()
1022 VIRTIO_XDP_HEADROOM, len - vi->hdr_len, true); in receive_mergeable()
1037 vi->hdr_len - metasize; in receive_mergeable()
1042 len = xdp.data_end - xdp.data + vi->hdr_len + metasize; in receive_mergeable()
1065 head_skb = page_to_skb(vi, rq, xdp_page, offset, in receive_mergeable()
1084 trace_xdp_exception(vi->dev, xdp_prog, act); in receive_mergeable()
1108 bpf_warn_invalid_xdp_action(vi->dev, xdp_prog, act); in receive_mergeable()
1111 trace_xdp_exception(vi->dev, xdp_prog, act); in receive_mergeable()
1122 head_skb = page_to_skb(vi, rq, page, offset, len, truesize, !xdp_prog, in receive_mergeable()
1135 virtio16_to_cpu(vi->vdev, in receive_mergeable()
1238 static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq, in receive_buf() argument
1243 struct net_device *dev = vi->dev; in receive_buf()
1247 if (unlikely(len < vi->hdr_len + ETH_HLEN)) { in receive_buf()
1250 if (vi->mergeable_rx_bufs) { in receive_buf()
1252 } else if (vi->big_packets) { in receive_buf()
1260 if (vi->mergeable_rx_bufs) in receive_buf()
1261 skb = receive_mergeable(dev, vi, rq, buf, ctx, len, xdp_xmit, in receive_buf()
1263 else if (vi->big_packets) in receive_buf()
1264 skb = receive_big(dev, vi, rq, buf, len, stats); in receive_buf()
1266 skb = receive_small(dev, vi, rq, buf, ctx, len, xdp_xmit, stats); in receive_buf()
1272 if (dev->features & NETIF_F_RXHASH && vi->has_rss_hash_report) in receive_buf()
1279 virtio_is_little_endian(vi->vdev))) { in receive_buf()
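The receive_buf() matches (1238-1279) outline the driver's three-way receive dispatch. A condensed sketch; the runt-frame cleanup and the tail are assumptions filled in around the matched fragments:

static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
                        void *buf, unsigned int len, void **ctx,
                        unsigned int *xdp_xmit,
                        struct virtnet_rq_stats *stats)
{
        struct net_device *dev = vi->dev;
        struct sk_buff *skb;

        /* Too short for virtio-net header + Ethernet header:
         * count the error and return the buffer to its pool.
         */
        if (unlikely(len < vi->hdr_len + ETH_HLEN)) {
                dev->stats.rx_length_errors++;
                if (vi->mergeable_rx_bufs)
                        put_page(virt_to_head_page(buf));
                else if (vi->big_packets)
                        give_pages(rq, buf);
                else
                        put_page(virt_to_head_page(buf));
                return;
        }

        if (vi->mergeable_rx_bufs)
                skb = receive_mergeable(dev, vi, rq, buf, ctx, len, xdp_xmit,
                                        stats);
        else if (vi->big_packets)
                skb = receive_big(dev, vi, rq, buf, len, stats);
        else
                skb = receive_small(dev, vi, rq, buf, ctx, len, xdp_xmit, stats);

        if (unlikely(!skb))
                return;

        /* RSS hash report (NETIF_F_RXHASH), virtio_net_hdr_to_skb()
         * conversion and napi_gro_receive() follow; see 1272-1279.
         */
}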
1304 static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq, in add_recvbuf_small() argument
1309 unsigned int xdp_headroom = virtnet_get_headroom(vi); in add_recvbuf_small()
1311 int len = vi->hdr_len + VIRTNET_RX_PAD + GOOD_PACKET_LEN + xdp_headroom; in add_recvbuf_small()
1323 vi->hdr_len + GOOD_PACKET_LEN); in add_recvbuf_small()
1330 static int add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq, in add_recvbuf_big() argument
1337 sg_init_table(rq->sg, vi->big_packets_num_skbfrags + 2); in add_recvbuf_big()
1340 for (i = vi->big_packets_num_skbfrags + 1; i > 1; --i) { in add_recvbuf_big()
1363 sg_set_buf(&rq->sg[0], p, vi->hdr_len); in add_recvbuf_big()
1371 err = virtqueue_add_inbuf(rq->vq, rq->sg, vi->big_packets_num_skbfrags + 2, in add_recvbuf_big()
1383 struct virtnet_info *vi = rq->vq->vdev->priv; in get_mergeable_buf_len() local
1384 const size_t hdr_len = vi->hdr_len; in get_mergeable_buf_len()
1396 static int add_recvbuf_mergeable(struct virtnet_info *vi, in add_recvbuf_mergeable() argument
1400 unsigned int headroom = virtnet_get_headroom(vi); in add_recvbuf_mergeable()
1446 static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq, in try_fill_recv() argument
1453 if (vi->mergeable_rx_bufs) in try_fill_recv()
1454 err = add_recvbuf_mergeable(vi, rq, gfp); in try_fill_recv()
1455 else if (vi->big_packets) in try_fill_recv()
1456 err = add_recvbuf_big(vi, rq, gfp); in try_fill_recv()
1458 err = add_recvbuf_small(vi, rq, gfp); in try_fill_recv()
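try_fill_recv() (1446-1458) mirrors that dispatch on the allocation side. A sketch of the whole loop; everything outside the three matched add_recvbuf_*() calls is reconstructed from the function's usual shape and may differ by kernel version:

static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
                          gfp_t gfp)
{
        int err;
        bool oom;

        do {
                if (vi->mergeable_rx_bufs)
                        err = add_recvbuf_mergeable(vi, rq, gfp);
                else if (vi->big_packets)
                        err = add_recvbuf_big(vi, rq, gfp);
                else
                        err = add_recvbuf_small(vi, rq, gfp);

                oom = err == -ENOMEM;
                if (err)
                        break;
        } while (rq->vq->num_free); /* keep going until the ring is full */

        if (virtqueue_kick_prepare(rq->vq))
                virtqueue_notify(rq->vq); /* kick stats accounting elided */

        return !oom; /* false tells the caller to retry later */
}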
1477 struct virtnet_info *vi = rvq->vdev->priv; in skb_recv_done() local
1478 struct receive_queue *rq = &vi->rq[vq2rxq(rvq)]; in skb_recv_done()
1496 static void virtnet_napi_tx_enable(struct virtnet_info *vi, in virtnet_napi_tx_enable() argument
1506 if (!vi->affinity_hint_set) { in virtnet_napi_tx_enable()
1522 struct virtnet_info *vi = in refill_work() local
1527 for (i = 0; i < vi->curr_queue_pairs; i++) { in refill_work()
1528 struct receive_queue *rq = &vi->rq[i]; in refill_work()
1531 still_empty = !try_fill_recv(vi, rq, GFP_KERNEL); in refill_work()
1538 schedule_delayed_work(&vi->refill, HZ/2); in refill_work()
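refill_work() (1522-1538) is the delayed, sleepable counterpart of the GFP_ATOMIC refill done in virtnet_receive(). A sketch; the napi_disable()/virtnet_napi_enable() bracketing around the fill is an assumption consistent with how the rx NAPI is handled elsewhere in this listing:

static void refill_work(struct work_struct *work)
{
        struct virtnet_info *vi =
                container_of(work, struct virtnet_info, refill.work);
        bool still_empty;
        int i;

        for (i = 0; i < vi->curr_queue_pairs; i++) {
                struct receive_queue *rq = &vi->rq[i];

                napi_disable(&rq->napi);
                still_empty = !try_fill_recv(vi, rq, GFP_KERNEL);
                virtnet_napi_enable(rq->vq, &rq->napi);

                /* If memory is still tight, retry in half a second
                 * rather than never refilling at all.
                 */
                if (still_empty)
                        schedule_delayed_work(&vi->refill, HZ / 2);
        }
}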
1545 struct virtnet_info *vi = rq->vq->vdev->priv; in virtnet_receive() local
1551 if (!vi->big_packets || vi->mergeable_rx_bufs) { in virtnet_receive()
1556 receive_buf(vi, rq, buf, len, ctx, xdp_xmit, &stats); in virtnet_receive()
1562 receive_buf(vi, rq, buf, len, NULL, xdp_xmit, &stats); in virtnet_receive()
1568 if (!try_fill_recv(vi, rq, GFP_ATOMIC)) { in virtnet_receive()
1569 spin_lock(&vi->refill_lock); in virtnet_receive()
1570 if (vi->refill_enabled) in virtnet_receive()
1571 schedule_delayed_work(&vi->refill, 0); in virtnet_receive()
1572 spin_unlock(&vi->refill_lock); in virtnet_receive()
1625 static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q) in is_xdp_raw_buffer_queue() argument
1627 if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs)) in is_xdp_raw_buffer_queue()
1629 else if (q < vi->curr_queue_pairs) in is_xdp_raw_buffer_queue()
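is_xdp_raw_buffer_queue() (1625-1629) is small enough to reconstruct outright from the two matched conditions; only the return values are inferred (the queues from curr_queue_pairs - xdp_queue_pairs upward are the ones reserved for XDP):

static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
{
        if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
                return false;   /* regular skb tx queue */
        else if (q < vi->curr_queue_pairs)
                return true;    /* reserved for XDP_TX / ndo_xdp_xmit */
        else
                return false;   /* out of range */
}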
1637 struct virtnet_info *vi = rq->vq->vdev->priv; in virtnet_poll_cleantx() local
1639 struct send_queue *sq = &vi->sq[index]; in virtnet_poll_cleantx()
1640 struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index); in virtnet_poll_cleantx()
1642 if (!sq->napi.weight || is_xdp_raw_buffer_queue(vi, index)) in virtnet_poll_cleantx()
1667 struct virtnet_info *vi = rq->vq->vdev->priv; in virtnet_poll() local
1684 sq = virtnet_xdp_get_sq(vi); in virtnet_poll()
1690 virtnet_xdp_put_sq(vi, sq); in virtnet_poll()
1698 struct virtnet_info *vi = netdev_priv(dev); in virtnet_open() local
1701 enable_delayed_refill(vi); in virtnet_open()
1703 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_open()
1704 if (i < vi->curr_queue_pairs) in virtnet_open()
1706 if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL)) in virtnet_open()
1707 schedule_delayed_work(&vi->refill, 0); in virtnet_open()
1709 err = xdp_rxq_info_reg(&vi->rq[i].xdp_rxq, dev, i, vi->rq[i].napi.napi_id); in virtnet_open()
1713 err = xdp_rxq_info_reg_mem_model(&vi->rq[i].xdp_rxq, in virtnet_open()
1716 xdp_rxq_info_unreg(&vi->rq[i].xdp_rxq); in virtnet_open()
1720 virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); in virtnet_open()
1721 virtnet_napi_tx_enable(vi, vi->sq[i].vq, &vi->sq[i].napi); in virtnet_open()
1730 struct virtnet_info *vi = sq->vq->vdev->priv; in virtnet_poll_tx() local
1736 if (unlikely(is_xdp_raw_buffer_queue(vi, index))) { in virtnet_poll_tx()
1742 txq = netdev_get_tx_queue(vi->dev, index); in virtnet_poll_tx()
1777 struct virtnet_info *vi = sq->vq->vdev->priv; in xmit_skb() local
1779 unsigned hdr_len = vi->hdr_len; in xmit_skb()
1782 pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest); in xmit_skb()
1784 can_push = vi->any_header_sg && in xmit_skb()
1795 virtio_is_little_endian(vi->vdev), false, in xmit_skb()
1799 if (vi->mergeable_rx_bufs) in xmit_skb()
1822 struct virtnet_info *vi = netdev_priv(dev); in start_xmit() local
1824 struct send_queue *sq = &vi->sq[qnum]; in start_xmit()
1898 static int virtnet_rx_resize(struct virtnet_info *vi, in virtnet_rx_resize() argument
1901 bool running = netif_running(vi->dev); in virtnet_rx_resize()
1904 qindex = rq - vi->rq; in virtnet_rx_resize()
1911 netdev_err(vi->dev, "resize rx fail: rx queue index: %d err: %d\n", qindex, err); in virtnet_rx_resize()
1913 if (!try_fill_recv(vi, rq, GFP_KERNEL)) in virtnet_rx_resize()
1914 schedule_delayed_work(&vi->refill, 0); in virtnet_rx_resize()
1921 static int virtnet_tx_resize(struct virtnet_info *vi, in virtnet_tx_resize() argument
1924 bool running = netif_running(vi->dev); in virtnet_tx_resize()
1928 qindex = sq - vi->sq; in virtnet_tx_resize()
1933 txq = netdev_get_tx_queue(vi->dev, qindex); in virtnet_tx_resize()
1944 netif_stop_subqueue(vi->dev, qindex); in virtnet_tx_resize()
1950 netdev_err(vi->dev, "resize tx fail: tx queue index: %d err: %d\n", qindex, err); in virtnet_tx_resize()
1958 virtnet_napi_tx_enable(vi, sq->vq, &sq->napi); in virtnet_tx_resize()
1967 static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd, in virtnet_send_command() argument
1975 BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)); in virtnet_send_command()
1977 vi->ctrl->status = ~0; in virtnet_send_command()
1978 vi->ctrl->hdr.class = class; in virtnet_send_command()
1979 vi->ctrl->hdr.cmd = cmd; in virtnet_send_command()
1981 sg_init_one(&hdr, &vi->ctrl->hdr, sizeof(vi->ctrl->hdr)); in virtnet_send_command()
1988 sg_init_one(&stat, &vi->ctrl->status, sizeof(vi->ctrl->status)); in virtnet_send_command()
1992 ret = virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC); in virtnet_send_command()
1994 dev_warn(&vi->vdev->dev, in virtnet_send_command()
1999 if (unlikely(!virtqueue_kick(vi->cvq))) in virtnet_send_command()
2000 return vi->ctrl->status == VIRTIO_NET_OK; in virtnet_send_command()
2005 while (!virtqueue_get_buf(vi->cvq, &tmp) && in virtnet_send_command()
2006 !virtqueue_is_broken(vi->cvq)) in virtnet_send_command()
2009 return vi->ctrl->status == VIRTIO_NET_OK; in virtnet_send_command()
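The virtnet_send_command() matches (1967-2009) give the skeleton behind every control-virtqueue operation in this file: fill vi->ctrl, chain header / payload / status scatterlists, kick, then busy-poll for completion. A sketch; the scatterlist bookkeeping between the matched lines is reconstructed:

static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
                                 struct scatterlist *out)
{
        struct scatterlist *sgs[4], hdr, stat;
        unsigned out_num = 0, tmp;
        int ret;

        /* Caller should know better. */
        BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ));

        vi->ctrl->status = ~0;
        vi->ctrl->hdr.class = class;
        vi->ctrl->hdr.cmd = cmd;
        /* Out: command header, then the optional payload. */
        sg_init_one(&hdr, &vi->ctrl->hdr, sizeof(vi->ctrl->hdr));
        sgs[out_num++] = &hdr;
        if (out)
                sgs[out_num++] = out;

        /* In: one status byte written back by the device. */
        sg_init_one(&stat, &vi->ctrl->status, sizeof(vi->ctrl->status));
        sgs[out_num] = &stat;

        ret = virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC);
        if (ret < 0) {
                dev_warn(&vi->vdev->dev,
                         "Failed to add sgs for command vq: %d\n", ret);
                return false;
        }

        if (unlikely(!virtqueue_kick(vi->cvq)))
                return vi->ctrl->status == VIRTIO_NET_OK;

        /* The kick traps into the hypervisor, so the reply should be
         * quick: spin rather than sleep.
         */
        while (!virtqueue_get_buf(vi->cvq, &tmp) &&
               !virtqueue_is_broken(vi->cvq))
                cpu_relax();

        return vi->ctrl->status == VIRTIO_NET_OK;
}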
2014 struct virtnet_info *vi = netdev_priv(dev); in virtnet_set_mac_address() local
2015 struct virtio_device *vdev = vi->vdev; in virtnet_set_mac_address()
2020 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STANDBY)) in virtnet_set_mac_address()
2033 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC, in virtnet_set_mac_address()
2062 struct virtnet_info *vi = netdev_priv(dev); in virtnet_stats() local
2066 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_stats()
2068 struct receive_queue *rq = &vi->rq[i]; in virtnet_stats()
2069 struct send_queue *sq = &vi->sq[i]; in virtnet_stats()
2099 static void virtnet_ack_link_announce(struct virtnet_info *vi) in virtnet_ack_link_announce() argument
2102 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE, in virtnet_ack_link_announce()
2104 dev_warn(&vi->dev->dev, "Failed to ack link announce.\n"); in virtnet_ack_link_announce()
2108 static int _virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs) in _virtnet_set_queues() argument
2111 struct net_device *dev = vi->dev; in _virtnet_set_queues()
2113 if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ)) in _virtnet_set_queues()
2116 vi->ctrl->mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs); in _virtnet_set_queues()
2117 sg_init_one(&sg, &vi->ctrl->mq, sizeof(vi->ctrl->mq)); in _virtnet_set_queues()
2119 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ, in _virtnet_set_queues()
2125 vi->curr_queue_pairs = queue_pairs; in _virtnet_set_queues()
2128 schedule_delayed_work(&vi->refill, 0); in _virtnet_set_queues()
2134 static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs) in virtnet_set_queues() argument
2139 err = _virtnet_set_queues(vi, queue_pairs); in virtnet_set_queues()
2146 struct virtnet_info *vi = netdev_priv(dev); in virtnet_close() local
2150 disable_delayed_refill(vi); in virtnet_close()
2152 cancel_delayed_work_sync(&vi->refill); in virtnet_close()
2154 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_close()
2155 xdp_rxq_info_unreg(&vi->rq[i].xdp_rxq); in virtnet_close()
2156 napi_disable(&vi->rq[i].napi); in virtnet_close()
2157 virtnet_napi_tx_disable(&vi->sq[i].napi); in virtnet_close()
2165 struct virtnet_info *vi = netdev_priv(dev); in virtnet_set_rx_mode() local
2175 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX)) in virtnet_set_rx_mode()
2178 vi->ctrl->promisc = ((dev->flags & IFF_PROMISC) != 0); in virtnet_set_rx_mode()
2179 vi->ctrl->allmulti = ((dev->flags & IFF_ALLMULTI) != 0); in virtnet_set_rx_mode()
2181 sg_init_one(sg, &vi->ctrl->promisc, sizeof(vi->ctrl->promisc)); in virtnet_set_rx_mode()
2183 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, in virtnet_set_rx_mode()
2186 vi->ctrl->promisc ? "en" : "dis"); in virtnet_set_rx_mode()
2188 sg_init_one(sg, &vi->ctrl->allmulti, sizeof(vi->ctrl->allmulti)); in virtnet_set_rx_mode()
2190 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, in virtnet_set_rx_mode()
2193 vi->ctrl->allmulti ? "en" : "dis"); in virtnet_set_rx_mode()
2207 mac_data->entries = cpu_to_virtio32(vi->vdev, uc_count); in virtnet_set_rx_mode()
2218 mac_data->entries = cpu_to_virtio32(vi->vdev, mc_count); in virtnet_set_rx_mode()
2226 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC, in virtnet_set_rx_mode()
2236 struct virtnet_info *vi = netdev_priv(dev); in virtnet_vlan_rx_add_vid() local
2239 vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid); in virtnet_vlan_rx_add_vid()
2240 sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid)); in virtnet_vlan_rx_add_vid()
2242 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, in virtnet_vlan_rx_add_vid()
2251 struct virtnet_info *vi = netdev_priv(dev); in virtnet_vlan_rx_kill_vid() local
2254 vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid); in virtnet_vlan_rx_kill_vid()
2255 sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid)); in virtnet_vlan_rx_kill_vid()
2257 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, in virtnet_vlan_rx_kill_vid()
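The two VLAN filter callbacks (2236-2257) are near-twins and show the simplest use of virtnet_send_command(). A sketch of the add side, with the unmatched warning and return lines assumed; the kill side only swaps VIRTIO_NET_CTRL_VLAN_ADD for VIRTIO_NET_CTRL_VLAN_DEL:

static int virtnet_vlan_rx_add_vid(struct net_device *dev,
                                   __be16 proto, u16 vid)
{
        struct virtnet_info *vi = netdev_priv(dev);
        struct scatterlist sg;

        vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid);
        sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid));

        if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
                                  VIRTIO_NET_CTRL_VLAN_ADD, &sg))
                dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid);
        return 0;
}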
2263 static void virtnet_clean_affinity(struct virtnet_info *vi) in virtnet_clean_affinity() argument
2267 if (vi->affinity_hint_set) { in virtnet_clean_affinity()
2268 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_clean_affinity()
2269 virtqueue_set_affinity(vi->rq[i].vq, NULL); in virtnet_clean_affinity()
2270 virtqueue_set_affinity(vi->sq[i].vq, NULL); in virtnet_clean_affinity()
2273 vi->affinity_hint_set = false; in virtnet_clean_affinity()
2277 static void virtnet_set_affinity(struct virtnet_info *vi) in virtnet_set_affinity() argument
2287 virtnet_clean_affinity(vi); in virtnet_set_affinity()
2292 stride = max_t(int, num_cpu / vi->curr_queue_pairs, 1); in virtnet_set_affinity()
2293 stragglers = num_cpu >= vi->curr_queue_pairs ? in virtnet_set_affinity()
2294 num_cpu % vi->curr_queue_pairs : in virtnet_set_affinity()
2298 for (i = 0; i < vi->curr_queue_pairs; i++) { in virtnet_set_affinity()
2306 virtqueue_set_affinity(vi->rq[i].vq, mask); in virtnet_set_affinity()
2307 virtqueue_set_affinity(vi->sq[i].vq, mask); in virtnet_set_affinity()
2308 __netif_set_xps_queue(vi->dev, cpumask_bits(mask), i, XPS_CPUS); in virtnet_set_affinity()
2312 vi->affinity_hint_set = true; in virtnet_set_affinity()
2318 struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info, in virtnet_cpu_online() local
2320 virtnet_set_affinity(vi); in virtnet_cpu_online()
2326 struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info, in virtnet_cpu_dead() local
2328 virtnet_set_affinity(vi); in virtnet_cpu_dead()
2334 struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info, in virtnet_cpu_down_prep() local
2337 virtnet_clean_affinity(vi); in virtnet_cpu_down_prep()
2343 static int virtnet_cpu_notif_add(struct virtnet_info *vi) in virtnet_cpu_notif_add() argument
2347 ret = cpuhp_state_add_instance_nocalls(virtionet_online, &vi->node); in virtnet_cpu_notif_add()
2351 &vi->node_dead); in virtnet_cpu_notif_add()
2354 cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node); in virtnet_cpu_notif_add()
2358 static void virtnet_cpu_notif_remove(struct virtnet_info *vi) in virtnet_cpu_notif_remove() argument
2360 cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node); in virtnet_cpu_notif_remove()
2362 &vi->node_dead); in virtnet_cpu_notif_remove()
2370 struct virtnet_info *vi = netdev_priv(dev); in virtnet_get_ringparam() local
2372 ring->rx_max_pending = vi->rq[0].vq->num_max; in virtnet_get_ringparam()
2373 ring->tx_max_pending = vi->sq[0].vq->num_max; in virtnet_get_ringparam()
2374 ring->rx_pending = virtqueue_get_vring_size(vi->rq[0].vq); in virtnet_get_ringparam()
2375 ring->tx_pending = virtqueue_get_vring_size(vi->sq[0].vq); in virtnet_get_ringparam()
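virtnet_get_ringparam() (2370-2375) reads ring geometry straight off queue pair 0: vq->num_max is the device limit, virtqueue_get_vring_size() the currently provisioned size. A sketch; the four-argument ethtool signature (kernel_ring/extack) is assumed for this kernel vintage:

static void virtnet_get_ringparam(struct net_device *dev,
                                  struct ethtool_ringparam *ring,
                                  struct kernel_ethtool_ringparam *kernel_ring,
                                  struct netlink_ext_ack *extack)
{
        struct virtnet_info *vi = netdev_priv(dev);

        /* All queue pairs share the same limits, so report pair 0. */
        ring->rx_max_pending = vi->rq[0].vq->num_max;
        ring->tx_max_pending = vi->sq[0].vq->num_max;
        ring->rx_pending = virtqueue_get_vring_size(vi->rq[0].vq);
        ring->tx_pending = virtqueue_get_vring_size(vi->sq[0].vq);
}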
2383 struct virtnet_info *vi = netdev_priv(dev); in virtnet_set_ringparam() local
2392 rx_pending = virtqueue_get_vring_size(vi->rq[0].vq); in virtnet_set_ringparam()
2393 tx_pending = virtqueue_get_vring_size(vi->sq[0].vq); in virtnet_set_ringparam()
2399 if (ring->rx_pending > vi->rq[0].vq->num_max) in virtnet_set_ringparam()
2402 if (ring->tx_pending > vi->sq[0].vq->num_max) in virtnet_set_ringparam()
2405 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_set_ringparam()
2406 rq = vi->rq + i; in virtnet_set_ringparam()
2407 sq = vi->sq + i; in virtnet_set_ringparam()
2410 err = virtnet_tx_resize(vi, sq, ring->tx_pending); in virtnet_set_ringparam()
2416 err = virtnet_rx_resize(vi, rq, ring->rx_pending); in virtnet_set_ringparam()
2425 static bool virtnet_commit_rss_command(struct virtnet_info *vi) in virtnet_commit_rss_command() argument
2427 struct net_device *dev = vi->dev; in virtnet_commit_rss_command()
2435 sg_set_buf(&sgs[0], &vi->ctrl->rss, sg_buf_size); in virtnet_commit_rss_command()
2437 sg_buf_size = sizeof(uint16_t) * (vi->ctrl->rss.indirection_table_mask + 1); in virtnet_commit_rss_command()
2438 sg_set_buf(&sgs[1], vi->ctrl->rss.indirection_table, sg_buf_size); in virtnet_commit_rss_command()
2442 sg_set_buf(&sgs[2], &vi->ctrl->rss.max_tx_vq, sg_buf_size); in virtnet_commit_rss_command()
2444 sg_buf_size = vi->rss_key_size; in virtnet_commit_rss_command()
2445 sg_set_buf(&sgs[3], vi->ctrl->rss.key, sg_buf_size); in virtnet_commit_rss_command()
2447 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ, in virtnet_commit_rss_command()
2448 vi->has_rss ? VIRTIO_NET_CTRL_MQ_RSS_CONFIG in virtnet_commit_rss_command()
2456 static void virtnet_init_default_rss(struct virtnet_info *vi) in virtnet_init_default_rss() argument
2461 vi->ctrl->rss.hash_types = vi->rss_hash_types_supported; in virtnet_init_default_rss()
2462 vi->rss_hash_types_saved = vi->rss_hash_types_supported; in virtnet_init_default_rss()
2463 vi->ctrl->rss.indirection_table_mask = vi->rss_indir_table_size in virtnet_init_default_rss()
2464 ? vi->rss_indir_table_size - 1 : 0; in virtnet_init_default_rss()
2465 vi->ctrl->rss.unclassified_queue = 0; in virtnet_init_default_rss()
2467 for (; i < vi->rss_indir_table_size; ++i) { in virtnet_init_default_rss()
2468 indir_val = ethtool_rxfh_indir_default(i, vi->curr_queue_pairs); in virtnet_init_default_rss()
2469 vi->ctrl->rss.indirection_table[i] = indir_val; in virtnet_init_default_rss()
2472 vi->ctrl->rss.max_tx_vq = vi->curr_queue_pairs; in virtnet_init_default_rss()
2473 vi->ctrl->rss.hash_key_length = vi->rss_key_size; in virtnet_init_default_rss()
2475 netdev_rss_key_fill(vi->ctrl->rss.key, vi->rss_key_size); in virtnet_init_default_rss()
2478 static void virtnet_get_hashflow(const struct virtnet_info *vi, struct ethtool_rxnfc *info) in virtnet_get_hashflow() argument
2483 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_TCPv4) { in virtnet_get_hashflow()
2486 } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4) { in virtnet_get_hashflow()
2491 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_TCPv6) { in virtnet_get_hashflow()
2494 } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6) { in virtnet_get_hashflow()
2499 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_UDPv4) { in virtnet_get_hashflow()
2502 } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4) { in virtnet_get_hashflow()
2507 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_UDPv6) { in virtnet_get_hashflow()
2510 } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6) { in virtnet_get_hashflow()
2515 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4) in virtnet_get_hashflow()
2520 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6) in virtnet_get_hashflow()
2530 static bool virtnet_set_hashflow(struct virtnet_info *vi, struct ethtool_rxnfc *info) in virtnet_set_hashflow() argument
2532 u32 new_hashtypes = vi->rss_hash_types_saved; in virtnet_set_hashflow()
2581 if (new_hashtypes != (new_hashtypes & vi->rss_hash_types_supported)) in virtnet_set_hashflow()
2584 if (new_hashtypes != vi->rss_hash_types_saved) { in virtnet_set_hashflow()
2585 vi->rss_hash_types_saved = new_hashtypes; in virtnet_set_hashflow()
2586 vi->ctrl->rss.hash_types = vi->rss_hash_types_saved; in virtnet_set_hashflow()
2587 if (vi->dev->features & NETIF_F_RXHASH) in virtnet_set_hashflow()
2588 return virtnet_commit_rss_command(vi); in virtnet_set_hashflow()
2597 struct virtnet_info *vi = netdev_priv(dev); in virtnet_get_drvinfo() local
2598 struct virtio_device *vdev = vi->vdev; in virtnet_get_drvinfo()
2610 struct virtnet_info *vi = netdev_priv(dev); in virtnet_set_channels() local
2620 if (queue_pairs > vi->max_queue_pairs || queue_pairs == 0) in virtnet_set_channels()
2627 if (vi->rq[0].xdp_prog) in virtnet_set_channels()
2631 err = _virtnet_set_queues(vi, queue_pairs); in virtnet_set_channels()
2636 virtnet_set_affinity(vi); in virtnet_set_channels()
2647 struct virtnet_info *vi = netdev_priv(dev); in virtnet_get_strings() local
2653 for (i = 0; i < vi->curr_queue_pairs; i++) { in virtnet_get_strings()
2659 for (i = 0; i < vi->curr_queue_pairs; i++) { in virtnet_get_strings()
2670 struct virtnet_info *vi = netdev_priv(dev); in virtnet_get_sset_count() local
2674 return vi->curr_queue_pairs * (VIRTNET_RQ_STATS_LEN + in virtnet_get_sset_count()
2684 struct virtnet_info *vi = netdev_priv(dev); in virtnet_get_ethtool_stats() local
2689 for (i = 0; i < vi->curr_queue_pairs; i++) { in virtnet_get_ethtool_stats()
2690 struct receive_queue *rq = &vi->rq[i]; in virtnet_get_ethtool_stats()
2703 for (i = 0; i < vi->curr_queue_pairs; i++) { in virtnet_get_ethtool_stats()
2704 struct send_queue *sq = &vi->sq[i]; in virtnet_get_ethtool_stats()
2721 struct virtnet_info *vi = netdev_priv(dev); in virtnet_get_channels() local
2723 channels->combined_count = vi->curr_queue_pairs; in virtnet_get_channels()
2724 channels->max_combined = vi->max_queue_pairs; in virtnet_get_channels()
2734 struct virtnet_info *vi = netdev_priv(dev); in virtnet_set_link_ksettings() local
2737 &vi->speed, &vi->duplex); in virtnet_set_link_ksettings()
2743 struct virtnet_info *vi = netdev_priv(dev); in virtnet_get_link_ksettings() local
2745 cmd->base.speed = vi->speed; in virtnet_get_link_ksettings()
2746 cmd->base.duplex = vi->duplex; in virtnet_get_link_ksettings()
2752 static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi, in virtnet_send_notf_coal_cmds() argument
2763 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL, in virtnet_send_notf_coal_cmds()
2769 vi->tx_usecs = ec->tx_coalesce_usecs; in virtnet_send_notf_coal_cmds()
2770 vi->tx_max_packets = ec->tx_max_coalesced_frames; in virtnet_send_notf_coal_cmds()
2776 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL, in virtnet_send_notf_coal_cmds()
2782 vi->rx_usecs = ec->rx_coalesce_usecs; in virtnet_send_notf_coal_cmds()
2783 vi->rx_max_packets = ec->rx_max_coalesced_frames; in virtnet_send_notf_coal_cmds()
2808 struct virtnet_info *vi = netdev_priv(dev); in virtnet_set_coalesce() local
2814 if (napi_weight ^ vi->sq[0].napi.weight) { in virtnet_set_coalesce()
2821 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) in virtnet_set_coalesce()
2822 ret = virtnet_send_notf_coal_cmds(vi, ec); in virtnet_set_coalesce()
2830 for (i = 0; i < vi->max_queue_pairs; i++) in virtnet_set_coalesce()
2831 vi->sq[i].napi.weight = napi_weight; in virtnet_set_coalesce()
2842 struct virtnet_info *vi = netdev_priv(dev); in virtnet_get_coalesce() local
2844 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) { in virtnet_get_coalesce()
2845 ec->rx_coalesce_usecs = vi->rx_usecs; in virtnet_get_coalesce()
2846 ec->tx_coalesce_usecs = vi->tx_usecs; in virtnet_get_coalesce()
2847 ec->tx_max_coalesced_frames = vi->tx_max_packets; in virtnet_get_coalesce()
2848 ec->rx_max_coalesced_frames = vi->rx_max_packets; in virtnet_get_coalesce()
2852 if (vi->sq[0].napi.weight) in virtnet_get_coalesce()
2861 struct virtnet_info *vi = netdev_priv(dev); in virtnet_init_settings() local
2863 vi->speed = SPEED_UNKNOWN; in virtnet_init_settings()
2864 vi->duplex = DUPLEX_UNKNOWN; in virtnet_init_settings()
2867 static void virtnet_update_settings(struct virtnet_info *vi) in virtnet_update_settings() argument
2872 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_SPEED_DUPLEX)) in virtnet_update_settings()
2875 virtio_cread_le(vi->vdev, struct virtio_net_config, speed, &speed); in virtnet_update_settings()
2878 vi->speed = speed; in virtnet_update_settings()
2880 virtio_cread_le(vi->vdev, struct virtio_net_config, duplex, &duplex); in virtnet_update_settings()
2883 vi->duplex = duplex; in virtnet_update_settings()
2898 struct virtnet_info *vi = netdev_priv(dev); in virtnet_get_rxfh() local
2902 for (i = 0; i < vi->rss_indir_table_size; ++i) in virtnet_get_rxfh()
2903 indir[i] = vi->ctrl->rss.indirection_table[i]; in virtnet_get_rxfh()
2907 memcpy(key, vi->ctrl->rss.key, vi->rss_key_size); in virtnet_get_rxfh()
2917 struct virtnet_info *vi = netdev_priv(dev); in virtnet_set_rxfh() local
2924 for (i = 0; i < vi->rss_indir_table_size; ++i) in virtnet_set_rxfh()
2925 vi->ctrl->rss.indirection_table[i] = indir[i]; in virtnet_set_rxfh()
2928 memcpy(vi->ctrl->rss.key, key, vi->rss_key_size); in virtnet_set_rxfh()
2930 virtnet_commit_rss_command(vi); in virtnet_set_rxfh()
2937 struct virtnet_info *vi = netdev_priv(dev); in virtnet_get_rxnfc() local
2942 info->data = vi->curr_queue_pairs; in virtnet_get_rxnfc()
2945 virtnet_get_hashflow(vi, info); in virtnet_get_rxnfc()
2956 struct virtnet_info *vi = netdev_priv(dev); in virtnet_set_rxnfc() local
2961 if (!virtnet_set_hashflow(vi, info)) in virtnet_set_rxnfc()
2999 struct virtnet_info *vi = vdev->priv; in virtnet_freeze_down() local
3002 flush_work(&vi->config_work); in virtnet_freeze_down()
3004 netif_tx_lock_bh(vi->dev); in virtnet_freeze_down()
3005 netif_device_detach(vi->dev); in virtnet_freeze_down()
3006 netif_tx_unlock_bh(vi->dev); in virtnet_freeze_down()
3007 if (netif_running(vi->dev)) in virtnet_freeze_down()
3008 virtnet_close(vi->dev); in virtnet_freeze_down()
3011 static int init_vqs(struct virtnet_info *vi);
3015 struct virtnet_info *vi = vdev->priv; in virtnet_restore_up() local
3018 err = init_vqs(vi); in virtnet_restore_up()
3024 enable_delayed_refill(vi); in virtnet_restore_up()
3026 if (netif_running(vi->dev)) { in virtnet_restore_up()
3027 err = virtnet_open(vi->dev); in virtnet_restore_up()
3032 netif_tx_lock_bh(vi->dev); in virtnet_restore_up()
3033 netif_device_attach(vi->dev); in virtnet_restore_up()
3034 netif_tx_unlock_bh(vi->dev); in virtnet_restore_up()
3038 static int virtnet_set_guest_offloads(struct virtnet_info *vi, u64 offloads) in virtnet_set_guest_offloads() argument
3041 vi->ctrl->offloads = cpu_to_virtio64(vi->vdev, offloads); in virtnet_set_guest_offloads()
3043 sg_init_one(&sg, &vi->ctrl->offloads, sizeof(vi->ctrl->offloads)); in virtnet_set_guest_offloads()
3045 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_GUEST_OFFLOADS, in virtnet_set_guest_offloads()
3047 dev_warn(&vi->dev->dev, "Fail to set guest offload.\n"); in virtnet_set_guest_offloads()
3054 static int virtnet_clear_guest_offloads(struct virtnet_info *vi) in virtnet_clear_guest_offloads() argument
3058 if (!vi->guest_offloads) in virtnet_clear_guest_offloads()
3061 return virtnet_set_guest_offloads(vi, offloads); in virtnet_clear_guest_offloads()
3064 static int virtnet_restore_guest_offloads(struct virtnet_info *vi) in virtnet_restore_guest_offloads() argument
3066 u64 offloads = vi->guest_offloads; in virtnet_restore_guest_offloads()
3068 if (!vi->guest_offloads) in virtnet_restore_guest_offloads()
3071 return virtnet_set_guest_offloads(vi, offloads); in virtnet_restore_guest_offloads()
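Lines 3038-3071 cover the guest-offload trio nearly in full; only braces, return values and the zero initializer in the clear path are filled in here. Set sends the offload bitmap over the control vq; clear and restore are thin wrappers used when XDP is attached and detached (see 3134 and 3150 below):

static int virtnet_set_guest_offloads(struct virtnet_info *vi, u64 offloads)
{
        struct scatterlist sg;

        vi->ctrl->offloads = cpu_to_virtio64(vi->vdev, offloads);
        sg_init_one(&sg, &vi->ctrl->offloads, sizeof(vi->ctrl->offloads));

        if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_GUEST_OFFLOADS,
                                  VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET, &sg)) {
                dev_warn(&vi->dev->dev, "Fail to set guest offload.\n");
                return -EINVAL;
        }
        return 0;
}

static int virtnet_clear_guest_offloads(struct virtnet_info *vi)
{
        u64 offloads = 0;       /* drop all offloads, e.g. before XDP */

        if (!vi->guest_offloads)
                return 0;

        return virtnet_set_guest_offloads(vi, offloads);
}

static int virtnet_restore_guest_offloads(struct virtnet_info *vi)
{
        u64 offloads = vi->guest_offloads;

        if (!vi->guest_offloads)
                return 0;

        return virtnet_set_guest_offloads(vi, offloads);
}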
3078 struct virtnet_info *vi = netdev_priv(dev); in virtnet_xdp_set() local
3083 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) in virtnet_xdp_set()
3084 && (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) || in virtnet_xdp_set()
3085 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) || in virtnet_xdp_set()
3086 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) || in virtnet_xdp_set()
3087 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO) || in virtnet_xdp_set()
3088 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM))) { in virtnet_xdp_set()
3093 if (vi->mergeable_rx_bufs && !vi->any_header_sg) { in virtnet_xdp_set()
3104 curr_qp = vi->curr_queue_pairs - vi->xdp_queue_pairs; in virtnet_xdp_set()
3109 if (curr_qp + xdp_qp > vi->max_queue_pairs) { in virtnet_xdp_set()
3111 curr_qp + xdp_qp, vi->max_queue_pairs); in virtnet_xdp_set()
3115 old_prog = rtnl_dereference(vi->rq[0].xdp_prog); in virtnet_xdp_set()
3120 bpf_prog_add(prog, vi->max_queue_pairs - 1); in virtnet_xdp_set()
3124 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_xdp_set()
3125 napi_disable(&vi->rq[i].napi); in virtnet_xdp_set()
3126 virtnet_napi_tx_disable(&vi->sq[i].napi); in virtnet_xdp_set()
3131 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_xdp_set()
3132 rcu_assign_pointer(vi->rq[i].xdp_prog, prog); in virtnet_xdp_set()
3134 virtnet_restore_guest_offloads(vi); in virtnet_xdp_set()
3139 err = _virtnet_set_queues(vi, curr_qp + xdp_qp); in virtnet_xdp_set()
3143 vi->xdp_queue_pairs = xdp_qp; in virtnet_xdp_set()
3146 vi->xdp_enabled = true; in virtnet_xdp_set()
3147 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_xdp_set()
3148 rcu_assign_pointer(vi->rq[i].xdp_prog, prog); in virtnet_xdp_set()
3150 virtnet_clear_guest_offloads(vi); in virtnet_xdp_set()
3153 vi->xdp_enabled = false; in virtnet_xdp_set()
3156 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_xdp_set()
3160 virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); in virtnet_xdp_set()
3161 virtnet_napi_tx_enable(vi, vi->sq[i].vq, in virtnet_xdp_set()
3162 &vi->sq[i].napi); in virtnet_xdp_set()
3170 virtnet_clear_guest_offloads(vi); in virtnet_xdp_set()
3171 for (i = 0; i < vi->max_queue_pairs; i++) in virtnet_xdp_set()
3172 rcu_assign_pointer(vi->rq[i].xdp_prog, old_prog); in virtnet_xdp_set()
3176 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_xdp_set()
3177 virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); in virtnet_xdp_set()
3178 virtnet_napi_tx_enable(vi, vi->sq[i].vq, in virtnet_xdp_set()
3179 &vi->sq[i].napi); in virtnet_xdp_set()
3183 bpf_prog_sub(prog, vi->max_queue_pairs - 1); in virtnet_xdp_set()
3200 struct virtnet_info *vi = netdev_priv(dev); in virtnet_get_phys_port_name() local
3203 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_STANDBY)) in virtnet_get_phys_port_name()
3216 struct virtnet_info *vi = netdev_priv(dev); in virtnet_set_features() local
3221 if (vi->xdp_enabled) in virtnet_set_features()
3225 offloads = vi->guest_offloads_capable; in virtnet_set_features()
3227 offloads = vi->guest_offloads_capable & in virtnet_set_features()
3230 err = virtnet_set_guest_offloads(vi, offloads); in virtnet_set_features()
3233 vi->guest_offloads = offloads; in virtnet_set_features()
3238 vi->ctrl->rss.hash_types = vi->rss_hash_types_saved; in virtnet_set_features()
3240 vi->ctrl->rss.hash_types = VIRTIO_NET_HASH_REPORT_NONE; in virtnet_set_features()
3242 if (!virtnet_commit_rss_command(vi)) in virtnet_set_features()
3284 struct virtnet_info *vi = in virtnet_config_changed_work() local
3288 if (virtio_cread_feature(vi->vdev, VIRTIO_NET_F_STATUS, in virtnet_config_changed_work()
3293 netdev_notify_peers(vi->dev); in virtnet_config_changed_work()
3294 virtnet_ack_link_announce(vi); in virtnet_config_changed_work()
3300 if (vi->status == v) in virtnet_config_changed_work()
3303 vi->status = v; in virtnet_config_changed_work()
3305 if (vi->status & VIRTIO_NET_S_LINK_UP) { in virtnet_config_changed_work()
3306 virtnet_update_settings(vi); in virtnet_config_changed_work()
3307 netif_carrier_on(vi->dev); in virtnet_config_changed_work()
3308 netif_tx_wake_all_queues(vi->dev); in virtnet_config_changed_work()
3310 netif_carrier_off(vi->dev); in virtnet_config_changed_work()
3311 netif_tx_stop_all_queues(vi->dev); in virtnet_config_changed_work()
3317 struct virtnet_info *vi = vdev->priv; in virtnet_config_changed() local
3319 schedule_work(&vi->config_work); in virtnet_config_changed()
3322 static void virtnet_free_queues(struct virtnet_info *vi) in virtnet_free_queues() argument
3326 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_free_queues()
3327 __netif_napi_del(&vi->rq[i].napi); in virtnet_free_queues()
3328 __netif_napi_del(&vi->sq[i].napi); in virtnet_free_queues()
3336 kfree(vi->rq); in virtnet_free_queues()
3337 kfree(vi->sq); in virtnet_free_queues()
3338 kfree(vi->ctrl); in virtnet_free_queues()
3341 static void _free_receive_bufs(struct virtnet_info *vi) in _free_receive_bufs() argument
3346 for (i = 0; i < vi->max_queue_pairs; i++) { in _free_receive_bufs()
3347 while (vi->rq[i].pages) in _free_receive_bufs()
3348 __free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0); in _free_receive_bufs()
3350 old_prog = rtnl_dereference(vi->rq[i].xdp_prog); in _free_receive_bufs()
3351 RCU_INIT_POINTER(vi->rq[i].xdp_prog, NULL); in _free_receive_bufs()
3357 static void free_receive_bufs(struct virtnet_info *vi) in free_receive_bufs() argument
3360 _free_receive_bufs(vi); in free_receive_bufs()
3364 static void free_receive_page_frags(struct virtnet_info *vi) in free_receive_page_frags() argument
3367 for (i = 0; i < vi->max_queue_pairs; i++) in free_receive_page_frags()
3368 if (vi->rq[i].alloc_frag.page) in free_receive_page_frags()
3369 put_page(vi->rq[i].alloc_frag.page); in free_receive_page_frags()
3382 struct virtnet_info *vi = vq->vdev->priv; in virtnet_rq_free_unused_buf() local
3385 if (vi->mergeable_rx_bufs) in virtnet_rq_free_unused_buf()
3387 else if (vi->big_packets) in virtnet_rq_free_unused_buf()
3388 give_pages(&vi->rq[i], buf); in virtnet_rq_free_unused_buf()
3393 static void free_unused_bufs(struct virtnet_info *vi) in free_unused_bufs() argument
3398 for (i = 0; i < vi->max_queue_pairs; i++) { in free_unused_bufs()
3399 struct virtqueue *vq = vi->sq[i].vq; in free_unused_bufs()
3404 for (i = 0; i < vi->max_queue_pairs; i++) { in free_unused_bufs()
3405 struct virtqueue *vq = vi->rq[i].vq; in free_unused_bufs()
3411 static void virtnet_del_vqs(struct virtnet_info *vi) in virtnet_del_vqs() argument
3413 struct virtio_device *vdev = vi->vdev; in virtnet_del_vqs()
3415 virtnet_clean_affinity(vi); in virtnet_del_vqs()
3419 virtnet_free_queues(vi); in virtnet_del_vqs()
3426 static unsigned int mergeable_min_buf_len(struct virtnet_info *vi, struct virtqueue *vq) in mergeable_min_buf_len() argument
3428 const unsigned int hdr_len = vi->hdr_len; in mergeable_min_buf_len()
3430 unsigned int packet_len = vi->big_packets ? IP_MAX_MTU : vi->dev->max_mtu; in mergeable_min_buf_len()
3438 static int virtnet_find_vqs(struct virtnet_info *vi) in virtnet_find_vqs() argument
3451 total_vqs = vi->max_queue_pairs * 2 + in virtnet_find_vqs()
3452 virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ); in virtnet_find_vqs()
3464 if (!vi->big_packets || vi->mergeable_rx_bufs) { in virtnet_find_vqs()
3473 if (vi->has_cvq) { in virtnet_find_vqs()
3479 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_find_vqs()
3482 sprintf(vi->rq[i].name, "input.%d", i); in virtnet_find_vqs()
3483 sprintf(vi->sq[i].name, "output.%d", i); in virtnet_find_vqs()
3484 names[rxq2vq(i)] = vi->rq[i].name; in virtnet_find_vqs()
3485 names[txq2vq(i)] = vi->sq[i].name; in virtnet_find_vqs()
3490 ret = virtio_find_vqs_ctx(vi->vdev, total_vqs, vqs, callbacks, in virtnet_find_vqs()
3495 if (vi->has_cvq) { in virtnet_find_vqs()
3496 vi->cvq = vqs[total_vqs - 1]; in virtnet_find_vqs()
3497 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN)) in virtnet_find_vqs()
3498 vi->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; in virtnet_find_vqs()
3501 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_find_vqs()
3502 vi->rq[i].vq = vqs[rxq2vq(i)]; in virtnet_find_vqs()
3503 vi->rq[i].min_buf_len = mergeable_min_buf_len(vi, vi->rq[i].vq); in virtnet_find_vqs()
3504 vi->sq[i].vq = vqs[txq2vq(i)]; in virtnet_find_vqs()
3522 static int virtnet_alloc_queues(struct virtnet_info *vi) in virtnet_alloc_queues() argument
3526 if (vi->has_cvq) { in virtnet_alloc_queues()
3527 vi->ctrl = kzalloc(sizeof(*vi->ctrl), GFP_KERNEL); in virtnet_alloc_queues()
3528 if (!vi->ctrl) in virtnet_alloc_queues()
3531 vi->ctrl = NULL; in virtnet_alloc_queues()
3533 vi->sq = kcalloc(vi->max_queue_pairs, sizeof(*vi->sq), GFP_KERNEL); in virtnet_alloc_queues()
3534 if (!vi->sq) in virtnet_alloc_queues()
3536 vi->rq = kcalloc(vi->max_queue_pairs, sizeof(*vi->rq), GFP_KERNEL); in virtnet_alloc_queues()
3537 if (!vi->rq) in virtnet_alloc_queues()
3540 INIT_DELAYED_WORK(&vi->refill, refill_work); in virtnet_alloc_queues()
3541 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_alloc_queues()
3542 vi->rq[i].pages = NULL; in virtnet_alloc_queues()
3543 netif_napi_add_weight(vi->dev, &vi->rq[i].napi, virtnet_poll, in virtnet_alloc_queues()
3545 netif_napi_add_tx_weight(vi->dev, &vi->sq[i].napi, in virtnet_alloc_queues()
3549 sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg)); in virtnet_alloc_queues()
3550 ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len); in virtnet_alloc_queues()
3551 sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg)); in virtnet_alloc_queues()
3553 u64_stats_init(&vi->rq[i].stats.syncp); in virtnet_alloc_queues()
3554 u64_stats_init(&vi->sq[i].stats.syncp); in virtnet_alloc_queues()
3560 kfree(vi->sq); in virtnet_alloc_queues()
3562 kfree(vi->ctrl); in virtnet_alloc_queues()
3567 static int init_vqs(struct virtnet_info *vi) in init_vqs() argument
3572 ret = virtnet_alloc_queues(vi); in init_vqs()
3576 ret = virtnet_find_vqs(vi); in init_vqs()
3581 virtnet_set_affinity(vi); in init_vqs()
3587 virtnet_free_queues(vi); in init_vqs()
3596 struct virtnet_info *vi = netdev_priv(queue->dev); in mergeable_rx_buffer_size_show() local
3598 unsigned int headroom = virtnet_get_headroom(vi); in mergeable_rx_buffer_size_show()
3602 BUG_ON(queue_index >= vi->max_queue_pairs); in mergeable_rx_buffer_size_show()
3603 avg = &vi->rq[queue_index].mrg_avg_pkt_len; in mergeable_rx_buffer_size_show()
3605 get_mergeable_buf_len(&vi->rq[queue_index], avg, in mergeable_rx_buffer_size_show()
3688 static bool virtnet_check_guest_gso(const struct virtnet_info *vi) in virtnet_check_guest_gso() argument
3690 return virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) || in virtnet_check_guest_gso()
3691 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) || in virtnet_check_guest_gso()
3692 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) || in virtnet_check_guest_gso()
3693 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO); in virtnet_check_guest_gso()
3696 static void virtnet_set_big_packets(struct virtnet_info *vi, const int mtu) in virtnet_set_big_packets() argument
3698 bool guest_gso = virtnet_check_guest_gso(vi); in virtnet_set_big_packets()
3705 vi->big_packets = true; in virtnet_set_big_packets()
3706 vi->big_packets_num_skbfrags = guest_gso ? MAX_SKB_FRAGS : DIV_ROUND_UP(mtu, PAGE_SIZE); in virtnet_set_big_packets()
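virtnet_check_guest_gso() (3688-3693) is matched in full but for its braces, and virtnet_set_big_packets() (3696-3706) only needs its guard filled in. A reconstruction, with the mtu > ETH_DATA_LEN test assumed as the non-GSO trigger for big-packet mode:

static bool virtnet_check_guest_gso(const struct virtnet_info *vi)
{
        return virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) ||
               virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) ||
               virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) ||
               virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO);
}

static void virtnet_set_big_packets(struct virtnet_info *vi, const int mtu)
{
        bool guest_gso = virtnet_check_guest_gso(vi);

        /* If the device may hand us any GSO packet, size receive
         * buffers for the maximum; otherwise only for mtu-sized frames.
         */
        if (mtu > ETH_DATA_LEN || guest_gso) {
                vi->big_packets = true;
                vi->big_packets_num_skbfrags = guest_gso ?
                        MAX_SKB_FRAGS : DIV_ROUND_UP(mtu, PAGE_SIZE);
        }
}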
3714 struct virtnet_info *vi; in virtnet_probe() local
3796 vi = netdev_priv(dev); in virtnet_probe()
3797 vi->dev = dev; in virtnet_probe()
3798 vi->vdev = vdev; in virtnet_probe()
3799 vdev->priv = vi; in virtnet_probe()
3801 INIT_WORK(&vi->config_work, virtnet_config_changed_work); in virtnet_probe()
3802 spin_lock_init(&vi->refill_lock); in virtnet_probe()
3805 vi->mergeable_rx_bufs = true; in virtnet_probe()
3807 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) { in virtnet_probe()
3808 vi->rx_usecs = 0; in virtnet_probe()
3809 vi->tx_usecs = 0; in virtnet_probe()
3810 vi->tx_max_packets = 0; in virtnet_probe()
3811 vi->rx_max_packets = 0; in virtnet_probe()
3815 vi->has_rss_hash_report = true; in virtnet_probe()
3818 vi->has_rss = true; in virtnet_probe()
3820 if (vi->has_rss || vi->has_rss_hash_report) { in virtnet_probe()
3821 vi->rss_indir_table_size = in virtnet_probe()
3824 vi->rss_key_size = in virtnet_probe()
3827 vi->rss_hash_types_supported = in virtnet_probe()
3829 vi->rss_hash_types_supported &= in virtnet_probe()
3837 if (vi->has_rss_hash_report) in virtnet_probe()
3838 vi->hdr_len = sizeof(struct virtio_net_hdr_v1_hash); in virtnet_probe()
3841 vi->hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf); in virtnet_probe()
3843 vi->hdr_len = sizeof(struct virtio_net_hdr); in virtnet_probe()
3847 vi->any_header_sg = true; in virtnet_probe()
3850 vi->has_cvq = true; in virtnet_probe()
3871 virtnet_set_big_packets(vi, mtu); in virtnet_probe()
3873 if (vi->any_header_sg) in virtnet_probe()
3874 dev->needed_headroom = vi->hdr_len; in virtnet_probe()
3878 vi->curr_queue_pairs = max_queue_pairs; in virtnet_probe()
3880 vi->curr_queue_pairs = num_online_cpus(); in virtnet_probe()
3881 vi->max_queue_pairs = max_queue_pairs; in virtnet_probe()
3884 err = init_vqs(vi); in virtnet_probe()
3889 if (vi->mergeable_rx_bufs) in virtnet_probe()
3892 netif_set_real_num_tx_queues(dev, vi->curr_queue_pairs); in virtnet_probe()
3893 netif_set_real_num_rx_queues(dev, vi->curr_queue_pairs); in virtnet_probe()
3898 vi->failover = net_failover_create(vi->dev); in virtnet_probe()
3899 if (IS_ERR(vi->failover)) { in virtnet_probe()
3900 err = PTR_ERR(vi->failover); in virtnet_probe()
3905 if (vi->has_rss || vi->has_rss_hash_report) in virtnet_probe()
3906 virtnet_init_default_rss(vi); in virtnet_probe()
3922 err = virtnet_cpu_notif_add(vi); in virtnet_probe()
3928 virtnet_set_queues(vi, vi->curr_queue_pairs); in virtnet_probe()
3933 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) { in virtnet_probe()
3934 schedule_work(&vi->config_work); in virtnet_probe()
3936 vi->status = VIRTIO_NET_S_LINK_UP; in virtnet_probe()
3937 virtnet_update_settings(vi); in virtnet_probe()
3942 if (virtio_has_feature(vi->vdev, guest_offloads[i])) in virtnet_probe()
3943 set_bit(guest_offloads[i], &vi->guest_offloads); in virtnet_probe()
3944 vi->guest_offloads_capable = vi->guest_offloads; in virtnet_probe()
3954 net_failover_destroy(vi->failover); in virtnet_probe()
3957 cancel_delayed_work_sync(&vi->refill); in virtnet_probe()
3958 free_receive_page_frags(vi); in virtnet_probe()
3959 virtnet_del_vqs(vi); in virtnet_probe()
3965 static void remove_vq_common(struct virtnet_info *vi) in remove_vq_common() argument
3967 virtio_reset_device(vi->vdev); in remove_vq_common()
3970 free_unused_bufs(vi); in remove_vq_common()
3972 free_receive_bufs(vi); in remove_vq_common()
3974 free_receive_page_frags(vi); in remove_vq_common()
3976 virtnet_del_vqs(vi); in remove_vq_common()
3981 struct virtnet_info *vi = vdev->priv; in virtnet_remove() local
3983 virtnet_cpu_notif_remove(vi); in virtnet_remove()
3986 flush_work(&vi->config_work); in virtnet_remove()
3988 unregister_netdev(vi->dev); in virtnet_remove()
3990 net_failover_destroy(vi->failover); in virtnet_remove()
3992 remove_vq_common(vi); in virtnet_remove()
3994 free_netdev(vi->dev); in virtnet_remove()
3999 struct virtnet_info *vi = vdev->priv; in virtnet_freeze() local
4001 virtnet_cpu_notif_remove(vi); in virtnet_freeze()
4003 remove_vq_common(vi); in virtnet_freeze()
4010 struct virtnet_info *vi = vdev->priv; in virtnet_restore() local
4016 virtnet_set_queues(vi, vi->curr_queue_pairs); in virtnet_restore()
4018 err = virtnet_cpu_notif_add(vi); in virtnet_restore()
4021 remove_vq_common(vi); in virtnet_restore()