
2 * Back-end of the driver for virtual network devices. This portion of the
3 * driver exports a 'unified' network-device interface that can be accessed
4 * by any operating system that implements a virtual network device driver. A
5 * reference front-end implementation can be found in:
6 * drivers/net/xen-netfront.c
8 * Copyright (c) 2002-2005, K A Fraser
16 * Permission is hereby granted, free of charge, to any person obtaining a copy
100 * for xen-netfront with the XDP_PACKET_HEADROOM offset
120 return page_to_pfn(queue->mmap_pages[idx]); in idx_to_pfn()
130 (vif->pending_tx_info[pending_idx].callback_struct)
136 u16 pending_idx = ubuf->desc; in ubuf_to_queue()
139 return container_of(temp - pending_idx, in ubuf_to_queue()
156 return i & (MAX_PENDING_REQS-1); in pending_index()
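The pending_index() line above reduces a free-running counter to a ring slot with a bitwise AND, which is only equivalent to a modulo when MAX_PENDING_REQS is a power of two. A minimal standalone illustration of the same masking trick, using a hypothetical ring size rather than the driver's constant:

#include <assert.h>

#define RING_SLOTS 256U  /* hypothetical size; must be a power of two */

/* Map a monotonically increasing counter onto a slot index.
 * i & (RING_SLOTS - 1) equals i % RING_SLOTS only because
 * RING_SLOTS is a power of two. */
static unsigned int ring_index(unsigned int i)
{
	return i & (RING_SLOTS - 1);
}

int main(void)
{
	assert(ring_index(0) == 0);
	assert(ring_index(256) == 0);  /* counter wraps back to slot 0 */
	assert(ring_index(257) == 1);
	return 0;
}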
161 wake_up(&queue->wq); in xenvif_kick_thread()
168 RING_FINAL_CHECK_FOR_REQUESTS(&queue->tx, more_to_do); in xenvif_napi_schedule_or_enable_events()
171 napi_schedule(&queue->napi); in xenvif_napi_schedule_or_enable_events()
173 &queue->eoi_pending) & in xenvif_napi_schedule_or_enable_events()
175 xen_irq_lateeoi(queue->tx_irq, 0); in xenvif_napi_schedule_or_enable_events()
186 max_burst = max(131072UL, queue->credit_bytes); in tx_add_credit()
189 max_credit = queue->remaining_credit + queue->credit_bytes; in tx_add_credit()
190 if (max_credit < queue->remaining_credit) in tx_add_credit()
193 queue->remaining_credit = min(max_credit, max_burst); in tx_add_credit()
194 queue->rate_limited = false; in tx_add_credit()
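The tx_add_credit() lines above top up a queue's transmit credit: the burst ceiling is at least 128 KiB (131072 bytes), the addition is checked for wrap-around, the result is capped at the burst ceiling, and the rate-limited flag is cleared. A standalone sketch of that arithmetic follows; the struct name and the saturate-on-overflow branch are assumptions, since the listing does not show the overflow branch's body.

#include <limits.h>
#include <stdbool.h>

/* Hypothetical stand-in for the per-queue credit fields used above. */
struct credit_state {
	unsigned long credit_bytes;     /* bytes granted per credit period */
	unsigned long remaining_credit; /* bytes the guest may still send  */
	bool rate_limited;
};

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

static unsigned long max_ul(unsigned long a, unsigned long b)
{
	return a > b ? a : b;
}

/* Top up the credit, never letting the counter wrap and never
 * exceeding the allowed burst (at least 128 KiB). */
static void credit_add(struct credit_state *c)
{
	unsigned long max_burst = max_ul(131072UL, c->credit_bytes);
	unsigned long max_credit = c->remaining_credit + c->credit_bytes;

	if (max_credit < c->remaining_credit)  /* addition overflowed */
		max_credit = ULONG_MAX;        /* assumed: saturate   */

	c->remaining_credit = min_ul(max_credit, max_burst);
	c->rate_limited = false;
}

int main(void)
{
	struct credit_state c = { .credit_bytes = 65536UL, .remaining_credit = 0UL };

	credit_add(&c);  /* 0 + 65536 stays below the 128 KiB burst ceiling */
	return c.remaining_credit == 65536UL ? 0 : 1;
}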
208 RING_IDX cons = queue->tx.req_cons; in xenvif_tx_err()
212 spin_lock_irqsave(&queue->response_lock, flags); in xenvif_tx_err()
215 spin_unlock_irqrestore(&queue->response_lock, flags); in xenvif_tx_err()
218 RING_COPY_REQUEST(&queue->tx, cons++, txp); in xenvif_tx_err()
221 queue->tx.req_cons = cons; in xenvif_tx_err()
226 netdev_err(vif->dev, "fatal error; disabling device\n"); in xenvif_fatal_tx_err()
227 vif->disabled = true; in xenvif_fatal_tx_err()
229 if (vif->num_queues) in xenvif_fatal_tx_err()
230 xenvif_kick_thread(&vif->queues[0]); in xenvif_fatal_tx_err()
239 RING_IDX cons = queue->tx.req_cons; in xenvif_count_requests()
244 if (!(first->flags & XEN_NETTXF_more_data)) in xenvif_count_requests()
251 netdev_err(queue->vif->dev, in xenvif_count_requests()
254 xenvif_fatal_tx_err(queue->vif); in xenvif_count_requests()
255 return -ENODATA; in xenvif_count_requests()
262 netdev_err(queue->vif->dev, in xenvif_count_requests()
265 xenvif_fatal_tx_err(queue->vif); in xenvif_count_requests()
266 return -E2BIG; in xenvif_count_requests()
271 * the historical MAX_SKB_FRAGS value 18 to honor the in xenvif_count_requests()
278 netdev_dbg(queue->vif->dev, in xenvif_count_requests()
281 drop_err = -E2BIG; in xenvif_count_requests()
287 RING_COPY_REQUEST(&queue->tx, cons + slots, txp); in xenvif_count_requests()
290 * first->size overflowed and following slots will in xenvif_count_requests()
298 if (!drop_err && txp->size > first->size) { in xenvif_count_requests()
300 netdev_dbg(queue->vif->dev, in xenvif_count_requests()
302 txp->size, first->size); in xenvif_count_requests()
303 drop_err = -EIO; in xenvif_count_requests()
306 first->size -= txp->size; in xenvif_count_requests()
309 if (unlikely((txp->offset + txp->size) > XEN_PAGE_SIZE)) { in xenvif_count_requests()
310 netdev_err(queue->vif->dev, "Cross page boundary, txp->offset: %u, size: %u\n", in xenvif_count_requests()
311 txp->offset, txp->size); in xenvif_count_requests()
312 xenvif_fatal_tx_err(queue->vif); in xenvif_count_requests()
313 return -EINVAL; in xenvif_count_requests()
316 more_data = txp->flags & XEN_NETTXF_more_data; in xenvif_count_requests()
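Among the validations visible in xenvif_count_requests() above is the check that a slot's payload does not cross its granted page: when txp->offset + txp->size exceeds XEN_PAGE_SIZE the backend logs "Cross page boundary" and calls xenvif_fatal_tx_err(), which disables the interface. A tiny illustrative check of the same condition, with a stand-in page size (not the driver's macro):

#include <stdbool.h>
#include <stdint.h>

#define GRANT_PAGE_SIZE 4096U  /* stand-in for XEN_PAGE_SIZE */

/* A slot only describes valid data if the region it names lies
 * entirely within the single granted page. The sum is widened so
 * that offset + size cannot itself wrap. */
static bool slot_fits_in_page(uint16_t offset, uint16_t size)
{
	return (uint32_t)offset + size <= GRANT_PAGE_SIZE;
}

int main(void)
{
	/* 4000 + 200 spills past the 4096-byte page, so it is rejected. */
	return slot_fits_in_page(4000, 200) ? 1 : 0;
}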
336 #define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb)
344 queue->pages_to_map[mop-queue->tx_map_ops] = queue->mmap_pages[pending_idx]; in xenvif_tx_create_map_op()
347 txp->gref, queue->vif->domid); in xenvif_tx_create_map_op()
349 memcpy(&queue->pending_tx_info[pending_idx].req, txp, in xenvif_tx_create_map_op()
351 queue->pending_tx_info[pending_idx].extra_count = extra_count; in xenvif_tx_create_map_op()
366 skb_shinfo(skb)->destructor_arg = NULL; in xenvif_alloc_skb()
379 skb_frag_t *frags = shinfo->frags; in xenvif_get_requests()
380 u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx; in xenvif_get_requests()
385 nr_slots = shinfo->nr_frags; in xenvif_get_requests()
388 start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx); in xenvif_get_requests()
390 for (shinfo->nr_frags = start; shinfo->nr_frags < nr_slots; in xenvif_get_requests()
391 shinfo->nr_frags++, txp++, gop++) { in xenvif_get_requests()
392 index = pending_index(queue->pending_cons++); in xenvif_get_requests()
393 pending_idx = queue->pending_ring[index]; in xenvif_get_requests()
395 frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx); in xenvif_get_requests()
401 frags = shinfo->frags; in xenvif_get_requests()
403 for (shinfo->nr_frags = 0; shinfo->nr_frags < frag_overflow; in xenvif_get_requests()
404 shinfo->nr_frags++, txp++, gop++) { in xenvif_get_requests()
405 index = pending_index(queue->pending_cons++); in xenvif_get_requests()
406 pending_idx = queue->pending_ring[index]; in xenvif_get_requests()
409 frag_set_pending_idx(&frags[shinfo->nr_frags], in xenvif_get_requests()
413 skb_shinfo(skb)->frag_list = nskb; in xenvif_get_requests()
423 if (unlikely(queue->grant_tx_handle[pending_idx] != in xenvif_grant_handle_set()
425 netdev_err(queue->vif->dev, in xenvif_grant_handle_set()
430 queue->grant_tx_handle[pending_idx] = handle; in xenvif_grant_handle_set()
436 if (unlikely(queue->grant_tx_handle[pending_idx] == in xenvif_grant_handle_reset()
438 netdev_err(queue->vif->dev, in xenvif_grant_handle_reset()
443 queue->grant_tx_handle[pending_idx] = NETBACK_INVALID_HANDLE; in xenvif_grant_handle_reset()
452 u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx; in xenvif_tx_check_gop()
457 /* If this is non-NULL, we are currently checking the frag_list skb, and in xenvif_tx_check_gop()
461 int nr_frags = shinfo->nr_frags; in xenvif_tx_check_gop()
463 frag_get_pending_idx(&shinfo->frags[0]) == pending_idx; in xenvif_tx_check_gop()
467 err = (*gopp_copy)->status; in xenvif_tx_check_gop()
470 netdev_dbg(queue->vif->dev, in xenvif_tx_check_gop()
472 (*gopp_copy)->status, in xenvif_tx_check_gop()
474 (*gopp_copy)->source.u.ref); in xenvif_tx_check_gop()
486 pending_idx = frag_get_pending_idx(&shinfo->frags[i]); in xenvif_tx_check_gop()
489 newerr = gop_map->status; in xenvif_tx_check_gop()
494 gop_map->handle); in xenvif_tx_check_gop()
514 netdev_dbg(queue->vif->dev, in xenvif_tx_check_gop()
517 gop_map->status, in xenvif_tx_check_gop()
519 gop_map->ref); in xenvif_tx_check_gop()
532 XENVIF_TX_CB(skb)->pending_idx, in xenvif_tx_check_gop()
537 pending_idx = frag_get_pending_idx(&shinfo->frags[j]); in xenvif_tx_check_gop()
547 for (j = 0; j < first_shinfo->nr_frags; j++) { in xenvif_tx_check_gop()
548 pending_idx = frag_get_pending_idx(&first_shinfo->frags[j]); in xenvif_tx_check_gop()
561 shinfo = skb_shinfo(skb_shinfo(skb)->frag_list); in xenvif_tx_check_gop()
562 nr_frags = shinfo->nr_frags; in xenvif_tx_check_gop()
574 int nr_frags = shinfo->nr_frags; in xenvif_fill_frags()
579 skb_frag_t *frag = shinfo->frags + i; in xenvif_fill_frags()
588 skb_shinfo(skb)->destructor_arg = in xenvif_fill_frags()
597 txp = &queue->pending_tx_info[pending_idx].req; in xenvif_fill_frags()
599 __skb_fill_page_desc(skb, i, page, txp->offset, txp->size); in xenvif_fill_frags()
600 skb->len += txp->size; in xenvif_fill_frags()
601 skb->data_len += txp->size; in xenvif_fill_frags()
602 skb->truesize += txp->size; in xenvif_fill_frags()
605 get_page(queue->mmap_pages[pending_idx]); in xenvif_fill_frags()
615 RING_IDX cons = queue->tx.req_cons; in xenvif_get_extras()
618 if (unlikely(work_to_do-- <= 0)) { in xenvif_get_extras()
619 netdev_err(queue->vif->dev, "Missing extra info\n"); in xenvif_get_extras()
620 xenvif_fatal_tx_err(queue->vif); in xenvif_get_extras()
621 return -EBADR; in xenvif_get_extras()
624 RING_COPY_REQUEST(&queue->tx, cons, &extra); in xenvif_get_extras()
626 queue->tx.req_cons = ++cons; in xenvif_get_extras()
631 netdev_err(queue->vif->dev, in xenvif_get_extras()
633 xenvif_fatal_tx_err(queue->vif); in xenvif_get_extras()
634 return -EINVAL; in xenvif_get_extras()
637 memcpy(&extras[extra.type - 1], &extra, sizeof(extra)); in xenvif_get_extras()
647 if (!gso->u.gso.size) { in xenvif_set_skb_gso()
648 netdev_err(vif->dev, "GSO size must not be zero.\n"); in xenvif_set_skb_gso()
650 return -EINVAL; in xenvif_set_skb_gso()
653 switch (gso->u.gso.type) { in xenvif_set_skb_gso()
655 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; in xenvif_set_skb_gso()
658 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; in xenvif_set_skb_gso()
661 netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type); in xenvif_set_skb_gso()
663 return -EINVAL; in xenvif_set_skb_gso()
666 skb_shinfo(skb)->gso_size = gso->u.gso.size; in xenvif_set_skb_gso()
681 if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) { in checksum_setup()
682 queue->stats.rx_gso_checksum_fixup++; in checksum_setup()
683 skb->ip_summed = CHECKSUM_PARTIAL; in checksum_setup()
687 /* A non-CHECKSUM_PARTIAL SKB does not require setup. */ in checksum_setup()
688 if (skb->ip_summed != CHECKSUM_PARTIAL) in checksum_setup()
697 u64 next_credit = queue->credit_window_start + in tx_credit_exceeded()
698 msecs_to_jiffies(queue->credit_usec / 1000); in tx_credit_exceeded()
701 if (timer_pending(&queue->credit_timeout)) { in tx_credit_exceeded()
702 queue->rate_limited = true; in tx_credit_exceeded()
708 queue->credit_window_start = now; in tx_credit_exceeded()
713 if (size > queue->remaining_credit) { in tx_credit_exceeded()
714 mod_timer(&queue->credit_timeout, in tx_credit_exceeded()
716 queue->credit_window_start = next_credit; in tx_credit_exceeded()
717 queue->rate_limited = true; in tx_credit_exceeded()
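The tx_credit_exceeded() lines above gate each packet on the remaining credit: the next replenish time is credit_window_start plus the configured interval, a still-pending timer means the queue is already rate limited, an elapsed window restarts at "now", and a packet larger than the remaining credit arms the timer and defers the packet. The sketch below compresses that decision into plain C; the struct, the integer "ticks" clock, and the boolean timer flag are illustrative assumptions standing in for jiffies and struct timer_list.

#include <stdbool.h>

/* Hypothetical, simplified view of the per-queue credit window. */
struct credit_window {
	unsigned long window_start;  /* when the current window began     */
	unsigned long window_len;    /* replenish interval, in ticks      */
	unsigned long remaining;     /* bytes still allowed in the window */
	bool rate_limited;
	bool timer_armed;
};

/* Return true if a packet of 'size' bytes must wait for more credit. */
static bool credit_exceeded(struct credit_window *w, unsigned long now,
			    unsigned long size)
{
	unsigned long next_credit = w->window_start + w->window_len;

	/* The replenish timer is already armed: still rate limited. */
	if (w->timer_armed) {
		w->rate_limited = true;
		return true;
	}

	/* The previous window has passed; start a new one
	 * (the driver also tops the credit back up at this point). */
	if ((long)(now - next_credit) >= 0)
		w->window_start = now;

	/* Not enough credit for this packet: arm the timer and defer. */
	if (size > w->remaining) {
		w->timer_armed = true;
		w->window_start = next_credit;
		w->rate_limited = true;
		return true;
	}

	return false;
}

int main(void)
{
	struct credit_window w = { .window_len = 100, .remaining = 1500 };

	/* A 9000-byte packet exceeds the 1500 bytes of credit left. */
	return credit_exceeded(&w, 0, 9000) ? 0 : 1;
}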
734 if (vif->fe_mcast_count == XEN_NETBK_MCAST_MAX) { in xenvif_mcast_add()
736 netdev_err(vif->dev, in xenvif_mcast_add()
738 return -ENOSPC; in xenvif_mcast_add()
743 return -ENOMEM; in xenvif_mcast_add()
745 ether_addr_copy(mcast->addr, addr); in xenvif_mcast_add()
746 list_add_tail_rcu(&mcast->entry, &vif->fe_mcast_addr); in xenvif_mcast_add()
747 vif->fe_mcast_count++; in xenvif_mcast_add()
756 list_for_each_entry_rcu(mcast, &vif->fe_mcast_addr, entry) { in xenvif_mcast_del()
757 if (ether_addr_equal(addr, mcast->addr)) { in xenvif_mcast_del()
758 --vif->fe_mcast_count; in xenvif_mcast_del()
759 list_del_rcu(&mcast->entry); in xenvif_mcast_del()
771 list_for_each_entry_rcu(mcast, &vif->fe_mcast_addr, entry) { in xenvif_mcast_match()
772 if (ether_addr_equal(addr, mcast->addr)) { in xenvif_mcast_match()
787 while (!list_empty(&vif->fe_mcast_addr)) { in xenvif_mcast_addr_list_free()
790 mcast = list_first_entry(&vif->fe_mcast_addr, in xenvif_mcast_addr_list_free()
793 --vif->fe_mcast_count; in xenvif_mcast_addr_list_free()
794 list_del(&mcast->entry); in xenvif_mcast_addr_list_free()
804 struct gnttab_map_grant_ref *gop = queue->tx_map_ops; in xenvif_tx_build_gops()
809 while (skb_queue_len(&queue->tx_queue) < budget) { in xenvif_tx_build_gops()
812 struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1]; in xenvif_tx_build_gops()
820 if (queue->tx.sring->req_prod - queue->tx.req_cons > in xenvif_tx_build_gops()
822 netdev_err(queue->vif->dev, in xenvif_tx_build_gops()
825 queue->tx.sring->req_prod, queue->tx.req_cons, in xenvif_tx_build_gops()
827 xenvif_fatal_tx_err(queue->vif); in xenvif_tx_build_gops()
831 work_to_do = RING_HAS_UNCONSUMED_REQUESTS(&queue->tx); in xenvif_tx_build_gops()
835 idx = queue->tx.req_cons; in xenvif_tx_build_gops()
837 RING_COPY_REQUEST(&queue->tx, idx, &txreq); in xenvif_tx_build_gops()
839 /* Credit-based scheduling. */ in xenvif_tx_build_gops()
840 if (txreq.size > queue->remaining_credit && in xenvif_tx_build_gops()
844 queue->remaining_credit -= txreq.size; in xenvif_tx_build_gops()
846 work_to_do--; in xenvif_tx_build_gops()
847 queue->tx.req_cons = ++idx; in xenvif_tx_build_gops()
855 idx = queue->tx.req_cons; in xenvif_tx_build_gops()
860 if (extras[XEN_NETIF_EXTRA_TYPE_MCAST_ADD - 1].type) { in xenvif_tx_build_gops()
863 extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_ADD - 1]; in xenvif_tx_build_gops()
864 ret = xenvif_mcast_add(queue->vif, extra->u.mcast.addr); in xenvif_tx_build_gops()
874 if (extras[XEN_NETIF_EXTRA_TYPE_MCAST_DEL - 1].type) { in xenvif_tx_build_gops()
877 extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_DEL - 1]; in xenvif_tx_build_gops()
878 xenvif_mcast_del(queue->vif, extra->u.mcast.addr); in xenvif_tx_build_gops()
894 netdev_dbg(queue->vif->dev, in xenvif_tx_build_gops()
902 netdev_err(queue->vif->dev, in xenvif_tx_build_gops()
906 xenvif_fatal_tx_err(queue->vif); in xenvif_tx_build_gops()
910 index = pending_index(queue->pending_cons); in xenvif_tx_build_gops()
911 pending_idx = queue->pending_ring[index]; in xenvif_tx_build_gops()
919 netdev_dbg(queue->vif->dev, in xenvif_tx_build_gops()
925 skb_shinfo(skb)->nr_frags = ret; in xenvif_tx_build_gops()
927 skb_shinfo(skb)->nr_frags++; in xenvif_tx_build_gops()
928 /* At this point shinfo->nr_frags is in fact the number of in xenvif_tx_build_gops()
933 if (skb_shinfo(skb)->nr_frags > MAX_SKB_FRAGS) { in xenvif_tx_build_gops()
934 frag_overflow = skb_shinfo(skb)->nr_frags - MAX_SKB_FRAGS; in xenvif_tx_build_gops()
936 skb_shinfo(skb)->nr_frags = MAX_SKB_FRAGS; in xenvif_tx_build_gops()
939 skb_shinfo(skb)->nr_frags = 0; in xenvif_tx_build_gops()
943 netdev_err(queue->vif->dev, in xenvif_tx_build_gops()
949 if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) { in xenvif_tx_build_gops()
951 gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1]; in xenvif_tx_build_gops()
953 if (xenvif_set_skb_gso(queue->vif, skb, gso)) { in xenvif_tx_build_gops()
955 skb_shinfo(skb)->nr_frags = 0; in xenvif_tx_build_gops()
962 if (extras[XEN_NETIF_EXTRA_TYPE_HASH - 1].type) { in xenvif_tx_build_gops()
966 extra = &extras[XEN_NETIF_EXTRA_TYPE_HASH - 1]; in xenvif_tx_build_gops()
968 switch (extra->u.hash.type) { in xenvif_tx_build_gops()
985 *(u32 *)extra->u.hash.value, in xenvif_tx_build_gops()
989 XENVIF_TX_CB(skb)->pending_idx = pending_idx; in xenvif_tx_build_gops()
992 queue->tx_copy_ops[*copy_ops].source.u.ref = txreq.gref; in xenvif_tx_build_gops()
993 queue->tx_copy_ops[*copy_ops].source.domid = queue->vif->domid; in xenvif_tx_build_gops()
994 queue->tx_copy_ops[*copy_ops].source.offset = txreq.offset; in xenvif_tx_build_gops()
996 queue->tx_copy_ops[*copy_ops].dest.u.gmfn = in xenvif_tx_build_gops()
997 virt_to_gfn(skb->data); in xenvif_tx_build_gops()
998 queue->tx_copy_ops[*copy_ops].dest.domid = DOMID_SELF; in xenvif_tx_build_gops()
999 queue->tx_copy_ops[*copy_ops].dest.offset = in xenvif_tx_build_gops()
1000 offset_in_page(skb->data) & ~XEN_PAGE_MASK; in xenvif_tx_build_gops()
1002 queue->tx_copy_ops[*copy_ops].len = data_len; in xenvif_tx_build_gops()
1003 queue->tx_copy_ops[*copy_ops].flags = GNTCOPY_source_gref; in xenvif_tx_build_gops()
1008 frag_set_pending_idx(&skb_shinfo(skb)->frags[0], in xenvif_tx_build_gops()
1014 frag_set_pending_idx(&skb_shinfo(skb)->frags[0], in xenvif_tx_build_gops()
1016 memcpy(&queue->pending_tx_info[pending_idx].req, in xenvif_tx_build_gops()
1018 queue->pending_tx_info[pending_idx].extra_count = in xenvif_tx_build_gops()
1022 queue->pending_cons++; in xenvif_tx_build_gops()
1027 __skb_queue_tail(&queue->tx_queue, skb); in xenvif_tx_build_gops()
1029 queue->tx.req_cons = idx; in xenvif_tx_build_gops()
1031 if (((gop-queue->tx_map_ops) >= ARRAY_SIZE(queue->tx_map_ops)) || in xenvif_tx_build_gops()
1032 (*copy_ops >= ARRAY_SIZE(queue->tx_copy_ops))) in xenvif_tx_build_gops()
1036 (*map_ops) = gop - queue->tx_map_ops; in xenvif_tx_build_gops()
1041 * frags. Returns 0 or -ENOMEM if can't allocate new pages.
1049 struct sk_buff *nskb = skb_shinfo(skb)->frag_list; in xenvif_handle_frag_list()
1051 queue->stats.tx_zerocopy_sent += 2; in xenvif_handle_frag_list()
1052 queue->stats.tx_frag_overflow++; in xenvif_handle_frag_list()
1056 skb->truesize -= skb->data_len; in xenvif_handle_frag_list()
1057 skb->len += nskb->len; in xenvif_handle_frag_list()
1058 skb->data_len += nskb->len; in xenvif_handle_frag_list()
1061 for (i = 0; offset < skb->len; i++) { in xenvif_handle_frag_list()
1069 skb->truesize += skb->data_len; in xenvif_handle_frag_list()
1072 return -ENOMEM; in xenvif_handle_frag_list()
1075 if (offset + PAGE_SIZE < skb->len) in xenvif_handle_frag_list()
1078 len = skb->len - offset; in xenvif_handle_frag_list()
1089 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) in xenvif_handle_frag_list()
1091 uarg = skb_shinfo(skb)->destructor_arg; in xenvif_handle_frag_list()
1093 atomic_inc(&queue->inflight_packets); in xenvif_handle_frag_list()
1094 uarg->callback(uarg, true); in xenvif_handle_frag_list()
1095 skb_shinfo(skb)->destructor_arg = NULL; in xenvif_handle_frag_list()
1098 memcpy(skb_shinfo(skb)->frags, frags, i * sizeof(skb_frag_t)); in xenvif_handle_frag_list()
1099 skb_shinfo(skb)->nr_frags = i; in xenvif_handle_frag_list()
1100 skb->truesize += i * PAGE_SIZE; in xenvif_handle_frag_list()
1107 struct gnttab_map_grant_ref *gop_map = queue->tx_map_ops; in xenvif_tx_submit()
1108 struct gnttab_copy *gop_copy = queue->tx_copy_ops; in xenvif_tx_submit()
1112 while ((skb = __skb_dequeue(&queue->tx_queue)) != NULL) { in xenvif_tx_submit()
1117 pending_idx = XENVIF_TX_CB(skb)->pending_idx; in xenvif_tx_submit()
1118 txp = &queue->pending_tx_info[pending_idx].req; in xenvif_tx_submit()
1126 skb_shinfo(skb)->nr_frags = 0; in xenvif_tx_submit()
1129 skb_shinfo(skb)->frag_list; in xenvif_tx_submit()
1130 skb_shinfo(nskb)->nr_frags = 0; in xenvif_tx_submit()
1136 data_len = skb->len; in xenvif_tx_submit()
1138 if (data_len < txp->size) { in xenvif_tx_submit()
1140 txp->offset += data_len; in xenvif_tx_submit()
1141 txp->size -= data_len; in xenvif_tx_submit()
1148 if (txp->flags & XEN_NETTXF_csum_blank) in xenvif_tx_submit()
1149 skb->ip_summed = CHECKSUM_PARTIAL; in xenvif_tx_submit()
1150 else if (txp->flags & XEN_NETTXF_data_validated) in xenvif_tx_submit()
1151 skb->ip_summed = CHECKSUM_UNNECESSARY; in xenvif_tx_submit()
1156 struct sk_buff *nskb = skb_shinfo(skb)->frag_list; in xenvif_tx_submit()
1160 netdev_err(queue->vif->dev, in xenvif_tx_submit()
1166 /* Copied all the bits from the frag list -- free it. */ in xenvif_tx_submit()
1171 skb->dev = queue->vif->dev; in xenvif_tx_submit()
1172 skb->protocol = eth_type_trans(skb, skb->dev); in xenvif_tx_submit()
1176 netdev_dbg(queue->vif->dev, in xenvif_tx_submit()
1179 if (skb_shinfo(skb)->destructor_arg) in xenvif_tx_submit()
1201 mss = skb_shinfo(skb)->gso_size; in xenvif_tx_submit()
1202 hdrlen = skb_transport_header(skb) - in xenvif_tx_submit()
1206 skb_shinfo(skb)->gso_segs = in xenvif_tx_submit()
1207 DIV_ROUND_UP(skb->len - hdrlen, mss); in xenvif_tx_submit()
1210 queue->stats.rx_bytes += skb->len; in xenvif_tx_submit()
1211 queue->stats.rx_packets++; in xenvif_tx_submit()
1220 if (skb_shinfo(skb)->destructor_arg) { in xenvif_tx_submit()
1222 queue->stats.tx_zerocopy_sent++; in xenvif_tx_submit()
1240 spin_lock_irqsave(&queue->callback_lock, flags); in xenvif_zerocopy_callback()
1242 u16 pending_idx = ubuf->desc; in xenvif_zerocopy_callback()
1243 ubuf = (struct ubuf_info *) ubuf->ctx; in xenvif_zerocopy_callback()
1244 BUG_ON(queue->dealloc_prod - queue->dealloc_cons >= in xenvif_zerocopy_callback()
1246 index = pending_index(queue->dealloc_prod); in xenvif_zerocopy_callback()
1247 queue->dealloc_ring[index] = pending_idx; in xenvif_zerocopy_callback()
1252 queue->dealloc_prod++; in xenvif_zerocopy_callback()
1254 spin_unlock_irqrestore(&queue->callback_lock, flags); in xenvif_zerocopy_callback()
1257 queue->stats.tx_zerocopy_success++; in xenvif_zerocopy_callback()
1259 queue->stats.tx_zerocopy_fail++; in xenvif_zerocopy_callback()
1270 dc = queue->dealloc_cons; in xenvif_tx_dealloc_action()
1271 gop = queue->tx_unmap_ops; in xenvif_tx_dealloc_action()
1275 dp = queue->dealloc_prod; in xenvif_tx_dealloc_action()
1283 BUG_ON(gop - queue->tx_unmap_ops >= MAX_PENDING_REQS); in xenvif_tx_dealloc_action()
1285 queue->dealloc_ring[pending_index(dc++)]; in xenvif_tx_dealloc_action()
1287 pending_idx_release[gop - queue->tx_unmap_ops] = in xenvif_tx_dealloc_action()
1289 queue->pages_to_unmap[gop - queue->tx_unmap_ops] = in xenvif_tx_dealloc_action()
1290 queue->mmap_pages[pending_idx]; in xenvif_tx_dealloc_action()
1294 queue->grant_tx_handle[pending_idx]); in xenvif_tx_dealloc_action()
1299 } while (dp != queue->dealloc_prod); in xenvif_tx_dealloc_action()
1301 queue->dealloc_cons = dc; in xenvif_tx_dealloc_action()
1303 if (gop - queue->tx_unmap_ops > 0) { in xenvif_tx_dealloc_action()
1305 ret = gnttab_unmap_refs(queue->tx_unmap_ops, in xenvif_tx_dealloc_action()
1307 queue->pages_to_unmap, in xenvif_tx_dealloc_action()
1308 gop - queue->tx_unmap_ops); in xenvif_tx_dealloc_action()
1310 netdev_err(queue->vif->dev, "Unmap fail: nr_ops %tu ret %d\n", in xenvif_tx_dealloc_action()
1311 gop - queue->tx_unmap_ops, ret); in xenvif_tx_dealloc_action()
1312 for (i = 0; i < gop - queue->tx_unmap_ops; ++i) { in xenvif_tx_dealloc_action()
1314 netdev_err(queue->vif->dev, in xenvif_tx_dealloc_action()
1324 for (i = 0; i < gop - queue->tx_unmap_ops; ++i) in xenvif_tx_dealloc_action()
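xenvif_tx_dealloc_action() above drains the dealloc ring: it walks from dealloc_cons to dealloc_prod, gathers the pending indices and their pages into one array of unmap operations, and only then issues a single gnttab_unmap_refs() call for the whole batch. Below is a generic sketch of that gather-then-batch pattern over a free-running producer/consumer index ring; the ring size and helper names are hypothetical, and the memory barriers the driver uses between reading the producer index and the slots are omitted.

#include <stddef.h>

#define DEALLOC_SLOTS 64U  /* hypothetical size; power of two for masking */

struct dealloc_ring {
	unsigned short ring[DEALLOC_SLOTS];
	unsigned int cons;  /* free-running consumer counter */
	unsigned int prod;  /* free-running producer counter */
};

/* Gather everything currently queued in the ring into 'batch' and
 * return the number of entries, so the caller can issue one batched
 * operation for the whole set instead of one call per entry. */
static size_t drain_dealloc_ring(struct dealloc_ring *r,
				 unsigned short *batch, size_t batch_len)
{
	size_t n = 0;

	while (r->cons != r->prod && n < batch_len) {
		unsigned int idx = r->cons++ & (DEALLOC_SLOTS - 1);

		batch[n++] = r->ring[idx];
	}
	return n;
}

int main(void)
{
	struct dealloc_ring r = { .ring = { 7, 8, 9 }, .cons = 0, .prod = 3 };
	unsigned short batch[DEALLOC_SLOTS];

	return drain_dealloc_ring(&r, batch, DEALLOC_SLOTS) == 3 ? 0 : 1;
}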
1344 gnttab_batch_copy(queue->tx_copy_ops, nr_cops); in xenvif_tx_action()
1346 ret = gnttab_map_refs(queue->tx_map_ops, in xenvif_tx_action()
1348 queue->pages_to_map, in xenvif_tx_action()
1365 pending_tx_info = &queue->pending_tx_info[pending_idx]; in xenvif_idx_release()
1367 spin_lock_irqsave(&queue->response_lock, flags); in xenvif_idx_release()
1369 make_tx_response(queue, &pending_tx_info->req, in xenvif_idx_release()
1370 pending_tx_info->extra_count, status); in xenvif_idx_release()
1376 index = pending_index(queue->pending_prod++); in xenvif_idx_release()
1377 queue->pending_ring[index] = pending_idx; in xenvif_idx_release()
1381 spin_unlock_irqrestore(&queue->response_lock, flags); in xenvif_idx_release()
1390 RING_IDX i = queue->tx.rsp_prod_pvt; in make_tx_response()
1393 resp = RING_GET_RESPONSE(&queue->tx, i); in make_tx_response()
1394 resp->id = txp->id; in make_tx_response()
1395 resp->status = st; in make_tx_response()
1397 while (extra_count-- != 0) in make_tx_response()
1398 RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL; in make_tx_response()
1400 queue->tx.rsp_prod_pvt = ++i; in make_tx_response()
1407 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify); in push_tx_responses()
1409 notify_remote_via_irq(queue->tx_irq); in push_tx_responses()
1420 queue->grant_tx_handle[pending_idx]); in xenvif_idx_unmap()
1424 &queue->mmap_pages[pending_idx], 1); in xenvif_idx_unmap()
1426 netdev_err(queue->vif->dev, in xenvif_idx_unmap()
1439 if (likely(RING_HAS_UNCONSUMED_REQUESTS(&queue->tx))) in tx_work_todo()
1447 return queue->dealloc_cons != queue->dealloc_prod; in tx_dealloc_work_todo()
1452 if (queue->tx.sring) in xenvif_unmap_frontend_data_rings()
1453 xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif), in xenvif_unmap_frontend_data_rings()
1454 queue->tx.sring); in xenvif_unmap_frontend_data_rings()
1455 if (queue->rx.sring) in xenvif_unmap_frontend_data_rings()
1456 xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif), in xenvif_unmap_frontend_data_rings()
1457 queue->rx.sring); in xenvif_unmap_frontend_data_rings()
1468 int err = -ENOMEM; in xenvif_map_frontend_data_rings()
1470 err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif), in xenvif_map_frontend_data_rings()
1476 rsp_prod = READ_ONCE(txs->rsp_prod); in xenvif_map_frontend_data_rings()
1477 req_prod = READ_ONCE(txs->req_prod); in xenvif_map_frontend_data_rings()
1479 BACK_RING_ATTACH(&queue->tx, txs, rsp_prod, XEN_PAGE_SIZE); in xenvif_map_frontend_data_rings()
1481 err = -EIO; in xenvif_map_frontend_data_rings()
1482 if (req_prod - rsp_prod > RING_SIZE(&queue->tx)) in xenvif_map_frontend_data_rings()
1485 err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif), in xenvif_map_frontend_data_rings()
1491 rsp_prod = READ_ONCE(rxs->rsp_prod); in xenvif_map_frontend_data_rings()
1492 req_prod = READ_ONCE(rxs->req_prod); in xenvif_map_frontend_data_rings()
1494 BACK_RING_ATTACH(&queue->rx, rxs, rsp_prod, XEN_PAGE_SIZE); in xenvif_map_frontend_data_rings()
1496 err = -EIO; in xenvif_map_frontend_data_rings()
1497 if (req_prod - rsp_prod > RING_SIZE(&queue->rx)) in xenvif_map_frontend_data_rings()
1513 !atomic_read(&queue->inflight_packets); in xenvif_dealloc_kthread_should_stop()
1521 wait_event_interruptible(queue->dealloc_wq, in xenvif_dealloc_kthread()
1542 RING_IDX idx = vif->ctrl.rsp_prod_pvt; in make_ctrl_response()
1544 .id = req->id, in make_ctrl_response()
1545 .type = req->type, in make_ctrl_response()
1550 *RING_GET_RESPONSE(&vif->ctrl, idx) = rsp; in make_ctrl_response()
1551 vif->ctrl.rsp_prod_pvt = ++idx; in make_ctrl_response()
1558 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->ctrl, notify); in push_ctrl_response()
1560 notify_remote_via_irq(vif->ctrl_irq); in push_ctrl_response()
1569 switch (req->type) { in process_ctrl_request()
1571 status = xenvif_set_hash_alg(vif, req->data[0]); in process_ctrl_request()
1579 status = xenvif_set_hash_flags(vif, req->data[0]); in process_ctrl_request()
1583 status = xenvif_set_hash_key(vif, req->data[0], in process_ctrl_request()
1584 req->data[1]); in process_ctrl_request()
1594 req->data[0]); in process_ctrl_request()
1598 status = xenvif_set_hash_mapping(vif, req->data[0], in process_ctrl_request()
1599 req->data[1], in process_ctrl_request()
1600 req->data[2]); in process_ctrl_request()
1616 req_prod = vif->ctrl.sring->req_prod; in xenvif_ctrl_action()
1617 req_cons = vif->ctrl.req_cons; in xenvif_ctrl_action()
1628 RING_COPY_REQUEST(&vif->ctrl, req_cons, &req); in xenvif_ctrl_action()
1634 vif->ctrl.req_cons = req_cons; in xenvif_ctrl_action()
1635 vif->ctrl.sring->req_event = req_cons + 1; in xenvif_ctrl_action()
1641 if (likely(RING_HAS_UNCONSUMED_REQUESTS(&vif->ctrl))) in xenvif_ctrl_work_todo()
1667 return -ENODEV; in netback_init()
1670 * specified a value. in netback_init()
1687 xen_netback_dbg_root = debugfs_create_dir("xen-netback", NULL); in netback_init()
1708 MODULE_ALIAS("xen-backend:vif");