Lines matching full:queue in drivers/net/xen-netback/interface.c
46 /* Number of bytes allowed on the internal guest Rx queue. */
54 void xenvif_skb_zerocopy_prepare(struct xenvif_queue *queue, in xenvif_skb_zerocopy_prepare() argument
58 atomic_inc(&queue->inflight_packets); in xenvif_skb_zerocopy_prepare()
61 void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue) in xenvif_skb_zerocopy_complete() argument
63 atomic_dec(&queue->inflight_packets); in xenvif_skb_zerocopy_complete()
69 wake_up(&queue->dealloc_wq); in xenvif_skb_zerocopy_complete()
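
The prepare/complete pair above keeps a per-queue count of skbs whose pages are still granted to the frontend: prepare bumps inflight_packets, complete drops it again and only then wakes the dealloc waiter. Below is a minimal user-space model of that counting discipline, using C11 atomics and invented names (demo_queue, demo_zerocopy_*); it is a sketch, not the driver's code:

    #include <stdatomic.h>
    #include <stdio.h>

    /* Hypothetical stand-in for xenvif_queue: only the in-flight counter. */
    struct demo_queue {
        atomic_int inflight_packets;
    };

    static void demo_zerocopy_prepare(struct demo_queue *q)
    {
        atomic_fetch_add(&q->inflight_packets, 1);   /* skb pages still granted out */
    }

    static void demo_zerocopy_complete(struct demo_queue *q)
    {
        /* Drop the count first, then notify; report when it reaches zero. */
        if (atomic_fetch_sub(&q->inflight_packets, 1) == 1)
            printf("last in-flight packet done, dealloc waiter would be woken\n");
    }

    int main(void)
    {
        struct demo_queue q = { .inflight_packets = 0 };

        demo_zerocopy_prepare(&q);
        demo_zerocopy_prepare(&q);
        demo_zerocopy_complete(&q);
        demo_zerocopy_complete(&q);   /* count hits zero here */
        return 0;
    }
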
79 static bool xenvif_handle_tx_interrupt(struct xenvif_queue *queue) in xenvif_handle_tx_interrupt() argument
83 rc = RING_HAS_UNCONSUMED_REQUESTS(&queue->tx); in xenvif_handle_tx_interrupt()
85 napi_schedule(&queue->napi); in xenvif_handle_tx_interrupt()
91 struct xenvif_queue *queue = dev_id; in xenvif_tx_interrupt() local
94 old = atomic_fetch_or(NETBK_TX_EOI, &queue->eoi_pending); in xenvif_tx_interrupt()
97 if (!xenvif_handle_tx_interrupt(queue)) { in xenvif_tx_interrupt()
98 atomic_andnot(NETBK_TX_EOI, &queue->eoi_pending); in xenvif_tx_interrupt()
107 struct xenvif_queue *queue = in xenvif_poll() local
115 if (unlikely(queue->vif->disabled)) { in xenvif_poll()
120 work_done = xenvif_tx_action(queue, budget); in xenvif_poll()
124 /* If the queue is rate-limited, it shall be in xenvif_poll()
127 if (likely(!queue->rate_limited)) in xenvif_poll()
128 xenvif_napi_schedule_or_enable_events(queue); in xenvif_poll()
134 static bool xenvif_handle_rx_interrupt(struct xenvif_queue *queue) in xenvif_handle_rx_interrupt() argument
138 rc = xenvif_have_rx_work(queue, false); in xenvif_handle_rx_interrupt()
140 xenvif_kick_thread(queue); in xenvif_handle_rx_interrupt()
146 struct xenvif_queue *queue = dev_id; in xenvif_rx_interrupt() local
149 old = atomic_fetch_or(NETBK_RX_EOI, &queue->eoi_pending); in xenvif_rx_interrupt()
152 if (!xenvif_handle_rx_interrupt(queue)) { in xenvif_rx_interrupt()
153 atomic_andnot(NETBK_RX_EOI, &queue->eoi_pending); in xenvif_rx_interrupt()
162 struct xenvif_queue *queue = dev_id; in xenvif_interrupt() local
166 old = atomic_fetch_or(NETBK_COMMON_EOI, &queue->eoi_pending); in xenvif_interrupt()
169 has_tx = xenvif_handle_tx_interrupt(queue); in xenvif_interrupt()
170 has_rx = xenvif_handle_rx_interrupt(queue); in xenvif_interrupt()
173 atomic_andnot(NETBK_COMMON_EOI, &queue->eoi_pending); in xenvif_interrupt()
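
All three interrupt handlers follow the same gating pattern: atomically set an EOI-pending bit before looking for work, and clear it again when nothing was found so the event channel can be acknowledged straight away instead of waiting for the NAPI poll or rx thread to finish. A condensed model of that flag dance follows; the names (demo_*, DEMO_TX_EOI) are invented for the sketch, and the real handlers issue the actual EOI through the Xen event-channel layer:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define DEMO_TX_EOI 0x1   /* stand-ins for the NETBK_*_EOI bits */
    #define DEMO_RX_EOI 0x2

    struct demo_queue {
        atomic_uint eoi_pending;
    };

    /* Returns true when the event should be acknowledged immediately (no work). */
    static bool demo_interrupt(struct demo_queue *q, unsigned int bit, bool has_work)
    {
        unsigned int old = atomic_fetch_or(&q->eoi_pending, bit);

        if (old & bit)
            fprintf(stderr, "interrupt while EOI still pending\n");

        if (!has_work) {
            /* Ring was empty: take our bit back and acknowledge right away. */
            atomic_fetch_and(&q->eoi_pending, ~bit);
            return true;
        }
        return false;   /* NAPI poll / rx thread drains the ring and acks later */
    }

    int main(void)
    {
        struct demo_queue q = { .eoi_pending = 0 };

        printf("tx irq, empty ring:  ack now = %d\n",
               demo_interrupt(&q, DEMO_TX_EOI, false));
        printf("rx irq, work queued: ack now = %d\n",
               demo_interrupt(&q, DEMO_RX_EOI, true));
        return 0;
    }
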
210 struct xenvif_queue *queue = NULL; in xenvif_start_xmit() local
225 /* Obtain the queue to be used to transmit this packet */ in xenvif_start_xmit()
228 pr_warn_ratelimited("Invalid queue %hu for packet on interface %s\n", in xenvif_start_xmit()
232 queue = &vif->queues[index]; in xenvif_start_xmit()
234 /* Drop the packet if queue is not ready */ in xenvif_start_xmit()
235 if (queue->task == NULL || in xenvif_start_xmit()
236 queue->dealloc_task == NULL || in xenvif_start_xmit()
257 if (!xenvif_rx_queue_tail(queue, skb)) in xenvif_start_xmit()
260 xenvif_kick_thread(queue); in xenvif_start_xmit()
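
By the time xenvif_start_xmit() runs, the core stack has already chosen a queue index; the driver validates it, drops the packet while the queue's kthreads are not yet running, appends the skb to the internal guest-Rx queue and kicks the rx kthread. A standalone sketch of that hand-off, with invented types and an arbitrary 16-slot limit standing in for the byte-based rx_queue_max limit the driver actually uses:

    #include <stdbool.h>
    #include <stdio.h>

    struct demo_skb { unsigned int len; };

    struct demo_queue {
        bool task_running;            /* stands in for queue->task / dealloc_task */
        struct demo_skb pending[16];  /* internal guest-Rx queue, 16 slots here */
        unsigned int count;
    };

    /* Returns true if the packet was queued for the rx kthread, false if dropped. */
    static bool demo_start_xmit(struct demo_queue *queues, unsigned int nr_queues,
                                unsigned int index, struct demo_skb skb)
    {
        if (index >= nr_queues)
            index %= nr_queues;       /* bogus index from the stack: warn and wrap */

        struct demo_queue *q = &queues[index];

        if (!q->task_running || q->count >= 16)
            return false;             /* queue not ready (or full): drop the packet */

        q->pending[q->count++] = skb; /* tail of the internal guest-Rx queue */
        /* ...the driver would now kick the rx kthread to push it to the frontend */
        return true;
    }

    int main(void)
    {
        struct demo_queue queues[2] = { { .task_running = true }, { 0 } };
        struct demo_skb skb = { .len = 64 };

        printf("queue 0: queued = %d\n", demo_start_xmit(queues, 2, 0, skb));
        printf("queue 1: queued = %d\n", demo_start_xmit(queues, 2, 1, skb));
        return 0;
    }
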
273 struct xenvif_queue *queue = NULL; in xenvif_get_stats() local
284 /* Aggregate tx and rx stats from each queue */ in xenvif_get_stats()
286 queue = &vif->queues[index]; in xenvif_get_stats()
287 rx_bytes += queue->stats.rx_bytes; in xenvif_get_stats()
288 rx_packets += queue->stats.rx_packets; in xenvif_get_stats()
289 tx_bytes += queue->stats.tx_bytes; in xenvif_get_stats()
290 tx_packets += queue->stats.tx_packets; in xenvif_get_stats()
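
xenvif_get_stats() keeps no device-wide counters; every call walks vif->queues and sums the per-queue totals. The same aggregation as a standalone sketch (struct and field names are illustrative):

    #include <stdio.h>

    struct demo_stats { unsigned long rx_bytes, rx_packets, tx_bytes, tx_packets; };
    struct demo_queue { struct demo_stats stats; };

    /* One pass over all queues, summing each counter into a single total. */
    static struct demo_stats demo_get_stats(const struct demo_queue *queues,
                                            unsigned int num_queues)
    {
        struct demo_stats total = { 0 };

        for (unsigned int i = 0; i < num_queues; i++) {
            total.rx_bytes   += queues[i].stats.rx_bytes;
            total.rx_packets += queues[i].stats.rx_packets;
            total.tx_bytes   += queues[i].stats.tx_bytes;
            total.tx_packets += queues[i].stats.tx_packets;
        }
        return total;
    }

    int main(void)
    {
        struct demo_queue queues[2] = {
            { .stats = { .rx_bytes = 1500, .rx_packets = 1 } },
            { .stats = { .tx_bytes = 900,  .tx_packets = 2 } },
        };
        struct demo_stats t = demo_get_stats(queues, 2);

        printf("rx %lu pkts / %lu bytes, tx %lu pkts / %lu bytes\n",
               t.rx_packets, t.rx_bytes, t.tx_packets, t.tx_bytes);
        return 0;
    }
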
305 struct xenvif_queue *queue = NULL; in xenvif_up() local
310 queue = &vif->queues[queue_index]; in xenvif_up()
311 napi_enable(&queue->napi); in xenvif_up()
312 enable_irq(queue->tx_irq); in xenvif_up()
313 if (queue->tx_irq != queue->rx_irq) in xenvif_up()
314 enable_irq(queue->rx_irq); in xenvif_up()
315 xenvif_napi_schedule_or_enable_events(queue); in xenvif_up()
321 struct xenvif_queue *queue = NULL; in xenvif_down() local
326 queue = &vif->queues[queue_index]; in xenvif_down()
327 disable_irq(queue->tx_irq); in xenvif_down()
328 if (queue->tx_irq != queue->rx_irq) in xenvif_down()
329 disable_irq(queue->rx_irq); in xenvif_down()
330 napi_disable(&queue->napi); in xenvif_down()
331 del_timer_sync(&queue->credit_timeout); in xenvif_down()
562 int xenvif_init_queue(struct xenvif_queue *queue) in xenvif_init_queue() argument
566 queue->credit_bytes = queue->remaining_credit = ~0UL; in xenvif_init_queue()
567 queue->credit_usec = 0UL; in xenvif_init_queue()
568 timer_setup(&queue->credit_timeout, xenvif_tx_credit_callback, 0); in xenvif_init_queue()
569 queue->credit_window_start = get_jiffies_64(); in xenvif_init_queue()
571 queue->rx_queue_max = XENVIF_RX_QUEUE_BYTES; in xenvif_init_queue()
573 skb_queue_head_init(&queue->rx_queue); in xenvif_init_queue()
574 skb_queue_head_init(&queue->tx_queue); in xenvif_init_queue()
576 queue->pending_cons = 0; in xenvif_init_queue()
577 queue->pending_prod = MAX_PENDING_REQS; in xenvif_init_queue()
579 queue->pending_ring[i] = i; in xenvif_init_queue()
581 spin_lock_init(&queue->callback_lock); in xenvif_init_queue()
582 spin_lock_init(&queue->response_lock); in xenvif_init_queue()
589 queue->mmap_pages); in xenvif_init_queue()
591 netdev_err(queue->vif->dev, "Could not reserve mmap_pages\n"); in xenvif_init_queue()
596 queue->pending_tx_info[i].callback_struct = (struct ubuf_info_msgzc) in xenvif_init_queue()
600 queue->grant_tx_handle[i] = NETBACK_INVALID_HANDLE; in xenvif_init_queue()
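
The pending_ring initialised above is a ring of free slot indices: it starts completely free (pending_cons = 0, pending_prod = MAX_PENDING_REQS) with slot i stored at position i, and completed slots are pushed back at the producer end. A toy version of that free-list ring, with a made-up size and names:

    #include <assert.h>
    #include <stdio.h>

    #define DEMO_PENDING_REQS 8U   /* stands in for MAX_PENDING_REQS */

    struct demo_ring {
        unsigned int pending_ring[DEMO_PENDING_REQS];
        unsigned int pending_cons, pending_prod;
    };

    static void demo_ring_init(struct demo_ring *r)
    {
        r->pending_cons = 0;
        r->pending_prod = DEMO_PENDING_REQS;   /* prod - cons == number of free slots */
        for (unsigned int i = 0; i < DEMO_PENDING_REQS; i++)
            r->pending_ring[i] = i;            /* every slot starts out free */
    }

    /* Hand out the next free slot index. */
    static unsigned int demo_get_slot(struct demo_ring *r)
    {
        assert(r->pending_prod - r->pending_cons > 0);
        return r->pending_ring[r->pending_cons++ % DEMO_PENDING_REQS];
    }

    /* Return a completed slot to the free ring at the producer end. */
    static void demo_put_slot(struct demo_ring *r, unsigned int slot)
    {
        r->pending_ring[r->pending_prod++ % DEMO_PENDING_REQS] = slot;
    }

    int main(void)
    {
        struct demo_ring r;

        demo_ring_init(&r);
        unsigned int a = demo_get_slot(&r);
        unsigned int b = demo_get_slot(&r);
        printf("got slots %u and %u\n", a, b);
        demo_put_slot(&r, a);                  /* slot a is free to reuse */
        return 0;
    }
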
672 static void xenvif_disconnect_queue(struct xenvif_queue *queue) in xenvif_disconnect_queue() argument
674 if (queue->task) { in xenvif_disconnect_queue()
675 kthread_stop(queue->task); in xenvif_disconnect_queue()
676 put_task_struct(queue->task); in xenvif_disconnect_queue()
677 queue->task = NULL; in xenvif_disconnect_queue()
680 if (queue->dealloc_task) { in xenvif_disconnect_queue()
681 kthread_stop(queue->dealloc_task); in xenvif_disconnect_queue()
682 queue->dealloc_task = NULL; in xenvif_disconnect_queue()
685 if (queue->napi.poll) { in xenvif_disconnect_queue()
686 netif_napi_del(&queue->napi); in xenvif_disconnect_queue()
687 queue->napi.poll = NULL; in xenvif_disconnect_queue()
690 if (queue->tx_irq) { in xenvif_disconnect_queue()
691 unbind_from_irqhandler(queue->tx_irq, queue); in xenvif_disconnect_queue()
692 if (queue->tx_irq == queue->rx_irq) in xenvif_disconnect_queue()
693 queue->rx_irq = 0; in xenvif_disconnect_queue()
694 queue->tx_irq = 0; in xenvif_disconnect_queue()
697 if (queue->rx_irq) { in xenvif_disconnect_queue()
698 unbind_from_irqhandler(queue->rx_irq, queue); in xenvif_disconnect_queue()
699 queue->rx_irq = 0; in xenvif_disconnect_queue()
702 xenvif_unmap_frontend_data_rings(queue); in xenvif_disconnect_queue()
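
Each block in xenvif_disconnect_queue() checks its handle before releasing it and clears it afterwards, so the function is safe to run against a queue that was only partially brought up; it also doubles as the unwind path for xenvif_connect_data() below. The shape of that idempotent teardown, reduced to a sketch with invented resource types:

    #include <stdio.h>
    #include <stdlib.h>

    /* Invented stand-ins for the queue's kthreads and bound IRQs. */
    struct demo_queue {
        char *task;
        char *dealloc_task;
        int   tx_irq;
        int   rx_irq;
    };

    static void demo_disconnect_queue(struct demo_queue *q)
    {
        if (q->task) {                    /* release only what was actually set up */
            free(q->task);
            q->task = NULL;               /* clear so a second call is a no-op */
        }

        if (q->dealloc_task) {
            free(q->dealloc_task);
            q->dealloc_task = NULL;
        }

        if (q->tx_irq) {
            if (q->tx_irq == q->rx_irq)   /* shared event channel: single unbind */
                q->rx_irq = 0;
            q->tx_irq = 0;
        }

        if (q->rx_irq)                    /* split channels: rx still to release */
            q->rx_irq = 0;
    }

    int main(void)
    {
        /* Partially connected queue: rx kthread started, shared irq bound,
         * dealloc kthread never created. */
        struct demo_queue q = { .task = malloc(16), .tx_irq = 7, .rx_irq = 7 };

        demo_disconnect_queue(&q);        /* tears down what exists */
        demo_disconnect_queue(&q);        /* harmless second call */
        printf("tx_irq=%d rx_irq=%d\n", q.tx_irq, q.rx_irq);
        return 0;
    }
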
705 int xenvif_connect_data(struct xenvif_queue *queue, in xenvif_connect_data() argument
711 struct xenbus_device *dev = xenvif_to_xenbus_device(queue->vif); in xenvif_connect_data()
715 BUG_ON(queue->tx_irq); in xenvif_connect_data()
716 BUG_ON(queue->task); in xenvif_connect_data()
717 BUG_ON(queue->dealloc_task); in xenvif_connect_data()
719 err = xenvif_map_frontend_data_rings(queue, tx_ring_ref, in xenvif_connect_data()
724 init_waitqueue_head(&queue->wq); in xenvif_connect_data()
725 init_waitqueue_head(&queue->dealloc_wq); in xenvif_connect_data()
726 atomic_set(&queue->inflight_packets, 0); in xenvif_connect_data()
728 netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll); in xenvif_connect_data()
730 queue->stalled = true; in xenvif_connect_data()
732 task = kthread_run(xenvif_kthread_guest_rx, queue, in xenvif_connect_data()
733 "%s-guest-rx", queue->name); in xenvif_connect_data()
736 queue->task = task; in xenvif_connect_data()
743 task = kthread_run(xenvif_dealloc_kthread, queue, in xenvif_connect_data()
744 "%s-dealloc", queue->name); in xenvif_connect_data()
747 queue->dealloc_task = task; in xenvif_connect_data()
753 queue->name, queue); in xenvif_connect_data()
756 queue->tx_irq = queue->rx_irq = err; in xenvif_connect_data()
757 disable_irq(queue->tx_irq); in xenvif_connect_data()
760 snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name), in xenvif_connect_data()
761 "%s-tx", queue->name); in xenvif_connect_data()
764 queue->tx_irq_name, queue); in xenvif_connect_data()
767 queue->tx_irq = err; in xenvif_connect_data()
768 disable_irq(queue->tx_irq); in xenvif_connect_data()
770 snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name), in xenvif_connect_data()
771 "%s-rx", queue->name); in xenvif_connect_data()
774 queue->rx_irq_name, queue); in xenvif_connect_data()
777 queue->rx_irq = err; in xenvif_connect_data()
778 disable_irq(queue->rx_irq); in xenvif_connect_data()
784 pr_warn("Could not allocate kthread for %s\n", queue->name); in xenvif_connect_data()
787 xenvif_disconnect_queue(queue); in xenvif_connect_data()
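
xenvif_connect_data() brings the queue up in stages: map the shared rings, start the guest-rx and dealloc kthreads, then bind either one shared or two split event channels. Any failure funnels into an error path that ends in xenvif_disconnect_queue(), which undoes only the stages that completed. A skeleton of that goto-unwind structure, with placeholder step functions:

    #include <stdio.h>

    /* Placeholder setup steps; each returns 0 on success like the driver helpers. */
    static int map_rings(void)        { return 0; }
    static int start_rx_kthread(void) { return 0; }
    static int start_dealloc(void)    { return 0; }
    static int bind_event_chns(void)  { return -1; }   /* force the error path */

    /* Idempotent teardown, as sketched after xenvif_disconnect_queue() above. */
    static void disconnect_queue(void)
    {
        puts("unwinding whatever was set up");
    }

    static int connect_data(void)
    {
        int err;

        err = map_rings();
        if (err)
            goto err;

        err = start_rx_kthread();
        if (err)
            goto err;

        err = start_dealloc();
        if (err)
            goto err;

        err = bind_event_chns();
        if (err)
            goto err;

        return 0;

    err:
        disconnect_queue();   /* safe even if only some of the stages completed */
        return err;
    }

    int main(void)
    {
        printf("connect_data() = %d\n", connect_data());
        return 0;
    }
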
806 struct xenvif_queue *queue = NULL; in xenvif_disconnect_data() local
813 queue = &vif->queues[queue_index]; in xenvif_disconnect_data()
815 xenvif_disconnect_queue(queue); in xenvif_disconnect_data()
837 * Used for queue teardown from xenvif_free(), and on the
840 void xenvif_deinit_queue(struct xenvif_queue *queue) in xenvif_deinit_queue() argument
842 gnttab_free_pages(MAX_PENDING_REQS, queue->mmap_pages); in xenvif_deinit_queue()