Lines matching full:queue in drivers/net/xen-netback/rx.c (the xen-netback guest-RX path in the Linux kernel)

36 static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue) in xenvif_rx_ring_slots_available() argument
42 skb = skb_peek(&queue->rx_queue); in xenvif_rx_ring_slots_available()
53 prod = queue->rx.sring->req_prod; in xenvif_rx_ring_slots_available()
54 cons = queue->rx.req_cons; in xenvif_rx_ring_slots_available()
59 queue->rx.sring->req_event = prod + 1; in xenvif_rx_ring_slots_available()
65 } while (queue->rx.sring->req_prod != prod); in xenvif_rx_ring_slots_available()
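
The availability check above is the classic shared-ring producer/consumer handshake: compare req_prod against req_cons, and if there is not enough room, write req_event = prod + 1 so the frontend interrupts us at its next request, then re-read req_prod to close the race with a frontend that posted in the meantime. A minimal user-space sketch of that arithmetic follows; struct ring and slots_available() are hypothetical stand-ins, and in the source the number of slots needed is derived from the skb returned by skb_peek():

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-in for the shared ring indices (RING_IDX in Xen). */
    struct ring {
        volatile uint32_t req_prod;  /* advanced by the frontend */
        uint32_t req_cons;           /* our private consumer index */
        volatile uint32_t req_event; /* "interrupt me at this request" */
    };

    static bool slots_available(struct ring *r, uint32_t needed)
    {
        uint32_t prod;

        do {
            prod = r->req_prod;
            if (prod - r->req_cons >= needed)
                return true;
            r->req_event = prod + 1;   /* ask to be notified */
        } while (r->req_prod != prod); /* frontend raced us: re-check */

        return false;
    }

    int main(void)
    {
        struct ring r = { .req_prod = 3, .req_cons = 0, .req_event = 0 };

        printf("need 2: %d\n", slots_available(&r, 2)); /* 1 */
        printf("need 8: %d\n", slots_available(&r, 8)); /* 0, req_event armed */
        return 0;
    }

The unsigned subtraction prod - cons is deliberate: it stays correct when the indices wrap, just like the kernel's RING_IDX arithmetic.
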
70 void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb) in xenvif_rx_queue_tail() argument
74 spin_lock_irqsave(&queue->rx_queue.lock, flags); in xenvif_rx_queue_tail()
76 __skb_queue_tail(&queue->rx_queue, skb); in xenvif_rx_queue_tail()
78 queue->rx_queue_len += skb->len; in xenvif_rx_queue_tail()
79 if (queue->rx_queue_len > queue->rx_queue_max) { in xenvif_rx_queue_tail()
80 struct net_device *dev = queue->vif->dev; in xenvif_rx_queue_tail()
82 netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id)); in xenvif_rx_queue_tail()
85 spin_unlock_irqrestore(&queue->rx_queue.lock, flags); in xenvif_rx_queue_tail()
88 static struct sk_buff *xenvif_rx_dequeue(struct xenvif_queue *queue) in xenvif_rx_dequeue() argument
92 spin_lock_irq(&queue->rx_queue.lock); in xenvif_rx_dequeue()
94 skb = __skb_dequeue(&queue->rx_queue); in xenvif_rx_dequeue()
96 queue->rx_queue_len -= skb->len; in xenvif_rx_dequeue()
97 if (queue->rx_queue_len < queue->rx_queue_max) { in xenvif_rx_dequeue()
100 txq = netdev_get_tx_queue(queue->vif->dev, queue->id); in xenvif_rx_dequeue()
105 spin_unlock_irq(&queue->rx_queue.lock); in xenvif_rx_dequeue()
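
xenvif_rx_queue_tail() and xenvif_rx_dequeue() pair up to give the internal rx_queue byte-based backpressure: crossing rx_queue_max on enqueue stops the matching netdev TX queue, and dropping back under it on dequeue wakes it. A sketch of just that accounting, with the spinlocks and netdev calls elided; struct byte_limit and both helpers are hypothetical:

    #include <stdbool.h>
    #include <stdio.h>

    struct byte_limit {
        unsigned long len, max;
        bool producer_stopped;
    };

    static void account_enqueue(struct byte_limit *b, unsigned int bytes)
    {
        b->len += bytes;
        if (b->len > b->max)
            b->producer_stopped = true;  /* netif_tx_stop_queue() */
    }

    static void account_dequeue(struct byte_limit *b, unsigned int bytes)
    {
        b->len -= bytes;
        if (b->len < b->max)
            b->producer_stopped = false; /* netif_tx_wake_queue() */
    }

    int main(void)
    {
        struct byte_limit b = { .len = 0, .max = 4096,
                                .producer_stopped = false };

        account_enqueue(&b, 6000);
        printf("stopped after big enqueue: %d\n", b.producer_stopped); /* 1 */
        account_dequeue(&b, 6000);
        printf("stopped after dequeue: %d\n", b.producer_stopped);     /* 0 */
        return 0;
    }
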
110 static void xenvif_rx_queue_purge(struct xenvif_queue *queue) in xenvif_rx_queue_purge() argument
114 while ((skb = xenvif_rx_dequeue(queue)) != NULL) in xenvif_rx_queue_purge()
118 static void xenvif_rx_queue_drop_expired(struct xenvif_queue *queue) in xenvif_rx_queue_drop_expired() argument
123 skb = skb_peek(&queue->rx_queue); in xenvif_rx_queue_drop_expired()
128 xenvif_rx_dequeue(queue); in xenvif_rx_queue_drop_expired()
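
xenvif_rx_queue_drop_expired() peeks at the head and keeps dequeuing for as long as the oldest skb has passed its deadline; because the FIFO is in arrival order, the first unexpired packet ends the sweep. A toy sketch of that head-only expiry scan; struct pkt, drop_expired() and the expires field are hypothetical (the source keeps its deadline in the skb's control block):

    #include <stdio.h>

    struct pkt { long expires; };

    static int drop_expired(const struct pkt *fifo, int n, long now)
    {
        int dropped = 0;

        /* Arrival order means only a prefix of the queue can be stale:
         * stop at the first packet that is still live. */
        while (dropped < n && fifo[dropped].expires <= now)
            dropped++;            /* xenvif_rx_dequeue() + kfree_skb() */
        return dropped;
    }

    int main(void)
    {
        const struct pkt fifo[] = { { 5 }, { 9 }, { 20 } };

        printf("dropped %d of 3 at t=10\n", drop_expired(fifo, 3, 10)); /* 2 */
        return 0;
    }
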
133 static void xenvif_rx_copy_flush(struct xenvif_queue *queue) in xenvif_rx_copy_flush() argument
138 gnttab_batch_copy(queue->rx_copy.op, queue->rx_copy.num); in xenvif_rx_copy_flush()
140 for (i = 0; i < queue->rx_copy.num; i++) { in xenvif_rx_copy_flush()
143 op = &queue->rx_copy.op[i]; in xenvif_rx_copy_flush()
151 rsp = RING_GET_RESPONSE(&queue->rx, in xenvif_rx_copy_flush()
152 queue->rx_copy.idx[i]); in xenvif_rx_copy_flush()
157 queue->rx_copy.num = 0; in xenvif_rx_copy_flush()
160 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->rx, notify); in xenvif_rx_copy_flush()
162 notify_remote_via_irq(queue->rx_irq); in xenvif_rx_copy_flush()
164 __skb_queue_purge(queue->rx_copy.completed); in xenvif_rx_copy_flush()
167 static void xenvif_rx_copy_add(struct xenvif_queue *queue, in xenvif_rx_copy_add() argument
175 if (queue->rx_copy.num == COPY_BATCH_SIZE) in xenvif_rx_copy_add()
176 xenvif_rx_copy_flush(queue); in xenvif_rx_copy_add()
178 op = &queue->rx_copy.op[queue->rx_copy.num]; in xenvif_rx_copy_add()
196 op->dest.domid = queue->vif->domid; in xenvif_rx_copy_add()
200 queue->rx_copy.idx[queue->rx_copy.num] = queue->rx.req_cons; in xenvif_rx_copy_add()
201 queue->rx_copy.num++; in xenvif_rx_copy_add()
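
xenvif_rx_copy_add() appends one grant-copy descriptor per chunk into a fixed array and records in idx[] which ring slot the copy status must later be written back to; when the array holds COPY_BATCH_SIZE entries it is flushed early, and gnttab_batch_copy() issues the whole batch in one hypercall. The batch-and-flush shape, sketched in plain C; apart from the COPY_BATCH_SIZE name and the flush-when-full rule everything here is hypothetical, and the real batch size is larger:

    #include <stdio.h>

    #define COPY_BATCH_SIZE 4 /* illustrative; the real value is larger */

    struct batch {
        int op[COPY_BATCH_SIZE];  /* stand-in for struct gnttab_copy */
        int idx[COPY_BATCH_SIZE]; /* ring slot each op's status targets */
        unsigned int num;
    };

    static void flush(struct batch *b)
    {
        /* gnttab_batch_copy() would issue every op in one hypercall here,
         * then each status would land in the response at idx[i]. */
        for (unsigned int i = 0; i < b->num; i++)
            printf("op %d -> ring slot %d\n", b->op[i], b->idx[i]);
        b->num = 0;
    }

    static void add(struct batch *b, int op, int ring_slot)
    {
        if (b->num == COPY_BATCH_SIZE) /* full: flush before adding */
            flush(b);
        b->op[b->num] = op;
        b->idx[b->num] = ring_slot;
        b->num++;
    }

    int main(void)
    {
        struct batch b = { .num = 0 };

        for (int i = 0; i < 6; i++)
            add(&b, i, 100 + i);
        flush(&b); /* final flush, as xenvif_rx_action() does */
        return 0;
    }
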
226 static void xenvif_rx_next_skb(struct xenvif_queue *queue, in xenvif_rx_next_skb() argument
232 skb = xenvif_rx_dequeue(queue); in xenvif_rx_next_skb()
234 queue->stats.tx_bytes += skb->len; in xenvif_rx_next_skb()
235 queue->stats.tx_packets++; in xenvif_rx_next_skb()
246 if ((1 << gso_type) & queue->vif->gso_mask) { in xenvif_rx_next_skb()
261 if (queue->vif->xdp_headroom) { in xenvif_rx_next_skb()
267 extra->u.xdp.headroom = queue->vif->xdp_headroom; in xenvif_rx_next_skb()
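
In xenvif_rx_next_skb() the per-packet metadata is emitted as "extra info" slots ahead of the data: a GSO extra only if the frontend negotiated that GSO type (the (1 << gso_type) & gso_mask test), and an XDP extra only if headroom was configured. The mask test in isolation; the enum values below are stand-ins for the XEN_NETIF_GSO_TYPE_* constants:

    #include <stdbool.h>
    #include <stdio.h>

    enum { GSO_NONE = 0, GSO_TCPV4 = 1, GSO_TCPV6 = 2 };

    /* A GSO extra slot is only emitted for types the frontend negotiated. */
    static bool needs_gso_extra(int gso_type, unsigned int gso_mask)
    {
        return gso_type != GSO_NONE && ((1u << gso_type) & gso_mask);
    }

    int main(void)
    {
        unsigned int mask = 1u << GSO_TCPV4; /* only TCPv4 negotiated */

        printf("tcp4: %d tcp6: %d\n",
               needs_gso_extra(GSO_TCPV4, mask),   /* 1 */
               needs_gso_extra(GSO_TCPV6, mask));  /* 0 */
        return 0;
    }
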
302 static void xenvif_rx_complete(struct xenvif_queue *queue, in xenvif_rx_complete() argument
306 queue->rx.rsp_prod_pvt = queue->rx.req_cons; in xenvif_rx_complete()
308 __skb_queue_tail(queue->rx_copy.completed, pkt->skb); in xenvif_rx_complete()
329 static void xenvif_rx_next_chunk(struct xenvif_queue *queue, in xenvif_rx_next_chunk() argument
367 static void xenvif_rx_data_slot(struct xenvif_queue *queue, in xenvif_rx_data_slot() argument
372 unsigned int offset = queue->vif->xdp_headroom; in xenvif_rx_data_slot()
379 xenvif_rx_next_chunk(queue, pkt, offset, &data, &len); in xenvif_rx_data_slot()
380 xenvif_rx_copy_add(queue, req, offset, data, len); in xenvif_rx_data_slot()
411 static void xenvif_rx_extra_slot(struct xenvif_queue *queue, in xenvif_rx_extra_slot() argument
435 static void xenvif_rx_skb(struct xenvif_queue *queue) in xenvif_rx_skb() argument
439 xenvif_rx_next_skb(queue, &pkt); in xenvif_rx_skb()
441 queue->last_rx_time = jiffies; in xenvif_rx_skb()
447 req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons); in xenvif_rx_skb()
448 rsp = RING_GET_RESPONSE(&queue->rx, queue->rx.req_cons); in xenvif_rx_skb()
452 xenvif_rx_extra_slot(queue, &pkt, req, rsp); in xenvif_rx_skb()
454 xenvif_rx_data_slot(queue, &pkt, req, rsp); in xenvif_rx_skb()
456 queue->rx.req_cons++; in xenvif_rx_skb()
460 xenvif_rx_complete(queue, &pkt); in xenvif_rx_skb()
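
xenvif_rx_skb() then consumes the ring one slot at a time: each iteration pairs the request and response at req_cons, fills either an extra-info slot or a data slot, and advances req_cons until the packet is fully described. A simulation of that slot loop; struct pkt_state, pkt_done() and the 2048-byte chunk are illustrative (the source copies up to a page per data slot):

    #include <stdbool.h>
    #include <stdio.h>

    struct pkt_state { int extras_left; int bytes_left; };

    static bool pkt_done(const struct pkt_state *p)
    {
        return p->extras_left == 0 && p->bytes_left == 0;
    }

    int main(void)
    {
        struct pkt_state pkt = { .extras_left = 1, .bytes_left = 3000 };
        unsigned int req_cons = 0;

        while (!pkt_done(&pkt)) {
            if (pkt.extras_left) {
                pkt.extras_left--;              /* xenvif_rx_extra_slot() */
                printf("slot %u: extra info\n", req_cons);
            } else {
                int chunk = pkt.bytes_left > 2048 ? 2048 : pkt.bytes_left;
                pkt.bytes_left -= chunk;        /* xenvif_rx_data_slot() */
                printf("slot %u: %d data bytes\n", req_cons, chunk);
            }
            req_cons++;                         /* queue->rx.req_cons++ */
        }
        return 0;
    }
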
465 void xenvif_rx_action(struct xenvif_queue *queue) in xenvif_rx_action() argument
471 queue->rx_copy.completed = &completed_skbs; in xenvif_rx_action()
473 while (xenvif_rx_ring_slots_available(queue) && in xenvif_rx_action()
475 xenvif_rx_skb(queue); in xenvif_rx_action()
480 xenvif_rx_copy_flush(queue); in xenvif_rx_action()
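
xenvif_rx_action() points rx_copy.completed at an on-stack list so that per-packet completion can defer freeing: the copy results only become valid after the single batched flush, so the skbs are purged afterwards rather than as each packet completes. The deferred-release shape; the list type, complete() and flush() are hypothetical stand-ins:

    #include <stdio.h>

    #define MAX_DONE 8

    struct done_list { int skb[MAX_DONE]; int n; };

    static void complete(struct done_list *d, int skb)
    {
        d->skb[d->n++] = skb;     /* __skb_queue_tail(completed, skb) */
    }

    static void flush(struct done_list *d)
    {
        /* gnttab_batch_copy() + RING_PUSH_RESPONSES would run here,
         * making the copies visible before anything is freed. */
        for (int i = 0; i < d->n; i++)
            printf("free skb %d\n", d->skb[i]); /* __skb_queue_purge() */
        d->n = 0;
    }

    int main(void)
    {
        struct done_list done = { .n = 0 };

        for (int i = 0; i < 3; i++)
            complete(&done, i); /* one per xenvif_rx_skb() */
        flush(&done);           /* single flush at the end of the action */
        return 0;
    }
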
483 static bool xenvif_rx_queue_stalled(struct xenvif_queue *queue) in xenvif_rx_queue_stalled() argument
487 prod = queue->rx.sring->req_prod; in xenvif_rx_queue_stalled()
488 cons = queue->rx.req_cons; in xenvif_rx_queue_stalled()
490 return !queue->stalled && in xenvif_rx_queue_stalled()
493 queue->last_rx_time + queue->vif->stall_timeout); in xenvif_rx_queue_stalled()
496 static bool xenvif_rx_queue_ready(struct xenvif_queue *queue) in xenvif_rx_queue_ready() argument
500 prod = queue->rx.sring->req_prod; in xenvif_rx_queue_ready()
501 cons = queue->rx.req_cons; in xenvif_rx_queue_ready()
503 return queue->stalled && prod - cons >= 1; in xenvif_rx_queue_ready()
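
The two predicates above implement stall hysteresis: a queue is declared stalled only if it was not already stalled, the frontend has no request outstanding, and that has persisted past stall_timeout; it is declared ready again the moment a single request appears. Sketched with a plain counter for time in place of jiffies/time_after(); struct q and both helpers are hypothetical:

    #include <stdbool.h>
    #include <stdio.h>

    struct q {
        unsigned int prod, cons; /* frontend request indices */
        long last_rx, stall_timeout;
        bool stalled;
    };

    static bool queue_stalled(const struct q *q, long now)
    {
        return !q->stalled &&
               q->prod - q->cons < 1 &&             /* nothing posted */
               now > q->last_rx + q->stall_timeout; /* time_after() stand-in */
    }

    static bool queue_ready(const struct q *q)
    {
        return q->stalled && q->prod - q->cons >= 1;
    }

    int main(void)
    {
        struct q q = { .prod = 10, .cons = 10,
                       .last_rx = 0, .stall_timeout = 100, .stalled = false };

        printf("stalled at t=200: %d\n", queue_stalled(&q, 200)); /* 1 */
        q.stalled = true;
        q.prod++;                      /* frontend posts one request */
        printf("ready again: %d\n", queue_ready(&q));             /* 1 */
        return 0;
    }
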
506 bool xenvif_have_rx_work(struct xenvif_queue *queue, bool test_kthread) in xenvif_have_rx_work() argument
508 return xenvif_rx_ring_slots_available(queue) || in xenvif_have_rx_work()
509 (queue->vif->stall_timeout && in xenvif_have_rx_work()
510 (xenvif_rx_queue_stalled(queue) || in xenvif_have_rx_work()
511 xenvif_rx_queue_ready(queue))) || in xenvif_have_rx_work()
513 queue->vif->disabled; in xenvif_have_rx_work()
516 static long xenvif_rx_queue_timeout(struct xenvif_queue *queue) in xenvif_rx_queue_timeout() argument
521 skb = skb_peek(&queue->rx_queue); in xenvif_rx_queue_timeout()
532 * queue (and not just the head at the beginning). In particular, if
533 * the queue is initially empty an infinite timeout is used and this
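
Per the comment above, the sleep length is derived from the packet at the head of rx_queue: wake exactly when the oldest skb would expire, and sleep forever when nothing is queued. A sketch of that derivation; rx_queue_timeout() and the expiry argument are hypothetical, while MAX_SCHEDULE_TIMEOUT is the kernel's real "sleep indefinitely" sentinel:

    #include <limits.h>
    #include <stdio.h>

    #define MAX_SCHEDULE_TIMEOUT LONG_MAX /* kernel's "sleep forever" value */

    static long rx_queue_timeout(const long *head_expiry, long now)
    {
        if (!head_expiry)
            return MAX_SCHEDULE_TIMEOUT; /* rx_queue is empty */
        return *head_expiry > now ? *head_expiry - now : 0;
    }

    int main(void)
    {
        long expiry = 150;

        printf("timeout: %ld\n", rx_queue_timeout(&expiry, 100)); /* 50 */
        printf("timeout: %ld\n", rx_queue_timeout(NULL, 100));    /* LONG_MAX */
        return 0;
    }
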
539 static void xenvif_wait_for_rx_work(struct xenvif_queue *queue) in xenvif_wait_for_rx_work() argument
543 if (xenvif_have_rx_work(queue, true)) in xenvif_wait_for_rx_work()
549 prepare_to_wait(&queue->wq, &wait, TASK_INTERRUPTIBLE); in xenvif_wait_for_rx_work()
550 if (xenvif_have_rx_work(queue, true)) in xenvif_wait_for_rx_work()
553 &queue->eoi_pending) & in xenvif_wait_for_rx_work()
555 xen_irq_lateeoi(queue->rx_irq, 0); in xenvif_wait_for_rx_work()
557 ret = schedule_timeout(xenvif_rx_queue_timeout(queue)); in xenvif_wait_for_rx_work()
561 finish_wait(&queue->wq, &wait); in xenvif_wait_for_rx_work()
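
xenvif_wait_for_rx_work() is the standard prepare_to_wait()/finish_wait() pattern: test the condition, register on the waitqueue, test again so a wakeup landing between the two tests is never lost, then sleep with the timeout computed above. The Xen-specific twist is that the rx interrupt's EOI was deferred and is issued only here, just before sleeping. A stubbed-out skeleton of that control flow; the globals and stubs are hypothetical, and only the call names in comments come from the source:

    #include <stdio.h>

    static int pending; /* stand-in for "rx work available" */

    static long schedule_timeout_stub(long t)
    {
        pending = 1;   /* pretend work arrived while we slept */
        return t - 10; /* pretend we woke with time left */
    }

    static void wait_for_work(long timeout)
    {
        if (pending)
            return;                        /* fast path, no sleep */

        do {
            printf("prepare_to_wait()\n"); /* register before re-test */
            if (pending)
                break;                     /* closes the lost-wakeup race */
            printf("xen_irq_lateeoi()\n"); /* deferred EOI, as in the source */
            timeout = schedule_timeout_stub(timeout);
        } while (!pending && timeout > 0);

        printf("finish_wait()\n");
    }

    int main(void)
    {
        wait_for_work(50);
        return 0;
    }
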
564 static void xenvif_queue_carrier_off(struct xenvif_queue *queue) in xenvif_queue_carrier_off() argument
566 struct xenvif *vif = queue->vif; in xenvif_queue_carrier_off()
568 queue->stalled = true; in xenvif_queue_carrier_off()
570 /* At least one queue has stalled? Disable the carrier. */ in xenvif_queue_carrier_off()
579 static void xenvif_queue_carrier_on(struct xenvif_queue *queue) in xenvif_queue_carrier_on() argument
581 struct xenvif *vif = queue->vif; in xenvif_queue_carrier_on()
583 queue->last_rx_time = jiffies; /* Reset Rx stall detection. */ in xenvif_queue_carrier_on()
584 queue->stalled = false; in xenvif_queue_carrier_on()
597 struct xenvif_queue *queue = data; in xenvif_kthread_guest_rx() local
598 struct xenvif *vif = queue->vif; in xenvif_kthread_guest_rx()
601 xenvif_queue_carrier_on(queue); in xenvif_kthread_guest_rx()
604 xenvif_wait_for_rx_work(queue); in xenvif_kthread_guest_rx()
614 * associated with queue 0. in xenvif_kthread_guest_rx()
616 if (unlikely(vif->disabled && queue->id == 0)) { in xenvif_kthread_guest_rx()
621 if (!skb_queue_empty(&queue->rx_queue)) in xenvif_kthread_guest_rx()
622 xenvif_rx_action(queue); in xenvif_kthread_guest_rx()
629 if (xenvif_rx_queue_stalled(queue)) in xenvif_kthread_guest_rx()
630 xenvif_queue_carrier_off(queue); in xenvif_kthread_guest_rx()
631 else if (xenvif_rx_queue_ready(queue)) in xenvif_kthread_guest_rx()
632 xenvif_queue_carrier_on(queue); in xenvif_kthread_guest_rx()
640 xenvif_rx_queue_drop_expired(queue); in xenvif_kthread_guest_rx()
646 xenvif_rx_queue_purge(queue); in xenvif_kthread_guest_rx()
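
Finally, the guest-RX kthread ties everything together: block until there is work, service the queue, re-evaluate the stall/carrier state, drop expired packets, and on thread exit purge whatever is still queued so no skb outlives the interface. A trace-style simulation of that iteration order; everything below is a stub, and the printed names mirror the calls in the listing above:

    #include <stdbool.h>
    #include <stdio.h>

    static bool should_stop(int iter) { return iter >= 2; }

    int main(void)
    {
        bool stalled = false;
        int iter = 0;

        while (!should_stop(iter++)) {
            printf("xenvif_wait_for_rx_work()\n");
            printf("xenvif_rx_action() if rx_queue is non-empty\n");
            /* Carrier tracks per-queue stall state, as in the source. */
            printf("carrier %s (stall re-check)\n", stalled ? "off" : "on");
            printf("xenvif_rx_queue_drop_expired()\n");
        }
        printf("xenvif_rx_queue_purge() on exit\n"); /* free leftovers */
        return 0;
    }
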