Lines matching "queue", "-", "rx" in the xen-netback guest Rx path (drivers/net/xen-netback/rx.c)
 * Copyright (c) 2002-2005, K A Fraser
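/* xenvif_rx_ring_slots_available(): check whether the frontend has posted
 * enough Rx request slots for the skb at the head of rx_queue (one slot per
 * XEN_PAGE_SIZE of data, with extra room needed when skb->sw_hash is set);
 * if not, arm req_event so the frontend notifies us when more arrive.
 */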
static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
	spin_lock_irqsave(&queue->rx_queue.lock, flags);
	skb = skb_peek(&queue->rx_queue);
	spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
	needed = DIV_ROUND_UP(skb->len, XEN_PAGE_SIZE);
	if (skb->sw_hash)
	spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
	prod = queue->rx.sring->req_prod;
	cons = queue->rx.req_cons;
	if (prod - cons >= needed)
	queue->rx.sring->req_event = prod + 1;
	} while (queue->rx.sring->req_prod != prod);
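/* xenvif_rx_queue_tail(): queue an skb for delivery to the guest; once the
 * queued bytes exceed rx_queue_max, stop the matching netdev tx queue to
 * apply backpressure.
 */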
void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
	spin_lock_irqsave(&queue->rx_queue.lock, flags);
	__skb_queue_tail(&queue->rx_queue, skb);
	queue->rx_queue_len += skb->len;
	if (queue->rx_queue_len > queue->rx_queue_max) {
		struct net_device *dev = queue->vif->dev;
		netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
	spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
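/* xenvif_rx_dequeue(): pop the next queued skb and restart the netdev tx
 * queue once the queued byte count drops back below rx_queue_max.
 */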
static struct sk_buff *xenvif_rx_dequeue(struct xenvif_queue *queue)
	spin_lock_irq(&queue->rx_queue.lock);
	skb = __skb_dequeue(&queue->rx_queue);
	queue->rx_queue_len -= skb->len;
	if (queue->rx_queue_len < queue->rx_queue_max) {
		txq = netdev_get_tx_queue(queue->vif->dev, queue->id);
	spin_unlock_irq(&queue->rx_queue.lock);
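/* xenvif_rx_queue_purge() drops everything still queued;
 * xenvif_rx_queue_drop_expired() drops only skbs whose
 * XENVIF_RX_CB(skb)->expires deadline has already passed.
 */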
static void xenvif_rx_queue_purge(struct xenvif_queue *queue)
	while ((skb = xenvif_rx_dequeue(queue)) != NULL)

static void xenvif_rx_queue_drop_expired(struct xenvif_queue *queue)
	skb = skb_peek(&queue->rx_queue);
	if (time_before(jiffies, XENVIF_RX_CB(skb)->expires))
	xenvif_rx_dequeue(queue);
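/* xenvif_rx_copy_flush(): submit the batched grant copies, propagate any
 * failure status into the matching ring response, push the responses and
 * notify the frontend via rx_irq if required, then free the completed skbs.
 */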
static void xenvif_rx_copy_flush(struct xenvif_queue *queue)
	gnttab_batch_copy(queue->rx_copy.op, queue->rx_copy.num);
	for (i = 0; i < queue->rx_copy.num; i++) {
		op = &queue->rx_copy.op[i];
		if (unlikely(op->status != GNTST_okay)) {
			rsp = RING_GET_RESPONSE(&queue->rx,
						queue->rx_copy.idx[i]);
			rsp->status = op->status;
	queue->rx_copy.num = 0;
	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->rx, notify);
		notify_remote_via_irq(queue->rx_irq);
	__skb_queue_purge(queue->rx_copy.completed);
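/* xenvif_rx_copy_add(): append one grant-copy operation to the batch
 * (flushing first if it is full), copying from local data, or from a
 * foreign grant when the skb data is backed by a granted page, into the
 * page referenced by the frontend's Rx request.
 */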
static void xenvif_rx_copy_add(struct xenvif_queue *queue,
	if (queue->rx_copy.num == COPY_BATCH_SIZE)
		xenvif_rx_copy_flush(queue);
	op = &queue->rx_copy.op[queue->rx_copy.num];
	op->flags = GNTCOPY_dest_gref;
		op->source.domid = foreign->domid;
		op->source.u.ref = foreign->gref;
		op->flags |= GNTCOPY_source_gref;
		op->source.u.gmfn = virt_to_gfn(data);
		op->source.domid = DOMID_SELF;
	op->source.offset = xen_offset_in_page(data);
	op->dest.u.ref = req->gref;
	op->dest.domid = queue->vif->domid;
	op->dest.offset = offset;
	op->len = len;
	queue->rx_copy.idx[queue->rx_copy.num] = queue->rx.req_cons;
	queue->rx_copy.num++;
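/* A minimal sketch of the per-queue copy-batch state used above, inferred
 * from the fields referenced in this listing; the real definition lives
 * elsewhere in the driver and may differ in detail.
 */
struct xenvif_copy_state {
	struct gnttab_copy op[COPY_BATCH_SIZE];	/* pending grant-copy ops */
	RING_IDX idx[COPY_BATCH_SIZE];		/* ring index for each op's response */
	unsigned int num;			/* ops queued so far */
	struct sk_buff_head *completed;		/* skbs to free after the flush */
};

/* The next line is from xenvif_gso_type(), which maps the skb's GSO type to
 * the corresponding XEN_NETIF_GSO_TYPE_* value.
 */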
	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)

	int frag; /* frag == -1 => frag_iter->head */
	struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
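/* The two fields above belong to the per-packet state passed around as
 * 'pkt' below.  A sketch reconstructed from the members referenced in this
 * listing (field order, and anything not referenced here, is an assumption):
 */
struct xenvif_pkt_state {
	struct sk_buff *skb;
	size_t remaining_len;
	struct sk_buff *frag_iter;
	int frag;			/* frag == -1 => frag_iter->head */
	unsigned int frag_offset;
	struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
	unsigned int extra_count;
	unsigned int slot;
};

/* xenvif_rx_next_skb(): dequeue the next skb and initialise the packet
 * state, preparing GSO, XDP-headroom and hash extra-info segments as
 * needed.
 */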
static void xenvif_rx_next_skb(struct xenvif_queue *queue,
	skb = xenvif_rx_dequeue(queue);
	queue->stats.tx_bytes += skb->len;
	queue->stats.tx_packets++;
	pkt->skb = skb;
	pkt->frag_iter = skb;
	pkt->remaining_len = skb->len;
	pkt->frag = -1;
	if ((1 << gso_type) & queue->vif->gso_mask) {
		extra = &pkt->extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
		extra->u.gso.type = gso_type;
		extra->u.gso.size = skb_shinfo(skb)->gso_size;
		extra->u.gso.pad = 0;
		extra->u.gso.features = 0;
		extra->type = XEN_NETIF_EXTRA_TYPE_GSO;
		extra->flags = 0;
		pkt->extra_count++;
	if (queue->vif->xdp_headroom) {
		extra = &pkt->extras[XEN_NETIF_EXTRA_TYPE_XDP - 1];
		extra->u.xdp.headroom = queue->vif->xdp_headroom;
		extra->type = XEN_NETIF_EXTRA_TYPE_XDP;
		extra->flags = 0;
		pkt->extra_count++;
	if (skb->sw_hash) {
		extra = &pkt->extras[XEN_NETIF_EXTRA_TYPE_HASH - 1];
		extra->u.hash.algorithm =
		if (skb->l4_hash)
			extra->u.hash.type =
				skb->protocol == htons(ETH_P_IP) ?
			extra->u.hash.type =
				skb->protocol == htons(ETH_P_IP) ?
		*(uint32_t *)extra->u.hash.value = skb_get_hash_raw(skb);
		extra->type = XEN_NETIF_EXTRA_TYPE_HASH;
		extra->flags = 0;
		pkt->extra_count++;
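/* xenvif_rx_complete(): every slot for this packet has been filled; advance
 * rsp_prod_pvt to cover them and park the skb on the completed list so it
 * is freed once the copy batch is flushed.
 */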
static void xenvif_rx_complete(struct xenvif_queue *queue,
	queue->rx.rsp_prod_pvt = queue->rx.req_cons;
	__skb_queue_tail(queue->rx_copy.completed, pkt->skb);
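/* The following lines are from xenvif_rx_next_frag(): advance the fragment
 * iterator, moving from the linear head (frag == -1) through frags[] and
 * then along the frag_list chain.
 */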
	struct sk_buff *frag_iter = pkt->frag_iter;
	unsigned int nr_frags = skb_shinfo(frag_iter)->nr_frags;
	pkt->frag++;
	pkt->frag_offset = 0;
	if (pkt->frag >= nr_frags) {
		if (frag_iter == pkt->skb)
			pkt->frag_iter = skb_shinfo(frag_iter)->frag_list;
			pkt->frag_iter = frag_iter->next;
		pkt->frag = -1;
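/* xenvif_rx_next_chunk(): return the next chunk of packet data, capped so
 * that neither the source data nor the destination slot crosses a
 * XEN_PAGE_SIZE boundary.
 */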
static void xenvif_rx_next_chunk(struct xenvif_queue *queue,
	struct sk_buff *frag_iter = pkt->frag_iter;
	if (pkt->frag == -1) {
		frag_data = frag_iter->data;
		skb_frag_t *frag = &skb_shinfo(frag_iter)->frags[pkt->frag];
	frag_data += pkt->frag_offset;
	frag_len -= pkt->frag_offset;
	chunk_len = min_t(size_t, frag_len, XEN_PAGE_SIZE - offset);
	chunk_len = min_t(size_t, chunk_len, XEN_PAGE_SIZE -
	pkt->frag_offset += chunk_len;
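/* xenvif_rx_data_slot(): fill one ring slot with packet data (after any XDP
 * headroom), set checksum and extra-info flags on the first slot, and write
 * the response, with the byte count carried in rsp->status.
 */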
static void xenvif_rx_data_slot(struct xenvif_queue *queue,
	unsigned int offset = queue->vif->xdp_headroom;
		xenvif_rx_next_chunk(queue, pkt, offset, &data, &len);
		xenvif_rx_copy_add(queue, req, offset, data, len);
		pkt->remaining_len -= len;
	} while (offset < XEN_PAGE_SIZE && pkt->remaining_len > 0);
	if (pkt->remaining_len > 0)
	if (pkt->slot == 0) {
		struct sk_buff *skb = pkt->skb;
		if (skb->ip_summed == CHECKSUM_PARTIAL)
		else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
		if (pkt->extra_count != 0)
	rsp->offset = 0;
	rsp->flags = flags;
	rsp->id = req->id;
	rsp->status = (s16)offset;
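/* xenvif_rx_extra_slot(): consume one ring slot for a pending extra-info
 * segment, setting XEN_NETIF_EXTRA_FLAG_MORE while further extras remain.
 */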
static void xenvif_rx_extra_slot(struct xenvif_queue *queue,
	pkt->extra_count--;
	for (i = 0; i < ARRAY_SIZE(pkt->extras); i++) {
		if (pkt->extras[i].type) {
			*extra = pkt->extras[i];
			if (pkt->extra_count != 0)
				extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE;
			pkt->extras[i].type = 0;
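/* xenvif_rx_skb(): emit one packet, consuming one ring request per
 * extra-info segment and per data slot until the packet is complete.
 */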
static void xenvif_rx_skb(struct xenvif_queue *queue)
	xenvif_rx_next_skb(queue, &pkt);
	queue->last_rx_time = jiffies;
		req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons);
		rsp = RING_GET_RESPONSE(&queue->rx, queue->rx.req_cons);
			xenvif_rx_extra_slot(queue, &pkt, req, rsp);
			xenvif_rx_data_slot(queue, &pkt, req, rsp);
		queue->rx.req_cons++;
	xenvif_rx_complete(queue, &pkt);
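/* xenvif_rx_action(): the main guest Rx loop; while ring slots and queued
 * skbs are available, transmit packets, then flush the grant-copy batch,
 * which also completes and frees the transmitted skbs.
 */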
void xenvif_rx_action(struct xenvif_queue *queue)
	queue->rx_copy.completed = &completed_skbs;
	while (xenvif_rx_ring_slots_available(queue) &&
		xenvif_rx_skb(queue);
	xenvif_rx_copy_flush(queue);
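/* xenvif_rx_queue_stalled()/xenvif_rx_queue_ready(): a queue counts as
 * stalled when the frontend has left no unconsumed Rx requests for longer
 * than stall_timeout, and as ready again once at least one request is
 * available.
 */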
static bool xenvif_rx_queue_stalled(struct xenvif_queue *queue)
	prod = queue->rx.sring->req_prod;
	cons = queue->rx.req_cons;
	return !queue->stalled &&
		prod - cons < 1 &&
		queue->last_rx_time + queue->vif->stall_timeout);

static bool xenvif_rx_queue_ready(struct xenvif_queue *queue)
	prod = queue->rx.sring->req_prod;
	cons = queue->rx.req_cons;
	return queue->stalled && prod - cons >= 1;
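/* xenvif_have_rx_work(): the wake-up condition for the Rx kthread: ring
 * slots available for the head skb, a stall-state change to handle, or a
 * disabled (rogue) interface to deal with.
 */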
bool xenvif_have_rx_work(struct xenvif_queue *queue, bool test_kthread)
	return xenvif_rx_ring_slots_available(queue) ||
		(queue->vif->stall_timeout &&
		 (xenvif_rx_queue_stalled(queue) ||
		  xenvif_rx_queue_ready(queue))) ||
		queue->vif->disabled;
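/* xenvif_rx_queue_timeout(): how long the kthread may sleep before the skb
 * at the head of rx_queue reaches its expiry time.
 */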
static long xenvif_rx_queue_timeout(struct xenvif_queue *queue)
	skb = skb_peek(&queue->rx_queue);
	timeout = XENVIF_RX_CB(skb)->expires - jiffies;
/* Wait until the guest Rx thread has work.
 *
 * The timeout needs to be adjusted based on the current head of the
 * queue (and not just the head at the beginning).  In particular, if
 * the queue is initially empty an infinite timeout is used and this
 * needs to be checked again.
 */
static void xenvif_wait_for_rx_work(struct xenvif_queue *queue)
	if (xenvif_have_rx_work(queue, true))
		prepare_to_wait(&queue->wq, &wait, TASK_INTERRUPTIBLE);
		if (xenvif_have_rx_work(queue, true))
			&queue->eoi_pending) &
			xen_irq_lateeoi(queue->rx_irq, 0);
		ret = schedule_timeout(xenvif_rx_queue_timeout(queue));
	finish_wait(&queue->wq, &wait);
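/* xenvif_queue_carrier_off()/_on(): track the per-vif count of stalled
 * queues; the carrier goes down when the first queue stalls and comes back
 * up when the last stalled queue becomes ready again.
 */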
static void xenvif_queue_carrier_off(struct xenvif_queue *queue)
	struct xenvif *vif = queue->vif;
	queue->stalled = true;
	/* At least one queue has stalled? Disable the carrier. */
	spin_lock(&vif->lock);
	if (vif->stalled_queues++ == 0) {
		netdev_info(vif->dev, "Guest Rx stalled");
		netif_carrier_off(vif->dev);
	spin_unlock(&vif->lock);

static void xenvif_queue_carrier_on(struct xenvif_queue *queue)
	struct xenvif *vif = queue->vif;
	queue->last_rx_time = jiffies; /* Reset Rx stall detection. */
	queue->stalled = false;
	spin_lock(&vif->lock);
	if (--vif->stalled_queues == 0) {
		netdev_info(vif->dev, "Guest Rx ready");
		netif_carrier_on(vif->dev);
	spin_unlock(&vif->lock);
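/* The remaining lines are from xenvif_kthread_guest_rx(), the per-queue Rx
 * kernel thread: wait for work, disable a rogue frontend from the thread
 * bound to queue 0, deliver queued packets, update the stall state and drop
 * expired skbs; on exit, purge anything still queued.
 */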
	struct xenvif_queue *queue = data;
	struct xenvif *vif = queue->vif;
	if (!vif->stall_timeout)
		xenvif_queue_carrier_on(queue);
		xenvif_wait_for_rx_work(queue);
		/* A disabled (rogue) frontend is handled from kthread
		 * context; only the thread associated with queue 0 does it.
		 */
		if (unlikely(vif->disabled && queue->id == 0)) {
		if (!skb_queue_empty(&queue->rx_queue))
			xenvif_rx_action(queue);
		/* If the guest hasn't provided any Rx slots for a while,
		 * treat the queue as stalled and drop the carrier (and
		 * bring it back once the queue becomes ready again).
		 */
		if (vif->stall_timeout) {
			if (xenvif_rx_queue_stalled(queue))
				xenvif_queue_carrier_off(queue);
			else if (xenvif_rx_queue_ready(queue))
				xenvif_queue_carrier_on(queue);
		xenvif_rx_queue_drop_expired(queue);
	xenvif_rx_queue_purge(queue);