Lines matching refs:bq (kernel/bpf/devmap.c)
344 static void bq_xmit_all(struct xdp_dev_bulk_queue *bq, u32 flags) in bq_xmit_all() argument
346 struct net_device *dev = bq->dev; in bq_xmit_all()
350 if (unlikely(!bq->count)) in bq_xmit_all()
353 for (i = 0; i < bq->count; i++) { in bq_xmit_all()
354 struct xdp_frame *xdpf = bq->q[i]; in bq_xmit_all()
359 sent = dev->netdev_ops->ndo_xdp_xmit(dev, bq->count, bq->q, flags); in bq_xmit_all()
365 drops = bq->count - sent; in bq_xmit_all()
367 bq->count = 0; in bq_xmit_all()
369 trace_xdp_devmap_xmit(bq->dev_rx, dev, sent, drops, err); in bq_xmit_all()
370 bq->dev_rx = NULL; in bq_xmit_all()
371 __list_del_clearprev(&bq->flush_node); in bq_xmit_all()
377 for (i = 0; i < bq->count; i++) { in bq_xmit_all()
378 struct xdp_frame *xdpf = bq->q[i]; in bq_xmit_all()
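Read together, the bq_xmit_all() lines above form one pattern: return early on an empty queue, prefetch the queued frames, hand the whole q[] array to the driver's ndo_xdp_xmit() in a single call, count the shortfall as drops, and, when the driver returns an errno, release every queued frame before zeroing the queue. Below is a minimal userspace sketch of that flow; mock_frame, mock_bq, and mock_xdp_xmit() are hypothetical stand-ins for the kernel types, and the prefetch loop and tracepoint are omitted:

#include <stdio.h>
#include <stdlib.h>

#define BULK_SIZE 16                    /* stands in for DEV_MAP_BULK_SIZE */

struct mock_frame { int id; };          /* stands in for struct xdp_frame */

struct mock_bq {                        /* stands in for xdp_dev_bulk_queue */
        struct mock_frame *q[BULK_SIZE];
        int count;
};

/* Mock driver hook standing in for ndo_xdp_xmit(): takes up to 'budget'
 * frames and consumes (frees) everything it was handed, returning the
 * number sent; a negative budget simulates a hard errno, in which case
 * no frame is touched and the caller still owns the array.
 */
static int mock_xdp_xmit(struct mock_frame **frames, int n, int budget)
{
        int i;

        if (budget < 0)
                return -5;              /* stands in for -EIO */

        for (i = 0; i < n; i++)
                free(frames[i]);
        return n < budget ? n : budget;
}

static void mock_bq_xmit_all(struct mock_bq *bq, int budget)
{
        int sent = 0, drops = 0, err = 0, i;

        if (!bq->count)                 /* mirrors unlikely(!bq->count) */
                return;

        sent = mock_xdp_xmit(bq->q, bq->count, budget);
        if (sent < 0) {
                /* Hard errno: nothing was consumed, so release the whole
                 * array ourselves, like the error loop over bq->q[i] in
                 * the listing above.
                 */
                err = sent;
                sent = 0;
                for (i = 0; i < bq->count; i++) {
                        free(bq->q[i]);
                        drops++;
                }
        } else {
                drops = bq->count - sent;   /* partial send: tail dropped */
        }

        bq->count = 0;                  /* queue is empty either way */
        printf("sent=%d drops=%d err=%d\n", sent, drops, err);
}

int main(void)
{
        struct mock_bq bq = { .count = 0 };
        int i;

        for (i = 0; i < 4; i++) {
                bq.q[i] = malloc(sizeof(*bq.q[i]));
                bq.q[i]->id = i;
                bq.count++;
        }
        mock_bq_xmit_all(&bq, 2);       /* driver takes 2 of 4: sent=2 drops=2 */
        return 0;
}

The point of the single batch call is to amortize per-packet driver overhead: one ndo_xdp_xmit() invocation can fill the TX ring and ring the doorbell once for up to DEV_MAP_BULK_SIZE frames, instead of paying that cost per frame.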
399 struct xdp_dev_bulk_queue *bq, *tmp; in __dev_flush() local
401 list_for_each_entry_safe(bq, tmp, flush_list, flush_node) in __dev_flush()
402 bq_xmit_all(bq, XDP_XMIT_FLUSH); in __dev_flush()
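__dev_flush() drains the per-CPU flush list, and it needs the _safe variant of the list walk because bq_xmit_all() removes each node from the list via __list_del_clearprev() while the walk is in progress. Here is a toy sketch of that safe-iteration pattern, with a hand-rolled singly linked list standing in for <linux/list.h>; flush_node and the toy_* names are hypothetical:

#include <stdio.h>

struct flush_node {
        struct flush_node *next;
        int dev_id;                     /* stands in for the queue's net_device */
        int pending;                    /* stands in for bq->count */
};

/* Flush one queue; like the real bq_xmit_all(), the node takes itself
 * off the flush list as part of the flush, which is exactly why the
 * walk below must cache 'next' first (the kernel uses
 * list_for_each_entry_safe for the same reason).
 */
static void toy_bq_xmit_all(struct flush_node *bq)
{
        printf("dev %d: flushed %d pending frames\n", bq->dev_id, bq->pending);
        bq->pending = 0;
        bq->next = NULL;                /* node is no longer on the list */
}

static void toy_dev_flush(struct flush_node **head)
{
        struct flush_node *bq = *head, *tmp;

        while (bq) {
                tmp = bq->next;         /* grab next before bq unlinks itself */
                toy_bq_xmit_all(bq);
                bq = tmp;
        }
        *head = NULL;                   /* per-CPU list is now empty */
}

int main(void)
{
        struct flush_node b = { NULL, 2, 3 };
        struct flush_node a = { &b, 1, 16 };
        struct flush_node *head = &a;

        toy_dev_flush(&head);
        return 0;
}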
428 struct xdp_dev_bulk_queue *bq = this_cpu_ptr(dev->xdp_bulkq); in bq_enqueue() local
430 if (unlikely(bq->count == DEV_MAP_BULK_SIZE)) in bq_enqueue()
431 bq_xmit_all(bq, 0); in bq_enqueue()
437 if (!bq->dev_rx) in bq_enqueue()
438 bq->dev_rx = dev_rx; in bq_enqueue()
440 bq->q[bq->count++] = xdpf; in bq_enqueue()
442 if (!bq->flush_node.prev) in bq_enqueue()
443 list_add(&bq->flush_node, flush_list); in bq_enqueue()
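bq_enqueue() is the producer side of the same scheme: flush eagerly only when the ring is full, stamp the ingress device once per batch (every frame in a per-CPU bulk queue shares the same dev_rx), append the frame, and use a NULL flush_node.prev as the cheap "not yet on the flush list" test, which is exactly the state __list_del_clearprev() restores after a flush. In the compact sketch below, list membership is reduced to a boolean since the safe list walk was already sketched above; the toy_* names are hypothetical stand-ins:

#include <stdio.h>
#include <stdbool.h>

#define BULK_SIZE 4                     /* small stand-in for DEV_MAP_BULK_SIZE */

struct toy_frame { int id; };

struct toy_bq {
        struct toy_frame *q[BULK_SIZE];
        int count;
        int dev_rx;                     /* ingress device id; 0 = unset */
        bool on_flush_list;             /* the kernel encodes this as
                                         * bq->flush_node.prev != NULL */
};

static void toy_bq_xmit_all(struct toy_bq *bq)
{
        printf("xmit %d frames (dev_rx=%d)\n", bq->count, bq->dev_rx);
        bq->count = 0;
        bq->dev_rx = 0;
        bq->on_flush_list = false;      /* what __list_del_clearprev() achieves */
}

static void toy_bq_enqueue(struct toy_bq *bq, struct toy_frame *xdpf, int dev_rx)
{
        if (bq->count == BULK_SIZE)     /* ring full: flush before queueing */
                toy_bq_xmit_all(bq);

        if (!bq->dev_rx)                /* one ingress dev per batch */
                bq->dev_rx = dev_rx;

        bq->q[bq->count++] = xdpf;

        if (!bq->on_flush_list)         /* register for the end-of-poll drain */
                bq->on_flush_list = true;
}

int main(void)
{
        struct toy_bq bq = {0};
        struct toy_frame f[6] = { {0}, {1}, {2}, {3}, {4}, {5} };

        for (int i = 0; i < 6; i++)     /* the 5th enqueue triggers a flush */
                toy_bq_enqueue(&bq, &f[i], 7);

        if (bq.on_flush_list)           /* end-of-poll drain, like __dev_flush() */
                toy_bq_xmit_all(&bq);
        return 0;
}

The queue thus flushes on two triggers: eagerly when the ring fills mid-poll, and at the end of the NAPI poll when __dev_flush() walks the flush list.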