Lines matching refs: napi (drivers/net/virtio_net.c)

135 struct napi_struct napi; member
143 struct napi_struct napi; member
322 static void virtqueue_napi_schedule(struct napi_struct *napi, in virtqueue_napi_schedule() argument
325 if (napi_schedule_prep(napi)) { in virtqueue_napi_schedule()
327 __napi_schedule(napi); in virtqueue_napi_schedule()
331 static void virtqueue_napi_complete(struct napi_struct *napi, in virtqueue_napi_complete() argument
337 if (napi_complete_done(napi, processed)) { in virtqueue_napi_complete()
339 virtqueue_napi_schedule(napi, vq); in virtqueue_napi_complete()
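
For context, a minimal sketch of these two wrappers, assuming the usual mainline pairing of NAPI scheduling/completion with virtqueue callback suppression; virtqueue_disable_cb(), virtqueue_enable_cb_prepare() and virtqueue_poll() do not appear in the listing above and are assumptions here:

    #include <linux/netdevice.h>
    #include <linux/virtio.h>

    /* Schedule NAPI for a virtqueue and keep its callback (interrupt)
     * quiet while the poll routine is pending.
     */
    static void virtqueue_napi_schedule(struct napi_struct *napi,
                                        struct virtqueue *vq)
    {
            if (napi_schedule_prep(napi)) {
                    virtqueue_disable_cb(vq);
                    __napi_schedule(napi);
            }
    }

    /* Leave polled mode and rearm the callback; if more work slipped in
     * after rearming, reschedule instead of losing the event.
     */
    static void virtqueue_napi_complete(struct napi_struct *napi,
                                        struct virtqueue *vq, int processed)
    {
            int opaque;

            opaque = virtqueue_enable_cb_prepare(vq);
            if (napi_complete_done(napi, processed)) {
                    if (unlikely(virtqueue_poll(vq, opaque)))
                            virtqueue_napi_schedule(napi, vq);
            } else {
                    virtqueue_disable_cb(vq);
            }
    }
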
348 struct napi_struct *napi = &vi->sq[vq2txq(vq)].napi; in skb_xmit_done() local
353 if (napi->weight) in skb_xmit_done()
354 virtqueue_napi_schedule(napi, vq); in skb_xmit_done()
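
The TX virtqueue callback only hands work to NAPI when TX NAPI is enabled (napi->weight != 0). A sketch; the virtqueue_disable_cb() call and the netif_wake_subqueue() fallback for the non-NAPI case are assumptions not shown in the listing:

    /* TX virtqueue interrupt handler. */
    static void skb_xmit_done(struct virtqueue *vq)
    {
            struct virtnet_info *vi = vq->vdev->priv;
            struct napi_struct *napi = &vi->sq[vq2txq(vq)].napi;

            /* Suppress further interrupts until this queue is serviced. */
            virtqueue_disable_cb(vq);

            if (napi->weight)
                    /* TX NAPI enabled: reclaim skbs from virtnet_poll_tx(). */
                    virtqueue_napi_schedule(napi, vq);
            else
                    /* Assumed fallback: the stack was likely waiting for ring space. */
                    netif_wake_subqueue(vi->dev, vq2txq(vq));
    }
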
434 skb = napi_alloc_skb(&rq->napi, GOOD_COPY_LEN); in page_to_skb()
1163 napi_gro_receive(&rq->napi, skb); in receive_buf()
1351 virtqueue_napi_schedule(&rq->napi, rvq); in skb_recv_done()
1354 static void virtnet_napi_enable(struct virtqueue *vq, struct napi_struct *napi) in virtnet_napi_enable() argument
1356 napi_enable(napi); in virtnet_napi_enable()
1363 virtqueue_napi_schedule(napi, vq); in virtnet_napi_enable()
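
virtnet_napi_enable() schedules NAPI once right after enabling it, so buffers that arrived while NAPI was disabled (and whose interrupt was consequently missed) still get processed. A sketch; the local_bh_disable()/local_bh_enable() bracket is an assumption (when called from process context it lets the raised softirq run as soon as bh is re-enabled):

    static void virtnet_napi_enable(struct virtqueue *vq, struct napi_struct *napi)
    {
            napi_enable(napi);

            /* If packets came in while NAPI was off, their interrupt may
             * already be gone; poll once unconditionally to catch up.
             */
            local_bh_disable();
            virtqueue_napi_schedule(napi, vq);
            local_bh_enable();
    }
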
1369 struct napi_struct *napi) in virtnet_napi_tx_enable() argument
1371 if (!napi->weight) in virtnet_napi_tx_enable()
1378 napi->weight = 0; in virtnet_napi_tx_enable()
1382 return virtnet_napi_enable(vq, napi); in virtnet_napi_tx_enable()
1385 static void virtnet_napi_tx_disable(struct napi_struct *napi) in virtnet_napi_tx_disable() argument
1387 if (napi->weight) in virtnet_napi_tx_disable()
1388 napi_disable(napi); in virtnet_napi_tx_disable()
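
TX NAPI is opt-in: virtnet_napi_tx_enable() bails out when the weight is 0, and virtnet_napi_tx_disable() only disables what was actually enabled. A sketch; the affinity_hint_set check (skip TX NAPI when queues are not affine to the interrupting CPU) is an assumption not visible in the listing:

    static void virtnet_napi_tx_enable(struct virtnet_info *vi,
                                       struct virtqueue *vq,
                                       struct napi_struct *napi)
    {
            if (!napi->weight)
                    return;

            /* Assumed: TX NAPI only pays off when the TX path runs on the CPU
             * that also takes the TX interrupt; otherwise fall back to weight 0.
             */
            if (!vi->affinity_hint_set) {
                    napi->weight = 0;
                    return;
            }

            return virtnet_napi_enable(vq, napi);
    }

    static void virtnet_napi_tx_disable(struct napi_struct *napi)
    {
            if (napi->weight)
                    napi_disable(napi);
    }
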
1401 napi_disable(&rq->napi); in refill_work()
1403 virtnet_napi_enable(rq->vq, &rq->napi); in refill_work()
1509 if (!sq->napi.weight || is_xdp_raw_buffer_queue(vi, index)) in virtnet_poll_cleantx()
1525 static int virtnet_poll(struct napi_struct *napi, int budget) in virtnet_poll() argument
1528 container_of(napi, struct receive_queue, napi); in virtnet_poll()
1540 virtqueue_napi_complete(napi, rq->vq, received); in virtnet_poll()
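
virtnet_poll() follows the standard NAPI contract: process at most 'budget' RX buffers and complete NAPI (rearming the interrupt) only when the queue drained early. A simplified sketch; virtnet_receive() is assumed to be the driver's RX helper, and XDP flush handling is omitted:

    static int virtnet_poll(struct napi_struct *napi, int budget)
    {
            struct receive_queue *rq =
                    container_of(napi, struct receive_queue, napi);
            unsigned int xdp_xmit = 0;
            unsigned int received;

            virtnet_poll_cleantx(rq);       /* opportunistic TX reclaim */
            received = virtnet_receive(rq, budget, &xdp_xmit);

            /* Out of packets? Leave polled mode and re-enable the interrupt. */
            if (received < budget)
                    virtqueue_napi_complete(napi, rq->vq, received);

            return received;
    }
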
1569 err = xdp_rxq_info_reg(&vi->rq[i].xdp_rxq, dev, i, vi->rq[i].napi.napi_id); in virtnet_open()
1580 virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); in virtnet_open()
1581 virtnet_napi_tx_enable(vi, vi->sq[i].vq, &vi->sq[i].napi); in virtnet_open()
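
In virtnet_open() each queue pair registers its RX queue for XDP with the NAPI id (so busy polling can find the right context) and then enables both the RX and the TX NAPI instances. A sketch of that loop; buffer refill and the XDP memory-model registration are omitted:

    static int virtnet_open(struct net_device *dev)
    {
            struct virtnet_info *vi = netdev_priv(dev);
            int i, err;

            for (i = 0; i < vi->max_queue_pairs; i++) {
                    err = xdp_rxq_info_reg(&vi->rq[i].xdp_rxq, dev, i,
                                           vi->rq[i].napi.napi_id);
                    if (err < 0)
                            return err;

                    virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
                    virtnet_napi_tx_enable(vi, vi->sq[i].vq, &vi->sq[i].napi);
            }

            return 0;
    }
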
1587 static int virtnet_poll_tx(struct napi_struct *napi, int budget) in virtnet_poll_tx() argument
1589 struct send_queue *sq = container_of(napi, struct send_queue, napi); in virtnet_poll_tx()
1598 napi_complete_done(napi, 0); in virtnet_poll_tx()
1612 done = napi_complete_done(napi, 0); in virtnet_poll_tx()
1621 if (napi_schedule_prep(napi)) { in virtnet_poll_tx()
1625 __napi_schedule(napi); in virtnet_poll_tx()
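
virtnet_poll_tx() reclaims completed TX buffers under the tx queue lock, always reports 0 work to napi_complete_done() (TX cleanup does not count against the budget), and reschedules itself if completions raced with rearming the callback. A simplified sketch; free_old_xmit_skbs() is assumed to be the driver's reclaim helper, and the queue-wake logic of the real function is left out:

    static int virtnet_poll_tx(struct napi_struct *napi, int budget)
    {
            struct send_queue *sq = container_of(napi, struct send_queue, napi);
            struct virtnet_info *vi = sq->vq->vdev->priv;
            unsigned int index = vq2txq(sq->vq);
            struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index);
            int opaque;
            bool done;

            if (unlikely(is_xdp_raw_buffer_queue(vi, index))) {
                    /* XDP TX rings are reclaimed elsewhere; nothing to do. */
                    napi_complete_done(napi, 0);
                    return 0;
            }

            __netif_tx_lock(txq, raw_smp_processor_id());
            virtqueue_disable_cb(sq->vq);
            free_old_xmit_skbs(sq, true);
            opaque = virtqueue_enable_cb_prepare(sq->vq);
            done = napi_complete_done(napi, 0);
            if (!done)
                    virtqueue_disable_cb(sq->vq);
            __netif_tx_unlock(txq);

            /* Completions arrived after rearming: get polled again. */
            if (done && unlikely(virtqueue_poll(sq->vq, opaque)) &&
                napi_schedule_prep(napi))
                    __napi_schedule(napi);

            return 0;
    }
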
1688 bool use_napi = sq->napi.weight; in start_xmit()
1948 napi_disable(&vi->rq[i].napi); in virtnet_close()
1949 virtnet_napi_tx_disable(&vi->sq[i].napi); in virtnet_close()
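
virtnet_close() mirrors open: per queue pair it disables RX NAPI and then TX NAPI via the weight-aware wrapper. A sketch; stopping refill_work first, so it cannot re-enable RX NAPI behind our back (see the refill_work lines above), and the xdp_rxq_info_unreg() call are assumptions:

    static int virtnet_close(struct net_device *dev)
    {
            struct virtnet_info *vi = netdev_priv(dev);
            int i;

            /* Assumed: make sure refill_work cannot re-enable RX NAPI. */
            cancel_delayed_work_sync(&vi->refill);

            for (i = 0; i < vi->max_queue_pairs; i++) {
                    xdp_rxq_info_unreg(&vi->rq[i].xdp_rxq);
                    napi_disable(&vi->rq[i].napi);
                    virtnet_napi_tx_disable(&vi->sq[i].napi);
            }

            return 0;
    }
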
2340 if (napi_weight ^ vi->sq[0].napi.weight) { in virtnet_set_coalesce()
2344 vi->sq[i].napi.weight = napi_weight; in virtnet_set_coalesce()
2363 if (vi->sq[0].napi.weight) in virtnet_get_coalesce()
2428 napi_disable(&vi->rq[i].napi); in virtnet_freeze_down()
2429 virtnet_napi_tx_disable(&vi->sq[i].napi); in virtnet_freeze_down()
2453 virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); in virtnet_restore_up()
2455 &vi->sq[i].napi); in virtnet_restore_up()
2552 napi_disable(&vi->rq[i].napi); in virtnet_xdp_set()
2553 virtnet_napi_tx_disable(&vi->sq[i].napi); in virtnet_xdp_set()
2587 virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); in virtnet_xdp_set()
2589 &vi->sq[i].napi); in virtnet_xdp_set()
2604 virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); in virtnet_xdp_set()
2606 &vi->sq[i].napi); in virtnet_xdp_set()
2728 __netif_napi_del(&vi->rq[i].napi); in virtnet_free_queues()
2729 __netif_napi_del(&vi->sq[i].napi); in virtnet_free_queues()
2935 netif_napi_add(vi->dev, &vi->rq[i].napi, virtnet_poll, in virtnet_alloc_queues()
2937 netif_tx_napi_add(vi->dev, &vi->sq[i].napi, virtnet_poll_tx, in virtnet_alloc_queues()
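
Registration happens once per queue pair at allocation time; teardown uses __netif_napi_del() in virtnet_free_queues() as listed above. A sketch of the registration loop, with signatures as in the ~5.1x kernels this listing appears to match (netif_napi_add() later lost its weight argument); napi_weight and napi_tx are assumed to be the driver's module parameters, and a TX weight of 0 is what all the napi.weight checks above key on:

    for (i = 0; i < vi->max_queue_pairs; i++) {
            /* RX NAPI always on; TX NAPI only if the napi_tx parameter is set. */
            netif_napi_add(vi->dev, &vi->rq[i].napi, virtnet_poll,
                           napi_weight);
            netif_tx_napi_add(vi->dev, &vi->sq[i].napi, virtnet_poll_tx,
                              napi_tx ? napi_weight : 0);
    }
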