
Searched refs:cqes (Results 1 – 25 of 25) sorted by relevance

/Linux-v6.1/tools/io_uring/
setup.c:41 cq->ring_sz = p->cq_off.cqes + p->cq_entries * sizeof(struct io_uring_cqe); in io_uring_mmap()
54 cq->cqes = ptr + p->cq_off.cqes; in io_uring_mmap()
io_uring-bench.c:51 struct io_uring_cqe *cqes; member
257 cqe = &ring->cqes[head & cq_ring_mask]; in reap_events()
449 ptr = mmap(0, p.cq_off.cqes + p.cq_entries * sizeof(struct io_uring_cqe), in setup_ring()
457 cring->cqes = ptr + p.cq_off.cqes; in setup_ring()
queue.c:31 *cqe_ptr = &cq->cqes[head & mask]; in __io_uring_get_cqe()
liburing.h:41 struct io_uring_cqe *cqes; member
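
The tools/io_uring hits above all describe the same userspace pattern: the CQE array lives inside the CQ ring mapping, at the byte offset the kernel reports in cq_off.cqes after io_uring_setup() fills in struct io_uring_params. A minimal sketch of that mapping step, assuming a ring fd already returned by io_uring_setup() and using hypothetical names (app_cq_ring, map_cq_ring); the SQ ring mapping is omitted:

/* Hypothetical sketch: map the CQ ring the way tools/io_uring/setup.c does. */
#include <linux/io_uring.h>
#include <sys/mman.h>
#include <stddef.h>

struct app_cq_ring {                     /* hypothetical container */
	unsigned *head, *tail, *ring_mask;
	struct io_uring_cqe *cqes;       /* CQE array at cq_off.cqes */
	size_t ring_sz;
};

static int map_cq_ring(int ring_fd, struct io_uring_params *p,
		       struct app_cq_ring *cq)
{
	void *ptr;

	/* The mapping must cover the bookkeeping fields plus the CQE array. */
	cq->ring_sz = p->cq_off.cqes +
		      p->cq_entries * sizeof(struct io_uring_cqe);

	ptr = mmap(NULL, cq->ring_sz, PROT_READ | PROT_WRITE,
		   MAP_SHARED | MAP_POPULATE, ring_fd, IORING_OFF_CQ_RING);
	if (ptr == MAP_FAILED)
		return -1;

	cq->head = ptr + p->cq_off.head;
	cq->tail = ptr + p->cq_off.tail;
	cq->ring_mask = ptr + p->cq_off.ring_mask;
	cq->cqes = ptr + p->cq_off.cqes;
	return 0;
}
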
/Linux-v6.1/drivers/infiniband/hw/cxgb4/
restrack.c:334 struct t4_cqe *cqes) in fill_hwcqes() argument
339 if (fill_cqe(msg, cqes, idx, "hwcq_idx")) in fill_hwcqes()
342 if (fill_cqe(msg, cqes + 1, idx, "hwcq_idx")) in fill_hwcqes()
351 struct t4_cqe *cqes) in fill_swcqes() argument
359 if (fill_cqe(msg, cqes, idx, "swcq_idx")) in fill_swcqes()
364 if (fill_cqe(msg, cqes + 1, idx, "swcq_idx")) in fill_swcqes()
/Linux-v6.1/tools/testing/selftests/net/
io_uring_zerocopy_tx.c:77 struct io_uring_cqe *cqes; member
102 struct io_uring_cqe *cqes; member
198 cq->ring_sz = p->cq_off.cqes + p->cq_entries * sizeof(struct io_uring_cqe); in io_uring_mmap()
211 cq->cqes = ptr + p->cq_off.cqes; in io_uring_mmap()
311 *cqe_ptr = &cq->cqes[head & mask]; in io_uring_wait_cqe()
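
The selftest and queue.c hits show the matching consumer side: a completion is read from cqes[head & mask] once the kernel has advanced tail. A sketch of that reap step, reusing the app_cq_ring layout sketched above and GCC/Clang __atomic builtins for the acquire/release ordering the real code gets from its barrier macros:

/* Hypothetical sketch of reaping one CQE; returns 1 if a completion was
 * consumed, 0 if the ring was empty. */
static int reap_one_cqe(struct app_cq_ring *cq, struct io_uring_cqe *out)
{
	unsigned head = *cq->head;
	/* tail is written by the kernel; acquire pairs with its release */
	unsigned tail = __atomic_load_n(cq->tail, __ATOMIC_ACQUIRE);

	if (head == tail)
		return 0;                        /* nothing completed yet */

	*out = cq->cqes[head & *cq->ring_mask];  /* wrap via the ring mask */

	/* Publish the new head so the kernel may reuse the slot. */
	__atomic_store_n(cq->head, head + 1, __ATOMIC_RELEASE);
	return 1;
}
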
/Linux-v6.1/drivers/net/ethernet/fungible/funeth/
funeth_rx.c:354 q->next_cqe_info = cqe_to_info(q->cqes); in advance_cq()
645 q->cqes = fun_alloc_ring_mem(q->dma_dev, ncqe, FUNETH_CQE_SIZE, 0, in fun_rxq_create_sw()
648 if (!q->cqes) in fun_rxq_create_sw()
666 dma_free_coherent(q->dma_dev, ncqe * FUNETH_CQE_SIZE, q->cqes, in fun_rxq_create_sw()
687 q->cqes, q->cq_dma_addr); in fun_rxq_free_sw()
723 q->next_cqe_info = cqe_to_info(q->cqes); in fun_rxq_create_dev()
funeth_txrx.h:166 void *cqes; /* base of CQ descriptor ring */ member
/Linux-v6.1/drivers/net/ethernet/fungible/funcore/
fun_queue.c:295 cqe = funq->cqes + (funq->cq_head << funq->cqe_size_log2); in __fun_process_cq()
366 funq->cqes = fun_alloc_ring_mem(funq->fdev->dev, funq->cq_depth, in fun_alloc_cqes()
370 return funq->cqes ? 0 : -ENOMEM; in fun_alloc_cqes()
389 funq->cqes, funq->cq_dma_addr, NULL); in fun_free_queue()
fun_queue.h:36 void *cqes; member
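
Unlike the io_uring rings above, the funcore queue stores opaque CQEs of 2^cqe_size_log2 bytes in a flat buffer, so fun_queue.c locates the current entry with a shift rather than array indexing. A one-line sketch of that addressing (hypothetical helper name):

#include <stddef.h>

/* Hypothetical helper mirroring funq->cqes + (cq_head << cqe_size_log2). */
static inline void *cq_entry_at(void *cqes, unsigned int head,
				unsigned int cqe_size_log2)
{
	return (char *)cqes + ((size_t)head << cqe_size_log2);
}
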
/Linux-v6.1/drivers/net/ethernet/broadcom/
cnic.c:1425 struct kcqe *cqes[], u32 num_cqes) in cnic_reply_bnx2x_kcqes() argument
1434 cqes, num_cqes); in cnic_reply_bnx2x_kcqes()
1552 struct kcqe *cqes[1]; in cnic_bnx2x_iscsi_init2() local
1582 cqes[0] = (struct kcqe *) &kcqe; in cnic_bnx2x_iscsi_init2()
1583 cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1); in cnic_bnx2x_iscsi_init2()
1881 struct kcqe *cqes[1]; in cnic_bnx2x_iscsi_ofld1() local
1934 cqes[0] = (struct kcqe *) &kcqe; in cnic_bnx2x_iscsi_ofld1()
1935 cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1); in cnic_bnx2x_iscsi_ofld1()
1999 struct kcqe *cqes[1]; in cnic_bnx2x_iscsi_destroy() local
2032 cqes[0] = (struct kcqe *) &kcqe; in cnic_bnx2x_iscsi_destroy()
[all …]
cnic_if.h:369 void (*indicate_kcqes)(void *ulp_ctx, struct kcqe *cqes[],
/Linux-v6.1/drivers/nvme/host/
apple.c:135 struct nvme_completion *cqes; member
571 struct nvme_completion *hcqe = &q->cqes[q->cq_head]; in apple_nvme_cqe_pending()
589 struct nvme_completion *cqe = &q->cqes[idx]; in apple_nvme_handle_cqe()
962 memset(q->cqes, 0, depth * sizeof(struct nvme_completion)); in apple_nvme_init_queue()
1285 q->cqes = dmam_alloc_coherent(anv->dev, in apple_nvme_queue_alloc()
1288 if (!q->cqes) in apple_nvme_queue_alloc()
pci.c:196 struct nvme_completion *cqes; member
1046 struct nvme_completion *hcqe = &nvmeq->cqes[nvmeq->cq_head]; in nvme_cqe_pending()
1070 struct nvme_completion *cqe = &nvmeq->cqes[idx]; in nvme_handle_cqe()
1454 (void *)nvmeq->cqes, nvmeq->cq_dma_addr); in nvme_free_queue()
1594 nvmeq->cqes = dma_alloc_coherent(dev->dev, CQ_SIZE(nvmeq), in nvme_alloc_queue()
1596 if (!nvmeq->cqes) in nvme_alloc_queue()
1614 dma_free_coherent(dev->dev, CQ_SIZE(nvmeq), (void *)nvmeq->cqes, in nvme_alloc_queue()
1643 memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq)); in nvme_init_queue()
/Linux-v6.1/drivers/net/ethernet/mellanox/mlx5/core/
en_stats.c:282 s->tx_xdp_cqes += xdpsq_red_stats->cqes; in mlx5e_stats_grp_sw_update_stats_xdp_red()
294 s->rx_xdp_tx_cqe += xdpsq_stats->cqes; in mlx5e_stats_grp_sw_update_stats_xdpsq()
305 s->tx_xsk_cqes += xsksq_stats->cqes; in mlx5e_stats_grp_sw_update_stats_xsksq()
440 s->tx_cqes += sq_stats->cqes; in mlx5e_stats_grp_sw_update_stats_sq()
2028 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqes) },
2040 { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
2050 { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
2081 { MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
2105 { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, cqes) },
2187 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, cqes) },
en_stats.h:430 u64 cqes ____cacheline_aligned_in_smp;
443 u64 cqes ____cacheline_aligned_in_smp;
en_tx.c:867 stats->cqes += i; in mlx5e_poll_tx_cq()
/Linux-v6.1/drivers/nvme/target/
passthru.c:134 id->cqes = min_t(__u8, ((0x4 << 4) | 0x4), id->cqes); in nvmet_passthru_override_id_ctrl()
admin-cmd.c:426 id->cqes = (0x4 << 4) | 0x4; in nvmet_execute_identify_ctrl()
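
Both nvme/target hits set the Identify Controller CQES byte, which packs two log2 entry sizes: bits 3:0 give the required (minimum) CQ entry size and bits 7:4 the maximum, so (0x4 << 4) | 0x4 advertises 16-byte completions for both. A tiny decode of that constant:

#include <stdio.h>

int main(void)
{
	unsigned char cqes = (0x4 << 4) | 0x4;   /* value from admin-cmd.c */

	printf("min CQ entry size: %u bytes\n", 1u << (cqes & 0xf));  /* 16 */
	printf("max CQ entry size: %u bytes\n", 1u << (cqes >> 4));   /* 16 */
	return 0;
}
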
/Linux-v6.1/include/uapi/linux/
io_uring.h:393 __u32 cqes; member
/Linux-v6.1/io_uring/
fdinfo.c:122 struct io_uring_cqe *cqe = &r->cqes[(entry & cq_mask) << cq_shift]; in __io_uring_show_fdinfo()
io_uring.c:771 ctx->cqe_cached = &rings->cqes[off]; in __io_get_cqe()
778 return &rings->cqes[off]; in __io_get_cqe()
2484 off = struct_size(rings, cqes, cq_entries); in rings_size()
3531 p->cq_off.cqes = offsetof(struct io_rings, cqes); in io_uring_create()
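
On the kernel side, cqes is a flexible array member at the end of struct io_rings, so rings_size() sizes the allocation with struct_size() and io_uring_create() reports the array's offset back to userspace as cq_off.cqes. A userspace model of that size calculation, using a simplified toy_rings stand-in rather than the real struct:

#include <linux/io_uring.h>
#include <stddef.h>
#include <stdio.h>

/* Simplified stand-in: the real io_rings has more fields and cacheline
 * alignment, but the sizing idea is the same. */
struct toy_rings {
	unsigned head, tail, ring_mask, ring_entries;
	struct io_uring_cqe cqes[];      /* flexible array of completions */
};

int main(void)
{
	unsigned cq_entries = 256;

	/* struct_size(rings, cqes, cq_entries) boils down to this: */
	size_t sz = offsetof(struct toy_rings, cqes) +
		    cq_entries * sizeof(struct io_uring_cqe);

	printf("CQ ring bytes for %u entries: %zu\n", cq_entries, sz);
	return 0;
}

The fdinfo.c hit above indexes with an extra << cq_shift because IORING_SETUP_CQE32 rings store 32-byte completions, i.e. each logical entry occupies two slots of the base CQE array.
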
/Linux-v6.1/include/linux/
io_uring_types.h:151 struct io_uring_cqe cqes[] ____cacheline_aligned_in_smp; member
nvme.h:327 __u8 cqes; member
/Linux-v6.1/drivers/net/ethernet/mellanox/mlx5/core/en/
xdp.c:570 sq->stats->cqes += i; in mlx5e_poll_xdpsq_cq()