Searched refs:hr_cq (Results 1 – 6 of 6) sorted by relevance

/Linux-v6.1/drivers/infiniband/hw/hns/
hns_roce_cq.c
58 static int alloc_cqn(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq) in alloc_cqn() argument
76 hr_cq->cqn = (id << CQ_BANKID_SHIFT) | bankid; in alloc_cqn()
104 struct hns_roce_cq *hr_cq, in hns_roce_create_cqc() argument
117 hr_dev->hw->write_cqc(hr_dev, hr_cq, mailbox->buf, mtts, dma_handle); in hns_roce_create_cqc()
120 hr_cq->cqn); in hns_roce_create_cqc()
124 hr_cq->cqn, ret); in hns_roce_create_cqc()
131 static int alloc_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq) in alloc_cqc() argument
139 ret = hns_roce_mtr_find(hr_dev, &hr_cq->mtr, 0, mtts, ARRAY_SIZE(mtts), in alloc_cqc()
147 ret = hns_roce_table_get(hr_dev, &cq_table->table, hr_cq->cqn); in alloc_cqc()
150 hr_cq->cqn, ret); in alloc_cqc()
[all …]
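The alloc_cqn() hit above shows the CQN being assembled from a bank-relative id and the bank id packed into the low bits ("hr_cq->cqn = (id << CQ_BANKID_SHIFT) | bankid"). The standalone sketch below models that packing outside the driver; the shift width of 2 (i.e. 4 banks) is an assumption for illustration, not taken from the listing.

```c
/*
 * Standalone sketch (not driver code) of a CQN that carries its bank id in
 * the low bits, mirroring "hr_cq->cqn = (id << CQ_BANKID_SHIFT) | bankid".
 * CQ_BANKID_SHIFT = 2 (4 banks) is assumed here for illustration.
 */
#include <stdio.h>

#define CQ_BANKID_SHIFT 2
#define CQ_BANK_NUM     (1 << CQ_BANKID_SHIFT)

static unsigned long make_cqn(unsigned long id, unsigned int bankid)
{
	return (id << CQ_BANKID_SHIFT) | bankid;
}

static unsigned int cqn_to_bankid(unsigned long cqn)
{
	/* the bank is recoverable from the CQN alone */
	return cqn & (CQ_BANK_NUM - 1);
}

int main(void)
{
	unsigned long cqn = make_cqn(5, 3);	/* id 5 within bank 3 */

	printf("cqn=%lu bank=%u id=%lu\n",
	       cqn, cqn_to_bankid(cqn), cqn >> CQ_BANKID_SHIFT);
	return 0;
}
```

Spreading consecutive CQNs across banks this way lets later lookups (for example on completion events) derive the owning bank directly from the CQN.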
hns_roce_restrack.c
16 struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq); in hns_roce_fill_res_cq_entry() local
23 if (rdma_nl_put_driver_u32(msg, "cq_depth", hr_cq->cq_depth)) in hns_roce_fill_res_cq_entry()
26 if (rdma_nl_put_driver_u32(msg, "cons_index", hr_cq->cons_index)) in hns_roce_fill_res_cq_entry()
29 if (rdma_nl_put_driver_u32(msg, "cqe_size", hr_cq->cqe_size)) in hns_roce_fill_res_cq_entry()
32 if (rdma_nl_put_driver_u32(msg, "arm_sn", hr_cq->arm_sn)) in hns_roce_fill_res_cq_entry()
48 struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq); in hns_roce_fill_res_cq_entry_raw() local
57 ret = hr_dev->hw->query_cqc(hr_dev, hr_cq->cqn, &context); in hns_roce_fill_res_cq_entry_raw()
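The hns_roce_fill_res_cq_entry() hits export a handful of CQ fields (cq_depth, cons_index, cqe_size, arm_sn) one named u32 attribute at a time, bailing out as soon as a put fails. The sketch below is a user-space analog of that fill pattern, not kernel code; the put_u32() helper and field values are stand-ins for rdma_nl_put_driver_u32() and the real CQ state.

```c
/*
 * User-space analog (not driver code) of the fill pattern in
 * hns_roce_fill_res_cq_entry(): emit each CQ field as a named u32 and stop
 * at the first attribute that cannot be written.
 */
#include <stdio.h>

struct cq_state {
	unsigned int cq_depth;
	unsigned int cons_index;
	unsigned int cqe_size;
	unsigned int arm_sn;
};

/* stand-in for rdma_nl_put_driver_u32(); non-zero means "no room" */
static int put_u32(const char *name, unsigned int val)
{
	return printf("%s: %u\n", name, val) < 0;
}

static int fill_cq_entry(const struct cq_state *cq)
{
	if (put_u32("cq_depth", cq->cq_depth) ||
	    put_u32("cons_index", cq->cons_index) ||
	    put_u32("cqe_size", cq->cqe_size) ||
	    put_u32("arm_sn", cq->arm_sn))
		return -1;

	return 0;
}

int main(void)
{
	struct cq_state cq = { 128, 7, 32, 1 };

	return fill_cq_entry(&cq);
}
```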
hns_roce_hw_v2.c
3459 static void *get_cqe_v2(struct hns_roce_cq *hr_cq, int n) in get_cqe_v2() argument
3461 return hns_roce_buf_offset(hr_cq->mtr.kmem, n * hr_cq->cqe_size); in get_cqe_v2()
3464 static void *get_sw_cqe_v2(struct hns_roce_cq *hr_cq, unsigned int n) in get_sw_cqe_v2() argument
3466 struct hns_roce_v2_cqe *cqe = get_cqe_v2(hr_cq, n & hr_cq->ib_cq.cqe); in get_sw_cqe_v2()
3469 return (hr_reg_read(cqe, CQE_OWNER) ^ !!(n & hr_cq->cq_depth)) ? cqe : in get_sw_cqe_v2()
3474 struct hns_roce_cq *hr_cq) in update_cq_db() argument
3476 if (likely(hr_cq->flags & HNS_ROCE_CQ_FLAG_RECORD_DB)) { in update_cq_db()
3477 *hr_cq->set_ci_db = hr_cq->cons_index & V2_CQ_DB_CONS_IDX_M; in update_cq_db()
3481 hr_reg_write(&cq_db, DB_TAG, hr_cq->cqn); in update_cq_db()
3483 hr_reg_write(&cq_db, DB_CQ_CI, hr_cq->cons_index); in update_cq_db()
[all …]
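The get_sw_cqe_v2() hit shows the owner-bit test used to decide whether a CQE is ready for software: the CQE's owner bit is XORed with the parity of how many times the (free-running) consumer index has wrapped the ring. The sketch below models that idea on a plain ring in user space; the ring depth of 8 and the toggle convention (hardware writes owner = 1 on the first pass) are assumptions chosen so the check matches the driver's expression.

```c
/*
 * Minimal model (not driver code) of the owner-bit check in get_sw_cqe_v2().
 * "Hardware" flips the owner bit it writes on every pass over the ring, so a
 * CQE belongs to software exactly when its owner bit differs from the wrap
 * parity of the consumer index.
 */
#include <stdio.h>

#define CQ_DEPTH 8	/* power of two; assumed for the sketch */

struct cqe {
	unsigned int owner;	/* written by the producer */
};

static struct cqe ring[CQ_DEPTH];	/* starts zeroed, like a fresh CQ buffer */

/* producer ("hardware"): post a CQE at free-running index pi */
static void hw_post_cqe(unsigned int pi)
{
	/* pass 0 writes owner = 1, pass 1 writes owner = 0, and so on */
	ring[pi & (CQ_DEPTH - 1)].owner = !(pi & CQ_DEPTH);
}

/* consumer ("software"): mirror of the get_sw_cqe_v2() test */
static struct cqe *get_sw_cqe(unsigned int ci)
{
	struct cqe *cqe = &ring[ci & (CQ_DEPTH - 1)];

	return (cqe->owner ^ !!(ci & CQ_DEPTH)) ? cqe : NULL;
}

int main(void)
{
	unsigned int pi = 0, ci = 0;

	printf("empty:      %s\n", get_sw_cqe(ci) ? "valid" : "none");
	hw_post_cqe(pi++);
	printf("after post: %s\n", get_sw_cqe(ci) ? "valid" : "none");
	ci++;
	printf("after poll: %s\n", get_sw_cqe(ci) ? "valid" : "none");
	return 0;
}
```

The update_cq_db() hit is the other half of the protocol: once software has consumed entries, the new consumer index is reported back either through the record doorbell page or an explicit doorbell write.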
hns_roce_main.c
861 struct hns_roce_cq *hr_cq = to_hr_cq(cq); in check_and_get_armed_cq() local
864 spin_lock_irqsave(&hr_cq->lock, flags); in check_and_get_armed_cq()
866 if (!hr_cq->is_armed) { in check_and_get_armed_cq()
867 hr_cq->is_armed = 1; in check_and_get_armed_cq()
868 list_add_tail(&hr_cq->node, cq_list); in check_and_get_armed_cq()
871 spin_unlock_irqrestore(&hr_cq->lock, flags); in check_and_get_armed_cq()
877 struct hns_roce_cq *hr_cq; in hns_roce_handle_device_err() local
897 list_for_each_entry(hr_cq, &cq_list, node) in hns_roce_handle_device_err()
898 hns_roce_cq_completion(hr_dev, hr_cq->cqn); in hns_roce_handle_device_err()
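These hits are the device-error path: under each CQ's lock, check_and_get_armed_cq() arms the CQ exactly once and collects it on a local list, and hns_roce_handle_device_err() then raises a completion for every collected CQ. The user-space sketch below captures that "mark once under a lock, collect, notify later" shape; the types, the singly linked next pointer, and the printf standing in for hns_roce_cq_completion() are simplifications, and collection order is not preserved as in the kernel's list_add_tail().

```c
/*
 * User-space sketch (not driver code) of the pattern in
 * check_and_get_armed_cq() / hns_roce_handle_device_err().
 */
#include <pthread.h>
#include <stdio.h>

struct cq {
	pthread_mutex_t lock;
	int is_armed;
	unsigned int cqn;
	struct cq *next;	/* stand-in for the kernel list node */
};

static void check_and_get_armed_cq(struct cq *cq, struct cq **cq_list)
{
	pthread_mutex_lock(&cq->lock);
	if (!cq->is_armed) {
		cq->is_armed = 1;	/* arm at most once */
		cq->next = *cq_list;	/* collect for later notification */
		*cq_list = cq;
	}
	pthread_mutex_unlock(&cq->lock);
}

static void handle_device_err(struct cq **cqs, int n)
{
	struct cq *cq_list = NULL;

	for (int i = 0; i < n; i++)
		check_and_get_armed_cq(cqs[i], &cq_list);

	/* cf. hns_roce_cq_completion() for each collected CQ */
	for (struct cq *cq = cq_list; cq; cq = cq->next)
		printf("completion on cqn %u\n", cq->cqn);
}

int main(void)
{
	struct cq a = { PTHREAD_MUTEX_INITIALIZER, 0, 1, NULL };
	struct cq b = { PTHREAD_MUTEX_INITIALIZER, 1, 2, NULL };	/* already armed: skipped */
	struct cq *cqs[] = { &a, &b };

	handle_device_err(cqs, 2);
	return 0;
}
```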
hns_roce_qp.c
1420 struct hns_roce_cq *hr_cq; in hns_roce_wq_overflow() local
1427 hr_cq = to_hr_cq(ib_cq); in hns_roce_wq_overflow()
1428 spin_lock(&hr_cq->lock); in hns_roce_wq_overflow()
1430 spin_unlock(&hr_cq->lock); in hns_roce_wq_overflow()
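The listing only shows hns_roce_wq_overflow() taking and releasing the CQ lock. A common shape for such a check, assumed here rather than taken from the snippet, is to compute ring occupancy from free-running head/tail counters and, if the fast path looks full, re-read the occupancy under the CQ lock so the result is consistent with a poller that advances tail. The sketch below illustrates that shape with simplified types.

```c
/*
 * Sketch of a work-queue overflow check of the kind hns_roce_wq_overflow()
 * appears to perform. The occupancy formula and field names below are
 * assumptions for illustration, not copied from the listing.
 */
#include <pthread.h>
#include <stdbool.h>

struct wq {
	unsigned int head;	/* advanced by the poster */
	unsigned int tail;	/* advanced when completions are polled */
	unsigned int max_post;
};

static bool wq_overflow(struct wq *wq, unsigned int nreq, pthread_mutex_t *cq_lock)
{
	unsigned int cur = wq->head - wq->tail;	/* free-running counters */

	if (cur + nreq < wq->max_post)
		return false;

	/* slow path: re-read occupancy under the CQ lock before giving up */
	pthread_mutex_lock(cq_lock);
	cur = wq->head - wq->tail;
	pthread_mutex_unlock(cq_lock);

	return cur + nreq >= wq->max_post;
}
```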
hns_roce_device.h
878 struct hns_roce_cq *hr_cq, void *mb_buf, u64 *mtts,