Lines matching references to hr_dev. Each entry shows the source line number, the matching line, and the enclosing function; "argument" or "local" after the function name marks lines where hr_dev is that function's parameter or a local variable.
48 struct device *dev = flush_work->hr_dev->dev; in flush_work_handle()
71 void init_flush_work(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp) in init_flush_work() argument
75 flush_work->hr_dev = hr_dev; in init_flush_work()
78 queue_work(hr_dev->irq_workq, &flush_work->work); in init_flush_work()
96 void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type) in hns_roce_qp_event() argument
98 struct device *dev = hr_dev->dev; in hns_roce_qp_event()
101 xa_lock(&hr_dev->qp_table_xa); in hns_roce_qp_event()
102 qp = __hns_roce_qp_lookup(hr_dev, qpn); in hns_roce_qp_event()
105 xa_unlock(&hr_dev->qp_table_xa); in hns_roce_qp_event()
119 flush_cqe(hr_dev, qp); in hns_roce_qp_event()
227 static int alloc_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, in alloc_qpn() argument
230 struct hns_roce_qp_table *qp_table = &hr_dev->qp_table; in alloc_qpn()
244 ibdev_err(&hr_dev->ib_dev, in alloc_qpn()
259 static void add_qp_to_list(struct hns_roce_dev *hr_dev, in add_qp_to_list() argument
269 spin_lock_irqsave(&hr_dev->qp_list_lock, flags); in add_qp_to_list()
272 list_add_tail(&hr_qp->node, &hr_dev->qp_list); in add_qp_to_list()
279 spin_unlock_irqrestore(&hr_dev->qp_list_lock, flags); in add_qp_to_list()
282 static int hns_roce_qp_store(struct hns_roce_dev *hr_dev, in hns_roce_qp_store() argument
286 struct xarray *xa = &hr_dev->qp_table_xa; in hns_roce_qp_store()
294 dev_err(hr_dev->dev, "failed to xa store for QPC\n"); in hns_roce_qp_store()
297 add_qp_to_list(hr_dev, hr_qp, init_attr->send_cq, in hns_roce_qp_store()
303 static int alloc_qpc(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp) in alloc_qpc() argument
305 struct hns_roce_qp_table *qp_table = &hr_dev->qp_table; in alloc_qpc()
306 struct device *dev = hr_dev->dev; in alloc_qpc()
313 ret = hns_roce_table_get(hr_dev, &qp_table->qp_table, hr_qp->qpn); in alloc_qpc()
320 ret = hns_roce_table_get(hr_dev, &qp_table->irrl_table, hr_qp->qpn); in alloc_qpc()
326 if (hr_dev->caps.trrl_entry_sz) { in alloc_qpc()
328 ret = hns_roce_table_get(hr_dev, &qp_table->trrl_table, in alloc_qpc()
336 if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL) { in alloc_qpc()
338 ret = hns_roce_table_get(hr_dev, &qp_table->sccc_table, in alloc_qpc()
349 if (hr_dev->caps.trrl_entry_sz) in alloc_qpc()
350 hns_roce_table_put(hr_dev, &qp_table->trrl_table, hr_qp->qpn); in alloc_qpc()
353 hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn); in alloc_qpc()
356 hns_roce_table_put(hr_dev, &qp_table->qp_table, hr_qp->qpn); in alloc_qpc()
367 void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp) in hns_roce_qp_remove() argument
369 struct xarray *xa = &hr_dev->qp_table_xa; in hns_roce_qp_remove()
386 static void free_qpc(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp) in free_qpc() argument
388 struct hns_roce_qp_table *qp_table = &hr_dev->qp_table; in free_qpc()
390 if (hr_dev->caps.trrl_entry_sz) in free_qpc()
391 hns_roce_table_put(hr_dev, &qp_table->trrl_table, hr_qp->qpn); in free_qpc()
392 hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn); in free_qpc()
401 static void free_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp) in free_qpn() argument
408 if (hr_qp->qpn < hr_dev->caps.reserved_qps) in free_qpn()
413 ida_free(&hr_dev->qp_table.bank[bankid].ida, hr_qp->qpn >> 3); in free_qpn()
415 mutex_lock(&hr_dev->qp_table.bank_mutex); in free_qpn()
416 hr_dev->qp_table.bank[bankid].inuse--; in free_qpn()
417 mutex_unlock(&hr_dev->qp_table.bank_mutex); in free_qpn()
442 static int set_rq_size(struct hns_roce_dev *hr_dev, struct ib_qp_cap *cap, in set_rq_size() argument
445 u32 max_sge = proc_rq_sge(hr_dev, hr_qp, user); in set_rq_size()
459 if (!cap->max_recv_wr || cap->max_recv_wr > hr_dev->caps.max_wqes || in set_rq_size()
461 ibdev_err(&hr_dev->ib_dev, in set_rq_size()
467 cnt = roundup_pow_of_two(max(cap->max_recv_wr, hr_dev->caps.min_wqes)); in set_rq_size()
468 if (cnt > hr_dev->caps.max_wqes) { in set_rq_size()
469 ibdev_err(&hr_dev->ib_dev, "rq depth %u too large\n", in set_rq_size()
477 hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz * in set_rq_size()
488 static u32 get_max_inline_data(struct hns_roce_dev *hr_dev, in get_max_inline_data() argument
494 hr_dev->caps.max_sq_inline); in get_max_inline_data()
547 static void set_ext_sge_param(struct hns_roce_dev *hr_dev, u32 sq_wqe_cnt, in set_ext_sge_param() argument
557 cap->max_inline_data = get_max_inline_data(hr_dev, cap); in set_ext_sge_param()
572 hr_qp->sq.max_gs = min(hr_qp->sq.max_gs, hr_dev->caps.max_sq_sg); in set_ext_sge_param()
577 hr_qp->sq.max_gs = min(hr_qp->sq.max_gs, hr_dev->caps.max_sq_sg); in set_ext_sge_param()
593 static int check_sq_size_with_integrity(struct hns_roce_dev *hr_dev, in check_sq_size_with_integrity() argument
597 u32 roundup_sq_stride = roundup_pow_of_two(hr_dev->caps.max_sq_desc_sz); in check_sq_size_with_integrity()
603 ibdev_err(&hr_dev->ib_dev, "failed to check SQ stride size.\n"); in check_sq_size_with_integrity()
607 if (cap->max_send_sge > hr_dev->caps.max_sq_sg) { in check_sq_size_with_integrity()
608 ibdev_err(&hr_dev->ib_dev, "failed to check SQ SGE size %u.\n", in check_sq_size_with_integrity()
616 static int set_user_sq_size(struct hns_roce_dev *hr_dev, in set_user_sq_size() argument
620 struct ib_device *ibdev = &hr_dev->ib_dev; in set_user_sq_size()
625 cnt > hr_dev->caps.max_wqes) in set_user_sq_size()
628 ret = check_sq_size_with_integrity(hr_dev, cap, ucmd); in set_user_sq_size()
635 set_ext_sge_param(hr_dev, cnt, hr_qp, cap); in set_user_sq_size()
644 static int set_wqe_buf_attr(struct hns_roce_dev *hr_dev, in set_wqe_buf_attr() argument
659 buf_attr->region[idx].hopnum = hr_dev->caps.wqe_sq_hop_num; in set_wqe_buf_attr()
670 buf_attr->region[idx].hopnum = hr_dev->caps.wqe_sge_hop_num; in set_wqe_buf_attr()
681 buf_attr->region[idx].hopnum = hr_dev->caps.wqe_rq_hop_num; in set_wqe_buf_attr()
689 buf_attr->page_shift = HNS_HW_PAGE_SHIFT + hr_dev->caps.mtt_buf_pg_sz; in set_wqe_buf_attr()
695 static int set_kernel_sq_size(struct hns_roce_dev *hr_dev, in set_kernel_sq_size() argument
698 struct ib_device *ibdev = &hr_dev->ib_dev; in set_kernel_sq_size()
701 if (!cap->max_send_wr || cap->max_send_wr > hr_dev->caps.max_wqes || in set_kernel_sq_size()
702 cap->max_send_sge > hr_dev->caps.max_sq_sg) { in set_kernel_sq_size()
707 cnt = roundup_pow_of_two(max(cap->max_send_wr, hr_dev->caps.min_wqes)); in set_kernel_sq_size()
708 if (cnt > hr_dev->caps.max_wqes) { in set_kernel_sq_size()
714 hr_qp->sq.wqe_shift = ilog2(hr_dev->caps.max_sq_desc_sz); in set_kernel_sq_size()
717 set_ext_sge_param(hr_dev, cnt, hr_qp, cap); in set_kernel_sq_size()
744 static int alloc_qp_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, in alloc_qp_buf() argument
748 struct ib_device *ibdev = &hr_dev->ib_dev; in alloc_qp_buf()
752 ret = set_wqe_buf_attr(hr_dev, hr_qp, &buf_attr); in alloc_qp_buf()
757 ret = hns_roce_mtr_create(hr_dev, &hr_qp->mtr, &buf_attr, in alloc_qp_buf()
758 PAGE_SHIFT + hr_dev->caps.mtt_ba_pg_sz, in alloc_qp_buf()
765 if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_DIRECT_WQE) in alloc_qp_buf()
775 static void free_qp_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp) in free_qp_buf() argument
777 hns_roce_mtr_destroy(hr_dev, &hr_qp->mtr); in free_qp_buf()
780 static inline bool user_qp_has_sdb(struct hns_roce_dev *hr_dev, in user_qp_has_sdb() argument
786 return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB) && in user_qp_has_sdb()
792 static inline bool user_qp_has_rdb(struct hns_roce_dev *hr_dev, in user_qp_has_rdb() argument
797 return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB) && in user_qp_has_rdb()
802 static inline bool kernel_qp_has_rdb(struct hns_roce_dev *hr_dev, in kernel_qp_has_rdb() argument
805 return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB) && in kernel_qp_has_rdb()
810 struct hns_roce_dev *hr_dev, in qp_mmap_entry() argument
820 address = hr_dev->dwqe_page + hr_qp->qpn * HNS_ROCE_DWQE_SIZE; in qp_mmap_entry()
828 ibdev_err(&hr_dev->ib_dev, "failed to get dwqe mmap entry.\n"); in qp_mmap_entry()
838 static int alloc_user_qp_db(struct hns_roce_dev *hr_dev, in alloc_user_qp_db() argument
847 struct ib_device *ibdev = &hr_dev->ib_dev; in alloc_user_qp_db()
850 if (user_qp_has_sdb(hr_dev, init_attr, udata, resp, ucmd)) { in alloc_user_qp_db()
861 if (user_qp_has_rdb(hr_dev, init_attr, udata, resp)) { in alloc_user_qp_db()
881 static int alloc_kernel_qp_db(struct hns_roce_dev *hr_dev, in alloc_kernel_qp_db() argument
885 struct ib_device *ibdev = &hr_dev->ib_dev; in alloc_kernel_qp_db()
888 if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) in alloc_kernel_qp_db()
889 hr_qp->sq.db_reg = hr_dev->mem_base + in alloc_kernel_qp_db()
892 hr_qp->sq.db_reg = hr_dev->reg_base + hr_dev->sdb_offset + in alloc_kernel_qp_db()
893 DB_REG_OFFSET * hr_dev->priv_uar.index; in alloc_kernel_qp_db()
895 hr_qp->rq.db_reg = hr_dev->reg_base + hr_dev->odb_offset + in alloc_kernel_qp_db()
896 DB_REG_OFFSET * hr_dev->priv_uar.index; in alloc_kernel_qp_db()
898 if (kernel_qp_has_rdb(hr_dev, init_attr)) { in alloc_kernel_qp_db()
899 ret = hns_roce_alloc_db(hr_dev, &hr_qp->rdb, 0); in alloc_kernel_qp_db()
913 static int alloc_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, in alloc_qp_db() argument
921 if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SDI_MODE) in alloc_qp_db()
926 ret = qp_mmap_entry(hr_qp, hr_dev, udata, resp); in alloc_qp_db()
931 ret = alloc_user_qp_db(hr_dev, hr_qp, init_attr, udata, ucmd, in alloc_qp_db()
936 ret = alloc_kernel_qp_db(hr_dev, hr_qp, init_attr); in alloc_qp_db()
950 static void free_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, in free_qp_db() argument
965 hns_roce_free_db(hr_dev, &hr_qp->rdb); in free_qp_db()
969 static int alloc_kernel_wrid(struct hns_roce_dev *hr_dev, in alloc_kernel_wrid() argument
972 struct ib_device *ibdev = &hr_dev->ib_dev; in alloc_kernel_wrid()
1007 static int set_qp_param(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, in set_qp_param() argument
1012 struct ib_device *ibdev = &hr_dev->ib_dev; in set_qp_param()
1021 ret = set_rq_size(hr_dev, &init_attr->cap, hr_qp, in set_qp_param()
1041 ret = set_user_sq_size(hr_dev, &init_attr->cap, hr_qp, ucmd); in set_qp_param()
1047 if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) in set_qp_param()
1049 ret = set_kernel_sq_size(hr_dev, &init_attr->cap, hr_qp); in set_qp_param()
1059 static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, in hns_roce_create_qp_common() argument
1066 struct ib_device *ibdev = &hr_dev->ib_dev; in hns_roce_create_qp_common()
1080 ret = set_qp_param(hr_dev, hr_qp, init_attr, udata, &ucmd); in hns_roce_create_qp_common()
1087 ret = alloc_kernel_wrid(hr_dev, hr_qp); in hns_roce_create_qp_common()
1095 ret = alloc_qp_buf(hr_dev, hr_qp, init_attr, udata, ucmd.buf_addr); in hns_roce_create_qp_common()
1101 ret = alloc_qpn(hr_dev, hr_qp, init_attr); in hns_roce_create_qp_common()
1107 ret = alloc_qp_db(hr_dev, hr_qp, init_attr, udata, &ucmd, &resp); in hns_roce_create_qp_common()
1114 ret = alloc_qpc(hr_dev, hr_qp); in hns_roce_create_qp_common()
1121 ret = hns_roce_qp_store(hr_dev, hr_qp, init_attr); in hns_roce_create_qp_common()
1137 if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL) { in hns_roce_create_qp_common()
1138 ret = hr_dev->hw->qp_flow_control_init(hr_dev, hr_qp); in hns_roce_create_qp_common()
1151 hns_roce_qp_remove(hr_dev, hr_qp); in hns_roce_create_qp_common()
1153 free_qpc(hr_dev, hr_qp); in hns_roce_create_qp_common()
1155 free_qp_db(hr_dev, hr_qp, udata); in hns_roce_create_qp_common()
1157 free_qpn(hr_dev, hr_qp); in hns_roce_create_qp_common()
1159 free_qp_buf(hr_dev, hr_qp); in hns_roce_create_qp_common()
1165 void hns_roce_qp_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, in hns_roce_qp_destroy() argument
1172 free_qpc(hr_dev, hr_qp); in hns_roce_qp_destroy()
1173 free_qpn(hr_dev, hr_qp); in hns_roce_qp_destroy()
1174 free_qp_buf(hr_dev, hr_qp); in hns_roce_qp_destroy()
1176 free_qp_db(hr_dev, hr_qp, udata); in hns_roce_qp_destroy()
1179 static int check_qp_type(struct hns_roce_dev *hr_dev, enum ib_qp_type type, in check_qp_type() argument
1185 if (!(hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC)) in check_qp_type()
1189 if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08 && in check_qp_type()
1203 ibdev_err(&hr_dev->ib_dev, "not support QP type %d\n", type); in check_qp_type()
1212 struct hns_roce_dev *hr_dev = to_hr_dev(ibdev); in hns_roce_create_qp() local
1217 ret = check_qp_type(hr_dev, init_attr->qp_type, !!udata); in hns_roce_create_qp()
1226 hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port]; in hns_roce_create_qp()
1229 ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata, hr_qp); in hns_roce_create_qp()
1253 static int check_mtu_validate(struct hns_roce_dev *hr_dev, in check_mtu_validate() argument
1261 active_mtu = iboe_get_mtu(hr_dev->iboe.netdevs[p]->mtu); in check_mtu_validate()
1263 if ((hr_dev->caps.max_mtu >= IB_MTU_2048 && in check_mtu_validate()
1264 attr->path_mtu > hr_dev->caps.max_mtu) || in check_mtu_validate()
1266 ibdev_err(&hr_dev->ib_dev, in check_mtu_validate()
1278 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); in hns_roce_check_qp_attr() local
1283 (attr->port_num == 0 || attr->port_num > hr_dev->caps.num_ports)) { in hns_roce_check_qp_attr()
1284 ibdev_err(&hr_dev->ib_dev, "invalid attr, port_num = %u.\n", in hns_roce_check_qp_attr()
1291 if (attr->pkey_index >= hr_dev->caps.pkey_table_len[p]) { in hns_roce_check_qp_attr()
1292 ibdev_err(&hr_dev->ib_dev, in hns_roce_check_qp_attr()
1300 attr->max_rd_atomic > hr_dev->caps.max_qp_init_rdma) { in hns_roce_check_qp_attr()
1301 ibdev_err(&hr_dev->ib_dev, in hns_roce_check_qp_attr()
1308 attr->max_dest_rd_atomic > hr_dev->caps.max_qp_dest_rdma) { in hns_roce_check_qp_attr()
1309 ibdev_err(&hr_dev->ib_dev, in hns_roce_check_qp_attr()
1316 return check_mtu_validate(hr_dev, hr_qp, attr, attr_mask); in hns_roce_check_qp_attr()
1324 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); in hns_roce_modify_qp() local
1345 ibdev_warn(&hr_dev->ib_dev, in hns_roce_modify_qp()
1353 ibdev_err(&hr_dev->ib_dev, "ib_modify_qp_is_ok failed\n"); in hns_roce_modify_qp()
1364 ret = hr_dev->hw->modify_qp(ibqp, attr, attr_mask, cur_state, in hns_roce_modify_qp()
1460 int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev) in hns_roce_init_qp_table() argument
1462 struct hns_roce_qp_table *qp_table = &hr_dev->qp_table; in hns_roce_init_qp_table()
1466 qp_table->idx_table.spare_idx = kcalloc(hr_dev->caps.num_qps, in hns_roce_init_qp_table()
1473 xa_init(&hr_dev->qp_table_xa); in hns_roce_init_qp_table()
1475 reserved_from_bot = hr_dev->caps.reserved_qps; in hns_roce_init_qp_table()
1478 hr_dev->qp_table.bank[get_qp_bankid(i)].inuse++; in hns_roce_init_qp_table()
1479 hr_dev->qp_table.bank[get_qp_bankid(i)].min++; in hns_roce_init_qp_table()
1483 ida_init(&hr_dev->qp_table.bank[i].ida); in hns_roce_init_qp_table()
1484 hr_dev->qp_table.bank[i].max = hr_dev->caps.num_qps / in hns_roce_init_qp_table()
1486 hr_dev->qp_table.bank[i].next = hr_dev->qp_table.bank[i].min; in hns_roce_init_qp_table()
1492 void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev) in hns_roce_cleanup_qp_table() argument
1497 ida_destroy(&hr_dev->qp_table.bank[i].ida); in hns_roce_cleanup_qp_table()
1498 kfree(hr_dev->qp_table.idx_table.spare_idx); in hns_roce_cleanup_qp_table()
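The matches above trace the QP setup and teardown path through hns_roce_create_qp_common() and hns_roce_qp_destroy(). Below is a condensed, illustrative sketch of that ordering, not the driver's verbatim code: it keeps only the allocation steps visible in the listing (SQ/RQ sizing, WQE buffer, QPN, doorbells, QPC, xarray store) and unwinds them in the reverse order shown by the error-path matches. The kernel-WRID, flush-work and flow-control steps are omitted, the err_* labels are placeholder names, and the snippet only compiles in the context of the driver source file that defines these static helpers.

static int create_qp_sketch(struct hns_roce_dev *hr_dev,
                            struct ib_qp_init_attr *init_attr,
                            struct ib_udata *udata,
                            struct hns_roce_qp *hr_qp)
{
        /* ucmd/resp are the create-QP ABI structures; zero-initialized for brevity */
        struct hns_roce_ib_create_qp_resp resp = {};
        struct hns_roce_ib_create_qp ucmd = {};
        int ret;

        /* 1. Size the SQ/RQ and parse the user command, if any. */
        ret = set_qp_param(hr_dev, hr_qp, init_attr, udata, &ucmd);
        if (ret)
                return ret;

        /* 2. Allocate the WQE buffer and its MTR mapping. */
        ret = alloc_qp_buf(hr_dev, hr_qp, init_attr, udata, ucmd.buf_addr);
        if (ret)
                return ret;

        /* 3. Reserve a QPN from the per-bank IDA. */
        ret = alloc_qpn(hr_dev, hr_qp, init_attr);
        if (ret)
                goto err_buf;

        /* 4. Set up the doorbells (user SDB/RDB or kernel db_reg, direct WQE mmap). */
        ret = alloc_qp_db(hr_dev, hr_qp, init_attr, udata, &ucmd, &resp);
        if (ret)
                goto err_qpn;

        /* 5. Get the QPC/IRRL/TRRL/SCCC context table entries for this QPN. */
        ret = alloc_qpc(hr_dev, hr_qp);
        if (ret)
                goto err_db;

        /* 6. Publish the QP in the xarray and on the device QP list. */
        ret = hns_roce_qp_store(hr_dev, hr_qp, init_attr);
        if (ret)
                goto err_qpc;

        return 0;

        /* Unwind in the reverse order used by the error-path matches above. */
err_qpc:
        free_qpc(hr_dev, hr_qp);
err_db:
        free_qp_db(hr_dev, hr_qp, udata);
err_qpn:
        free_qpn(hr_dev, hr_qp);
err_buf:
        free_qp_buf(hr_dev, hr_qp);
        return ret;
}

hns_roce_qp_destroy() releases the same resources, as the last group of matches shows: free_qpc(), free_qpn(), free_qp_buf() and finally free_qp_db().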