/Linux-v4.19/drivers/infiniband/hw/hns/

D | hns_roce_main.c |
    56   int hns_get_gid_index(struct hns_roce_dev *hr_dev, u8 port, int gid_index)   in hns_get_gid_index() argument
    58   return gid_index * hr_dev->caps.num_ports + port;   in hns_get_gid_index()
    62   static int hns_roce_set_mac(struct hns_roce_dev *hr_dev, u8 port, u8 *addr)   in hns_roce_set_mac() argument
    67   if (!memcmp(hr_dev->dev_addr[port], addr, MAC_ADDR_OCTET_NUM))   in hns_roce_set_mac()
    71   hr_dev->dev_addr[port][i] = addr[i];   in hns_roce_set_mac()
    73   phy_port = hr_dev->iboe.phy_port[port];   in hns_roce_set_mac()
    74   return hr_dev->hw->set_mac(hr_dev, phy_port, addr);   in hns_roce_set_mac()
    79   struct hns_roce_dev *hr_dev = to_hr_dev(attr->device);   in hns_roce_add_gid() local
    84   if (port >= hr_dev->caps.num_ports)   in hns_roce_add_gid()
    87   spin_lock_irqsave(&hr_dev->iboe.lock, flags);   in hns_roce_add_gid()
    [all …]

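The first two hns_roce_main.c matches show how the driver folds the per-port GID tables into one flat hardware table: slot = gid_index * num_ports + port, so the entries of all ports are interleaved. A small userspace sketch of that mapping (the struct and function names below are made up for illustration, not driver code):

    #include <stdio.h>

    /* Hypothetical stand-in for the relevant part of hr_dev->caps. */
    struct caps {
        unsigned int num_ports;
    };

    /* Same arithmetic as hns_get_gid_index(): per-port index N of port P
     * lands in flat slot N * num_ports + P. */
    static unsigned int gid_slot(const struct caps *caps, unsigned int port,
                                 unsigned int gid_index)
    {
        return gid_index * caps->num_ports + port;
    }

    int main(void)
    {
        struct caps caps = { .num_ports = 2 };
        unsigned int port, idx;

        for (idx = 0; idx < 3; idx++)
            for (port = 0; port < caps.num_ports; port++)
                printf("port %u, gid_index %u -> slot %u\n",
                       port, idx, gid_slot(&caps, port, idx));
        return 0;
    }

With two ports, (port, index) pairs (0,0), (1,0), (0,1), (1,1), (0,2), (1,2) land in slots 0 through 5, which is the interleaved layout the formula implies.
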
D | hns_roce_cq.c |
    51   struct hns_roce_dev *hr_dev;   in hns_roce_ib_cq_event() local
    56   hr_dev = to_hr_dev(ibcq->device);   in hns_roce_ib_cq_event()
    61   dev_err(hr_dev->dev,   in hns_roce_ib_cq_event()
    83   static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev, int nent,   in hns_roce_cq_alloc() argument
    91   struct device *dev = hr_dev->dev;   in hns_roce_cq_alloc()
    96   cq_table = &hr_dev->cq_table;   in hns_roce_cq_alloc()
    99   if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))   in hns_roce_cq_alloc()
    100  mtt_table = &hr_dev->mr_table.mtt_cqe_table;   in hns_roce_cq_alloc()
    102  mtt_table = &hr_dev->mr_table.mtt_table;   in hns_roce_cq_alloc()
    104  mtts = hns_roce_table_find(hr_dev, mtt_table,   in hns_roce_cq_alloc()
    [all …]

D | hns_roce_pd.c |
    38   static int hns_roce_pd_alloc(struct hns_roce_dev *hr_dev, unsigned long *pdn)   in hns_roce_pd_alloc() argument
    40   return hns_roce_bitmap_alloc(&hr_dev->pd_bitmap, pdn) ? -ENOMEM : 0;   in hns_roce_pd_alloc()
    43   static void hns_roce_pd_free(struct hns_roce_dev *hr_dev, unsigned long pdn)   in hns_roce_pd_free() argument
    45   hns_roce_bitmap_free(&hr_dev->pd_bitmap, pdn, BITMAP_NO_RR);   in hns_roce_pd_free()
    48   int hns_roce_init_pd_table(struct hns_roce_dev *hr_dev)   in hns_roce_init_pd_table() argument
    50   return hns_roce_bitmap_init(&hr_dev->pd_bitmap, hr_dev->caps.num_pds,   in hns_roce_init_pd_table()
    51   hr_dev->caps.num_pds - 1,   in hns_roce_init_pd_table()
    52   hr_dev->caps.reserved_pds, 0);   in hns_roce_init_pd_table()
    55   void hns_roce_cleanup_pd_table(struct hns_roce_dev *hr_dev)   in hns_roce_cleanup_pd_table() argument
    57   hns_roce_bitmap_cleanup(&hr_dev->pd_bitmap);   in hns_roce_cleanup_pd_table()
    [all …]

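The hns_roce_pd.c hits show that PD numbers come from a bitmap allocator seeded with caps.num_pds and caps.reserved_pds, and that exhaustion is turned into -ENOMEM. A toy model of that ID allocator, assuming a simple first-fit policy (the real hns_roce_bitmap_* helpers live elsewhere in the driver; every name below is local to the sketch):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Toy model of the bitmap pattern the PD code relies on: a fixed-size
     * table of IDs, with the lowest `reserved` IDs never handed out
     * (caps.reserved_pds in the driver). */
    struct id_bitmap {
        unsigned long num;       /* caps.num_pds in the driver      */
        unsigned long reserved;  /* caps.reserved_pds in the driver */
        unsigned char *used;
    };

    static int id_bitmap_init(struct id_bitmap *bm, unsigned long num,
                              unsigned long reserved)
    {
        bm->used = calloc(num, 1);
        if (!bm->used)
            return -1;
        bm->num = num;
        bm->reserved = reserved;
        memset(bm->used, 1, reserved);          /* reserved slots stay taken */
        return 0;
    }

    /* First-fit allocation; returns -1 when the table is exhausted, which is
     * the case hns_roce_pd_alloc() maps to -ENOMEM. */
    static int id_bitmap_alloc(struct id_bitmap *bm, unsigned long *id)
    {
        unsigned long i;

        for (i = bm->reserved; i < bm->num; i++) {
            if (!bm->used[i]) {
                bm->used[i] = 1;
                *id = i;
                return 0;
            }
        }
        return -1;
    }

    static void id_bitmap_free(struct id_bitmap *bm, unsigned long id)
    {
        if (id >= bm->reserved && id < bm->num)
            bm->used[id] = 0;
    }

    int main(void)
    {
        struct id_bitmap bm;
        unsigned long pdn;

        if (id_bitmap_init(&bm, 8, 2))
            return 1;
        while (!id_bitmap_alloc(&bm, &pdn))
            printf("allocated pdn %lu\n", pdn);  /* prints 2 through 7 */
        id_bitmap_free(&bm, 2);
        free(bm.used);
        return 0;
    }
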
D | hns_roce_cmd.c |
    43   static int hns_roce_cmd_mbox_post_hw(struct hns_roce_dev *hr_dev, u64 in_param,   in hns_roce_cmd_mbox_post_hw() argument
    48   struct hns_roce_cmdq *cmd = &hr_dev->cmd;   in hns_roce_cmd_mbox_post_hw()
    52   ret = hr_dev->hw->post_mbox(hr_dev, in_param, out_param, in_modifier,   in hns_roce_cmd_mbox_post_hw()
    60   static int __hns_roce_cmd_mbox_poll(struct hns_roce_dev *hr_dev, u64 in_param,   in __hns_roce_cmd_mbox_poll() argument
    65   struct device *dev = hr_dev->dev;   in __hns_roce_cmd_mbox_poll()
    68   ret = hns_roce_cmd_mbox_post_hw(hr_dev, in_param, out_param,   in __hns_roce_cmd_mbox_poll()
    76   return hr_dev->hw->chk_mbox(hr_dev, timeout);   in __hns_roce_cmd_mbox_poll()
    79   static int hns_roce_cmd_mbox_poll(struct hns_roce_dev *hr_dev, u64 in_param,   in hns_roce_cmd_mbox_poll() argument
    85   down(&hr_dev->cmd.poll_sem);   in hns_roce_cmd_mbox_poll()
    86   ret = __hns_roce_cmd_mbox_poll(hr_dev, in_param, out_param, in_modifier,   in hns_roce_cmd_mbox_poll()
    [all …]

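These matches outline the poll-mode mailbox path: callers serialise on cmd.poll_sem, post the command through hr_dev->hw->post_mbox(), then wait in hr_dev->hw->chk_mbox() until completion or timeout. A compact stand-alone model of that flow, assuming a mutex as a stand-in for the semaphore and fake "hardware" hooks (nothing here is the driver's real API):

    #include <stdio.h>
    #include <pthread.h>

    static pthread_mutex_t poll_sem = PTHREAD_MUTEX_INITIALIZER;
    static unsigned int hw_pending;          /* "cycles" left until completion */

    static int post_mbox(unsigned long long in_param, unsigned long long out_param)
    {
        hw_pending = 3;                      /* pretend the mailbox was kicked */
        printf("posted mailbox: in=%llx out=%llx\n", in_param, out_param);
        return 0;
    }

    static int chk_mbox(unsigned long timeout)
    {
        unsigned long spins;

        for (spins = 0; spins < timeout && hw_pending; spins++)
            hw_pending--;                    /* model the device making progress */
        return hw_pending ? -1 : 0;          /* -1 models the timeout error path */
    }

    static int cmd_mbox_poll(unsigned long long in_param,
                             unsigned long long out_param, unsigned long timeout)
    {
        int ret;

        pthread_mutex_lock(&poll_sem);       /* down(&cmd.poll_sem) in the driver */
        ret = post_mbox(in_param, out_param);
        if (!ret)
            ret = chk_mbox(timeout);
        pthread_mutex_unlock(&poll_sem);     /* up(&cmd.poll_sem) */
        return ret;
    }

    int main(void)
    {
        return cmd_mbox_poll(0x1000, 0x2000, 100) ? 1 : 0;
    }
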
D | hns_roce_hem.c |
    42   bool hns_roce_check_whether_mhop(struct hns_roce_dev *hr_dev, u32 type)   in hns_roce_check_whether_mhop() argument
    44   if ((hr_dev->caps.qpc_hop_num && type == HEM_TYPE_QPC) ||   in hns_roce_check_whether_mhop()
    45   (hr_dev->caps.mpt_hop_num && type == HEM_TYPE_MTPT) ||   in hns_roce_check_whether_mhop()
    46   (hr_dev->caps.cqc_hop_num && type == HEM_TYPE_CQC) ||   in hns_roce_check_whether_mhop()
    47   (hr_dev->caps.srqc_hop_num && type == HEM_TYPE_SRQC) ||   in hns_roce_check_whether_mhop()
    48   (hr_dev->caps.cqe_hop_num && type == HEM_TYPE_CQE) ||   in hns_roce_check_whether_mhop()
    49   (hr_dev->caps.mtt_hop_num && type == HEM_TYPE_MTT))   in hns_roce_check_whether_mhop()
    91   int hns_roce_calc_hem_mhop(struct hns_roce_dev *hr_dev,   in hns_roce_calc_hem_mhop() argument
    95   struct device *dev = hr_dev->dev;   in hns_roce_calc_hem_mhop()
    103  mhop->buf_chunk_size = 1 << (hr_dev->caps.qpc_buf_pg_sz   in hns_roce_calc_hem_mhop()
    [all …]

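hns_roce_check_whether_mhop() simply reports whether the capability field for a given HEM table type has a non-zero hop count. A simplified model covering only a few of the HEM types (the cap field names mirror the matches above; everything else is invented for the sketch):

    #include <stdbool.h>
    #include <stdio.h>

    enum hem_type { HEM_TYPE_QPC, HEM_TYPE_MTPT, HEM_TYPE_CQC, HEM_TYPE_MTT };

    struct caps {
        unsigned int qpc_hop_num;
        unsigned int mpt_hop_num;
        unsigned int cqc_hop_num;
        unsigned int mtt_hop_num;
    };

    /* A table type uses multi-hop addressing when its hop count cap is set. */
    static bool check_whether_mhop(const struct caps *caps, enum hem_type type)
    {
        switch (type) {
        case HEM_TYPE_QPC:  return caps->qpc_hop_num != 0;
        case HEM_TYPE_MTPT: return caps->mpt_hop_num != 0;
        case HEM_TYPE_CQC:  return caps->cqc_hop_num != 0;
        case HEM_TYPE_MTT:  return caps->mtt_hop_num != 0;
        }
        return false;
    }

    int main(void)
    {
        struct caps caps = { .qpc_hop_num = 2 };   /* only QPC uses multi-hop */

        printf("QPC mhop: %d\n", check_whether_mhop(&caps, HEM_TYPE_QPC));
        printf("MTT mhop: %d\n", check_whether_mhop(&caps, HEM_TYPE_MTT));
        return 0;
    }
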
D | hns_roce_qp.c |
    44   void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type)   in hns_roce_qp_event() argument
    46   struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;   in hns_roce_qp_event()
    47   struct device *dev = hr_dev->dev;   in hns_roce_qp_event()
    52   qp = __hns_roce_qp_lookup(hr_dev, qpn);   in hns_roce_qp_event()
    113  static int hns_roce_reserve_range_qp(struct hns_roce_dev *hr_dev, int cnt,   in hns_roce_reserve_range_qp() argument
    116  struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;   in hns_roce_reserve_range_qp()
    145  static int hns_roce_gsi_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn,   in hns_roce_gsi_qp_alloc() argument
    148  struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;   in hns_roce_gsi_qp_alloc()
    157  ret = radix_tree_insert(&hr_dev->qp_table_tree,   in hns_roce_gsi_qp_alloc()
    158  hr_qp->qpn & (hr_dev->caps.num_qps - 1), hr_qp);   in hns_roce_gsi_qp_alloc()
    [all …]

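The qp.c matches show QPs being inserted into a radix tree keyed by qpn & (caps.num_qps - 1): only the low bits of the QPN form the key, which assumes num_qps is a power of two. A sketch of that keying, with a plain array standing in for the kernel radix tree (all names are illustrative):

    #include <stdio.h>
    #include <stdlib.h>

    struct qp {
        unsigned long qpn;
    };

    struct qp_table {
        unsigned long num_qps;   /* power of two, like caps.num_qps */
        struct qp **slots;
    };

    static int qp_insert(struct qp_table *tbl, struct qp *qp)
    {
        unsigned long key = qp->qpn & (tbl->num_qps - 1);

        if (tbl->slots[key])
            return -1;           /* occupied key, like radix_tree_insert() failing */
        tbl->slots[key] = qp;
        return 0;
    }

    static struct qp *qp_lookup(struct qp_table *tbl, unsigned long qpn)
    {
        return tbl->slots[qpn & (tbl->num_qps - 1)];
    }

    int main(void)
    {
        struct qp_table tbl = { .num_qps = 256 };
        struct qp gsi = { .qpn = 1 };

        tbl.slots = calloc(tbl.num_qps, sizeof(*tbl.slots));
        if (!tbl.slots || qp_insert(&tbl, &gsi))
            return 1;
        printf("lookup qpn 1 -> qpn %lu\n", qp_lookup(&tbl, 1)->qpn);
        free(tbl.slots);
        return 0;
    }
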
D | hns_roce_mr.c |
    52   static int hns_roce_sw2hw_mpt(struct hns_roce_dev *hr_dev,   in hns_roce_sw2hw_mpt() argument
    56   return hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, mpt_index, 0,   in hns_roce_sw2hw_mpt()
    61   int hns_roce_hw2sw_mpt(struct hns_roce_dev *hr_dev,   in hns_roce_hw2sw_mpt() argument
    65   return hns_roce_cmd_mbox(hr_dev, 0, mailbox ? mailbox->dma : 0,   in hns_roce_hw2sw_mpt()
    179  static int hns_roce_alloc_mtt_range(struct hns_roce_dev *hr_dev, int order,   in hns_roce_alloc_mtt_range() argument
    182  struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;   in hns_roce_alloc_mtt_range()
    199  if (hns_roce_table_get_range(hr_dev, table, *seg,   in hns_roce_alloc_mtt_range()
    208  int hns_roce_mtt_init(struct hns_roce_dev *hr_dev, int npages, int page_shift,   in hns_roce_mtt_init() argument
    230  ret = hns_roce_alloc_mtt_range(hr_dev, mtt->order, &mtt->first_seg,   in hns_roce_mtt_init()
    238  void hns_roce_mtt_cleanup(struct hns_roce_dev *hr_dev, struct hns_roce_mtt *mtt)   in hns_roce_mtt_cleanup() argument
    [all …]

D | hns_roce_hw_v1.c |
    65   struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);   in hns_roce_v1_post_send() local
    71   struct device *dev = &hr_dev->pdev->dev;   in hns_roce_v1_post_send()
    140  smac = (u8 *)hr_dev->dev_addr[qp->port];   in hns_roce_v1_post_send()
    188  hns_get_gid_index(hr_dev, qp->phy_port,   in hns_roce_v1_post_send()
    291  hr_dev->caps.max_sq_inline) {   in hns_roce_v1_post_send()
    296  hr_dev->caps.max_sq_inline);   in hns_roce_v1_post_send()
    363  struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);   in hns_roce_v1_post_recv() local
    364  struct device *dev = &hr_dev->pdev->dev;   in hns_roce_v1_post_recv()
    452  static void hns_roce_set_db_event_mode(struct hns_roce_dev *hr_dev,   in hns_roce_set_db_event_mode() argument
    458  val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);   in hns_roce_set_db_event_mode()
    [all …]

D | hns_roce_hw_v2.c |
    109  struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);   in set_rwqe_data_seg() local
    116  hr_dev->caps.max_sq_inline) {   in set_rwqe_data_seg()
    118  dev_err(hr_dev->dev, "inline len(1-%d)=%d, illegal",   in set_rwqe_data_seg()
    119  rc_sq_wqe->msg_len, hr_dev->caps.max_sq_inline);   in set_rwqe_data_seg()
    124  dev_err(hr_dev->dev, "Not support inline data!\n");   in set_rwqe_data_seg()
    177  struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);   in hns_roce_v2_post_send() local
    182  struct device *dev = hr_dev->dev;   in hns_roce_v2_post_send()
    263  smac = (u8 *)hr_dev->dev_addr[qp->port];   in hns_roce_v2_post_send()
    362  hns_get_gid_index(hr_dev, qp->phy_port,   in hns_roce_v2_post_send()
    558  struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);   in hns_roce_v2_post_recv() local
    [all …]

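The set_rwqe_data_seg() matches in the v2 code show inline sends being rejected when the total message length exceeds caps.max_sq_inline. A minimal model of just that check (the cap value and all names are invented for the sketch):

    #include <stdio.h>

    #define MAX_SQ_INLINE 32u   /* stand-in for caps.max_sq_inline */

    /* Reject an inline send whose payload does not fit in 1..MAX_SQ_INLINE. */
    static int check_inline_len(unsigned int msg_len, int want_inline)
    {
        if (!want_inline)
            return 0;                        /* normal SGE path, no limit here */
        if (msg_len == 0 || msg_len > MAX_SQ_INLINE) {
            fprintf(stderr, "inline len(1-%u)=%u, illegal\n",
                    MAX_SQ_INLINE, msg_len);
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        printf("16 bytes inline: %s\n", check_inline_len(16, 1) ? "rejected" : "ok");
        printf("64 bytes inline: %s\n", check_inline_len(64, 1) ? "rejected" : "ok");
        return 0;
    }
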
D | hns_roce_device.h |
    617  struct hns_roce_dev *hr_dev;   member
    738  struct hns_roce_dev *hr_dev;   member
    746  int (*reset)(struct hns_roce_dev *hr_dev, bool enable);
    747  int (*cmq_init)(struct hns_roce_dev *hr_dev);
    748  void (*cmq_exit)(struct hns_roce_dev *hr_dev);
    749  int (*hw_profile)(struct hns_roce_dev *hr_dev);
    750  int (*hw_init)(struct hns_roce_dev *hr_dev);
    751  void (*hw_exit)(struct hns_roce_dev *hr_dev);
    752  int (*post_mbox)(struct hns_roce_dev *hr_dev, u64 in_param,
    755  int (*chk_mbox)(struct hns_roce_dev *hr_dev, unsigned long timeout);
    [all …]

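hns_roce_device.h defines the per-hardware-revision ops table that the other matches call through (hr_dev->hw->post_mbox(), hr_dev->hw->chk_mbox(), hr_dev->hw->set_mac(), ...). A stripped-down illustration of the same function-pointer-table pattern, with only two hooks and names invented for the sketch:

    #include <stdio.h>

    struct dev_ctx;                        /* stand-in for struct hns_roce_dev */

    /* The "hw" table: core code calls through it and never needs to know
     * which hardware revision is underneath. */
    struct hw_ops {
        int  (*hw_init)(struct dev_ctx *ctx);
        void (*hw_exit)(struct dev_ctx *ctx);
    };

    struct dev_ctx {
        const struct hw_ops *hw;
        const char *name;
    };

    static int v2_hw_init(struct dev_ctx *ctx)
    {
        printf("%s: v2 init\n", ctx->name);
        return 0;
    }

    static void v2_hw_exit(struct dev_ctx *ctx)
    {
        printf("%s: v2 exit\n", ctx->name);
    }

    static const struct hw_ops hw_v2_ops = {
        .hw_init = v2_hw_init,
        .hw_exit = v2_hw_exit,
    };

    int main(void)
    {
        struct dev_ctx dev = { .hw = &hw_v2_ops, .name = "hns0" };

        if (dev.hw->hw_init(&dev))         /* dispatch through the table */
            return 1;
        dev.hw->hw_exit(&dev);
        return 0;
    }
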
D | hns_roce_hem.h |
    105  void hns_roce_free_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem *hem);
    106  int hns_roce_table_get(struct hns_roce_dev *hr_dev,
    108  void hns_roce_table_put(struct hns_roce_dev *hr_dev,
    110  void *hns_roce_table_find(struct hns_roce_dev *hr_dev,
    113  int hns_roce_table_get_range(struct hns_roce_dev *hr_dev,
    116  void hns_roce_table_put_range(struct hns_roce_dev *hr_dev,
    119  int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev,
    123  void hns_roce_cleanup_hem_table(struct hns_roce_dev *hr_dev,
    125  void hns_roce_cleanup_hem(struct hns_roce_dev *hr_dev);
    126  int hns_roce_calc_hem_mhop(struct hns_roce_dev *hr_dev,
    [all …]

D | hns_roce_db.c |
    125  int hns_roce_alloc_db(struct hns_roce_dev *hr_dev, struct hns_roce_db *db,   in hns_roce_alloc_db() argument
    131  mutex_lock(&hr_dev->pgdir_mutex);   in hns_roce_alloc_db()
    133  list_for_each_entry(pgdir, &hr_dev->pgdir_list, list)   in hns_roce_alloc_db()
    137  pgdir = hns_roce_alloc_db_pgdir(hr_dev->dev);   in hns_roce_alloc_db()
    143  list_add(&pgdir->list, &hr_dev->pgdir_list);   in hns_roce_alloc_db()
    149  mutex_unlock(&hr_dev->pgdir_mutex);   in hns_roce_alloc_db()
    155  void hns_roce_free_db(struct hns_roce_dev *hr_dev, struct hns_roce_db *db)   in hns_roce_free_db() argument
    160  mutex_lock(&hr_dev->pgdir_mutex);   in hns_roce_free_db()
    174  dma_free_coherent(hr_dev->dev, PAGE_SIZE, db->u.pgdir->page,   in hns_roce_free_db()
    180  mutex_unlock(&hr_dev->pgdir_mutex);   in hns_roce_free_db()

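hns_roce_alloc_db() hands out doorbell records from shared pages: the pgdir list is walked under pgdir_mutex and a new page is added only when every existing one is full. A rough userspace model of the allocation side, assuming a singly linked list, plain malloc() instead of dma_alloc_coherent(), and no free path:

    #include <stdio.h>
    #include <stdlib.h>
    #include <pthread.h>

    #define PAGE_SIZE   4096u
    #define DB_PER_PAGE (PAGE_SIZE / sizeof(unsigned int))

    struct db_pgdir {
        struct db_pgdir *next;
        unsigned int used;                 /* slots handed out from this page */
        unsigned int *page;
    };

    struct db_ctx {
        pthread_mutex_t pgdir_mutex;
        struct db_pgdir *pgdir_list;
    };

    static int alloc_db(struct db_ctx *ctx, unsigned int **db_rec)
    {
        struct db_pgdir *pgdir;
        int ret = 0;

        pthread_mutex_lock(&ctx->pgdir_mutex);

        /* Reuse an existing page that still has a free slot. */
        for (pgdir = ctx->pgdir_list; pgdir; pgdir = pgdir->next)
            if (pgdir->used < DB_PER_PAGE)
                break;

        if (!pgdir) {                      /* every page is full: grow the list */
            pgdir = calloc(1, sizeof(*pgdir));
            if (pgdir)
                pgdir->page = calloc(DB_PER_PAGE, sizeof(unsigned int));
            if (!pgdir || !pgdir->page) {  /* sketch leaks on this path */
                ret = -1;
                goto out;
            }
            pgdir->next = ctx->pgdir_list;
            ctx->pgdir_list = pgdir;
        }

        *db_rec = &pgdir->page[pgdir->used++];
    out:
        pthread_mutex_unlock(&ctx->pgdir_mutex);
        return ret;
    }

    int main(void)
    {
        struct db_ctx ctx = { .pgdir_list = NULL };
        unsigned int *db;

        pthread_mutex_init(&ctx.pgdir_mutex, NULL);
        if (alloc_db(&ctx, &db))
            return 1;
        *db = 0;                           /* a consumer would record indexes here */
        printf("got doorbell slot at %p\n", (void *)db);
        return 0;
    }
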
D | hns_roce_alloc.c |
    160  void hns_roce_buf_free(struct hns_roce_dev *hr_dev, u32 size,   in hns_roce_buf_free() argument
    164  struct device *dev = hr_dev->dev;   in hns_roce_buf_free()
    179  int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,   in hns_roce_buf_alloc() argument
    184  struct device *dev = hr_dev->dev;   in hns_roce_buf_alloc()
    236  hns_roce_buf_free(hr_dev, size, buf);   in hns_roce_buf_alloc()
    240  void hns_roce_cleanup_bitmap(struct hns_roce_dev *hr_dev)   in hns_roce_cleanup_bitmap() argument
    242  hns_roce_cleanup_qp_table(hr_dev);   in hns_roce_cleanup_bitmap()
    243  hns_roce_cleanup_cq_table(hr_dev);   in hns_roce_cleanup_bitmap()
    244  hns_roce_cleanup_mr_table(hr_dev);   in hns_roce_cleanup_bitmap()
    245  hns_roce_cleanup_pd_table(hr_dev);   in hns_roce_cleanup_bitmap()
    [all …]

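hns_roce_buf_alloc() takes a size and a max_direct limit, which suggests the usual split between one contiguous buffer for small requests and a list of page-sized chunks for large ones. A toy version of that decision (PAGE_SIZE and all names are local to the sketch; error unwinding is mostly omitted):

    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_SIZE 4096u

    struct buf {
        unsigned int nbufs;     /* number of chunks           */
        int is_direct;          /* 1 = single contiguous blob */
        void **pages;
    };

    static int buf_alloc(struct buf *buf, unsigned int size, unsigned int max_direct)
    {
        unsigned int i;

        if (size <= max_direct) {           /* small: one contiguous allocation */
            buf->is_direct = 1;
            buf->nbufs = 1;
            buf->pages = malloc(sizeof(*buf->pages));
            if (!buf->pages)
                return -1;
            buf->pages[0] = malloc(size);
            return buf->pages[0] ? 0 : -1;
        }

        buf->is_direct = 0;                 /* large: fall back to a page list */
        buf->nbufs = (size + PAGE_SIZE - 1) / PAGE_SIZE;
        buf->pages = calloc(buf->nbufs, sizeof(*buf->pages));
        if (!buf->pages)
            return -1;
        for (i = 0; i < buf->nbufs; i++) {
            buf->pages[i] = malloc(PAGE_SIZE);
            if (!buf->pages[i])
                return -1;                  /* a real implementation would unwind */
        }
        return 0;
    }

    static void buf_free(struct buf *buf)
    {
        unsigned int i;

        for (i = 0; i < buf->nbufs; i++)
            free(buf->pages[i]);
        free(buf->pages);
    }

    int main(void)
    {
        struct buf small = { 0 }, big = { 0 };

        if (buf_alloc(&small, 2048, 2 * PAGE_SIZE) ||
            buf_alloc(&big, 65536, 2 * PAGE_SIZE))
            return 1;
        printf("small: direct=%d chunks=%u\n", small.is_direct, small.nbufs);
        printf("big:   direct=%d chunks=%u\n", big.is_direct, big.nbufs);
        buf_free(&small);
        buf_free(&big);
        return 0;
    }

The cleanup matches also show the teardown order in hns_roce_cleanup_bitmap(): QP, CQ, MR, then PD tables, i.e. the reverse of the dependency order they were set up in.
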
D | hns_roce_cmd.h |
    125  int hns_roce_cmd_mbox(struct hns_roce_dev *hr_dev, u64 in_param, u64 out_param,
    130  *hns_roce_alloc_cmd_mailbox(struct hns_roce_dev *hr_dev);
    131  void hns_roce_free_cmd_mailbox(struct hns_roce_dev *hr_dev,

D | hns_roce_ah.c |
    46   struct hns_roce_dev *hr_dev = to_hr_dev(ibpd->device);   in hns_roce_create_ah() local
    48   struct device *dev = hr_dev->dev;   in hns_roce_create_ah()