/*
 * Copyright (c) 2016 Hisilicon Limited.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_cache.h>
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_hem.h"

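/*
 * Program a port's MAC address into the hardware. On HIP09 and later
 * revisions this path returns early, so the MAC is presumably managed
 * elsewhere; on older hardware the address is cached in dev_addr[] and
 * pushed through the hw->set_mac hook when it actually changes.
 */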
static int hns_roce_set_mac(struct hns_roce_dev *hr_dev, u32 port,
			    const u8 *addr)
{
	u8 phy_port;
	u32 i;

	if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
		return 0;

	if (!memcmp(hr_dev->dev_addr[port], addr, ETH_ALEN))
		return 0;

	for (i = 0; i < ETH_ALEN; i++)
		hr_dev->dev_addr[port][i] = addr[i];

	phy_port = hr_dev->iboe.phy_port[port];
	return hr_dev->hw->set_mac(hr_dev, phy_port, addr);
}

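/*
 * GID table hooks for the RDMA core. attr->port_num is 1-based, so it is
 * converted to a 0-based index before validating against the port count;
 * deletion passes NULL to hw->set_gid to clear the entry.
 */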
static int hns_roce_add_gid(const struct ib_gid_attr *attr, void **context)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(attr->device);
	u32 port = attr->port_num - 1;

	if (port >= hr_dev->caps.num_ports)
		return -EINVAL;

	return hr_dev->hw->set_gid(hr_dev, attr->index, &attr->gid, attr);
}

static int hns_roce_del_gid(const struct ib_gid_attr *attr, void **context)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(attr->device);
	u32 port = attr->port_num - 1;

	if (port >= hr_dev->caps.num_ports)
		return -EINVAL;

	return hr_dev->hw->set_gid(hr_dev, attr->index, NULL, NULL);
}

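/*
 * Handle a netdev event on one port: events that may change the MAC
 * refresh it via hns_roce_set_mac(); NETDEV_DOWN is a deliberate no-op
 * (see the comment in the switch); anything else is only logged at
 * debug level.
 */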
static int handle_en_event(struct hns_roce_dev *hr_dev, u32 port,
			   unsigned long event)
{
	struct device *dev = hr_dev->dev;
	struct net_device *netdev;
	int ret = 0;

	netdev = hr_dev->iboe.netdevs[port];
	if (!netdev) {
		dev_err(dev, "can't find netdev on port(%u)!\n", port);
		return -ENODEV;
	}

	switch (event) {
	case NETDEV_UP:
	case NETDEV_CHANGE:
	case NETDEV_REGISTER:
	case NETDEV_CHANGEADDR:
		ret = hns_roce_set_mac(hr_dev, port, netdev->dev_addr);
		break;
	case NETDEV_DOWN:
		/*
		 * The v1 engine only supports taking all ports down
		 * together, so nothing is done for a single port here.
		 */
		break;
	default:
		dev_dbg(dev, "NETDEV event = 0x%x!\n", (u32)(event));
		break;
	}

	return ret;
}

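/*
 * Notifier callback registered with the netdev layer: match the event's
 * netdev against the ports this device tracks and dispatch to
 * handle_en_event(). Always returns NOTIFY_DONE so other notifiers run.
 */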
static int hns_roce_netdev_event(struct notifier_block *self,
				 unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct hns_roce_ib_iboe *iboe = NULL;
	struct hns_roce_dev *hr_dev = NULL;
	int ret;
	u32 port;

	hr_dev = container_of(self, struct hns_roce_dev, iboe.nb);
	iboe = &hr_dev->iboe;

	for (port = 0; port < hr_dev->caps.num_ports; port++) {
		if (dev == iboe->netdevs[port]) {
			ret = handle_en_event(hr_dev, port, event);
			if (ret)
				return NOTIFY_DONE;
			break;
		}
	}

	return NOTIFY_DONE;
}

static int hns_roce_setup_mtu_mac(struct hns_roce_dev *hr_dev)
{
	int ret;
	u8 i;

	for (i = 0; i < hr_dev->caps.num_ports; i++) {
		ret = hns_roce_set_mac(hr_dev, i,
				       hr_dev->iboe.netdevs[i]->dev_addr);
		if (ret)
			return ret;
	}

	return 0;
}

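/* Fill in ib_device_attr for ib_query_device() from the probed device caps. */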
static int hns_roce_query_device(struct ib_device *ib_dev,
				 struct ib_device_attr *props,
				 struct ib_udata *uhw)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);

	memset(props, 0, sizeof(*props));

	props->fw_ver = hr_dev->caps.fw_ver;
	props->sys_image_guid = cpu_to_be64(hr_dev->sys_image_guid);
	props->max_mr_size = (u64)(~(0ULL));
	props->page_size_cap = hr_dev->caps.page_size_cap;
	props->vendor_id = hr_dev->vendor_id;
	props->vendor_part_id = hr_dev->vendor_part_id;
	props->hw_ver = hr_dev->hw_rev;
	props->max_qp = hr_dev->caps.num_qps;
	props->max_qp_wr = hr_dev->caps.max_wqes;
	props->device_cap_flags = IB_DEVICE_PORT_ACTIVE_EVENT |
				  IB_DEVICE_RC_RNR_NAK_GEN;
	props->max_send_sge = hr_dev->caps.max_sq_sg;
	props->max_recv_sge = hr_dev->caps.max_rq_sg;
	props->max_sge_rd = 1;
	props->max_cq = hr_dev->caps.num_cqs;
	props->max_cqe = hr_dev->caps.max_cqes;
	props->max_mr = hr_dev->caps.num_mtpts;
	props->max_pd = hr_dev->caps.num_pds;
	props->max_qp_rd_atom = hr_dev->caps.max_qp_dest_rdma;
	props->max_qp_init_rd_atom = hr_dev->caps.max_qp_init_rdma;
	props->atomic_cap = hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_ATOMIC ?
			    IB_ATOMIC_HCA : IB_ATOMIC_NONE;
	props->max_pkeys = 1;
	props->local_ca_ack_delay = hr_dev->caps.local_ca_ack_delay;
	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) {
		props->max_srq = hr_dev->caps.num_srqs;
		props->max_srq_wr = hr_dev->caps.max_srq_wrs;
		props->max_srq_sge = hr_dev->caps.max_srq_sges;
	}

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_FRMR &&
	    hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
		props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
		props->max_fast_reg_page_list_len = HNS_ROCE_FRMR_MAX_PA;
	}

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC)
		props->device_cap_flags |= IB_DEVICE_XRC;

	return 0;
}

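/*
 * Report per-port attributes. Link state and active MTU are derived from
 * the bound netdev while holding iboe.lock, presumably so a concurrent
 * netdev change cannot be observed half-way through.
 */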
static int hns_roce_query_port(struct ib_device *ib_dev, u32 port_num,
			       struct ib_port_attr *props)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
	struct device *dev = hr_dev->dev;
	struct net_device *net_dev;
	unsigned long flags;
	enum ib_mtu mtu;
	u32 port;
	int ret;

	port = port_num - 1;

	/* props is zeroed by the caller, no need to clear it here */

	props->max_mtu = hr_dev->caps.max_mtu;
	props->gid_tbl_len = hr_dev->caps.gid_table_len[port];
	props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
				IB_PORT_VENDOR_CLASS_SUP |
				IB_PORT_BOOT_MGMT_SUP;
	props->max_msg_sz = HNS_ROCE_MAX_MSG_LEN;
	props->pkey_tbl_len = 1;
	ret = ib_get_eth_speed(ib_dev, port_num, &props->active_speed,
			       &props->active_width);
	if (ret)
		ibdev_warn(ib_dev, "failed to get speed, ret = %d.\n", ret);

	spin_lock_irqsave(&hr_dev->iboe.lock, flags);

	net_dev = hr_dev->iboe.netdevs[port];
	if (!net_dev) {
		spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
		dev_err(dev, "can't find netdev on port(%u)!\n", port);
		return -EINVAL;
	}

	mtu = iboe_get_mtu(net_dev->mtu);
	props->active_mtu = mtu ? min(props->max_mtu, mtu) : IB_MTU_256;
	props->state = netif_running(net_dev) && netif_carrier_ok(net_dev) ?
		       IB_PORT_ACTIVE :
		       IB_PORT_DOWN;
	props->phys_state = props->state == IB_PORT_ACTIVE ?
			    IB_PORT_PHYS_STATE_LINK_UP :
			    IB_PORT_PHYS_STATE_DISABLED;

	spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);

	return 0;
}

static enum rdma_link_layer hns_roce_get_link_layer(struct ib_device *device,
						    u32 port_num)
{
	return IB_LINK_LAYER_ETHERNET;
}

static int hns_roce_query_pkey(struct ib_device *ib_dev, u32 port, u16 index,
			       u16 *pkey)
{
	if (index > 0)
		return -EINVAL;

	*pkey = PKEY_ID;

	return 0;
}

static int hns_roce_modify_device(struct ib_device *ib_dev, int mask,
				  struct ib_device_modify *props)
{
	unsigned long flags;

	if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
		return -EOPNOTSUPP;

	if (mask & IB_DEVICE_MODIFY_NODE_DESC) {
		spin_lock_irqsave(&to_hr_dev(ib_dev)->sm_lock, flags);
		memcpy(ib_dev->node_desc, props->node_desc, NODE_DESC_SIZE);
		spin_unlock_irqrestore(&to_hr_dev(ib_dev)->sm_lock, flags);
	}

	return 0;
}

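/*
 * Wrap rdma_user_mmap_entry_insert_*() for the driver's two mmap types.
 * The doorbell entry must live at pgoff 0 for compatibility with old
 * userspace, so DWQE entries are allocated from the [1, U32_MAX] range.
 */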
struct hns_user_mmap_entry *
hns_roce_user_mmap_entry_insert(struct ib_ucontext *ucontext, u64 address,
				size_t length,
				enum hns_roce_mmap_type mmap_type)
{
	struct hns_user_mmap_entry *entry;
	int ret;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return NULL;

	entry->address = address;
	entry->mmap_type = mmap_type;

	switch (mmap_type) {
	/* pgoff 0 must be used by DB for compatibility */
	case HNS_ROCE_MMAP_TYPE_DB:
		ret = rdma_user_mmap_entry_insert_exact(
				ucontext, &entry->rdma_entry, length, 0);
		break;
	case HNS_ROCE_MMAP_TYPE_DWQE:
		ret = rdma_user_mmap_entry_insert_range(
				ucontext, &entry->rdma_entry, length, 1,
				U32_MAX);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	if (ret) {
		kfree(entry);
		return NULL;
	}

	return entry;
}

static void hns_roce_dealloc_uar_entry(struct hns_roce_ucontext *context)
{
	if (context->db_mmap_entry)
		rdma_user_mmap_entry_remove(
			&context->db_mmap_entry->rdma_entry);
}

static int hns_roce_alloc_uar_entry(struct ib_ucontext *uctx)
{
	struct hns_roce_ucontext *context = to_hr_ucontext(uctx);
	u64 address;

	address = context->uar.pfn << PAGE_SHIFT;
	context->db_mmap_entry = hns_roce_user_mmap_entry_insert(
		uctx, address, PAGE_SIZE, HNS_ROCE_MMAP_TYPE_DB);
	if (!context->db_mmap_entry)
		return -ENOMEM;

	return 0;
}

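/*
 * Create a user context: negotiate optional features with userspace via
 * ucmd.config / resp.config, allocate a UAR plus its doorbell mmap entry,
 * and report the table sizes and CQE size back to the user library.
 */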
static int hns_roce_alloc_ucontext(struct ib_ucontext *uctx,
				   struct ib_udata *udata)
{
	struct hns_roce_ucontext *context = to_hr_ucontext(uctx);
	struct hns_roce_dev *hr_dev = to_hr_dev(uctx->device);
	struct hns_roce_ib_alloc_ucontext_resp resp = {};
	struct hns_roce_ib_alloc_ucontext ucmd = {};
	int ret;

	if (!hr_dev->active)
		return -EAGAIN;

	resp.qp_tab_size = hr_dev->caps.num_qps;
	resp.srq_tab_size = hr_dev->caps.num_srqs;

	ret = ib_copy_from_udata(&ucmd, udata,
				 min(udata->inlen, sizeof(ucmd)));
	if (ret)
		return ret;

	if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
		context->config = ucmd.config & HNS_ROCE_EXSGE_FLAGS;

	if (context->config & HNS_ROCE_EXSGE_FLAGS) {
		resp.config |= HNS_ROCE_RSP_EXSGE_FLAGS;
		resp.max_inline_data = hr_dev->caps.max_sq_inline;
	}

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) {
		context->config |= ucmd.config & HNS_ROCE_RQ_INLINE_FLAGS;
		if (context->config & HNS_ROCE_RQ_INLINE_FLAGS)
			resp.config |= HNS_ROCE_RSP_RQ_INLINE_FLAGS;
	}

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_CQE_INLINE) {
		context->config |= ucmd.config & HNS_ROCE_CQE_INLINE_FLAGS;
		if (context->config & HNS_ROCE_CQE_INLINE_FLAGS)
			resp.config |= HNS_ROCE_RSP_CQE_INLINE_FLAGS;
	}

	ret = hns_roce_uar_alloc(hr_dev, &context->uar);
	if (ret)
		goto error_fail_uar_alloc;

	ret = hns_roce_alloc_uar_entry(uctx);
	if (ret)
		goto error_fail_uar_entry;

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_CQ_RECORD_DB ||
	    hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB) {
		INIT_LIST_HEAD(&context->page_list);
		mutex_init(&context->page_mutex);
	}

	resp.cqe_size = hr_dev->caps.cqe_sz;

	ret = ib_copy_to_udata(udata, &resp,
			       min(udata->outlen, sizeof(resp)));
	if (ret)
		goto error_fail_copy_to_udata;

	return 0;

error_fail_copy_to_udata:
	hns_roce_dealloc_uar_entry(context);

error_fail_uar_entry:
	ida_free(&hr_dev->uar_ida.ida, (int)context->uar.logic_idx);

error_fail_uar_alloc:
	return ret;
}

static void hns_roce_dealloc_ucontext(struct ib_ucontext *ibcontext)
{
	struct hns_roce_ucontext *context = to_hr_ucontext(ibcontext);
	struct hns_roce_dev *hr_dev = to_hr_dev(ibcontext->device);

	hns_roce_dealloc_uar_entry(context);

	ida_free(&hr_dev->uar_ida.ida, (int)context->uar.logic_idx);
}

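/*
 * mmap handler: look up the entry userspace asked for by pgoff and map
 * it with device (non-cached) protection; both supported types are
 * doorbell regions mapped as MMIO.
 */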
static int hns_roce_mmap(struct ib_ucontext *uctx, struct vm_area_struct *vma)
{
	struct rdma_user_mmap_entry *rdma_entry;
	struct hns_user_mmap_entry *entry;
	phys_addr_t pfn;
	pgprot_t prot;
	int ret;

	rdma_entry = rdma_user_mmap_entry_get_pgoff(uctx, vma->vm_pgoff);
	if (!rdma_entry)
		return -EINVAL;

	entry = to_hns_mmap(rdma_entry);
	pfn = entry->address >> PAGE_SHIFT;

	switch (entry->mmap_type) {
	case HNS_ROCE_MMAP_TYPE_DB:
	case HNS_ROCE_MMAP_TYPE_DWQE:
		prot = pgprot_device(vma->vm_page_prot);
		break;
	default:
		ret = -EINVAL;
		goto out;
	}

	ret = rdma_user_mmap_io(uctx, vma, pfn, rdma_entry->npages * PAGE_SIZE,
				prot, rdma_entry);

out:
	rdma_user_mmap_entry_put(rdma_entry);
	return ret;
}

static void hns_roce_free_mmap(struct rdma_user_mmap_entry *rdma_entry)
{
	struct hns_user_mmap_entry *entry = to_hns_mmap(rdma_entry);

	kfree(entry);
}

static int hns_roce_port_immutable(struct ib_device *ib_dev, u32 port_num,
				   struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int ret;

	ret = ib_query_port(ib_dev, port_num, &attr);
	if (ret)
		return ret;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;

	immutable->max_mad_size = IB_MGMT_MAD_SIZE;
	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
	if (to_hr_dev(ib_dev)->caps.flags & HNS_ROCE_CAP_FLAG_ROCE_V1_V2)
		immutable->core_cap_flags |= RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;

	return 0;
}

static void hns_roce_disassociate_ucontext(struct ib_ucontext *ibcontext)
{
}

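/*
 * Format the firmware version string. caps.fw_ver packs the version as
 * major in the upper 32 bits, with minor and sub-minor in the high and
 * low 16 bits of the lower word.
 */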
static void hns_roce_get_fw_ver(struct ib_device *device, char *str)
{
	u64 fw_ver = to_hr_dev(device)->caps.fw_ver;
	unsigned int major, minor, sub_minor;

	major = upper_32_bits(fw_ver);
	minor = high_16_bits(lower_32_bits(fw_ver));
	sub_minor = low_16_bits(fw_ver);

	snprintf(str, IB_FW_VERSION_NAME_MAX, "%u.%u.%04u", major, minor,
		 sub_minor);
}

#define HNS_ROCE_HW_CNT(ename, cname) \
	[HNS_ROCE_HW_##ename##_CNT].name = cname

static const struct rdma_stat_desc hns_roce_port_stats_descs[] = {
	HNS_ROCE_HW_CNT(RX_RC_PKT, "rx_rc_pkt"),
	HNS_ROCE_HW_CNT(RX_UC_PKT, "rx_uc_pkt"),
	HNS_ROCE_HW_CNT(RX_UD_PKT, "rx_ud_pkt"),
	HNS_ROCE_HW_CNT(RX_XRC_PKT, "rx_xrc_pkt"),
	HNS_ROCE_HW_CNT(RX_PKT, "rx_pkt"),
	HNS_ROCE_HW_CNT(RX_ERR_PKT, "rx_err_pkt"),
	HNS_ROCE_HW_CNT(RX_CNP_PKT, "rx_cnp_pkt"),
	HNS_ROCE_HW_CNT(TX_RC_PKT, "tx_rc_pkt"),
	HNS_ROCE_HW_CNT(TX_UC_PKT, "tx_uc_pkt"),
	HNS_ROCE_HW_CNT(TX_UD_PKT, "tx_ud_pkt"),
	HNS_ROCE_HW_CNT(TX_XRC_PKT, "tx_xrc_pkt"),
	HNS_ROCE_HW_CNT(TX_PKT, "tx_pkt"),
	HNS_ROCE_HW_CNT(TX_ERR_PKT, "tx_err_pkt"),
	HNS_ROCE_HW_CNT(TX_CNP_PKT, "tx_cnp_pkt"),
	HNS_ROCE_HW_CNT(TRP_GET_MPT_ERR_PKT, "trp_get_mpt_err_pkt"),
	HNS_ROCE_HW_CNT(TRP_GET_IRRL_ERR_PKT, "trp_get_irrl_err_pkt"),
	HNS_ROCE_HW_CNT(ECN_DB, "ecn_doorbell"),
	HNS_ROCE_HW_CNT(RX_BUF, "rx_buffer"),
	HNS_ROCE_HW_CNT(TRP_RX_SOF, "trp_rx_sof"),
	HNS_ROCE_HW_CNT(CQ_CQE, "cq_cqe"),
	HNS_ROCE_HW_CNT(CQ_POE, "cq_poe"),
	HNS_ROCE_HW_CNT(CQ_NOTIFY, "cq_notify"),
};

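/*
 * Hardware counters are only exposed on revisions newer than HIP08 and
 * only for PFs; otherwise no stats structure is allocated. In
 * get_hw_stats() below, port 0 denotes the device-wide counter set,
 * which this driver does not provide.
 */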
static struct rdma_hw_stats *hns_roce_alloc_hw_port_stats(
				struct ib_device *device, u32 port_num)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(device);
	u32 port = port_num - 1;

	if (port >= hr_dev->caps.num_ports) {
		ibdev_err(device, "invalid port num.\n");
		return NULL;
	}

	if (hr_dev->pci_dev->revision <= PCI_REVISION_ID_HIP08 ||
	    hr_dev->is_vf)
		return NULL;

	return rdma_alloc_hw_stats_struct(hns_roce_port_stats_descs,
					  ARRAY_SIZE(hns_roce_port_stats_descs),
					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
}

static int hns_roce_get_hw_stats(struct ib_device *device,
				 struct rdma_hw_stats *stats,
				 u32 port, int index)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(device);
	int num_counters = HNS_ROCE_HW_CNT_TOTAL;
	int ret;

	if (port == 0)
		return 0;

	if (port > hr_dev->caps.num_ports)
		return -EINVAL;

	if (hr_dev->pci_dev->revision <= PCI_REVISION_ID_HIP08 ||
	    hr_dev->is_vf)
		return -EOPNOTSUPP;

	ret = hr_dev->hw->query_hw_counter(hr_dev, stats->value, port,
					   &num_counters);
	if (ret) {
		ibdev_err(device, "failed to query hw counter, ret = %d\n",
			  ret);
		return ret;
	}

	return num_counters;
}

static void hns_roce_unregister_device(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_ib_iboe *iboe = &hr_dev->iboe;

	hr_dev->active = false;
	unregister_netdevice_notifier(&iboe->nb);
	ib_unregister_device(&hr_dev->ib_dev);
}

static const struct ib_device_ops hns_roce_dev_ops = {
	.owner = THIS_MODULE,
	.driver_id = RDMA_DRIVER_HNS,
	.uverbs_abi_ver = 1,
	.uverbs_no_driver_id_binding = 1,

	.get_dev_fw_str = hns_roce_get_fw_ver,
	.add_gid = hns_roce_add_gid,
	.alloc_pd = hns_roce_alloc_pd,
	.alloc_ucontext = hns_roce_alloc_ucontext,
	.create_ah = hns_roce_create_ah,
	.create_user_ah = hns_roce_create_ah,
	.create_cq = hns_roce_create_cq,
	.create_qp = hns_roce_create_qp,
	.dealloc_pd = hns_roce_dealloc_pd,
	.dealloc_ucontext = hns_roce_dealloc_ucontext,
	.del_gid = hns_roce_del_gid,
	.dereg_mr = hns_roce_dereg_mr,
	.destroy_ah = hns_roce_destroy_ah,
	.destroy_cq = hns_roce_destroy_cq,
	.disassociate_ucontext = hns_roce_disassociate_ucontext,
	.get_dma_mr = hns_roce_get_dma_mr,
	.get_link_layer = hns_roce_get_link_layer,
	.get_port_immutable = hns_roce_port_immutable,
	.mmap = hns_roce_mmap,
	.mmap_free = hns_roce_free_mmap,
	.modify_device = hns_roce_modify_device,
	.modify_qp = hns_roce_modify_qp,
	.query_ah = hns_roce_query_ah,
	.query_device = hns_roce_query_device,
	.query_pkey = hns_roce_query_pkey,
	.query_port = hns_roce_query_port,
	.reg_user_mr = hns_roce_reg_user_mr,
	.alloc_hw_port_stats = hns_roce_alloc_hw_port_stats,
	.get_hw_stats = hns_roce_get_hw_stats,

	INIT_RDMA_OBJ_SIZE(ib_ah, hns_roce_ah, ibah),
	INIT_RDMA_OBJ_SIZE(ib_cq, hns_roce_cq, ib_cq),
	INIT_RDMA_OBJ_SIZE(ib_pd, hns_roce_pd, ibpd),
	INIT_RDMA_OBJ_SIZE(ib_qp, hns_roce_qp, ibqp),
	INIT_RDMA_OBJ_SIZE(ib_ucontext, hns_roce_ucontext, ibucontext),
};

static const struct ib_device_ops hns_roce_dev_mr_ops = {
	.rereg_user_mr = hns_roce_rereg_user_mr,
};

static const struct ib_device_ops hns_roce_dev_mw_ops = {
	.alloc_mw = hns_roce_alloc_mw,
	.dealloc_mw = hns_roce_dealloc_mw,

	INIT_RDMA_OBJ_SIZE(ib_mw, hns_roce_mw, ibmw),
};

static const struct ib_device_ops hns_roce_dev_frmr_ops = {
	.alloc_mr = hns_roce_alloc_mr,
	.map_mr_sg = hns_roce_map_mr_sg,
};

static const struct ib_device_ops hns_roce_dev_srq_ops = {
	.create_srq = hns_roce_create_srq,
	.destroy_srq = hns_roce_destroy_srq,

	INIT_RDMA_OBJ_SIZE(ib_srq, hns_roce_srq, ibsrq),
};

static const struct ib_device_ops hns_roce_dev_xrcd_ops = {
	.alloc_xrcd = hns_roce_alloc_xrcd,
	.dealloc_xrcd = hns_roce_dealloc_xrcd,

	INIT_RDMA_OBJ_SIZE(ib_xrcd, hns_roce_xrcd, ibxrcd),
};

static const struct ib_device_ops hns_roce_dev_restrack_ops = {
	.fill_res_cq_entry = hns_roce_fill_res_cq_entry,
	.fill_res_cq_entry_raw = hns_roce_fill_res_cq_entry_raw,
	.fill_res_qp_entry = hns_roce_fill_res_qp_entry,
	.fill_res_qp_entry_raw = hns_roce_fill_res_qp_entry_raw,
	.fill_res_mr_entry = hns_roce_fill_res_mr_entry,
	.fill_res_mr_entry_raw = hns_roce_fill_res_mr_entry_raw,
};

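/*
 * Register with the RDMA core: install the base ops plus whichever
 * optional op tables the probed capability flags allow, bind the port
 * netdevs, then register with the core. The netdev notifier is hooked up
 * last, presumably so events cannot observe a half-initialized device.
 */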
static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
{
	int ret;
	struct hns_roce_ib_iboe *iboe = NULL;
	struct ib_device *ib_dev = NULL;
	struct device *dev = hr_dev->dev;
	unsigned int i;

	iboe = &hr_dev->iboe;
	spin_lock_init(&iboe->lock);

	ib_dev = &hr_dev->ib_dev;

	ib_dev->node_type = RDMA_NODE_IB_CA;
	ib_dev->dev.parent = dev;

	ib_dev->phys_port_cnt = hr_dev->caps.num_ports;
	ib_dev->local_dma_lkey = hr_dev->caps.reserved_lkey;
	ib_dev->num_comp_vectors = hr_dev->caps.num_comp_vectors;

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_REREG_MR)
		ib_set_device_ops(ib_dev, &hns_roce_dev_mr_ops);

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_MW)
		ib_set_device_ops(ib_dev, &hns_roce_dev_mw_ops);

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_FRMR)
		ib_set_device_ops(ib_dev, &hns_roce_dev_frmr_ops);

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) {
		ib_set_device_ops(ib_dev, &hns_roce_dev_srq_ops);
		ib_set_device_ops(ib_dev, hr_dev->hw->hns_roce_dev_srq_ops);
	}

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC)
		ib_set_device_ops(ib_dev, &hns_roce_dev_xrcd_ops);

	ib_set_device_ops(ib_dev, hr_dev->hw->hns_roce_dev_ops);
	ib_set_device_ops(ib_dev, &hns_roce_dev_ops);
	ib_set_device_ops(ib_dev, &hns_roce_dev_restrack_ops);
	for (i = 0; i < hr_dev->caps.num_ports; i++) {
		if (!hr_dev->iboe.netdevs[i])
			continue;

		ret = ib_device_set_netdev(ib_dev, hr_dev->iboe.netdevs[i],
					   i + 1);
		if (ret)
			return ret;
	}
	dma_set_max_seg_size(dev, UINT_MAX);
	ret = ib_register_device(ib_dev, "hns_%d", dev);
	if (ret) {
		dev_err(dev, "ib_register_device failed!\n");
		return ret;
	}

	ret = hns_roce_setup_mtu_mac(hr_dev);
	if (ret) {
		dev_err(dev, "setup_mtu_mac failed!\n");
		goto error_failed_setup_mtu_mac;
	}

	iboe->nb.notifier_call = hns_roce_netdev_event;
	ret = register_netdevice_notifier(&iboe->nb);
	if (ret) {
		dev_err(dev, "register_netdevice_notifier failed!\n");
		goto error_failed_setup_mtu_mac;
	}

	hr_dev->active = true;
	return 0;

error_failed_setup_mtu_mac:
	ib_unregister_device(ib_dev);

	return ret;
}

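/*
 * Set up the HEM (Hardware Entry Memory) tables that back the on-chip
 * contexts: MTPT, QPC, IRRL and CQC are mandatory; TRRL, SRQC, SCCC, the
 * QPC/CQC timers and the GMV table are created only when the probed caps
 * say the hardware uses them. On failure, unwinds in reverse order.
 */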
static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
{
	struct device *dev = hr_dev->dev;
	int ret;

	ret = hns_roce_init_hem_table(hr_dev, &hr_dev->mr_table.mtpt_table,
				      HEM_TYPE_MTPT, hr_dev->caps.mtpt_entry_sz,
				      hr_dev->caps.num_mtpts);
	if (ret) {
		dev_err(dev, "failed to init MTPT context memory, aborting.\n");
		return ret;
	}

	ret = hns_roce_init_hem_table(hr_dev, &hr_dev->qp_table.qp_table,
				      HEM_TYPE_QPC, hr_dev->caps.qpc_sz,
				      hr_dev->caps.num_qps);
	if (ret) {
		dev_err(dev, "failed to init QP context memory, aborting.\n");
		goto err_unmap_dmpt;
	}

	ret = hns_roce_init_hem_table(hr_dev, &hr_dev->qp_table.irrl_table,
				      HEM_TYPE_IRRL,
				      hr_dev->caps.irrl_entry_sz *
				      hr_dev->caps.max_qp_init_rdma,
				      hr_dev->caps.num_qps);
	if (ret) {
		dev_err(dev, "failed to init irrl_table memory, aborting.\n");
		goto err_unmap_qp;
	}

	if (hr_dev->caps.trrl_entry_sz) {
		ret = hns_roce_init_hem_table(hr_dev,
					      &hr_dev->qp_table.trrl_table,
					      HEM_TYPE_TRRL,
					      hr_dev->caps.trrl_entry_sz *
					      hr_dev->caps.max_qp_dest_rdma,
					      hr_dev->caps.num_qps);
		if (ret) {
			dev_err(dev,
				"failed to init trrl_table memory, aborting.\n");
			goto err_unmap_irrl;
		}
	}

	ret = hns_roce_init_hem_table(hr_dev, &hr_dev->cq_table.table,
				      HEM_TYPE_CQC, hr_dev->caps.cqc_entry_sz,
				      hr_dev->caps.num_cqs);
	if (ret) {
		dev_err(dev, "failed to init CQ context memory, aborting.\n");
		goto err_unmap_trrl;
	}

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) {
		ret = hns_roce_init_hem_table(hr_dev, &hr_dev->srq_table.table,
					      HEM_TYPE_SRQC,
					      hr_dev->caps.srqc_entry_sz,
					      hr_dev->caps.num_srqs);
		if (ret) {
			dev_err(dev,
				"failed to init SRQ context memory, aborting.\n");
			goto err_unmap_cq;
		}
	}

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL) {
		ret = hns_roce_init_hem_table(hr_dev,
					      &hr_dev->qp_table.sccc_table,
					      HEM_TYPE_SCCC,
					      hr_dev->caps.sccc_sz,
					      hr_dev->caps.num_qps);
		if (ret) {
			dev_err(dev,
				"failed to init SCC context memory, aborting.\n");
			goto err_unmap_srq;
		}
	}

	if (hr_dev->caps.qpc_timer_entry_sz) {
		ret = hns_roce_init_hem_table(hr_dev, &hr_dev->qpc_timer_table,
					      HEM_TYPE_QPC_TIMER,
					      hr_dev->caps.qpc_timer_entry_sz,
					      hr_dev->caps.qpc_timer_bt_num);
		if (ret) {
			dev_err(dev,
				"failed to init QPC timer memory, aborting.\n");
			goto err_unmap_ctx;
		}
	}

	if (hr_dev->caps.cqc_timer_entry_sz) {
		ret = hns_roce_init_hem_table(hr_dev, &hr_dev->cqc_timer_table,
					      HEM_TYPE_CQC_TIMER,
					      hr_dev->caps.cqc_timer_entry_sz,
					      hr_dev->caps.cqc_timer_bt_num);
		if (ret) {
			dev_err(dev,
				"failed to init CQC timer memory, aborting.\n");
			goto err_unmap_qpc_timer;
		}
	}

	if (hr_dev->caps.gmv_entry_sz) {
		ret = hns_roce_init_hem_table(hr_dev, &hr_dev->gmv_table,
					      HEM_TYPE_GMV,
					      hr_dev->caps.gmv_entry_sz,
					      hr_dev->caps.gmv_entry_num);
		if (ret) {
			dev_err(dev,
				"failed to init gmv table memory, ret = %d\n",
				ret);
			goto err_unmap_cqc_timer;
		}
	}

	return 0;

err_unmap_cqc_timer:
	if (hr_dev->caps.cqc_timer_entry_sz)
		hns_roce_cleanup_hem_table(hr_dev, &hr_dev->cqc_timer_table);

err_unmap_qpc_timer:
	if (hr_dev->caps.qpc_timer_entry_sz)
		hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qpc_timer_table);

err_unmap_ctx:
	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL)
		hns_roce_cleanup_hem_table(hr_dev,
					   &hr_dev->qp_table.sccc_table);
err_unmap_srq:
	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ)
		hns_roce_cleanup_hem_table(hr_dev, &hr_dev->srq_table.table);

err_unmap_cq:
	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->cq_table.table);

err_unmap_trrl:
	if (hr_dev->caps.trrl_entry_sz)
		hns_roce_cleanup_hem_table(hr_dev,
					   &hr_dev->qp_table.trrl_table);

err_unmap_irrl:
	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.irrl_table);

err_unmap_qp:
	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.qp_table);

err_unmap_dmpt:
	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtpt_table);

	return ret;
}

/**
 * hns_roce_setup_hca - setup host channel adapter
 * @hr_dev: pointer to hns roce device
 *
 * Return: 0 on success, or a negative error code otherwise.
 */
static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
{
	struct device *dev = hr_dev->dev;
	int ret;

	spin_lock_init(&hr_dev->sm_lock);

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_CQ_RECORD_DB ||
	    hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB) {
		INIT_LIST_HEAD(&hr_dev->pgdir_list);
		mutex_init(&hr_dev->pgdir_mutex);
	}

	hns_roce_init_uar_table(hr_dev);

	ret = hns_roce_uar_alloc(hr_dev, &hr_dev->priv_uar);
	if (ret) {
		dev_err(dev, "failed to allocate priv_uar.\n");
		goto err_uar_table_free;
	}

	ret = hns_roce_init_qp_table(hr_dev);
	if (ret) {
		dev_err(dev, "failed to init qp_table.\n");
		goto err_uar_table_free;
	}

	hns_roce_init_pd_table(hr_dev);

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC)
		hns_roce_init_xrcd_table(hr_dev);

	hns_roce_init_mr_table(hr_dev);

	hns_roce_init_cq_table(hr_dev);

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ)
		hns_roce_init_srq_table(hr_dev);

	return 0;

err_uar_table_free:
	ida_destroy(&hr_dev->uar_ida.ida);
	return ret;
}

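/*
 * Queue a CQ for completion handling exactly once: only CQs with a
 * completion handler are collected, and the is_armed flag guards against
 * adding the same CQ to the list twice.
 */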
static void check_and_get_armed_cq(struct list_head *cq_list, struct ib_cq *cq)
{
	struct hns_roce_cq *hr_cq = to_hr_cq(cq);
	unsigned long flags;

	spin_lock_irqsave(&hr_cq->lock, flags);
	if (cq->comp_handler && !hr_cq->is_armed) {
		hr_cq->is_armed = 1;
		list_add_tail(&hr_cq->node, cq_list);
	}
	spin_unlock_irqrestore(&hr_cq->lock, flags);
}

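/*
 * On a fatal device error, walk every QP and fire a completion event on
 * each CQ that still has unpolled work (a non-empty SQ, or RQ when no
 * SRQ is attached), so consumers wake up and observe the error state.
 */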
void hns_roce_handle_device_err(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_qp *hr_qp;
	struct hns_roce_cq *hr_cq;
	struct list_head cq_list;
	unsigned long flags_qp;
	unsigned long flags;

	INIT_LIST_HEAD(&cq_list);

	spin_lock_irqsave(&hr_dev->qp_list_lock, flags);
	list_for_each_entry(hr_qp, &hr_dev->qp_list, node) {
		spin_lock_irqsave(&hr_qp->sq.lock, flags_qp);
		if (hr_qp->sq.tail != hr_qp->sq.head)
			check_and_get_armed_cq(&cq_list, hr_qp->ibqp.send_cq);
		spin_unlock_irqrestore(&hr_qp->sq.lock, flags_qp);

		spin_lock_irqsave(&hr_qp->rq.lock, flags_qp);
		if ((!hr_qp->ibqp.srq) && (hr_qp->rq.tail != hr_qp->rq.head))
			check_and_get_armed_cq(&cq_list, hr_qp->ibqp.recv_cq);
		spin_unlock_irqrestore(&hr_qp->rq.lock, flags_qp);
	}

	list_for_each_entry(hr_cq, &cq_list, node)
		hns_roce_cq_completion(hr_dev, hr_cq->cqn);

	spin_unlock_irqrestore(&hr_dev->qp_list_lock, flags);
}

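/*
 * Bring the device up in dependency order: command queue, engine
 * profile, mailbox commands, EQs (switching commands to event mode when
 * possible), HEM tables, HCA software state, engine-specific hw_init,
 * and finally registration with the RDMA core. Each failure label tears
 * down exactly the steps that have already succeeded.
 */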
int hns_roce_init(struct hns_roce_dev *hr_dev)
{
	struct device *dev = hr_dev->dev;
	int ret;

	hr_dev->is_reset = false;

	if (hr_dev->hw->cmq_init) {
		ret = hr_dev->hw->cmq_init(hr_dev);
		if (ret) {
			dev_err(dev, "init RoCE Command Queue failed!\n");
			return ret;
		}
	}

	ret = hr_dev->hw->hw_profile(hr_dev);
	if (ret) {
		dev_err(dev, "get RoCE engine profile failed!\n");
		goto error_failed_cmd_init;
	}

	ret = hns_roce_cmd_init(hr_dev);
	if (ret) {
		dev_err(dev, "cmd init failed!\n");
		goto error_failed_cmd_init;
	}

	/* EQ creation relies on poll-mode commands; event-mode commands rely on the EQs */
	ret = hr_dev->hw->init_eq(hr_dev);
	if (ret) {
		dev_err(dev, "eq init failed!\n");
		goto error_failed_eq_table;
	}

	if (hr_dev->cmd_mod) {
		ret = hns_roce_cmd_use_events(hr_dev);
		if (ret)
			dev_warn(dev,
				 "Cmd event mode failed, set back to poll!\n");
	}

	ret = hns_roce_init_hem(hr_dev);
	if (ret) {
		dev_err(dev, "init HEM(Hardware Entry Memory) failed!\n");
		goto error_failed_init_hem;
	}

	ret = hns_roce_setup_hca(hr_dev);
	if (ret) {
		dev_err(dev, "setup hca failed!\n");
		goto error_failed_setup_hca;
	}

	if (hr_dev->hw->hw_init) {
		ret = hr_dev->hw->hw_init(hr_dev);
		if (ret) {
			dev_err(dev, "hw_init failed!\n");
			goto error_failed_engine_init;
		}
	}

	INIT_LIST_HEAD(&hr_dev->qp_list);
	spin_lock_init(&hr_dev->qp_list_lock);
	INIT_LIST_HEAD(&hr_dev->dip_list);
	spin_lock_init(&hr_dev->dip_list_lock);

	ret = hns_roce_register_device(hr_dev);
	if (ret)
		goto error_failed_register_device;

	return 0;

error_failed_register_device:
	if (hr_dev->hw->hw_exit)
		hr_dev->hw->hw_exit(hr_dev);

error_failed_engine_init:
	hns_roce_cleanup_bitmap(hr_dev);

error_failed_setup_hca:
	hns_roce_cleanup_hem(hr_dev);

error_failed_init_hem:
	if (hr_dev->cmd_mod)
		hns_roce_cmd_use_polling(hr_dev);
	hr_dev->hw->cleanup_eq(hr_dev);

error_failed_eq_table:
	hns_roce_cmd_cleanup(hr_dev);

error_failed_cmd_init:
	if (hr_dev->hw->cmq_exit)
		hr_dev->hw->cmq_exit(hr_dev);

	return ret;
}

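/* Tear down everything hns_roce_init() set up, in reverse order. */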
void hns_roce_exit(struct hns_roce_dev *hr_dev)
{
	hns_roce_unregister_device(hr_dev);

	if (hr_dev->hw->hw_exit)
		hr_dev->hw->hw_exit(hr_dev);
	hns_roce_cleanup_bitmap(hr_dev);
	hns_roce_cleanup_hem(hr_dev);

	if (hr_dev->cmd_mod)
		hns_roce_cmd_use_polling(hr_dev);

	hr_dev->hw->cleanup_eq(hr_dev);
	hns_roce_cmd_cleanup(hr_dev);
	if (hr_dev->hw->cmq_exit)
		hr_dev->hw->cmq_exit(hr_dev);
}

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Wei Hu <xavier.huwei@huawei.com>");
MODULE_AUTHOR("Nenglong Zhao <zhaonenglong@hisilicon.com>");
MODULE_AUTHOR("Lijun Ou <oulijun@huawei.com>");
MODULE_DESCRIPTION("HNS RoCE Driver");