Lines matching refs: dd — the struct hfi1_devdata * device pointer threaded through the hfi1 driver's init path
126 static int hfi1_create_kctxt(struct hfi1_devdata *dd, in hfi1_create_kctxt() argument
135 ret = hfi1_create_ctxtdata(ppd, dd->node, &rcd); in hfi1_create_kctxt()
137 dd_dev_err(dd, "Kernel receive context allocation failed\n"); in hfi1_create_kctxt()
161 rcd->sc = sc_alloc(dd, SC_ACK, rcd->rcvhdrqentsize, dd->node); in hfi1_create_kctxt()
163 dd_dev_err(dd, "Kernel send context allocation failed\n"); in hfi1_create_kctxt()
174 int hfi1_create_kctxts(struct hfi1_devdata *dd) in hfi1_create_kctxts() argument
179 dd->rcd = kcalloc_node(dd->num_rcv_contexts, sizeof(*dd->rcd), in hfi1_create_kctxts()
180 GFP_KERNEL, dd->node); in hfi1_create_kctxts()
181 if (!dd->rcd) in hfi1_create_kctxts()
184 for (i = 0; i < dd->first_dyn_alloc_ctxt; ++i) { in hfi1_create_kctxts()
185 ret = hfi1_create_kctxt(dd, dd->pport); in hfi1_create_kctxts()
192 for (i = 0; dd->rcd && i < dd->first_dyn_alloc_ctxt; ++i) in hfi1_create_kctxts()
193 hfi1_free_ctxt(dd->rcd[i]); in hfi1_create_kctxts()
196 kfree(dd->rcd); in hfi1_create_kctxts()
197 dd->rcd = NULL; in hfi1_create_kctxts()
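The two functions above form the kernel-context bring-up pair: hfi1_create_kctxts() sizes a NUMA-local pointer table with kcalloc_node() and calls hfi1_create_kctxt() once per kernel context, unwinding every slot on failure. A minimal sketch of that allocate/loop/unwind shape, with a hypothetical create_one() standing in for hfi1_create_kctxt():

    /* Sketch only: mirrors the shape of hfi1_create_kctxts() above.
     * create_one() is a hypothetical stand-in for hfi1_create_kctxt().
     */
    static int create_table(struct hfi1_devdata *dd)
    {
            u16 i;
            int ret;

            /* keep the context table on the device's NUMA node */
            dd->rcd = kcalloc_node(dd->num_rcv_contexts, sizeof(*dd->rcd),
                                   GFP_KERNEL, dd->node);
            if (!dd->rcd)
                    return -ENOMEM;

            for (i = 0; i < dd->first_dyn_alloc_ctxt; ++i) {
                    ret = create_one(dd, dd->pport);
                    if (ret)
                            goto bail;
            }
            return 0;

    bail:
            /* free only what was created; hfi1_free_ctxt() tolerates NULL */
            for (i = 0; i < dd->first_dyn_alloc_ctxt; ++i)
                    hfi1_free_ctxt(dd->rcd[i]);
            kfree(dd->rcd);
            dd->rcd = NULL;
            return ret;
    }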
220 spin_lock_irqsave(&rcd->dd->uctxt_lock, flags); in hfi1_rcd_free()
221 rcd->dd->rcd[rcd->ctxt] = NULL; in hfi1_rcd_free()
222 spin_unlock_irqrestore(&rcd->dd->uctxt_lock, flags); in hfi1_rcd_free()
224 hfi1_free_ctxtdata(rcd->dd, rcd); in hfi1_rcd_free()
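hfi1_rcd_free() shows the unpublish-then-free ordering: the dd->rcd[] slot is cleared under uctxt_lock first, so a concurrent hfi1_rcd_get_by_index() can no longer hand out the dying context, and only then is the context data torn down. The fragments above are the complete sequence; reassembled with comments:

    unsigned long flags;

    spin_lock_irqsave(&rcd->dd->uctxt_lock, flags);
    rcd->dd->rcd[rcd->ctxt] = NULL;                 /* unpublish the slot */
    spin_unlock_irqrestore(&rcd->dd->uctxt_lock, flags);

    hfi1_free_ctxtdata(rcd->dd, rcd);               /* now safe to tear down */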
267 static int allocate_rcd_index(struct hfi1_devdata *dd, in allocate_rcd_index() argument
273 spin_lock_irqsave(&dd->uctxt_lock, flags); in allocate_rcd_index()
274 for (ctxt = 0; ctxt < dd->num_rcv_contexts; ctxt++) in allocate_rcd_index()
275 if (!dd->rcd[ctxt]) in allocate_rcd_index()
278 if (ctxt < dd->num_rcv_contexts) { in allocate_rcd_index()
280 dd->rcd[ctxt] = rcd; in allocate_rcd_index()
283 spin_unlock_irqrestore(&dd->uctxt_lock, flags); in allocate_rcd_index()
285 if (ctxt >= dd->num_rcv_contexts) in allocate_rcd_index()
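allocate_rcd_index() is the publishing side of the same lock: a linear scan of dd->rcd[] for a free slot under uctxt_lock, storing the new context before the lock drops so lookup and free always agree. A commented reconstruction of the flow; the listing shows every load-bearing line, and the rcd->ctxt store and *index out-parameter are assumptions inferred from the signature:

    static int allocate_rcd_index(struct hfi1_devdata *dd,
                                  struct hfi1_ctxtdata *rcd, u16 *index)
    {
            unsigned long flags;
            u16 ctxt;

            spin_lock_irqsave(&dd->uctxt_lock, flags);
            for (ctxt = 0; ctxt < dd->num_rcv_contexts; ctxt++)
                    if (!dd->rcd[ctxt])
                            break;

            if (ctxt < dd->num_rcv_contexts) {
                    rcd->ctxt = ctxt;               /* assumed */
                    dd->rcd[ctxt] = rcd;            /* publish under the lock */
            }
            spin_unlock_irqrestore(&dd->uctxt_lock, flags);

            if (ctxt >= dd->num_rcv_contexts)
                    return -EBUSY;

            *index = ctxt;
            return 0;
    }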
305 struct hfi1_ctxtdata *hfi1_rcd_get_by_index_safe(struct hfi1_devdata *dd, in hfi1_rcd_get_by_index_safe() argument
308 if (ctxt < dd->num_rcv_contexts) in hfi1_rcd_get_by_index_safe()
309 return hfi1_rcd_get_by_index(dd, ctxt); in hfi1_rcd_get_by_index_safe()
326 struct hfi1_ctxtdata *hfi1_rcd_get_by_index(struct hfi1_devdata *dd, u16 ctxt) in hfi1_rcd_get_by_index() argument
331 spin_lock_irqsave(&dd->uctxt_lock, flags); in hfi1_rcd_get_by_index()
332 if (dd->rcd[ctxt]) { in hfi1_rcd_get_by_index()
333 rcd = dd->rcd[ctxt]; in hfi1_rcd_get_by_index()
337 spin_unlock_irqrestore(&dd->uctxt_lock, flags); in hfi1_rcd_get_by_index()
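The two lookup helpers split the work: hfi1_rcd_get_by_index_safe() adds only the bounds check against num_rcv_contexts, then defers to hfi1_rcd_get_by_index(), which reads the slot under uctxt_lock. A sketch of the locked read; the reference-count line is an assumption (the full driver pins the context before returning) and is not shown in the listing:

    struct hfi1_ctxtdata *rcd = NULL;
    unsigned long flags;

    spin_lock_irqsave(&dd->uctxt_lock, flags);
    if (dd->rcd[ctxt]) {
            rcd = dd->rcd[ctxt];
            hfi1_rcd_get(rcd);      /* assumed: pin the context under the lock */
    }
    spin_unlock_irqrestore(&dd->uctxt_lock, flags);

    return rcd;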
349 struct hfi1_devdata *dd = ppd->dd; in hfi1_create_ctxtdata() local
354 if (dd->rcv_entries.nctxt_extra > in hfi1_create_ctxtdata()
355 dd->num_rcv_contexts - dd->first_dyn_alloc_ctxt) in hfi1_create_ctxtdata()
356 kctxt_ngroups = (dd->rcv_entries.nctxt_extra - in hfi1_create_ctxtdata()
357 (dd->num_rcv_contexts - dd->first_dyn_alloc_ctxt)); in hfi1_create_ctxtdata()
364 ret = allocate_rcd_index(dd, rcd, &ctxt); in hfi1_create_ctxtdata()
374 rcd->dd = dd; in hfi1_create_ctxtdata()
376 rcd->rcv_array_groups = dd->rcv_entries.ngroups; in hfi1_create_ctxtdata()
394 if (ctxt < dd->first_dyn_alloc_ctxt) { in hfi1_create_ctxtdata()
396 base = ctxt * (dd->rcv_entries.ngroups + 1); in hfi1_create_ctxtdata()
400 (ctxt * dd->rcv_entries.ngroups); in hfi1_create_ctxtdata()
403 u16 ct = ctxt - dd->first_dyn_alloc_ctxt; in hfi1_create_ctxtdata()
405 base = ((dd->n_krcv_queues * dd->rcv_entries.ngroups) + in hfi1_create_ctxtdata()
407 if (ct < dd->rcv_entries.nctxt_extra) { in hfi1_create_ctxtdata()
408 base += ct * (dd->rcv_entries.ngroups + 1); in hfi1_create_ctxtdata()
411 base += dd->rcv_entries.nctxt_extra + in hfi1_create_ctxtdata()
412 (ct * dd->rcv_entries.ngroups); in hfi1_create_ctxtdata()
415 rcd->eager_base = base * dd->rcv_entries.group_size; in hfi1_create_ctxtdata()
433 dd->rcv_entries.group_size; in hfi1_create_ctxtdata()
436 dd->rcv_entries.group_size); in hfi1_create_ctxtdata()
438 dd_dev_err(dd, "ctxt%u: requested too many RcvArray entries.\n", in hfi1_create_ctxtdata()
481 if (ctxt < dd->first_dyn_alloc_ctxt) { in hfi1_create_ctxtdata()
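hfi1_create_ctxtdata() computes where each context's eager buffers start in the RcvArray: leftover groups (rcv_entries.nctxt_extra) are handed out one per context, so early contexts receive ngroups + 1 groups and the rest ngroups, and the group count is scaled to an entry index by group_size. A worked sketch of the dynamic-context branch; the formulas are from the fragments above (the kctxt_ngroups term completes the line the listing truncates), while the example numbers are purely illustrative:

    /* Example: ngroups = 2, nctxt_extra = 3, n_krcv_queues = 4,
     * kctxt_ngroups = 0, group_size = 8.
     *   ct = 0: base = 4*2 + 0*(2+1) = 8  -> eager_base = 64
     *   ct = 3: base = 4*2 + 3 + 3*2 = 17 -> eager_base = 136
     */
    u32 base = (dd->n_krcv_queues * dd->rcv_entries.ngroups) + kctxt_ngroups;

    if (ct < dd->rcv_entries.nctxt_extra)
            base += ct * (dd->rcv_entries.ngroups + 1); /* extra group */
    else
            base += dd->rcv_entries.nctxt_extra +
                    (ct * dd->rcv_entries.ngroups);

    rcd->eager_base = base * dd->rcv_entries.group_size;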
525 struct hfi1_devdata *dd = ppd->dd; in set_link_ipg() local
572 write_csr(dd, SEND_STATIC_RATE_CONTROL, src); in set_link_ipg()
630 struct hfi1_devdata *dd, u8 hw_pidx, u8 port) in hfi1_init_pportdata() argument
636 ppd->dd = dd; in hfi1_init_pportdata()
655 dd_dev_err(dd, "Faking data partition 0x8001 in idx %u\n", in hfi1_init_pportdata()
702 dd_dev_err(dd, "Congestion Control Agent disabled for port %d\n", port); in hfi1_init_pportdata()
709 static int loadtime_init(struct hfi1_devdata *dd) in loadtime_init() argument
722 static int init_after_reset(struct hfi1_devdata *dd) in init_after_reset() argument
731 for (i = 0; i < dd->num_rcv_contexts; i++) { in init_after_reset()
732 rcd = hfi1_rcd_get_by_index(dd, i); in init_after_reset()
733 hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS | in init_after_reset()
738 pio_send_control(dd, PSC_GLOBAL_DISABLE); in init_after_reset()
739 for (i = 0; i < dd->num_send_contexts; i++) in init_after_reset()
740 sc_disable(dd->send_contexts[i].sc); in init_after_reset()
745 static void enable_chip(struct hfi1_devdata *dd) in enable_chip() argument
752 pio_send_control(dd, PSC_GLOBAL_ENABLE); in enable_chip()
758 for (i = 0; i < dd->first_dyn_alloc_ctxt; ++i) { in enable_chip()
759 rcd = hfi1_rcd_get_by_index(dd, i); in enable_chip()
773 hfi1_rcvctrl(dd, rcvmask, rcd); in enable_chip()
783 static int create_workqueues(struct hfi1_devdata *dd) in create_workqueues() argument
788 for (pidx = 0; pidx < dd->num_pports; ++pidx) { in create_workqueues()
789 ppd = dd->pport + pidx; in create_workqueues()
797 dd->unit, pidx); in create_workqueues()
811 dd->unit, pidx); in create_workqueues()
819 for (pidx = 0; pidx < dd->num_pports; ++pidx) { in create_workqueues()
820 ppd = dd->pport + pidx; in create_workqueues()
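create_workqueues() allocates the per-port workqueues in two passes over dd->pport, unwinding everything created so far if an allocation fails; dd->unit and pidx feed the queue-name format string. A hedged sketch of one allocation: the target field, WQ_* flags, and max_active value here are assumptions drawn from the driver's conventions, not from the listing:

    ppd->hfi1_wq = alloc_workqueue("hfi%d_%d",
                                   WQ_SYSFS | WQ_MEM_RECLAIM |
                                   WQ_CPU_INTENSIVE,
                                   HFI1_MAX_ACTIVE_WORKQUEUE_ENTRIES,
                                   dd->unit, pidx);
    if (!ppd->hfi1_wq)
            goto wq_error;  /* destroy queues already created, bail -ENOMEM */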
837 static void destroy_workqueues(struct hfi1_devdata *dd) in destroy_workqueues() argument
842 for (pidx = 0; pidx < dd->num_pports; ++pidx) { in destroy_workqueues()
843 ppd = dd->pport + pidx; in destroy_workqueues()
862 static void enable_general_intr(struct hfi1_devdata *dd) in enable_general_intr() argument
864 set_intr_bits(dd, CCE_ERR_INT, MISC_ERR_INT, true); in enable_general_intr()
865 set_intr_bits(dd, PIO_ERR_INT, TXE_ERR_INT, true); in enable_general_intr()
866 set_intr_bits(dd, IS_SENDCTXT_ERR_START, IS_SENDCTXT_ERR_END, true); in enable_general_intr()
867 set_intr_bits(dd, PBC_INT, GPIO_ASSERT_INT, true); in enable_general_intr()
868 set_intr_bits(dd, TCRIT_INT, TCRIT_INT, true); in enable_general_intr()
869 set_intr_bits(dd, IS_DC_START, IS_DC_END, true); in enable_general_intr()
870 set_intr_bits(dd, IS_SENDCREDIT_START, IS_SENDCREDIT_END, true); in enable_general_intr()
888 int hfi1_init(struct hfi1_devdata *dd, int reinit) in hfi1_init() argument
897 dd->process_pio_send = hfi1_verbs_send_pio; in hfi1_init()
898 dd->process_dma_send = hfi1_verbs_send_dma; in hfi1_init()
899 dd->pio_inline_send = pio_copy; in hfi1_init()
900 dd->process_vnic_dma_send = hfi1_vnic_send_dma; in hfi1_init()
902 if (is_ax(dd)) { in hfi1_init()
903 atomic_set(&dd->drop_packet, DROP_PACKET_ON); in hfi1_init()
904 dd->do_drop = true; in hfi1_init()
906 atomic_set(&dd->drop_packet, DROP_PACKET_OFF); in hfi1_init()
907 dd->do_drop = false; in hfi1_init()
911 for (pidx = 0; pidx < dd->num_pports; ++pidx) { in hfi1_init()
912 ppd = dd->pport + pidx; in hfi1_init()
917 ret = init_after_reset(dd); in hfi1_init()
919 ret = loadtime_init(dd); in hfi1_init()
924 dd->rcvhdrtail_dummy_kvaddr = dma_alloc_coherent(&dd->pcidev->dev, in hfi1_init()
926 &dd->rcvhdrtail_dummy_dma, in hfi1_init()
929 if (!dd->rcvhdrtail_dummy_kvaddr) { in hfi1_init()
930 dd_dev_err(dd, "cannot allocate dummy tail memory\n"); in hfi1_init()
936 for (i = 0; dd->rcd && i < dd->first_dyn_alloc_ctxt; ++i) { in hfi1_init()
943 rcd = hfi1_rcd_get_by_index(dd, i); in hfi1_init()
949 lastfail = hfi1_create_rcvhdrq(dd, rcd); in hfi1_init()
955 dd_dev_err(dd, in hfi1_init()
964 len = PAGE_ALIGN(chip_rcv_contexts(dd) * HFI1_MAX_SHARED_CTXTS * in hfi1_init()
965 sizeof(*dd->events)); in hfi1_init()
966 dd->events = vmalloc_user(len); in hfi1_init()
967 if (!dd->events) in hfi1_init()
968 dd_dev_err(dd, "Failed to allocate user events page\n"); in hfi1_init()
973 dd->status = vmalloc_user(PAGE_SIZE); in hfi1_init()
974 if (!dd->status) in hfi1_init()
975 dd_dev_err(dd, "Failed to allocate dev status page\n"); in hfi1_init()
976 for (pidx = 0; pidx < dd->num_pports; ++pidx) { in hfi1_init()
977 ppd = dd->pport + pidx; in hfi1_init()
978 if (dd->status) in hfi1_init()
980 ppd->statusp = &dd->status->port; in hfi1_init()
986 enable_chip(dd); in hfi1_init()
993 if (dd->status) in hfi1_init()
994 dd->status->dev |= HFI1_STATUS_CHIP_PRESENT | in hfi1_init()
998 enable_general_intr(dd); in hfi1_init()
999 init_qsfp_int(dd); in hfi1_init()
1002 for (pidx = 0; pidx < dd->num_pports; ++pidx) { in hfi1_init()
1003 ppd = dd->pport + pidx; in hfi1_init()
1011 dd_dev_info(dd, in hfi1_init()
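Two allocation idioms inside hfi1_init() deserve a note. The dummy tail buffer is a single DMA-coherent u64 (its size is visible in the matching free in cleanup_device_data()) that receive contexts can point tail writebacks at while disabled, and the events/status pages come from vmalloc_user(), which returns zeroed memory suitable for later mapping into user space; per the fragments, failure of either vmalloc is logged but not fatal. Condensed, with error handling trimmed:

    /* one DMA-coherent u64: a safe landing zone for tail writebacks */
    dd->rcvhdrtail_dummy_kvaddr =
            dma_alloc_coherent(&dd->pcidev->dev, sizeof(u64),
                               &dd->rcvhdrtail_dummy_dma, GFP_KERNEL);

    /* zeroed, user-mappable; sized for every context's event flags */
    dd->events = vmalloc_user(PAGE_ALIGN(chip_rcv_contexts(dd) *
                                         HFI1_MAX_SHARED_CTXTS *
                                         sizeof(*dd->events)));
    dd->status = vmalloc_user(PAGE_SIZE);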
1040 static void stop_timers(struct hfi1_devdata *dd) in stop_timers() argument
1045 for (pidx = 0; pidx < dd->num_pports; ++pidx) { in stop_timers()
1046 ppd = dd->pport + pidx; in stop_timers()
1063 static void shutdown_device(struct hfi1_devdata *dd) in shutdown_device() argument
1070 if (dd->flags & HFI1_SHUTDOWN) in shutdown_device()
1072 dd->flags |= HFI1_SHUTDOWN; in shutdown_device()
1074 for (pidx = 0; pidx < dd->num_pports; ++pidx) { in shutdown_device()
1075 ppd = dd->pport + pidx; in shutdown_device()
1082 dd->flags &= ~HFI1_INITTED; in shutdown_device()
1085 set_intr_bits(dd, IS_FIRST_SOURCE, IS_LAST_SOURCE, false); in shutdown_device()
1086 msix_clean_up_interrupts(dd); in shutdown_device()
1088 for (pidx = 0; pidx < dd->num_pports; ++pidx) { in shutdown_device()
1089 ppd = dd->pport + pidx; in shutdown_device()
1090 for (i = 0; i < dd->num_rcv_contexts; i++) { in shutdown_device()
1091 rcd = hfi1_rcd_get_by_index(dd, i); in shutdown_device()
1092 hfi1_rcvctrl(dd, HFI1_RCVCTRL_TAILUPD_DIS | in shutdown_device()
1103 for (i = 0; i < dd->num_send_contexts; i++) in shutdown_device()
1104 sc_flush(dd->send_contexts[i].sc); in shutdown_device()
1113 for (pidx = 0; pidx < dd->num_pports; ++pidx) { in shutdown_device()
1114 ppd = dd->pport + pidx; in shutdown_device()
1117 for (i = 0; i < dd->num_send_contexts; i++) in shutdown_device()
1118 sc_disable(dd->send_contexts[i].sc); in shutdown_device()
1120 pio_send_control(dd, PSC_GLOBAL_DISABLE); in shutdown_device()
1134 sdma_exit(dd); in shutdown_device()
1145 void hfi1_free_ctxtdata(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd) in hfi1_free_ctxtdata() argument
1153 dma_free_coherent(&dd->pcidev->dev, rcvhdrq_size(rcd), in hfi1_free_ctxtdata()
1157 dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE, in hfi1_free_ctxtdata()
1170 dma_free_coherent(&dd->pcidev->dev, in hfi1_free_ctxtdata()
1198 static struct hfi1_asic_data *release_asic_data(struct hfi1_devdata *dd) in release_asic_data() argument
1203 if (!dd->asic_data) in release_asic_data()
1205 dd->asic_data->dds[dd->hfi1_id] = NULL; in release_asic_data()
1206 other = dd->hfi1_id ? 0 : 1; in release_asic_data()
1207 ad = dd->asic_data; in release_asic_data()
1208 dd->asic_data = NULL; in release_asic_data()
1213 static void finalize_asic_data(struct hfi1_devdata *dd, in finalize_asic_data() argument
1216 clean_up_i2c(dd, ad); in finalize_asic_data()
1227 void hfi1_free_devdata(struct hfi1_devdata *dd) in hfi1_free_devdata() argument
1233 __xa_erase(&hfi1_dev_table, dd->unit); in hfi1_free_devdata()
1234 ad = release_asic_data(dd); in hfi1_free_devdata()
1237 finalize_asic_data(dd, ad); in hfi1_free_devdata()
1238 free_platform_config(dd); in hfi1_free_devdata()
1240 free_percpu(dd->int_counter); in hfi1_free_devdata()
1241 free_percpu(dd->rcv_limit); in hfi1_free_devdata()
1242 free_percpu(dd->send_schedule); in hfi1_free_devdata()
1243 free_percpu(dd->tx_opstats); in hfi1_free_devdata()
1244 dd->int_counter = NULL; in hfi1_free_devdata()
1245 dd->rcv_limit = NULL; in hfi1_free_devdata()
1246 dd->send_schedule = NULL; in hfi1_free_devdata()
1247 dd->tx_opstats = NULL; in hfi1_free_devdata()
1248 kfree(dd->comp_vect); in hfi1_free_devdata()
1249 dd->comp_vect = NULL; in hfi1_free_devdata()
1250 sdma_clean(dd, dd->num_sdma); in hfi1_free_devdata()
1251 rvt_dealloc_device(&dd->verbs_dev.rdi); in hfi1_free_devdata()
1266 struct hfi1_devdata *dd; in hfi1_alloc_devdata() local
1272 dd = (struct hfi1_devdata *)rvt_alloc_device(sizeof(*dd) + extra, in hfi1_alloc_devdata()
1274 if (!dd) in hfi1_alloc_devdata()
1276 dd->num_pports = nports; in hfi1_alloc_devdata()
1277 dd->pport = (struct hfi1_pportdata *)(dd + 1); in hfi1_alloc_devdata()
1278 dd->pcidev = pdev; in hfi1_alloc_devdata()
1279 pci_set_drvdata(pdev, dd); in hfi1_alloc_devdata()
1280 dd->node = NUMA_NO_NODE; in hfi1_alloc_devdata()
1282 ret = xa_alloc_irq(&hfi1_dev_table, &dd->unit, dd, xa_limit_32b, in hfi1_alloc_devdata()
1289 rvt_set_ibdev_name(&dd->verbs_dev.rdi, "%s_%d", class_name(), dd->unit); in hfi1_alloc_devdata()
1295 spin_lock_init(&dd->sc_lock); in hfi1_alloc_devdata()
1296 spin_lock_init(&dd->sendctrl_lock); in hfi1_alloc_devdata()
1297 spin_lock_init(&dd->rcvctrl_lock); in hfi1_alloc_devdata()
1298 spin_lock_init(&dd->uctxt_lock); in hfi1_alloc_devdata()
1299 spin_lock_init(&dd->hfi1_diag_trans_lock); in hfi1_alloc_devdata()
1300 spin_lock_init(&dd->sc_init_lock); in hfi1_alloc_devdata()
1301 spin_lock_init(&dd->dc8051_memlock); in hfi1_alloc_devdata()
1302 seqlock_init(&dd->sc2vl_lock); in hfi1_alloc_devdata()
1303 spin_lock_init(&dd->sde_map_lock); in hfi1_alloc_devdata()
1304 spin_lock_init(&dd->pio_map_lock); in hfi1_alloc_devdata()
1305 mutex_init(&dd->dc8051_lock); in hfi1_alloc_devdata()
1306 init_waitqueue_head(&dd->event_queue); in hfi1_alloc_devdata()
1307 spin_lock_init(&dd->irq_src_lock); in hfi1_alloc_devdata()
1309 dd->int_counter = alloc_percpu(u64); in hfi1_alloc_devdata()
1310 if (!dd->int_counter) { in hfi1_alloc_devdata()
1315 dd->rcv_limit = alloc_percpu(u64); in hfi1_alloc_devdata()
1316 if (!dd->rcv_limit) { in hfi1_alloc_devdata()
1321 dd->send_schedule = alloc_percpu(u64); in hfi1_alloc_devdata()
1322 if (!dd->send_schedule) { in hfi1_alloc_devdata()
1327 dd->tx_opstats = alloc_percpu(struct hfi1_opcode_stats_perctx); in hfi1_alloc_devdata()
1328 if (!dd->tx_opstats) { in hfi1_alloc_devdata()
1333 dd->comp_vect = kzalloc(sizeof(*dd->comp_vect), GFP_KERNEL); in hfi1_alloc_devdata()
1334 if (!dd->comp_vect) { in hfi1_alloc_devdata()
1339 atomic_set(&dd->ipoib_rsm_usr_num, 0); in hfi1_alloc_devdata()
1340 return dd; in hfi1_alloc_devdata()
1343 hfi1_free_devdata(dd); in hfi1_alloc_devdata()
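hfi1_alloc_devdata() stacks several idioms: the port array rides in the same allocation as the device data (dd->pport = (struct hfi1_pportdata *)(dd + 1)), the unit number comes from xa_alloc_irq() against hfi1_dev_table, and each percpu-counter failure jumps to a common bail that calls hfi1_free_devdata(), which is why that function NULLs every pointer it frees. A sketch of the first two steps of the chain; hfi1_dev_table must be an allocating XArray (created with XA_FLAGS_ALLOC) for xa_alloc_irq() to succeed:

    /* claim a 32-bit unit id; on success dd is published in the table */
    ret = xa_alloc_irq(&hfi1_dev_table, &dd->unit, dd, xa_limit_32b,
                       GFP_KERNEL);
    if (ret < 0)
            goto bail;

    dd->int_counter = alloc_percpu(u64);
    if (!dd->int_counter) {
            ret = -ENOMEM;
            goto bail;      /* hfi1_free_devdata() frees whatever exists */
    }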
1352 void hfi1_disable_after_error(struct hfi1_devdata *dd) in hfi1_disable_after_error() argument
1354 if (dd->flags & HFI1_INITTED) { in hfi1_disable_after_error()
1357 dd->flags &= ~HFI1_INITTED; in hfi1_disable_after_error()
1358 if (dd->pport) in hfi1_disable_after_error()
1359 for (pidx = 0; pidx < dd->num_pports; ++pidx) { in hfi1_disable_after_error()
1362 ppd = dd->pport + pidx; in hfi1_disable_after_error()
1363 if (dd->flags & HFI1_PRESENT) in hfi1_disable_after_error()
1376 if (dd->status) in hfi1_disable_after_error()
1377 dd->status->dev |= HFI1_STATUS_HWERROR; in hfi1_disable_after_error()
1518 static void cleanup_device_data(struct hfi1_devdata *dd) in cleanup_device_data() argument
1524 for (pidx = 0; pidx < dd->num_pports; ++pidx) { in cleanup_device_data()
1525 struct hfi1_pportdata *ppd = &dd->pport[pidx]; in cleanup_device_data()
1544 free_credit_return(dd); in cleanup_device_data()
1546 if (dd->rcvhdrtail_dummy_kvaddr) { in cleanup_device_data()
1547 dma_free_coherent(&dd->pcidev->dev, sizeof(u64), in cleanup_device_data()
1548 (void *)dd->rcvhdrtail_dummy_kvaddr, in cleanup_device_data()
1549 dd->rcvhdrtail_dummy_dma); in cleanup_device_data()
1550 dd->rcvhdrtail_dummy_kvaddr = NULL; in cleanup_device_data()
1557 for (ctxt = 0; dd->rcd && ctxt < dd->num_rcv_contexts; ctxt++) { in cleanup_device_data()
1558 struct hfi1_ctxtdata *rcd = dd->rcd[ctxt]; in cleanup_device_data()
1566 kfree(dd->rcd); in cleanup_device_data()
1567 dd->rcd = NULL; in cleanup_device_data()
1569 free_pio_map(dd); in cleanup_device_data()
1571 for (ctxt = 0; ctxt < dd->num_send_contexts; ctxt++) in cleanup_device_data()
1572 sc_free(dd->send_contexts[ctxt].sc); in cleanup_device_data()
1573 dd->num_send_contexts = 0; in cleanup_device_data()
1574 kfree(dd->send_contexts); in cleanup_device_data()
1575 dd->send_contexts = NULL; in cleanup_device_data()
1576 kfree(dd->hw_to_sw); in cleanup_device_data()
1577 dd->hw_to_sw = NULL; in cleanup_device_data()
1578 kfree(dd->boardname); in cleanup_device_data()
1579 vfree(dd->events); in cleanup_device_data()
1580 vfree(dd->status); in cleanup_device_data()
1587 static void postinit_cleanup(struct hfi1_devdata *dd) in postinit_cleanup() argument
1589 hfi1_start_cleanup(dd); in postinit_cleanup()
1590 hfi1_comp_vectors_clean_up(dd); in postinit_cleanup()
1591 hfi1_dev_affinity_clean_up(dd); in postinit_cleanup()
1593 hfi1_pcie_ddcleanup(dd); in postinit_cleanup()
1594 hfi1_pcie_cleanup(dd->pcidev); in postinit_cleanup()
1596 cleanup_device_data(dd); in postinit_cleanup()
1598 hfi1_free_devdata(dd); in postinit_cleanup()
1604 struct hfi1_devdata *dd; in init_one() local
1620 dd = hfi1_alloc_devdata(pdev, NUM_IB_PORTS * in init_one()
1622 if (IS_ERR(dd)) { in init_one()
1623 ret = PTR_ERR(dd); in init_one()
1628 ret = hfi1_validate_rcvhdrcnt(dd, rcvhdrcnt); in init_one()
1634 dd_dev_err(dd, "Invalid HdrQ Entry size %u\n", in init_one()
1657 dd_dev_info(dd, "Eager buffer size %u\n", in init_one()
1660 dd_dev_err(dd, "Invalid Eager buffer size of 0\n"); in init_one()
1668 ret = hfi1_pcie_init(dd); in init_one()
1676 ret = hfi1_init_dd(dd); in init_one()
1680 ret = create_workqueues(dd); in init_one()
1685 initfail = hfi1_init(dd, 0); in init_one()
1687 ret = hfi1_register_ib_device(dd); in init_one()
1696 dd->flags |= HFI1_INITTED; in init_one()
1698 hfi1_dbg_ibdev_init(&dd->verbs_dev); in init_one()
1701 j = hfi1_device_create(dd); in init_one()
1703 dd_dev_err(dd, "Failed to create /dev devices: %d\n", -j); in init_one()
1706 msix_clean_up_interrupts(dd); in init_one()
1707 stop_timers(dd); in init_one()
1709 for (pidx = 0; pidx < dd->num_pports; ++pidx) { in init_one()
1710 hfi1_quiet_serdes(dd->pport + pidx); in init_one()
1711 ppd = dd->pport + pidx; in init_one()
1722 hfi1_device_remove(dd); in init_one()
1724 hfi1_unregister_ib_device(dd); in init_one()
1725 postinit_cleanup(dd); in init_one()
1731 sdma_start(dd); in init_one()
1741 static void wait_for_clients(struct hfi1_devdata *dd) in wait_for_clients() argument
1747 if (atomic_dec_and_test(&dd->user_refcount)) in wait_for_clients()
1748 complete(&dd->user_comp); in wait_for_clients()
1750 wait_for_completion(&dd->user_comp); in wait_for_clients()
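wait_for_clients() is the classic refcount-plus-completion drain: the driver holds one reference of its own from probe time, every open file holds another, and whoever drops the count to zero fires user_comp. Reassembled under that assumption (the initial reference and the file-release side are not in the listing):

    /* remove_one() path: drop the driver's own reference ... */
    if (atomic_dec_and_test(&dd->user_refcount))
            complete(&dd->user_comp);

    /* ... then sleep until the last open file does the same */
    wait_for_completion(&dd->user_comp);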
1755 struct hfi1_devdata *dd = pci_get_drvdata(pdev); in remove_one() local
1758 hfi1_dbg_ibdev_exit(&dd->verbs_dev); in remove_one()
1761 hfi1_device_remove(dd); in remove_one()
1764 wait_for_clients(dd); in remove_one()
1767 hfi1_unregister_ib_device(dd); in remove_one()
1770 hfi1_netdev_free(dd); in remove_one()
1776 shutdown_device(dd); in remove_one()
1777 destroy_workqueues(dd); in remove_one()
1779 stop_timers(dd); in remove_one()
1784 postinit_cleanup(dd); in remove_one()
1789 struct hfi1_devdata *dd = pci_get_drvdata(pdev); in shutdown_one() local
1791 shutdown_device(dd); in shutdown_one()
1803 int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd) in hfi1_create_rcvhdrq() argument
1812 if (rcd->ctxt < dd->first_dyn_alloc_ctxt || rcd->is_vnic) in hfi1_create_rcvhdrq()
1816 rcd->rcvhdrq = dma_alloc_coherent(&dd->pcidev->dev, amt, in hfi1_create_rcvhdrq()
1821 dd_dev_err(dd, in hfi1_create_rcvhdrq()
1829 rcd->rcvhdrtail_kvaddr = dma_alloc_coherent(&dd->pcidev->dev, in hfi1_create_rcvhdrq()
1838 set_hdrq_regs(rcd->dd, rcd->ctxt, rcd->rcvhdrqentsize, in hfi1_create_rcvhdrq()
1844 dd_dev_err(dd, in hfi1_create_rcvhdrq()
1847 dma_free_coherent(&dd->pcidev->dev, amt, rcd->rcvhdrq, in hfi1_create_rcvhdrq()
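hfi1_create_rcvhdrq() is a standard coherent-ring bring-up: allocate the header queue, optionally a one-page tail-writeback buffer, program the context's registers via set_hdrq_regs(), and on failure free exactly what was allocated (the dma_free_coherent() of amt bytes at the end is the unwind for the first allocation). A sketch of that shape; the *_dma handle and count field names are assumptions from the driver's conventions, and amt is assumed to come from rcvhdrq_size(rcd) as in the free path:

    rcd->rcvhdrq = dma_alloc_coherent(&dd->pcidev->dev, amt,
                                      &rcd->rcvhdrq_dma, GFP_KERNEL);
    if (!rcd->rcvhdrq)
            goto bail;

    rcd->rcvhdrtail_kvaddr = dma_alloc_coherent(&dd->pcidev->dev, PAGE_SIZE,
                                                &rcd->rcvhdrqtailaddr_dma,
                                                GFP_KERNEL);
    if (!rcd->rcvhdrtail_kvaddr)
            goto bail_free;

    set_hdrq_regs(rcd->dd, rcd->ctxt, rcd->rcvhdrqentsize,
                  rcd->rcvhdrq_cnt);        /* count field name assumed */
    return 0;

    bail_free:
    dma_free_coherent(&dd->pcidev->dev, amt, rcd->rcvhdrq,
                      rcd->rcvhdrq_dma);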
1865 struct hfi1_devdata *dd = rcd->dd; in hfi1_setup_eagerbufs() local
1887 if (rcd->egrbufs.size < (round_mtu * dd->rcv_entries.group_size)) in hfi1_setup_eagerbufs()
1888 rcd->egrbufs.size = round_mtu * dd->rcv_entries.group_size; in hfi1_setup_eagerbufs()
1907 dma_alloc_coherent(&dd->pcidev->dev, in hfi1_setup_eagerbufs()
1933 dd_dev_err(dd, "ctxt%u: Failed to allocate eager buffers\n", in hfi1_setup_eagerbufs()
1997 max_entries = rcd->rcv_array_groups * dd->rcv_entries.group_size; in hfi1_setup_eagerbufs()
1998 egrtop = roundup(rcd->egrbufs.alloced, dd->rcv_entries.group_size); in hfi1_setup_eagerbufs()
2017 hfi1_put_tid(dd, rcd->eager_base + idx, PT_EAGER, in hfi1_setup_eagerbufs()
2028 dma_free_coherent(&dd->pcidev->dev, in hfi1_setup_eagerbufs()
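hfi1_setup_eagerbufs() enforces a floor of one full RcvArray group of MTU-rounded buffers, and after allocation rounds the achieved buffer count up to a whole group so hfi1_put_tid(..., PT_EAGER, ...) programs complete groups. A worked example of the two computations; the formulas are from the fragments above, while the numbers (round_mtu = 8192, group_size = 8) are assumptions for illustration:

    /* floor: one group of MTU-rounded buffers
     *   round_mtu = 8192, group_size = 8  ->  minimum size = 64 KiB */
    if (rcd->egrbufs.size < (round_mtu * dd->rcv_entries.group_size))
            rcd->egrbufs.size = round_mtu * dd->rcv_entries.group_size;

    /* ceiling: round the buffer count up to a whole group
     *   alloced = 13, group_size = 8  ->  egrtop = 16, which must not
     *   exceed max_entries = rcv_array_groups * group_size */
    egrtop = roundup(rcd->egrbufs.alloced, dd->rcv_entries.group_size);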