Lines matching refs: smcibdev — net/smc/smc_ib.c (Linux kernel, SMC-R InfiniBand layer). Each entry shows the source line number, the matching line, and the enclosing function; the trailing "argument"/"local" tags mark lines where smcibdev is a parameter or local variable.

125 	rc = ib_req_notify_cq(lnk->smcibdev->roce_cq_recv,  in smc_ib_ready_link()
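The lone match in smc_ib_ready_link() re-arms the receive completion queue. A minimal sketch of that step, assuming the RDMA core's IB_CQ_SOLICITED_MASK notification flag (the rest of the function, which is not part of this listing, brings the QP to a usable state first):

        /* request an event for the next completion on the recv CQ */
        rc = ib_req_notify_cq(lnk->smcibdev->roce_cq_recv,
                              IB_CQ_SOLICITED_MASK);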
144 static int smc_ib_fill_mac(struct smc_ib_device *smcibdev, u8 ibport) in smc_ib_fill_mac() argument
149 attr = rdma_get_gid_attr(smcibdev->ibdev, ibport, 0); in smc_ib_fill_mac()
153 rc = rdma_read_gid_l2_fields(attr, NULL, smcibdev->mac[ibport - 1]); in smc_ib_fill_mac()
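Only the rdma_get_gid_attr() and rdma_read_gid_l2_fields() calls of smc_ib_fill_mac() match, but they imply the RDMA core's usual get/read/put pattern for GID attributes. A plausible reconstruction; the -ENODEV error value and the rdma_put_gid_attr() release are assumptions filled in around the two visible lines:

static int smc_ib_fill_mac(struct smc_ib_device *smcibdev, u8 ibport)
{
        const struct ib_gid_attr *attr;
        int rc;

        /* GID index 0 carries the port's default entry */
        attr = rdma_get_gid_attr(smcibdev->ibdev, ibport, 0);
        if (IS_ERR(attr))
                return -ENODEV;        /* assumed error value */

        /* extract the MAC behind the GID into the per-port cache */
        rc = rdma_read_gid_l2_fields(attr, NULL, smcibdev->mac[ibport - 1]);
        rdma_put_gid_attr(attr);
        return rc;
}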
163 static inline void smc_ib_define_local_systemid(struct smc_ib_device *smcibdev, in smc_ib_define_local_systemid() argument
166 memcpy(&local_systemid[2], &smcibdev->mac[ibport - 1], in smc_ib_define_local_systemid()
167 sizeof(smcibdev->mac[ibport - 1])); in smc_ib_define_local_systemid()
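smc_ib_define_local_systemid() is visible almost in full: it stamps the port's cached MAC into the system-wide identifier. Assuming local_systemid is the module-level buffer the name implies, the helper amounts to:

static inline void smc_ib_define_local_systemid(struct smc_ib_device *smcibdev,
                                                u8 ibport)
{
        /* bytes 0..1 of local_systemid presumably hold an instance
         * prefix set elsewhere; bytes 2..7 take the MAC of the first
         * active RoCE port, making the system id unique per machine
         */
        memcpy(&local_systemid[2], &smcibdev->mac[ibport - 1],
               sizeof(smcibdev->mac[ibport - 1]));
}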
180 bool smc_ib_port_active(struct smc_ib_device *smcibdev, u8 ibport) in smc_ib_port_active() argument
182 return smcibdev->pattr[ibport - 1].state == IB_PORT_ACTIVE; in smc_ib_port_active()
186 int smc_ib_determine_gid(struct smc_ib_device *smcibdev, u8 ibport, in smc_ib_determine_gid() argument
193 for (i = 0; i < smcibdev->pattr[ibport - 1].gid_tbl_len; i++) { in smc_ib_determine_gid()
194 attr = rdma_get_gid_attr(smcibdev->ibdev, ibport, i); in smc_ib_determine_gid()
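Of smc_ib_determine_gid() only the loop head and the per-entry lookup match, so the selection logic is elided. A hedged skeleton of the GID-table scan; the match criteria in the comment are assumptions:

        const struct ib_gid_attr *attr;
        int i;

        for (i = 0; i < smcibdev->pattr[ibport - 1].gid_tbl_len; i++) {
                attr = rdma_get_gid_attr(smcibdev->ibdev, ibport, i);
                if (IS_ERR(attr))
                        continue;
                /* presumably: check attr->gid_type and attr->ndev against
                 * the requested RoCE version and VLAN, and copy attr->gid
                 * plus attr->index out to the caller on a match
                 */
                rdma_put_gid_attr(attr);
        }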
219 static int smc_ib_remember_port_attr(struct smc_ib_device *smcibdev, u8 ibport) in smc_ib_remember_port_attr() argument
223 memset(&smcibdev->pattr[ibport - 1], 0, in smc_ib_remember_port_attr()
224 sizeof(smcibdev->pattr[ibport - 1])); in smc_ib_remember_port_attr()
225 rc = ib_query_port(smcibdev->ibdev, ibport, in smc_ib_remember_port_attr()
226 &smcibdev->pattr[ibport - 1]); in smc_ib_remember_port_attr()
230 rc = smc_ib_fill_mac(smcibdev, ibport); in smc_ib_remember_port_attr()
234 smc_ib_port_active(smcibdev, ibport)) in smc_ib_remember_port_attr()
236 smc_ib_define_local_systemid(smcibdev, ibport); in smc_ib_remember_port_attr()
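The matches in smc_ib_remember_port_attr() line up into a clear sequence: reset the cached attributes, query the port, cache its MAC, and derive the local system id once an active port is seen. Reconstructed under the assumption that the elided half of the line-234 condition (smc_ib_is_valid_local_systemid() below) keeps an already-set system id from being overwritten:

static int smc_ib_remember_port_attr(struct smc_ib_device *smcibdev, u8 ibport)
{
        int rc;

        memset(&smcibdev->pattr[ibport - 1], 0,
               sizeof(smcibdev->pattr[ibport - 1]));
        rc = ib_query_port(smcibdev->ibdev, ibport,
                           &smcibdev->pattr[ibport - 1]);
        if (rc)
                goto out;
        /* SMC needs the RoCE MAC of the port for its protocol headers */
        rc = smc_ib_fill_mac(smcibdev, ibport);
        if (rc)
                goto out;
        if (!smc_ib_is_valid_local_systemid() &&        /* assumed guard */
            smc_ib_port_active(smcibdev, ibport))
                /* create a unique system id from this port's MAC */
                smc_ib_define_local_systemid(smcibdev, ibport);
out:
        return rc;
}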
244 struct smc_ib_device *smcibdev = container_of( in smc_ib_port_event_work() local
248 for_each_set_bit(port_idx, &smcibdev->port_event_mask, SMC_MAX_PORTS) { in smc_ib_port_event_work()
249 smc_ib_remember_port_attr(smcibdev, port_idx + 1); in smc_ib_port_event_work()
250 clear_bit(port_idx, &smcibdev->port_event_mask); in smc_ib_port_event_work()
251 if (!smc_ib_port_active(smcibdev, port_idx + 1)) { in smc_ib_port_event_work()
252 set_bit(port_idx, smcibdev->ports_going_away); in smc_ib_port_event_work()
253 smcr_port_err(smcibdev, port_idx + 1); in smc_ib_port_event_work()
255 clear_bit(port_idx, smcibdev->ports_going_away); in smc_ib_port_event_work()
256 smcr_port_add(smcibdev, port_idx + 1); in smc_ib_port_event_work()
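smc_ib_port_event_work() drains port_event_mask, refreshes each flagged port, and routes it to the error or add path depending on the refreshed state. The else pairing lines 255-256 against 251-253 is an assumption, since the listing elides the connective:

static void smc_ib_port_event_work(struct work_struct *work)
{
        struct smc_ib_device *smcibdev = container_of(
                work, struct smc_ib_device, port_event_work);
        u8 port_idx;

        for_each_set_bit(port_idx, &smcibdev->port_event_mask,
                         SMC_MAX_PORTS) {
                smc_ib_remember_port_attr(smcibdev, port_idx + 1);
                clear_bit(port_idx, &smcibdev->port_event_mask);
                if (!smc_ib_port_active(smcibdev, port_idx + 1)) {
                        set_bit(port_idx, smcibdev->ports_going_away);
                        smcr_port_err(smcibdev, port_idx + 1);
                } else {
                        clear_bit(port_idx, smcibdev->ports_going_away);
                        smcr_port_add(smcibdev, port_idx + 1);
                }
        }
}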
265 struct smc_ib_device *smcibdev; in smc_ib_global_event_handler() local
269 smcibdev = container_of(handler, struct smc_ib_device, event_handler); in smc_ib_global_event_handler()
275 set_bit(port_idx, &smcibdev->port_event_mask); in smc_ib_global_event_handler()
277 smcibdev->ports_going_away)) in smc_ib_global_event_handler()
281 schedule_work(&smcibdev->port_event_work); in smc_ib_global_event_handler()
287 set_bit(port_idx, &smcibdev->port_event_mask); in smc_ib_global_event_handler()
288 if (test_and_clear_bit(port_idx, smcibdev->ports_going_away)) in smc_ib_global_event_handler()
289 schedule_work(&smcibdev->port_event_work); in smc_ib_global_event_handler()
295 set_bit(port_idx, &smcibdev->port_event_mask); in smc_ib_global_event_handler()
296 if (!test_and_set_bit(port_idx, smcibdev->ports_going_away)) in smc_ib_global_event_handler()
297 schedule_work(&smcibdev->port_event_work); in smc_ib_global_event_handler()
303 set_bit(port_idx, &smcibdev->port_event_mask); in smc_ib_global_event_handler()
304 schedule_work(&smcibdev->port_event_work); in smc_ib_global_event_handler()
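The four set_bit()/schedule_work() groups in smc_ib_global_event_handler() (lines 275-281, 287-289, 295-297, 303-304) read as one case per asynchronous event class: a fatal device error marks every port, a port going active reschedules only ports previously marked as going away, a port error marks a port as going away exactly once, and a GID change reschedules unconditionally. A reconstruction; the IB_EVENT_* case labels are inferred, not shown:

static void smc_ib_global_event_handler(struct ib_event_handler *handler,
                                        struct ib_event *ibevent)
{
        struct smc_ib_device *smcibdev;
        bool schedule = false;
        u8 port_idx;

        smcibdev = container_of(handler, struct smc_ib_device, event_handler);

        switch (ibevent->event) {
        case IB_EVENT_DEVICE_FATAL:
                /* terminate all ports on the device */
                for (port_idx = 0; port_idx < SMC_MAX_PORTS; port_idx++) {
                        set_bit(port_idx, &smcibdev->port_event_mask);
                        if (!test_and_set_bit(port_idx,
                                              smcibdev->ports_going_away))
                                schedule = true;
                }
                if (schedule)
                        schedule_work(&smcibdev->port_event_work);
                break;
        case IB_EVENT_PORT_ACTIVE:
                port_idx = ibevent->element.port_num - 1;
                if (port_idx >= SMC_MAX_PORTS)
                        break;
                set_bit(port_idx, &smcibdev->port_event_mask);
                /* only act if the port was marked as going away before */
                if (test_and_clear_bit(port_idx, smcibdev->ports_going_away))
                        schedule_work(&smcibdev->port_event_work);
                break;
        case IB_EVENT_PORT_ERR:
                port_idx = ibevent->element.port_num - 1;
                if (port_idx >= SMC_MAX_PORTS)
                        break;
                set_bit(port_idx, &smcibdev->port_event_mask);
                if (!test_and_set_bit(port_idx, smcibdev->ports_going_away))
                        schedule_work(&smcibdev->port_event_work);
                break;
        case IB_EVENT_GID_CHANGE:
                port_idx = ibevent->element.port_num - 1;
                if (port_idx >= SMC_MAX_PORTS)
                        break;
                set_bit(port_idx, &smcibdev->port_event_mask);
                schedule_work(&smcibdev->port_event_work);
                break;
        default:
                break;
        }
}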
322 lnk->roce_pd = ib_alloc_pd(lnk->smcibdev->ibdev, 0); in smc_ib_create_protection_domain()
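smc_ib_create_protection_domain() contributes a single match, the protection-domain allocation per link. A minimal sketch; the PTR_ERR_OR_ZERO unwinding is assumed by analogy with the CQ setup visible further down:

int smc_ib_create_protection_domain(struct smc_link *lnk)
{
        int rc;

        lnk->roce_pd = ib_alloc_pd(lnk->smcibdev->ibdev, 0);
        rc = PTR_ERR_OR_ZERO(lnk->roce_pd);
        if (IS_ERR(lnk->roce_pd))
                lnk->roce_pd = NULL;        /* assumed unwinding */
        return rc;
}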
332 struct smc_ib_device *smcibdev = lnk->smcibdev; in smc_ib_qp_event_handler() local
341 set_bit(port_idx, &smcibdev->port_event_mask); in smc_ib_qp_event_handler()
342 if (!test_and_set_bit(port_idx, smcibdev->ports_going_away)) in smc_ib_qp_event_handler()
343 schedule_work(&smcibdev->port_event_work); in smc_ib_qp_event_handler()
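smc_ib_qp_event_handler() reuses the going-away bookkeeping of the global handler, but is attached per QP with the link as context. The QP-level case labels and the port lookup through ibevent->element.qp are assumptions; only lines 332 and 341-343 are shown:

static void smc_ib_qp_event_handler(struct ib_event *ibevent, void *priv)
{
        struct smc_link *lnk = (struct smc_link *)priv;
        struct smc_ib_device *smcibdev = lnk->smcibdev;
        u8 port_idx;

        switch (ibevent->event) {
        case IB_EVENT_QP_FATAL:                /* assumed cases */
        case IB_EVENT_QP_ACCESS_ERR:
                port_idx = ibevent->element.qp->port - 1;
                if (port_idx >= SMC_MAX_PORTS)
                        break;
                set_bit(port_idx, &smcibdev->port_event_mask);
                if (!test_and_set_bit(port_idx, smcibdev->ports_going_away))
                        schedule_work(&smcibdev->port_event_work);
                break;
        default:
                break;
        }
}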
363 .send_cq = lnk->smcibdev->roce_cq_send, in smc_ib_create_queue_pair()
364 .recv_cq = lnk->smcibdev->roce_cq_recv, in smc_ib_create_queue_pair()
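The two matches in smc_ib_create_queue_pair() are the CQ wiring inside the QP init attributes: all links on a device share the one send and one recv CQ held in smcibdev, while each link gets its own QP. A sketch of that structure; every field except send_cq/recv_cq is an assumption based on common reliable-connected QP setup:

        struct ib_qp_init_attr qp_attr = {
                .event_handler = smc_ib_qp_event_handler,
                .qp_context = lnk,
                .send_cq = lnk->smcibdev->roce_cq_send,
                .recv_cq = lnk->smcibdev->roce_cq_recv,
                /* .cap sizing for SMC's work-request ring elided here */
                .sq_sig_type = IB_SIGNAL_REQ_WR,
                .qp_type = IB_QPT_RC,
        };

        lnk->roce_qp = ib_create_qp(lnk->roce_pd, &qp_attr);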
444 ib_dma_sync_single_for_cpu(lnk->smcibdev->ibdev, in smc_ib_sync_sg_for_cpu()
464 ib_dma_sync_single_for_device(lnk->smcibdev->ibdev, in smc_ib_sync_sg_for_device()
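Lines 444 and 464 are mirrored DMA ownership transfers, before the CPU reads received data and before the device transmits. A sketch of the CPU-side variant; the buffer bookkeeping names (buf_slot, sgt, link_idx) are assumptions, not visible in the listing:

        struct scatterlist *sg;
        unsigned int i;

        /* hand the buffer back to the CPU before reading its contents */
        for_each_sg(buf_slot->sgt[lnk->link_idx].sgl, sg,
                    buf_slot->sgt[lnk->link_idx].nents, i) {
                if (!sg_dma_len(sg))
                        break;
                ib_dma_sync_single_for_cpu(lnk->smcibdev->ibdev,
                                           sg_dma_address(sg),
                                           sg_dma_len(sg),
                                           data_direction);
        }

smc_ib_sync_sg_for_device() at line 464 is presumably the same loop with ib_dma_sync_single_for_device().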
478 mapped_nents = ib_dma_map_sg(lnk->smcibdev->ibdev, in smc_ib_buf_map_sg()
495 ib_dma_unmap_sg(lnk->smcibdev->ibdev, in smc_ib_buf_unmap_sg()
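The map/unmap pair at lines 478 and 495 wraps the RDMA core's scatterlist DMA API; zero mapped entries signals failure. A sketch with the same assumed buffer names as above:

        /* map the buffer's scatterlist for DMA by the IB device */
        mapped_nents = ib_dma_map_sg(lnk->smcibdev->ibdev,
                                     buf_slot->sgt[lnk->link_idx].sgl,
                                     buf_slot->sgt[lnk->link_idx].orig_nents,
                                     data_direction);
        if (!mapped_nents)
                return -ENOMEM;        /* assumed error value */

        /* ...and the mirror-image teardown in smc_ib_buf_unmap_sg() */
        ib_dma_unmap_sg(lnk->smcibdev->ibdev,
                        buf_slot->sgt[lnk->link_idx].sgl,
                        buf_slot->sgt[lnk->link_idx].orig_nents,
                        data_direction);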
502 long smc_ib_setup_per_ibdev(struct smc_ib_device *smcibdev) in smc_ib_setup_per_ibdev() argument
509 mutex_lock(&smcibdev->mutex); in smc_ib_setup_per_ibdev()
511 if (smcibdev->initialized) in smc_ib_setup_per_ibdev()
518 smcibdev->roce_cq_send = ib_create_cq(smcibdev->ibdev, in smc_ib_setup_per_ibdev()
520 smcibdev, &cqattr); in smc_ib_setup_per_ibdev()
521 rc = PTR_ERR_OR_ZERO(smcibdev->roce_cq_send); in smc_ib_setup_per_ibdev()
522 if (IS_ERR(smcibdev->roce_cq_send)) { in smc_ib_setup_per_ibdev()
523 smcibdev->roce_cq_send = NULL; in smc_ib_setup_per_ibdev()
526 smcibdev->roce_cq_recv = ib_create_cq(smcibdev->ibdev, in smc_ib_setup_per_ibdev()
528 smcibdev, &cqattr); in smc_ib_setup_per_ibdev()
529 rc = PTR_ERR_OR_ZERO(smcibdev->roce_cq_recv); in smc_ib_setup_per_ibdev()
530 if (IS_ERR(smcibdev->roce_cq_recv)) { in smc_ib_setup_per_ibdev()
531 smcibdev->roce_cq_recv = NULL; in smc_ib_setup_per_ibdev()
534 smc_wr_add_dev(smcibdev); in smc_ib_setup_per_ibdev()
535 smcibdev->initialized = 1; in smc_ib_setup_per_ibdev()
539 ib_destroy_cq(smcibdev->roce_cq_send); in smc_ib_setup_per_ibdev()
541 mutex_unlock(&smcibdev->mutex); in smc_ib_setup_per_ibdev()
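smc_ib_setup_per_ibdev() is largely visible: a mutex-guarded, idempotent creation of the shared send and recv CQs, unwinding the send CQ if the recv CQ fails. Reconstructed with the assumed pieces marked: the completion-handler names (smc_wr_tx_cq_handler/smc_wr_rx_cq_handler, matching the smc_wr_add_dev() hand-off), the CQ depth constant, and the goto labels:

long smc_ib_setup_per_ibdev(struct smc_ib_device *smcibdev)
{
        /* SMC_MAX_CQE is an assumed depth constant; the listing elides
         * the actual CQE sizing logic
         */
        struct ib_cq_init_attr cqattr = { .cqe = SMC_MAX_CQE };
        long rc = 0;

        mutex_lock(&smcibdev->mutex);
        if (smcibdev->initialized)
                goto out;
        smcibdev->roce_cq_send = ib_create_cq(smcibdev->ibdev,
                                              smc_wr_tx_cq_handler, NULL,
                                              smcibdev, &cqattr);
        rc = PTR_ERR_OR_ZERO(smcibdev->roce_cq_send);
        if (IS_ERR(smcibdev->roce_cq_send)) {
                smcibdev->roce_cq_send = NULL;
                goto out;
        }
        smcibdev->roce_cq_recv = ib_create_cq(smcibdev->ibdev,
                                              smc_wr_rx_cq_handler, NULL,
                                              smcibdev, &cqattr);
        rc = PTR_ERR_OR_ZERO(smcibdev->roce_cq_recv);
        if (IS_ERR(smcibdev->roce_cq_recv)) {
                smcibdev->roce_cq_recv = NULL;
                goto err;
        }
        smc_wr_add_dev(smcibdev);
        smcibdev->initialized = 1;
        goto out;

err:
        ib_destroy_cq(smcibdev->roce_cq_send);
out:
        mutex_unlock(&smcibdev->mutex);
        return rc;
}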
545 static void smc_ib_cleanup_per_ibdev(struct smc_ib_device *smcibdev) in smc_ib_cleanup_per_ibdev() argument
547 mutex_lock(&smcibdev->mutex); in smc_ib_cleanup_per_ibdev()
548 if (!smcibdev->initialized) in smc_ib_cleanup_per_ibdev()
550 smcibdev->initialized = 0; in smc_ib_cleanup_per_ibdev()
551 ib_destroy_cq(smcibdev->roce_cq_recv); in smc_ib_cleanup_per_ibdev()
552 ib_destroy_cq(smcibdev->roce_cq_send); in smc_ib_cleanup_per_ibdev()
553 smc_wr_remove_dev(smcibdev); in smc_ib_cleanup_per_ibdev()
555 mutex_unlock(&smcibdev->mutex); in smc_ib_cleanup_per_ibdev()
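The teardown counterpart is nearly complete in the listing; only the early-out label is filled in:

static void smc_ib_cleanup_per_ibdev(struct smc_ib_device *smcibdev)
{
        mutex_lock(&smcibdev->mutex);
        if (!smcibdev->initialized)
                goto out;        /* assumed early out */
        smcibdev->initialized = 0;
        ib_destroy_cq(smcibdev->roce_cq_recv);
        ib_destroy_cq(smcibdev->roce_cq_send);
        smc_wr_remove_dev(smcibdev);
out:
        mutex_unlock(&smcibdev->mutex);
}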
563 struct smc_ib_device *smcibdev; in smc_ib_add_dev() local
570 smcibdev = kzalloc(sizeof(*smcibdev), GFP_KERNEL); in smc_ib_add_dev()
571 if (!smcibdev) in smc_ib_add_dev()
574 smcibdev->ibdev = ibdev; in smc_ib_add_dev()
575 INIT_WORK(&smcibdev->port_event_work, smc_ib_port_event_work); in smc_ib_add_dev()
576 atomic_set(&smcibdev->lnk_cnt, 0); in smc_ib_add_dev()
577 init_waitqueue_head(&smcibdev->lnks_deleted); in smc_ib_add_dev()
578 mutex_init(&smcibdev->mutex); in smc_ib_add_dev()
580 list_add_tail(&smcibdev->list, &smc_ib_devices.list); in smc_ib_add_dev()
582 ib_set_client_data(ibdev, &smc_ib_client, smcibdev); in smc_ib_add_dev()
583 INIT_IB_EVENT_HANDLER(&smcibdev->event_handler, smcibdev->ibdev, in smc_ib_add_dev()
585 ib_register_event_handler(&smcibdev->event_handler); in smc_ib_add_dev()
588 port_cnt = smcibdev->ibdev->phys_port_cnt; in smc_ib_add_dev()
590 smcibdev->ibdev->name, port_cnt); in smc_ib_add_dev()
594 set_bit(i, &smcibdev->port_event_mask); in smc_ib_add_dev()
597 smcibdev->pnetid[i])) in smc_ib_add_dev()
598 smc_pnetid_by_table_ib(smcibdev, i + 1); in smc_ib_add_dev()
601 smcibdev->ibdev->name, i + 1, in smc_ib_add_dev()
602 smcibdev->pnetid[i], in smc_ib_add_dev()
603 smcibdev->pnetid_by_user[i] ? in smc_ib_add_dev()
607 schedule_work(&smcibdev->port_event_work); in smc_ib_add_dev()
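smc_ib_add_dev() is the ib_client add callback: allocate the per-device state, register for asynchronous events, flag every port for an attribute refresh, and determine each port's PNET ID. A skeleton; the capability check, the list locking, the polarity of the PNET fallback, and the log wording are assumptions around the listed lines:

static int smc_ib_add_dev(struct ib_device *ibdev)
{
        struct smc_ib_device *smcibdev;
        u8 port_cnt;
        int i;

        /* (check that ibdev is a usable RoCE device; elided in listing) */
        smcibdev = kzalloc(sizeof(*smcibdev), GFP_KERNEL);
        if (!smcibdev)
                return -ENOMEM;

        smcibdev->ibdev = ibdev;
        INIT_WORK(&smcibdev->port_event_work, smc_ib_port_event_work);
        atomic_set(&smcibdev->lnk_cnt, 0);
        init_waitqueue_head(&smcibdev->lnks_deleted);
        mutex_init(&smcibdev->mutex);
        /* insertion presumably happens under the smc_ib_devices lock */
        list_add_tail(&smcibdev->list, &smc_ib_devices.list);
        ib_set_client_data(ibdev, &smc_ib_client, smcibdev);
        INIT_IB_EVENT_HANDLER(&smcibdev->event_handler, smcibdev->ibdev,
                              smc_ib_global_event_handler);
        ib_register_event_handler(&smcibdev->event_handler);

        /* trigger reading of the port attributes for all ports */
        port_cnt = smcibdev->ibdev->phys_port_cnt;
        for (i = 0; i < min_t(size_t, SMC_MAX_PORTS, port_cnt); i++) {
                set_bit(i, &smcibdev->port_event_mask);
                /* fall back to the pnet table when the hardware does not
                 * supply a PNET ID for the port (polarity assumed)
                 */
                if (!smc_pnetid_by_dev_port(ibdev->dev.parent, i,
                                            smcibdev->pnetid[i]))
                        smc_pnetid_by_table_ib(smcibdev, i + 1);
                pr_warn_ratelimited("smc: ib device %s port %d has pnetid %.16s%s\n",
                                    smcibdev->ibdev->name, i + 1,
                                    smcibdev->pnetid[i],
                                    smcibdev->pnetid_by_user[i] ?
                                        " (user defined)" : "");
        }
        schedule_work(&smcibdev->port_event_work);
        return 0;
}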
614 struct smc_ib_device *smcibdev = client_data; in smc_ib_remove_dev() local
617 list_del_init(&smcibdev->list); /* remove from smc_ib_devices */ in smc_ib_remove_dev()
620 smcibdev->ibdev->name); in smc_ib_remove_dev()
621 smc_smcr_terminate_all(smcibdev); in smc_ib_remove_dev()
622 smc_ib_cleanup_per_ibdev(smcibdev); in smc_ib_remove_dev()
623 ib_unregister_event_handler(&smcibdev->event_handler); in smc_ib_remove_dev()
624 cancel_work_sync(&smcibdev->port_event_work); in smc_ib_remove_dev()
625 kfree(smcibdev); in smc_ib_remove_dev()
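smc_ib_remove_dev() is the matching ib_client remove callback; the listing shows nearly the whole unwind, in strict reverse order of setup. Only the list locking and the exact log wording are assumed:

static void smc_ib_remove_dev(struct ib_device *ibdev, void *client_data)
{
        struct smc_ib_device *smcibdev = client_data;

        /* removal presumably happens under the smc_ib_devices lock */
        list_del_init(&smcibdev->list);        /* remove from smc_ib_devices */
        pr_warn_ratelimited("smc: removing ib device %s\n",
                            smcibdev->ibdev->name);
        smc_smcr_terminate_all(smcibdev);       /* tear down all link groups */
        smc_ib_cleanup_per_ibdev(smcibdev);     /* destroy CQs, detach smc_wr */
        ib_unregister_event_handler(&smcibdev->event_handler);
        cancel_work_sync(&smcibdev->port_event_work);
        kfree(smcibdev);
}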