Lines Matching refs:srv_path
61 static bool rtrs_srv_change_state(struct rtrs_srv_path *srv_path, in rtrs_srv_change_state() argument
67 spin_lock_irq(&srv_path->state_lock); in rtrs_srv_change_state()
68 old_state = srv_path->state; in rtrs_srv_change_state()
87 srv_path->state = new_state; in rtrs_srv_change_state()
88 spin_unlock_irq(&srv_path->state_lock); in rtrs_srv_change_state()
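
The rtrs_srv_change_state() references above are the classic guarded state machine: validate the old->new transition under state_lock with IRQs disabled so completion handlers cannot race the update, and report whether the transition was taken. A minimal sketch of the pattern (the real transition table in rtrs-srv.c is larger; type and field names are reused from the listing):

static bool change_state_sketch(struct rtrs_srv_path *srv_path,
				enum rtrs_srv_state new_state)
{
	enum rtrs_srv_state old_state;
	bool changed = false;

	spin_lock_irq(&srv_path->state_lock);
	old_state = srv_path->state;
	switch (new_state) {
	case RTRS_SRV_CONNECTED:
		changed = (old_state == RTRS_SRV_CONNECTING);
		break;
	case RTRS_SRV_CLOSING:
		changed = (old_state == RTRS_SRV_CONNECTING ||
			   old_state == RTRS_SRV_CONNECTED);
		break;
	case RTRS_SRV_CLOSED:
		changed = (old_state == RTRS_SRV_CLOSING);
		break;
	default:
		break;
	}
	if (changed)
		srv_path->state = new_state;
	spin_unlock_irq(&srv_path->state_lock);

	return changed;
}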
100 static void rtrs_srv_free_ops_ids(struct rtrs_srv_path *srv_path) in rtrs_srv_free_ops_ids() argument
102 struct rtrs_srv_sess *srv = srv_path->srv; in rtrs_srv_free_ops_ids()
105 if (srv_path->ops_ids) { in rtrs_srv_free_ops_ids()
107 free_id(srv_path->ops_ids[i]); in rtrs_srv_free_ops_ids()
108 kfree(srv_path->ops_ids); in rtrs_srv_free_ops_ids()
109 srv_path->ops_ids = NULL; in rtrs_srv_free_ops_ids()
121 struct rtrs_srv_path *srv_path = container_of(ref, in rtrs_srv_inflight_ref_release() local
125 percpu_ref_exit(&srv_path->ids_inflight_ref); in rtrs_srv_inflight_ref_release()
126 complete(&srv_path->complete_done); in rtrs_srv_inflight_ref_release()
129 static int rtrs_srv_alloc_ops_ids(struct rtrs_srv_path *srv_path) in rtrs_srv_alloc_ops_ids() argument
131 struct rtrs_srv_sess *srv = srv_path->srv; in rtrs_srv_alloc_ops_ids()
135 srv_path->ops_ids = kcalloc(srv->queue_depth, in rtrs_srv_alloc_ops_ids()
136 sizeof(*srv_path->ops_ids), in rtrs_srv_alloc_ops_ids()
138 if (!srv_path->ops_ids) in rtrs_srv_alloc_ops_ids()
146 srv_path->ops_ids[i] = id; in rtrs_srv_alloc_ops_ids()
149 ret = percpu_ref_init(&srv_path->ids_inflight_ref, in rtrs_srv_alloc_ops_ids()
155 init_completion(&srv_path->complete_done); in rtrs_srv_alloc_ops_ids()
160 rtrs_srv_free_ops_ids(srv_path); in rtrs_srv_alloc_ops_ids()
164 static inline void rtrs_srv_get_ops_ids(struct rtrs_srv_path *srv_path) in rtrs_srv_get_ops_ids() argument
166 percpu_ref_get(&srv_path->ids_inflight_ref); in rtrs_srv_get_ops_ids()
169 static inline void rtrs_srv_put_ops_ids(struct rtrs_srv_path *srv_path) in rtrs_srv_put_ops_ids() argument
171 percpu_ref_put(&srv_path->ids_inflight_ref); in rtrs_srv_put_ops_ids()
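
Together, ops_ids and ids_inflight_ref implement in-flight I/O tracking: every operation pins the path via rtrs_srv_get_ops_ids() and unpins it in rtrs_srv_put_ops_ids(); teardown kills the ref and sleeps until the release callback fires and completes complete_done. A condensed sketch of the lifecycle; the helper names inflight_setup()/inflight_drain() are invented here for clarity:

static void inflight_release(struct percpu_ref *ref)
{
	struct rtrs_srv_path *srv_path =
		container_of(ref, struct rtrs_srv_path, ids_inflight_ref);

	percpu_ref_exit(&srv_path->ids_inflight_ref);
	complete(&srv_path->complete_done);
}

static int inflight_setup(struct rtrs_srv_path *srv_path)
{
	int ret;

	/* flags 0: ref starts live, in fast per-CPU mode */
	ret = percpu_ref_init(&srv_path->ids_inflight_ref,
			      inflight_release, 0, GFP_KERNEL);
	if (ret)
		return ret;
	init_completion(&srv_path->complete_done);
	return 0;
}

static void inflight_drain(struct rtrs_srv_path *srv_path)
{
	/* drop the initial ref; release fires once in-flight count is 0 */
	percpu_ref_kill(&srv_path->ids_inflight_ref);
	wait_for_completion(&srv_path->complete_done);
}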
178 struct rtrs_srv_path *srv_path = to_srv_path(s); in rtrs_srv_reg_mr_done() local
183 close_path(srv_path); in rtrs_srv_reg_mr_done()
195 struct rtrs_srv_path *srv_path = to_srv_path(s); in rdma_write_sg() local
196 dma_addr_t dma_addr = srv_path->dma_addr[id->msg_id]; in rdma_write_sg()
230 plist->lkey = srv_path->s.dev->ib_pd->local_dma_lkey; in rdma_write_sg()
281 srv_mr = &srv_path->mrs[id->msg_id]; in rdma_write_sg()
297 list.lkey = srv_path->s.dev->ib_pd->local_dma_lkey; in rdma_write_sg()
301 ib_dma_sync_single_for_device(srv_path->s.dev->ib_dev, in rdma_write_sg()
314 ib_dma_sync_single_for_device(srv_path->s.dev->ib_dev, dma_addr, in rdma_write_sg()
339 struct rtrs_srv_path *srv_path = to_srv_path(s); in send_io_resp_imm() local
401 srv_mr = &srv_path->mrs[id->msg_id]; in send_io_resp_imm()
418 list.lkey = srv_path->s.dev->ib_pd->local_dma_lkey; in send_io_resp_imm()
422 ib_dma_sync_single_for_device(srv_path->s.dev->ib_dev, in send_io_resp_imm()
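
rdma_write_sg() and send_io_resp_imm() both sync DMA buffers before posting (lines 301, 314, 422 above), and the receive side syncs for the CPU before parsing (lines 911, 1108 below). The discipline, sketched with an invented helper; the directions shown are illustrative, since rtrs maps its chunks DMA_BIDIRECTIONAL:

static void dma_sync_sketch(struct ib_device *ibdev, u64 dma_addr,
			    size_t size)
{
	/* CPU filled this buffer; flush it before the HCA reads it */
	ib_dma_sync_single_for_device(ibdev, dma_addr, size,
				      DMA_TO_DEVICE);

	/* ...post a send WR referencing dma_addr, wait for completion... */

	/* HCA wrote into the buffer; sync before the CPU parses it */
	ib_dma_sync_single_for_cpu(ibdev, dma_addr, size,
				   DMA_FROM_DEVICE);
}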
443 void close_path(struct rtrs_srv_path *srv_path) in close_path() argument
445 if (rtrs_srv_change_state(srv_path, RTRS_SRV_CLOSING)) in close_path()
446 queue_work(rtrs_wq, &srv_path->close_work); in close_path()
447 WARN_ON(srv_path->state != RTRS_SRV_CLOSING); in close_path()
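
close_path() is the single teardown entry point used by every error path in this file, and the idiom is easy to misread, so here is the same two-line body restated with comments (an annotation, not a change):

void close_path(struct rtrs_srv_path *srv_path)
{
	/*
	 * Only the caller that wins the transition to CLOSING queues
	 * close_work, so concurrent closers schedule teardown exactly once.
	 */
	if (rtrs_srv_change_state(srv_path, RTRS_SRV_CLOSING))
		queue_work(rtrs_wq, &srv_path->close_work);
	/* every caller is expected to observe the path in CLOSING here */
	WARN_ON(srv_path->state != RTRS_SRV_CLOSING);
}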
479 struct rtrs_srv_path *srv_path; in rtrs_srv_resp_rdma() local
489 srv_path = to_srv_path(s); in rtrs_srv_resp_rdma()
493 if (srv_path->state != RTRS_SRV_CONNECTED) { in rtrs_srv_resp_rdma()
496 kobject_name(&srv_path->kobj), in rtrs_srv_resp_rdma()
497 rtrs_srv_state_str(srv_path->state)); in rtrs_srv_resp_rdma()
501 struct rtrs_srv_mr *mr = &srv_path->mrs[id->msg_id]; in rtrs_srv_resp_rdma()
507 kobject_name(&srv_path->kobj), in rtrs_srv_resp_rdma()
523 kobject_name(&srv_path->kobj)); in rtrs_srv_resp_rdma()
524 close_path(srv_path); in rtrs_srv_resp_rdma()
527 rtrs_srv_put_ops_ids(srv_path); in rtrs_srv_resp_rdma()
543 static void unmap_cont_bufs(struct rtrs_srv_path *srv_path) in unmap_cont_bufs() argument
547 for (i = 0; i < srv_path->mrs_num; i++) { in unmap_cont_bufs()
550 srv_mr = &srv_path->mrs[i]; in unmap_cont_bufs()
551 rtrs_iu_free(srv_mr->iu, srv_path->s.dev->ib_dev, 1); in unmap_cont_bufs()
553 ib_dma_unmap_sg(srv_path->s.dev->ib_dev, srv_mr->sgt.sgl, in unmap_cont_bufs()
557 kfree(srv_path->mrs); in unmap_cont_bufs()
560 static int map_cont_bufs(struct rtrs_srv_path *srv_path) in map_cont_bufs() argument
562 struct rtrs_srv_sess *srv = srv_path->srv; in map_cont_bufs()
563 struct rtrs_path *ss = &srv_path->s; in map_cont_bufs()
580 srv_path->s.dev->ib_dev->attrs.max_fast_reg_page_list_len; in map_cont_bufs()
585 srv_path->mrs = kcalloc(mrs_num, sizeof(*srv_path->mrs), GFP_KERNEL); in map_cont_bufs()
586 if (!srv_path->mrs) in map_cont_bufs()
589 srv_path->mrs_num = mrs_num; in map_cont_bufs()
592 struct rtrs_srv_mr *srv_mr = &srv_path->mrs[mri]; in map_cont_bufs()
611 nr_sgt = ib_dma_map_sg(srv_path->s.dev->ib_dev, sgt->sgl, in map_cont_bufs()
617 mr = ib_alloc_mr(srv_path->s.dev->ib_pd, IB_MR_TYPE_MEM_REG, in map_cont_bufs()
633 GFP_KERNEL, srv_path->s.dev->ib_dev, in map_cont_bufs()
643 srv_path->dma_addr[chunks + i] = sg_dma_address(s); in map_cont_bufs()
651 srv_mr = &srv_path->mrs[mri]; in map_cont_bufs()
654 rtrs_iu_free(srv_mr->iu, srv_path->s.dev->ib_dev, 1); in map_cont_bufs()
658 ib_dma_unmap_sg(srv_path->s.dev->ib_dev, sgt->sgl, in map_cont_bufs()
663 kfree(srv_path->mrs); in map_cont_bufs()
669 srv_path->mem_bits = (MAX_IMM_PAYL_BITS - chunk_bits); in map_cont_bufs()
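
map_cont_bufs() splits the receive chunks into groups bounded by the device's max_fast_reg_page_list_len (line 580), then DMA-maps and fast-registers one MR per group (lines 611-643). Line 669 records how many immediate-payload bits remain for the intra-chunk offset; see the decode sketch after rtrs_srv_rdma_done() below. The core of one mapping iteration, condensed into a sketch with the IU allocation and most unwinding trimmed; ib_map_mr_sg() is assumed from the code surrounding these lines:

static struct ib_mr *map_chunk_group_sketch(struct ib_device *ibdev,
					    struct ib_pd *ibpd,
					    struct sg_table *sgt,
					    unsigned int chunk_size)
{
	struct ib_mr *mr;
	int nr_sgt, nr;

	/* make the chunk pages visible to the HCA */
	nr_sgt = ib_dma_map_sg(ibdev, sgt->sgl, sgt->nents,
			       DMA_BIDIRECTIONAL);
	if (nr_sgt <= 0)
		return ERR_PTR(-EINVAL);

	/* one fast-registration MR covering the whole mapped group */
	mr = ib_alloc_mr(ibpd, IB_MR_TYPE_MEM_REG, nr_sgt);
	if (IS_ERR(mr))
		goto unmap;

	/* bind the MR to the mapped pages */
	nr = ib_map_mr_sg(mr, sgt->sgl, nr_sgt, NULL, chunk_size);
	if (nr != nr_sgt) {
		ib_dereg_mr(mr);
		mr = ERR_PTR(nr < 0 ? nr : -EINVAL);
		goto unmap;
	}
	return mr;

unmap:
	ib_dma_unmap_sg(ibdev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
	return mr;
}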
679 static void rtrs_srv_init_hb(struct rtrs_srv_path *srv_path) in rtrs_srv_init_hb() argument
681 rtrs_init_hb(&srv_path->s, &io_comp_cqe, in rtrs_srv_init_hb()
688 static void rtrs_srv_start_hb(struct rtrs_srv_path *srv_path) in rtrs_srv_start_hb() argument
690 rtrs_start_hb(&srv_path->s); in rtrs_srv_start_hb()
693 static void rtrs_srv_stop_hb(struct rtrs_srv_path *srv_path) in rtrs_srv_stop_hb() argument
695 rtrs_stop_hb(&srv_path->s); in rtrs_srv_stop_hb()
702 struct rtrs_srv_path *srv_path = to_srv_path(s); in rtrs_srv_info_rsp_done() local
706 rtrs_iu_free(iu, srv_path->s.dev->ib_dev, 1); in rtrs_srv_info_rsp_done()
711 close_path(srv_path); in rtrs_srv_info_rsp_done()
717 static void rtrs_srv_path_up(struct rtrs_srv_path *srv_path) in rtrs_srv_path_up() argument
719 struct rtrs_srv_sess *srv = srv_path->srv; in rtrs_srv_path_up()
730 srv_path->established = true; in rtrs_srv_path_up()
733 static void rtrs_srv_path_down(struct rtrs_srv_path *srv_path) in rtrs_srv_path_down() argument
735 struct rtrs_srv_sess *srv = srv_path->srv; in rtrs_srv_path_down()
738 if (!srv_path->established) in rtrs_srv_path_down()
741 srv_path->established = false; in rtrs_srv_path_down()
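
rtrs_srv_path_up() and rtrs_srv_path_down() bracket the user-visible lifetime of a path; the `established` flag (lines 730, 738, 741) makes the down notification idempotent. A sketch of the down side, with the link-event callback to the user elided:

static void path_down_sketch(struct rtrs_srv_path *srv_path)
{
	if (!srv_path->established)
		return;		/* never came up, or already reported down */

	/* ...notify the rtrs user via its link event callback... */

	srv_path->established = false;
}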
753 struct rtrs_srv_path *srv_path; in exist_pathname() local
766 list_for_each_entry(srv_path, &srv->paths_list, s.entry) { in exist_pathname()
767 if (strlen(srv_path->s.sessname) == strlen(pathname) && in exist_pathname()
768 !strcmp(srv_path->s.sessname, pathname)) { in exist_pathname()
781 static int post_recv_path(struct rtrs_srv_path *srv_path);
788 struct rtrs_srv_path *srv_path = to_srv_path(s); in process_info_req() local
796 err = post_recv_path(srv_path); in process_info_req()
807 if (exist_pathname(srv_path->srv->ctx, in process_info_req()
808 msg->pathname, &srv_path->srv->paths_uuid)) { in process_info_req()
812 strscpy(srv_path->s.sessname, msg->pathname, in process_info_req()
813 sizeof(srv_path->s.sessname)); in process_info_req()
815 rwr = kcalloc(srv_path->mrs_num, sizeof(*rwr), GFP_KERNEL); in process_info_req()
820 tx_sz += sizeof(rsp->desc[0]) * srv_path->mrs_num; in process_info_req()
821 tx_iu = rtrs_iu_alloc(1, tx_sz, GFP_KERNEL, srv_path->s.dev->ib_dev, in process_info_req()
830 rsp->sg_cnt = cpu_to_le16(srv_path->mrs_num); in process_info_req()
832 for (mri = 0; mri < srv_path->mrs_num; mri++) { in process_info_req()
833 struct ib_mr *mr = srv_path->mrs[mri].mr; in process_info_req()
854 err = rtrs_srv_create_path_files(srv_path); in process_info_req()
857 kobject_get(&srv_path->kobj); in process_info_req()
858 get_device(&srv_path->srv->dev); in process_info_req()
859 rtrs_srv_change_state(srv_path, RTRS_SRV_CONNECTED); in process_info_req()
860 rtrs_srv_start_hb(srv_path); in process_info_req()
868 rtrs_srv_path_up(srv_path); in process_info_req()
870 ib_dma_sync_single_for_device(srv_path->s.dev->ib_dev, in process_info_req()
879 rtrs_iu_free(tx_iu, srv_path->s.dev->ib_dev, 1); in process_info_req()
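
process_info_req() validates the client's pathname against existing sessions, then answers with one rkey descriptor per MR (lines 820-833). A sketch of the response assembly, assuming the usual rtrs_msg_info_rsp layout of an sg_cnt followed by {addr, key, len} descriptors; the helper name is invented:

static void fill_info_rsp_sketch(struct rtrs_msg_info_rsp *rsp,
				 struct rtrs_srv_path *srv_path)
{
	int mri;

	rsp->sg_cnt = cpu_to_le16(srv_path->mrs_num);
	for (mri = 0; mri < srv_path->mrs_num; mri++) {
		struct ib_mr *mr = srv_path->mrs[mri].mr;

		/* advertise each chunk-group MR: base iova, rkey, length */
		rsp->desc[mri].addr = cpu_to_le64(mr->iova);
		rsp->desc[mri].key  = cpu_to_le32(mr->rkey);
		rsp->desc[mri].len  = cpu_to_le32(mr->length);
	}
}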
891 struct rtrs_srv_path *srv_path = to_srv_path(s); in rtrs_srv_info_req_done() local
911 ib_dma_sync_single_for_cpu(srv_path->s.dev->ib_dev, iu->dma_addr, in rtrs_srv_info_req_done()
924 rtrs_iu_free(iu, srv_path->s.dev->ib_dev, 1); in rtrs_srv_info_req_done()
927 close_path(srv_path); in rtrs_srv_info_req_done()
934 struct rtrs_srv_path *srv_path = to_srv_path(s); in post_recv_info_req() local
939 GFP_KERNEL, srv_path->s.dev->ib_dev, in post_recv_info_req()
947 rtrs_iu_free(rx_iu, srv_path->s.dev->ib_dev, 1); in post_recv_info_req()
967 static int post_recv_path(struct rtrs_srv_path *srv_path) in post_recv_path() argument
969 struct rtrs_srv_sess *srv = srv_path->srv; in post_recv_path()
970 struct rtrs_path *s = &srv_path->s; in post_recv_path()
974 for (cid = 0; cid < srv_path->s.con_num; cid++) { in post_recv_path()
980 err = post_recv_io(to_srv_con(srv_path->s.con[cid]), q_size); in post_recv_path()
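
post_recv_path() pre-posts receive buffers on every connection of the path. A sketch assuming the usual rtrs split, where cid 0 is the service (info/admin) connection with its own small SERVICE_CON_QUEUE_DEPTH and the remaining connections carry I/O at the session's queue depth:

static int post_recv_path_sketch(struct rtrs_srv_path *srv_path)
{
	struct rtrs_srv_sess *srv = srv_path->srv;
	size_t q_size;
	int err, cid;

	for (cid = 0; cid < srv_path->s.con_num; cid++) {
		q_size = (cid == 0) ? SERVICE_CON_QUEUE_DEPTH
				    : srv->queue_depth;
		err = post_recv_io(to_srv_con(srv_path->s.con[cid]), q_size);
		if (err)
			return err;
	}
	return 0;
}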
995 struct rtrs_srv_path *srv_path = to_srv_path(s); in process_read() local
996 struct rtrs_srv_sess *srv = srv_path->srv; in process_read()
1004 if (srv_path->state != RTRS_SRV_CONNECTED) { in process_read()
1007 rtrs_srv_state_str(srv_path->state)); in process_read()
1015 rtrs_srv_get_ops_ids(srv_path); in process_read()
1016 rtrs_srv_update_rdma_stats(srv_path->stats, off, READ); in process_read()
1017 id = srv_path->ops_ids[buf_id]; in process_read()
1043 close_path(srv_path); in process_read()
1045 rtrs_srv_put_ops_ids(srv_path); in process_read()
1053 struct rtrs_srv_path *srv_path = to_srv_path(s); in process_write() local
1054 struct rtrs_srv_sess *srv = srv_path->srv; in process_write()
1062 if (srv_path->state != RTRS_SRV_CONNECTED) { in process_write()
1065 rtrs_srv_state_str(srv_path->state)); in process_write()
1068 rtrs_srv_get_ops_ids(srv_path); in process_write()
1069 rtrs_srv_update_rdma_stats(srv_path->stats, off, WRITE); in process_write()
1070 id = srv_path->ops_ids[buf_id]; in process_write()
1095 close_path(srv_path); in process_write()
1097 rtrs_srv_put_ops_ids(srv_path); in process_write()
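
process_read() and process_write() share the same prologue (lines 1004-1017 and 1062-1070): refuse I/O unless the path is CONNECTED, pin the path with an inflight ref, account the op, then fetch the preallocated ops_ids slot for this buffer. Folded into one hypothetical helper (not in the driver); the matching rtrs_srv_put_ops_ids() runs when the response completes:

static struct rtrs_srv_op *io_prologue_sketch(struct rtrs_srv_path *srv_path,
					      u32 buf_id, size_t off,
					      int dir)
{
	if (srv_path->state != RTRS_SRV_CONNECTED)
		return NULL;			/* caller logs and bails out */

	rtrs_srv_get_ops_ids(srv_path);		/* pin path for this op */
	rtrs_srv_update_rdma_stats(srv_path->stats, off, dir);
	return srv_path->ops_ids[buf_id];	/* preallocated slot */
}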
1104 struct rtrs_srv_path *srv_path = to_srv_path(s); in process_io_req() local
1108 ib_dma_sync_single_for_cpu(srv_path->s.dev->ib_dev, in process_io_req()
1109 srv_path->dma_addr[id], in process_io_req()
1131 close_path(srv_path); in process_io_req()
1140 struct rtrs_srv_path *srv_path = to_srv_path(s); in rtrs_srv_inv_rkey_done() local
1141 struct rtrs_srv_sess *srv = srv_path->srv; in rtrs_srv_inv_rkey_done()
1148 close_path(srv_path); in rtrs_srv_inv_rkey_done()
1197 struct rtrs_srv_path *srv_path = to_srv_path(s); in rtrs_srv_rdma_done() local
1198 struct rtrs_srv_sess *srv = srv_path->srv; in rtrs_srv_rdma_done()
1208 close_path(srv_path); in rtrs_srv_rdma_done()
1224 close_path(srv_path); in rtrs_srv_rdma_done()
1233 msg_id = imm_payload >> srv_path->mem_bits; in rtrs_srv_rdma_done()
1234 off = imm_payload & ((1 << srv_path->mem_bits) - 1); in rtrs_srv_rdma_done()
1238 close_path(srv_path); in rtrs_srv_rdma_done()
1242 struct rtrs_srv_mr *mr = &srv_path->mrs[msg_id]; in rtrs_srv_rdma_done()
1250 close_path(srv_path); in rtrs_srv_rdma_done()
1259 rtrs_send_hb_ack(&srv_path->s); in rtrs_srv_rdma_done()
1262 srv_path->s.hb_missed_cnt = 0; in rtrs_srv_rdma_done()
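
Lines 1233-1234 decode the RDMA-write-with-immediate payload using the mem_bits recorded in map_cont_bufs(): the high bits select the message (chunk) id, the low mem_bits bits carry the byte offset within that chunk. Encode and decode as a pair of sketched helpers:

static inline u32 imm_encode(u32 msg_id, u32 off, u32 mem_bits)
{
	return (msg_id << mem_bits) | off;
}

static inline void imm_decode(u32 imm_payload, u32 mem_bits,
			      u32 *msg_id, u32 *off)
{
	*msg_id = imm_payload >> mem_bits;
	*off = imm_payload & ((1U << mem_bits) - 1);
}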
1294 struct rtrs_srv_path *srv_path; in rtrs_srv_get_path_name() local
1298 list_for_each_entry(srv_path, &srv->paths_list, s.entry) { in rtrs_srv_get_path_name()
1299 if (srv_path->state != RTRS_SRV_CONNECTED) in rtrs_srv_get_path_name()
1301 strscpy(pathname, srv_path->s.sessname, in rtrs_srv_get_path_name()
1302 min_t(size_t, sizeof(srv_path->s.sessname), len)); in rtrs_srv_get_path_name()
1322 static int find_next_bit_ring(struct rtrs_srv_path *srv_path) in find_next_bit_ring() argument
1324 struct ib_device *ib_dev = srv_path->s.dev->ib_dev; in find_next_bit_ring()
1327 v = cpumask_next(srv_path->cur_cq_vector, &cq_affinity_mask); in find_next_bit_ring()
1333 static int rtrs_srv_get_next_cq_vector(struct rtrs_srv_path *srv_path) in rtrs_srv_get_next_cq_vector() argument
1335 srv_path->cur_cq_vector = find_next_bit_ring(srv_path); in rtrs_srv_get_next_cq_vector()
1337 return srv_path->cur_cq_vector; in rtrs_srv_get_next_cq_vector()
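
rtrs_srv_get_next_cq_vector() hands out completion-queue vectors round-robin over cq_affinity_mask, so new connections spread across CPUs. A sketch of the ring walk (the real find_next_bit_ring() also bounds the result by the device's num_comp_vectors):

static int next_cq_vector_sketch(struct rtrs_srv_path *srv_path)
{
	int v;

	v = cpumask_next(srv_path->cur_cq_vector, &cq_affinity_mask);
	if (v >= nr_cpu_ids)	/* walked past the last set bit: wrap */
		v = cpumask_first(&cq_affinity_mask);
	srv_path->cur_cq_vector = v;
	return v;
}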
1444 struct rtrs_srv_path *srv_path) in __add_path_to_srv() argument
1446 list_add_tail(&srv_path->s.entry, &srv->paths_list); in __add_path_to_srv()
1451 static void del_path_from_srv(struct rtrs_srv_path *srv_path) in del_path_from_srv() argument
1453 struct rtrs_srv_sess *srv = srv_path->srv; in del_path_from_srv()
1459 list_del(&srv_path->s.entry); in del_path_from_srv()
1492 struct rtrs_srv_path *srv_path; in __is_path_w_addr_exists() local
1494 list_for_each_entry(srv_path, &srv->paths_list, s.entry) in __is_path_w_addr_exists()
1495 if (!sockaddr_cmp((struct sockaddr *)&srv_path->s.dst_addr, in __is_path_w_addr_exists()
1497 !sockaddr_cmp((struct sockaddr *)&srv_path->s.src_addr, in __is_path_w_addr_exists()
1504 static void free_path(struct rtrs_srv_path *srv_path) in free_path() argument
1506 if (srv_path->kobj.state_in_sysfs) { in free_path()
1507 kobject_del(&srv_path->kobj); in free_path()
1508 kobject_put(&srv_path->kobj); in free_path()
1510 free_percpu(srv_path->stats->rdma_stats); in free_path()
1511 kfree(srv_path->stats); in free_path()
1512 kfree(srv_path); in free_path()
1518 struct rtrs_srv_path *srv_path; in rtrs_srv_close_work() local
1522 srv_path = container_of(work, typeof(*srv_path), close_work); in rtrs_srv_close_work()
1524 rtrs_srv_destroy_path_files(srv_path); in rtrs_srv_close_work()
1525 rtrs_srv_stop_hb(srv_path); in rtrs_srv_close_work()
1527 for (i = 0; i < srv_path->s.con_num; i++) { in rtrs_srv_close_work()
1528 if (!srv_path->s.con[i]) in rtrs_srv_close_work()
1530 con = to_srv_con(srv_path->s.con[i]); in rtrs_srv_close_work()
1539 percpu_ref_kill(&srv_path->ids_inflight_ref); in rtrs_srv_close_work()
1542 wait_for_completion(&srv_path->complete_done); in rtrs_srv_close_work()
1545 rtrs_srv_path_down(srv_path); in rtrs_srv_close_work()
1547 unmap_cont_bufs(srv_path); in rtrs_srv_close_work()
1548 rtrs_srv_free_ops_ids(srv_path); in rtrs_srv_close_work()
1550 for (i = 0; i < srv_path->s.con_num; i++) { in rtrs_srv_close_work()
1551 if (!srv_path->s.con[i]) in rtrs_srv_close_work()
1553 con = to_srv_con(srv_path->s.con[i]); in rtrs_srv_close_work()
1558 rtrs_ib_dev_put(srv_path->s.dev); in rtrs_srv_close_work()
1560 del_path_from_srv(srv_path); in rtrs_srv_close_work()
1561 put_srv(srv_path->srv); in rtrs_srv_close_work()
1562 srv_path->srv = NULL; in rtrs_srv_close_work()
1563 rtrs_srv_change_state(srv_path, RTRS_SRV_CLOSED); in rtrs_srv_close_work()
1565 kfree(srv_path->dma_addr); in rtrs_srv_close_work()
1566 kfree(srv_path->s.con); in rtrs_srv_close_work()
1567 free_path(srv_path); in rtrs_srv_close_work()
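
rtrs_srv_close_work() is where the teardown ordering matters: quiesce, drain, release, detach, free. A condensed sketch of that ordering, with the two per-connection loops (disconnect at lines 1527-1530, destroy at 1550-1553) elided:

static void close_work_sketch(struct rtrs_srv_path *srv_path)
{
	/* 1. stop new activity: sysfs, heartbeat, then disconnect cons */
	rtrs_srv_destroy_path_files(srv_path);
	rtrs_srv_stop_hb(srv_path);

	/* 2. drain: no new ops can start, wait for in-flight ones */
	percpu_ref_kill(&srv_path->ids_inflight_ref);
	wait_for_completion(&srv_path->complete_done);

	/* 3. notify the user, then release I/O resources */
	rtrs_srv_path_down(srv_path);
	unmap_cont_bufs(srv_path);
	rtrs_srv_free_ops_ids(srv_path);

	/* 4. detach from the session, mark CLOSED, free the path */
	del_path_from_srv(srv_path);
	put_srv(srv_path->srv);
	srv_path->srv = NULL;
	rtrs_srv_change_state(srv_path, RTRS_SRV_CLOSED);

	kfree(srv_path->dma_addr);
	kfree(srv_path->s.con);
	free_path(srv_path);
}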
1570 static int rtrs_rdma_do_accept(struct rtrs_srv_path *srv_path, in rtrs_rdma_do_accept() argument
1573 struct rtrs_srv_sess *srv = srv_path->srv; in rtrs_rdma_do_accept()
1624 struct rtrs_srv_path *srv_path; in __find_path() local
1626 list_for_each_entry(srv_path, &srv->paths_list, s.entry) { in __find_path()
1627 if (uuid_equal(&srv_path->s.uuid, sess_uuid)) in __find_path()
1628 return srv_path; in __find_path()
1634 static int create_con(struct rtrs_srv_path *srv_path, in create_con() argument
1638 struct rtrs_srv_sess *srv = srv_path->srv; in create_con()
1639 struct rtrs_path *s = &srv_path->s; in create_con()
1654 con->c.path = &srv_path->s; in create_con()
1657 wr_limit = srv_path->s.dev->ib_dev->attrs.max_qp_wr; in create_con()
1690 cq_vector = rtrs_srv_get_next_cq_vector(srv_path); in create_con()
1693 err = rtrs_cq_qp_create(&srv_path->s, &con->c, 1, cq_vector, cq_num, in create_con()
1705 WARN_ON(srv_path->s.con[cid]); in create_con()
1706 srv_path->s.con[cid] = &con->c; in create_con()
1731 struct rtrs_srv_path *srv_path; in __alloc_path() local
1745 srv_path = kzalloc(sizeof(*srv_path), GFP_KERNEL); in __alloc_path()
1746 if (!srv_path) in __alloc_path()
1749 srv_path->stats = kzalloc(sizeof(*srv_path->stats), GFP_KERNEL); in __alloc_path()
1750 if (!srv_path->stats) in __alloc_path()
1753 srv_path->stats->rdma_stats = alloc_percpu(struct rtrs_srv_stats_rdma_stats); in __alloc_path()
1754 if (!srv_path->stats->rdma_stats) in __alloc_path()
1757 srv_path->stats->srv_path = srv_path; in __alloc_path()
1759 srv_path->dma_addr = kcalloc(srv->queue_depth, in __alloc_path()
1760 sizeof(*srv_path->dma_addr), in __alloc_path()
1762 if (!srv_path->dma_addr) in __alloc_path()
1765 srv_path->s.con = kcalloc(con_num, sizeof(*srv_path->s.con), in __alloc_path()
1767 if (!srv_path->s.con) in __alloc_path()
1770 srv_path->state = RTRS_SRV_CONNECTING; in __alloc_path()
1771 srv_path->srv = srv; in __alloc_path()
1772 srv_path->cur_cq_vector = -1; in __alloc_path()
1773 srv_path->s.dst_addr = cm_id->route.addr.dst_addr; in __alloc_path()
1774 srv_path->s.src_addr = cm_id->route.addr.src_addr; in __alloc_path()
1777 path.src = &srv_path->s.src_addr; in __alloc_path()
1778 path.dst = &srv_path->s.dst_addr; in __alloc_path()
1780 strscpy(srv_path->s.sessname, str, sizeof(srv_path->s.sessname)); in __alloc_path()
1782 srv_path->s.con_num = con_num; in __alloc_path()
1783 srv_path->s.irq_con_num = con_num; in __alloc_path()
1784 srv_path->s.recon_cnt = recon_cnt; in __alloc_path()
1785 uuid_copy(&srv_path->s.uuid, uuid); in __alloc_path()
1786 spin_lock_init(&srv_path->state_lock); in __alloc_path()
1787 INIT_WORK(&srv_path->close_work, rtrs_srv_close_work); in __alloc_path()
1788 rtrs_srv_init_hb(srv_path); in __alloc_path()
1790 srv_path->s.dev = rtrs_ib_dev_find_or_add(cm_id->device, &dev_pd); in __alloc_path()
1791 if (!srv_path->s.dev) { in __alloc_path()
1795 err = map_cont_bufs(srv_path); in __alloc_path()
1799 err = rtrs_srv_alloc_ops_ids(srv_path); in __alloc_path()
1803 __add_path_to_srv(srv, srv_path); in __alloc_path()
1805 return srv_path; in __alloc_path()
1808 unmap_cont_bufs(srv_path); in __alloc_path()
1810 rtrs_ib_dev_put(srv_path->s.dev); in __alloc_path()
1812 kfree(srv_path->s.con); in __alloc_path()
1814 kfree(srv_path->dma_addr); in __alloc_path()
1816 free_percpu(srv_path->stats->rdma_stats); in __alloc_path()
1818 kfree(srv_path->stats); in __alloc_path()
1820 kfree(srv_path); in __alloc_path()
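
__alloc_path() uses the kernel's standard goto unwinding: each allocation step gets a label that frees everything allocated before it, in reverse order, so a failure anywhere leaves no leaks (lines 1808-1820). Condensed to three steps as a sketch:

static struct rtrs_srv_path *alloc_path_sketch(struct rtrs_srv_sess *srv,
					       int con_num)
{
	struct rtrs_srv_path *srv_path;
	int err = -ENOMEM;

	srv_path = kzalloc(sizeof(*srv_path), GFP_KERNEL);
	if (!srv_path)
		goto err;
	srv_path->dma_addr = kcalloc(srv->queue_depth,
				     sizeof(*srv_path->dma_addr),
				     GFP_KERNEL);
	if (!srv_path->dma_addr)
		goto err_free_path;
	srv_path->s.con = kcalloc(con_num, sizeof(*srv_path->s.con),
				  GFP_KERNEL);
	if (!srv_path->s.con)
		goto err_free_dma_addr;

	return srv_path;

err_free_dma_addr:
	kfree(srv_path->dma_addr);
err_free_path:
	kfree(srv_path);
err:
	return ERR_PTR(err);
}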
1830 struct rtrs_srv_path *srv_path; in rtrs_rdma_connect() local
1871 srv_path = __find_path(srv, &msg->sess_uuid); in rtrs_rdma_connect()
1872 if (srv_path) { in rtrs_rdma_connect()
1873 struct rtrs_path *s = &srv_path->s; in rtrs_rdma_connect()
1878 if (srv_path->state != RTRS_SRV_CONNECTING) { in rtrs_rdma_connect()
1880 rtrs_srv_state_str(srv_path->state)); in rtrs_rdma_connect()
1900 srv_path = __alloc_path(srv, cm_id, con_num, recon_cnt, in rtrs_rdma_connect()
1902 if (IS_ERR(srv_path)) { in rtrs_rdma_connect()
1905 err = PTR_ERR(srv_path); in rtrs_rdma_connect()
1910 err = create_con(srv_path, cm_id, cid); in rtrs_rdma_connect()
1912 rtrs_err((&srv_path->s), "create_con(), error %d\n", err); in rtrs_rdma_connect()
1921 err = rtrs_rdma_do_accept(srv_path, cm_id); in rtrs_rdma_connect()
1923 rtrs_err((&srv_path->s), "rtrs_rdma_do_accept(), error %d\n", err); in rtrs_rdma_connect()
1943 close_path(srv_path); in rtrs_rdma_connect()
1951 struct rtrs_srv_path *srv_path = NULL; in rtrs_srv_rdma_cm_handler() local
1958 srv_path = to_srv_path(s); in rtrs_srv_rdma_cm_handler()
1982 close_path(srv_path); in rtrs_srv_rdma_cm_handler()
2192 struct rtrs_srv_path *srv_path; in close_paths() local
2195 list_for_each_entry(srv_path, &srv->paths_list, s.entry) in close_paths()
2196 close_path(srv_path); in close_paths()