Lines matching refs: clt_path (references grouped by enclosing function; all hits are in drivers/infiniband/ulp/rtrs/rtrs-clt.c)
52 struct rtrs_clt_path *clt_path; in rtrs_clt_is_connected() local
56 list_for_each_entry_rcu(clt_path, &clt->paths_list, s.entry) in rtrs_clt_is_connected()
57 if (READ_ONCE(clt_path->state) == RTRS_CLT_CONNECTED) { in rtrs_clt_is_connected()
178 struct rtrs_clt_con *rtrs_permit_to_clt_con(struct rtrs_clt_path *clt_path, in rtrs_permit_to_clt_con() argument
184 id = (permit->cpu_id % (clt_path->s.irq_con_num - 1)) + 1; in rtrs_permit_to_clt_con()
186 return to_clt_con(clt_path->s.con[id]); in rtrs_permit_to_clt_con()
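The index arithmetic in rtrs_permit_to_clt_con() above skips connection 0 (used as the service/user connection, see the con[0] reference at line 2533 below) and folds the permit's CPU id onto the remaining I/O connections. A standalone C rendering of the same formula, with invented sample values:

#include <stdio.h>

static unsigned int permit_to_con_id(unsigned int cpu_id, unsigned int irq_con_num)
{
        /* Connection 0 is the service connection; spread I/O over 1..irq_con_num-1. */
        return (cpu_id % (irq_con_num - 1)) + 1;
}

int main(void)
{
        /* Example: 5 IRQ connections in total, so I/O lands on connections 1..4. */
        for (unsigned int cpu = 0; cpu < 8; cpu++)
                printf("cpu %u -> con %u\n", cpu, permit_to_con_id(cpu, 5));
        return 0;
}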
201 static bool rtrs_clt_change_state(struct rtrs_clt_path *clt_path, in rtrs_clt_change_state() argument
207 lockdep_assert_held(&clt_path->state_wq.lock); in rtrs_clt_change_state()
209 old_state = clt_path->state; in rtrs_clt_change_state()
283 clt_path->state = new_state; in rtrs_clt_change_state()
284 wake_up_locked(&clt_path->state_wq); in rtrs_clt_change_state()
290 static bool rtrs_clt_change_state_from_to(struct rtrs_clt_path *clt_path, in rtrs_clt_change_state_from_to() argument
296 spin_lock_irq(&clt_path->state_wq.lock); in rtrs_clt_change_state_from_to()
297 if (clt_path->state == old_state) in rtrs_clt_change_state_from_to()
298 changed = rtrs_clt_change_state(clt_path, new_state); in rtrs_clt_change_state_from_to()
299 spin_unlock_irq(&clt_path->state_wq.lock); in rtrs_clt_change_state_from_to()
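rtrs_clt_change_state() and rtrs_clt_change_state_from_to() above only move the path state while the state wait-queue lock is held, and only from an expected old state. A minimal userspace sketch of that lock-protected compare-and-transition pattern (pthread-based, illustrative names, without the kernel's transition-validation switch):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

enum path_state { PATH_CONNECTING, PATH_CONNECTED, PATH_RECONNECTING, PATH_CLOSED };

struct path {
        pthread_mutex_t lock;       /* stands in for state_wq.lock      */
        pthread_cond_t  state_cv;   /* stands in for the state wait queue */
        enum path_state state;
};

/* Caller must hold p->lock, mirroring the lockdep_assert_held() above. */
static bool path_change_state(struct path *p, enum path_state new_state)
{
        p->state = new_state;
        pthread_cond_broadcast(&p->state_cv);   /* wake_up_locked() analogue */
        return true;
}

static bool path_change_state_from_to(struct path *p,
                                      enum path_state old_state,
                                      enum path_state new_state)
{
        bool changed = false;

        pthread_mutex_lock(&p->lock);
        if (p->state == old_state)
                changed = path_change_state(p, new_state);
        pthread_mutex_unlock(&p->lock);

        return changed;
}

int main(void)
{
        struct path p = {
                .lock = PTHREAD_MUTEX_INITIALIZER,
                .state_cv = PTHREAD_COND_INITIALIZER,
                .state = PATH_CONNECTED,
        };

        /* Succeeds: CONNECTED -> RECONNECTING. */
        printf("%d\n", path_change_state_from_to(&p, PATH_CONNECTED, PATH_RECONNECTING));
        /* Fails: the path is no longer CONNECTED. */
        printf("%d\n", path_change_state_from_to(&p, PATH_CONNECTED, PATH_CLOSED));
        return 0;
}

The kernel helper additionally checks that the old state is allowed to transition to the new one before committing the change; the sketch omits that table.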
304 static void rtrs_clt_stop_and_destroy_conns(struct rtrs_clt_path *clt_path);
307 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); in rtrs_rdma_error_recovery() local
309 trace_rtrs_rdma_error_recovery(clt_path); in rtrs_rdma_error_recovery()
311 if (rtrs_clt_change_state_from_to(clt_path, in rtrs_rdma_error_recovery()
314 queue_work(rtrs_wq, &clt_path->err_recovery_work); in rtrs_rdma_error_recovery()
321 rtrs_clt_change_state_from_to(clt_path, in rtrs_rdma_error_recovery()
382 struct rtrs_clt_path *clt_path; in complete_rdma_req() local
389 clt_path = to_clt_path(con->c.path); in complete_rdma_req()
435 ib_dma_unmap_sg(clt_path->s.dev->ib_dev, req->sglist, in complete_rdma_req()
441 atomic_dec(&clt_path->stats->inflight); in complete_rdma_req()
448 errno, kobject_name(&clt_path->kobj), clt_path->hca_name, in complete_rdma_req()
449 clt_path->hca_port, notify); in complete_rdma_req()
461 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); in rtrs_post_send_rdma() local
474 sge.lkey = clt_path->s.dev->ib_pd->local_dma_lkey; in rtrs_post_send_rdma()
480 flags = atomic_inc_return(&con->c.wr_cnt) % clt_path->s.signal_interval ? in rtrs_post_send_rdma()
483 ib_dma_sync_single_for_device(clt_path->s.dev->ib_dev, in rtrs_post_send_rdma()
492 static void process_io_rsp(struct rtrs_clt_path *clt_path, u32 msg_id, in process_io_rsp() argument
497 if (WARN_ON(msg_id >= clt_path->queue_depth)) in process_io_rsp()
500 req = &clt_path->reqs[msg_id]; in process_io_rsp()
510 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); in rtrs_clt_recv_done() local
512 WARN_ON((clt_path->flags & RTRS_MSG_NEW_RKEY_F) == 0); in rtrs_clt_recv_done()
524 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); in rtrs_clt_rkey_rsp_done() local
532 WARN_ON((clt_path->flags & RTRS_MSG_NEW_RKEY_F) == 0); in rtrs_clt_rkey_rsp_done()
541 ib_dma_sync_single_for_cpu(clt_path->s.dev->ib_dev, iu->dma_addr, in rtrs_clt_rkey_rsp_done()
545 rtrs_err(clt_path->clt, in rtrs_clt_rkey_rsp_done()
551 if (WARN_ON(buf_id >= clt_path->queue_depth)) in rtrs_clt_rkey_rsp_done()
564 clt_path->rbufs[buf_id].rkey = le32_to_cpu(msg->rkey); in rtrs_clt_rkey_rsp_done()
565 process_io_rsp(clt_path, msg_id, err, w_inval); in rtrs_clt_rkey_rsp_done()
567 ib_dma_sync_single_for_device(clt_path->s.dev->ib_dev, iu->dma_addr, in rtrs_clt_rkey_rsp_done()
604 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); in rtrs_clt_rdma_done() local
611 rtrs_err(clt_path->clt, "RDMA failed: %s\n", in rtrs_clt_rdma_done()
636 process_io_rsp(clt_path, msg_id, err, w_inval); in rtrs_clt_rdma_done()
639 rtrs_send_hb_ack(&clt_path->s); in rtrs_clt_rdma_done()
640 if (clt_path->flags & RTRS_MSG_NEW_RKEY_F) in rtrs_clt_rdma_done()
644 clt_path->s.hb_missed_cnt = 0; in rtrs_clt_rdma_done()
645 clt_path->s.hb_cur_latency = in rtrs_clt_rdma_done()
646 ktime_sub(ktime_get(), clt_path->s.hb_last_sent); in rtrs_clt_rdma_done()
647 if (clt_path->flags & RTRS_MSG_NEW_RKEY_F) in rtrs_clt_rdma_done()
674 if (clt_path->flags & RTRS_MSG_NEW_RKEY_F) { in rtrs_clt_rdma_done()
689 rtrs_wrn(clt_path->clt, "Unexpected WC type: %d\n", wc->opcode); in rtrs_clt_rdma_done()
697 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); in post_recv_io() local
700 if (clt_path->flags & RTRS_MSG_NEW_RKEY_F) { in post_recv_io()
714 static int post_recv_path(struct rtrs_clt_path *clt_path) in post_recv_path() argument
719 for (cid = 0; cid < clt_path->s.con_num; cid++) { in post_recv_path()
723 q_size = clt_path->queue_depth; in post_recv_path()
731 err = post_recv_io(to_clt_con(clt_path->s.con[cid]), q_size); in post_recv_path()
733 rtrs_err(clt_path->clt, "post_recv_io(), err: %d\n", in post_recv_path()
761 rtrs_clt_get_next_path_or_null(struct list_head *head, struct rtrs_clt_path *clt_path) in rtrs_clt_get_next_path_or_null() argument
763 return list_next_or_null_rcu(head, &clt_path->s.entry, typeof(*clt_path), s.entry) ?: in rtrs_clt_get_next_path_or_null()
765 READ_ONCE((&clt_path->s.entry)->next), in rtrs_clt_get_next_path_or_null()
766 typeof(*clt_path), s.entry); in rtrs_clt_get_next_path_or_null()
818 struct rtrs_clt_path *clt_path; in get_next_path_min_inflight() local
822 list_for_each_entry_rcu(clt_path, &clt->paths_list, s.entry) { in get_next_path_min_inflight()
823 if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTED) in get_next_path_min_inflight()
826 if (!list_empty(raw_cpu_ptr(clt_path->mp_skip_entry))) in get_next_path_min_inflight()
829 inflight = atomic_read(&clt_path->stats->inflight); in get_next_path_min_inflight()
833 min_path = clt_path; in get_next_path_min_inflight()
869 struct rtrs_clt_path *clt_path; in get_next_path_min_latency() local
873 list_for_each_entry_rcu(clt_path, &clt->paths_list, s.entry) { in get_next_path_min_latency()
874 if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTED) in get_next_path_min_latency()
877 if (!list_empty(raw_cpu_ptr(clt_path->mp_skip_entry))) in get_next_path_min_latency()
880 latency = clt_path->s.hb_cur_latency; in get_next_path_min_latency()
884 min_path = clt_path; in get_next_path_min_latency()
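get_next_path_min_inflight() and get_next_path_min_latency() above implement two of the multipath policies: walk every path under RCU, skip paths that are not connected or already tried for this request (the per-CPU mp_skip_entry check), and keep the path with the smallest metric. A simplified array-based model of the min-inflight variant (names and sample values invented):

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

struct path {
        const char *name;
        bool        connected;
        bool        skip;      /* models the mp_skip_entry check            */
        int         inflight;  /* models atomic_read(&stats->inflight)      */
};

static struct path *pick_min_inflight(struct path *paths, int nr)
{
        struct path *best = NULL;
        int min_inflight = INT_MAX;

        for (int i = 0; i < nr; i++) {
                struct path *p = &paths[i];

                if (!p->connected || p->skip)
                        continue;
                if (p->inflight < min_inflight) {
                        min_inflight = p->inflight;
                        best = p;
                }
        }
        return best;    /* NULL when no usable path is left */
}

int main(void)
{
        struct path paths[] = {
                { "path0", true,  false, 7 },
                { "path1", true,  true,  1 },   /* skipped: already tried  */
                { "path2", true,  false, 3 },
                { "path3", false, false, 0 },   /* not connected           */
        };
        struct path *p = pick_min_inflight(paths, 4);

        printf("picked %s\n", p ? p->name : "(none)");
        return 0;
}

The min-latency policy is the same loop with s.hb_cur_latency as the metric instead of the inflight counter.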
943 struct rtrs_clt_path *clt_path, in rtrs_clt_init_req() argument
961 req->con = rtrs_permit_to_clt_con(clt_path, permit); in rtrs_clt_init_req()
967 req->mp_policy = clt_path->clt->mp_policy; in rtrs_clt_init_req()
977 rtrs_clt_get_req(struct rtrs_clt_path *clt_path, in rtrs_clt_get_req() argument
986 req = &clt_path->reqs[permit->mem_id]; in rtrs_clt_get_req()
987 rtrs_clt_init_req(req, clt_path, conf, permit, priv, vec, usr_len, in rtrs_clt_get_req()
1017 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); in rtrs_post_rdma_write_sg() local
1037 sge[i].lkey = clt_path->s.dev->ib_pd->local_dma_lkey; in rtrs_post_rdma_write_sg()
1043 sge[i].lkey = clt_path->s.dev->ib_pd->local_dma_lkey; in rtrs_post_rdma_write_sg()
1049 flags = atomic_inc_return(&con->c.wr_cnt) % clt_path->s.signal_interval ? in rtrs_post_rdma_write_sg()
1052 ib_dma_sync_single_for_device(clt_path->s.dev->ib_dev, in rtrs_post_rdma_write_sg()
1080 struct rtrs_clt_path *clt_path = to_clt_path(s); in rtrs_clt_write_req() local
1093 if (tsize > clt_path->chunk_size) { in rtrs_clt_write_req()
1095 tsize, clt_path->chunk_size); in rtrs_clt_write_req()
1099 count = ib_dma_map_sg(clt_path->s.dev->ib_dev, req->sglist, in rtrs_clt_write_req()
1116 rbuf = &clt_path->rbufs[buf_id]; in rtrs_clt_write_req()
1124 ib_dma_unmap_sg(clt_path->s.dev->ib_dev, req->sglist, in rtrs_clt_write_req()
1158 ret, kobject_name(&clt_path->kobj), clt_path->hca_name, in rtrs_clt_write_req()
1159 clt_path->hca_port); in rtrs_clt_write_req()
1161 atomic_dec(&clt_path->stats->inflight); in rtrs_clt_write_req()
1163 ib_dma_unmap_sg(clt_path->s.dev->ib_dev, req->sglist, in rtrs_clt_write_req()
1174 struct rtrs_clt_path *clt_path = to_clt_path(s); in rtrs_clt_read_req() local
1176 struct rtrs_ib_dev *dev = clt_path->s.dev; in rtrs_clt_read_req()
1186 if (tsize > clt_path->chunk_size) { in rtrs_clt_read_req()
1189 tsize, clt_path->chunk_size); in rtrs_clt_read_req()
1259 ret = rtrs_post_send_rdma(req->con, req, &clt_path->rbufs[buf_id], in rtrs_clt_read_req()
1264 ret, kobject_name(&clt_path->kobj), clt_path->hca_name, in rtrs_clt_read_req()
1265 clt_path->hca_port); in rtrs_clt_read_req()
1267 atomic_dec(&clt_path->stats->inflight); in rtrs_clt_read_req()
1315 static void fail_all_outstanding_reqs(struct rtrs_clt_path *clt_path) in fail_all_outstanding_reqs() argument
1317 struct rtrs_clt_sess *clt = clt_path->clt; in fail_all_outstanding_reqs()
1321 if (!clt_path->reqs) in fail_all_outstanding_reqs()
1323 for (i = 0; i < clt_path->queue_depth; ++i) { in fail_all_outstanding_reqs()
1324 req = &clt_path->reqs[i]; in fail_all_outstanding_reqs()
1342 static void free_path_reqs(struct rtrs_clt_path *clt_path) in free_path_reqs() argument
1347 if (!clt_path->reqs) in free_path_reqs()
1349 for (i = 0; i < clt_path->queue_depth; ++i) { in free_path_reqs()
1350 req = &clt_path->reqs[i]; in free_path_reqs()
1354 rtrs_iu_free(req->iu, clt_path->s.dev->ib_dev, 1); in free_path_reqs()
1356 kfree(clt_path->reqs); in free_path_reqs()
1357 clt_path->reqs = NULL; in free_path_reqs()
1360 static int alloc_path_reqs(struct rtrs_clt_path *clt_path) in alloc_path_reqs() argument
1365 clt_path->reqs = kcalloc(clt_path->queue_depth, in alloc_path_reqs()
1366 sizeof(*clt_path->reqs), in alloc_path_reqs()
1368 if (!clt_path->reqs) in alloc_path_reqs()
1371 for (i = 0; i < clt_path->queue_depth; ++i) { in alloc_path_reqs()
1372 req = &clt_path->reqs[i]; in alloc_path_reqs()
1373 req->iu = rtrs_iu_alloc(1, clt_path->max_hdr_size, GFP_KERNEL, in alloc_path_reqs()
1374 clt_path->s.dev->ib_dev, in alloc_path_reqs()
1384 req->mr = ib_alloc_mr(clt_path->s.dev->ib_pd, in alloc_path_reqs()
1386 clt_path->max_pages_per_mr); in alloc_path_reqs()
1391 clt_path->max_pages_per_mr); in alloc_path_reqs()
1401 free_path_reqs(clt_path); in alloc_path_reqs()
1451 static void query_fast_reg_mode(struct rtrs_clt_path *clt_path) in query_fast_reg_mode() argument
1457 ib_dev = clt_path->s.dev->ib_dev; in query_fast_reg_mode()
1467 clt_path->max_pages_per_mr = in query_fast_reg_mode()
1468 min3(clt_path->max_pages_per_mr, (u32)max_pages_per_mr, in query_fast_reg_mode()
1470 clt_path->clt->max_segments = in query_fast_reg_mode()
1471 min(clt_path->max_pages_per_mr, clt_path->clt->max_segments); in query_fast_reg_mode()
1474 static bool rtrs_clt_change_state_get_old(struct rtrs_clt_path *clt_path, in rtrs_clt_change_state_get_old() argument
1480 spin_lock_irq(&clt_path->state_wq.lock); in rtrs_clt_change_state_get_old()
1482 *old_state = clt_path->state; in rtrs_clt_change_state_get_old()
1483 changed = rtrs_clt_change_state(clt_path, new_state); in rtrs_clt_change_state_get_old()
1484 spin_unlock_irq(&clt_path->state_wq.lock); in rtrs_clt_change_state_get_old()
1496 static void rtrs_clt_init_hb(struct rtrs_clt_path *clt_path) in rtrs_clt_init_hb() argument
1498 rtrs_init_hb(&clt_path->s, &io_comp_cqe, in rtrs_clt_init_hb()
1510 struct rtrs_clt_path *clt_path; in rtrs_clt_err_recovery_work() local
1514 clt_path = container_of(work, struct rtrs_clt_path, err_recovery_work); in rtrs_clt_err_recovery_work()
1515 clt = clt_path->clt; in rtrs_clt_err_recovery_work()
1517 rtrs_clt_stop_and_destroy_conns(clt_path); in rtrs_clt_err_recovery_work()
1518 queue_delayed_work(rtrs_wq, &clt_path->reconnect_dwork, in rtrs_clt_err_recovery_work()
1527 struct rtrs_clt_path *clt_path; in alloc_path() local
1532 clt_path = kzalloc(sizeof(*clt_path), GFP_KERNEL); in alloc_path()
1533 if (!clt_path) in alloc_path()
1541 clt_path->s.con = kcalloc(total_con, sizeof(*clt_path->s.con), in alloc_path()
1543 if (!clt_path->s.con) in alloc_path()
1546 clt_path->s.con_num = total_con; in alloc_path()
1547 clt_path->s.irq_con_num = con_num + 1; in alloc_path()
1549 clt_path->stats = kzalloc(sizeof(*clt_path->stats), GFP_KERNEL); in alloc_path()
1550 if (!clt_path->stats) in alloc_path()
1553 mutex_init(&clt_path->init_mutex); in alloc_path()
1554 uuid_gen(&clt_path->s.uuid); in alloc_path()
1555 memcpy(&clt_path->s.dst_addr, path->dst, in alloc_path()
1564 memcpy(&clt_path->s.src_addr, path->src, in alloc_path()
1566 strscpy(clt_path->s.sessname, clt->sessname, in alloc_path()
1567 sizeof(clt_path->s.sessname)); in alloc_path()
1568 clt_path->clt = clt; in alloc_path()
1569 clt_path->max_pages_per_mr = RTRS_MAX_SEGMENTS; in alloc_path()
1570 init_waitqueue_head(&clt_path->state_wq); in alloc_path()
1571 clt_path->state = RTRS_CLT_CONNECTING; in alloc_path()
1572 atomic_set(&clt_path->connected_cnt, 0); in alloc_path()
1573 INIT_WORK(&clt_path->close_work, rtrs_clt_close_work); in alloc_path()
1574 INIT_WORK(&clt_path->err_recovery_work, rtrs_clt_err_recovery_work); in alloc_path()
1575 INIT_DELAYED_WORK(&clt_path->reconnect_dwork, rtrs_clt_reconnect_work); in alloc_path()
1576 rtrs_clt_init_hb(clt_path); in alloc_path()
1578 clt_path->mp_skip_entry = alloc_percpu(typeof(*clt_path->mp_skip_entry)); in alloc_path()
1579 if (!clt_path->mp_skip_entry) in alloc_path()
1583 INIT_LIST_HEAD(per_cpu_ptr(clt_path->mp_skip_entry, cpu)); in alloc_path()
1585 err = rtrs_clt_init_stats(clt_path->stats); in alloc_path()
1589 return clt_path; in alloc_path()
1592 free_percpu(clt_path->mp_skip_entry); in alloc_path()
1594 kfree(clt_path->stats); in alloc_path()
1596 kfree(clt_path->s.con); in alloc_path()
1598 kfree(clt_path); in alloc_path()
1603 void free_path(struct rtrs_clt_path *clt_path) in free_path() argument
1605 free_percpu(clt_path->mp_skip_entry); in free_path()
1606 mutex_destroy(&clt_path->init_mutex); in free_path()
1607 kfree(clt_path->s.con); in free_path()
1608 kfree(clt_path->rbufs); in free_path()
1609 kfree(clt_path); in free_path()
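alloc_path() and free_path() above follow the usual allocate-or-unwind pattern: each allocation gets its own error label so a failure releases exactly what was already set up, and the matching free function drops the same resources. A compact userspace sketch of that pattern (fake resources, illustrative names):

#include <stdlib.h>

struct fake_path {
        void *cons;      /* stands in for clt_path->s.con             */
        void *stats;     /* stands in for clt_path->stats             */
        void *skip_map;  /* stands in for the mp_skip_entry percpu map */
};

static struct fake_path *alloc_fake_path(size_t con_bytes)
{
        struct fake_path *p = calloc(1, sizeof(*p));

        if (!p)
                return NULL;
        p->cons = calloc(1, con_bytes);
        if (!p->cons)
                goto err_free_path;
        p->stats = calloc(1, 64);
        if (!p->stats)
                goto err_free_cons;
        p->skip_map = calloc(1, 64);
        if (!p->skip_map)
                goto err_free_stats;
        return p;

err_free_stats:
        free(p->stats);
err_free_cons:
        free(p->cons);
err_free_path:
        free(p);
        return NULL;
}

static void free_fake_path(struct fake_path *p)
{
        if (!p)
                return;
        free(p->skip_map);
        free(p->stats);
        free(p->cons);
        free(p);
}

int main(void)
{
        struct fake_path *p = alloc_fake_path(16 * sizeof(void *));

        free_fake_path(p);
        return 0;
}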
1612 static int create_con(struct rtrs_clt_path *clt_path, unsigned int cid) in create_con() argument
1623 con->c.path = &clt_path->s; in create_con()
1628 clt_path->s.con[cid] = &con->c; in create_con()
1635 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); in destroy_con() local
1637 clt_path->s.con[con->c.cid] = NULL; in destroy_con()
1644 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); in create_con_cq_qp() local
1653 if (WARN_ON(clt_path->s.dev)) in create_con_cq_qp()
1661 clt_path->s.dev = rtrs_ib_dev_find_or_add(con->c.cm_id->device, in create_con_cq_qp()
1663 if (!clt_path->s.dev) { in create_con_cq_qp()
1664 rtrs_wrn(clt_path->clt, in create_con_cq_qp()
1668 clt_path->s.dev_ref = 1; in create_con_cq_qp()
1669 query_fast_reg_mode(clt_path); in create_con_cq_qp()
1670 wr_limit = clt_path->s.dev->ib_dev->attrs.max_qp_wr; in create_con_cq_qp()
1687 if (WARN_ON(!clt_path->s.dev)) in create_con_cq_qp()
1689 if (WARN_ON(!clt_path->queue_depth)) in create_con_cq_qp()
1692 wr_limit = clt_path->s.dev->ib_dev->attrs.max_qp_wr; in create_con_cq_qp()
1694 clt_path->s.dev_ref++; in create_con_cq_qp()
1697 clt_path->queue_depth * 3 + 1); in create_con_cq_qp()
1699 clt_path->queue_depth * 3 + 1); in create_con_cq_qp()
1705 if (clt_path->flags & RTRS_MSG_NEW_RKEY_F || con->c.cid == 0) { in create_con_cq_qp()
1708 clt_path->s.dev->ib_dev, in create_con_cq_qp()
1716 cq_vector = con->cpu % clt_path->s.dev->ib_dev->num_comp_vectors; in create_con_cq_qp()
1717 if (con->c.cid >= clt_path->s.irq_con_num) in create_con_cq_qp()
1718 err = rtrs_cq_qp_create(&clt_path->s, &con->c, max_send_sge, in create_con_cq_qp()
1722 err = rtrs_cq_qp_create(&clt_path->s, &con->c, max_send_sge, in create_con_cq_qp()
1734 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); in destroy_con_cq_qp() local
1743 rtrs_iu_free(con->rsp_ius, clt_path->s.dev->ib_dev, in destroy_con_cq_qp()
1748 if (clt_path->s.dev_ref && !--clt_path->s.dev_ref) { in destroy_con_cq_qp()
1749 rtrs_ib_dev_put(clt_path->s.dev); in destroy_con_cq_qp()
1750 clt_path->s.dev = NULL; in destroy_con_cq_qp()
1788 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); in rtrs_rdma_route_resolved() local
1789 struct rtrs_clt_sess *clt = clt_path->clt; in rtrs_rdma_route_resolved()
1806 .cid_num = cpu_to_le16(clt_path->s.con_num), in rtrs_rdma_route_resolved()
1807 .recon_cnt = cpu_to_le16(clt_path->s.recon_cnt), in rtrs_rdma_route_resolved()
1809 msg.first_conn = clt_path->for_new_clt ? FIRST_CONN : 0; in rtrs_rdma_route_resolved()
1810 uuid_copy(&msg.sess_uuid, &clt_path->s.uuid); in rtrs_rdma_route_resolved()
1823 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); in rtrs_rdma_conn_established() local
1824 struct rtrs_clt_sess *clt = clt_path->clt; in rtrs_rdma_conn_established()
1855 if (clt_path->queue_depth > 0 && queue_depth != clt_path->queue_depth) { in rtrs_rdma_conn_established()
1861 clt_path->reconnect_attempts = -1; in rtrs_rdma_conn_established()
1867 if (!clt_path->rbufs) { in rtrs_rdma_conn_established()
1868 clt_path->rbufs = kcalloc(queue_depth, in rtrs_rdma_conn_established()
1869 sizeof(*clt_path->rbufs), in rtrs_rdma_conn_established()
1871 if (!clt_path->rbufs) in rtrs_rdma_conn_established()
1874 clt_path->queue_depth = queue_depth; in rtrs_rdma_conn_established()
1875 clt_path->s.signal_interval = min_not_zero(queue_depth, in rtrs_rdma_conn_established()
1877 clt_path->max_hdr_size = le32_to_cpu(msg->max_hdr_size); in rtrs_rdma_conn_established()
1878 clt_path->max_io_size = le32_to_cpu(msg->max_io_size); in rtrs_rdma_conn_established()
1879 clt_path->flags = le32_to_cpu(msg->flags); in rtrs_rdma_conn_established()
1880 clt_path->chunk_size = clt_path->max_io_size + clt_path->max_hdr_size; in rtrs_rdma_conn_established()
1891 clt->queue_depth = clt_path->queue_depth; in rtrs_rdma_conn_established()
1892 clt->max_io_size = min_not_zero(clt_path->max_io_size, in rtrs_rdma_conn_established()
1899 clt_path->hca_port = con->c.cm_id->port_num; in rtrs_rdma_conn_established()
1900 scnprintf(clt_path->hca_name, sizeof(clt_path->hca_name), in rtrs_rdma_conn_established()
1901 clt_path->s.dev->ib_dev->name); in rtrs_rdma_conn_established()
1902 clt_path->s.src_addr = con->c.cm_id->route.addr.src_addr; in rtrs_rdma_conn_established()
1904 clt_path->for_new_clt = 1; in rtrs_rdma_conn_established()
1912 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); in flag_success_on_conn() local
1914 atomic_inc(&clt_path->connected_cnt); in flag_success_on_conn()
1949 void rtrs_clt_close_conns(struct rtrs_clt_path *clt_path, bool wait) in rtrs_clt_close_conns() argument
1951 trace_rtrs_clt_close_conns(clt_path); in rtrs_clt_close_conns()
1953 if (rtrs_clt_change_state_get_old(clt_path, RTRS_CLT_CLOSING, NULL)) in rtrs_clt_close_conns()
1954 queue_work(rtrs_wq, &clt_path->close_work); in rtrs_clt_close_conns()
1956 flush_work(&clt_path->close_work); in rtrs_clt_close_conns()
1962 struct rtrs_clt_path *clt_path; in flag_error_on_conn() local
1964 clt_path = to_clt_path(con->c.path); in flag_error_on_conn()
1965 if (atomic_dec_and_test(&clt_path->connected_cnt)) in flag_error_on_conn()
1967 wake_up(&clt_path->state_wq); in flag_error_on_conn()
1977 struct rtrs_clt_path *clt_path = to_clt_path(s); in rtrs_clt_rdma_cm_handler() local
1995 wake_up(&clt_path->state_wq); in rtrs_clt_rdma_cm_handler()
2024 rtrs_clt_close_conns(clt_path, false); in rtrs_clt_rdma_cm_handler()
2048 struct rtrs_clt_path *clt_path = to_clt_path(s); in create_cm() local
2053 clt_path->s.dst_addr.ss_family == AF_IB ? in create_cm()
2069 err = rdma_resolve_addr(cm_id, (struct sockaddr *)&clt_path->s.src_addr, in create_cm()
2070 (struct sockaddr *)&clt_path->s.dst_addr, in create_cm()
2082 clt_path->state_wq, in create_cm()
2083 con->cm_err || clt_path->state != RTRS_CLT_CONNECTING, in create_cm()
2095 if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTING) { in create_cm()
2114 static void rtrs_clt_path_up(struct rtrs_clt_path *clt_path) in rtrs_clt_path_up() argument
2116 struct rtrs_clt_sess *clt = clt_path->clt; in rtrs_clt_path_up()
2140 clt_path->established = true; in rtrs_clt_path_up()
2141 clt_path->reconnect_attempts = 0; in rtrs_clt_path_up()
2142 clt_path->stats->reconnects.successful_cnt++; in rtrs_clt_path_up()
2145 static void rtrs_clt_path_down(struct rtrs_clt_path *clt_path) in rtrs_clt_path_down() argument
2147 struct rtrs_clt_sess *clt = clt_path->clt; in rtrs_clt_path_down()
2149 if (!clt_path->established) in rtrs_clt_path_down()
2152 clt_path->established = false; in rtrs_clt_path_down()
2160 static void rtrs_clt_stop_and_destroy_conns(struct rtrs_clt_path *clt_path) in rtrs_clt_stop_and_destroy_conns() argument
2165 WARN_ON(READ_ONCE(clt_path->state) == RTRS_CLT_CONNECTED); in rtrs_clt_stop_and_destroy_conns()
2171 mutex_lock(&clt_path->init_mutex); in rtrs_clt_stop_and_destroy_conns()
2172 mutex_unlock(&clt_path->init_mutex); in rtrs_clt_stop_and_destroy_conns()
2180 rtrs_stop_hb(&clt_path->s); in rtrs_clt_stop_and_destroy_conns()
2189 for (cid = 0; cid < clt_path->s.con_num; cid++) { in rtrs_clt_stop_and_destroy_conns()
2190 if (!clt_path->s.con[cid]) in rtrs_clt_stop_and_destroy_conns()
2192 con = to_clt_con(clt_path->s.con[cid]); in rtrs_clt_stop_and_destroy_conns()
2195 fail_all_outstanding_reqs(clt_path); in rtrs_clt_stop_and_destroy_conns()
2196 free_path_reqs(clt_path); in rtrs_clt_stop_and_destroy_conns()
2197 rtrs_clt_path_down(clt_path); in rtrs_clt_stop_and_destroy_conns()
2207 wait_event_timeout(clt_path->state_wq, in rtrs_clt_stop_and_destroy_conns()
2208 !atomic_read(&clt_path->connected_cnt), in rtrs_clt_stop_and_destroy_conns()
2211 for (cid = 0; cid < clt_path->s.con_num; cid++) { in rtrs_clt_stop_and_destroy_conns()
2212 if (!clt_path->s.con[cid]) in rtrs_clt_stop_and_destroy_conns()
2214 con = to_clt_con(clt_path->s.con[cid]); in rtrs_clt_stop_and_destroy_conns()
2223 static void rtrs_clt_remove_path_from_arr(struct rtrs_clt_path *clt_path) in rtrs_clt_remove_path_from_arr() argument
2225 struct rtrs_clt_sess *clt = clt_path->clt; in rtrs_clt_remove_path_from_arr()
2231 list_del_rcu(&clt_path->s.entry); in rtrs_clt_remove_path_from_arr()
2272 next = rtrs_clt_get_next_path_or_null(&clt->paths_list, clt_path); in rtrs_clt_remove_path_from_arr()
2284 lockdep_is_held(&clt->paths_mutex)) != clt_path) in rtrs_clt_remove_path_from_arr()
2297 if (try_cmpxchg((struct rtrs_clt_path **)ppcpu_path, &clt_path, in rtrs_clt_remove_path_from_arr()
2313 static void rtrs_clt_add_path_to_arr(struct rtrs_clt_path *clt_path) in rtrs_clt_add_path_to_arr() argument
2315 struct rtrs_clt_sess *clt = clt_path->clt; in rtrs_clt_add_path_to_arr()
2320 list_add_tail_rcu(&clt_path->s.entry, &clt->paths_list); in rtrs_clt_add_path_to_arr()
2326 struct rtrs_clt_path *clt_path; in rtrs_clt_close_work() local
2328 clt_path = container_of(work, struct rtrs_clt_path, close_work); in rtrs_clt_close_work()
2330 cancel_work_sync(&clt_path->err_recovery_work); in rtrs_clt_close_work()
2331 cancel_delayed_work_sync(&clt_path->reconnect_dwork); in rtrs_clt_close_work()
2332 rtrs_clt_stop_and_destroy_conns(clt_path); in rtrs_clt_close_work()
2333 rtrs_clt_change_state_get_old(clt_path, RTRS_CLT_CLOSED, NULL); in rtrs_clt_close_work()
2336 static int init_conns(struct rtrs_clt_path *clt_path) in init_conns() argument
2346 clt_path->s.recon_cnt++; in init_conns()
2349 for (cid = 0; cid < clt_path->s.con_num; cid++) { in init_conns()
2350 err = create_con(clt_path, cid); in init_conns()
2354 err = create_cm(to_clt_con(clt_path->s.con[cid])); in init_conns()
2356 destroy_con(to_clt_con(clt_path->s.con[cid])); in init_conns()
2360 err = alloc_path_reqs(clt_path); in init_conns()
2364 rtrs_start_hb(&clt_path->s); in init_conns()
2370 struct rtrs_clt_con *con = to_clt_con(clt_path->s.con[cid]); in init_conns()
2385 rtrs_clt_change_state_get_old(clt_path, RTRS_CLT_CONNECTING_ERR, NULL); in init_conns()
2393 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); in rtrs_clt_info_req_done() local
2397 rtrs_iu_free(iu, clt_path->s.dev->ib_dev, 1); in rtrs_clt_info_req_done()
2400 rtrs_err(clt_path->clt, "Path info request send failed: %s\n", in rtrs_clt_info_req_done()
2402 rtrs_clt_change_state_get_old(clt_path, RTRS_CLT_CONNECTING_ERR, NULL); in rtrs_clt_info_req_done()
2409 static int process_info_rsp(struct rtrs_clt_path *clt_path, in process_info_rsp() argument
2416 if (!sg_cnt || (clt_path->queue_depth % sg_cnt)) { in process_info_rsp()
2417 rtrs_err(clt_path->clt, in process_info_rsp()
2427 if ((ilog2(sg_cnt - 1) + 1) + (ilog2(clt_path->chunk_size - 1) + 1) > in process_info_rsp()
2429 rtrs_err(clt_path->clt, in process_info_rsp()
2431 MAX_IMM_PAYL_BITS, sg_cnt, clt_path->chunk_size); in process_info_rsp()
2435 for (sgi = 0, i = 0; sgi < sg_cnt && i < clt_path->queue_depth; sgi++) { in process_info_rsp()
2446 if (!len || (len % clt_path->chunk_size)) { in process_info_rsp()
2447 rtrs_err(clt_path->clt, "Incorrect [%d].len %d\n", in process_info_rsp()
2452 for ( ; len && i < clt_path->queue_depth; i++) { in process_info_rsp()
2453 clt_path->rbufs[i].addr = addr; in process_info_rsp()
2454 clt_path->rbufs[i].rkey = rkey; in process_info_rsp()
2456 len -= clt_path->chunk_size; in process_info_rsp()
2457 addr += clt_path->chunk_size; in process_info_rsp()
2461 if (sgi != sg_cnt || i != clt_path->queue_depth) { in process_info_rsp()
2462 rtrs_err(clt_path->clt, in process_info_rsp()
2466 if (total_len != clt_path->chunk_size * clt_path->queue_depth) { in process_info_rsp()
2467 rtrs_err(clt_path->clt, "Incorrect total_len %d\n", total_len); in process_info_rsp()
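process_info_rsp() above carves each server-advertised memory region into chunk_size pieces, records one addr/rkey pair per piece until queue_depth rbufs are filled, and rejects regions whose length is not a whole number of chunks. An illustrative standalone version of that carving loop (input regions and sizes invented):

#include <stdint.h>
#include <stdio.h>

struct rbuf   { uint64_t addr; uint32_t rkey; };
struct region { uint64_t addr; uint32_t rkey; uint32_t len; };

static int carve_rbufs(const struct region *regs, int nr_regs,
                       uint32_t chunk_size, struct rbuf *rbufs, int queue_depth)
{
        int i = 0;

        for (int sgi = 0; sgi < nr_regs && i < queue_depth; sgi++) {
                uint64_t addr = regs[sgi].addr;
                uint32_t len = regs[sgi].len;

                /* A region must hold a whole number of chunks. */
                if (!len || len % chunk_size)
                        return -1;

                for (; len && i < queue_depth; i++) {
                        rbufs[i].addr = addr;
                        rbufs[i].rkey = regs[sgi].rkey;
                        len -= chunk_size;
                        addr += chunk_size;
                }
        }
        /* Every rbuf slot must be covered, and every region fully consumed. */
        return i == queue_depth ? 0 : -1;
}

int main(void)
{
        struct region regs[] = {
                { 0x1000, 0xaa, 4 * 512 },
                { 0x9000, 0xbb, 4 * 512 },
        };
        struct rbuf rbufs[8];

        if (!carve_rbufs(regs, 2, 512, rbufs, 8))
                printf("rbuf[5] addr=0x%llx rkey=0x%x\n",
                       (unsigned long long)rbufs[5].addr, rbufs[5].rkey);
        return 0;
}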
2477 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); in rtrs_clt_info_rsp_done() local
2489 rtrs_err(clt_path->clt, "Path info response recv failed: %s\n", in rtrs_clt_info_rsp_done()
2496 rtrs_err(clt_path->clt, "Path info response is malformed: size %d\n", in rtrs_clt_info_rsp_done()
2500 ib_dma_sync_single_for_cpu(clt_path->s.dev->ib_dev, iu->dma_addr, in rtrs_clt_info_rsp_done()
2504 rtrs_err(clt_path->clt, "Path info response is malformed: type %d\n", in rtrs_clt_info_rsp_done()
2511 rtrs_err(clt_path->clt, "Path info response is malformed: size %d\n", in rtrs_clt_info_rsp_done()
2515 err = process_info_rsp(clt_path, msg); in rtrs_clt_info_rsp_done()
2519 err = post_recv_path(clt_path); in rtrs_clt_info_rsp_done()
2527 rtrs_iu_free(iu, clt_path->s.dev->ib_dev, 1); in rtrs_clt_info_rsp_done()
2528 rtrs_clt_change_state_get_old(clt_path, state, NULL); in rtrs_clt_info_rsp_done()
2531 static int rtrs_send_path_info(struct rtrs_clt_path *clt_path) in rtrs_send_path_info() argument
2533 struct rtrs_clt_con *usr_con = to_clt_con(clt_path->s.con[0]); in rtrs_send_path_info()
2540 rx_sz += sizeof(struct rtrs_sg_desc) * clt_path->queue_depth; in rtrs_send_path_info()
2543 clt_path->s.dev->ib_dev, DMA_TO_DEVICE, in rtrs_send_path_info()
2545 rx_iu = rtrs_iu_alloc(1, rx_sz, GFP_KERNEL, clt_path->s.dev->ib_dev, in rtrs_send_path_info()
2554 rtrs_err(clt_path->clt, "rtrs_iu_post_recv(), err: %d\n", err); in rtrs_send_path_info()
2561 memcpy(msg->pathname, clt_path->s.sessname, sizeof(msg->pathname)); in rtrs_send_path_info()
2563 ib_dma_sync_single_for_device(clt_path->s.dev->ib_dev, in rtrs_send_path_info()
2570 rtrs_err(clt_path->clt, "rtrs_iu_post_send(), err: %d\n", err); in rtrs_send_path_info()
2576 wait_event_interruptible_timeout(clt_path->state_wq, in rtrs_send_path_info()
2577 clt_path->state != RTRS_CLT_CONNECTING, in rtrs_send_path_info()
2580 if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTED) { in rtrs_send_path_info()
2581 if (READ_ONCE(clt_path->state) == RTRS_CLT_CONNECTING_ERR) in rtrs_send_path_info()
2589 rtrs_iu_free(tx_iu, clt_path->s.dev->ib_dev, 1); in rtrs_send_path_info()
2591 rtrs_iu_free(rx_iu, clt_path->s.dev->ib_dev, 1); in rtrs_send_path_info()
2594 rtrs_clt_change_state_get_old(clt_path, in rtrs_send_path_info()
2606 static int init_path(struct rtrs_clt_path *clt_path) in init_path() argument
2611 .src = &clt_path->s.src_addr, in init_path()
2612 .dst = &clt_path->s.dst_addr, in init_path()
2617 mutex_lock(&clt_path->init_mutex); in init_path()
2618 err = init_conns(clt_path); in init_path()
2620 rtrs_err(clt_path->clt, in init_path()
2622 str, clt_path->hca_name, clt_path->hca_port); in init_path()
2625 err = rtrs_send_path_info(clt_path); in init_path()
2627 rtrs_err(clt_path->clt, in init_path()
2629 err, str, clt_path->hca_name, clt_path->hca_port); in init_path()
2632 rtrs_clt_path_up(clt_path); in init_path()
2634 mutex_unlock(&clt_path->init_mutex); in init_path()
2641 struct rtrs_clt_path *clt_path; in rtrs_clt_reconnect_work() local
2645 clt_path = container_of(to_delayed_work(work), struct rtrs_clt_path, in rtrs_clt_reconnect_work()
2647 clt = clt_path->clt; in rtrs_clt_reconnect_work()
2649 trace_rtrs_clt_reconnect_work(clt_path); in rtrs_clt_reconnect_work()
2651 if (READ_ONCE(clt_path->state) != RTRS_CLT_RECONNECTING) in rtrs_clt_reconnect_work()
2654 if (clt_path->reconnect_attempts >= clt->max_reconnect_attempts) { in rtrs_clt_reconnect_work()
2656 rtrs_clt_close_conns(clt_path, false); in rtrs_clt_reconnect_work()
2659 clt_path->reconnect_attempts++; in rtrs_clt_reconnect_work()
2662 if (rtrs_clt_change_state_get_old(clt_path, RTRS_CLT_CONNECTING, NULL)) { in rtrs_clt_reconnect_work()
2663 err = init_path(clt_path); in rtrs_clt_reconnect_work()
2671 if (rtrs_clt_change_state_get_old(clt_path, RTRS_CLT_RECONNECTING, NULL)) { in rtrs_clt_reconnect_work()
2672 clt_path->stats->reconnects.fail_cnt++; in rtrs_clt_reconnect_work()
2673 queue_work(rtrs_wq, &clt_path->err_recovery_work); in rtrs_clt_reconnect_work()
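rtrs_clt_reconnect_work() above retries while reconnect_attempts stays below the session's limit and closes the path for good once the limit is exceeded; a successful reconnect resets the counter in rtrs_clt_path_up() (line 2141). A toy model of that policy, with the limit and the try_connect() stub invented for the example:

#include <stdbool.h>
#include <stdio.h>

#define MAX_RECONNECT_ATTEMPTS 3

static bool try_connect(int attempt)
{
        /* Pretend only the third attempt succeeds. */
        return attempt == 3;
}

int main(void)
{
        int attempts = 0;

        for (;;) {
                if (attempts >= MAX_RECONNECT_ATTEMPTS) {
                        printf("giving up, closing path\n");
                        break;
                }
                attempts++;
                if (try_connect(attempts)) {
                        printf("reconnected after %d attempt(s)\n", attempts);
                        attempts = 0;   /* mirrors the reset on path up */
                        break;
                }
                printf("attempt %d failed, retrying after a delay\n", attempts);
                /* The real code requeues delayed work here instead of looping. */
        }
        return 0;
}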
2804 struct rtrs_clt_path *clt_path, *tmp; in rtrs_clt_open() local
2823 struct rtrs_clt_path *clt_path; in rtrs_clt_open() local
2825 clt_path = alloc_path(clt, &paths[i], nr_cpu_ids, in rtrs_clt_open()
2827 if (IS_ERR(clt_path)) { in rtrs_clt_open()
2828 err = PTR_ERR(clt_path); in rtrs_clt_open()
2832 clt_path->for_new_clt = 1; in rtrs_clt_open()
2833 list_add_tail_rcu(&clt_path->s.entry, &clt->paths_list); in rtrs_clt_open()
2835 err = init_path(clt_path); in rtrs_clt_open()
2837 list_del_rcu(&clt_path->s.entry); in rtrs_clt_open()
2838 rtrs_clt_close_conns(clt_path, true); in rtrs_clt_open()
2839 free_percpu(clt_path->stats->pcpu_stats); in rtrs_clt_open()
2840 kfree(clt_path->stats); in rtrs_clt_open()
2841 free_path(clt_path); in rtrs_clt_open()
2845 err = rtrs_clt_create_path_files(clt_path); in rtrs_clt_open()
2847 list_del_rcu(&clt_path->s.entry); in rtrs_clt_open()
2848 rtrs_clt_close_conns(clt_path, true); in rtrs_clt_open()
2849 free_percpu(clt_path->stats->pcpu_stats); in rtrs_clt_open()
2850 kfree(clt_path->stats); in rtrs_clt_open()
2851 free_path(clt_path); in rtrs_clt_open()
2862 list_for_each_entry_safe(clt_path, tmp, &clt->paths_list, s.entry) { in rtrs_clt_open()
2863 rtrs_clt_destroy_path_files(clt_path, NULL); in rtrs_clt_open()
2864 rtrs_clt_close_conns(clt_path, true); in rtrs_clt_open()
2865 kobject_put(&clt_path->kobj); in rtrs_clt_open()
2881 struct rtrs_clt_path *clt_path, *tmp; in rtrs_clt_close() local
2887 list_for_each_entry_safe(clt_path, tmp, &clt->paths_list, s.entry) { in rtrs_clt_close()
2888 rtrs_clt_close_conns(clt_path, true); in rtrs_clt_close()
2889 rtrs_clt_destroy_path_files(clt_path, NULL); in rtrs_clt_close()
2890 kobject_put(&clt_path->kobj); in rtrs_clt_close()
2897 int rtrs_clt_reconnect_from_sysfs(struct rtrs_clt_path *clt_path) in rtrs_clt_reconnect_from_sysfs() argument
2903 changed = rtrs_clt_change_state_get_old(clt_path, in rtrs_clt_reconnect_from_sysfs()
2907 clt_path->reconnect_attempts = 0; in rtrs_clt_reconnect_from_sysfs()
2908 rtrs_clt_stop_and_destroy_conns(clt_path); in rtrs_clt_reconnect_from_sysfs()
2909 queue_delayed_work(rtrs_wq, &clt_path->reconnect_dwork, 0); in rtrs_clt_reconnect_from_sysfs()
2917 flush_delayed_work(&clt_path->reconnect_dwork); in rtrs_clt_reconnect_from_sysfs()
2918 err = (READ_ONCE(clt_path->state) == in rtrs_clt_reconnect_from_sysfs()
2925 int rtrs_clt_remove_path_from_sysfs(struct rtrs_clt_path *clt_path, in rtrs_clt_remove_path_from_sysfs() argument
2941 rtrs_clt_close_conns(clt_path, true); in rtrs_clt_remove_path_from_sysfs()
2942 changed = rtrs_clt_change_state_get_old(clt_path, in rtrs_clt_remove_path_from_sysfs()
2948 rtrs_clt_remove_path_from_arr(clt_path); in rtrs_clt_remove_path_from_sysfs()
2949 rtrs_clt_destroy_path_files(clt_path, sysfs_self); in rtrs_clt_remove_path_from_sysfs()
2950 kobject_put(&clt_path->kobj); in rtrs_clt_remove_path_from_sysfs()
2996 struct rtrs_clt_path *clt_path; in rtrs_clt_request() local
3018 (clt_path = it.next_path(&it)) && it.i < it.clt->paths_num; it.i++) { in rtrs_clt_request()
3019 if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTED) in rtrs_clt_request()
3022 if (usr_len + hdr_len > clt_path->max_hdr_size) { in rtrs_clt_request()
3023 rtrs_wrn_rl(clt_path->clt, in rtrs_clt_request()
3026 usr_len, hdr_len, clt_path->max_hdr_size); in rtrs_clt_request()
3030 req = rtrs_clt_get_req(clt_path, ops->conf_fn, permit, ops->priv, in rtrs_clt_request()
3056 struct rtrs_clt_path *clt_path; in rtrs_clt_rdma_cq_direct() local
3061 (clt_path = it.next_path(&it)) && it.i < it.clt->paths_num; it.i++) { in rtrs_clt_rdma_cq_direct()
3062 if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTED) in rtrs_clt_rdma_cq_direct()
3065 con = clt_path->s.con[index + 1]; in rtrs_clt_rdma_cq_direct()
3103 struct rtrs_clt_path *clt_path; in rtrs_clt_create_path_from_sysfs() local
3106 clt_path = alloc_path(clt, addr, nr_cpu_ids, 0); in rtrs_clt_create_path_from_sysfs()
3107 if (IS_ERR(clt_path)) in rtrs_clt_create_path_from_sysfs()
3108 return PTR_ERR(clt_path); in rtrs_clt_create_path_from_sysfs()
3117 clt_path->for_new_clt = 1; in rtrs_clt_create_path_from_sysfs()
3127 rtrs_clt_add_path_to_arr(clt_path); in rtrs_clt_create_path_from_sysfs()
3129 err = init_path(clt_path); in rtrs_clt_create_path_from_sysfs()
3133 err = rtrs_clt_create_path_files(clt_path); in rtrs_clt_create_path_from_sysfs()
3140 rtrs_clt_remove_path_from_arr(clt_path); in rtrs_clt_create_path_from_sysfs()
3141 rtrs_clt_close_conns(clt_path, true); in rtrs_clt_create_path_from_sysfs()
3142 free_percpu(clt_path->stats->pcpu_stats); in rtrs_clt_create_path_from_sysfs()
3143 kfree(clt_path->stats); in rtrs_clt_create_path_from_sysfs()
3144 free_path(clt_path); in rtrs_clt_create_path_from_sysfs()