Lines matching refs: clt

50 static inline bool rtrs_clt_is_connected(const struct rtrs_clt_sess *clt)  in rtrs_clt_is_connected()  argument
56 list_for_each_entry_rcu(clt_path, &clt->paths_list, s.entry) in rtrs_clt_is_connected()
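
The two fragments above cover the whole of rtrs_clt_is_connected(): an RCU-protected walk over clt->paths_list that reports whether any path has reached the connected state. Reconstructed below from those fragments; the RTRS_CLT_CONNECTED comparison and the READ_ONCE() are assumptions filled in from the driver's usual conventions, not shown in the listing.

    static inline bool rtrs_clt_is_connected(const struct rtrs_clt_sess *clt)
    {
            struct rtrs_clt_path *clt_path;
            bool connected = false;

            rcu_read_lock();
            list_for_each_entry_rcu(clt_path, &clt->paths_list, s.entry)
                    if (READ_ONCE(clt_path->state) == RTRS_CLT_CONNECTED) {
                            connected = true;
                            break;
                    }
            rcu_read_unlock();

            return connected;
    }
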
67 __rtrs_get_permit(struct rtrs_clt_sess *clt, enum rtrs_clt_con_type con_type) in __rtrs_get_permit() argument
69 size_t max_depth = clt->queue_depth; in __rtrs_get_permit()
81 bit = find_first_zero_bit(clt->permits_map, max_depth); in __rtrs_get_permit()
84 } while (test_and_set_bit_lock(bit, clt->permits_map)); in __rtrs_get_permit()
86 permit = get_permit(clt, bit); in __rtrs_get_permit()
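
Lines 69-86 are the permit fast path: scan the bitmap for a free slot, then claim it with an atomic test-and-set, retrying when another CPU wins the race. A minimal sketch of that claim loop (needs <linux/bitops.h>; map and max_depth are hypothetical stand-ins for clt->permits_map and clt->queue_depth):

    static long claim_free_slot(unsigned long *map, unsigned long max_depth)
    {
            unsigned long bit;

            do {
                    bit = find_first_zero_bit(map, max_depth);
                    if (bit >= max_depth)
                            return -1;      /* no free slot */
            } while (test_and_set_bit_lock(bit, map));

            return bit;     /* owned; release with clear_bit_unlock() */
    }

test_and_set_bit_lock() returns the old bit value, so a nonzero result means the slot was grabbed concurrently and the scan restarts.
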
94 static inline void __rtrs_put_permit(struct rtrs_clt_sess *clt, in __rtrs_put_permit() argument
97 clear_bit_unlock(permit->mem_id, clt->permits_map); in __rtrs_put_permit()
114 struct rtrs_permit *rtrs_clt_get_permit(struct rtrs_clt_sess *clt, in rtrs_clt_get_permit() argument
121 permit = __rtrs_get_permit(clt, con_type); in rtrs_clt_get_permit()
126 prepare_to_wait(&clt->permits_wait, &wait, in rtrs_clt_get_permit()
128 permit = __rtrs_get_permit(clt, con_type); in rtrs_clt_get_permit()
135 finish_wait(&clt->permits_wait, &wait); in rtrs_clt_get_permit()
149 void rtrs_clt_put_permit(struct rtrs_clt_sess *clt, in rtrs_clt_put_permit() argument
152 if (WARN_ON(!test_bit(permit->mem_id, clt->permits_map))) in rtrs_clt_put_permit()
155 __rtrs_put_permit(clt, permit); in rtrs_clt_put_permit()
164 if (waitqueue_active(&clt->permits_wait)) in rtrs_clt_put_permit()
165 wake_up(&clt->permits_wait); in rtrs_clt_put_permit()
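
Together, lines 114-165 wrap that bitmap in a classic sleep/wake handshake: the getter queues itself on clt->permits_wait with prepare_to_wait() before re-trying, and the putter clears the bit with release semantics and only calls wake_up() when waitqueue_active() says someone is queued. A hedged sketch of both halves, reusing the hypothetical claim_free_slot() above (needs <linux/wait.h> and <linux/sched.h>; the io_schedule() call is an assumption, the listing only shows the prepare/finish pair):

    static long claim_slot_blocking(unsigned long *map, unsigned long depth,
                                    struct wait_queue_head *wq)
    {
            DEFINE_WAIT(wait);
            long bit;

            bit = claim_free_slot(map, depth);
            if (bit >= 0)
                    return bit;

            do {
                    prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
                    /* re-check after queuing so a concurrent release is seen */
                    bit = claim_free_slot(map, depth);
                    if (bit >= 0)
                            break;
                    io_schedule();
            } while (1);
            finish_wait(wq, &wait);

            return bit;
    }

    static void release_slot(unsigned long *map, long bit,
                             struct wait_queue_head *wq)
    {
            clear_bit_unlock(bit, map);
            /* the waitqueue_active() test is safe only because a sleeper
             * is always queued before it re-checks the bitmap */
            if (waitqueue_active(wq))
                    wake_up(wq);
    }
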
545 rtrs_err(clt_path->clt, in rtrs_clt_rkey_rsp_done()
611 rtrs_err(clt_path->clt, "RDMA failed: %s\n", in rtrs_clt_rdma_done()
689 rtrs_wrn(clt_path->clt, "Unexpected WC type: %d\n", wc->opcode); in rtrs_clt_rdma_done()
733 rtrs_err(clt_path->clt, "post_recv_io(), err: %d\n", in post_recv_path()
745 struct rtrs_clt_sess *clt; member
782 struct rtrs_clt_sess *clt; in get_next_path_rr() local
784 clt = it->clt; in get_next_path_rr()
792 ppcpu_path = this_cpu_ptr(clt->pcpu_path); in get_next_path_rr()
795 path = list_first_or_null_rcu(&clt->paths_list, in get_next_path_rr()
798 path = rtrs_clt_get_next_path_or_null(&clt->paths_list, path); in get_next_path_rr()
817 struct rtrs_clt_sess *clt = it->clt; in get_next_path_min_inflight() local
822 list_for_each_entry_rcu(clt_path, &clt->paths_list, s.entry) { in get_next_path_min_inflight()
868 struct rtrs_clt_sess *clt = it->clt; in get_next_path_min_latency() local
873 list_for_each_entry_rcu(clt_path, &clt->paths_list, s.entry) { in get_next_path_min_latency()
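
The RR, min-inflight, and min-latency selectors above all share one shape: an RCU walk over clt->paths_list that skips disconnected or already-tried paths and keeps the best candidate. A sketch of the min-inflight variant implied by lines 817-822; the stats->inflight counter and the per-CPU mp_skip_entry bookkeeping are assumptions drawn from the driver's conventions, not from the listing:

    static struct rtrs_clt_path *get_next_path_min_inflight(struct path_it *it)
    {
            struct rtrs_clt_path *min_path = NULL, *clt_path;
            struct rtrs_clt_sess *clt = it->clt;
            int min_inflight = INT_MAX, inflight;

            list_for_each_entry_rcu(clt_path, &clt->paths_list, s.entry) {
                    if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTED)
                            continue;
                    if (!list_empty(raw_cpu_ptr(clt_path->mp_skip_entry)))
                            continue;       /* already tried this round */
                    inflight = atomic_read(&clt_path->stats->inflight);
                    if (inflight < min_inflight) {
                            min_inflight = inflight;
                            min_path = clt_path;
                    }
            }
            /* remember the pick so a retry lands on a different path */
            if (min_path)
                    list_add(raw_cpu_ptr(min_path->mp_skip_entry),
                             &it->skip_list);

            return min_path;
    }
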
898 static inline void path_it_init(struct path_it *it, struct rtrs_clt_sess *clt) in path_it_init() argument
901 it->clt = clt; in path_it_init()
904 if (clt->mp_policy == MP_POLICY_RR) in path_it_init()
906 else if (clt->mp_policy == MP_POLICY_MIN_INFLIGHT) in path_it_init()
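
path_it_init() is a small strategy dispatcher: it binds the policy chosen at session creation (clt->mp_policy, set at line 2726) to a function pointer that the I/O path calls in a loop. Reconstructed from lines 745 and 898-906; the struct layout is an illustration and the min-latency fallback branch is inferred:

    struct path_it {
            int i;                          /* paths tried so far */
            struct list_head skip_list;     /* paths already returned */
            struct rtrs_clt_sess *clt;
            struct rtrs_clt_path *(*next_path)(struct path_it *it);
    };

    static inline void path_it_init(struct path_it *it,
                                    struct rtrs_clt_sess *clt)
    {
            INIT_LIST_HEAD(&it->skip_list);
            it->clt = clt;
            it->i = 0;

            if (clt->mp_policy == MP_POLICY_RR)
                    it->next_path = get_next_path_rr;
            else if (clt->mp_policy == MP_POLICY_MIN_INFLIGHT)
                    it->next_path = get_next_path_min_inflight;
            else
                    it->next_path = get_next_path_min_latency;
    }
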
967 req->mp_policy = clt_path->clt->mp_policy; in rtrs_clt_init_req()
1282 static int rtrs_clt_failover_req(struct rtrs_clt_sess *clt, in rtrs_clt_failover_req() argument
1291 for (path_it_init(&it, clt); in rtrs_clt_failover_req()
1292 (alive_path = it.next_path(&it)) && it.i < it.clt->paths_num; in rtrs_clt_failover_req()
1317 struct rtrs_clt_sess *clt = clt_path->clt; in fail_all_outstanding_reqs() local
1335 err = rtrs_clt_failover_req(clt, req); in fail_all_outstanding_reqs()
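
The caller side of that iterator is visible twice in the listing (lines 1291-1292 and 3017-3018): try each policy-ranked path until one accepts the request or paths_num candidates are exhausted. The shape is sketched below; the connected-state filter, the error value, and path_it_deinit() are assumptions added for completeness:

    struct path_it it;
    struct rtrs_clt_path *path;
    int err = -ECONNABORTED;

    rcu_read_lock();
    for (path_it_init(&it, clt);
         (path = it.next_path(&it)) && it.i < it.clt->paths_num;
         it.i++) {
            if (READ_ONCE(path->state) != RTRS_CLT_CONNECTED)
                    continue;
            err = 0;        /* ... re-issue the request on 'path' ... */
            break;
    }
    path_it_deinit(&it);
    rcu_read_unlock();
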
1406 static int alloc_permits(struct rtrs_clt_sess *clt) in alloc_permits() argument
1411 clt->permits_map = bitmap_zalloc(clt->queue_depth, GFP_KERNEL); in alloc_permits()
1412 if (!clt->permits_map) { in alloc_permits()
1416 clt->permits = kcalloc(clt->queue_depth, permit_size(clt), GFP_KERNEL); in alloc_permits()
1417 if (!clt->permits) { in alloc_permits()
1421 chunk_bits = ilog2(clt->queue_depth - 1) + 1; in alloc_permits()
1422 for (i = 0; i < clt->queue_depth; i++) { in alloc_permits()
1425 permit = get_permit(clt, i); in alloc_permits()
1433 bitmap_free(clt->permits_map); in alloc_permits()
1434 clt->permits_map = NULL; in alloc_permits()
1439 static void free_permits(struct rtrs_clt_sess *clt) in free_permits() argument
1441 if (clt->permits_map) in free_permits()
1442 wait_event(clt->permits_wait, in free_permits()
1443 bitmap_empty(clt->permits_map, clt->queue_depth)); in free_permits()
1445 bitmap_free(clt->permits_map); in free_permits()
1446 clt->permits_map = NULL; in free_permits()
1447 kfree(clt->permits); in free_permits()
1448 clt->permits = NULL; in free_permits()
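
alloc_permits() sizes two allocations off queue_depth: a claim bitmap and one flat array in which each slot is a fixed-size permit header followed by the caller's PDU area; chunk_bits = ilog2(queue_depth - 1) + 1 is simply the bit-width needed to address any chunk index below queue_depth. The probable helpers behind permit_size()/get_permit() look like this (a sketch under that layout assumption, not the verbatim kernel helpers):

    static inline size_t permit_size(struct rtrs_clt_sess *clt)
    {
            return sizeof(struct rtrs_permit) + clt->pdu_sz;
    }

    static inline struct rtrs_permit *get_permit(struct rtrs_clt_sess *clt,
                                                 int idx)
    {
            /* byte-wise indexing: each slot is larger than the bare struct */
            return (struct rtrs_permit *)((char *)clt->permits +
                                          permit_size(clt) * idx);
    }

Note the teardown ordering at lines 1441-1443: free_permits() waits on permits_wait until the bitmap is empty, so the pool is never freed while a permit is still out.
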
1470 clt_path->clt->max_segments = in query_fast_reg_mode()
1471 min(clt_path->max_pages_per_mr, clt_path->clt->max_segments); in query_fast_reg_mode()
1511 struct rtrs_clt_sess *clt; in rtrs_clt_err_recovery_work() local
1515 clt = clt_path->clt; in rtrs_clt_err_recovery_work()
1516 delay_ms = clt->reconnect_delay_sec * 1000; in rtrs_clt_err_recovery_work()
1523 static struct rtrs_clt_path *alloc_path(struct rtrs_clt_sess *clt, in alloc_path() argument
1566 strscpy(clt_path->s.sessname, clt->sessname, in alloc_path()
1568 clt_path->clt = clt; in alloc_path()
1664 rtrs_wrn(clt_path->clt, in create_con_cq_qp()
1789 struct rtrs_clt_sess *clt = clt_path->clt; in rtrs_rdma_route_resolved() local
1811 uuid_copy(&msg.paths_uuid, &clt->paths_uuid); in rtrs_rdma_route_resolved()
1815 rtrs_err(clt, "rdma_connect_locked(): %d\n", err); in rtrs_rdma_route_resolved()
1824 struct rtrs_clt_sess *clt = clt_path->clt; in rtrs_rdma_conn_established() local
1833 rtrs_err(clt, "Invalid RTRS connection response\n"); in rtrs_rdma_conn_established()
1837 rtrs_err(clt, "Invalid RTRS magic\n"); in rtrs_rdma_conn_established()
1842 rtrs_err(clt, "Unsupported major RTRS version: %d, expected %d\n", in rtrs_rdma_conn_established()
1848 rtrs_err(clt, "Invalid RTRS message: errno %d\n", in rtrs_rdma_conn_established()
1856 rtrs_err(clt, "Error: queue depth changed\n"); in rtrs_rdma_conn_established()
1862 rtrs_err(clt, in rtrs_rdma_conn_established()
1890 mutex_lock(&clt->paths_mutex); in rtrs_rdma_conn_established()
1891 clt->queue_depth = clt_path->queue_depth; in rtrs_rdma_conn_established()
1892 clt->max_io_size = min_not_zero(clt_path->max_io_size, in rtrs_rdma_conn_established()
1893 clt->max_io_size); in rtrs_rdma_conn_established()
1894 mutex_unlock(&clt->paths_mutex); in rtrs_rdma_conn_established()
2116 struct rtrs_clt_sess *clt = clt_path->clt; in rtrs_clt_path_up() local
2126 mutex_lock(&clt->paths_ev_mutex); in rtrs_clt_path_up()
2127 up = ++clt->paths_up; in rtrs_clt_path_up()
2133 if (up > MAX_PATHS_NUM && up == MAX_PATHS_NUM + clt->paths_num) in rtrs_clt_path_up()
2134 clt->paths_up = clt->paths_num; in rtrs_clt_path_up()
2136 clt->link_ev(clt->priv, RTRS_CLT_LINK_EV_RECONNECTED); in rtrs_clt_path_up()
2137 mutex_unlock(&clt->paths_ev_mutex); in rtrs_clt_path_up()
2147 struct rtrs_clt_sess *clt = clt_path->clt; in rtrs_clt_path_down() local
2153 mutex_lock(&clt->paths_ev_mutex); in rtrs_clt_path_down()
2154 WARN_ON(!clt->paths_up); in rtrs_clt_path_down()
2155 if (--clt->paths_up == 0) in rtrs_clt_path_down()
2156 clt->link_ev(clt->priv, RTRS_CLT_LINK_EV_DISCONNECTED); in rtrs_clt_path_down()
2157 mutex_unlock(&clt->paths_ev_mutex); in rtrs_clt_path_down()
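
The paths_up counter turns per-path state changes into session-wide link events. It is seeded with MAX_PATHS_NUM at allocation (line 2718) so the initial bring-up cannot spuriously fire RECONNECTED; once every initial path is up, the counter is collapsed to paths_num, and thereafter the 0-to-1 and 1-to-0 transitions fire the two events. A sketch of both transitions; the 'else if (up == 1)' branch is filled in from context and does not itself appear in the listing:

    int up;

    /* path came up */
    mutex_lock(&clt->paths_ev_mutex);
    up = ++clt->paths_up;
    if (up > MAX_PATHS_NUM && up == MAX_PATHS_NUM + clt->paths_num)
            clt->paths_up = clt->paths_num;   /* initial bring-up done */
    else if (up == 1)                         /* first path after outage */
            clt->link_ev(clt->priv, RTRS_CLT_LINK_EV_RECONNECTED);
    mutex_unlock(&clt->paths_ev_mutex);

    /* path went down */
    mutex_lock(&clt->paths_ev_mutex);
    WARN_ON(!clt->paths_up);
    if (--clt->paths_up == 0)                 /* last path lost */
            clt->link_ev(clt->priv, RTRS_CLT_LINK_EV_DISCONNECTED);
    mutex_unlock(&clt->paths_ev_mutex);
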
2225 struct rtrs_clt_sess *clt = clt_path->clt; in rtrs_clt_remove_path_from_arr() local
2230 mutex_lock(&clt->paths_mutex); in rtrs_clt_remove_path_from_arr()
2265 clt->paths_num--; in rtrs_clt_remove_path_from_arr()
2272 next = rtrs_clt_get_next_path_or_null(&clt->paths_list, clt_path); in rtrs_clt_remove_path_from_arr()
2282 ppcpu_path = per_cpu_ptr(clt->pcpu_path, cpu); in rtrs_clt_remove_path_from_arr()
2284 lockdep_is_held(&clt->paths_mutex)) != clt_path) in rtrs_clt_remove_path_from_arr()
2310 mutex_unlock(&clt->paths_mutex); in rtrs_clt_remove_path_from_arr()
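
Removing a path also has to repair the per-CPU round-robin caches (lines 2282-2284): any CPU whose cached pointer still names the removed path is redirected to the next path, or NULL. A simplified sketch; the kernel code additionally races with readers and therefore uses a cmpxchg-style update rather than this plain assignment:

    int cpu;

    for_each_possible_cpu(cpu) {
            struct rtrs_clt_path __rcu **ppcpu_path;

            ppcpu_path = per_cpu_ptr(clt->pcpu_path, cpu);
            if (rcu_dereference_protected(*ppcpu_path,
                            lockdep_is_held(&clt->paths_mutex)) != clt_path)
                    continue;       /* this CPU never cached the victim */
            rcu_assign_pointer(*ppcpu_path, next);
    }
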
2315 struct rtrs_clt_sess *clt = clt_path->clt; in rtrs_clt_add_path_to_arr() local
2317 mutex_lock(&clt->paths_mutex); in rtrs_clt_add_path_to_arr()
2318 clt->paths_num++; in rtrs_clt_add_path_to_arr()
2320 list_add_tail_rcu(&clt_path->s.entry, &clt->paths_list); in rtrs_clt_add_path_to_arr()
2321 mutex_unlock(&clt->paths_mutex); in rtrs_clt_add_path_to_arr()
2400 rtrs_err(clt_path->clt, "Path info request send failed: %s\n", in rtrs_clt_info_req_done()
2417 rtrs_err(clt_path->clt, in process_info_rsp()
2429 rtrs_err(clt_path->clt, in process_info_rsp()
2447 rtrs_err(clt_path->clt, "Incorrect [%d].len %d\n", in process_info_rsp()
2462 rtrs_err(clt_path->clt, in process_info_rsp()
2467 rtrs_err(clt_path->clt, "Incorrect total_len %d\n", total_len); in process_info_rsp()
2489 rtrs_err(clt_path->clt, "Path info response recv failed: %s\n", in rtrs_clt_info_rsp_done()
2496 rtrs_err(clt_path->clt, "Path info response is malformed: size %d\n", in rtrs_clt_info_rsp_done()
2504 rtrs_err(clt_path->clt, "Path info response is malformed: type %d\n", in rtrs_clt_info_rsp_done()
2511 rtrs_err(clt_path->clt, "Path info response is malformed: size %d\n", in rtrs_clt_info_rsp_done()
2554 rtrs_err(clt_path->clt, "rtrs_iu_post_recv(), err: %d\n", err); in rtrs_send_path_info()
2570 rtrs_err(clt_path->clt, "rtrs_iu_post_send(), err: %d\n", err); in rtrs_send_path_info()
2620 rtrs_err(clt_path->clt, in init_path()
2627 rtrs_err(clt_path->clt, in init_path()
2642 struct rtrs_clt_sess *clt; in rtrs_clt_reconnect_work() local
2647 clt = clt_path->clt; in rtrs_clt_reconnect_work()
2654 if (clt_path->reconnect_attempts >= clt->max_reconnect_attempts) { in rtrs_clt_reconnect_work()
2679 struct rtrs_clt_sess *clt = container_of(dev, struct rtrs_clt_sess, in rtrs_clt_dev_release() local
2682 mutex_destroy(&clt->paths_ev_mutex); in rtrs_clt_dev_release()
2683 mutex_destroy(&clt->paths_mutex); in rtrs_clt_dev_release()
2684 kfree(clt); in rtrs_clt_dev_release()
2694 struct rtrs_clt_sess *clt; in alloc_clt() local
2700 if (strlen(sessname) >= sizeof(clt->sessname)) in alloc_clt()
2703 clt = kzalloc(sizeof(*clt), GFP_KERNEL); in alloc_clt()
2704 if (!clt) in alloc_clt()
2707 clt->pcpu_path = alloc_percpu(typeof(*clt->pcpu_path)); in alloc_clt()
2708 if (!clt->pcpu_path) { in alloc_clt()
2709 kfree(clt); in alloc_clt()
2713 clt->dev.class = rtrs_clt_dev_class; in alloc_clt()
2714 clt->dev.release = rtrs_clt_dev_release; in alloc_clt()
2715 uuid_gen(&clt->paths_uuid); in alloc_clt()
2716 INIT_LIST_HEAD_RCU(&clt->paths_list); in alloc_clt()
2717 clt->paths_num = paths_num; in alloc_clt()
2718 clt->paths_up = MAX_PATHS_NUM; in alloc_clt()
2719 clt->port = port; in alloc_clt()
2720 clt->pdu_sz = pdu_sz; in alloc_clt()
2721 clt->max_segments = RTRS_MAX_SEGMENTS; in alloc_clt()
2722 clt->reconnect_delay_sec = reconnect_delay_sec; in alloc_clt()
2723 clt->max_reconnect_attempts = max_reconnect_attempts; in alloc_clt()
2724 clt->priv = priv; in alloc_clt()
2725 clt->link_ev = link_ev; in alloc_clt()
2726 clt->mp_policy = MP_POLICY_MIN_INFLIGHT; in alloc_clt()
2727 strscpy(clt->sessname, sessname, sizeof(clt->sessname)); in alloc_clt()
2728 init_waitqueue_head(&clt->permits_wait); in alloc_clt()
2729 mutex_init(&clt->paths_ev_mutex); in alloc_clt()
2730 mutex_init(&clt->paths_mutex); in alloc_clt()
2731 device_initialize(&clt->dev); in alloc_clt()
2733 err = dev_set_name(&clt->dev, "%s", sessname); in alloc_clt()
2741 dev_set_uevent_suppress(&clt->dev, true); in alloc_clt()
2742 err = device_add(&clt->dev); in alloc_clt()
2746 clt->kobj_paths = kobject_create_and_add("paths", &clt->dev.kobj); in alloc_clt()
2747 if (!clt->kobj_paths) { in alloc_clt()
2751 err = rtrs_clt_create_sysfs_root_files(clt); in alloc_clt()
2753 kobject_del(clt->kobj_paths); in alloc_clt()
2754 kobject_put(clt->kobj_paths); in alloc_clt()
2757 dev_set_uevent_suppress(&clt->dev, false); in alloc_clt()
2758 kobject_uevent(&clt->dev.kobj, KOBJ_ADD); in alloc_clt()
2760 return clt; in alloc_clt()
2762 device_del(&clt->dev); in alloc_clt()
2764 free_percpu(clt->pcpu_path); in alloc_clt()
2765 put_device(&clt->dev); in alloc_clt()
2769 static void free_clt(struct rtrs_clt_sess *clt) in free_clt() argument
2771 free_percpu(clt->pcpu_path); in free_clt()
2776 device_unregister(&clt->dev); in free_clt()
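
alloc_clt()/free_clt() follow the standard refcounted struct-device lifetime: after device_initialize() (line 2731) the session may only be freed through put_device(), which invokes the ->release() callback set at line 2714; the error path at lines 2762-2765 and the device_unregister() in free_clt() both end there. The release callback itself is fully visible at lines 2679-2684:

    static void rtrs_clt_dev_release(struct device *dev)
    {
            struct rtrs_clt_sess *clt = container_of(dev,
                                                     struct rtrs_clt_sess,
                                                     dev);

            mutex_destroy(&clt->paths_ev_mutex);
            mutex_destroy(&clt->paths_mutex);
            kfree(clt);     /* the only kfree() of a live session object */
    }
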
2805 struct rtrs_clt_sess *clt; in rtrs_clt_open() local
2814 clt = alloc_clt(pathname, paths_num, port, pdu_sz, ops->priv, in rtrs_clt_open()
2818 if (IS_ERR(clt)) { in rtrs_clt_open()
2819 err = PTR_ERR(clt); in rtrs_clt_open()
2825 clt_path = alloc_path(clt, &paths[i], nr_cpu_ids, in rtrs_clt_open()
2833 list_add_tail_rcu(&clt_path->s.entry, &clt->paths_list); in rtrs_clt_open()
2855 err = alloc_permits(clt); in rtrs_clt_open()
2859 return clt; in rtrs_clt_open()
2862 list_for_each_entry_safe(clt_path, tmp, &clt->paths_list, s.entry) { in rtrs_clt_open()
2867 rtrs_clt_destroy_sysfs_root(clt); in rtrs_clt_open()
2868 free_clt(clt); in rtrs_clt_open()
2879 void rtrs_clt_close(struct rtrs_clt_sess *clt) in rtrs_clt_close() argument
2884 rtrs_clt_destroy_sysfs_root(clt); in rtrs_clt_close()
2887 list_for_each_entry_safe(clt_path, tmp, &clt->paths_list, s.entry) { in rtrs_clt_close()
2892 free_permits(clt); in rtrs_clt_close()
2893 free_clt(clt); in rtrs_clt_close()
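
A hypothetical caller pairing for rtrs_clt_open()/rtrs_clt_close(), in the style of a block-layer user such as rnbd-clt; the parameter list here is abbreviated and approximate, not a verbatim prototype:

    struct rtrs_clt_sess *sess;

    sess = rtrs_clt_open(&ops, "blk-sess", paths, paths_num, port,
                         pdu_sz, reconnect_delay_sec,
                         max_reconnect_attempts, nr_poll_queues);
    if (IS_ERR(sess))
            return PTR_ERR(sess);

    /* ... rtrs_clt_get_permit() / rtrs_clt_request() I/O ... */

    rtrs_clt_close(sess);   /* tears down paths, permits, then the device */
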
2956 void rtrs_clt_set_max_reconnect_attempts(struct rtrs_clt_sess *clt, int value) in rtrs_clt_set_max_reconnect_attempts() argument
2958 clt->max_reconnect_attempts = (unsigned int)value; in rtrs_clt_set_max_reconnect_attempts()
2961 int rtrs_clt_get_max_reconnect_attempts(const struct rtrs_clt_sess *clt) in rtrs_clt_get_max_reconnect_attempts() argument
2963 return (int)clt->max_reconnect_attempts; in rtrs_clt_get_max_reconnect_attempts()
2991 struct rtrs_clt_sess *clt, struct rtrs_permit *permit, in rtrs_clt_request() argument
3017 for (path_it_init(&it, clt); in rtrs_clt_request()
3018 (clt_path = it.next_path(&it)) && it.i < it.clt->paths_num; it.i++) { in rtrs_clt_request()
3023 rtrs_wrn_rl(clt_path->clt, in rtrs_clt_request()
3051 int rtrs_clt_rdma_cq_direct(struct rtrs_clt_sess *clt, unsigned int index) in rtrs_clt_rdma_cq_direct() argument
3060 for (path_it_init(&it, clt); in rtrs_clt_rdma_cq_direct()
3061 (clt_path = it.next_path(&it)) && it.i < it.clt->paths_num; it.i++) { in rtrs_clt_rdma_cq_direct()
3085 int rtrs_clt_query(struct rtrs_clt_sess *clt, struct rtrs_attrs *attr) in rtrs_clt_query() argument
3087 if (!rtrs_clt_is_connected(clt)) in rtrs_clt_query()
3090 attr->queue_depth = clt->queue_depth; in rtrs_clt_query()
3091 attr->max_segments = clt->max_segments; in rtrs_clt_query()
3093 attr->max_io_size = min_t(int, clt->max_io_size, in rtrs_clt_query()
3094 clt->max_segments * SZ_4K); in rtrs_clt_query()
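
rtrs_clt_query() hands the negotiated limits to the upper layer, clamping max_io_size so it never exceeds what max_segments 4K pages can carry. A hypothetical consumer-side call; the field names follow struct rtrs_attrs, and the behavior on a disconnected session (a nonzero error, per the guard at line 3087) is an assumption:

    struct rtrs_attrs attrs;
    int err;

    err = rtrs_clt_query(sess, &attrs);
    if (err)
            return err;     /* session has no connected path */

    pr_info("qdepth=%u max_io=%u max_seg=%u\n",
            attrs.queue_depth, attrs.max_io_size, attrs.max_segments);
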
3100 int rtrs_clt_create_path_from_sysfs(struct rtrs_clt_sess *clt, in rtrs_clt_create_path_from_sysfs() argument
3106 clt_path = alloc_path(clt, addr, nr_cpu_ids, 0); in rtrs_clt_create_path_from_sysfs()
3110 mutex_lock(&clt->paths_mutex); in rtrs_clt_create_path_from_sysfs()
3111 if (clt->paths_num == 0) { in rtrs_clt_create_path_from_sysfs()
3120 mutex_unlock(&clt->paths_mutex); in rtrs_clt_create_path_from_sysfs()