Lines matching full:info
(Identifier cross-reference output. Judging by the function names, the matches come from the Linux CIFS client's SMB Direct RDMA transport, fs/cifs/smbdirect.c, where `info` conventionally names the per-connection struct smbd_connection. The leading number on each match is the source line in that file; trailing annotations such as "local", "argument", and "macro" are the cross-referencer's tags.)
15 struct smbd_connection *info);
17 struct smbd_connection *info);
19 struct smbd_connection *info,
21 static int allocate_receive_buffers(struct smbd_connection *info, int num_buf);
22 static void destroy_receive_buffers(struct smbd_connection *info);
25 struct smbd_connection *info, struct smbd_response *response);
27 struct smbd_connection *info,
30 struct smbd_connection *info);
33 struct smbd_connection *info,
36 static int smbd_post_send_empty(struct smbd_connection *info);
38 struct smbd_connection *info,
40 static int smbd_post_send_page(struct smbd_connection *info,
44 static void destroy_mr_list(struct smbd_connection *info);
45 static int allocate_mr_list(struct smbd_connection *info);
129 #define INFO 0x1 macro
133 "Logging level for SMBD transport, 0 (default): error, 1: info");
160 struct smbd_connection *info = in smbd_disconnect_rdma_work() local
163 if (info->transport_status == SMBD_CONNECTED) { in smbd_disconnect_rdma_work()
164 info->transport_status = SMBD_DISCONNECTING; in smbd_disconnect_rdma_work()
165 rdma_disconnect(info->id); in smbd_disconnect_rdma_work()
169 static void smbd_disconnect_rdma_connection(struct smbd_connection *info) in smbd_disconnect_rdma_connection() argument
171 queue_work(info->workqueue, &info->disconnect_work); in smbd_disconnect_rdma_connection()
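smbd_disconnect_rdma_work() is the only place that calls rdma_disconnect(); smbd_disconnect_rdma_connection() just queues it on the connection's workqueue, so completion handlers running in atomic context never tear the connection down directly. The transport states visible in this listing fit an enum along these lines (the ordering and the initial SMBD_CREATED value are assumptions; the other names all appear in the fragments):

    enum smbd_connection_status {
            SMBD_CREATED,            /* assumed initial state */
            SMBD_CONNECTING,
            SMBD_CONNECTED,
            SMBD_NEGOTIATE_FAILED,
            SMBD_DISCONNECTING,
            SMBD_DISCONNECTED,
            SMBD_DESTROYED,
    };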
178 struct smbd_connection *info = id->context; in smbd_conn_upcall() local
180 log_rdma_event(INFO, "event=%d status=%d\n", in smbd_conn_upcall()
186 info->ri_rc = 0; in smbd_conn_upcall()
187 complete(&info->ri_done); in smbd_conn_upcall()
191 info->ri_rc = -EHOSTUNREACH; in smbd_conn_upcall()
192 complete(&info->ri_done); in smbd_conn_upcall()
196 info->ri_rc = -ENETUNREACH; in smbd_conn_upcall()
197 complete(&info->ri_done); in smbd_conn_upcall()
201 log_rdma_event(INFO, "connected event=%d\n", event->event); in smbd_conn_upcall()
202 info->transport_status = SMBD_CONNECTED; in smbd_conn_upcall()
203 wake_up_interruptible(&info->conn_wait); in smbd_conn_upcall()
209 log_rdma_event(INFO, "connecting failed event=%d\n", event->event); in smbd_conn_upcall()
210 info->transport_status = SMBD_DISCONNECTED; in smbd_conn_upcall()
211 wake_up_interruptible(&info->conn_wait); in smbd_conn_upcall()
217 if (info->transport_status == SMBD_NEGOTIATE_FAILED) { in smbd_conn_upcall()
218 info->transport_status = SMBD_DISCONNECTED; in smbd_conn_upcall()
219 wake_up(&info->conn_wait); in smbd_conn_upcall()
223 info->transport_status = SMBD_DISCONNECTED; in smbd_conn_upcall()
224 wake_up_interruptible(&info->disconn_wait); in smbd_conn_upcall()
225 wake_up_interruptible(&info->wait_reassembly_queue); in smbd_conn_upcall()
226 wake_up_interruptible_all(&info->wait_send_queue); in smbd_conn_upcall()
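Pieced together, smbd_conn_upcall() maps RDMA CM events onto that state machine: address/route resolution completes ri_done (ri_rc becomes -EHOSTUNREACH or -ENETUNREACH on failure), ESTABLISHED moves to SMBD_CONNECTED, connection failures move to SMBD_DISCONNECTED, and a disconnect wakes every waiter. A condensed kernel-style sketch, not compilable on its own; the event groupings are inferred from the fragments:

    switch (event->event) {
    case RDMA_CM_EVENT_ADDR_RESOLVED:
    case RDMA_CM_EVENT_ROUTE_RESOLVED:
            info->ri_rc = 0;
            complete(&info->ri_done);
            break;
    case RDMA_CM_EVENT_ADDR_ERROR:
            info->ri_rc = -EHOSTUNREACH;
            complete(&info->ri_done);
            break;
    case RDMA_CM_EVENT_ROUTE_ERROR:
            info->ri_rc = -ENETUNREACH;
            complete(&info->ri_done);
            break;
    case RDMA_CM_EVENT_ESTABLISHED:
            info->transport_status = SMBD_CONNECTED;
            wake_up_interruptible(&info->conn_wait);
            break;
    case RDMA_CM_EVENT_CONNECT_ERROR:
    case RDMA_CM_EVENT_UNREACHABLE:
    case RDMA_CM_EVENT_REJECTED:
            info->transport_status = SMBD_DISCONNECTED;
            wake_up_interruptible(&info->conn_wait);
            break;
    case RDMA_CM_EVENT_DISCONNECTED:
            /* Failed negotiation: the connect path is still waiting. */
            if (info->transport_status == SMBD_NEGOTIATE_FAILED) {
                    info->transport_status = SMBD_DISCONNECTED;
                    wake_up(&info->conn_wait);
                    break;
            }
            info->transport_status = SMBD_DISCONNECTED;
            wake_up_interruptible(&info->disconn_wait);
            wake_up_interruptible(&info->wait_reassembly_queue);
            wake_up_interruptible_all(&info->wait_send_queue);
            break;
    }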
240 struct smbd_connection *info = context; in smbd_qp_async_error_upcall() local
242 log_rdma_event(ERR, "%s on device %s info %p\n", in smbd_qp_async_error_upcall()
243 ib_event_msg(event->event), event->device->name, info); in smbd_qp_async_error_upcall()
248 smbd_disconnect_rdma_connection(info); in smbd_qp_async_error_upcall()
272 log_rdma_send(INFO, "smbd_request %p completed wc->status=%d\n", in send_done()
278 smbd_disconnect_rdma_connection(request->info); in send_done()
282 ib_dma_unmap_single(request->info->id->device, in send_done()
288 if (atomic_dec_and_test(&request->info->send_payload_pending)) in send_done()
289 wake_up(&request->info->wait_send_payload_pending); in send_done()
291 if (atomic_dec_and_test(&request->info->send_pending)) in send_done()
292 wake_up(&request->info->wait_send_pending); in send_done()
295 mempool_free(request, request->info->request_mempool); in send_done()
300 log_rdma_event(INFO, "resp message min_version %u max_version %u " in dump_smbd_negotiate_resp()
319 struct smbd_connection *info = response->info; in process_negotiation_response() local
333 info->protocol = le16_to_cpu(packet->negotiated_version); in process_negotiation_response()
339 info->receive_credit_target = le16_to_cpu(packet->credits_requested); in process_negotiation_response()
345 atomic_set(&info->send_credits, le16_to_cpu(packet->credits_granted)); in process_negotiation_response()
347 atomic_set(&info->receive_credits, 0); in process_negotiation_response()
349 if (le32_to_cpu(packet->preferred_send_size) > info->max_receive_size) { in process_negotiation_response()
354 info->max_receive_size = le32_to_cpu(packet->preferred_send_size); in process_negotiation_response()
361 info->max_send_size = min_t(int, info->max_send_size, in process_negotiation_response()
370 info->max_fragmented_send_size = in process_negotiation_response()
372 info->rdma_readwrite_threshold = in process_negotiation_response()
373 rdma_readwrite_threshold > info->max_fragmented_send_size ? in process_negotiation_response()
374 info->max_fragmented_send_size : in process_negotiation_response()
378 info->max_readwrite_size = min_t(u32, in process_negotiation_response()
380 info->max_frmr_depth * PAGE_SIZE); in process_negotiation_response()
381 info->max_frmr_depth = info->max_readwrite_size / PAGE_SIZE; in process_negotiation_response()
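process_negotiation_response() reconciles the locally configured limits with what the peer advertised. The arithmetic alone, as a self-contained program (the sample values, PAGE_SIZE, and configured_max_rw, which stands in for the truncated first operand of the min_t() fragment above, are illustrative assumptions):

    #include <stdio.h>

    #define PAGE_SIZE 4096u

    static unsigned int min_u32(unsigned int a, unsigned int b)
    {
            return a < b ? a : b;
    }

    int main(void)
    {
            /* Local configuration (illustrative). */
            unsigned int max_send_size = 1364;
            unsigned int max_receive_size = 8192;
            unsigned int max_frmr_depth = 2048;
            unsigned int rdma_readwrite_threshold = 4096;
            unsigned int configured_max_rw = 1048576;   /* assumed knob */

            /* Peer-advertised values (illustrative). */
            unsigned int peer_preferred_send_size = 1364;
            unsigned int peer_max_receive_size = 8192;
            unsigned int peer_max_fragmented_size = 1048576;

            /* Grow our receive size to the peer's preferred send size. */
            if (peer_preferred_send_size > max_receive_size)
                    max_receive_size = peer_preferred_send_size;

            /* Never send more than the peer can receive. */
            max_send_size = min_u32(max_send_size, peer_max_receive_size);

            unsigned int max_fragmented_send_size = peer_max_fragmented_size;

            /* The threshold for switching to RDMA read/write cannot
             * exceed what a fragmented send could carry anyway. */
            if (rdma_readwrite_threshold > max_fragmented_send_size)
                    rdma_readwrite_threshold = max_fragmented_send_size;

            /* Direct I/O is capped by what one fast-registration MR maps,
             * and the depth is rounded back down to whole pages. */
            unsigned int max_readwrite_size =
                    min_u32(configured_max_rw, max_frmr_depth * PAGE_SIZE);
            max_frmr_depth = max_readwrite_size / PAGE_SIZE;

            printf("send=%u recv=%u rw=%u depth=%u threshold=%u\n",
                   max_send_size, max_receive_size, max_readwrite_size,
                   max_frmr_depth, rdma_readwrite_threshold);
            return 0;
    }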
390 static void check_and_send_immediate(struct smbd_connection *info) in check_and_send_immediate() argument
392 if (info->transport_status != SMBD_CONNECTED) in check_and_send_immediate()
395 info->send_immediate = true; in check_and_send_immediate()
401 if (atomic_read(&info->receive_credits) < in check_and_send_immediate()
402 info->receive_credit_target - 1) in check_and_send_immediate()
404 info->workqueue, &info->send_immediate_work, 0); in check_and_send_immediate()
413 struct smbd_connection *info = in smbd_post_send_credits() local
417 if (info->transport_status != SMBD_CONNECTED) { in smbd_post_send_credits()
418 wake_up(&info->wait_receive_queues); in smbd_post_send_credits()
422 if (info->receive_credit_target > in smbd_post_send_credits()
423 atomic_read(&info->receive_credits)) { in smbd_post_send_credits()
426 response = get_receive_buffer(info); in smbd_post_send_credits()
428 response = get_empty_queue_buffer(info); in smbd_post_send_credits()
440 rc = smbd_post_recv(info, response); in smbd_post_send_credits()
444 put_receive_buffer(info, response); in smbd_post_send_credits()
452 spin_lock(&info->lock_new_credits_offered); in smbd_post_send_credits()
453 info->new_credits_offered += ret; in smbd_post_send_credits()
454 spin_unlock(&info->lock_new_credits_offered); in smbd_post_send_credits()
456 atomic_add(ret, &info->receive_credits); in smbd_post_send_credits()
459 check_and_send_immediate(info); in smbd_post_send_credits()
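check_and_send_immediate() schedules an immediate (possibly empty) send whenever the peer is running low on the credits we granted, and smbd_post_send_credits() is the refill path: it keeps posting receive buffers until receive_credits reaches receive_credit_target, then records the newly postable credits in new_credits_offered. A single-threaded model of the refill loop (the helpers are trivial stand-ins; the driver does the last two updates under lock_new_credits_offered and with atomics):

    #include <stdio.h>
    #include <stdlib.h>

    static int receive_credits = 1;
    static int receive_credit_target = 10;
    static int new_credits_offered;

    static void *get_receive_buffer(void)     { return malloc(1); }
    static void *get_empty_queue_buffer(void) { return NULL; }
    static int   post_one_recv(void *buf)     { (void)buf; return 0; }
    static void  put_receive_buffer(void *b)  { free(b); }

    static void post_send_credits(void)
    {
            int posted = 0;

            while (receive_credit_target > receive_credits + posted) {
                    void *buf = get_receive_buffer();
                    if (!buf)
                            buf = get_empty_queue_buffer();
                    if (!buf)
                            break;                  /* out of buffers */
                    if (post_one_recv(buf)) {
                            put_receive_buffer(buf);
                            break;
                    }
                    posted++;                       /* buffer now owned by HW */
            }

            /* Credits just posted become offerable on the next send. */
            new_credits_offered += posted;
            receive_credits += posted;
    }

    int main(void)
    {
            post_send_credits();
            printf("credits=%d offered=%d\n",
                   receive_credits, new_credits_offered);
            return 0;
    }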
464 struct smbd_connection *info = in smbd_recv_done_work() local
471 if (atomic_read(&info->send_credits)) in smbd_recv_done_work()
472 wake_up_interruptible(&info->wait_send_queue); in smbd_recv_done_work()
478 check_and_send_immediate(info); in smbd_recv_done_work()
487 struct smbd_connection *info = response->info; in recv_done() local
490 log_rdma_recv(INFO, "response=%p type=%d wc status=%d wc opcode %d " in recv_done()
496 log_rdma_recv(INFO, "wc->status=%d opcode=%d\n", in recv_done()
498 smbd_disconnect_rdma_connection(info); in recv_done()
512 info->full_packet_received = true; in recv_done()
513 info->negotiate_done = in recv_done()
515 complete(&info->negotiate_completion); in recv_done()
528 if (info->full_packet_received) in recv_done()
532 info->full_packet_received = false; in recv_done()
534 info->full_packet_received = true; in recv_done()
537 info, in recv_done()
541 put_empty_packet(info, response); in recv_done()
544 wake_up_interruptible(&info->wait_reassembly_queue); in recv_done()
546 atomic_dec(&info->receive_credits); in recv_done()
547 info->receive_credit_target = in recv_done()
550 &info->send_credits); in recv_done()
552 log_incoming(INFO, "data flags %d data_offset %d " in recv_done()
560 info->keep_alive_requested = KEEP_ALIVE_NONE; in recv_done()
563 info->keep_alive_requested = KEEP_ALIVE_PENDING; in recv_done()
566 queue_work(info->workqueue, &info->recv_done_work); in recv_done()
575 put_receive_buffer(info, response); in recv_done()
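recv_done() is the consumer side of the credit scheme: each incoming packet's credits_requested becomes the new receive_credit_target, its credits_granted is added to send_credits, and payload-bearing packets are queued for reassembly while empty ones are recycled via put_empty_packet(). The wire header these fragments decode plausibly looks like this (reconstructed from the field names above and the MS-SMBD data-transfer layout; a sketch, not the driver's header file):

    struct smbd_data_transfer {
            __le16 credits_requested;     /* becomes receive_credit_target */
            __le16 credits_granted;       /* added to send_credits */
            __le16 flags;                 /* e.g. keep-alive requested */
            __le16 reserved;
            __le32 remaining_data_length; /* bytes still to follow */
            __le32 data_offset;           /* payload offset in this packet */
            __le32 data_length;           /* payload bytes in this packet */
            __u8   buffer[];
    } __packed;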
579 struct smbd_connection *info, in smbd_create_id() argument
586 id = rdma_create_id(&init_net, smbd_conn_upcall, info, in smbd_create_id()
601 init_completion(&info->ri_done); in smbd_create_id()
602 info->ri_rc = -ETIMEDOUT; in smbd_create_id()
611 &info->ri_done, msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT)); in smbd_create_id()
612 rc = info->ri_rc; in smbd_create_id()
618 info->ri_rc = -ETIMEDOUT; in smbd_create_id()
625 &info->ri_done, msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT)); in smbd_create_id()
626 rc = info->ri_rc; in smbd_create_id()
654 struct smbd_connection *info, in smbd_ia_open() argument
659 info->id = smbd_create_id(info, dstaddr, port); in smbd_ia_open()
660 if (IS_ERR(info->id)) { in smbd_ia_open()
661 rc = PTR_ERR(info->id); in smbd_ia_open()
665 if (!frwr_is_supported(&info->id->device->attrs)) { in smbd_ia_open()
672 info->id->device->attrs.device_cap_flags, in smbd_ia_open()
673 info->id->device->attrs.max_fast_reg_page_list_len); in smbd_ia_open()
677 info->max_frmr_depth = min_t(int, in smbd_ia_open()
679 info->id->device->attrs.max_fast_reg_page_list_len); in smbd_ia_open()
680 info->mr_type = IB_MR_TYPE_MEM_REG; in smbd_ia_open()
681 if (info->id->device->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG) in smbd_ia_open()
682 info->mr_type = IB_MR_TYPE_SG_GAPS; in smbd_ia_open()
684 info->pd = ib_alloc_pd(info->id->device, 0); in smbd_ia_open()
685 if (IS_ERR(info->pd)) { in smbd_ia_open()
686 rc = PTR_ERR(info->pd); in smbd_ia_open()
694 rdma_destroy_id(info->id); in smbd_ia_open()
695 info->id = NULL; in smbd_ia_open()
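smbd_ia_open() probes the adapter before building anything: it bails out unless FRWR is supported, derives max_frmr_depth from the device's fast-registration limit, and prefers SG_GAPS memory regions when the device offers them. Condensed from the fragments (smbd_max_frmr_depth names the truncated first operand of the min_t() and is an assumption):

    info->max_frmr_depth = min_t(int, smbd_max_frmr_depth,
            info->id->device->attrs.max_fast_reg_page_list_len);
    info->mr_type = IB_MR_TYPE_MEM_REG;
    if (info->id->device->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG)
            info->mr_type = IB_MR_TYPE_SG_GAPS;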
707 static int smbd_post_send_negotiate_req(struct smbd_connection *info) in smbd_post_send_negotiate_req() argument
714 request = mempool_alloc(info->request_mempool, GFP_KERNEL); in smbd_post_send_negotiate_req()
718 request->info = info; in smbd_post_send_negotiate_req()
724 packet->credits_requested = cpu_to_le16(info->send_credit_target); in smbd_post_send_negotiate_req()
725 packet->preferred_send_size = cpu_to_le32(info->max_send_size); in smbd_post_send_negotiate_req()
726 packet->max_receive_size = cpu_to_le32(info->max_receive_size); in smbd_post_send_negotiate_req()
728 cpu_to_le32(info->max_fragmented_recv_size); in smbd_post_send_negotiate_req()
732 info->id->device, (void *)packet, in smbd_post_send_negotiate_req()
734 if (ib_dma_mapping_error(info->id->device, request->sge[0].addr)) { in smbd_post_send_negotiate_req()
740 request->sge[0].lkey = info->pd->local_dma_lkey; in smbd_post_send_negotiate_req()
743 info->id->device, request->sge[0].addr, in smbd_post_send_negotiate_req()
755 log_rdma_send(INFO, "sge addr=%llx length=%x lkey=%x\n", in smbd_post_send_negotiate_req()
760 atomic_inc(&info->send_pending); in smbd_post_send_negotiate_req()
761 rc = ib_post_send(info->id->qp, &send_wr, NULL); in smbd_post_send_negotiate_req()
767 atomic_dec(&info->send_pending); in smbd_post_send_negotiate_req()
768 ib_dma_unmap_single(info->id->device, request->sge[0].addr, in smbd_post_send_negotiate_req()
771 smbd_disconnect_rdma_connection(info); in smbd_post_send_negotiate_req()
774 mempool_free(request, info->request_mempool); in smbd_post_send_negotiate_req()
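smbd_post_send_negotiate_req() allocates a request from the mempool, fills in the negotiate packet, DMA-maps it, and posts it with send_pending raised; every failure path unwinds the mapping and tears the connection down. The packet implied by the assignments above, with the version fields taken from the response dump earlier (a reconstruction, not the driver's definition):

    struct smbd_negotiate_req {
            __le16 min_version;           /* cf. the resp dump above */
            __le16 max_version;
            __le16 reserved;
            __le16 credits_requested;     /* <- info->send_credit_target */
            __le32 preferred_send_size;   /* <- info->max_send_size */
            __le32 max_receive_size;      /* <- info->max_receive_size */
            __le32 max_fragmented_size;   /* <- info->max_fragmented_recv_size */
    } __packed;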
786 static int manage_credits_prior_sending(struct smbd_connection *info) in manage_credits_prior_sending() argument
790 spin_lock(&info->lock_new_credits_offered); in manage_credits_prior_sending()
791 new_credits = info->new_credits_offered; in manage_credits_prior_sending()
792 info->new_credits_offered = 0; in manage_credits_prior_sending()
793 spin_unlock(&info->lock_new_credits_offered); in manage_credits_prior_sending()
807 static int manage_keep_alive_before_sending(struct smbd_connection *info) in manage_keep_alive_before_sending() argument
809 if (info->keep_alive_requested == KEEP_ALIVE_PENDING) { in manage_keep_alive_before_sending()
810 info->keep_alive_requested = KEEP_ALIVE_SENT; in manage_keep_alive_before_sending()
829 static int smbd_create_header(struct smbd_connection *info, in smbd_create_header() argument
839 rc = wait_event_interruptible(info->wait_send_queue, in smbd_create_header()
840 atomic_read(&info->send_credits) > 0 || in smbd_create_header()
841 info->transport_status != SMBD_CONNECTED); in smbd_create_header()
845 if (info->transport_status != SMBD_CONNECTED) { in smbd_create_header()
849 atomic_dec(&info->send_credits); in smbd_create_header()
851 request = mempool_alloc(info->request_mempool, GFP_KERNEL); in smbd_create_header()
857 request->info = info; in smbd_create_header()
861 packet->credits_requested = cpu_to_le16(info->send_credit_target); in smbd_create_header()
863 cpu_to_le16(manage_credits_prior_sending(info)); in smbd_create_header()
864 info->send_immediate = false; in smbd_create_header()
867 if (manage_keep_alive_before_sending(info)) in smbd_create_header()
879 log_outgoing(INFO, "credits_requested=%d credits_granted=%d " in smbd_create_header()
894 request->sge[0].addr = ib_dma_map_single(info->id->device, in smbd_create_header()
898 if (ib_dma_mapping_error(info->id->device, request->sge[0].addr)) { in smbd_create_header()
899 mempool_free(request, info->request_mempool); in smbd_create_header()
905 request->sge[0].lkey = info->pd->local_dma_lkey; in smbd_create_header()
911 atomic_inc(&info->send_credits); in smbd_create_header()
915 static void smbd_destroy_header(struct smbd_connection *info, in smbd_destroy_header() argument
919 ib_dma_unmap_single(info->id->device, in smbd_destroy_header()
923 mempool_free(request, info->request_mempool); in smbd_destroy_header()
924 atomic_inc(&info->send_credits); in smbd_destroy_header()
928 static int smbd_post_send(struct smbd_connection *info, in smbd_post_send() argument
935 log_rdma_send(INFO, in smbd_post_send()
939 info->id->device, in smbd_post_send()
956 atomic_inc(&info->send_payload_pending); in smbd_post_send()
959 atomic_inc(&info->send_pending); in smbd_post_send()
962 rc = ib_post_send(info->id->qp, &send_wr, NULL); in smbd_post_send()
966 if (atomic_dec_and_test(&info->send_payload_pending)) in smbd_post_send()
967 wake_up(&info->wait_send_payload_pending); in smbd_post_send()
969 if (atomic_dec_and_test(&info->send_pending)) in smbd_post_send()
970 wake_up(&info->wait_send_pending); in smbd_post_send()
972 smbd_disconnect_rdma_connection(info); in smbd_post_send()
976 mod_delayed_work(info->workqueue, &info->idle_timer_work, in smbd_post_send()
977 info->keep_alive_interval*HZ); in smbd_post_send()
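smbd_post_send() raises send_payload_pending (for data packets) or send_pending (for the negotiate request) before ib_post_send(), and the matching decrement, in send_done() or in the error path, wakes sleepers when the counter hits zero. The dec-and-test idiom as a self-contained C11 model (the wakeup is left as a comment):

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_int send_pending;

    /* True when this decrement took the counter to zero, mirroring
     * the kernel's atomic_dec_and_test(). */
    static bool dec_and_test(atomic_int *v)
    {
            return atomic_fetch_sub(v, 1) == 1;
    }

    static void on_send_complete(void)
    {
            if (dec_and_test(&send_pending)) {
                    /* wake_up(&info->wait_send_pending) in the driver */
            }
    }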
982 static int smbd_post_send_sgl(struct smbd_connection *info, in smbd_post_send_sgl() argument
991 info, data_length, remaining_data_length, &request); in smbd_post_send_sgl()
998 ib_dma_map_page(info->id->device, sg_page(sg), in smbd_post_send_sgl()
1001 info->id->device, request->sge[i+1].addr)) { in smbd_post_send_sgl()
1007 request->sge[i+1].lkey = info->pd->local_dma_lkey; in smbd_post_send_sgl()
1011 rc = smbd_post_send(info, request, data_length); in smbd_post_send_sgl()
1018 ib_dma_unmap_single(info->id->device, in smbd_post_send_sgl()
1022 smbd_destroy_header(info, request); in smbd_post_send_sgl()
1033 static int smbd_post_send_page(struct smbd_connection *info, struct page *page, in smbd_post_send_page() argument
1041 return smbd_post_send_sgl(info, &sgl, size, remaining_data_length); in smbd_post_send_page()
1049 static int smbd_post_send_empty(struct smbd_connection *info) in smbd_post_send_empty() argument
1051 info->count_send_empty++; in smbd_post_send_empty()
1052 return smbd_post_send_sgl(info, NULL, 0, 0); in smbd_post_send_empty()
1063 struct smbd_connection *info, struct kvec *iov, int n_vec, in smbd_post_send_data() argument
1081 return smbd_post_send_sgl(info, sgl, data_length, remaining_data_length); in smbd_post_send_data()
1090 struct smbd_connection *info, struct smbd_response *response) in smbd_post_recv() argument
1096 info->id->device, response->packet, in smbd_post_recv()
1097 info->max_receive_size, DMA_FROM_DEVICE); in smbd_post_recv()
1098 if (ib_dma_mapping_error(info->id->device, response->sge.addr)) in smbd_post_recv()
1101 response->sge.length = info->max_receive_size; in smbd_post_recv()
1102 response->sge.lkey = info->pd->local_dma_lkey; in smbd_post_recv()
1111 rc = ib_post_recv(info->id->qp, &recv_wr, NULL); in smbd_post_recv()
1113 ib_dma_unmap_single(info->id->device, response->sge.addr, in smbd_post_recv()
1115 smbd_disconnect_rdma_connection(info); in smbd_post_recv()
1123 static int smbd_negotiate(struct smbd_connection *info) in smbd_negotiate() argument
1126 struct smbd_response *response = get_receive_buffer(info); in smbd_negotiate()
1129 rc = smbd_post_recv(info, response); in smbd_negotiate()
1130 log_rdma_event(INFO, in smbd_negotiate()
1138 init_completion(&info->negotiate_completion); in smbd_negotiate()
1139 info->negotiate_done = false; in smbd_negotiate()
1140 rc = smbd_post_send_negotiate_req(info); in smbd_negotiate()
1145 &info->negotiate_completion, SMBD_NEGOTIATE_TIMEOUT * HZ); in smbd_negotiate()
1146 log_rdma_event(INFO, "wait_for_completion_timeout rc=%d\n", rc); in smbd_negotiate()
1148 if (info->negotiate_done) in smbd_negotiate()
1162 struct smbd_connection *info, struct smbd_response *response) in put_empty_packet() argument
1164 spin_lock(&info->empty_packet_queue_lock); in put_empty_packet()
1165 list_add_tail(&response->list, &info->empty_packet_queue); in put_empty_packet()
1166 info->count_empty_packet_queue++; in put_empty_packet()
1167 spin_unlock(&info->empty_packet_queue_lock); in put_empty_packet()
1169 queue_work(info->workqueue, &info->post_send_credits_work); in put_empty_packet()
1183 struct smbd_connection *info, in enqueue_reassembly() argument
1187 spin_lock(&info->reassembly_queue_lock); in enqueue_reassembly()
1188 list_add_tail(&response->list, &info->reassembly_queue); in enqueue_reassembly()
1189 info->reassembly_queue_length++; in enqueue_reassembly()
1197 info->reassembly_data_length += data_length; in enqueue_reassembly()
1198 spin_unlock(&info->reassembly_queue_lock); in enqueue_reassembly()
1199 info->count_reassembly_queue++; in enqueue_reassembly()
1200 info->count_enqueue_reassembly_queue++; in enqueue_reassembly()
1208 static struct smbd_response *_get_first_reassembly(struct smbd_connection *info) in _get_first_reassembly() argument
1212 if (!list_empty(&info->reassembly_queue)) { in _get_first_reassembly()
1214 &info->reassembly_queue, in _get_first_reassembly()
1221 struct smbd_connection *info) in get_empty_queue_buffer() argument
1226 spin_lock_irqsave(&info->empty_packet_queue_lock, flags); in get_empty_queue_buffer()
1227 if (!list_empty(&info->empty_packet_queue)) { in get_empty_queue_buffer()
1229 &info->empty_packet_queue, in get_empty_queue_buffer()
1232 info->count_empty_packet_queue--; in get_empty_queue_buffer()
1234 spin_unlock_irqrestore(&info->empty_packet_queue_lock, flags); in get_empty_queue_buffer()
1245 static struct smbd_response *get_receive_buffer(struct smbd_connection *info) in get_receive_buffer() argument
1250 spin_lock_irqsave(&info->receive_queue_lock, flags); in get_receive_buffer()
1251 if (!list_empty(&info->receive_queue)) { in get_receive_buffer()
1253 &info->receive_queue, in get_receive_buffer()
1256 info->count_receive_queue--; in get_receive_buffer()
1257 info->count_get_receive_buffer++; in get_receive_buffer()
1259 spin_unlock_irqrestore(&info->receive_queue_lock, flags); in get_receive_buffer()
1271 struct smbd_connection *info, struct smbd_response *response) in put_receive_buffer() argument
1275 ib_dma_unmap_single(info->id->device, response->sge.addr, in put_receive_buffer()
1278 spin_lock_irqsave(&info->receive_queue_lock, flags); in put_receive_buffer()
1279 list_add_tail(&response->list, &info->receive_queue); in put_receive_buffer()
1280 info->count_receive_queue++; in put_receive_buffer()
1281 info->count_put_receive_buffer++; in put_receive_buffer()
1282 spin_unlock_irqrestore(&info->receive_queue_lock, flags); in put_receive_buffer()
1284 queue_work(info->workqueue, &info->post_send_credits_work); in put_receive_buffer()
1288 static int allocate_receive_buffers(struct smbd_connection *info, int num_buf) in allocate_receive_buffers() argument
1293 INIT_LIST_HEAD(&info->reassembly_queue); in allocate_receive_buffers()
1294 spin_lock_init(&info->reassembly_queue_lock); in allocate_receive_buffers()
1295 info->reassembly_data_length = 0; in allocate_receive_buffers()
1296 info->reassembly_queue_length = 0; in allocate_receive_buffers()
1298 INIT_LIST_HEAD(&info->receive_queue); in allocate_receive_buffers()
1299 spin_lock_init(&info->receive_queue_lock); in allocate_receive_buffers()
1300 info->count_receive_queue = 0; in allocate_receive_buffers()
1302 INIT_LIST_HEAD(&info->empty_packet_queue); in allocate_receive_buffers()
1303 spin_lock_init(&info->empty_packet_queue_lock); in allocate_receive_buffers()
1304 info->count_empty_packet_queue = 0; in allocate_receive_buffers()
1306 init_waitqueue_head(&info->wait_receive_queues); in allocate_receive_buffers()
1309 response = mempool_alloc(info->response_mempool, GFP_KERNEL); in allocate_receive_buffers()
1313 response->info = info; in allocate_receive_buffers()
1314 list_add_tail(&response->list, &info->receive_queue); in allocate_receive_buffers()
1315 info->count_receive_queue++; in allocate_receive_buffers()
1321 while (!list_empty(&info->receive_queue)) { in allocate_receive_buffers()
1323 &info->receive_queue, in allocate_receive_buffers()
1326 info->count_receive_queue--; in allocate_receive_buffers()
1328 mempool_free(response, info->response_mempool); in allocate_receive_buffers()
1333 static void destroy_receive_buffers(struct smbd_connection *info) in destroy_receive_buffers() argument
1337 while ((response = get_receive_buffer(info))) in destroy_receive_buffers()
1338 mempool_free(response, info->response_mempool); in destroy_receive_buffers()
1340 while ((response = get_empty_queue_buffer(info))) in destroy_receive_buffers()
1341 mempool_free(response, info->response_mempool); in destroy_receive_buffers()
1352 struct smbd_connection *info = container_of( in send_immediate_work() local
1356 if (info->keep_alive_requested == KEEP_ALIVE_PENDING || in send_immediate_work()
1357 info->send_immediate) { in send_immediate_work()
1358 log_keep_alive(INFO, "send an empty message\n"); in send_immediate_work()
1359 smbd_post_send_empty(info); in send_immediate_work()
1366 struct smbd_connection *info = container_of( in idle_connection_timer() local
1370 if (info->keep_alive_requested != KEEP_ALIVE_NONE) { in idle_connection_timer()
1372 "error status info->keep_alive_requested=%d\n", in idle_connection_timer()
1373 info->keep_alive_requested); in idle_connection_timer()
1374 smbd_disconnect_rdma_connection(info); in idle_connection_timer()
1378 log_keep_alive(INFO, "about to send an empty idle message\n"); in idle_connection_timer()
1379 smbd_post_send_empty(info); in idle_connection_timer()
1382 queue_delayed_work(info->workqueue, &info->idle_timer_work, in idle_connection_timer()
1383 info->keep_alive_interval*HZ); in idle_connection_timer()
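The keep-alive logic threads through several fragments: recv_done() latches KEEP_ALIVE_PENDING when the peer asks for a response, manage_keep_alive_before_sending() downgrades it to KEEP_ALIVE_SENT since any outgoing packet answers it, smbd_post_send() re-arms idle_timer_work, and idle_connection_timer() declares the link dead if the flag never cleared. A compact model of the timer callback (the helper prototypes and the interval value are placeholders):

    enum keep_alive_status { KEEP_ALIVE_NONE, KEEP_ALIVE_PENDING,
                             KEEP_ALIVE_SENT };

    static enum keep_alive_status keep_alive_requested;
    static int keep_alive_interval = 120;     /* seconds; illustrative */

    static void disconnect(void);             /* stubs for the sketch */
    static void post_send_empty(void);
    static void rearm_timer(int seconds);

    static void idle_timer_fired(void)
    {
            /* A keep-alive we owed or sent was never resolved: link dead. */
            if (keep_alive_requested != KEEP_ALIVE_NONE) {
                    disconnect();
                    return;
            }
            /* Idle for a whole interval: probe with an empty message. */
            post_send_empty();
            rearm_timer(keep_alive_interval);
    }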
1393 struct smbd_connection *info = server->smbd_conn; in smbd_destroy() local
1397 if (!info) { in smbd_destroy()
1398 log_rdma_event(INFO, "rdma session already destroyed\n"); in smbd_destroy()
1402 log_rdma_event(INFO, "destroying rdma session\n"); in smbd_destroy()
1403 if (info->transport_status != SMBD_DISCONNECTED) { in smbd_destroy()
1405 log_rdma_event(INFO, "wait for transport being disconnected\n"); in smbd_destroy()
1407 info->disconn_wait, in smbd_destroy()
1408 info->transport_status == SMBD_DISCONNECTED); in smbd_destroy()
1411 log_rdma_event(INFO, "destroying qp\n"); in smbd_destroy()
1412 ib_drain_qp(info->id->qp); in smbd_destroy()
1413 rdma_destroy_qp(info->id); in smbd_destroy()
1415 log_rdma_event(INFO, "cancelling idle timer\n"); in smbd_destroy()
1416 cancel_delayed_work_sync(&info->idle_timer_work); in smbd_destroy()
1417 log_rdma_event(INFO, "cancelling send immediate work\n"); in smbd_destroy()
1418 cancel_delayed_work_sync(&info->send_immediate_work); in smbd_destroy()
1420 log_rdma_event(INFO, "wait for all send posted to IB to finish\n"); in smbd_destroy()
1421 wait_event(info->wait_send_pending, in smbd_destroy()
1422 atomic_read(&info->send_pending) == 0); in smbd_destroy()
1423 wait_event(info->wait_send_payload_pending, in smbd_destroy()
1424 atomic_read(&info->send_payload_pending) == 0); in smbd_destroy()
1427 log_rdma_event(INFO, "drain the reassembly queue\n"); in smbd_destroy()
1429 spin_lock_irqsave(&info->reassembly_queue_lock, flags); in smbd_destroy()
1430 response = _get_first_reassembly(info); in smbd_destroy()
1434 &info->reassembly_queue_lock, flags); in smbd_destroy()
1435 put_receive_buffer(info, response); in smbd_destroy()
1438 &info->reassembly_queue_lock, flags); in smbd_destroy()
1440 info->reassembly_data_length = 0; in smbd_destroy()
1442 log_rdma_event(INFO, "free receive buffers\n"); in smbd_destroy()
1443 wait_event(info->wait_receive_queues, in smbd_destroy()
1444 info->count_receive_queue + info->count_empty_packet_queue in smbd_destroy()
1445 == info->receive_credit_max); in smbd_destroy()
1446 destroy_receive_buffers(info); in smbd_destroy()
1455 log_rdma_event(INFO, "freeing mr list\n"); in smbd_destroy()
1456 wake_up_interruptible_all(&info->wait_mr); in smbd_destroy()
1457 while (atomic_read(&info->mr_used_count)) { in smbd_destroy()
1462 destroy_mr_list(info); in smbd_destroy()
1464 ib_free_cq(info->send_cq); in smbd_destroy()
1465 ib_free_cq(info->recv_cq); in smbd_destroy()
1466 ib_dealloc_pd(info->pd); in smbd_destroy()
1467 rdma_destroy_id(info->id); in smbd_destroy()
1470 mempool_destroy(info->request_mempool); in smbd_destroy()
1471 kmem_cache_destroy(info->request_cache); in smbd_destroy()
1473 mempool_destroy(info->response_mempool); in smbd_destroy()
1474 kmem_cache_destroy(info->response_cache); in smbd_destroy()
1476 info->transport_status = SMBD_DESTROYED; in smbd_destroy()
1478 destroy_workqueue(info->workqueue); in smbd_destroy()
1479 kfree(info); in smbd_destroy()
1488 log_rdma_event(INFO, "reconnecting rdma session\n"); in smbd_reconnect()
1491 log_rdma_event(INFO, "rdma session already destroyed\n"); in smbd_reconnect()
1500 log_rdma_event(INFO, "disconnecting transport\n"); in smbd_reconnect()
1505 log_rdma_event(INFO, "creating rdma session\n"); in smbd_reconnect()
1508 log_rdma_event(INFO, "created rdma session info=%p\n", in smbd_reconnect()
1514 static void destroy_caches_and_workqueue(struct smbd_connection *info) in destroy_caches_and_workqueue() argument
1516 destroy_receive_buffers(info); in destroy_caches_and_workqueue()
1517 destroy_workqueue(info->workqueue); in destroy_caches_and_workqueue()
1518 mempool_destroy(info->response_mempool); in destroy_caches_and_workqueue()
1519 kmem_cache_destroy(info->response_cache); in destroy_caches_and_workqueue()
1520 mempool_destroy(info->request_mempool); in destroy_caches_and_workqueue()
1521 kmem_cache_destroy(info->request_cache); in destroy_caches_and_workqueue()
1525 static int allocate_caches_and_workqueue(struct smbd_connection *info) in allocate_caches_and_workqueue() argument
1530 scnprintf(name, MAX_NAME_LEN, "smbd_request_%p", info); in allocate_caches_and_workqueue()
1531 info->request_cache = in allocate_caches_and_workqueue()
1537 if (!info->request_cache) in allocate_caches_and_workqueue()
1540 info->request_mempool = in allocate_caches_and_workqueue()
1541 mempool_create(info->send_credit_target, mempool_alloc_slab, in allocate_caches_and_workqueue()
1542 mempool_free_slab, info->request_cache); in allocate_caches_and_workqueue()
1543 if (!info->request_mempool) in allocate_caches_and_workqueue()
1546 scnprintf(name, MAX_NAME_LEN, "smbd_response_%p", info); in allocate_caches_and_workqueue()
1547 info->response_cache = in allocate_caches_and_workqueue()
1551 info->max_receive_size, in allocate_caches_and_workqueue()
1553 if (!info->response_cache) in allocate_caches_and_workqueue()
1556 info->response_mempool = in allocate_caches_and_workqueue()
1557 mempool_create(info->receive_credit_max, mempool_alloc_slab, in allocate_caches_and_workqueue()
1558 mempool_free_slab, info->response_cache); in allocate_caches_and_workqueue()
1559 if (!info->response_mempool) in allocate_caches_and_workqueue()
1562 scnprintf(name, MAX_NAME_LEN, "smbd_%p", info); in allocate_caches_and_workqueue()
1563 info->workqueue = create_workqueue(name); in allocate_caches_and_workqueue()
1564 if (!info->workqueue) in allocate_caches_and_workqueue()
1567 rc = allocate_receive_buffers(info, info->receive_credit_max); in allocate_caches_and_workqueue()
1576 destroy_workqueue(info->workqueue); in allocate_caches_and_workqueue()
1578 mempool_destroy(info->response_mempool); in allocate_caches_and_workqueue()
1580 kmem_cache_destroy(info->response_cache); in allocate_caches_and_workqueue()
1582 mempool_destroy(info->request_mempool); in allocate_caches_and_workqueue()
1584 kmem_cache_destroy(info->request_cache); in allocate_caches_and_workqueue()
1593 struct smbd_connection *info; in _smbd_get_connection() local
1600 info = kzalloc(sizeof(struct smbd_connection), GFP_KERNEL); in _smbd_get_connection()
1601 if (!info) in _smbd_get_connection()
1604 info->transport_status = SMBD_CONNECTING; in _smbd_get_connection()
1605 rc = smbd_ia_open(info, dstaddr, port); in _smbd_get_connection()
1607 log_rdma_event(INFO, "smbd_ia_open rc=%d\n", rc); in _smbd_get_connection()
1611 if (smbd_send_credit_target > info->id->device->attrs.max_cqe || in _smbd_get_connection()
1612 smbd_send_credit_target > info->id->device->attrs.max_qp_wr) { in _smbd_get_connection()
1618 info->id->device->attrs.max_cqe, in _smbd_get_connection()
1619 info->id->device->attrs.max_qp_wr); in _smbd_get_connection()
1623 if (smbd_receive_credit_max > info->id->device->attrs.max_cqe || in _smbd_get_connection()
1624 smbd_receive_credit_max > info->id->device->attrs.max_qp_wr) { in _smbd_get_connection()
1630 info->id->device->attrs.max_cqe, in _smbd_get_connection()
1631 info->id->device->attrs.max_qp_wr); in _smbd_get_connection()
1635 info->receive_credit_max = smbd_receive_credit_max; in _smbd_get_connection()
1636 info->send_credit_target = smbd_send_credit_target; in _smbd_get_connection()
1637 info->max_send_size = smbd_max_send_size; in _smbd_get_connection()
1638 info->max_fragmented_recv_size = smbd_max_fragmented_recv_size; in _smbd_get_connection()
1639 info->max_receive_size = smbd_max_receive_size; in _smbd_get_connection()
1640 info->keep_alive_interval = smbd_keep_alive_interval; in _smbd_get_connection()
1642 if (info->id->device->attrs.max_send_sge < SMBDIRECT_MAX_SGE) { in _smbd_get_connection()
1645 info->id->device->attrs.max_send_sge); in _smbd_get_connection()
1648 if (info->id->device->attrs.max_recv_sge < SMBDIRECT_MAX_SGE) { in _smbd_get_connection()
1651 info->id->device->attrs.max_recv_sge); in _smbd_get_connection()
1655 info->send_cq = NULL; in _smbd_get_connection()
1656 info->recv_cq = NULL; in _smbd_get_connection()
1657 info->send_cq = in _smbd_get_connection()
1658 ib_alloc_cq_any(info->id->device, info, in _smbd_get_connection()
1659 info->send_credit_target, IB_POLL_SOFTIRQ); in _smbd_get_connection()
1660 if (IS_ERR(info->send_cq)) { in _smbd_get_connection()
1661 info->send_cq = NULL; in _smbd_get_connection()
1665 info->recv_cq = in _smbd_get_connection()
1666 ib_alloc_cq_any(info->id->device, info, in _smbd_get_connection()
1667 info->receive_credit_max, IB_POLL_SOFTIRQ); in _smbd_get_connection()
1668 if (IS_ERR(info->recv_cq)) { in _smbd_get_connection()
1669 info->recv_cq = NULL; in _smbd_get_connection()
1675 qp_attr.qp_context = info; in _smbd_get_connection()
1676 qp_attr.cap.max_send_wr = info->send_credit_target; in _smbd_get_connection()
1677 qp_attr.cap.max_recv_wr = info->receive_credit_max; in _smbd_get_connection()
1683 qp_attr.send_cq = info->send_cq; in _smbd_get_connection()
1684 qp_attr.recv_cq = info->recv_cq; in _smbd_get_connection()
1687 rc = rdma_create_qp(info->id, info->pd, &qp_attr); in _smbd_get_connection()
1697 info->id->device->attrs.max_qp_rd_atom in _smbd_get_connection()
1699 info->id->device->attrs.max_qp_rd_atom : in _smbd_get_connection()
1701 info->responder_resources = conn_param.responder_resources; in _smbd_get_connection()
1702 log_rdma_mr(INFO, "responder_resources=%d\n", in _smbd_get_connection()
1703 info->responder_resources); in _smbd_get_connection()
1706 info->id->device->ops.get_port_immutable( in _smbd_get_connection()
1707 info->id->device, info->id->port_num, &port_immutable); in _smbd_get_connection()
1709 ird_ord_hdr[0] = info->responder_resources; in _smbd_get_connection()
1722 log_rdma_event(INFO, "connecting to IP %pI4 port %d\n", in _smbd_get_connection()
1725 init_waitqueue_head(&info->conn_wait); in _smbd_get_connection()
1726 init_waitqueue_head(&info->disconn_wait); in _smbd_get_connection()
1727 init_waitqueue_head(&info->wait_reassembly_queue); in _smbd_get_connection()
1728 rc = rdma_connect(info->id, &conn_param); in _smbd_get_connection()
1735 info->conn_wait, info->transport_status != SMBD_CONNECTING); in _smbd_get_connection()
1737 if (info->transport_status != SMBD_CONNECTED) { in _smbd_get_connection()
1742 log_rdma_event(INFO, "rdma_connect connected\n"); in _smbd_get_connection()
1744 rc = allocate_caches_and_workqueue(info); in _smbd_get_connection()
1750 init_waitqueue_head(&info->wait_send_queue); in _smbd_get_connection()
1751 INIT_DELAYED_WORK(&info->idle_timer_work, idle_connection_timer); in _smbd_get_connection()
1752 INIT_DELAYED_WORK(&info->send_immediate_work, send_immediate_work); in _smbd_get_connection()
1753 queue_delayed_work(info->workqueue, &info->idle_timer_work, in _smbd_get_connection()
1754 info->keep_alive_interval*HZ); in _smbd_get_connection()
1756 init_waitqueue_head(&info->wait_send_pending); in _smbd_get_connection()
1757 atomic_set(&info->send_pending, 0); in _smbd_get_connection()
1759 init_waitqueue_head(&info->wait_send_payload_pending); in _smbd_get_connection()
1760 atomic_set(&info->send_payload_pending, 0); in _smbd_get_connection()
1762 INIT_WORK(&info->disconnect_work, smbd_disconnect_rdma_work); in _smbd_get_connection()
1763 INIT_WORK(&info->recv_done_work, smbd_recv_done_work); in _smbd_get_connection()
1764 INIT_WORK(&info->post_send_credits_work, smbd_post_send_credits); in _smbd_get_connection()
1765 info->new_credits_offered = 0; in _smbd_get_connection()
1766 spin_lock_init(&info->lock_new_credits_offered); in _smbd_get_connection()
1768 rc = smbd_negotiate(info); in _smbd_get_connection()
1774 rc = allocate_mr_list(info); in _smbd_get_connection()
1780 return info; in _smbd_get_connection()
1788 cancel_delayed_work_sync(&info->idle_timer_work); in _smbd_get_connection()
1789 destroy_caches_and_workqueue(info); in _smbd_get_connection()
1790 info->transport_status = SMBD_NEGOTIATE_FAILED; in _smbd_get_connection()
1791 init_waitqueue_head(&info->conn_wait); in _smbd_get_connection()
1792 rdma_disconnect(info->id); in _smbd_get_connection()
1793 wait_event(info->conn_wait, in _smbd_get_connection()
1794 info->transport_status == SMBD_DISCONNECTED); in _smbd_get_connection()
1798 rdma_destroy_qp(info->id); in _smbd_get_connection()
1802 if (info->send_cq) in _smbd_get_connection()
1803 ib_free_cq(info->send_cq); in _smbd_get_connection()
1804 if (info->recv_cq) in _smbd_get_connection()
1805 ib_free_cq(info->recv_cq); in _smbd_get_connection()
1808 ib_dealloc_pd(info->pd); in _smbd_get_connection()
1809 rdma_destroy_id(info->id); in _smbd_get_connection()
1812 kfree(info); in _smbd_get_connection()
1846 static int smbd_recv_buf(struct smbd_connection *info, char *buf, in smbd_recv_buf() argument
1861 log_read(INFO, "size=%d info->reassembly_data_length=%d\n", size, in smbd_recv_buf()
1862 info->reassembly_data_length); in smbd_recv_buf()
1863 if (info->reassembly_data_length >= size) { in smbd_recv_buf()
1875 queue_length = info->reassembly_queue_length; in smbd_recv_buf()
1878 offset = info->first_entry_offset; in smbd_recv_buf()
1880 response = _get_first_reassembly(info); in smbd_recv_buf()
1902 log_read(INFO, "returning rfc1002 length %d\n", in smbd_recv_buf()
1924 &info->reassembly_queue_lock); in smbd_recv_buf()
1927 &info->reassembly_queue_lock); in smbd_recv_buf()
1930 info->count_reassembly_queue--; in smbd_recv_buf()
1931 info->count_dequeue_reassembly_queue++; in smbd_recv_buf()
1932 put_receive_buffer(info, response); in smbd_recv_buf()
1934 log_read(INFO, "put_receive_buffer offset=0\n"); in smbd_recv_buf()
1941 log_read(INFO, "_get_first_reassembly memcpy %d bytes " in smbd_recv_buf()
1948 spin_lock_irq(&info->reassembly_queue_lock); in smbd_recv_buf()
1949 info->reassembly_data_length -= data_read; in smbd_recv_buf()
1950 info->reassembly_queue_length -= queue_removed; in smbd_recv_buf()
1951 spin_unlock_irq(&info->reassembly_queue_lock); in smbd_recv_buf()
1953 info->first_entry_offset = offset; in smbd_recv_buf()
1954 log_read(INFO, "returning to thread data_read=%d " in smbd_recv_buf()
1956 data_read, info->reassembly_data_length, in smbd_recv_buf()
1957 info->first_entry_offset); in smbd_recv_buf()
1962 log_read(INFO, "wait_event on more data\n"); in smbd_recv_buf()
1964 info->wait_reassembly_queue, in smbd_recv_buf()
1965 info->reassembly_data_length >= size || in smbd_recv_buf()
1966 info->transport_status != SMBD_CONNECTED); in smbd_recv_buf()
1971 if (info->transport_status != SMBD_CONNECTED) { in smbd_recv_buf()
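smbd_recv_buf() consumes the reassembly queue: starting first_entry_offset bytes into the head packet, it copies up to size bytes across as many packets as needed, recycling each fully drained packet, and finally subtracts what it read from reassembly_data_length under reassembly_queue_lock. The offset arithmetic as a runnable single-threaded model (the fixed-size pkt struct is illustrative; packets carry data_offset/data_length as in the header sketched earlier):

    #include <string.h>

    struct pkt {
            struct pkt *next;
            unsigned int data_offset;     /* payload start within buf */
            unsigned int data_length;     /* payload bytes in this packet */
            unsigned char buf[128];
    };

    /* Copy `size` bytes from the queue into `out`, starting
     * `*first_offset` bytes into the head packet.  The caller
     * guarantees enough queued data, as the driver does by waiting
     * on reassembly_data_length >= size. */
    int reassembly_read(struct pkt **head, unsigned int *first_offset,
                        unsigned char *out, unsigned int size)
    {
            unsigned int data_read = 0, offset = *first_offset;

            while (data_read < size) {
                    struct pkt *p = *head;
                    unsigned int avail = p->data_length - offset;
                    unsigned int to_copy = size - data_read;

                    if (to_copy > avail)
                            to_copy = avail;
                    memcpy(out + data_read,
                           p->buf + p->data_offset + offset, to_copy);
                    data_read += to_copy;

                    if (to_copy == avail) {       /* packet fully consumed */
                            *head = p->next;      /* put_receive_buffer(p) */
                            offset = 0;
                    } else {
                            offset += to_copy;
                    }
            }
            *first_offset = offset;
            return (int)data_read;
    }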
1985 static int smbd_recv_page(struct smbd_connection *info, in smbd_recv_page() argument
1995 info->wait_reassembly_queue, in smbd_recv_page()
1996 info->reassembly_data_length >= to_read || in smbd_recv_page()
1997 info->transport_status != SMBD_CONNECTED); in smbd_recv_page()
2005 log_read(INFO, "reading from page=%p address=%p to_read=%d\n", in smbd_recv_page()
2008 ret = smbd_recv_buf(info, to_address, to_read); in smbd_recv_page()
2019 int smbd_recv(struct smbd_connection *info, struct msghdr *msg) in smbd_recv() argument
2038 rc = smbd_recv_buf(info, buf, to_read); in smbd_recv()
2045 rc = smbd_recv_page(info, page, page_offset, to_read); in smbd_recv()
2071 struct smbd_connection *info = server->smbd_conn; in smbd_send() local
2078 info->max_send_size - sizeof(struct smbd_data_transfer); in smbd_send()
2084 if (info->transport_status != SMBD_CONNECTED) { in smbd_send()
2099 info->max_fragmented_send_size) { in smbd_send()
2101 remaining_data_length, info->max_fragmented_send_size); in smbd_send()
2106 log_write(INFO, "num_rqst=%d total length=%u\n", in smbd_send()
2120 log_write(INFO, "rqst_idx=%d nvec=%d rqst->rq_npages=%d rq_pagesz=%d " in smbd_send()
2133 log_write(INFO, "sending iov[] from start=%d " in smbd_send()
2139 info, &iov[start], i-start, in smbd_send()
2146 log_write(INFO, "iov[%d] iov_base=%p buflen=%d" in smbd_send()
2160 log_write(INFO, in smbd_send()
2167 info, &vec, 1, in smbd_send()
2183 log_write(INFO, in smbd_send()
2188 rc = smbd_post_send_data(info, &iov[start], in smbd_send()
2195 log_write(INFO, "looping i=%d buflen=%d\n", i, buflen); in smbd_send()
2204 log_write(INFO, "sending pages buflen=%d nvecs=%d\n", in smbd_send()
2211 log_write(INFO, "sending pages i=%d offset=%d size=%d" in smbd_send()
2216 info, rqst->rq_pages[i], in smbd_send()
2236 wait_event(info->wait_send_payload_pending, in smbd_send()
2237 atomic_read(&info->send_payload_pending) == 0); in smbd_send()
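smbd_send() first rejects requests larger than max_fragmented_send_size, then walks the iovecs and pages, emitting packets whose payload fits in max_send_size minus the data-transfer header while decrementing remaining_data_length as it goes, and finally waits for send_payload_pending to drain. The fragmentation bookkeeping alone (the 20-byte header size comes from the struct sketched earlier; all numbers illustrative):

    #include <stdio.h>

    int main(void)
    {
            unsigned int max_send_size = 1364;
            unsigned int hdr = 20;        /* sizeof(struct smbd_data_transfer) */
            unsigned int max_payload = max_send_size - hdr;

            unsigned int total = 70000;   /* bytes to send */
            unsigned int remaining = total;

            while (remaining) {
                    unsigned int chunk = remaining < max_payload
                                            ? remaining : max_payload;
                    remaining -= chunk;
                    /* Each packet advertises how much is still to come. */
                    printf("packet: data_length=%u remaining_data_length=%u\n",
                           chunk, remaining);
            }
            return 0;
    }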
2266 struct smbd_connection *info = in smbd_mr_recovery_work() local
2271 list_for_each_entry(smbdirect_mr, &info->mr_list, list) { in smbd_mr_recovery_work()
2274 info->id->device, smbdirect_mr->sgl, in smbd_mr_recovery_work()
2285 smbd_disconnect_rdma_connection(info); in smbd_mr_recovery_work()
2290 info->pd, info->mr_type, in smbd_mr_recovery_work()
2291 info->max_frmr_depth); in smbd_mr_recovery_work()
2296 info->mr_type, in smbd_mr_recovery_work()
2297 info->max_frmr_depth); in smbd_mr_recovery_work()
2298 smbd_disconnect_rdma_connection(info); in smbd_mr_recovery_work()
2314 if (atomic_inc_return(&info->mr_ready_count) == 1) in smbd_mr_recovery_work()
2315 wake_up_interruptible(&info->wait_mr); in smbd_mr_recovery_work()
2319 static void destroy_mr_list(struct smbd_connection *info) in destroy_mr_list() argument
2323 cancel_work_sync(&info->mr_recovery_work); in destroy_mr_list()
2324 list_for_each_entry_safe(mr, tmp, &info->mr_list, list) { in destroy_mr_list()
2326 ib_dma_unmap_sg(info->id->device, mr->sgl, in destroy_mr_list()
2341 static int allocate_mr_list(struct smbd_connection *info) in allocate_mr_list() argument
2346 INIT_LIST_HEAD(&info->mr_list); in allocate_mr_list()
2347 init_waitqueue_head(&info->wait_mr); in allocate_mr_list()
2348 spin_lock_init(&info->mr_list_lock); in allocate_mr_list()
2349 atomic_set(&info->mr_ready_count, 0); in allocate_mr_list()
2350 atomic_set(&info->mr_used_count, 0); in allocate_mr_list()
2351 init_waitqueue_head(&info->wait_for_mr_cleanup); in allocate_mr_list()
2353 for (i = 0; i < info->responder_resources * 2; i++) { in allocate_mr_list()
2357 smbdirect_mr->mr = ib_alloc_mr(info->pd, info->mr_type, in allocate_mr_list()
2358 info->max_frmr_depth); in allocate_mr_list()
2362 info->mr_type, info->max_frmr_depth); in allocate_mr_list()
2366 info->max_frmr_depth, in allocate_mr_list()
2375 smbdirect_mr->conn = info; in allocate_mr_list()
2377 list_add_tail(&smbdirect_mr->list, &info->mr_list); in allocate_mr_list()
2378 atomic_inc(&info->mr_ready_count); in allocate_mr_list()
2380 INIT_WORK(&info->mr_recovery_work, smbd_mr_recovery_work); in allocate_mr_list()
2386 list_for_each_entry_safe(smbdirect_mr, tmp, &info->mr_list, list) { in allocate_mr_list()
2402 static struct smbd_mr *get_mr(struct smbd_connection *info) in get_mr() argument
2407 rc = wait_event_interruptible(info->wait_mr, in get_mr()
2408 atomic_read(&info->mr_ready_count) || in get_mr()
2409 info->transport_status != SMBD_CONNECTED); in get_mr()
2415 if (info->transport_status != SMBD_CONNECTED) { in get_mr()
2416 log_rdma_mr(ERR, "info->transport_status=%x\n", in get_mr()
2417 info->transport_status); in get_mr()
2421 spin_lock(&info->mr_list_lock); in get_mr()
2422 list_for_each_entry(ret, &info->mr_list, list) { in get_mr()
2425 spin_unlock(&info->mr_list_lock); in get_mr()
2426 atomic_dec(&info->mr_ready_count); in get_mr()
2427 atomic_inc(&info->mr_used_count); in get_mr()
2432 spin_unlock(&info->mr_list_lock); in get_mr()
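get_mr() draws from the pool built by allocate_mr_list() (two MRs per responder resource): it sleeps until mr_ready_count is nonzero or the transport dies, then scans mr_list for a ready MR and moves one credit from mr_ready_count to mr_used_count. A single-threaded model of the scan; the MR_READY/MR_REGISTERED state names are assumptions, since the state check itself is cut from the fragments:

    #include <stddef.h>

    enum mr_state { MR_READY, MR_REGISTERED };    /* assumed names */

    struct pool_mr {
            struct pool_mr *next;
            enum mr_state state;
    };

    static int mr_ready_count, mr_used_count;

    static struct pool_mr *get_mr(struct pool_mr *list)
    {
            struct pool_mr *mr;

            /* Driver: wait_event_interruptible(info->wait_mr,
             *         mr_ready_count || transport no longer connected) */
            for (mr = list; mr; mr = mr->next) {
                    if (mr->state == MR_READY) {
                            mr->state = MR_REGISTERED;
                            mr_ready_count--;   /* atomic_dec in the driver */
                            mr_used_count++;    /* atomic_inc in the driver */
                            return mr;
                    }
            }
            return NULL;    /* no ready MR found: error path */
    }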
2450 struct smbd_connection *info, struct page *pages[], int num_pages, in smbd_register_mr() argument
2458 if (num_pages > info->max_frmr_depth) { in smbd_register_mr()
2460 num_pages, info->max_frmr_depth); in smbd_register_mr()
2464 smbdirect_mr = get_mr(info); in smbd_register_mr()
2473 log_rdma_mr(INFO, "num_pages=0x%x offset=0x%x tailsz=0x%x\n", in smbd_register_mr()
2495 rc = ib_dma_map_sg(info->id->device, smbdirect_mr->sgl, num_pages, dir); in smbd_register_mr()
2530 rc = ib_post_send(info->id->qp, ®_wr->wr, NULL); in smbd_register_mr()
2539 ib_dma_unmap_sg(info->id->device, smbdirect_mr->sgl, in smbd_register_mr()
2544 if (atomic_dec_and_test(&info->mr_used_count)) in smbd_register_mr()
2545 wake_up(&info->wait_for_mr_cleanup); in smbd_register_mr()
2547 smbd_disconnect_rdma_connection(info); in smbd_register_mr()
2576 struct smbd_connection *info = smbdirect_mr->conn; in smbd_deregister_mr() local
2590 rc = ib_post_send(info->id->qp, wr, NULL); in smbd_deregister_mr()
2593 smbd_disconnect_rdma_connection(info); in smbd_deregister_mr()
2609 queue_work(info->workqueue, &info->mr_recovery_work); in smbd_deregister_mr()
2612 if (atomic_dec_and_test(&info->mr_used_count)) in smbd_deregister_mr()
2613 wake_up(&info->wait_for_mr_cleanup); in smbd_deregister_mr()