1 // SPDX-License-Identifier: GPL-2.0-or-later
60 /* SMBD minimum receive size and fragmented size defined in [MS-SMBD] */
72 /* No need to retry on Receiver Not Ready since SMBD manages credits */
77 * as defined in [MS-SMBD] 3.1.1.1
89 /* The maximum fragmented upper-layer payload receive size supported */
92 /* The maximum single-message size which can be received */
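A payload larger than the single-message receive size arrives as a chain of SMBD data transfer messages that the receiver reassembles; the fragmented receive size above caps the reassembled total, while the single-message size caps each individual receive posted to the queue pair.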
163 if (info->transport_status == SMBD_CONNECTED) { in smbd_disconnect_rdma_work()
164 info->transport_status = SMBD_DISCONNECTING; in smbd_disconnect_rdma_work()
165 rdma_disconnect(info->id); in smbd_disconnect_rdma_work()
171 queue_work(info->workqueue, &info->disconnect_work); in smbd_disconnect_rdma_connection()
178 struct smbd_connection *info = id->context; in smbd_conn_upcall()
181 event->event, event->status); in smbd_conn_upcall()
183 switch (event->event) { in smbd_conn_upcall()
186 info->ri_rc = 0; in smbd_conn_upcall()
187 complete(&info->ri_done); in smbd_conn_upcall()
191 info->ri_rc = -EHOSTUNREACH; in smbd_conn_upcall()
192 complete(&info->ri_done); in smbd_conn_upcall()
196 info->ri_rc = -ENETUNREACH; in smbd_conn_upcall()
197 complete(&info->ri_done); in smbd_conn_upcall()
201 log_rdma_event(INFO, "connected event=%d\n", event->event); in smbd_conn_upcall()
202 info->transport_status = SMBD_CONNECTED; in smbd_conn_upcall()
203 wake_up_interruptible(&info->conn_wait); in smbd_conn_upcall()
209 log_rdma_event(INFO, "connecting failed event=%d\n", event->event); in smbd_conn_upcall()
210 info->transport_status = SMBD_DISCONNECTED; in smbd_conn_upcall()
211 wake_up_interruptible(&info->conn_wait); in smbd_conn_upcall()
217 if (info->transport_status == SMBD_NEGOTIATE_FAILED) { in smbd_conn_upcall()
218 info->transport_status = SMBD_DISCONNECTED; in smbd_conn_upcall()
219 wake_up(&info->conn_wait); in smbd_conn_upcall()
223 info->transport_status = SMBD_DISCONNECTED; in smbd_conn_upcall()
224 wake_up_interruptible(&info->disconn_wait); in smbd_conn_upcall()
225 wake_up_interruptible(&info->wait_reassembly_queue); in smbd_conn_upcall()
226 wake_up_interruptible_all(&info->wait_send_queue); in smbd_conn_upcall()
243 ib_event_msg(event->event), event->device->name, info); in smbd_qp_async_error_upcall()
245 switch (event->event) { in smbd_qp_async_error_upcall()
258 return (void *)request->packet; in smbd_request_payload()
263 return (void *)response->packet; in smbd_response_payload()
267 static void send_done(struct ib_cq *cq, struct ib_wc *wc) in send_done() argument
271 container_of(wc->wr_cqe, struct smbd_request, cqe); in send_done()
273 log_rdma_send(INFO, "smbd_request %p completed wc->status=%d\n", in send_done()
274 request, wc->status); in send_done()
276 if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_SEND) { in send_done()
277 log_rdma_send(ERR, "wc->status=%d wc->opcode=%d\n", in send_done()
278 wc->status, wc->opcode); in send_done()
279 smbd_disconnect_rdma_connection(request->info); in send_done()
282 for (i = 0; i < request->num_sge; i++) in send_done()
283 ib_dma_unmap_single(request->info->id->device, in send_done()
284 request->sge[i].addr, in send_done()
285 request->sge[i].length, in send_done()
288 if (atomic_dec_and_test(&request->info->send_pending)) in send_done()
289 wake_up(&request->info->wait_send_pending); in send_done()
291 wake_up(&request->info->wait_post_send); in send_done()
293 mempool_free(request, request->info->request_mempool); in send_done()
299 resp->min_version, resp->max_version, in dump_smbd_negotiate_resp()
300 resp->negotiated_version, resp->credits_requested, in dump_smbd_negotiate_resp()
301 resp->credits_granted, resp->status, in dump_smbd_negotiate_resp()
302 resp->max_readwrite_size, resp->preferred_send_size, in dump_smbd_negotiate_resp()
303 resp->max_receive_size, resp->max_fragmented_size); in dump_smbd_negotiate_resp()
307 * Process a negotiation response message, according to [MS-SMBD] 3.1.5.7
314 struct smbd_connection *info = response->info; in process_negotiation_response()
323 if (le16_to_cpu(packet->negotiated_version) != SMBD_V1) { in process_negotiation_response()
325 le16_to_cpu(packet->negotiated_version)); in process_negotiation_response()
328 info->protocol = le16_to_cpu(packet->negotiated_version); in process_negotiation_response()
330 if (packet->credits_requested == 0) { in process_negotiation_response()
334 info->receive_credit_target = le16_to_cpu(packet->credits_requested); in process_negotiation_response()
336 if (packet->credits_granted == 0) { in process_negotiation_response()
340 atomic_set(&info->send_credits, le16_to_cpu(packet->credits_granted)); in process_negotiation_response()
342 atomic_set(&info->receive_credits, 0); in process_negotiation_response()
344 if (le32_to_cpu(packet->preferred_send_size) > info->max_receive_size) { in process_negotiation_response()
346 le32_to_cpu(packet->preferred_send_size)); in process_negotiation_response()
349 info->max_receive_size = le32_to_cpu(packet->preferred_send_size); in process_negotiation_response()
351 if (le32_to_cpu(packet->max_receive_size) < SMBD_MIN_RECEIVE_SIZE) { in process_negotiation_response()
353 le32_to_cpu(packet->max_receive_size)); in process_negotiation_response()
356 info->max_send_size = min_t(int, info->max_send_size, in process_negotiation_response()
357 le32_to_cpu(packet->max_receive_size)); in process_negotiation_response()
359 if (le32_to_cpu(packet->max_fragmented_size) < in process_negotiation_response()
362 le32_to_cpu(packet->max_fragmented_size)); in process_negotiation_response()
365 info->max_fragmented_send_size = in process_negotiation_response()
366 le32_to_cpu(packet->max_fragmented_size); in process_negotiation_response()
367 info->rdma_readwrite_threshold = in process_negotiation_response()
368 rdma_readwrite_threshold > info->max_fragmented_send_size ? in process_negotiation_response()
369 info->max_fragmented_send_size : in process_negotiation_response()
373 info->max_readwrite_size = min_t(u32, in process_negotiation_response()
374 le32_to_cpu(packet->max_readwrite_size), in process_negotiation_response()
375 info->max_frmr_depth * PAGE_SIZE); in process_negotiation_response()
376 info->max_frmr_depth = info->max_readwrite_size / PAGE_SIZE; in process_negotiation_response()
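A minimal sketch of how the rdma_readwrite_threshold negotiated above is typically consulted by callers; the helper name below is hypothetical and not part of this file.

static bool smbd_use_rdma_readwrite(struct smbd_connection *info,
				    u32 payload_len)
{
	/*
	 * Payloads at or above the threshold go through registered memory
	 * (RDMA read/write) instead of inline send/receive.
	 */
	return payload_len >= info->rdma_readwrite_threshold;
}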
391 if (info->transport_status != SMBD_CONNECTED) { in smbd_post_send_credits()
392 wake_up(&info->wait_receive_queues); in smbd_post_send_credits()
396 if (info->receive_credit_target > in smbd_post_send_credits()
397 atomic_read(&info->receive_credits)) { in smbd_post_send_credits()
412 response->type = SMBD_TRANSFER_DATA; in smbd_post_send_credits()
413 response->first_segment = false; in smbd_post_send_credits()
426 spin_lock(&info->lock_new_credits_offered); in smbd_post_send_credits()
427 info->new_credits_offered += ret; in smbd_post_send_credits()
428 spin_unlock(&info->lock_new_credits_offered); in smbd_post_send_credits()
430 /* Promptly send an immediate packet as defined in [MS-SMBD] 3.1.1.1 */ in smbd_post_send_credits()
431 info->send_immediate = true; in smbd_post_send_credits()
432 if (atomic_read(&info->receive_credits) < in smbd_post_send_credits()
433 info->receive_credit_target - 1) { in smbd_post_send_credits()
434 if (info->keep_alive_requested == KEEP_ALIVE_PENDING || in smbd_post_send_credits()
435 info->send_immediate) { in smbd_post_send_credits()
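As receive buffers are reposted above, the number of newly available credits accumulates in new_credits_offered; once outstanding receive credits fall below receive_credit_target - 1 (or a keep-alive is pending), send_immediate is set so a credit-only packet goes out without waiting for upper-layer data, as [MS-SMBD] 3.1.1.1 requires.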
443 static void recv_done(struct ib_cq *cq, struct ib_wc *wc) in recv_done() argument
447 container_of(wc->wr_cqe, struct smbd_response, cqe); in recv_done()
448 struct smbd_connection *info = response->info; in recv_done()
451 log_rdma_recv(INFO, "response=%p type=%d wc status=%d wc opcode %d byte_len=%d pkey_index=%x\n", in recv_done()
452 response, response->type, wc->status, wc->opcode, in recv_done()
453 wc->byte_len, wc->pkey_index); in recv_done()
455 if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_RECV) { in recv_done()
456 log_rdma_recv(INFO, "wc->status=%d opcode=%d\n", in recv_done()
457 wc->status, wc->opcode); in recv_done()
463 wc->qp->device, in recv_done()
464 response->sge.addr, in recv_done()
465 response->sge.length, in recv_done()
468 switch (response->type) { in recv_done()
472 info->full_packet_received = true; in recv_done()
473 info->negotiate_done = in recv_done()
474 process_negotiation_response(response, wc->byte_len); in recv_done()
475 complete(&info->negotiate_completion); in recv_done()
481 data_length = le32_to_cpu(data_transfer->data_length); in recv_done()
488 if (info->full_packet_received) in recv_done()
489 response->first_segment = true; in recv_done()
491 if (le32_to_cpu(data_transfer->remaining_data_length)) in recv_done()
492 info->full_packet_received = false; in recv_done()
494 info->full_packet_received = true; in recv_done()
504 wake_up_interruptible(&info->wait_reassembly_queue); in recv_done()
506 atomic_dec(&info->receive_credits); in recv_done()
507 info->receive_credit_target = in recv_done()
508 le16_to_cpu(data_transfer->credits_requested); in recv_done()
509 if (le16_to_cpu(data_transfer->credits_granted)) { in recv_done()
510 atomic_add(le16_to_cpu(data_transfer->credits_granted), in recv_done()
511 &info->send_credits); in recv_done()
516 wake_up_interruptible(&info->wait_send_queue); in recv_done()
520 le16_to_cpu(data_transfer->flags), in recv_done()
521 le32_to_cpu(data_transfer->data_offset), in recv_done()
522 le32_to_cpu(data_transfer->data_length), in recv_done()
523 le32_to_cpu(data_transfer->remaining_data_length)); in recv_done()
526 info->keep_alive_requested = KEEP_ALIVE_NONE; in recv_done()
527 if (le16_to_cpu(data_transfer->flags) & in recv_done()
529 info->keep_alive_requested = KEEP_ALIVE_PENDING; in recv_done()
536 "unexpected response type=%d\n", response->type); in recv_done()
559 if (dstaddr->sa_family == AF_INET6) in smbd_create_id()
560 sport = &((struct sockaddr_in6 *)dstaddr)->sin6_port; in smbd_create_id()
562 sport = &((struct sockaddr_in *)dstaddr)->sin_port; in smbd_create_id()
566 init_completion(&info->ri_done); in smbd_create_id()
567 info->ri_rc = -ETIMEDOUT; in smbd_create_id()
576 &info->ri_done, msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT)); in smbd_create_id()
577 /* e.g. if interrupted returns -ERESTARTSYS */ in smbd_create_id()
582 rc = info->ri_rc; in smbd_create_id()
588 info->ri_rc = -ETIMEDOUT; in smbd_create_id()
595 &info->ri_done, msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT)); in smbd_create_id()
596 /* e.g. if interrupted returns -ERESTARTSYS */ in smbd_create_id()
601 rc = info->ri_rc; in smbd_create_id()
621 if (!(attrs->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS)) in frwr_is_supported()
623 if (attrs->max_fast_reg_page_list_len == 0) in frwr_is_supported()
634 info->id = smbd_create_id(info, dstaddr, port); in smbd_ia_open()
635 if (IS_ERR(info->id)) { in smbd_ia_open()
636 rc = PTR_ERR(info->id); in smbd_ia_open()
640 if (!frwr_is_supported(&info->id->device->attrs)) { in smbd_ia_open()
643 info->id->device->attrs.device_cap_flags, in smbd_ia_open()
644 info->id->device->attrs.max_fast_reg_page_list_len); in smbd_ia_open()
645 rc = -EPROTONOSUPPORT; in smbd_ia_open()
648 info->max_frmr_depth = min_t(int, in smbd_ia_open()
650 info->id->device->attrs.max_fast_reg_page_list_len); in smbd_ia_open()
651 info->mr_type = IB_MR_TYPE_MEM_REG; in smbd_ia_open()
652 if (info->id->device->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG) in smbd_ia_open()
653 info->mr_type = IB_MR_TYPE_SG_GAPS; in smbd_ia_open()
655 info->pd = ib_alloc_pd(info->id->device, 0); in smbd_ia_open()
656 if (IS_ERR(info->pd)) { in smbd_ia_open()
657 rc = PTR_ERR(info->pd); in smbd_ia_open()
665 rdma_destroy_id(info->id); in smbd_ia_open()
666 info->id = NULL; in smbd_ia_open()
674 * The negotiation procedure is in [MS-SMBD] 3.1.5.2 and 3.1.5.3
681 int rc = -ENOMEM; in smbd_post_send_negotiate_req()
685 request = mempool_alloc(info->request_mempool, GFP_KERNEL); in smbd_post_send_negotiate_req()
689 request->info = info; in smbd_post_send_negotiate_req()
692 packet->min_version = cpu_to_le16(SMBD_V1); in smbd_post_send_negotiate_req()
693 packet->max_version = cpu_to_le16(SMBD_V1); in smbd_post_send_negotiate_req()
694 packet->reserved = 0; in smbd_post_send_negotiate_req()
695 packet->credits_requested = cpu_to_le16(info->send_credit_target); in smbd_post_send_negotiate_req()
696 packet->preferred_send_size = cpu_to_le32(info->max_send_size); in smbd_post_send_negotiate_req()
697 packet->max_receive_size = cpu_to_le32(info->max_receive_size); in smbd_post_send_negotiate_req()
698 packet->max_fragmented_size = in smbd_post_send_negotiate_req()
699 cpu_to_le32(info->max_fragmented_recv_size); in smbd_post_send_negotiate_req()
701 request->num_sge = 1; in smbd_post_send_negotiate_req()
702 request->sge[0].addr = ib_dma_map_single( in smbd_post_send_negotiate_req()
703 info->id->device, (void *)packet, in smbd_post_send_negotiate_req()
705 if (ib_dma_mapping_error(info->id->device, request->sge[0].addr)) { in smbd_post_send_negotiate_req()
706 rc = -EIO; in smbd_post_send_negotiate_req()
710 request->sge[0].length = sizeof(*packet); in smbd_post_send_negotiate_req()
711 request->sge[0].lkey = info->pd->local_dma_lkey; in smbd_post_send_negotiate_req()
714 info->id->device, request->sge[0].addr, in smbd_post_send_negotiate_req()
715 request->sge[0].length, DMA_TO_DEVICE); in smbd_post_send_negotiate_req()
717 request->cqe.done = send_done; in smbd_post_send_negotiate_req()
720 send_wr.wr_cqe = &request->cqe; in smbd_post_send_negotiate_req()
721 send_wr.sg_list = request->sge; in smbd_post_send_negotiate_req()
722 send_wr.num_sge = request->num_sge; in smbd_post_send_negotiate_req()
727 request->sge[0].addr, in smbd_post_send_negotiate_req()
728 request->sge[0].length, request->sge[0].lkey); in smbd_post_send_negotiate_req()
730 atomic_inc(&info->send_pending); in smbd_post_send_negotiate_req()
731 rc = ib_post_send(info->id->qp, &send_wr, NULL); in smbd_post_send_negotiate_req()
737 atomic_dec(&info->send_pending); in smbd_post_send_negotiate_req()
738 ib_dma_unmap_single(info->id->device, request->sge[0].addr, in smbd_post_send_negotiate_req()
739 request->sge[0].length, DMA_TO_DEVICE); in smbd_post_send_negotiate_req()
744 mempool_free(request, info->request_mempool); in smbd_post_send_negotiate_req()
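This request is the first send posted after rdma_connect() succeeds; smbd_negotiate() below first posts a receive for the SMBD_NEGOTIATE_RESP reply, which recv_done() then hands to process_negotiation_response() to fix up the connection's credit and size parameters.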
750 * This implements [MS-SMBD] 3.1.5.9
760 spin_lock(&info->lock_new_credits_offered); in manage_credits_prior_sending()
761 new_credits = info->new_credits_offered; in manage_credits_prior_sending()
762 info->new_credits_offered = 0; in manage_credits_prior_sending()
763 spin_unlock(&info->lock_new_credits_offered); in manage_credits_prior_sending()
779 if (info->keep_alive_requested == KEEP_ALIVE_PENDING) { in manage_keep_alive_before_sending()
780 info->keep_alive_requested = KEEP_ALIVE_SENT; in manage_keep_alive_before_sending()
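The credit count drained here ends up in the credits_granted field of the next outgoing packet (see smbd_post_send_sgl() below, where it is also added to receive_credits), so the peer learns how many additional sends it may issue; if, say, ten receive buffers were reposted since the last send, the next packet grants ten credits.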
793 for (i = 0; i < request->num_sge; i++) { in smbd_post_send()
796 i, request->sge[i].addr, request->sge[i].length); in smbd_post_send()
798 info->id->device, in smbd_post_send()
799 request->sge[i].addr, in smbd_post_send()
800 request->sge[i].length, in smbd_post_send()
804 request->cqe.done = send_done; in smbd_post_send()
807 send_wr.wr_cqe = &request->cqe; in smbd_post_send()
808 send_wr.sg_list = request->sge; in smbd_post_send()
809 send_wr.num_sge = request->num_sge; in smbd_post_send()
813 rc = ib_post_send(info->id->qp, &send_wr, NULL); in smbd_post_send()
817 rc = -EAGAIN; in smbd_post_send()
820 mod_delayed_work(info->workqueue, &info->idle_timer_work, in smbd_post_send()
821 info->keep_alive_interval*HZ); in smbd_post_send()
839 rc = wait_event_interruptible(info->wait_send_queue, in smbd_post_send_sgl()
840 atomic_read(&info->send_credits) > 0 || in smbd_post_send_sgl()
841 info->transport_status != SMBD_CONNECTED); in smbd_post_send_sgl()
845 if (info->transport_status != SMBD_CONNECTED) { in smbd_post_send_sgl()
847 rc = -EAGAIN; in smbd_post_send_sgl()
850 if (unlikely(atomic_dec_return(&info->send_credits) < 0)) { in smbd_post_send_sgl()
851 atomic_inc(&info->send_credits); in smbd_post_send_sgl()
856 wait_event(info->wait_post_send, in smbd_post_send_sgl()
857 atomic_read(&info->send_pending) < info->send_credit_target || in smbd_post_send_sgl()
858 info->transport_status != SMBD_CONNECTED); in smbd_post_send_sgl()
860 if (info->transport_status != SMBD_CONNECTED) { in smbd_post_send_sgl()
862 rc = -EAGAIN; in smbd_post_send_sgl()
866 if (unlikely(atomic_inc_return(&info->send_pending) > in smbd_post_send_sgl()
867 info->send_credit_target)) { in smbd_post_send_sgl()
868 atomic_dec(&info->send_pending); in smbd_post_send_sgl()
872 request = mempool_alloc(info->request_mempool, GFP_KERNEL); in smbd_post_send_sgl()
874 rc = -ENOMEM; in smbd_post_send_sgl()
878 request->info = info; in smbd_post_send_sgl()
882 packet->credits_requested = cpu_to_le16(info->send_credit_target); in smbd_post_send_sgl()
885 atomic_add(new_credits, &info->receive_credits); in smbd_post_send_sgl()
886 packet->credits_granted = cpu_to_le16(new_credits); in smbd_post_send_sgl()
888 info->send_immediate = false; in smbd_post_send_sgl()
890 packet->flags = 0; in smbd_post_send_sgl()
892 packet->flags |= cpu_to_le16(SMB_DIRECT_RESPONSE_REQUESTED); in smbd_post_send_sgl()
894 packet->reserved = 0; in smbd_post_send_sgl()
896 packet->data_offset = 0; in smbd_post_send_sgl()
898 packet->data_offset = cpu_to_le32(24); in smbd_post_send_sgl()
899 packet->data_length = cpu_to_le32(data_length); in smbd_post_send_sgl()
900 packet->remaining_data_length = cpu_to_le32(remaining_data_length); in smbd_post_send_sgl()
901 packet->padding = 0; in smbd_post_send_sgl()
904 le16_to_cpu(packet->credits_requested), in smbd_post_send_sgl()
905 le16_to_cpu(packet->credits_granted), in smbd_post_send_sgl()
906 le32_to_cpu(packet->data_offset), in smbd_post_send_sgl()
907 le32_to_cpu(packet->data_length), in smbd_post_send_sgl()
908 le32_to_cpu(packet->remaining_data_length)); in smbd_post_send_sgl()
916 request->num_sge = 1; in smbd_post_send_sgl()
917 request->sge[0].addr = ib_dma_map_single(info->id->device, in smbd_post_send_sgl()
921 if (ib_dma_mapping_error(info->id->device, request->sge[0].addr)) { in smbd_post_send_sgl()
922 rc = -EIO; in smbd_post_send_sgl()
923 request->sge[0].addr = 0; in smbd_post_send_sgl()
927 request->sge[0].length = header_length; in smbd_post_send_sgl()
928 request->sge[0].lkey = info->pd->local_dma_lkey; in smbd_post_send_sgl()
933 request->sge[i+1].addr = in smbd_post_send_sgl()
934 ib_dma_map_page(info->id->device, sg_page(sg), in smbd_post_send_sgl()
935 sg->offset, sg->length, DMA_TO_DEVICE); in smbd_post_send_sgl()
937 info->id->device, request->sge[i+1].addr)) { in smbd_post_send_sgl()
938 rc = -EIO; in smbd_post_send_sgl()
939 request->sge[i+1].addr = 0; in smbd_post_send_sgl()
942 request->sge[i+1].length = sg->length; in smbd_post_send_sgl()
943 request->sge[i+1].lkey = info->pd->local_dma_lkey; in smbd_post_send_sgl()
944 request->num_sge++; in smbd_post_send_sgl()
952 for (i = 0; i < request->num_sge; i++) in smbd_post_send_sgl()
953 if (request->sge[i].addr) in smbd_post_send_sgl()
954 ib_dma_unmap_single(info->id->device, in smbd_post_send_sgl()
955 request->sge[i].addr, in smbd_post_send_sgl()
956 request->sge[i].length, in smbd_post_send_sgl()
958 mempool_free(request, info->request_mempool); in smbd_post_send_sgl()
961 spin_lock(&info->lock_new_credits_offered); in smbd_post_send_sgl()
962 info->new_credits_offered += new_credits; in smbd_post_send_sgl()
963 spin_unlock(&info->lock_new_credits_offered); in smbd_post_send_sgl()
964 atomic_sub(new_credits, &info->receive_credits); in smbd_post_send_sgl()
967 if (atomic_dec_and_test(&info->send_pending)) in smbd_post_send_sgl()
968 wake_up(&info->wait_send_pending); in smbd_post_send_sgl()
972 atomic_inc(&info->send_credits); in smbd_post_send_sgl()
999 * while there is no upper layer payload to send at the time
1003 info->count_send_empty++; in smbd_post_send_empty()
1024 return -EINVAL; in smbd_post_send_data()
1045 int rc = -EIO; in smbd_post_recv()
1047 response->sge.addr = ib_dma_map_single( in smbd_post_recv()
1048 info->id->device, response->packet, in smbd_post_recv()
1049 info->max_receive_size, DMA_FROM_DEVICE); in smbd_post_recv()
1050 if (ib_dma_mapping_error(info->id->device, response->sge.addr)) in smbd_post_recv()
1053 response->sge.length = info->max_receive_size; in smbd_post_recv()
1054 response->sge.lkey = info->pd->local_dma_lkey; in smbd_post_recv()
1056 response->cqe.done = recv_done; in smbd_post_recv()
1058 recv_wr.wr_cqe = &response->cqe; in smbd_post_recv()
1060 recv_wr.sg_list = &response->sge; in smbd_post_recv()
1063 rc = ib_post_recv(info->id->qp, &recv_wr, NULL); in smbd_post_recv()
1065 ib_dma_unmap_single(info->id->device, response->sge.addr, in smbd_post_recv()
1066 response->sge.length, DMA_FROM_DEVICE); in smbd_post_recv()
1074 /* Perform SMBD negotiate according to [MS-SMBD] 3.1.5.2 */
1080 response->type = SMBD_NEGOTIATE_RESP; in smbd_negotiate()
1083 rc, response->sge.addr, in smbd_negotiate()
1084 response->sge.length, response->sge.lkey); in smbd_negotiate()
1088 init_completion(&info->negotiate_completion); in smbd_negotiate()
1089 info->negotiate_done = false; in smbd_negotiate()
1095 &info->negotiate_completion, SMBD_NEGOTIATE_TIMEOUT * HZ); in smbd_negotiate()
1098 if (info->negotiate_done) in smbd_negotiate()
1102 rc = -ETIMEDOUT; in smbd_negotiate()
1103 else if (rc == -ERESTARTSYS) in smbd_negotiate()
1104 rc = -EINTR; in smbd_negotiate()
1106 rc = -ENOTCONN; in smbd_negotiate()
1114 spin_lock(&info->empty_packet_queue_lock); in put_empty_packet()
1115 list_add_tail(&response->list, &info->empty_packet_queue); in put_empty_packet()
1116 info->count_empty_packet_queue++; in put_empty_packet()
1117 spin_unlock(&info->empty_packet_queue_lock); in put_empty_packet()
1119 queue_work(info->workqueue, &info->post_send_credits_work); in put_empty_packet()
1123 * Implement Connection.FragmentReassemblyBuffer defined in [MS-SMBD] 3.1.1.1
1137 spin_lock(&info->reassembly_queue_lock); in enqueue_reassembly()
1138 list_add_tail(&response->list, &info->reassembly_queue); in enqueue_reassembly()
1139 info->reassembly_queue_length++; in enqueue_reassembly()
1147 info->reassembly_data_length += data_length; in enqueue_reassembly()
1148 spin_unlock(&info->reassembly_queue_lock); in enqueue_reassembly()
1149 info->count_reassembly_queue++; in enqueue_reassembly()
1150 info->count_enqueue_reassembly_queue++; in enqueue_reassembly()
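The invariant maintained here is that reassembly_data_length is the running total of payload bytes sitting on the reassembly queue; smbd_recv_buf() below only starts copying once that total covers the caller's requested size.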
1162 if (!list_empty(&info->reassembly_queue)) { in _get_first_reassembly()
1164 &info->reassembly_queue, in _get_first_reassembly()
1176 spin_lock_irqsave(&info->empty_packet_queue_lock, flags); in get_empty_queue_buffer()
1177 if (!list_empty(&info->empty_packet_queue)) { in get_empty_queue_buffer()
1179 &info->empty_packet_queue, in get_empty_queue_buffer()
1181 list_del(&ret->list); in get_empty_queue_buffer()
1182 info->count_empty_packet_queue--; in get_empty_queue_buffer()
1184 spin_unlock_irqrestore(&info->empty_packet_queue_lock, flags); in get_empty_queue_buffer()
1192 * pre-allocated in advance.
1200 spin_lock_irqsave(&info->receive_queue_lock, flags); in get_receive_buffer()
1201 if (!list_empty(&info->receive_queue)) { in get_receive_buffer()
1203 &info->receive_queue, in get_receive_buffer()
1205 list_del(&ret->list); in get_receive_buffer()
1206 info->count_receive_queue--; in get_receive_buffer()
1207 info->count_get_receive_buffer++; in get_receive_buffer()
1209 spin_unlock_irqrestore(&info->receive_queue_lock, flags); in get_receive_buffer()
1225 ib_dma_unmap_single(info->id->device, response->sge.addr, in put_receive_buffer()
1226 response->sge.length, DMA_FROM_DEVICE); in put_receive_buffer()
1228 spin_lock_irqsave(&info->receive_queue_lock, flags); in put_receive_buffer()
1229 list_add_tail(&response->list, &info->receive_queue); in put_receive_buffer()
1230 info->count_receive_queue++; in put_receive_buffer()
1231 info->count_put_receive_buffer++; in put_receive_buffer()
1232 spin_unlock_irqrestore(&info->receive_queue_lock, flags); in put_receive_buffer()
1234 queue_work(info->workqueue, &info->post_send_credits_work); in put_receive_buffer()
1243 INIT_LIST_HEAD(&info->reassembly_queue); in allocate_receive_buffers()
1244 spin_lock_init(&info->reassembly_queue_lock); in allocate_receive_buffers()
1245 info->reassembly_data_length = 0; in allocate_receive_buffers()
1246 info->reassembly_queue_length = 0; in allocate_receive_buffers()
1248 INIT_LIST_HEAD(&info->receive_queue); in allocate_receive_buffers()
1249 spin_lock_init(&info->receive_queue_lock); in allocate_receive_buffers()
1250 info->count_receive_queue = 0; in allocate_receive_buffers()
1252 INIT_LIST_HEAD(&info->empty_packet_queue); in allocate_receive_buffers()
1253 spin_lock_init(&info->empty_packet_queue_lock); in allocate_receive_buffers()
1254 info->count_empty_packet_queue = 0; in allocate_receive_buffers()
1256 init_waitqueue_head(&info->wait_receive_queues); in allocate_receive_buffers()
1259 response = mempool_alloc(info->response_mempool, GFP_KERNEL); in allocate_receive_buffers()
1263 response->info = info; in allocate_receive_buffers()
1264 list_add_tail(&response->list, &info->receive_queue); in allocate_receive_buffers()
1265 info->count_receive_queue++; in allocate_receive_buffers()
1271 while (!list_empty(&info->receive_queue)) { in allocate_receive_buffers()
1273 &info->receive_queue, in allocate_receive_buffers()
1275 list_del(&response->list); in allocate_receive_buffers()
1276 info->count_receive_queue--; in allocate_receive_buffers()
1278 mempool_free(response, info->response_mempool); in allocate_receive_buffers()
1280 return -ENOMEM; in allocate_receive_buffers()
1288 mempool_free(response, info->response_mempool); in destroy_receive_buffers()
1291 mempool_free(response, info->response_mempool); in destroy_receive_buffers()
1294 /* Implement idle connection timer [MS-SMBD] 3.1.6.2 */
1301 if (info->keep_alive_requested != KEEP_ALIVE_NONE) { in idle_connection_timer()
1303 "error status info->keep_alive_requested=%d\n", in idle_connection_timer()
1304 info->keep_alive_requested); in idle_connection_timer()
1313 queue_delayed_work(info->workqueue, &info->idle_timer_work, in idle_connection_timer()
1314 info->keep_alive_interval*HZ); in idle_connection_timer()
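If the timer fires while a keep-alive request is still outstanding, the connection is treated as dead and torn down; otherwise an empty keep-alive message is sent and the work re-queues itself as shown. Every ordinary send also pushes the deadline forward through the mod_delayed_work() call in smbd_post_send() above.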
1318 * Destroy the transport and related RDMA and memory resources
1324 struct smbd_connection *info = server->smbd_conn; in smbd_destroy()
1334 if (info->transport_status != SMBD_DISCONNECTED) { in smbd_destroy()
1335 rdma_disconnect(server->smbd_conn->id); in smbd_destroy()
1338 info->disconn_wait, in smbd_destroy()
1339 info->transport_status == SMBD_DISCONNECTED); in smbd_destroy()
1343 ib_drain_qp(info->id->qp); in smbd_destroy()
1344 rdma_destroy_qp(info->id); in smbd_destroy()
1347 cancel_delayed_work_sync(&info->idle_timer_work); in smbd_destroy()
1350 wait_event(info->wait_send_pending, in smbd_destroy()
1351 atomic_read(&info->send_pending) == 0); in smbd_destroy()
1356 spin_lock_irqsave(&info->reassembly_queue_lock, flags); in smbd_destroy()
1359 list_del(&response->list); in smbd_destroy()
1361 &info->reassembly_queue_lock, flags); in smbd_destroy()
1365 &info->reassembly_queue_lock, flags); in smbd_destroy()
1367 info->reassembly_data_length = 0; in smbd_destroy()
1370 wait_event(info->wait_receive_queues, in smbd_destroy()
1371 info->count_receive_queue + info->count_empty_packet_queue in smbd_destroy()
1372 == info->receive_credit_max); in smbd_destroy()
1376 * For performance reasons, memory registration and deregistration in smbd_destroy()
1378 * blocked on transport srv_mutex while holding memory registration. in smbd_destroy()
1380 * path when sending data, and then release memory registrations. in smbd_destroy()
1383 wake_up_interruptible_all(&info->wait_mr); in smbd_destroy()
1384 while (atomic_read(&info->mr_used_count)) { in smbd_destroy()
1385 mutex_unlock(&server->srv_mutex); in smbd_destroy()
1387 mutex_lock(&server->srv_mutex); in smbd_destroy()
1391 ib_free_cq(info->send_cq); in smbd_destroy()
1392 ib_free_cq(info->recv_cq); in smbd_destroy()
1393 ib_dealloc_pd(info->pd); in smbd_destroy()
1394 rdma_destroy_id(info->id); in smbd_destroy()
1397 mempool_destroy(info->request_mempool); in smbd_destroy()
1398 kmem_cache_destroy(info->request_cache); in smbd_destroy()
1400 mempool_destroy(info->response_mempool); in smbd_destroy()
1401 kmem_cache_destroy(info->response_cache); in smbd_destroy()
1403 info->transport_status = SMBD_DESTROYED; in smbd_destroy()
1405 destroy_workqueue(info->workqueue); in smbd_destroy()
1418 if (!server->smbd_conn) { in smbd_reconnect()
1427 if (server->smbd_conn->transport_status == SMBD_CONNECTED) { in smbd_reconnect()
1434 server->smbd_conn = smbd_get_connection( in smbd_reconnect()
1435 server, (struct sockaddr *) &server->dstaddr); in smbd_reconnect()
1437 if (server->smbd_conn) in smbd_reconnect()
1438 cifs_dbg(VFS, "RDMA transport re-established\n"); in smbd_reconnect()
1440 return server->smbd_conn ? 0 : -ENOENT; in smbd_reconnect()
1446 destroy_workqueue(info->workqueue); in destroy_caches_and_workqueue()
1447 mempool_destroy(info->response_mempool); in destroy_caches_and_workqueue()
1448 kmem_cache_destroy(info->response_cache); in destroy_caches_and_workqueue()
1449 mempool_destroy(info->request_mempool); in destroy_caches_and_workqueue()
1450 kmem_cache_destroy(info->request_cache); in destroy_caches_and_workqueue()
1460 info->request_cache = in allocate_caches_and_workqueue()
1466 if (!info->request_cache) in allocate_caches_and_workqueue()
1467 return -ENOMEM; in allocate_caches_and_workqueue()
1469 info->request_mempool = in allocate_caches_and_workqueue()
1470 mempool_create(info->send_credit_target, mempool_alloc_slab, in allocate_caches_and_workqueue()
1471 mempool_free_slab, info->request_cache); in allocate_caches_and_workqueue()
1472 if (!info->request_mempool) in allocate_caches_and_workqueue()
1476 info->response_cache = in allocate_caches_and_workqueue()
1480 info->max_receive_size, in allocate_caches_and_workqueue()
1482 if (!info->response_cache) in allocate_caches_and_workqueue()
1485 info->response_mempool = in allocate_caches_and_workqueue()
1486 mempool_create(info->receive_credit_max, mempool_alloc_slab, in allocate_caches_and_workqueue()
1487 mempool_free_slab, info->response_cache); in allocate_caches_and_workqueue()
1488 if (!info->response_mempool) in allocate_caches_and_workqueue()
1492 info->workqueue = create_workqueue(name); in allocate_caches_and_workqueue()
1493 if (!info->workqueue) in allocate_caches_and_workqueue()
1496 rc = allocate_receive_buffers(info, info->receive_credit_max); in allocate_caches_and_workqueue()
1505 destroy_workqueue(info->workqueue); in allocate_caches_and_workqueue()
1507 mempool_destroy(info->response_mempool); in allocate_caches_and_workqueue()
1509 kmem_cache_destroy(info->response_cache); in allocate_caches_and_workqueue()
1511 mempool_destroy(info->request_mempool); in allocate_caches_and_workqueue()
1513 kmem_cache_destroy(info->request_cache); in allocate_caches_and_workqueue()
1514 return -ENOMEM; in allocate_caches_and_workqueue()
1533 info->transport_status = SMBD_CONNECTING; in _smbd_get_connection()
1540 if (smbd_send_credit_target > info->id->device->attrs.max_cqe || in _smbd_get_connection()
1541 smbd_send_credit_target > info->id->device->attrs.max_qp_wr) { in _smbd_get_connection()
1544 info->id->device->attrs.max_cqe, in _smbd_get_connection()
1545 info->id->device->attrs.max_qp_wr); in _smbd_get_connection()
1549 if (smbd_receive_credit_max > info->id->device->attrs.max_cqe || in _smbd_get_connection()
1550 smbd_receive_credit_max > info->id->device->attrs.max_qp_wr) { in _smbd_get_connection()
1553 info->id->device->attrs.max_cqe, in _smbd_get_connection()
1554 info->id->device->attrs.max_qp_wr); in _smbd_get_connection()
1558 info->receive_credit_max = smbd_receive_credit_max; in _smbd_get_connection()
1559 info->send_credit_target = smbd_send_credit_target; in _smbd_get_connection()
1560 info->max_send_size = smbd_max_send_size; in _smbd_get_connection()
1561 info->max_fragmented_recv_size = smbd_max_fragmented_recv_size; in _smbd_get_connection()
1562 info->max_receive_size = smbd_max_receive_size; in _smbd_get_connection()
1563 info->keep_alive_interval = smbd_keep_alive_interval; in _smbd_get_connection()
1565 if (info->id->device->attrs.max_send_sge < SMBDIRECT_MAX_SGE) { in _smbd_get_connection()
1568 info->id->device->attrs.max_send_sge); in _smbd_get_connection()
1571 if (info->id->device->attrs.max_recv_sge < SMBDIRECT_MAX_SGE) { in _smbd_get_connection()
1574 info->id->device->attrs.max_recv_sge); in _smbd_get_connection()
1578 info->send_cq = NULL; in _smbd_get_connection()
1579 info->recv_cq = NULL; in _smbd_get_connection()
1580 info->send_cq = in _smbd_get_connection()
1581 ib_alloc_cq_any(info->id->device, info, in _smbd_get_connection()
1582 info->send_credit_target, IB_POLL_SOFTIRQ); in _smbd_get_connection()
1583 if (IS_ERR(info->send_cq)) { in _smbd_get_connection()
1584 info->send_cq = NULL; in _smbd_get_connection()
1588 info->recv_cq = in _smbd_get_connection()
1589 ib_alloc_cq_any(info->id->device, info, in _smbd_get_connection()
1590 info->receive_credit_max, IB_POLL_SOFTIRQ); in _smbd_get_connection()
1591 if (IS_ERR(info->recv_cq)) { in _smbd_get_connection()
1592 info->recv_cq = NULL; in _smbd_get_connection()
1599 qp_attr.cap.max_send_wr = info->send_credit_target; in _smbd_get_connection()
1600 qp_attr.cap.max_recv_wr = info->receive_credit_max; in _smbd_get_connection()
1606 qp_attr.send_cq = info->send_cq; in _smbd_get_connection()
1607 qp_attr.recv_cq = info->recv_cq; in _smbd_get_connection()
1610 rc = rdma_create_qp(info->id, info->pd, &qp_attr); in _smbd_get_connection()
1620 info->id->device->attrs.max_qp_rd_atom in _smbd_get_connection()
1622 info->id->device->attrs.max_qp_rd_atom : in _smbd_get_connection()
1624 info->responder_resources = conn_param.responder_resources; in _smbd_get_connection()
1626 info->responder_resources); in _smbd_get_connection()
1629 info->id->device->ops.get_port_immutable( in _smbd_get_connection()
1630 info->id->device, info->id->port_num, &port_immutable); in _smbd_get_connection()
1632 ird_ord_hdr[0] = info->responder_resources; in _smbd_get_connection()
1646 &addr_in->sin_addr, port); in _smbd_get_connection()
1648 init_waitqueue_head(&info->conn_wait); in _smbd_get_connection()
1649 init_waitqueue_head(&info->disconn_wait); in _smbd_get_connection()
1650 init_waitqueue_head(&info->wait_reassembly_queue); in _smbd_get_connection()
1651 rc = rdma_connect(info->id, &conn_param); in _smbd_get_connection()
1658 info->conn_wait, info->transport_status != SMBD_CONNECTING); in _smbd_get_connection()
1660 if (info->transport_status != SMBD_CONNECTED) { in _smbd_get_connection()
1673 init_waitqueue_head(&info->wait_send_queue); in _smbd_get_connection()
1674 INIT_DELAYED_WORK(&info->idle_timer_work, idle_connection_timer); in _smbd_get_connection()
1675 queue_delayed_work(info->workqueue, &info->idle_timer_work, in _smbd_get_connection()
1676 info->keep_alive_interval*HZ); in _smbd_get_connection()
1678 init_waitqueue_head(&info->wait_send_pending); in _smbd_get_connection()
1679 atomic_set(&info->send_pending, 0); in _smbd_get_connection()
1681 init_waitqueue_head(&info->wait_post_send); in _smbd_get_connection()
1683 INIT_WORK(&info->disconnect_work, smbd_disconnect_rdma_work); in _smbd_get_connection()
1684 INIT_WORK(&info->post_send_credits_work, smbd_post_send_credits); in _smbd_get_connection()
1685 info->new_credits_offered = 0; in _smbd_get_connection()
1686 spin_lock_init(&info->lock_new_credits_offered); in _smbd_get_connection()
1696 log_rdma_mr(ERR, "memory registration allocation failed\n"); in _smbd_get_connection()
1708 cancel_delayed_work_sync(&info->idle_timer_work); in _smbd_get_connection()
1710 info->transport_status = SMBD_NEGOTIATE_FAILED; in _smbd_get_connection()
1711 init_waitqueue_head(&info->conn_wait); in _smbd_get_connection()
1712 rdma_disconnect(info->id); in _smbd_get_connection()
1713 wait_event(info->conn_wait, in _smbd_get_connection()
1714 info->transport_status == SMBD_DISCONNECTED); in _smbd_get_connection()
1718 rdma_destroy_qp(info->id); in _smbd_get_connection()
1722 if (info->send_cq) in _smbd_get_connection()
1723 ib_free_cq(info->send_cq); in _smbd_get_connection()
1724 if (info->recv_cq) in _smbd_get_connection()
1725 ib_free_cq(info->recv_cq); in _smbd_get_connection()
1728 ib_dealloc_pd(info->pd); in _smbd_get_connection()
1729 rdma_destroy_id(info->id); in _smbd_get_connection()
1777 * No need to hold the reassembly queue lock all the time as we are in smbd_recv_buf()
1781 log_read(INFO, "size=%d info->reassembly_data_length=%d\n", size, in smbd_recv_buf()
1782 info->reassembly_data_length); in smbd_recv_buf()
1783 if (info->reassembly_data_length >= size) { in smbd_recv_buf()
1795 queue_length = info->reassembly_queue_length; in smbd_recv_buf()
1798 offset = info->first_entry_offset; in smbd_recv_buf()
1802 data_length = le32_to_cpu(data_transfer->data_length); in smbd_recv_buf()
1805 data_transfer->remaining_data_length); in smbd_recv_buf()
1806 data_offset = le32_to_cpu(data_transfer->data_offset); in smbd_recv_buf()
1816 if (response->first_segment && size == 4) { in smbd_recv_buf()
1821 response->first_segment = false; in smbd_recv_buf()
1827 to_copy = min_t(int, data_length - offset, to_read); in smbd_recv_buf()
1834 if (to_copy == data_length - offset) { in smbd_recv_buf()
1835 queue_length--; in smbd_recv_buf()
1837 * No need to lock if we are not at the in smbd_recv_buf()
1841 list_del(&response->list); in smbd_recv_buf()
1844 &info->reassembly_queue_lock); in smbd_recv_buf()
1845 list_del(&response->list); in smbd_recv_buf()
1847 &info->reassembly_queue_lock); in smbd_recv_buf()
1850 info->count_reassembly_queue--; in smbd_recv_buf()
1851 info->count_dequeue_reassembly_queue++; in smbd_recv_buf()
1858 to_read -= to_copy; in smbd_recv_buf()
1861 …log_read(INFO, "_get_first_reassembly memcpy %d bytes data_transfer_length-offset=%d after that to… in smbd_recv_buf()
1862 to_copy, data_length - offset, in smbd_recv_buf()
1866 spin_lock_irq(&info->reassembly_queue_lock); in smbd_recv_buf()
1867 info->reassembly_data_length -= data_read; in smbd_recv_buf()
1868 info->reassembly_queue_length -= queue_removed; in smbd_recv_buf()
1869 spin_unlock_irq(&info->reassembly_queue_lock); in smbd_recv_buf()
1871 info->first_entry_offset = offset; in smbd_recv_buf()
1873 data_read, info->reassembly_data_length, in smbd_recv_buf()
1874 info->first_entry_offset); in smbd_recv_buf()
1881 info->wait_reassembly_queue, in smbd_recv_buf()
1882 info->reassembly_data_length >= size || in smbd_recv_buf()
1883 info->transport_status != SMBD_CONNECTED); in smbd_recv_buf()
1888 if (info->transport_status != SMBD_CONNECTED) { in smbd_recv_buf()
1890 return -ECONNABORTED; in smbd_recv_buf()
1912 info->wait_reassembly_queue, in smbd_recv_page()
1913 info->reassembly_data_length >= to_read || in smbd_recv_page()
1914 info->transport_status != SMBD_CONNECTED); in smbd_recv_page()
1943 if (iov_iter_rw(&msg->msg_iter) == WRITE) { in smbd_recv()
1946 iov_iter_rw(&msg->msg_iter)); in smbd_recv()
1947 rc = -EINVAL; in smbd_recv()
1951 switch (iov_iter_type(&msg->msg_iter)) { in smbd_recv()
1953 buf = msg->msg_iter.kvec->iov_base; in smbd_recv()
1954 to_read = msg->msg_iter.kvec->iov_len; in smbd_recv()
1959 page = msg->msg_iter.bvec->bv_page; in smbd_recv()
1960 page_offset = msg->msg_iter.bvec->bv_offset; in smbd_recv()
1961 to_read = msg->msg_iter.bvec->bv_len; in smbd_recv()
1968 iov_iter_type(&msg->msg_iter)); in smbd_recv()
1969 rc = -EINVAL; in smbd_recv()
1975 msg->msg_iter.count = 0; in smbd_recv()
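A hedged usage sketch of the receive path above: a hypothetical helper (not part of this file) that pulls a 4-byte length header off the reassembly queue through smbd_recv() with a kvec-backed msghdr, i.e. the ITER_KVEC case handled in the switch.

static int smbd_read_length(struct smbd_connection *info, __be32 *len)
{
	struct kvec iov = {
		.iov_base = len,
		.iov_len = sizeof(*len),
	};
	struct msghdr msg = { };

	/* Direction READ: smbd_recv() rejects WRITE iterators. */
	iov_iter_kvec(&msg.msg_iter, READ, &iov, 1, iov.iov_len);
	return smbd_recv(info, &msg);
}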
1988 struct smbd_connection *info = server->smbd_conn; in smbd_send()
1995 info->max_send_size - sizeof(struct smbd_data_transfer); in smbd_send()
2001 if (info->transport_status != SMBD_CONNECTED) { in smbd_send()
2002 rc = -EAGAIN; in smbd_send()
2015 if (remaining_data_length > info->max_fragmented_send_size) { in smbd_send()
2017 remaining_data_length, info->max_fragmented_send_size); in smbd_send()
2018 rc = -EINVAL; in smbd_send()
2028 iov = rqst->rq_iov; in smbd_send()
2032 for (i = 0; i < rqst->rq_nvec; i++) in smbd_send()
2036 log_write(INFO, "rqst_idx=%d nvec=%d rqst->rq_npages=%d rq_pagesz=%d rq_tailsz=%d buflen=%lu\n", in smbd_send()
2037 rqst_idx, rqst->rq_nvec, rqst->rq_npages, rqst->rq_pagesz, in smbd_send()
2038 rqst->rq_tailsz, smb_rqst_len(server, rqst)); in smbd_send()
2046 remaining_data_length -= in smbd_send()
2047 (buflen-iov[i].iov_len); in smbd_send()
2049 start, i, i - start, in smbd_send()
2052 info, &iov[start], i-start, in smbd_send()
2058 nvecs = (buflen+max_iov_size-1)/max_iov_size; in smbd_send()
2067 if (j == nvecs-1) in smbd_send()
2069 buflen - in smbd_send()
2070 max_iov_size*(nvecs-1); in smbd_send()
2071 remaining_data_length -= vec.iov_len; in smbd_send()
2083 if (i == rqst->rq_nvec) in smbd_send()
2090 if (i == rqst->rq_nvec) { in smbd_send()
2092 remaining_data_length -= buflen; in smbd_send()
2094 start, i, i - start, in smbd_send()
2097 i-start, remaining_data_length); in smbd_send()
2107 for (i = 0; i < rqst->rq_npages; i++) { in smbd_send()
2111 nvecs = (buflen + max_iov_size - 1) / max_iov_size; in smbd_send()
2116 if (j == nvecs-1) in smbd_send()
2117 size = buflen - j*max_iov_size; in smbd_send()
2118 remaining_data_length -= size; in smbd_send()
2123 info, rqst->rq_pages[i], in smbd_send()
2143 wait_event(info->wait_send_pending, in smbd_send()
2144 atomic_read(&info->send_pending) == 0); in smbd_send()
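Each send carries at most max_send_size minus the 24-byte smbd_data_transfer header (data_offset is set to 24 above), so a buffer of buflen bytes is split into (buflen + max_iov_size - 1) / max_iov_size sends. Assuming the default 1364-byte send size, for example, each message holds up to 1340 payload bytes and an 8192-byte page goes out as 7 sends.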
2149 static void register_mr_done(struct ib_cq *cq, struct ib_wc *wc) in register_mr_done() argument
2154 if (wc->status) { in register_mr_done()
2155 log_rdma_mr(ERR, "status=%d\n", wc->status); in register_mr_done()
2156 cqe = wc->wr_cqe; in register_mr_done()
2158 smbd_disconnect_rdma_connection(mr->conn); in register_mr_done()
2167 * There is one workqueue that recovers MRs, there is no need to lock as the
2178 list_for_each_entry(smbdirect_mr, &info->mr_list, list) { in smbd_mr_recovery_work()
2179 if (smbdirect_mr->state == MR_ERROR) { in smbd_mr_recovery_work()
2182 rc = ib_dereg_mr(smbdirect_mr->mr); in smbd_mr_recovery_work()
2191 smbdirect_mr->mr = ib_alloc_mr( in smbd_mr_recovery_work()
2192 info->pd, info->mr_type, in smbd_mr_recovery_work()
2193 info->max_frmr_depth); in smbd_mr_recovery_work()
2194 if (IS_ERR(smbdirect_mr->mr)) { in smbd_mr_recovery_work()
2196 info->mr_type, in smbd_mr_recovery_work()
2197 info->max_frmr_depth); in smbd_mr_recovery_work()
2205 smbdirect_mr->state = MR_READY; in smbd_mr_recovery_work()
2207 /* smbdirect_mr->state is updated by this function in smbd_mr_recovery_work()
2210 * implicates a memory barrier and guarantees this in smbd_mr_recovery_work()
2214 if (atomic_inc_return(&info->mr_ready_count) == 1) in smbd_mr_recovery_work()
2215 wake_up_interruptible(&info->wait_mr); in smbd_mr_recovery_work()
2223 cancel_work_sync(&info->mr_recovery_work); in destroy_mr_list()
2224 list_for_each_entry_safe(mr, tmp, &info->mr_list, list) { in destroy_mr_list()
2225 if (mr->state == MR_INVALIDATED) in destroy_mr_list()
2226 ib_dma_unmap_sg(info->id->device, mr->sgl, in destroy_mr_list()
2227 mr->sgl_count, mr->dir); in destroy_mr_list()
2228 ib_dereg_mr(mr->mr); in destroy_mr_list()
2229 kfree(mr->sgl); in destroy_mr_list()
2246 INIT_LIST_HEAD(&info->mr_list); in allocate_mr_list()
2247 init_waitqueue_head(&info->wait_mr); in allocate_mr_list()
2248 spin_lock_init(&info->mr_list_lock); in allocate_mr_list()
2249 atomic_set(&info->mr_ready_count, 0); in allocate_mr_list()
2250 atomic_set(&info->mr_used_count, 0); in allocate_mr_list()
2251 init_waitqueue_head(&info->wait_for_mr_cleanup); in allocate_mr_list()
2253 for (i = 0; i < info->responder_resources * 2; i++) { in allocate_mr_list()
2257 smbdirect_mr->mr = ib_alloc_mr(info->pd, info->mr_type, in allocate_mr_list()
2258 info->max_frmr_depth); in allocate_mr_list()
2259 if (IS_ERR(smbdirect_mr->mr)) { in allocate_mr_list()
2261 info->mr_type, info->max_frmr_depth); in allocate_mr_list()
2264 smbdirect_mr->sgl = kcalloc( in allocate_mr_list()
2265 info->max_frmr_depth, in allocate_mr_list()
2268 if (!smbdirect_mr->sgl) { in allocate_mr_list()
2270 ib_dereg_mr(smbdirect_mr->mr); in allocate_mr_list()
2273 smbdirect_mr->state = MR_READY; in allocate_mr_list()
2274 smbdirect_mr->conn = info; in allocate_mr_list()
2276 list_add_tail(&smbdirect_mr->list, &info->mr_list); in allocate_mr_list()
2277 atomic_inc(&info->mr_ready_count); in allocate_mr_list()
2279 INIT_WORK(&info->mr_recovery_work, smbd_mr_recovery_work); in allocate_mr_list()
2285 list_for_each_entry_safe(smbdirect_mr, tmp, &info->mr_list, list) { in allocate_mr_list()
2286 ib_dereg_mr(smbdirect_mr->mr); in allocate_mr_list()
2287 kfree(smbdirect_mr->sgl); in allocate_mr_list()
2290 return -ENOMEM; in allocate_mr_list()
2306 rc = wait_event_interruptible(info->wait_mr, in get_mr()
2307 atomic_read(&info->mr_ready_count) || in get_mr()
2308 info->transport_status != SMBD_CONNECTED); in get_mr()
2314 if (info->transport_status != SMBD_CONNECTED) { in get_mr()
2315 log_rdma_mr(ERR, "info->transport_status=%x\n", in get_mr()
2316 info->transport_status); in get_mr()
2320 spin_lock(&info->mr_list_lock); in get_mr()
2321 list_for_each_entry(ret, &info->mr_list, list) { in get_mr()
2322 if (ret->state == MR_READY) { in get_mr()
2323 ret->state = MR_REGISTERED; in get_mr()
2324 spin_unlock(&info->mr_list_lock); in get_mr()
2325 atomic_dec(&info->mr_ready_count); in get_mr()
2326 atomic_inc(&info->mr_used_count); in get_mr()
2331 spin_unlock(&info->mr_list_lock); in get_mr()
2340 * Register memory for RDMA read/write
2341 * pages[]: the list of pages to register memory with
2343 * tailsz: if non-zero, the bytes to register in the last page
2357 if (num_pages > info->max_frmr_depth) { in smbd_register_mr()
2359 num_pages, info->max_frmr_depth); in smbd_register_mr()
2368 smbdirect_mr->need_invalidate = need_invalidate; in smbd_register_mr()
2369 smbdirect_mr->sgl_count = num_pages; in smbd_register_mr()
2370 sg_init_table(smbdirect_mr->sgl, num_pages); in smbd_register_mr()
2376 sg_set_page(&smbdirect_mr->sgl[0], pages[0], tailsz, offset); in smbd_register_mr()
2382 &smbdirect_mr->sgl[0], pages[0], PAGE_SIZE - offset, offset); in smbd_register_mr()
2384 while (i < num_pages - 1) { in smbd_register_mr()
2385 sg_set_page(&smbdirect_mr->sgl[i], pages[i], PAGE_SIZE, 0); in smbd_register_mr()
2388 sg_set_page(&smbdirect_mr->sgl[i], pages[i], in smbd_register_mr()
2393 smbdirect_mr->dir = dir; in smbd_register_mr()
2394 rc = ib_dma_map_sg(info->id->device, smbdirect_mr->sgl, num_pages, dir); in smbd_register_mr()
2401 rc = ib_map_mr_sg(smbdirect_mr->mr, smbdirect_mr->sgl, num_pages, in smbd_register_mr()
2410 ib_update_fast_reg_key(smbdirect_mr->mr, in smbd_register_mr()
2411 ib_inc_rkey(smbdirect_mr->mr->rkey)); in smbd_register_mr()
2412 reg_wr = &smbdirect_mr->wr; in smbd_register_mr()
2413 reg_wr->wr.opcode = IB_WR_REG_MR; in smbd_register_mr()
2414 smbdirect_mr->cqe.done = register_mr_done; in smbd_register_mr()
2415 reg_wr->wr.wr_cqe = &smbdirect_mr->cqe; in smbd_register_mr()
2416 reg_wr->wr.num_sge = 0; in smbd_register_mr()
2417 reg_wr->wr.send_flags = IB_SEND_SIGNALED; in smbd_register_mr()
2418 reg_wr->mr = smbdirect_mr->mr; in smbd_register_mr()
2419 reg_wr->key = smbdirect_mr->mr->rkey; in smbd_register_mr()
2420 reg_wr->access = writing ? in smbd_register_mr()
2425 * There is no need for waiting for completion on ib_post_send in smbd_register_mr()
2429 rc = ib_post_send(info->id->qp, ®_wr->wr, NULL); in smbd_register_mr()
2433 log_rdma_mr(ERR, "ib_post_send failed rc=%x reg_wr->key=%x\n", in smbd_register_mr()
2434 rc, reg_wr->key); in smbd_register_mr()
2438 ib_dma_unmap_sg(info->id->device, smbdirect_mr->sgl, in smbd_register_mr()
2439 smbdirect_mr->sgl_count, smbdirect_mr->dir); in smbd_register_mr()
2442 smbdirect_mr->state = MR_ERROR; in smbd_register_mr()
2443 if (atomic_dec_and_test(&info->mr_used_count)) in smbd_register_mr()
2444 wake_up(&info->wait_for_mr_cleanup); in smbd_register_mr()
2451 static void local_inv_done(struct ib_cq *cq, struct ib_wc *wc) in local_inv_done() argument
2456 cqe = wc->wr_cqe; in local_inv_done()
2458 smbdirect_mr->state = MR_INVALIDATED; in local_inv_done()
2459 if (wc->status != IB_WC_SUCCESS) { in local_inv_done()
2460 log_rdma_mr(ERR, "invalidate failed status=%x\n", wc->status); in local_inv_done()
2461 smbdirect_mr->state = MR_ERROR; in local_inv_done()
2463 complete(&smbdirect_mr->invalidate_done); in local_inv_done()
2475 struct smbd_connection *info = smbdirect_mr->conn; in smbd_deregister_mr()
2478 if (smbdirect_mr->need_invalidate) { in smbd_deregister_mr()
2480 wr = &smbdirect_mr->inv_wr; in smbd_deregister_mr()
2481 wr->opcode = IB_WR_LOCAL_INV; in smbd_deregister_mr()
2482 smbdirect_mr->cqe.done = local_inv_done; in smbd_deregister_mr()
2483 wr->wr_cqe = &smbdirect_mr->cqe; in smbd_deregister_mr()
2484 wr->num_sge = 0; in smbd_deregister_mr()
2485 wr->ex.invalidate_rkey = smbdirect_mr->mr->rkey; in smbd_deregister_mr()
2486 wr->send_flags = IB_SEND_SIGNALED; in smbd_deregister_mr()
2488 init_completion(&smbdirect_mr->invalidate_done); in smbd_deregister_mr()
2489 rc = ib_post_send(info->id->qp, wr, NULL); in smbd_deregister_mr()
2495 wait_for_completion(&smbdirect_mr->invalidate_done); in smbd_deregister_mr()
2496 smbdirect_mr->need_invalidate = false; in smbd_deregister_mr()
2502 smbdirect_mr->state = MR_INVALIDATED; in smbd_deregister_mr()
2504 if (smbdirect_mr->state == MR_INVALIDATED) { in smbd_deregister_mr()
2506 info->id->device, smbdirect_mr->sgl, in smbd_deregister_mr()
2507 smbdirect_mr->sgl_count, in smbd_deregister_mr()
2508 smbdirect_mr->dir); in smbd_deregister_mr()
2509 smbdirect_mr->state = MR_READY; in smbd_deregister_mr()
2510 if (atomic_inc_return(&info->mr_ready_count) == 1) in smbd_deregister_mr()
2511 wake_up_interruptible(&info->wait_mr); in smbd_deregister_mr()
2517 queue_work(info->workqueue, &info->mr_recovery_work); in smbd_deregister_mr()
2520 if (atomic_dec_and_test(&info->mr_used_count)) in smbd_deregister_mr()
2521 wake_up(&info->wait_for_mr_cleanup); in smbd_deregister_mr()
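Taken together, the registration code above cycles each MR through a small state machine: MR_READY (available to get_mr()), MR_REGISTERED (mapped and posted by smbd_register_mr()), MR_INVALIDATED (local or remote invalidation done, pages still DMA-mapped until they are unmapped and the MR returns to MR_READY), and MR_ERROR, which hands the MR to smbd_mr_recovery_work() to be deregistered, re-allocated, and made MR_READY again.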