Lines matching +full:sig +full:- +full:dir (free-text search hits from the iSER target driver, ib_isert.c)
1 // SPDX-License-Identifier: GPL-2.0-or-later
7 * Nicholas A. Bellinger <nab@linux-iscsi.org>
69 return -EINVAL; in isert_sg_tablesize_set()
77 return (conn->pi_support && in isert_prot_cmd()
78 cmd->prot_op != TARGET_PROT_NORMAL); in isert_prot_cmd()
87 ib_event_msg(e->event), e->event, isert_conn); in isert_qp_event_callback()
89 switch (e->event) { in isert_qp_event_callback()
91 rdma_notify(isert_conn->cm_id, IB_EVENT_COMM_EST); in isert_qp_event_callback()
106 struct isert_device *device = isert_conn->device; in isert_create_qp()
107 struct ib_device *ib_dev = device->ib_device; in isert_create_qp()
111 isert_conn->cq = ib_cq_pool_get(ib_dev, cq_size, -1, IB_POLL_WORKQUEUE); in isert_create_qp()
112 if (IS_ERR(isert_conn->cq)) { in isert_create_qp()
114 ret = PTR_ERR(isert_conn->cq); in isert_create_qp()
117 isert_conn->cq_size = cq_size; in isert_create_qp()
122 attr.send_cq = isert_conn->cq; in isert_create_qp()
123 attr.recv_cq = isert_conn->cq; in isert_create_qp()
126 factor = rdma_rw_mr_factor(device->ib_device, cma_id->port_num, in isert_create_qp()
129 attr.cap.max_send_sge = device->ib_device->attrs.max_send_sge; in isert_create_qp()
133 if (device->pi_capable) in isert_create_qp()
136 ret = rdma_create_qp(cma_id, device->pd, &attr); in isert_create_qp()
139 ib_cq_pool_put(isert_conn->cq, isert_conn->cq_size); in isert_create_qp()
144 return cma_id->qp; in isert_create_qp()
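Note: the isert_create_qp() fragments above follow a common RDMA-CM setup pattern. A hedged sketch of that pattern is shown below; the queue depths, SG-table size, and helper name are illustrative assumptions, not the driver's exact constants.

/*
 * Sketch: take a CQ from the shared pool, size the send queue for rdma_rw
 * contexts via rdma_rw_mr_factor(), and hand the attributes to
 * rdma_create_qp(). Assumes <rdma/ib_verbs.h>, <rdma/rdma_cm.h>, <rdma/rw.h>.
 */
static struct ib_qp *example_create_qp(struct rdma_cm_id *cma_id,
				       struct ib_pd *pd, int cq_size,
				       bool pi_capable)
{
	struct ib_qp_init_attr attr = { };
	struct ib_cq *cq;
	u32 factor;
	int ret;

	cq = ib_cq_pool_get(cma_id->device, cq_size, -1, IB_POLL_WORKQUEUE);
	if (IS_ERR(cq))
		return ERR_CAST(cq);

	attr.qp_type = IB_QPT_RC;
	attr.send_cq = cq;
	attr.recv_cq = cq;
	attr.sq_sig_type = IB_SIGNAL_REQ_WR;

	/* leave room for the MRs rdma_rw may need per I/O context */
	factor = rdma_rw_mr_factor(cma_id->device, cma_id->port_num,
				   128 /* assumed max SG entries */);
	attr.cap.max_rdma_ctxs = 64 * factor;	/* assumed queue depth */
	attr.cap.max_send_wr = cq_size / 2;	/* assumption */
	attr.cap.max_recv_wr = cq_size / 2;	/* assumption */
	attr.cap.max_send_sge = cma_id->device->attrs.max_send_sge;
	attr.cap.max_recv_sge = 1;

	if (pi_capable)	/* per the pi_capable check above */
		attr.create_flags |= IB_QP_CREATE_INTEGRITY_EN;

	ret = rdma_create_qp(cma_id, pd, &attr);
	if (ret) {
		ib_cq_pool_put(cq, cq_size);
		return ERR_PTR(ret);
	}
	return cma_id->qp;
}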
150 struct isert_device *device = isert_conn->device; in isert_alloc_rx_descriptors()
151 struct ib_device *ib_dev = device->ib_device; in isert_alloc_rx_descriptors()
157 isert_conn->rx_descs = kcalloc(ISERT_QP_MAX_RECV_DTOS, in isert_alloc_rx_descriptors()
160 if (!isert_conn->rx_descs) in isert_alloc_rx_descriptors()
161 return -ENOMEM; in isert_alloc_rx_descriptors()
163 rx_desc = isert_conn->rx_descs; in isert_alloc_rx_descriptors()
166 dma_addr = ib_dma_map_single(ib_dev, rx_desc->buf, in isert_alloc_rx_descriptors()
171 rx_desc->dma_addr = dma_addr; in isert_alloc_rx_descriptors()
173 rx_sg = &rx_desc->rx_sg; in isert_alloc_rx_descriptors()
174 rx_sg->addr = rx_desc->dma_addr + isert_get_hdr_offset(rx_desc); in isert_alloc_rx_descriptors()
175 rx_sg->length = ISER_RX_PAYLOAD_SIZE; in isert_alloc_rx_descriptors()
176 rx_sg->lkey = device->pd->local_dma_lkey; in isert_alloc_rx_descriptors()
177 rx_desc->rx_cqe.done = isert_recv_done; in isert_alloc_rx_descriptors()
183 rx_desc = isert_conn->rx_descs; in isert_alloc_rx_descriptors()
185 ib_dma_unmap_single(ib_dev, rx_desc->dma_addr, in isert_alloc_rx_descriptors()
188 kfree(isert_conn->rx_descs); in isert_alloc_rx_descriptors()
189 isert_conn->rx_descs = NULL; in isert_alloc_rx_descriptors()
191 return -ENOMEM; in isert_alloc_rx_descriptors()
197 struct ib_device *ib_dev = isert_conn->device->ib_device; in isert_free_rx_descriptors()
201 if (!isert_conn->rx_descs) in isert_free_rx_descriptors()
204 rx_desc = isert_conn->rx_descs; in isert_free_rx_descriptors()
206 ib_dma_unmap_single(ib_dev, rx_desc->dma_addr, in isert_free_rx_descriptors()
210 kfree(isert_conn->rx_descs); in isert_free_rx_descriptors()
211 isert_conn->rx_descs = NULL; in isert_free_rx_descriptors()
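Note: the RX descriptor setup/teardown above uses the standard map/check/unmap pattern. A minimal sketch follows; "buf" and "len" stand in for the driver's descriptor buffer and ISER_RX_* size.

/* Map a receive buffer and verify the mapping before use. */
static int example_map_rx_buf(struct ib_device *ib_dev, void *buf,
			      size_t len, dma_addr_t *dma)
{
	*dma = ib_dma_map_single(ib_dev, buf, len, DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(ib_dev, *dma))
		return -ENOMEM;
	return 0;
}

/* Teardown mirrors the setup: unmap with the same length and direction. */
static void example_unmap_rx_buf(struct ib_device *ib_dev, dma_addr_t dma,
				 size_t len)
{
	ib_dma_unmap_single(ib_dev, dma, len, DMA_FROM_DEVICE);
}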
217 struct ib_device *ib_dev = device->ib_device; in isert_create_device_ib_res()
220 isert_dbg("devattr->max_send_sge: %d devattr->max_recv_sge %d\n", in isert_create_device_ib_res()
221 ib_dev->attrs.max_send_sge, ib_dev->attrs.max_recv_sge); in isert_create_device_ib_res()
222 isert_dbg("devattr->max_sge_rd: %d\n", ib_dev->attrs.max_sge_rd); in isert_create_device_ib_res()
224 device->pd = ib_alloc_pd(ib_dev, 0); in isert_create_device_ib_res()
225 if (IS_ERR(device->pd)) { in isert_create_device_ib_res()
226 ret = PTR_ERR(device->pd); in isert_create_device_ib_res()
233 if (ib_dev->attrs.device_cap_flags & IB_DEVICE_INTEGRITY_HANDOVER) in isert_create_device_ib_res()
234 device->pi_capable = true; in isert_create_device_ib_res()
236 device->pi_capable = false; in isert_create_device_ib_res()
246 ib_dealloc_pd(device->pd); in isert_free_device_ib_res()
253 device->refcount--; in isert_device_put()
254 isert_info("device %p refcount %d\n", device, device->refcount); in isert_device_put()
255 if (!device->refcount) { in isert_device_put()
257 list_del(&device->dev_node); in isert_device_put()
271 if (device->ib_device->node_guid == cma_id->device->node_guid) { in isert_device_get()
272 device->refcount++; in isert_device_get()
274 device, device->refcount); in isert_device_get()
283 return ERR_PTR(-ENOMEM); in isert_device_get()
286 INIT_LIST_HEAD(&device->dev_node); in isert_device_get()
288 device->ib_device = cma_id->device; in isert_device_get()
296 device->refcount++; in isert_device_get()
297 list_add_tail(&device->dev_node, &device_list); in isert_device_get()
299 device, device->refcount); in isert_device_get()
308 isert_conn->state = ISER_CONN_INIT; in isert_init_conn()
309 INIT_LIST_HEAD(&isert_conn->node); in isert_init_conn()
310 init_completion(&isert_conn->login_comp); in isert_init_conn()
311 init_completion(&isert_conn->login_req_comp); in isert_init_conn()
312 init_waitqueue_head(&isert_conn->rem_wait); in isert_init_conn()
313 kref_init(&isert_conn->kref); in isert_init_conn()
314 mutex_init(&isert_conn->mutex); in isert_init_conn()
315 INIT_WORK(&isert_conn->release_work, isert_release_work); in isert_init_conn()
321 struct ib_device *ib_dev = isert_conn->device->ib_device; in isert_free_login_buf()
323 ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma, in isert_free_login_buf()
325 kfree(isert_conn->login_rsp_buf); in isert_free_login_buf()
327 ib_dma_unmap_single(ib_dev, isert_conn->login_desc->dma_addr, in isert_free_login_buf()
329 kfree(isert_conn->login_desc); in isert_free_login_buf()
338 isert_conn->login_desc = kzalloc(sizeof(*isert_conn->login_desc), in isert_alloc_login_buf()
340 if (!isert_conn->login_desc) in isert_alloc_login_buf()
341 return -ENOMEM; in isert_alloc_login_buf()
343 isert_conn->login_desc->dma_addr = ib_dma_map_single(ib_dev, in isert_alloc_login_buf()
344 isert_conn->login_desc->buf, in isert_alloc_login_buf()
346 ret = ib_dma_mapping_error(ib_dev, isert_conn->login_desc->dma_addr); in isert_alloc_login_buf()
349 isert_conn->login_desc->dma_addr = 0; in isert_alloc_login_buf()
353 isert_conn->login_rsp_buf = kzalloc(ISER_RX_PAYLOAD_SIZE, GFP_KERNEL); in isert_alloc_login_buf()
354 if (!isert_conn->login_rsp_buf) { in isert_alloc_login_buf()
355 ret = -ENOMEM; in isert_alloc_login_buf()
359 isert_conn->login_rsp_dma = ib_dma_map_single(ib_dev, in isert_alloc_login_buf()
360 isert_conn->login_rsp_buf, in isert_alloc_login_buf()
362 ret = ib_dma_mapping_error(ib_dev, isert_conn->login_rsp_dma); in isert_alloc_login_buf()
365 isert_conn->login_rsp_dma = 0; in isert_alloc_login_buf()
372 kfree(isert_conn->login_rsp_buf); in isert_alloc_login_buf()
374 ib_dma_unmap_single(ib_dev, isert_conn->login_desc->dma_addr, in isert_alloc_login_buf()
377 kfree(isert_conn->login_desc); in isert_alloc_login_buf()
385 struct ib_device_attr *attr = &isert_conn->device->ib_device->attrs; in isert_set_nego_params()
388 isert_conn->initiator_depth = min_t(u8, param->initiator_depth, in isert_set_nego_params()
389 attr->max_qp_init_rd_atom); in isert_set_nego_params()
390 isert_dbg("Using initiator_depth: %u\n", isert_conn->initiator_depth); in isert_set_nego_params()
392 if (param->private_data) { in isert_set_nego_params()
393 u8 flags = *(u8 *)param->private_data; in isert_set_nego_params()
399 isert_conn->snd_w_inv = !(flags & ISER_SEND_W_INV_NOT_SUP) && in isert_set_nego_params()
400 (attr->device_cap_flags & in isert_set_nego_params()
402 if (isert_conn->snd_w_inv) in isert_set_nego_params()
410 ib_destroy_qp(isert_conn->qp); in isert_destroy_qp()
411 ib_cq_pool_put(isert_conn->cq, isert_conn->cq_size); in isert_destroy_qp()
417 struct isert_np *isert_np = cma_id->context; in isert_connect_request()
418 struct iscsi_np *np = isert_np->np; in isert_connect_request()
423 spin_lock_bh(&np->np_thread_lock); in isert_connect_request()
424 if (!np->enabled) { in isert_connect_request()
425 spin_unlock_bh(&np->np_thread_lock); in isert_connect_request()
429 spin_unlock_bh(&np->np_thread_lock); in isert_connect_request()
432 cma_id, cma_id->context); in isert_connect_request()
436 return -ENOMEM; in isert_connect_request()
439 isert_conn->cm_id = cma_id; in isert_connect_request()
446 isert_conn->device = device; in isert_connect_request()
448 ret = isert_alloc_login_buf(isert_conn, cma_id->device); in isert_connect_request()
452 isert_set_nego_params(isert_conn, &event->param.conn); in isert_connect_request()
454 isert_conn->qp = isert_create_qp(isert_conn, cma_id); in isert_connect_request()
455 if (IS_ERR(isert_conn->qp)) { in isert_connect_request()
456 ret = PTR_ERR(isert_conn->qp); in isert_connect_request()
468 mutex_lock(&isert_np->mutex); in isert_connect_request()
469 list_add_tail(&isert_conn->node, &isert_np->accepted); in isert_connect_request()
470 mutex_unlock(&isert_np->mutex); in isert_connect_request()
489 struct isert_device *device = isert_conn->device; in isert_connect_release()
496 if (isert_conn->cm_id && in isert_connect_release()
497 !isert_conn->dev_removed) in isert_connect_release()
498 rdma_destroy_id(isert_conn->cm_id); in isert_connect_release()
500 if (isert_conn->qp) in isert_connect_release()
503 if (isert_conn->login_desc) in isert_connect_release()
508 if (isert_conn->dev_removed) in isert_connect_release()
509 wake_up_interruptible(&isert_conn->rem_wait); in isert_connect_release()
517 struct isert_conn *isert_conn = cma_id->qp->qp_context; in isert_connected_handler()
518 struct isert_np *isert_np = cma_id->context; in isert_connected_handler()
522 mutex_lock(&isert_conn->mutex); in isert_connected_handler()
523 isert_conn->state = ISER_CONN_UP; in isert_connected_handler()
524 kref_get(&isert_conn->kref); in isert_connected_handler()
525 mutex_unlock(&isert_conn->mutex); in isert_connected_handler()
527 mutex_lock(&isert_np->mutex); in isert_connected_handler()
528 list_move_tail(&isert_conn->node, &isert_np->pending); in isert_connected_handler()
529 mutex_unlock(&isert_np->mutex); in isert_connected_handler()
532 up(&isert_np->sem); in isert_connected_handler()
541 isert_info("conn %p final kref %s/%d\n", isert_conn, current->comm, in isert_release_kref()
542 current->pid); in isert_release_kref()
550 kref_put(&isert_conn->kref, isert_release_kref); in isert_put_conn()
556 struct isert_np *isert_np = isert_conn->cm_id->context; in isert_handle_unbound_conn()
558 mutex_lock(&isert_np->mutex); in isert_handle_unbound_conn()
559 if (!list_empty(&isert_conn->node)) { in isert_handle_unbound_conn()
564 list_del_init(&isert_conn->node); in isert_handle_unbound_conn()
566 queue_work(isert_release_wq, &isert_conn->release_work); in isert_handle_unbound_conn()
568 mutex_unlock(&isert_np->mutex); in isert_handle_unbound_conn()
572 * isert_conn_terminate() - Initiate connection termination
588 if (isert_conn->state >= ISER_CONN_TERMINATING) in isert_conn_terminate()
592 isert_conn, isert_conn->state); in isert_conn_terminate()
593 isert_conn->state = ISER_CONN_TERMINATING; in isert_conn_terminate()
594 err = rdma_disconnect(isert_conn->cm_id); in isert_conn_terminate()
609 isert_np->cm_id = NULL; in isert_np_cma_handler()
612 isert_np->cm_id = isert_setup_id(isert_np); in isert_np_cma_handler()
613 if (IS_ERR(isert_np->cm_id)) { in isert_np_cma_handler()
615 isert_np, PTR_ERR(isert_np->cm_id)); in isert_np_cma_handler()
616 isert_np->cm_id = NULL; in isert_np_cma_handler()
624 return -1; in isert_np_cma_handler()
631 struct isert_conn *isert_conn = cma_id->qp->qp_context; in isert_disconnected_handler()
633 mutex_lock(&isert_conn->mutex); in isert_disconnected_handler()
634 switch (isert_conn->state) { in isert_disconnected_handler()
639 ib_drain_qp(isert_conn->qp); in isert_disconnected_handler()
644 iscsit_cause_connection_reinstatement(isert_conn->conn, 0); in isert_disconnected_handler()
648 isert_conn, isert_conn->state); in isert_disconnected_handler()
650 mutex_unlock(&isert_conn->mutex); in isert_disconnected_handler()
658 struct isert_conn *isert_conn = cma_id->qp->qp_context; in isert_connect_error()
660 ib_drain_qp(isert_conn->qp); in isert_connect_error()
661 list_del_init(&isert_conn->node); in isert_connect_error()
662 isert_conn->cm_id = NULL; in isert_connect_error()
665 return -1; in isert_connect_error()
671 struct isert_np *isert_np = cma_id->context; in isert_cma_handler()
676 rdma_event_msg(event->event), event->event, in isert_cma_handler()
677 event->status, cma_id, cma_id->context); in isert_cma_handler()
679 if (isert_np->cm_id == cma_id) in isert_cma_handler()
680 return isert_np_cma_handler(cma_id->context, event->event); in isert_cma_handler()
682 switch (event->event) { in isert_cma_handler()
694 ret = isert_disconnected_handler(cma_id, event->event); in isert_cma_handler()
697 isert_conn = cma_id->qp->qp_context; in isert_cma_handler()
698 isert_conn->dev_removed = true; in isert_cma_handler()
699 isert_disconnected_handler(cma_id, event->event); in isert_cma_handler()
700 wait_event_interruptible(isert_conn->rem_wait, in isert_cma_handler()
701 isert_conn->state == ISER_CONN_DOWN); in isert_cma_handler()
704 * return non-zero from the callback to destroy in isert_cma_handler()
710 rdma_reject_msg(cma_id, event->status)); in isert_cma_handler()
717 isert_err("Unhandled RDMA CMA event: %d\n", event->event); in isert_cma_handler()
731 for (rx_wr = isert_conn->rx_wr, i = 0; i < count; i++, rx_wr++) { in isert_post_recvm()
732 rx_desc = &isert_conn->rx_descs[i]; in isert_post_recvm()
734 rx_wr->wr_cqe = &rx_desc->rx_cqe; in isert_post_recvm()
735 rx_wr->sg_list = &rx_desc->rx_sg; in isert_post_recvm()
736 rx_wr->num_sge = 1; in isert_post_recvm()
737 rx_wr->next = rx_wr + 1; in isert_post_recvm()
738 rx_desc->in_use = false; in isert_post_recvm()
740 rx_wr--; in isert_post_recvm()
741 rx_wr->next = NULL; /* mark end of work requests list */ in isert_post_recvm()
743 ret = ib_post_recv(isert_conn->qp, isert_conn->rx_wr, NULL); in isert_post_recvm()
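Note: isert_post_recvm() posts all receive descriptors in one verb call. A hedged sketch of that chaining pattern is below; the array arguments are illustrative.

/* Link the ib_recv_wr array through ->next and post the whole chain once. */
static int example_post_recv_chain(struct ib_qp *qp, struct ib_recv_wr *wrs,
				   struct ib_sge *sges, struct ib_cqe *cqes,
				   int count)
{
	int i;

	for (i = 0; i < count; i++) {
		wrs[i].wr_cqe = &cqes[i];
		wrs[i].sg_list = &sges[i];
		wrs[i].num_sge = 1;
		wrs[i].next = (i == count - 1) ? NULL : &wrs[i + 1];
	}
	return ib_post_recv(qp, wrs, NULL);
}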
756 if (!rx_desc->in_use) { in isert_post_recv()
758 * if the descriptor is not in-use we already reposted it in isert_post_recv()
764 rx_desc->in_use = false; in isert_post_recv()
765 rx_wr.wr_cqe = &rx_desc->rx_cqe; in isert_post_recv()
766 rx_wr.sg_list = &rx_desc->rx_sg; in isert_post_recv()
770 ret = ib_post_recv(isert_conn->qp, &rx_wr, NULL); in isert_post_recv()
780 struct ib_device *ib_dev = isert_conn->cm_id->device; in isert_login_post_send()
784 ib_dma_sync_single_for_device(ib_dev, tx_desc->dma_addr, in isert_login_post_send()
787 tx_desc->tx_cqe.done = isert_login_send_done; in isert_login_post_send()
790 send_wr.wr_cqe = &tx_desc->tx_cqe; in isert_login_post_send()
791 send_wr.sg_list = tx_desc->tx_sg; in isert_login_post_send()
792 send_wr.num_sge = tx_desc->num_sge; in isert_login_post_send()
796 ret = ib_post_send(isert_conn->qp, &send_wr, NULL); in isert_login_post_send()
808 memset(&tx_desc->iser_header, 0, sizeof(struct iser_ctrl)); in __isert_create_send_desc()
809 tx_desc->iser_header.flags = ISCSI_CTRL; in __isert_create_send_desc()
811 tx_desc->num_sge = 1; in __isert_create_send_desc()
813 if (tx_desc->tx_sg[0].lkey != device->pd->local_dma_lkey) { in __isert_create_send_desc()
814 tx_desc->tx_sg[0].lkey = device->pd->local_dma_lkey; in __isert_create_send_desc()
824 struct isert_device *device = isert_conn->device; in isert_create_send_desc()
825 struct ib_device *ib_dev = device->ib_device; in isert_create_send_desc()
827 ib_dma_sync_single_for_cpu(ib_dev, tx_desc->dma_addr, in isert_create_send_desc()
837 struct isert_device *device = isert_conn->device; in isert_init_tx_hdrs()
838 struct ib_device *ib_dev = device->ib_device; in isert_init_tx_hdrs()
845 return -ENOMEM; in isert_init_tx_hdrs()
848 tx_desc->dma_addr = dma_addr; in isert_init_tx_hdrs()
849 tx_desc->tx_sg[0].addr = tx_desc->dma_addr; in isert_init_tx_hdrs()
850 tx_desc->tx_sg[0].length = ISER_HEADERS_LEN; in isert_init_tx_hdrs()
851 tx_desc->tx_sg[0].lkey = device->pd->local_dma_lkey; in isert_init_tx_hdrs()
854 tx_desc->tx_sg[0].addr, tx_desc->tx_sg[0].length, in isert_init_tx_hdrs()
855 tx_desc->tx_sg[0].lkey); in isert_init_tx_hdrs()
864 struct iser_tx_desc *tx_desc = &isert_cmd->tx_desc; in isert_init_send_wr()
866 tx_desc->tx_cqe.done = isert_send_done; in isert_init_send_wr()
867 send_wr->wr_cqe = &tx_desc->tx_cqe; in isert_init_send_wr()
869 if (isert_conn->snd_w_inv && isert_cmd->inv_rkey) { in isert_init_send_wr()
870 send_wr->opcode = IB_WR_SEND_WITH_INV; in isert_init_send_wr()
871 send_wr->ex.invalidate_rkey = isert_cmd->inv_rkey; in isert_init_send_wr()
873 send_wr->opcode = IB_WR_SEND; in isert_init_send_wr()
876 send_wr->sg_list = &tx_desc->tx_sg[0]; in isert_init_send_wr()
877 send_wr->num_sge = isert_cmd->tx_desc.num_sge; in isert_init_send_wr()
878 send_wr->send_flags = IB_SEND_SIGNALED; in isert_init_send_wr()
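Note: isert_init_send_wr() above picks send-with-invalidate only when the initiator advertised support and a remote rkey is known. A hedged sketch of that decision, with illustrative parameter names:

static void example_init_send_wr(struct ib_send_wr *wr, struct ib_cqe *cqe,
				 struct ib_sge *sge, bool send_w_inv, u32 rkey)
{
	wr->wr_cqe = cqe;
	wr->sg_list = sge;
	wr->num_sge = 1;
	wr->send_flags = IB_SEND_SIGNALED;
	if (send_w_inv && rkey) {
		wr->opcode = IB_WR_SEND_WITH_INV;
		wr->ex.invalidate_rkey = rkey;
	} else {
		wr->opcode = IB_WR_SEND;
	}
}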
889 sge.addr = isert_conn->login_desc->dma_addr + in isert_login_post_recv()
890 isert_get_hdr_offset(isert_conn->login_desc); in isert_login_post_recv()
892 sge.lkey = isert_conn->device->pd->local_dma_lkey; in isert_login_post_recv()
897 isert_conn->login_desc->rx_cqe.done = isert_login_recv_done; in isert_login_post_recv()
900 rx_wr.wr_cqe = &isert_conn->login_desc->rx_cqe; in isert_login_post_recv()
904 ret = ib_post_recv(isert_conn->qp, &rx_wr, NULL); in isert_login_post_recv()
915 struct isert_conn *isert_conn = conn->context; in isert_put_login_tx()
916 struct isert_device *device = isert_conn->device; in isert_put_login_tx()
917 struct ib_device *ib_dev = device->ib_device; in isert_put_login_tx()
918 struct iser_tx_desc *tx_desc = &isert_conn->login_tx_desc; in isert_put_login_tx()
923 memcpy(&tx_desc->iscsi_header, &login->rsp[0], in isert_put_login_tx()
929 struct ib_sge *tx_dsg = &tx_desc->tx_sg[1]; in isert_put_login_tx()
931 ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_rsp_dma, in isert_put_login_tx()
934 memcpy(isert_conn->login_rsp_buf, login->rsp_buf, length); in isert_put_login_tx()
936 ib_dma_sync_single_for_device(ib_dev, isert_conn->login_rsp_dma, in isert_put_login_tx()
939 tx_dsg->addr = isert_conn->login_rsp_dma; in isert_put_login_tx()
940 tx_dsg->length = length; in isert_put_login_tx()
941 tx_dsg->lkey = isert_conn->device->pd->local_dma_lkey; in isert_put_login_tx()
942 tx_desc->num_sge = 2; in isert_put_login_tx()
944 if (!login->login_failed) { in isert_put_login_tx()
945 if (login->login_complete) { in isert_put_login_tx()
956 mutex_lock(&isert_conn->mutex); in isert_put_login_tx()
957 isert_conn->state = ISER_CONN_FULL_FEATURE; in isert_put_login_tx()
958 mutex_unlock(&isert_conn->mutex); in isert_put_login_tx()
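Note: the login response path above brackets the CPU copy into a DMA-mapped buffer with sync calls. A minimal sketch of that ordering, assuming a buffer already mapped with DMA_TO_DEVICE:

/* Sync for CPU, fill the buffer, then hand ownership back to the device. */
static void example_fill_dma_buf(struct ib_device *ib_dev, dma_addr_t dma,
				 void *buf, const void *src, size_t len)
{
	ib_dma_sync_single_for_cpu(ib_dev, dma, len, DMA_TO_DEVICE);
	memcpy(buf, src, len);
	ib_dma_sync_single_for_device(ib_dev, dma, len, DMA_TO_DEVICE);
}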
977 struct iser_rx_desc *rx_desc = isert_conn->login_desc; in isert_rx_login_req()
978 int rx_buflen = isert_conn->login_req_len; in isert_rx_login_req()
979 struct iscsi_conn *conn = isert_conn->conn; in isert_rx_login_req()
980 struct iscsi_login *login = conn->conn_login; in isert_rx_login_req()
987 if (login->first_request) { in isert_rx_login_req()
994 login->leading_connection = (!login_req->tsih) ? 1 : 0; in isert_rx_login_req()
995 login->current_stage = in isert_rx_login_req()
996 (login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK) in isert_rx_login_req()
998 login->version_min = login_req->min_version; in isert_rx_login_req()
999 login->version_max = login_req->max_version; in isert_rx_login_req()
1000 memcpy(login->isid, login_req->isid, 6); in isert_rx_login_req()
1001 login->cmd_sn = be32_to_cpu(login_req->cmdsn); in isert_rx_login_req()
1002 login->init_task_tag = login_req->itt; in isert_rx_login_req()
1003 login->initial_exp_statsn = be32_to_cpu(login_req->exp_statsn); in isert_rx_login_req()
1004 login->cid = be16_to_cpu(login_req->cid); in isert_rx_login_req()
1005 login->tsih = be16_to_cpu(login_req->tsih); in isert_rx_login_req()
1008 memcpy(&login->req[0], isert_get_iscsi_hdr(rx_desc), ISCSI_HDR_LEN); in isert_rx_login_req()
1014 memcpy(login->req_buf, isert_get_data(rx_desc), size); in isert_rx_login_req()
1016 if (login->first_request) { in isert_rx_login_req()
1017 complete(&isert_conn->login_comp); in isert_rx_login_req()
1020 schedule_delayed_work(&conn->login_work, 0); in isert_rx_login_req()
1026 struct isert_conn *isert_conn = conn->context; in isert_allocate_cmd()
1036 isert_cmd->conn = isert_conn; in isert_allocate_cmd()
1037 isert_cmd->iscsi_cmd = cmd; in isert_allocate_cmd()
1038 isert_cmd->rx_desc = rx_desc; in isert_allocate_cmd()
1048 struct iscsi_conn *conn = isert_conn->conn; in isert_handle_scsi_cmd()
1058 imm_data = cmd->immediate_data; in isert_handle_scsi_cmd()
1059 imm_data_len = cmd->first_burst_len; in isert_handle_scsi_cmd()
1060 unsol_data = cmd->unsolicited_data; in isert_handle_scsi_cmd()
1061 data_len = cmd->se_cmd.data_length; in isert_handle_scsi_cmd()
1064 cmd->se_cmd.se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC; in isert_handle_scsi_cmd()
1078 sg_copy_from_buffer(cmd->se_cmd.t_data_sg, sg_nents, in isert_handle_scsi_cmd()
1083 sg_init_table(&isert_cmd->sg, 1); in isert_handle_scsi_cmd()
1084 cmd->se_cmd.t_data_sg = &isert_cmd->sg; in isert_handle_scsi_cmd()
1085 cmd->se_cmd.t_data_nents = 1; in isert_handle_scsi_cmd()
1086 sg_set_buf(&isert_cmd->sg, isert_get_data(rx_desc), in isert_handle_scsi_cmd()
1092 cmd->write_data_done += imm_data_len; in isert_handle_scsi_cmd()
1094 if (cmd->write_data_done == cmd->se_cmd.data_length) { in isert_handle_scsi_cmd()
1095 spin_lock_bh(&cmd->istate_lock); in isert_handle_scsi_cmd()
1096 cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT; in isert_handle_scsi_cmd()
1097 cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT; in isert_handle_scsi_cmd()
1098 spin_unlock_bh(&cmd->istate_lock); in isert_handle_scsi_cmd()
1102 rc = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn); in isert_handle_scsi_cmd()
1107 target_put_sess_cmd(&cmd->se_cmd); in isert_handle_scsi_cmd()
1117 struct iscsi_conn *conn = isert_conn->conn; in isert_handle_iscsi_dataout()
1120 u32 unsol_data_len = ntoh24(hdr->dlength); in isert_handle_iscsi_dataout()
1131 if (!cmd->unsolicited_data) { in isert_handle_iscsi_dataout()
1134 return -1; in isert_handle_iscsi_dataout()
1139 unsol_data_len, cmd->write_data_done, in isert_handle_iscsi_dataout()
1140 cmd->se_cmd.data_length); in isert_handle_iscsi_dataout()
1142 sg_off = cmd->write_data_done / PAGE_SIZE; in isert_handle_iscsi_dataout()
1143 sg_start = &cmd->se_cmd.t_data_sg[sg_off]; in isert_handle_iscsi_dataout()
1145 page_off = cmd->write_data_done % PAGE_SIZE; in isert_handle_iscsi_dataout()
1147 * FIXME: Non page-aligned unsolicited_data out in isert_handle_iscsi_dataout()
1150 isert_err("unexpected non-page aligned data payload\n"); in isert_handle_iscsi_dataout()
1152 return -1; in isert_handle_iscsi_dataout()
1166 * multiple data-outs on the same command can arrive - in isert_handle_iscsi_dataout()
1177 struct iscsi_conn *conn = isert_conn->conn; in isert_handle_nop_out()
1196 struct iscsi_conn *conn = isert_conn->conn; in isert_handle_text_cmd()
1197 u32 payload_length = ntoh24(hdr->dlength); in isert_handle_text_cmd()
1208 return -ENOMEM; in isert_handle_text_cmd()
1210 cmd->text_in_ptr = text_in; in isert_handle_text_cmd()
1212 memcpy(cmd->text_in_ptr, isert_get_data(rx_desc), payload_length); in isert_handle_text_cmd()
1223 struct iscsi_conn *conn = isert_conn->conn; in isert_rx_opcode()
1226 int ret = -EINVAL; in isert_rx_opcode()
1227 u8 opcode = (hdr->opcode & ISCSI_OPCODE_MASK); in isert_rx_opcode()
1229 if (conn->sess->sess_ops->SessionType && in isert_rx_opcode()
1243 isert_cmd->read_stag = read_stag; in isert_rx_opcode()
1244 isert_cmd->read_va = read_va; in isert_rx_opcode()
1245 isert_cmd->write_stag = write_stag; in isert_rx_opcode()
1246 isert_cmd->write_va = write_va; in isert_rx_opcode()
1247 isert_cmd->inv_rkey = read_stag ? read_stag : write_stag; in isert_rx_opcode()
1281 if (be32_to_cpu(hdr->ttt) != 0xFFFFFFFF) in isert_rx_opcode()
1282 cmd = iscsit_find_cmd_from_itt(conn, hdr->itt); in isert_rx_opcode()
1305 if (wc->status != IB_WC_WR_FLUSH_ERR) in isert_print_wc()
1307 ib_wc_status_msg(wc->status), wc->status, in isert_print_wc()
1308 wc->vendor_err); in isert_print_wc()
1311 ib_wc_status_msg(wc->status), wc->status); in isert_print_wc()
1317 struct isert_conn *isert_conn = wc->qp->qp_context; in isert_recv_done()
1318 struct ib_device *ib_dev = isert_conn->cm_id->device; in isert_recv_done()
1319 struct iser_rx_desc *rx_desc = cqe_to_rx_desc(wc->wr_cqe); in isert_recv_done()
1325 if (unlikely(wc->status != IB_WC_SUCCESS)) { in isert_recv_done()
1327 if (wc->status != IB_WC_WR_FLUSH_ERR) in isert_recv_done()
1328 iscsit_cause_connection_reinstatement(isert_conn->conn, 0); in isert_recv_done()
1332 rx_desc->in_use = true; in isert_recv_done()
1334 ib_dma_sync_single_for_cpu(ib_dev, rx_desc->dma_addr, in isert_recv_done()
1338 rx_desc->dma_addr, hdr->opcode, hdr->itt, hdr->flags, in isert_recv_done()
1339 (int)(wc->byte_len - ISER_HEADERS_LEN)); in isert_recv_done()
1341 switch (iser_ctrl->flags & 0xF0) { in isert_recv_done()
1343 if (iser_ctrl->flags & ISER_RSV) { in isert_recv_done()
1344 read_stag = be32_to_cpu(iser_ctrl->read_stag); in isert_recv_done()
1345 read_va = be64_to_cpu(iser_ctrl->read_va); in isert_recv_done()
1349 if (iser_ctrl->flags & ISER_WSV) { in isert_recv_done()
1350 write_stag = be32_to_cpu(iser_ctrl->write_stag); in isert_recv_done()
1351 write_va = be64_to_cpu(iser_ctrl->write_va); in isert_recv_done()
1362 isert_warn("Unknown iSER hdr flags: 0x%02x\n", iser_ctrl->flags); in isert_recv_done()
1369 ib_dma_sync_single_for_device(ib_dev, rx_desc->dma_addr, in isert_recv_done()
1376 struct isert_conn *isert_conn = wc->qp->qp_context; in isert_login_recv_done()
1377 struct ib_device *ib_dev = isert_conn->device->ib_device; in isert_login_recv_done()
1379 if (unlikely(wc->status != IB_WC_SUCCESS)) { in isert_login_recv_done()
1384 ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_desc->dma_addr, in isert_login_recv_done()
1387 isert_conn->login_req_len = wc->byte_len - ISER_HEADERS_LEN; in isert_login_recv_done()
1389 if (isert_conn->conn) { in isert_login_recv_done()
1390 struct iscsi_login *login = isert_conn->conn->conn_login; in isert_login_recv_done()
1392 if (login && !login->first_request) in isert_login_recv_done()
1396 mutex_lock(&isert_conn->mutex); in isert_login_recv_done()
1397 complete(&isert_conn->login_req_comp); in isert_login_recv_done()
1398 mutex_unlock(&isert_conn->mutex); in isert_login_recv_done()
1400 ib_dma_sync_single_for_device(ib_dev, isert_conn->login_desc->dma_addr, in isert_login_recv_done()
1407 struct se_cmd *se_cmd = &cmd->iscsi_cmd->se_cmd; in isert_rdma_rw_ctx_destroy()
1408 enum dma_data_direction dir = target_reverse_dma_direction(se_cmd); in isert_rdma_rw_ctx_destroy() local
1410 if (!cmd->rw.nr_ops) in isert_rdma_rw_ctx_destroy()
1414 rdma_rw_ctx_destroy_signature(&cmd->rw, conn->qp, in isert_rdma_rw_ctx_destroy()
1415 conn->cm_id->port_num, se_cmd->t_data_sg, in isert_rdma_rw_ctx_destroy()
1416 se_cmd->t_data_nents, se_cmd->t_prot_sg, in isert_rdma_rw_ctx_destroy()
1417 se_cmd->t_prot_nents, dir); in isert_rdma_rw_ctx_destroy()
1419 rdma_rw_ctx_destroy(&cmd->rw, conn->qp, conn->cm_id->port_num, in isert_rdma_rw_ctx_destroy()
1420 se_cmd->t_data_sg, se_cmd->t_data_nents, dir); in isert_rdma_rw_ctx_destroy()
1423 cmd->rw.nr_ops = 0; in isert_rdma_rw_ctx_destroy()
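Note: isert_rdma_rw_ctx_destroy() above chooses between the plain and signature teardown of an rdma_rw context. A hedged sketch with illustrative arguments:

static void example_rw_destroy(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
			       u32 port_num, struct scatterlist *sg,
			       u32 sg_cnt, struct scatterlist *prot_sg,
			       u32 prot_sg_cnt, enum dma_data_direction dir,
			       bool with_pi)
{
	if (with_pi)	/* protection information was registered */
		rdma_rw_ctx_destroy_signature(ctx, qp, port_num, sg, sg_cnt,
					      prot_sg, prot_sg_cnt, dir);
	else
		rdma_rw_ctx_destroy(ctx, qp, port_num, sg, sg_cnt, dir);
}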
1429 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd; in isert_put_cmd()
1430 struct isert_conn *isert_conn = isert_cmd->conn; in isert_put_cmd()
1431 struct iscsi_conn *conn = isert_conn->conn; in isert_put_cmd()
1436 switch (cmd->iscsi_opcode) { in isert_put_cmd()
1438 spin_lock_bh(&conn->cmd_lock); in isert_put_cmd()
1439 if (!list_empty(&cmd->i_conn_node)) in isert_put_cmd()
1440 list_del_init(&cmd->i_conn_node); in isert_put_cmd()
1441 spin_unlock_bh(&conn->cmd_lock); in isert_put_cmd()
1443 if (cmd->data_direction == DMA_TO_DEVICE) { in isert_put_cmd()
1452 cmd->se_cmd.t_state == TRANSPORT_WRITE_PENDING) { in isert_put_cmd()
1453 struct se_cmd *se_cmd = &cmd->se_cmd; in isert_put_cmd()
1460 transport_generic_free_cmd(&cmd->se_cmd, 0); in isert_put_cmd()
1463 spin_lock_bh(&conn->cmd_lock); in isert_put_cmd()
1464 if (!list_empty(&cmd->i_conn_node)) in isert_put_cmd()
1465 list_del_init(&cmd->i_conn_node); in isert_put_cmd()
1466 spin_unlock_bh(&conn->cmd_lock); in isert_put_cmd()
1468 transport_generic_free_cmd(&cmd->se_cmd, 0); in isert_put_cmd()
1473 hdr = (struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header; in isert_put_cmd()
1475 if (hdr->flags & ISCSI_FLAG_TEXT_CONTINUE) in isert_put_cmd()
1478 spin_lock_bh(&conn->cmd_lock); in isert_put_cmd()
1479 if (!list_empty(&cmd->i_conn_node)) in isert_put_cmd()
1480 list_del_init(&cmd->i_conn_node); in isert_put_cmd()
1481 spin_unlock_bh(&conn->cmd_lock); in isert_put_cmd()
1486 * associated cmd->se_cmd needs to be released. in isert_put_cmd()
1488 if (cmd->se_cmd.se_tfo != NULL) { in isert_put_cmd()
1490 cmd->iscsi_opcode); in isert_put_cmd()
1491 transport_generic_free_cmd(&cmd->se_cmd, 0); in isert_put_cmd()
1504 if (tx_desc->dma_addr != 0) { in isert_unmap_tx_desc()
1505 isert_dbg("unmap single for tx_desc->dma_addr\n"); in isert_unmap_tx_desc()
1506 ib_dma_unmap_single(ib_dev, tx_desc->dma_addr, in isert_unmap_tx_desc()
1508 tx_desc->dma_addr = 0; in isert_unmap_tx_desc()
1516 if (isert_cmd->pdu_buf_dma != 0) { in isert_completion_put()
1517 isert_dbg("unmap single for isert_cmd->pdu_buf_dma\n"); in isert_completion_put()
1518 ib_dma_unmap_single(ib_dev, isert_cmd->pdu_buf_dma, in isert_completion_put()
1519 isert_cmd->pdu_buf_len, DMA_TO_DEVICE); in isert_completion_put()
1520 isert_cmd->pdu_buf_dma = 0; in isert_completion_put()
1541 u32 block_size = se_cmd->se_dev->dev_attrib.block_size + 8; in isert_check_pi_status()
1545 se_cmd->pi_err = TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED; in isert_check_pi_status()
1548 se_cmd->pi_err = TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED; in isert_check_pi_status()
1551 se_cmd->pi_err = TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED; in isert_check_pi_status()
1556 se_cmd->sense_info = sec_offset_err + se_cmd->t_task_lba; in isert_check_pi_status()
1561 (unsigned long long)se_cmd->sense_info, in isert_check_pi_status()
1574 struct isert_conn *isert_conn = wc->qp->qp_context; in isert_rdma_write_done()
1575 struct isert_device *device = isert_conn->device; in isert_rdma_write_done()
1576 struct iser_tx_desc *desc = cqe_to_tx_desc(wc->wr_cqe); in isert_rdma_write_done()
1578 struct se_cmd *cmd = &isert_cmd->iscsi_cmd->se_cmd; in isert_rdma_write_done()
1581 if (unlikely(wc->status != IB_WC_SUCCESS)) { in isert_rdma_write_done()
1583 if (wc->status != IB_WC_WR_FLUSH_ERR) in isert_rdma_write_done()
1584 iscsit_cause_connection_reinstatement(isert_conn->conn, 0); in isert_rdma_write_done()
1585 isert_completion_put(desc, isert_cmd, device->ib_device, true); in isert_rdma_write_done()
1591 ret = isert_check_pi_status(cmd, isert_cmd->rw.reg->mr); in isert_rdma_write_done()
1597 * plus two references to handle queue-full, so re-add in isert_rdma_write_done()
1598 * one here as target-core will have already dropped in isert_rdma_write_done()
1601 kref_get(&cmd->cmd_kref); in isert_rdma_write_done()
1602 transport_generic_request_failure(cmd, cmd->pi_err); in isert_rdma_write_done()
1607 ret = isert_put_response(isert_conn->conn, isert_cmd->iscsi_cmd); in isert_rdma_write_done()
1616 struct isert_conn *isert_conn = wc->qp->qp_context; in isert_rdma_read_done()
1617 struct isert_device *device = isert_conn->device; in isert_rdma_read_done()
1618 struct iser_tx_desc *desc = cqe_to_tx_desc(wc->wr_cqe); in isert_rdma_read_done()
1620 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd; in isert_rdma_read_done()
1621 struct se_cmd *se_cmd = &cmd->se_cmd; in isert_rdma_read_done()
1624 if (unlikely(wc->status != IB_WC_SUCCESS)) { in isert_rdma_read_done()
1626 if (wc->status != IB_WC_WR_FLUSH_ERR) in isert_rdma_read_done()
1627 iscsit_cause_connection_reinstatement(isert_conn->conn, 0); in isert_rdma_read_done()
1628 isert_completion_put(desc, isert_cmd, device->ib_device, true); in isert_rdma_read_done()
1637 ret = isert_check_pi_status(se_cmd, isert_cmd->rw.reg->mr); in isert_rdma_read_done()
1639 cmd->write_data_done = 0; in isert_rdma_read_done()
1642 spin_lock_bh(&cmd->istate_lock); in isert_rdma_read_done()
1643 cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT; in isert_rdma_read_done()
1644 cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT; in isert_rdma_read_done()
1645 spin_unlock_bh(&cmd->istate_lock); in isert_rdma_read_done()
1649 * se_cmd->cmd_kref reference after T10-PI error, and handle in isert_rdma_read_done()
1650 * any non-zero ->queue_status() callback error retries. in isert_rdma_read_done()
1653 transport_generic_request_failure(se_cmd, se_cmd->pi_err); in isert_rdma_read_done()
1663 struct isert_conn *isert_conn = isert_cmd->conn; in isert_do_control_comp()
1664 struct ib_device *ib_dev = isert_conn->cm_id->device; in isert_do_control_comp()
1665 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd; in isert_do_control_comp()
1667 isert_dbg("Cmd %p i_state %d\n", isert_cmd, cmd->i_state); in isert_do_control_comp()
1669 switch (cmd->i_state) { in isert_do_control_comp()
1671 iscsit_tmr_post_handler(cmd, cmd->conn); in isert_do_control_comp()
1675 cmd->i_state = ISTATE_SENT_STATUS; in isert_do_control_comp()
1676 isert_completion_put(&isert_cmd->tx_desc, isert_cmd, in isert_do_control_comp()
1680 iscsit_logout_post_handler(cmd, cmd->conn); in isert_do_control_comp()
1683 isert_err("Unknown i_state %d\n", cmd->i_state); in isert_do_control_comp()
1692 struct isert_conn *isert_conn = wc->qp->qp_context; in isert_login_send_done()
1693 struct ib_device *ib_dev = isert_conn->cm_id->device; in isert_login_send_done()
1694 struct iser_tx_desc *tx_desc = cqe_to_tx_desc(wc->wr_cqe); in isert_login_send_done()
1696 if (unlikely(wc->status != IB_WC_SUCCESS)) { in isert_login_send_done()
1698 if (wc->status != IB_WC_WR_FLUSH_ERR) in isert_login_send_done()
1699 iscsit_cause_connection_reinstatement(isert_conn->conn, 0); in isert_login_send_done()
1708 struct isert_conn *isert_conn = wc->qp->qp_context; in isert_send_done()
1709 struct ib_device *ib_dev = isert_conn->cm_id->device; in isert_send_done()
1710 struct iser_tx_desc *tx_desc = cqe_to_tx_desc(wc->wr_cqe); in isert_send_done()
1713 if (unlikely(wc->status != IB_WC_SUCCESS)) { in isert_send_done()
1715 if (wc->status != IB_WC_WR_FLUSH_ERR) in isert_send_done()
1716 iscsit_cause_connection_reinstatement(isert_conn->conn, 0); in isert_send_done()
1723 switch (isert_cmd->iscsi_cmd->i_state) { in isert_send_done()
1730 INIT_WORK(&isert_cmd->comp_work, isert_do_control_comp); in isert_send_done()
1731 queue_work(isert_comp_wq, &isert_cmd->comp_work); in isert_send_done()
1734 isert_cmd->iscsi_cmd->i_state = ISTATE_SENT_STATUS; in isert_send_done()
1745 ret = isert_post_recv(isert_conn, isert_cmd->rx_desc); in isert_post_response()
1749 ret = ib_post_send(isert_conn->qp, &isert_cmd->tx_desc.send_wr, NULL); in isert_post_response()
1761 struct isert_conn *isert_conn = conn->context; in isert_put_response()
1762 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr; in isert_put_response()
1764 &isert_cmd->tx_desc.iscsi_header; in isert_put_response()
1766 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc); in isert_put_response()
1768 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); in isert_put_response()
1772 if (cmd->se_cmd.sense_buffer && in isert_put_response()
1773 ((cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) || in isert_put_response()
1774 (cmd->se_cmd.se_cmd_flags & SCF_EMULATED_TASK_SENSE))) { in isert_put_response()
1775 struct isert_device *device = isert_conn->device; in isert_put_response()
1776 struct ib_device *ib_dev = device->ib_device; in isert_put_response()
1777 struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1]; in isert_put_response()
1780 put_unaligned_be16(cmd->se_cmd.scsi_sense_length, in isert_put_response()
1781 cmd->sense_buffer); in isert_put_response()
1782 cmd->se_cmd.scsi_sense_length += sizeof(__be16); in isert_put_response()
1784 padding = -(cmd->se_cmd.scsi_sense_length) & 3; in isert_put_response()
1785 hton24(hdr->dlength, (u32)cmd->se_cmd.scsi_sense_length); in isert_put_response()
1786 pdu_len = cmd->se_cmd.scsi_sense_length + padding; in isert_put_response()
1788 isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev, in isert_put_response()
1789 (void *)cmd->sense_buffer, pdu_len, in isert_put_response()
1791 if (ib_dma_mapping_error(ib_dev, isert_cmd->pdu_buf_dma)) in isert_put_response()
1792 return -ENOMEM; in isert_put_response()
1794 isert_cmd->pdu_buf_len = pdu_len; in isert_put_response()
1795 tx_dsg->addr = isert_cmd->pdu_buf_dma; in isert_put_response()
1796 tx_dsg->length = pdu_len; in isert_put_response()
1797 tx_dsg->lkey = device->pd->local_dma_lkey; in isert_put_response()
1798 isert_cmd->tx_desc.num_sge = 2; in isert_put_response()
1812 struct isert_conn *isert_conn = conn->context; in isert_aborted_task()
1814 spin_lock_bh(&conn->cmd_lock); in isert_aborted_task()
1815 if (!list_empty(&cmd->i_conn_node)) in isert_aborted_task()
1816 list_del_init(&cmd->i_conn_node); in isert_aborted_task()
1817 spin_unlock_bh(&conn->cmd_lock); in isert_aborted_task()
1819 if (cmd->data_direction == DMA_TO_DEVICE) in isert_aborted_task()
1827 struct isert_conn *isert_conn = conn->context; in isert_get_sup_prot_ops()
1828 struct isert_device *device = isert_conn->device; in isert_get_sup_prot_ops()
1830 if (conn->tpg->tpg_attrib.t10_pi) { in isert_get_sup_prot_ops()
1831 if (device->pi_capable) { in isert_get_sup_prot_ops()
1833 isert_conn->pi_support = true; in isert_get_sup_prot_ops()
1839 isert_conn->pi_support = false; in isert_get_sup_prot_ops()
1849 struct isert_conn *isert_conn = conn->context; in isert_put_nopin()
1850 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr; in isert_put_nopin()
1852 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc); in isert_put_nopin()
1854 &isert_cmd->tx_desc.iscsi_header, in isert_put_nopin()
1856 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); in isert_put_nopin()
1868 struct isert_conn *isert_conn = conn->context; in isert_put_logout_rsp()
1869 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr; in isert_put_logout_rsp()
1871 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc); in isert_put_logout_rsp()
1873 &isert_cmd->tx_desc.iscsi_header); in isert_put_logout_rsp()
1874 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); in isert_put_logout_rsp()
1886 struct isert_conn *isert_conn = conn->context; in isert_put_tm_rsp()
1887 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr; in isert_put_tm_rsp()
1889 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc); in isert_put_tm_rsp()
1891 &isert_cmd->tx_desc.iscsi_header); in isert_put_tm_rsp()
1892 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); in isert_put_tm_rsp()
1904 struct isert_conn *isert_conn = conn->context; in isert_put_reject()
1905 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr; in isert_put_reject()
1906 struct isert_device *device = isert_conn->device; in isert_put_reject()
1907 struct ib_device *ib_dev = device->ib_device; in isert_put_reject()
1908 struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1]; in isert_put_reject()
1910 (struct iscsi_reject *)&isert_cmd->tx_desc.iscsi_header; in isert_put_reject()
1912 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc); in isert_put_reject()
1914 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); in isert_put_reject()
1916 hton24(hdr->dlength, ISCSI_HDR_LEN); in isert_put_reject()
1917 isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev, in isert_put_reject()
1918 (void *)cmd->buf_ptr, ISCSI_HDR_LEN, in isert_put_reject()
1920 if (ib_dma_mapping_error(ib_dev, isert_cmd->pdu_buf_dma)) in isert_put_reject()
1921 return -ENOMEM; in isert_put_reject()
1922 isert_cmd->pdu_buf_len = ISCSI_HDR_LEN; in isert_put_reject()
1923 tx_dsg->addr = isert_cmd->pdu_buf_dma; in isert_put_reject()
1924 tx_dsg->length = ISCSI_HDR_LEN; in isert_put_reject()
1925 tx_dsg->lkey = device->pd->local_dma_lkey; in isert_put_reject()
1926 isert_cmd->tx_desc.num_sge = 2; in isert_put_reject()
1939 struct isert_conn *isert_conn = conn->context; in isert_put_text_rsp()
1940 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr; in isert_put_text_rsp()
1942 (struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header; in isert_put_text_rsp()
1946 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc); in isert_put_text_rsp()
1952 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); in isert_put_text_rsp()
1955 struct isert_device *device = isert_conn->device; in isert_put_text_rsp()
1956 struct ib_device *ib_dev = device->ib_device; in isert_put_text_rsp()
1957 struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1]; in isert_put_text_rsp()
1958 void *txt_rsp_buf = cmd->buf_ptr; in isert_put_text_rsp()
1960 isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev, in isert_put_text_rsp()
1962 if (ib_dma_mapping_error(ib_dev, isert_cmd->pdu_buf_dma)) in isert_put_text_rsp()
1963 return -ENOMEM; in isert_put_text_rsp()
1965 isert_cmd->pdu_buf_len = txt_rsp_len; in isert_put_text_rsp()
1966 tx_dsg->addr = isert_cmd->pdu_buf_dma; in isert_put_text_rsp()
1967 tx_dsg->length = txt_rsp_len; in isert_put_text_rsp()
1968 tx_dsg->lkey = device->pd->local_dma_lkey; in isert_put_text_rsp()
1969 isert_cmd->tx_desc.num_sge = 2; in isert_put_text_rsp()
1981 domain->sig_type = IB_SIG_TYPE_T10_DIF; in isert_set_dif_domain()
1982 domain->sig.dif.bg_type = IB_T10DIF_CRC; in isert_set_dif_domain()
1983 domain->sig.dif.pi_interval = se_cmd->se_dev->dev_attrib.block_size; in isert_set_dif_domain()
1984 domain->sig.dif.ref_tag = se_cmd->reftag_seed; in isert_set_dif_domain()
1990 domain->sig.dif.apptag_check_mask = 0xffff; in isert_set_dif_domain()
1991 domain->sig.dif.app_escape = true; in isert_set_dif_domain()
1992 domain->sig.dif.ref_escape = true; in isert_set_dif_domain()
1993 if (se_cmd->prot_type == TARGET_DIF_TYPE1_PROT || in isert_set_dif_domain()
1994 se_cmd->prot_type == TARGET_DIF_TYPE2_PROT) in isert_set_dif_domain()
1995 domain->sig.dif.ref_remap = true; in isert_set_dif_domain()
2003 switch (se_cmd->prot_op) { in isert_set_sig_attrs()
2006 sig_attrs->mem.sig_type = IB_SIG_TYPE_NONE; in isert_set_sig_attrs()
2007 isert_set_dif_domain(se_cmd, &sig_attrs->wire); in isert_set_sig_attrs()
2011 sig_attrs->wire.sig_type = IB_SIG_TYPE_NONE; in isert_set_sig_attrs()
2012 isert_set_dif_domain(se_cmd, &sig_attrs->mem); in isert_set_sig_attrs()
2016 isert_set_dif_domain(se_cmd, &sig_attrs->wire); in isert_set_sig_attrs()
2017 isert_set_dif_domain(se_cmd, &sig_attrs->mem); in isert_set_sig_attrs()
2020 isert_err("Unsupported PI operation %d\n", se_cmd->prot_op); in isert_set_sig_attrs()
2021 return -EINVAL; in isert_set_sig_attrs()
2024 if (se_cmd->prot_checks & TARGET_DIF_CHECK_GUARD) in isert_set_sig_attrs()
2025 sig_attrs->check_mask |= IB_SIG_CHECK_GUARD; in isert_set_sig_attrs()
2026 if (se_cmd->prot_checks & TARGET_DIF_CHECK_APPTAG) in isert_set_sig_attrs()
2027 sig_attrs->check_mask |= IB_SIG_CHECK_APPTAG; in isert_set_sig_attrs()
2028 if (se_cmd->prot_checks & TARGET_DIF_CHECK_REFTAG) in isert_set_sig_attrs()
2029 sig_attrs->check_mask |= IB_SIG_CHECK_REFTAG; in isert_set_sig_attrs()
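Note: isert_set_dif_domain()/isert_set_sig_attrs() above build the T10-DIF signature attributes handed to the rdma_rw signature path. A hedged sketch of one case (no PI in local memory, DIF on the wire); block size and ref-tag seed are placeholder parameters.

static void example_fill_sig_attrs(struct ib_sig_attrs *sig, u32 block_size,
				   u32 ref_tag_seed)
{
	struct ib_sig_domain *wire = &sig->wire;

	memset(sig, 0, sizeof(*sig));
	sig->mem.sig_type = IB_SIG_TYPE_NONE;	/* memory side carries no PI */

	wire->sig_type = IB_SIG_TYPE_T10_DIF;
	wire->sig.dif.bg_type = IB_T10DIF_CRC;
	wire->sig.dif.pi_interval = block_size;
	wire->sig.dif.ref_tag = ref_tag_seed;
	wire->sig.dif.apptag_check_mask = 0xffff;
	wire->sig.dif.app_escape = true;
	wire->sig.dif.ref_escape = true;
	wire->sig.dif.ref_remap = true;	/* Type 1/2: ref tag increments per block */

	sig->check_mask = IB_SIG_CHECK_GUARD | IB_SIG_CHECK_REFTAG;
}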
2038 struct se_cmd *se_cmd = &cmd->iscsi_cmd->se_cmd; in isert_rdma_rw_ctx_post()
2039 enum dma_data_direction dir = target_reverse_dma_direction(se_cmd); in isert_rdma_rw_ctx_post() local
2040 u8 port_num = conn->cm_id->port_num; in isert_rdma_rw_ctx_post()
2045 if (cmd->ctx_init_done) in isert_rdma_rw_ctx_post()
2048 if (dir == DMA_FROM_DEVICE) { in isert_rdma_rw_ctx_post()
2049 addr = cmd->write_va; in isert_rdma_rw_ctx_post()
2050 rkey = cmd->write_stag; in isert_rdma_rw_ctx_post()
2051 offset = cmd->iscsi_cmd->write_data_done; in isert_rdma_rw_ctx_post()
2053 addr = cmd->read_va; in isert_rdma_rw_ctx_post()
2054 rkey = cmd->read_stag; in isert_rdma_rw_ctx_post()
2066 ret = rdma_rw_ctx_signature_init(&cmd->rw, conn->qp, port_num, in isert_rdma_rw_ctx_post()
2067 se_cmd->t_data_sg, se_cmd->t_data_nents, in isert_rdma_rw_ctx_post()
2068 se_cmd->t_prot_sg, se_cmd->t_prot_nents, in isert_rdma_rw_ctx_post()
2069 &sig_attrs, addr, rkey, dir); in isert_rdma_rw_ctx_post()
2071 ret = rdma_rw_ctx_init(&cmd->rw, conn->qp, port_num, in isert_rdma_rw_ctx_post()
2072 se_cmd->t_data_sg, se_cmd->t_data_nents, in isert_rdma_rw_ctx_post()
2073 offset, addr, rkey, dir); in isert_rdma_rw_ctx_post()
2081 cmd->ctx_init_done = true; in isert_rdma_rw_ctx_post()
2084 ret = rdma_rw_ctx_post(&cmd->rw, conn->qp, port_num, cqe, chain_wr); in isert_rdma_rw_ctx_post()
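Note: in isert_rdma_rw_ctx_post() above, the DMA direction selects which remote address/rkey pair is used (DMA_FROM_DEVICE issues RDMA Reads from the initiator's write buffer, DMA_TO_DEVICE issues RDMA Writes of the read payload). A hedged sketch of the non-signature init-and-post sequence, with illustrative parameters:

static int example_rw_post(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
			   u32 port_num, struct scatterlist *sg, u32 sg_cnt,
			   u64 remote_addr, u32 rkey,
			   enum dma_data_direction dir, struct ib_cqe *cqe)
{
	int ret;

	ret = rdma_rw_ctx_init(ctx, qp, port_num, sg, sg_cnt,
			       0 /* sg offset */, remote_addr, rkey, dir);
	if (ret < 0)
		return ret;

	/* chain_wr == NULL: rdma_rw signals completion on its last WR */
	return rdma_rw_ctx_post(ctx, qp, port_num, cqe, NULL);
}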
2093 struct se_cmd *se_cmd = &cmd->se_cmd; in isert_put_datain()
2095 struct isert_conn *isert_conn = conn->context; in isert_put_datain()
2101 isert_cmd, se_cmd->data_length); in isert_put_datain()
2104 isert_cmd->tx_desc.tx_cqe.done = isert_rdma_write_done; in isert_put_datain()
2105 cqe = &isert_cmd->tx_desc.tx_cqe; in isert_put_datain()
2108 * Build isert_conn->tx_desc for iSCSI response PDU and attach in isert_put_datain()
2111 &isert_cmd->tx_desc); in isert_put_datain()
2113 &isert_cmd->tx_desc.iscsi_header); in isert_put_datain()
2114 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); in isert_put_datain()
2116 &isert_cmd->tx_desc.send_wr); in isert_put_datain()
2118 rc = isert_post_recv(isert_conn, isert_cmd->rx_desc); in isert_put_datain()
2122 chain_wr = &isert_cmd->tx_desc.send_wr; in isert_put_datain()
2138 isert_cmd, cmd->se_cmd.data_length, cmd->write_data_done); in isert_get_dataout()
2140 isert_cmd->tx_desc.tx_cqe.done = isert_rdma_read_done; in isert_get_dataout()
2141 ret = isert_rdma_rw_ctx_post(isert_cmd, conn->context, in isert_get_dataout()
2142 &isert_cmd->tx_desc.tx_cqe, NULL); in isert_get_dataout()
2157 spin_lock_bh(&conn->cmd_lock); in isert_immediate_queue()
2158 list_del_init(&cmd->i_conn_node); in isert_immediate_queue()
2159 spin_unlock_bh(&conn->cmd_lock); in isert_immediate_queue()
2167 ret = -EINVAL; in isert_immediate_queue()
2177 struct isert_conn *isert_conn = conn->context; in isert_response_queue()
2184 isert_conn->logout_posted = true; in isert_response_queue()
2207 ret = -EINVAL; in isert_response_queue()
2217 struct iscsi_np *np = isert_np->np; in isert_setup_id()
2222 sa = (struct sockaddr *)&np->np_sockaddr; in isert_setup_id()
2223 isert_dbg("ksockaddr: %p, sa: %p\n", &np->np_sockaddr, sa); in isert_setup_id()
2232 isert_dbg("id %p context %p\n", id, id->context); in isert_setup_id()
2273 return -ENOMEM; in isert_setup_np()
2275 sema_init(&isert_np->sem, 0); in isert_setup_np()
2276 mutex_init(&isert_np->mutex); in isert_setup_np()
2277 INIT_LIST_HEAD(&isert_np->accepted); in isert_setup_np()
2278 INIT_LIST_HEAD(&isert_np->pending); in isert_setup_np()
2279 isert_np->np = np; in isert_setup_np()
2282 * Setup the np->np_sockaddr from the passed sockaddr setup in isert_setup_np()
2285 memcpy(&np->np_sockaddr, ksockaddr, in isert_setup_np()
2294 isert_np->cm_id = isert_lid; in isert_setup_np()
2295 np->np_context = isert_np; in isert_setup_np()
2308 struct rdma_cm_id *cm_id = isert_conn->cm_id; in isert_rdma_accept()
2314 cp.initiator_depth = isert_conn->initiator_depth; in isert_rdma_accept()
2320 if (!isert_conn->snd_w_inv) in isert_rdma_accept()
2337 struct isert_conn *isert_conn = conn->context; in isert_get_login_rx()
2341 ret = wait_for_completion_interruptible(&isert_conn->login_req_comp); in isert_get_login_rx()
2347 reinit_completion(&isert_conn->login_req_comp); in isert_get_login_rx()
2351 * kick schedule_delayed_work(&conn->login_work) as the packet is in isert_get_login_rx()
2355 if (!login->first_request) in isert_get_login_rx()
2361 ret = wait_for_completion_interruptible(&isert_conn->login_comp); in isert_get_login_rx()
2365 isert_info("processing login->req: %p\n", login->req); in isert_get_login_rx()
2374 struct rdma_cm_id *cm_id = isert_conn->cm_id; in isert_set_conn_info()
2375 struct rdma_route *cm_route = &cm_id->route; in isert_set_conn_info()
2377 conn->login_family = np->np_sockaddr.ss_family; in isert_set_conn_info()
2379 conn->login_sockaddr = cm_route->addr.dst_addr; in isert_set_conn_info()
2380 conn->local_sockaddr = cm_route->addr.src_addr; in isert_set_conn_info()
2386 struct isert_np *isert_np = np->np_context; in isert_accept_np()
2391 ret = down_interruptible(&isert_np->sem); in isert_accept_np()
2393 return -ENODEV; in isert_accept_np()
2395 spin_lock_bh(&np->np_thread_lock); in isert_accept_np()
2396 if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) { in isert_accept_np()
2397 spin_unlock_bh(&np->np_thread_lock); in isert_accept_np()
2399 np->np_thread_state); in isert_accept_np()
2402 * is in state RESET/SHUTDOWN/EXIT - bail in isert_accept_np()
2404 return -ENODEV; in isert_accept_np()
2406 spin_unlock_bh(&np->np_thread_lock); in isert_accept_np()
2408 mutex_lock(&isert_np->mutex); in isert_accept_np()
2409 if (list_empty(&isert_np->pending)) { in isert_accept_np()
2410 mutex_unlock(&isert_np->mutex); in isert_accept_np()
2413 isert_conn = list_first_entry(&isert_np->pending, in isert_accept_np()
2415 list_del_init(&isert_conn->node); in isert_accept_np()
2416 mutex_unlock(&isert_np->mutex); in isert_accept_np()
2418 conn->context = isert_conn; in isert_accept_np()
2419 isert_conn->conn = conn; in isert_accept_np()
2420 isert_conn->state = ISER_CONN_BOUND; in isert_accept_np()
2432 struct isert_np *isert_np = np->np_context; in isert_free_np()
2435 if (isert_np->cm_id) in isert_free_np()
2436 rdma_destroy_id(isert_np->cm_id); in isert_free_np()
2442 * process. So work-around this by cleaning up what ever piled in isert_free_np()
2445 mutex_lock(&isert_np->mutex); in isert_free_np()
2446 if (!list_empty(&isert_np->pending)) { in isert_free_np()
2449 &isert_np->pending, in isert_free_np()
2452 isert_conn, isert_conn->state); in isert_free_np()
2457 if (!list_empty(&isert_np->accepted)) { in isert_free_np()
2460 &isert_np->accepted, in isert_free_np()
2463 isert_conn, isert_conn->state); in isert_free_np()
2467 mutex_unlock(&isert_np->mutex); in isert_free_np()
2469 np->np_context = NULL; in isert_free_np()
2481 mutex_lock(&isert_conn->mutex); in isert_release_work()
2482 isert_conn->state = ISER_CONN_DOWN; in isert_release_work()
2483 mutex_unlock(&isert_conn->mutex); in isert_release_work()
2492 struct iscsi_conn *conn = isert_conn->conn; in isert_wait4logout()
2496 if (isert_conn->logout_posted) { in isert_wait4logout()
2498 wait_for_completion_timeout(&conn->conn_logout_comp, in isert_wait4logout()
2508 if (conn->sess) { in isert_wait4cmds()
2509 target_stop_session(conn->sess->se_sess); in isert_wait4cmds()
2510 target_wait_for_sess_cmds(conn->sess->se_sess); in isert_wait4cmds()
2515 * isert_put_unsol_pending_cmds() - Drop commands waiting for
2529 spin_lock_bh(&conn->cmd_lock); in isert_put_unsol_pending_cmds()
2530 list_for_each_entry_safe(cmd, tmp, &conn->conn_cmd_list, i_conn_node) { in isert_put_unsol_pending_cmds()
2531 if ((cmd->cmd_flags & ICF_NON_IMMEDIATE_UNSOLICITED_DATA) && in isert_put_unsol_pending_cmds()
2532 (cmd->write_data_done < conn->sess->sess_ops->FirstBurstLength) && in isert_put_unsol_pending_cmds()
2533 (cmd->write_data_done < cmd->se_cmd.data_length)) in isert_put_unsol_pending_cmds()
2534 list_move_tail(&cmd->i_conn_node, &drop_cmd_list); in isert_put_unsol_pending_cmds()
2536 spin_unlock_bh(&conn->cmd_lock); in isert_put_unsol_pending_cmds()
2539 list_del_init(&cmd->i_conn_node); in isert_put_unsol_pending_cmds()
2540 if (cmd->i_state != ISTATE_REMOVE) { in isert_put_unsol_pending_cmds()
2551 struct isert_conn *isert_conn = conn->context; in isert_wait_conn()
2555 mutex_lock(&isert_conn->mutex); in isert_wait_conn()
2557 mutex_unlock(&isert_conn->mutex); in isert_wait_conn()
2559 ib_drain_qp(isert_conn->qp); in isert_wait_conn()
2564 queue_work(isert_release_wq, &isert_conn->release_work); in isert_wait_conn()
2569 struct isert_conn *isert_conn = conn->context; in isert_free_conn()
2571 ib_drain_qp(isert_conn->qp); in isert_free_conn()
2615 return -ENOMEM; in isert_init()
2622 ret = -ENOMEM; in isert_init()
2627 isert_info("iSER_TARGET[0] - Loaded iser_target_transport\n"); in isert_init()
2643 isert_info("iSER_TARGET[0] - Released iser_target_transport\n"); in isert_exit()
2646 MODULE_DESCRIPTION("iSER-Target for mainline target infrastructure");
2647 MODULE_AUTHOR("nab@Linux-iSCSI.org");