Matching lines from drivers/infiniband/ulp/srp/ib_srp.c (Linux InfiniBand SRP initiator)
2 * Copyright (c) 2005 Cisco Systems. All rights reserved.
14 * - Redistributions of source code must retain the above
18 * - Redistributions in binary form must reproduce the above
142 …multiple completion vectors. The default value is the minimum of four times the number of online CPU sockets and the number of completion vectors supported by the HCA.
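A minimal sketch of the default named in the parameter description above, assuming num_online_nodes() as the "online CPU sockets" proxy; the helper and its name are illustrative, not code from this file:

/* Illustrative only: combine the two limits from the ch_count parameter
 * description. num_online_nodes() and ibdev->num_comp_vectors are real
 * kernel symbols.
 */
static int srp_default_ch_count_sketch(struct ib_device *ibdev)
{
	return min_t(int, 4 * num_online_nodes(), ibdev->num_comp_vectors);
}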
169 int tmo = *(int *)kp->arg; in srp_tmo_get()
185 if (kp->arg == &srp_reconnect_delay) in srp_tmo_set()
188 else if (kp->arg == &srp_fast_io_fail_tmo) in srp_tmo_set()
195 *(int *)kp->arg = tmo; in srp_tmo_set()
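The branches above feed validation of the SRP transport timeouts. A hedged sketch of that check using srp_tmo_valid() from scsi_transport_srp (a real exported helper); the example values are arbitrary:

#include <scsi/scsi_transport_srp.h>

/* Illustrative only: srp_tmo_valid() enforces the relationship between
 * reconnect_delay, fast_io_fail_tmo and dev_loss_tmo.
 */
static int srp_tmo_check_sketch(void)
{
	return srp_tmo_valid(10 /* reconnect_delay */,
			     15 /* fast_io_fail_tmo */,
			     600 /* dev_loss_tmo */);
}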
208 return (struct srp_target_port *) host->hostdata; in host_to_target()
213 return host_to_target(host)->target_name; in srp_target_info()
222 (!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) || in srp_target_is_topspin()
223 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui)); in srp_target_is_topspin()
236 iu->buf = kzalloc(size, gfp_mask); in srp_alloc_iu()
237 if (!iu->buf) in srp_alloc_iu()
240 iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size, in srp_alloc_iu()
242 if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma)) in srp_alloc_iu()
245 iu->size = size; in srp_alloc_iu()
246 iu->direction = direction; in srp_alloc_iu()
251 kfree(iu->buf); in srp_alloc_iu()
263 ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size, in srp_free_iu()
264 iu->direction); in srp_free_iu()
265 kfree(iu->buf); in srp_free_iu()
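srp_alloc_iu()/srp_free_iu() above manage DMA-mapped information units. A usage sketch mirroring the sync pattern srp_response_common() uses later in this file; the helper itself is illustrative:

/* Illustrative only: CPU access to iu->buf must be bracketed by DMA
 * sync calls because iu->dma is a streaming mapping.
 */
static void srp_fill_iu_sketch(struct ib_device *ibdev, struct srp_iu *iu,
			       const void *data, size_t len)
{
	ib_dma_sync_single_for_cpu(ibdev, iu->dma, len, DMA_TO_DEVICE);
	memcpy(iu->buf, data, len);
	ib_dma_sync_single_for_device(ibdev, iu->dma, len, DMA_TO_DEVICE);
}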
272 ib_event_msg(event->event), event->event); in srp_qp_event()
283 return -ENOMEM; in srp_init_ib_qp()
285 ret = ib_find_cached_pkey(target->srp_host->srp_dev->dev, in srp_init_ib_qp()
286 target->srp_host->port, in srp_init_ib_qp()
287 be16_to_cpu(target->ib_cm.pkey), in srp_init_ib_qp()
288 &attr->pkey_index); in srp_init_ib_qp()
292 attr->qp_state = IB_QPS_INIT; in srp_init_ib_qp()
293 attr->qp_access_flags = (IB_ACCESS_REMOTE_READ | in srp_init_ib_qp()
295 attr->port_num = target->srp_host->port; in srp_init_ib_qp()
310 struct srp_target_port *target = ch->target; in srp_new_ib_cm_id()
313 new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev, in srp_new_ib_cm_id()
318 if (ch->ib_cm.cm_id) in srp_new_ib_cm_id()
319 ib_destroy_cm_id(ch->ib_cm.cm_id); in srp_new_ib_cm_id()
320 ch->ib_cm.cm_id = new_cm_id; in srp_new_ib_cm_id()
321 if (rdma_cap_opa_ah(target->srp_host->srp_dev->dev, in srp_new_ib_cm_id()
322 target->srp_host->port)) in srp_new_ib_cm_id()
323 ch->ib_cm.path.rec_type = SA_PATH_REC_TYPE_OPA; in srp_new_ib_cm_id()
325 ch->ib_cm.path.rec_type = SA_PATH_REC_TYPE_IB; in srp_new_ib_cm_id()
326 ch->ib_cm.path.sgid = target->sgid; in srp_new_ib_cm_id()
327 ch->ib_cm.path.dgid = target->ib_cm.orig_dgid; in srp_new_ib_cm_id()
328 ch->ib_cm.path.pkey = target->ib_cm.pkey; in srp_new_ib_cm_id()
329 ch->ib_cm.path.service_id = target->ib_cm.service_id; in srp_new_ib_cm_id()
336 struct srp_target_port *target = ch->target; in srp_new_rdma_cm_id()
340 new_cm_id = rdma_create_id(target->net, srp_rdma_cm_handler, ch, in srp_new_rdma_cm_id()
348 init_completion(&ch->done); in srp_new_rdma_cm_id()
349 ret = rdma_resolve_addr(new_cm_id, target->rdma_cm.src_specified ? in srp_new_rdma_cm_id()
350 &target->rdma_cm.src.sa : NULL, in srp_new_rdma_cm_id()
351 &target->rdma_cm.dst.sa, in srp_new_rdma_cm_id()
355 &target->rdma_cm.src, &target->rdma_cm.dst, ret); in srp_new_rdma_cm_id()
358 ret = wait_for_completion_interruptible(&ch->done); in srp_new_rdma_cm_id()
362 ret = ch->status; in srp_new_rdma_cm_id()
365 &target->rdma_cm.dst, ret); in srp_new_rdma_cm_id()
369 swap(ch->rdma_cm.cm_id, new_cm_id); in srp_new_rdma_cm_id()
380 struct srp_target_port *target = ch->target; in srp_new_cm_id()
382 return target->using_rdma_cm ? srp_new_rdma_cm_id(ch) : in srp_new_cm_id()
387 * srp_destroy_fr_pool() - free the resources owned by a pool
398 for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) { in srp_destroy_fr_pool()
399 if (d->mr) in srp_destroy_fr_pool()
400 ib_dereg_mr(d->mr); in srp_destroy_fr_pool()
406 * srp_create_fr_pool() - allocate and initialize a pool for fast registration
419 int i, ret = -EINVAL; in srp_create_fr_pool()
424 ret = -ENOMEM; in srp_create_fr_pool()
428 pool->size = pool_size; in srp_create_fr_pool()
429 pool->max_page_list_len = max_page_list_len; in srp_create_fr_pool()
430 spin_lock_init(&pool->lock); in srp_create_fr_pool()
431 INIT_LIST_HEAD(&pool->free_list); in srp_create_fr_pool()
433 if (device->attrs.kernel_cap_flags & IBK_SG_GAPS_REG) in srp_create_fr_pool()
438 for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) { in srp_create_fr_pool()
442 if (ret == -ENOMEM) in srp_create_fr_pool()
444 dev_name(&device->dev)); in srp_create_fr_pool()
447 d->mr = mr; in srp_create_fr_pool()
448 list_add_tail(&d->entry, &pool->free_list); in srp_create_fr_pool()
463 * srp_fr_pool_get() - obtain a descriptor suitable for fast registration
471 spin_lock_irqsave(&pool->lock, flags); in srp_fr_pool_get()
472 if (!list_empty(&pool->free_list)) { in srp_fr_pool_get()
473 d = list_first_entry(&pool->free_list, typeof(*d), entry); in srp_fr_pool_get()
474 list_del(&d->entry); in srp_fr_pool_get()
476 spin_unlock_irqrestore(&pool->lock, flags); in srp_fr_pool_get()
482 * srp_fr_pool_put() - put an FR descriptor back in the free list
488 * desc->mr->rkey before calling this function.
496 spin_lock_irqsave(&pool->lock, flags); in srp_fr_pool_put()
498 list_add(&desc[i]->entry, &pool->free_list); in srp_fr_pool_put()
499 spin_unlock_irqrestore(&pool->lock, flags); in srp_fr_pool_put()
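A hedged sketch of one get/put cycle through the FR pool above. Per the srp_fr_pool_put() comment, the caller must change desc->mr->rkey before returning a descriptor; ib_inc_rkey()/ib_update_fast_reg_key() are the real verbs helpers srp_map_finish_fr() uses for this later in the file:

/* Illustrative only. */
static int srp_fr_desc_cycle_sketch(struct srp_fr_pool *pool)
{
	struct srp_fr_desc *d = srp_fr_pool_get(pool);

	if (!d)
		return -ENOMEM;
	/* Bump the key byte so stale remote accesses fail. */
	ib_update_fast_reg_key(d->mr, ib_inc_rkey(d->mr->rkey));
	srp_fr_pool_put(pool, &d, 1);
	return 0;
}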
504 struct srp_device *dev = target->srp_host->srp_dev; in srp_alloc_fr_pool()
506 return srp_create_fr_pool(dev->dev, dev->pd, target->mr_pool_size, in srp_alloc_fr_pool()
507 dev->max_pages_per_mr); in srp_alloc_fr_pool()
511 * srp_destroy_qp() - destroy an RDMA queue pair
520 spin_lock_irq(&ch->lock); in srp_destroy_qp()
521 ib_process_cq_direct(ch->send_cq, -1); in srp_destroy_qp()
522 spin_unlock_irq(&ch->lock); in srp_destroy_qp()
524 ib_drain_qp(ch->qp); in srp_destroy_qp()
525 ib_destroy_qp(ch->qp); in srp_destroy_qp()
530 struct srp_target_port *target = ch->target; in srp_create_ch_ib()
531 struct srp_device *dev = target->srp_host->srp_dev; in srp_create_ch_ib()
532 const struct ib_device_attr *attr = &dev->dev->attrs; in srp_create_ch_ib()
537 const int m = 1 + dev->use_fast_reg * target->mr_per_cmd * 2; in srp_create_ch_ib()
542 return -ENOMEM; in srp_create_ch_ib()
545 recv_cq = ib_alloc_cq(dev->dev, ch, target->queue_size + 1, in srp_create_ch_ib()
546 ch->comp_vector, IB_POLL_SOFTIRQ); in srp_create_ch_ib()
552 send_cq = ib_alloc_cq(dev->dev, ch, m * target->queue_size, in srp_create_ch_ib()
553 ch->comp_vector, IB_POLL_DIRECT); in srp_create_ch_ib()
559 init_attr->event_handler = srp_qp_event; in srp_create_ch_ib()
560 init_attr->cap.max_send_wr = m * target->queue_size; in srp_create_ch_ib()
561 init_attr->cap.max_recv_wr = target->queue_size + 1; in srp_create_ch_ib()
562 init_attr->cap.max_recv_sge = 1; in srp_create_ch_ib()
563 init_attr->cap.max_send_sge = min(SRP_MAX_SGE, attr->max_send_sge); in srp_create_ch_ib()
564 init_attr->sq_sig_type = IB_SIGNAL_REQ_WR; in srp_create_ch_ib()
565 init_attr->qp_type = IB_QPT_RC; in srp_create_ch_ib()
566 init_attr->send_cq = send_cq; in srp_create_ch_ib()
567 init_attr->recv_cq = recv_cq; in srp_create_ch_ib()
569 ch->max_imm_sge = min(init_attr->cap.max_send_sge - 1U, 255U); in srp_create_ch_ib()
571 if (target->using_rdma_cm) { in srp_create_ch_ib()
572 ret = rdma_create_qp(ch->rdma_cm.cm_id, dev->pd, init_attr); in srp_create_ch_ib()
573 qp = ch->rdma_cm.cm_id->qp; in srp_create_ch_ib()
575 qp = ib_create_qp(dev->pd, init_attr); in srp_create_ch_ib()
586 dev_name(&dev->dev->dev), ret); in srp_create_ch_ib()
590 if (dev->use_fast_reg) { in srp_create_ch_ib()
594 shost_printk(KERN_WARNING, target->scsi_host, PFX in srp_create_ch_ib()
600 if (ch->qp) in srp_create_ch_ib()
602 if (ch->recv_cq) in srp_create_ch_ib()
603 ib_free_cq(ch->recv_cq); in srp_create_ch_ib()
604 if (ch->send_cq) in srp_create_ch_ib()
605 ib_free_cq(ch->send_cq); in srp_create_ch_ib()
607 ch->qp = qp; in srp_create_ch_ib()
608 ch->recv_cq = recv_cq; in srp_create_ch_ib()
609 ch->send_cq = send_cq; in srp_create_ch_ib()
611 if (dev->use_fast_reg) { in srp_create_ch_ib()
612 if (ch->fr_pool) in srp_create_ch_ib()
613 srp_destroy_fr_pool(ch->fr_pool); in srp_create_ch_ib()
614 ch->fr_pool = fr_pool; in srp_create_ch_ib()
621 if (target->using_rdma_cm) in srp_create_ch_ib()
622 rdma_destroy_qp(ch->rdma_cm.cm_id); in srp_create_ch_ib()
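/* Note on the two CQs created above (grounded in this listing): the
 * receive CQ completes in softirq context (IB_POLL_SOFTIRQ), while the
 * send CQ is IB_POLL_DIRECT and is only reaped explicitly with
 * ib_process_cq_direct() under ch->lock, as srp_destroy_qp() and
 * __srp_get_tx_iu() do.
 */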
639 * invoked. Hence the ch->[rt]x_ring checks.
644 struct srp_device *dev = target->srp_host->srp_dev; in srp_free_ch_ib()
647 if (!ch->target) in srp_free_ch_ib()
650 if (target->using_rdma_cm) { in srp_free_ch_ib()
651 if (ch->rdma_cm.cm_id) { in srp_free_ch_ib()
652 rdma_destroy_id(ch->rdma_cm.cm_id); in srp_free_ch_ib()
653 ch->rdma_cm.cm_id = NULL; in srp_free_ch_ib()
656 if (ch->ib_cm.cm_id) { in srp_free_ch_ib()
657 ib_destroy_cm_id(ch->ib_cm.cm_id); in srp_free_ch_ib()
658 ch->ib_cm.cm_id = NULL; in srp_free_ch_ib()
663 if (!ch->qp) in srp_free_ch_ib()
666 if (dev->use_fast_reg) { in srp_free_ch_ib()
667 if (ch->fr_pool) in srp_free_ch_ib()
668 srp_destroy_fr_pool(ch->fr_pool); in srp_free_ch_ib()
672 ib_free_cq(ch->send_cq); in srp_free_ch_ib()
673 ib_free_cq(ch->recv_cq); in srp_free_ch_ib()
681 ch->target = NULL; in srp_free_ch_ib()
683 ch->qp = NULL; in srp_free_ch_ib()
684 ch->send_cq = ch->recv_cq = NULL; in srp_free_ch_ib()
686 if (ch->rx_ring) { in srp_free_ch_ib()
687 for (i = 0; i < target->queue_size; ++i) in srp_free_ch_ib()
688 srp_free_iu(target->srp_host, ch->rx_ring[i]); in srp_free_ch_ib()
689 kfree(ch->rx_ring); in srp_free_ch_ib()
690 ch->rx_ring = NULL; in srp_free_ch_ib()
692 if (ch->tx_ring) { in srp_free_ch_ib()
693 for (i = 0; i < target->queue_size; ++i) in srp_free_ch_ib()
694 srp_free_iu(target->srp_host, ch->tx_ring[i]); in srp_free_ch_ib()
695 kfree(ch->tx_ring); in srp_free_ch_ib()
696 ch->tx_ring = NULL; in srp_free_ch_ib()
705 struct srp_target_port *target = ch->target; in srp_path_rec_completion()
707 ch->status = status; in srp_path_rec_completion()
709 shost_printk(KERN_ERR, target->scsi_host, in srp_path_rec_completion()
712 ch->ib_cm.path = *pathrec; in srp_path_rec_completion()
713 complete(&ch->done); in srp_path_rec_completion()
718 struct srp_target_port *target = ch->target; in srp_ib_lookup_path()
721 ch->ib_cm.path.numb_path = 1; in srp_ib_lookup_path()
723 init_completion(&ch->done); in srp_ib_lookup_path()
725 ch->ib_cm.path_query_id = ib_sa_path_rec_get(&srp_sa_client, in srp_ib_lookup_path()
726 target->srp_host->srp_dev->dev, in srp_ib_lookup_path()
727 target->srp_host->port, in srp_ib_lookup_path()
728 &ch->ib_cm.path, in srp_ib_lookup_path()
737 ch, &ch->ib_cm.path_query); in srp_ib_lookup_path()
738 if (ch->ib_cm.path_query_id < 0) in srp_ib_lookup_path()
739 return ch->ib_cm.path_query_id; in srp_ib_lookup_path()
741 ret = wait_for_completion_interruptible(&ch->done); in srp_ib_lookup_path()
745 if (ch->status < 0) in srp_ib_lookup_path()
746 shost_printk(KERN_WARNING, target->scsi_host, in srp_ib_lookup_path()
748 ch->ib_cm.path.sgid.raw, ch->ib_cm.path.dgid.raw, in srp_ib_lookup_path()
749 be16_to_cpu(target->ib_cm.pkey), in srp_ib_lookup_path()
750 be64_to_cpu(target->ib_cm.service_id)); in srp_ib_lookup_path()
752 return ch->status; in srp_ib_lookup_path()
757 struct srp_target_port *target = ch->target; in srp_rdma_lookup_path()
760 init_completion(&ch->done); in srp_rdma_lookup_path()
762 ret = rdma_resolve_route(ch->rdma_cm.cm_id, SRP_PATH_REC_TIMEOUT_MS); in srp_rdma_lookup_path()
766 wait_for_completion_interruptible(&ch->done); in srp_rdma_lookup_path()
768 if (ch->status != 0) in srp_rdma_lookup_path()
769 shost_printk(KERN_WARNING, target->scsi_host, in srp_rdma_lookup_path()
772 return ch->status; in srp_rdma_lookup_path()
777 struct srp_target_port *target = ch->target; in srp_lookup_path()
779 return target->using_rdma_cm ? srp_rdma_lookup_path(ch) : in srp_lookup_path()
789 ret = ib_query_port(host->srp_dev->dev, host->port, &attr); in srp_get_subnet_timeout()
795 dev_name(&host->srp_dev->dev->dev), subnet_timeout); in srp_get_subnet_timeout()
803 struct srp_target_port *target = ch->target; in srp_send_req()
810 char *ipi, *tpi; in srp_send_req() local
815 return -ENOMEM; in srp_send_req()
817 req->ib_param.flow_control = 1; in srp_send_req()
818 req->ib_param.retry_count = target->tl_retry_count; in srp_send_req()
824 req->ib_param.responder_resources = 4; in srp_send_req()
825 req->ib_param.rnr_retry_count = 7; in srp_send_req()
826 req->ib_param.max_cm_retries = 15; in srp_send_req()
828 req->ib_req.opcode = SRP_LOGIN_REQ; in srp_send_req()
829 req->ib_req.tag = 0; in srp_send_req()
830 req->ib_req.req_it_iu_len = cpu_to_be32(max_iu_len); in srp_send_req()
831 req->ib_req.req_buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT | in srp_send_req()
833 req->ib_req.req_flags = (multich ? SRP_MULTICHAN_MULTI : in srp_send_req()
836 req->ib_req.req_flags |= SRP_IMMED_REQUESTED; in srp_send_req()
837 req->ib_req.imm_data_offset = cpu_to_be16(SRP_IMM_DATA_OFFSET); in srp_send_req()
840 if (target->using_rdma_cm) { in srp_send_req()
841 req->rdma_param.flow_control = req->ib_param.flow_control; in srp_send_req()
842 req->rdma_param.responder_resources = in srp_send_req()
843 req->ib_param.responder_resources; in srp_send_req()
844 req->rdma_param.initiator_depth = req->ib_param.initiator_depth; in srp_send_req()
845 req->rdma_param.retry_count = req->ib_param.retry_count; in srp_send_req()
846 req->rdma_param.rnr_retry_count = req->ib_param.rnr_retry_count; in srp_send_req()
847 req->rdma_param.private_data = &req->rdma_req; in srp_send_req()
848 req->rdma_param.private_data_len = sizeof(req->rdma_req); in srp_send_req()
850 req->rdma_req.opcode = req->ib_req.opcode; in srp_send_req()
851 req->rdma_req.tag = req->ib_req.tag; in srp_send_req()
852 req->rdma_req.req_it_iu_len = req->ib_req.req_it_iu_len; in srp_send_req()
853 req->rdma_req.req_buf_fmt = req->ib_req.req_buf_fmt; in srp_send_req()
854 req->rdma_req.req_flags = req->ib_req.req_flags; in srp_send_req()
855 req->rdma_req.imm_data_offset = req->ib_req.imm_data_offset; in srp_send_req()
857 ipi = req->rdma_req.initiator_port_id; in srp_send_req()
858 tpi = req->rdma_req.target_port_id; in srp_send_req()
862 subnet_timeout = srp_get_subnet_timeout(target->srp_host); in srp_send_req()
864 req->ib_param.primary_path = &ch->ib_cm.path; in srp_send_req()
865 req->ib_param.alternate_path = NULL; in srp_send_req()
866 req->ib_param.service_id = target->ib_cm.service_id; in srp_send_req()
867 get_random_bytes(&req->ib_param.starting_psn, 4); in srp_send_req()
868 req->ib_param.starting_psn &= 0xffffff; in srp_send_req()
869 req->ib_param.qp_num = ch->qp->qp_num; in srp_send_req()
870 req->ib_param.qp_type = ch->qp->qp_type; in srp_send_req()
871 req->ib_param.local_cm_response_timeout = subnet_timeout + 2; in srp_send_req()
872 req->ib_param.remote_cm_response_timeout = subnet_timeout + 2; in srp_send_req()
873 req->ib_param.private_data = &req->ib_req; in srp_send_req()
874 req->ib_param.private_data_len = sizeof(req->ib_req); in srp_send_req()
876 ipi = req->ib_req.initiator_port_id; in srp_send_req()
877 tpi = req->ib_req.target_port_id; in srp_send_req()
889 if (target->io_class == SRP_REV10_IB_IO_CLASS) { in srp_send_req()
890 memcpy(ipi, &target->sgid.global.interface_id, 8); in srp_send_req()
891 memcpy(ipi + 8, &target->initiator_ext, 8); in srp_send_req()
892 memcpy(tpi, &target->ioc_guid, 8); in srp_send_req()
893 memcpy(tpi + 8, &target->id_ext, 8); in srp_send_req()
895 memcpy(ipi, &target->initiator_ext, 8); in srp_send_req()
896 memcpy(ipi + 8, &target->sgid.global.interface_id, 8); in srp_send_req()
897 memcpy(tpi, &target->id_ext, 8); in srp_send_req()
898 memcpy(tpi + 8, &target->ioc_guid, 8); in srp_send_req()
907 shost_printk(KERN_DEBUG, target->scsi_host, in srp_send_req()
910 be64_to_cpu(target->ioc_guid)); in srp_send_req()
911 memset(ipi, 0, 8); in srp_send_req()
912 memcpy(ipi + 8, &target->srp_host->srp_dev->dev->node_guid, 8); in srp_send_req()
915 if (target->using_rdma_cm) in srp_send_req()
916 status = rdma_connect(ch->rdma_cm.cm_id, &req->rdma_param); in srp_send_req()
918 status = ib_send_cm_req(ch->ib_cm.cm_id, &req->ib_param); in srp_send_req()
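/* Layout recap of the 16-byte port IDs assembled above: for
 * SRP_REV10_IB_IO_CLASS targets,
 *   initiator_port_id = sgid.interface_id || initiator_ext
 *   target_port_id    = ioc_guid          || id_ext
 * while for newer I/O classes the two 8-byte halves are swapped:
 *   initiator_port_id = initiator_ext || sgid.interface_id
 *   target_port_id    = id_ext        || ioc_guid
 */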
929 spin_lock_irq(&target->lock); in srp_queue_remove_work()
930 if (target->state != SRP_TARGET_REMOVED) { in srp_queue_remove_work()
931 target->state = SRP_TARGET_REMOVED; in srp_queue_remove_work()
934 spin_unlock_irq(&target->lock); in srp_queue_remove_work()
937 queue_work(srp_remove_wq, &target->remove_work); in srp_queue_remove_work()
949 for (i = 0; i < target->ch_count; i++) { in srp_disconnect_target()
950 ch = &target->ch[i]; in srp_disconnect_target()
951 ch->connected = false; in srp_disconnect_target()
953 if (target->using_rdma_cm) { in srp_disconnect_target()
954 if (ch->rdma_cm.cm_id) in srp_disconnect_target()
955 rdma_disconnect(ch->rdma_cm.cm_id); in srp_disconnect_target()
957 if (ch->ib_cm.cm_id) in srp_disconnect_target()
958 ret = ib_send_cm_dreq(ch->ib_cm.cm_id, in srp_disconnect_target()
962 shost_printk(KERN_DEBUG, target->scsi_host, in srp_disconnect_target()
971 struct srp_device *dev = target->srp_host->srp_dev; in srp_exit_cmd_priv()
972 struct ib_device *ibdev = dev->dev; in srp_exit_cmd_priv()
975 kfree(req->fr_list); in srp_exit_cmd_priv()
976 if (req->indirect_dma_addr) { in srp_exit_cmd_priv()
977 ib_dma_unmap_single(ibdev, req->indirect_dma_addr, in srp_exit_cmd_priv()
978 target->indirect_size, in srp_exit_cmd_priv()
981 kfree(req->indirect_desc); in srp_exit_cmd_priv()
989 struct srp_device *srp_dev = target->srp_host->srp_dev; in srp_init_cmd_priv()
990 struct ib_device *ibdev = srp_dev->dev; in srp_init_cmd_priv()
993 int ret = -ENOMEM; in srp_init_cmd_priv()
995 if (srp_dev->use_fast_reg) { in srp_init_cmd_priv()
996 req->fr_list = kmalloc_array(target->mr_per_cmd, sizeof(void *), in srp_init_cmd_priv()
998 if (!req->fr_list) in srp_init_cmd_priv()
1001 req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL); in srp_init_cmd_priv()
1002 if (!req->indirect_desc) in srp_init_cmd_priv()
1005 dma_addr = ib_dma_map_single(ibdev, req->indirect_desc, in srp_init_cmd_priv()
1006 target->indirect_size, in srp_init_cmd_priv()
1013 req->indirect_dma_addr = dma_addr; in srp_init_cmd_priv()
1021 * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
1032 for (g = shost->hostt->shost_groups; *g; ++g) { in srp_del_scsi_host_attr()
1033 for (attr = (*g)->attrs; *attr; ++attr) { in srp_del_scsi_host_attr()
1037 device_remove_file(&shost->shost_dev, dev_attr); in srp_del_scsi_host_attr()
1047 WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED); in srp_remove_target()
1049 srp_del_scsi_host_attr(target->scsi_host); in srp_remove_target()
1050 srp_rport_get(target->rport); in srp_remove_target()
1051 srp_remove_host(target->scsi_host); in srp_remove_target()
1052 scsi_remove_host(target->scsi_host); in srp_remove_target()
1053 srp_stop_rport_timers(target->rport); in srp_remove_target()
1055 kobj_ns_drop(KOBJ_NS_TYPE_NET, target->net); in srp_remove_target()
1056 for (i = 0; i < target->ch_count; i++) { in srp_remove_target()
1057 ch = &target->ch[i]; in srp_remove_target()
1060 cancel_work_sync(&target->tl_err_work); in srp_remove_target()
1061 srp_rport_put(target->rport); in srp_remove_target()
1062 kfree(target->ch); in srp_remove_target()
1063 target->ch = NULL; in srp_remove_target()
1065 spin_lock(&target->srp_host->target_lock); in srp_remove_target()
1066 list_del(&target->list); in srp_remove_target()
1067 spin_unlock(&target->srp_host->target_lock); in srp_remove_target()
1069 scsi_host_put(target->scsi_host); in srp_remove_target()
1077 WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED); in srp_remove_work()
1084 struct srp_target_port *target = rport->lld_data; in srp_rport_delete()
1090 * srp_connected_ch() - number of connected channels
1097 for (i = 0; i < target->ch_count; i++) in srp_connected_ch()
1098 c += target->ch[i].connected; in srp_connected_ch()
1106 struct srp_target_port *target = ch->target; in srp_connect_ch()
1116 init_completion(&ch->done); in srp_connect_ch()
1120 ret = wait_for_completion_interruptible(&ch->done); in srp_connect_ch()
1130 ret = ch->status; in srp_connect_ch()
1133 ch->connected = true; in srp_connect_ch()
1146 shost_printk(KERN_ERR, target->scsi_host, PFX in srp_connect_ch()
1148 ret = -ECONNRESET; in srp_connect_ch()
1157 return ret <= 0 ? ret : -ENODEV; in srp_connect_ch()
1176 wr.wr_cqe = &req->reg_cqe; in srp_inv_rkey()
1177 req->reg_cqe.done = srp_inv_rkey_err_done; in srp_inv_rkey()
1178 return ib_post_send(ch->qp, &wr, NULL); in srp_inv_rkey()
1185 struct srp_target_port *target = ch->target; in srp_unmap_data()
1186 struct srp_device *dev = target->srp_host->srp_dev; in srp_unmap_data()
1187 struct ib_device *ibdev = dev->dev; in srp_unmap_data()
1191 (scmnd->sc_data_direction != DMA_TO_DEVICE && in srp_unmap_data()
1192 scmnd->sc_data_direction != DMA_FROM_DEVICE)) in srp_unmap_data()
1195 if (dev->use_fast_reg) { in srp_unmap_data()
1198 for (i = req->nmdesc, pfr = req->fr_list; i > 0; i--, pfr++) { in srp_unmap_data()
1199 res = srp_inv_rkey(req, ch, (*pfr)->mr->rkey); in srp_unmap_data()
1201 shost_printk(KERN_ERR, target->scsi_host, PFX in srp_unmap_data()
1203 (*pfr)->mr->rkey, res); in srp_unmap_data()
1205 &target->tl_err_work); in srp_unmap_data()
1208 if (req->nmdesc) in srp_unmap_data()
1209 srp_fr_pool_put(ch->fr_pool, req->fr_list, in srp_unmap_data()
1210 req->nmdesc); in srp_unmap_data()
1214 scmnd->sc_data_direction); in srp_unmap_data()
1218 * srp_claim_req - Take ownership of the scmnd associated with a request.
1222 * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
1223 * ownership of @req->scmnd if it equals @scmnd.
1235 spin_lock_irqsave(&ch->lock, flags); in srp_claim_req()
1236 if (req->scmnd && in srp_claim_req()
1237 (!sdev || req->scmnd->device == sdev) && in srp_claim_req()
1238 (!scmnd || req->scmnd == scmnd)) { in srp_claim_req()
1239 scmnd = req->scmnd; in srp_claim_req()
1240 req->scmnd = NULL; in srp_claim_req()
1244 spin_unlock_irqrestore(&ch->lock, flags); in srp_claim_req()
1250 * srp_free_req() - Unmap data and adjust ch->req_lim.
1254 * @req_lim_delta: Amount to be added to @target->req_lim.
1263 spin_lock_irqsave(&ch->lock, flags); in srp_free_req()
1264 ch->req_lim += req_lim_delta; in srp_free_req()
1265 spin_unlock_irqrestore(&ch->lock, flags); in srp_free_req()
1275 scmnd->result = result; in srp_finish_req()
1288 struct srp_target_port *target = context->srp_target; in srp_terminate_cmd()
1290 struct srp_rdma_ch *ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)]; in srp_terminate_cmd()
1293 srp_finish_req(ch, req, NULL, context->scsi_result); in srp_terminate_cmd()
1300 struct srp_target_port *target = rport->lld_data; in srp_terminate_io()
1304 scsi_host_busy_iter(target->scsi_host, srp_terminate_cmd, &context); in srp_terminate_io()
1333 * serializes calls of this function via rport->mutex and also blocks
1338 struct srp_target_port *target = rport->lld_data; in srp_rport_reconnect()
1340 uint32_t max_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt, in srp_rport_reconnect()
1342 target->max_it_iu_size); in srp_rport_reconnect()
1348 if (target->state == SRP_TARGET_SCANNING) in srp_rport_reconnect()
1349 return -ENODEV; in srp_rport_reconnect()
1356 for (i = 0; i < target->ch_count; i++) { in srp_rport_reconnect()
1357 ch = &target->ch[i]; in srp_rport_reconnect()
1364 scsi_host_busy_iter(target->scsi_host, srp_terminate_cmd, in srp_rport_reconnect()
1367 for (i = 0; i < target->ch_count; i++) { in srp_rport_reconnect()
1368 ch = &target->ch[i]; in srp_rport_reconnect()
1376 INIT_LIST_HEAD(&ch->free_tx); in srp_rport_reconnect()
1377 for (j = 0; j < target->queue_size; ++j) in srp_rport_reconnect()
1378 list_add(&ch->tx_ring[j]->list, &ch->free_tx); in srp_rport_reconnect()
1381 target->qp_in_error = false; in srp_rport_reconnect()
1383 for (i = 0; i < target->ch_count; i++) { in srp_rport_reconnect()
1384 ch = &target->ch[i]; in srp_rport_reconnect()
1392 shost_printk(KERN_INFO, target->scsi_host, in srp_rport_reconnect()
1401 struct srp_direct_buf *desc = state->desc; in srp_map_desc()
1405 desc->va = cpu_to_be64(dma_addr); in srp_map_desc()
1406 desc->key = cpu_to_be32(rkey); in srp_map_desc()
1407 desc->len = cpu_to_be32(dma_len); in srp_map_desc()
1409 state->total_len += dma_len; in srp_map_desc()
1410 state->desc++; in srp_map_desc()
1411 state->ndesc++; in srp_map_desc()
1420 * Map up to sg_nents elements of state->sg where *sg_offset_p is the offset
1422 * *sg_offset_p is updated to the offset in state->sg[retval] of the first
1430 struct srp_target_port *target = ch->target; in srp_map_finish_fr()
1431 struct srp_device *dev = target->srp_host->srp_dev; in srp_map_finish_fr()
1437 if (state->fr.next >= state->fr.end) { in srp_map_finish_fr()
1438 shost_printk(KERN_ERR, ch->target->scsi_host, in srp_map_finish_fr()
1440 ch->target->mr_per_cmd); in srp_map_finish_fr()
1441 return -ENOMEM; in srp_map_finish_fr()
1444 WARN_ON_ONCE(!dev->use_fast_reg); in srp_map_finish_fr()
1446 if (sg_nents == 1 && target->global_rkey) { in srp_map_finish_fr()
1449 srp_map_desc(state, sg_dma_address(state->sg) + sg_offset, in srp_map_finish_fr()
1450 sg_dma_len(state->sg) - sg_offset, in srp_map_finish_fr()
1451 target->global_rkey); in srp_map_finish_fr()
1457 desc = srp_fr_pool_get(ch->fr_pool); in srp_map_finish_fr()
1459 return -ENOMEM; in srp_map_finish_fr()
1461 rkey = ib_inc_rkey(desc->mr->rkey); in srp_map_finish_fr()
1462 ib_update_fast_reg_key(desc->mr, rkey); in srp_map_finish_fr()
1464 n = ib_map_mr_sg(desc->mr, state->sg, sg_nents, sg_offset_p, in srp_map_finish_fr()
1465 dev->mr_page_size); in srp_map_finish_fr()
1467 srp_fr_pool_put(ch->fr_pool, &desc, 1); in srp_map_finish_fr()
1469 dev_name(&req->scmnd->device->sdev_gendev), sg_nents, in srp_map_finish_fr()
1470 sg_offset_p ? *sg_offset_p : -1, n); in srp_map_finish_fr()
1474 WARN_ON_ONCE(desc->mr->length == 0); in srp_map_finish_fr()
1476 req->reg_cqe.done = srp_reg_mr_err_done; in srp_map_finish_fr()
1480 wr.wr.wr_cqe = &req->reg_cqe; in srp_map_finish_fr()
1483 wr.mr = desc->mr; in srp_map_finish_fr()
1484 wr.key = desc->mr->rkey; in srp_map_finish_fr()
1489 *state->fr.next++ = desc; in srp_map_finish_fr()
1490 state->nmdesc++; in srp_map_finish_fr()
1492 srp_map_desc(state, desc->mr->iova, in srp_map_finish_fr()
1493 desc->mr->length, desc->mr->rkey); in srp_map_finish_fr()
1495 err = ib_post_send(ch->qp, &wr.wr, NULL); in srp_map_finish_fr()
1497 WARN_ON_ONCE(err == -ENOMEM); in srp_map_finish_fr()
1510 state->fr.next = req->fr_list; in srp_map_sg_fr()
1511 state->fr.end = req->fr_list + ch->target->mr_per_cmd; in srp_map_sg_fr()
1512 state->sg = scat; in srp_map_sg_fr()
1524 count -= n; in srp_map_sg_fr()
1526 state->sg = sg_next(state->sg); in srp_map_sg_fr()
1536 struct srp_target_port *target = ch->target; in srp_map_sg_dma()
1542 target->global_rkey); in srp_map_sg_dma()
1559 struct srp_target_port *target = ch->target; in srp_map_idb()
1560 struct srp_device *dev = target->srp_host->srp_dev; in srp_map_idb()
1571 state.base_dma_addr = req->indirect_dma_addr; in srp_map_idb()
1574 if (dev->use_fast_reg) { in srp_map_idb()
1576 sg_init_one(idb_sg, req->indirect_desc, idb_len); in srp_map_idb()
1577 idb_sg->dma_address = req->indirect_dma_addr; /* hack! */ in srp_map_idb()
1579 idb_sg->dma_length = idb_sg->length; /* hack^2 */ in srp_map_idb()
1586 return -EINVAL; in srp_map_idb()
1598 struct srp_device *dev = ch->target->srp_host->srp_dev; in srp_check_mapping()
1603 for (i = 0; i < state->ndesc; i++) in srp_check_mapping()
1604 desc_len += be32_to_cpu(req->indirect_desc[i].len); in srp_check_mapping()
1605 if (dev->use_fast_reg) in srp_check_mapping()
1606 for (i = 0, pfr = req->fr_list; i < state->nmdesc; i++, pfr++) in srp_check_mapping()
1607 mr_len += (*pfr)->mr->length; in srp_check_mapping()
1608 if (desc_len != scsi_bufflen(req->scmnd) || in srp_check_mapping()
1609 mr_len > scsi_bufflen(req->scmnd)) in srp_check_mapping()
1611 scsi_bufflen(req->scmnd), desc_len, mr_len, in srp_check_mapping()
1612 state->ndesc, state->nmdesc); in srp_check_mapping()
1616 * srp_map_data() - map SCSI data buffer onto an SRP request
1628 struct srp_target_port *target = ch->target; in srp_map_data()
1630 struct srp_cmd *cmd = req->cmd->buf; in srp_map_data()
1641 req->cmd->num_sge = 1; in srp_map_data()
1643 if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE) in srp_map_data()
1644 return sizeof(struct srp_cmd) + cmd->add_cdb_len; in srp_map_data()
1646 if (scmnd->sc_data_direction != DMA_FROM_DEVICE && in srp_map_data()
1647 scmnd->sc_data_direction != DMA_TO_DEVICE) { in srp_map_data()
1648 shost_printk(KERN_WARNING, target->scsi_host, in srp_map_data()
1650 scmnd->sc_data_direction); in srp_map_data()
1651 return -EINVAL; in srp_map_data()
1658 dev = target->srp_host->srp_dev; in srp_map_data()
1659 ibdev = dev->dev; in srp_map_data()
1661 count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction); in srp_map_data()
1663 return -EIO; in srp_map_data()
1665 if (ch->use_imm_data && in srp_map_data()
1666 count <= ch->max_imm_sge && in srp_map_data()
1667 SRP_IMM_DATA_OFFSET + data_len <= ch->max_it_iu_len && in srp_map_data()
1668 scmnd->sc_data_direction == DMA_TO_DEVICE) { in srp_map_data()
1670 struct ib_sge *sge = &req->cmd->sge[1]; in srp_map_data()
1674 req->nmdesc = 0; in srp_map_data()
1675 buf = (void *)cmd->add_data + cmd->add_cdb_len; in srp_map_data()
1676 buf->len = cpu_to_be32(data_len); in srp_map_data()
1681 sge[i].lkey = target->lkey; in srp_map_data()
1683 req->cmd->num_sge += count; in srp_map_data()
1688 len = sizeof(struct srp_cmd) + cmd->add_cdb_len + in srp_map_data()
1691 if (count == 1 && target->global_rkey) { in srp_map_data()
1700 buf = (void *)cmd->add_data + cmd->add_cdb_len; in srp_map_data()
1701 buf->va = cpu_to_be64(sg_dma_address(scat)); in srp_map_data()
1702 buf->key = cpu_to_be32(target->global_rkey); in srp_map_data()
1703 buf->len = cpu_to_be32(sg_dma_len(scat)); in srp_map_data()
1705 req->nmdesc = 0; in srp_map_data()
1713 indirect_hdr = (void *)cmd->add_data + cmd->add_cdb_len; in srp_map_data()
1715 ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr, in srp_map_data()
1716 target->indirect_size, DMA_TO_DEVICE); in srp_map_data()
1719 state.desc = req->indirect_desc; in srp_map_data()
1720 if (dev->use_fast_reg) in srp_map_data()
1724 req->nmdesc = state.nmdesc; in srp_map_data()
1743 * Memory registration collapsed the sg-list into one entry, in srp_map_data()
1748 buf = (void *)cmd->add_data + cmd->add_cdb_len; in srp_map_data()
1749 *buf = req->indirect_desc[0]; in srp_map_data()
1753 if (unlikely(target->cmd_sg_cnt < state.ndesc && in srp_map_data()
1754 !target->allow_ext_sg)) { in srp_map_data()
1755 shost_printk(KERN_ERR, target->scsi_host, in srp_map_data()
1757 ret = -EIO; in srp_map_data()
1761 count = min(state.ndesc, target->cmd_sg_cnt); in srp_map_data()
1766 len = sizeof(struct srp_cmd) + cmd->add_cdb_len + in srp_map_data()
1770 memcpy(indirect_hdr->desc_list, req->indirect_desc, in srp_map_data()
1773 if (!target->global_rkey) { in srp_map_data()
1778 req->nmdesc++; in srp_map_data()
1780 idb_rkey = cpu_to_be32(target->global_rkey); in srp_map_data()
1783 indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr); in srp_map_data()
1784 indirect_hdr->table_desc.key = idb_rkey; in srp_map_data()
1785 indirect_hdr->table_desc.len = cpu_to_be32(table_len); in srp_map_data()
1786 indirect_hdr->len = cpu_to_be32(state.total_len); in srp_map_data()
1788 if (scmnd->sc_data_direction == DMA_TO_DEVICE) in srp_map_data()
1789 cmd->data_out_desc_cnt = count; in srp_map_data()
1791 cmd->data_in_desc_cnt = count; in srp_map_data()
1793 ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len, in srp_map_data()
1797 if (scmnd->sc_data_direction == DMA_TO_DEVICE) in srp_map_data()
1798 cmd->buf_fmt = fmt << 4; in srp_map_data()
1800 cmd->buf_fmt = fmt; in srp_map_data()
1806 if (ret == -ENOMEM && req->nmdesc >= target->mr_pool_size) in srp_map_data()
1807 ret = -E2BIG; in srp_map_data()
1819 spin_lock_irqsave(&ch->lock, flags); in srp_put_tx_iu()
1820 list_add(&iu->list, &ch->free_tx); in srp_put_tx_iu()
1822 ++ch->req_lim; in srp_put_tx_iu()
1823 spin_unlock_irqrestore(&ch->lock, flags); in srp_put_tx_iu()
1827 * Must be called with ch->lock held to protect req_lim and free_tx.
1833 * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
1835 * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
1836 * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
1842 struct srp_target_port *target = ch->target; in __srp_get_tx_iu()
1846 lockdep_assert_held(&ch->lock); in __srp_get_tx_iu()
1848 ib_process_cq_direct(ch->send_cq, -1); in __srp_get_tx_iu()
1850 if (list_empty(&ch->free_tx)) in __srp_get_tx_iu()
1855 if (ch->req_lim <= rsv) { in __srp_get_tx_iu()
1856 ++target->zero_req_lim; in __srp_get_tx_iu()
1860 --ch->req_lim; in __srp_get_tx_iu()
1863 iu = list_first_entry(&ch->free_tx, struct srp_iu, list); in __srp_get_tx_iu()
1864 list_del(&iu->list); in __srp_get_tx_iu()
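A sketch of the credit reservation the comment above describes; the helper is illustrative, but the rule matches the "ch->req_lim <= rsv" test in __srp_get_tx_iu():

/* Illustrative only: every IU type except task management must leave
 * SRP_TSK_MGMT_SQ_SIZE request credits free; SRP_IU_RSP skips the
 * check entirely because the target reserved that credit for us.
 */
static s32 srp_tx_iu_rsv_sketch(enum srp_iu_type iu_type)
{
	return iu_type == SRP_IU_TSK_MGMT ? 0 : SRP_TSK_MGMT_SQ_SIZE;
}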
1870 * be called without ch->lock being held. If ib_drain_sq() dequeues a WQE
1875 struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe); in srp_send_done()
1876 struct srp_rdma_ch *ch = cq->cq_context; in srp_send_done()
1878 if (unlikely(wc->status != IB_WC_SUCCESS)) { in srp_send_done()
1883 lockdep_assert_held(&ch->lock); in srp_send_done()
1885 list_add(&iu->list, &ch->free_tx); in srp_send_done()
1889 * srp_post_send() - send an SRP information unit
1896 struct srp_target_port *target = ch->target; in srp_post_send()
1899 if (WARN_ON_ONCE(iu->num_sge > SRP_MAX_SGE)) in srp_post_send()
1900 return -EINVAL; in srp_post_send()
1902 iu->sge[0].addr = iu->dma; in srp_post_send()
1903 iu->sge[0].length = len; in srp_post_send()
1904 iu->sge[0].lkey = target->lkey; in srp_post_send()
1906 iu->cqe.done = srp_send_done; in srp_post_send()
1909 wr.wr_cqe = &iu->cqe; in srp_post_send()
1910 wr.sg_list = &iu->sge[0]; in srp_post_send()
1911 wr.num_sge = iu->num_sge; in srp_post_send()
1915 return ib_post_send(ch->qp, &wr, NULL); in srp_post_send()
1920 struct srp_target_port *target = ch->target; in srp_post_recv()
1924 list.addr = iu->dma; in srp_post_recv()
1925 list.length = iu->size; in srp_post_recv()
1926 list.lkey = target->lkey; in srp_post_recv()
1928 iu->cqe.done = srp_recv_done; in srp_post_recv()
1931 wr.wr_cqe = &iu->cqe; in srp_post_recv()
1935 return ib_post_recv(ch->qp, &wr, NULL); in srp_post_recv()
1940 struct srp_target_port *target = ch->target; in srp_process_rsp()
1945 if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) { in srp_process_rsp()
1946 spin_lock_irqsave(&ch->lock, flags); in srp_process_rsp()
1947 ch->req_lim += be32_to_cpu(rsp->req_lim_delta); in srp_process_rsp()
1948 if (rsp->tag == ch->tsk_mgmt_tag) { in srp_process_rsp()
1949 ch->tsk_mgmt_status = -1; in srp_process_rsp()
1950 if (be32_to_cpu(rsp->resp_data_len) >= 4) in srp_process_rsp()
1951 ch->tsk_mgmt_status = rsp->data[3]; in srp_process_rsp()
1952 complete(&ch->tsk_mgmt_done); in srp_process_rsp()
1954 shost_printk(KERN_ERR, target->scsi_host, in srp_process_rsp()
1956 rsp->tag); in srp_process_rsp()
1958 spin_unlock_irqrestore(&ch->lock, flags); in srp_process_rsp()
1960 scmnd = scsi_host_find_tag(target->scsi_host, rsp->tag); in srp_process_rsp()
1966 shost_printk(KERN_ERR, target->scsi_host, in srp_process_rsp()
1968 rsp->tag, ch - target->ch, ch->qp->qp_num); in srp_process_rsp()
1970 spin_lock_irqsave(&ch->lock, flags); in srp_process_rsp()
1971 ch->req_lim += be32_to_cpu(rsp->req_lim_delta); in srp_process_rsp()
1972 spin_unlock_irqrestore(&ch->lock, flags); in srp_process_rsp()
1976 scmnd->result = rsp->status; in srp_process_rsp()
1978 if (rsp->flags & SRP_RSP_FLAG_SNSVALID) { in srp_process_rsp()
1979 memcpy(scmnd->sense_buffer, rsp->data + in srp_process_rsp()
1980 be32_to_cpu(rsp->resp_data_len), in srp_process_rsp()
1981 min_t(int, be32_to_cpu(rsp->sense_data_len), in srp_process_rsp()
1985 if (unlikely(rsp->flags & SRP_RSP_FLAG_DIUNDER)) in srp_process_rsp()
1986 scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt)); in srp_process_rsp()
1987 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DIOVER)) in srp_process_rsp()
1988 scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_in_res_cnt)); in srp_process_rsp()
1989 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOUNDER)) in srp_process_rsp()
1990 scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt)); in srp_process_rsp()
1991 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOOVER)) in srp_process_rsp()
1992 scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_out_res_cnt)); in srp_process_rsp()
1995 be32_to_cpu(rsp->req_lim_delta)); in srp_process_rsp()
2004 struct srp_target_port *target = ch->target; in srp_response_common()
2005 struct ib_device *dev = target->srp_host->srp_dev->dev; in srp_response_common()
2010 spin_lock_irqsave(&ch->lock, flags); in srp_response_common()
2011 ch->req_lim += req_delta; in srp_response_common()
2013 spin_unlock_irqrestore(&ch->lock, flags); in srp_response_common()
2016 shost_printk(KERN_ERR, target->scsi_host, PFX in srp_response_common()
2021 iu->num_sge = 1; in srp_response_common()
2022 ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE); in srp_response_common()
2023 memcpy(iu->buf, rsp, len); in srp_response_common()
2024 ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE); in srp_response_common()
2028 shost_printk(KERN_ERR, target->scsi_host, PFX in srp_response_common()
2041 .tag = req->tag, in srp_process_cred_req()
2043 s32 delta = be32_to_cpu(req->req_lim_delta); in srp_process_cred_req()
2046 shost_printk(KERN_ERR, ch->target->scsi_host, PFX in srp_process_cred_req()
2053 struct srp_target_port *target = ch->target; in srp_process_aer_req()
2056 .tag = req->tag, in srp_process_aer_req()
2058 s32 delta = be32_to_cpu(req->req_lim_delta); in srp_process_aer_req()
2060 shost_printk(KERN_ERR, target->scsi_host, PFX in srp_process_aer_req()
2061 "ignoring AER for LUN %llu\n", scsilun_to_int(&req->lun)); in srp_process_aer_req()
2064 shost_printk(KERN_ERR, target->scsi_host, PFX in srp_process_aer_req()
2070 struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe); in srp_recv_done()
2071 struct srp_rdma_ch *ch = cq->cq_context; in srp_recv_done()
2072 struct srp_target_port *target = ch->target; in srp_recv_done()
2073 struct ib_device *dev = target->srp_host->srp_dev->dev; in srp_recv_done()
2077 if (unlikely(wc->status != IB_WC_SUCCESS)) { in srp_recv_done()
2082 ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_ti_iu_len, in srp_recv_done()
2085 opcode = *(u8 *) iu->buf; in srp_recv_done()
2088 shost_printk(KERN_ERR, target->scsi_host, in srp_recv_done()
2091 iu->buf, wc->byte_len, true); in srp_recv_done()
2096 srp_process_rsp(ch, iu->buf); in srp_recv_done()
2100 srp_process_cred_req(ch, iu->buf); in srp_recv_done()
2104 srp_process_aer_req(ch, iu->buf); in srp_recv_done()
2109 shost_printk(KERN_WARNING, target->scsi_host, in srp_recv_done()
2114 shost_printk(KERN_WARNING, target->scsi_host, in srp_recv_done()
2119 ib_dma_sync_single_for_device(dev, iu->dma, ch->max_ti_iu_len, in srp_recv_done()
2124 shost_printk(KERN_ERR, target->scsi_host, in srp_recv_done()
2129 * srp_tl_err_work() - handle a transport layer error
2133 * hence the target->rport test.
2140 if (target->rport) in srp_tl_err_work()
2141 srp_start_tl_fail_timers(target->rport); in srp_tl_err_work()
2147 struct srp_rdma_ch *ch = cq->cq_context; in srp_handle_qp_err()
2148 struct srp_target_port *target = ch->target; in srp_handle_qp_err()
2150 if (ch->connected && !target->qp_in_error) { in srp_handle_qp_err()
2151 shost_printk(KERN_ERR, target->scsi_host, in srp_handle_qp_err()
2153 opname, ib_wc_status_msg(wc->status), wc->status, in srp_handle_qp_err()
2154 wc->wr_cqe); in srp_handle_qp_err()
2155 queue_work(system_long_wq, &target->tl_err_work); in srp_handle_qp_err()
2157 target->qp_in_error = true; in srp_handle_qp_err()
2173 scmnd->result = srp_chkready(target->rport); in srp_queuecommand()
2174 if (unlikely(scmnd->result)) in srp_queuecommand()
2177 WARN_ON_ONCE(rq->tag < 0); in srp_queuecommand()
2179 ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)]; in srp_queuecommand()
2181 spin_lock_irqsave(&ch->lock, flags); in srp_queuecommand()
2183 spin_unlock_irqrestore(&ch->lock, flags); in srp_queuecommand()
2188 dev = target->srp_host->srp_dev->dev; in srp_queuecommand()
2189 ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_it_iu_len, in srp_queuecommand()
2192 cmd = iu->buf; in srp_queuecommand()
2195 cmd->opcode = SRP_CMD; in srp_queuecommand()
2196 int_to_scsilun(scmnd->device->lun, &cmd->lun); in srp_queuecommand()
2197 cmd->tag = tag; in srp_queuecommand()
2198 memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len); in srp_queuecommand()
2199 if (unlikely(scmnd->cmd_len > sizeof(cmd->cdb))) { in srp_queuecommand()
2200 cmd->add_cdb_len = round_up(scmnd->cmd_len - sizeof(cmd->cdb), in srp_queuecommand()
2202 if (WARN_ON_ONCE(cmd->add_cdb_len > SRP_MAX_ADD_CDB_LEN)) in srp_queuecommand()
2206 req->scmnd = scmnd; in srp_queuecommand()
2207 req->cmd = iu; in srp_queuecommand()
2211 shost_printk(KERN_ERR, target->scsi_host, in srp_queuecommand()
2214 * If we ran out of memory descriptors (-ENOMEM) because an in srp_queuecommand()
2216 * max_pages_per_mr sg-list elements, tell the SCSI mid-layer in srp_queuecommand()
2219 scmnd->result = len == -ENOMEM ? in srp_queuecommand()
2224 ib_dma_sync_single_for_device(dev, iu->dma, ch->max_it_iu_len, in srp_queuecommand()
2228 shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n"); in srp_queuecommand()
2229 scmnd->result = DID_ERROR << 16; in srp_queuecommand()
2245 req->scmnd = NULL; in srp_queuecommand()
2248 if (scmnd->result) { in srp_queuecommand()
2264 struct srp_target_port *target = ch->target; in srp_alloc_iu_bufs()
2267 ch->rx_ring = kcalloc(target->queue_size, sizeof(*ch->rx_ring), in srp_alloc_iu_bufs()
2269 if (!ch->rx_ring) in srp_alloc_iu_bufs()
2271 ch->tx_ring = kcalloc(target->queue_size, sizeof(*ch->tx_ring), in srp_alloc_iu_bufs()
2273 if (!ch->tx_ring) in srp_alloc_iu_bufs()
2276 for (i = 0; i < target->queue_size; ++i) { in srp_alloc_iu_bufs()
2277 ch->rx_ring[i] = srp_alloc_iu(target->srp_host, in srp_alloc_iu_bufs()
2278 ch->max_ti_iu_len, in srp_alloc_iu_bufs()
2280 if (!ch->rx_ring[i]) in srp_alloc_iu_bufs()
2284 for (i = 0; i < target->queue_size; ++i) { in srp_alloc_iu_bufs()
2285 ch->tx_ring[i] = srp_alloc_iu(target->srp_host, in srp_alloc_iu_bufs()
2286 ch->max_it_iu_len, in srp_alloc_iu_bufs()
2288 if (!ch->tx_ring[i]) in srp_alloc_iu_bufs()
2291 list_add(&ch->tx_ring[i]->list, &ch->free_tx); in srp_alloc_iu_bufs()
2297 for (i = 0; i < target->queue_size; ++i) { in srp_alloc_iu_bufs()
2298 srp_free_iu(target->srp_host, ch->rx_ring[i]); in srp_alloc_iu_bufs()
2299 srp_free_iu(target->srp_host, ch->tx_ring[i]); in srp_alloc_iu_bufs()
2304 kfree(ch->tx_ring); in srp_alloc_iu_bufs()
2305 ch->tx_ring = NULL; in srp_alloc_iu_bufs()
2306 kfree(ch->rx_ring); in srp_alloc_iu_bufs()
2307 ch->rx_ring = NULL; in srp_alloc_iu_bufs()
2309 return -ENOMEM; in srp_alloc_iu_bufs()
2326 * Set target->rq_tmo_jiffies to one second more than the largest time in srp_compute_rq_tmo()
2328 * C9-140..142 in the IBTA spec for more information about how to in srp_compute_rq_tmo()
2331 T_tr_ns = 4096 * (1ULL << qp_attr->timeout); in srp_compute_rq_tmo()
2332 max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns; in srp_compute_rq_tmo()
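/* Worked example of the conversion above (IBTA C9-140..142), assuming
 * qp_attr->timeout = 14 and retry_cnt = 7 (plausible values, not taken
 * from this listing):
 *   T_tr           = 4096 ns * 2^14          ≈ 67.1 ms
 *   max completion = 7 retries * 4 * 67.1 ms ≈ 1.88 s
 * so rq_tmo_jiffies lands just under 2.9 s once the extra second is
 * added.
 */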
2343 struct srp_target_port *target = ch->target; in srp_cm_rep_handler()
2349 if (lrsp->opcode == SRP_LOGIN_RSP) { in srp_cm_rep_handler()
2350 ch->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len); in srp_cm_rep_handler()
2351 ch->req_lim = be32_to_cpu(lrsp->req_lim_delta); in srp_cm_rep_handler()
2352 ch->use_imm_data = srp_use_imm_data && in srp_cm_rep_handler()
2353 (lrsp->rsp_flags & SRP_LOGIN_RSP_IMMED_SUPP); in srp_cm_rep_handler()
2354 ch->max_it_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt, in srp_cm_rep_handler()
2355 ch->use_imm_data, in srp_cm_rep_handler()
2356 target->max_it_iu_size); in srp_cm_rep_handler()
2357 WARN_ON_ONCE(ch->max_it_iu_len > in srp_cm_rep_handler()
2358 be32_to_cpu(lrsp->max_it_iu_len)); in srp_cm_rep_handler()
2360 if (ch->use_imm_data) in srp_cm_rep_handler()
2361 shost_printk(KERN_DEBUG, target->scsi_host, in srp_cm_rep_handler()
2366 * bounce requests back to the SCSI mid-layer. in srp_cm_rep_handler()
2368 target->scsi_host->can_queue in srp_cm_rep_handler()
2369 = min(ch->req_lim - SRP_TSK_MGMT_SQ_SIZE, in srp_cm_rep_handler()
2370 target->scsi_host->can_queue); in srp_cm_rep_handler()
2371 target->scsi_host->cmd_per_lun in srp_cm_rep_handler()
2372 = min_t(int, target->scsi_host->can_queue, in srp_cm_rep_handler()
2373 target->scsi_host->cmd_per_lun); in srp_cm_rep_handler()
2375 shost_printk(KERN_WARNING, target->scsi_host, in srp_cm_rep_handler()
2376 PFX "Unhandled RSP opcode %#x\n", lrsp->opcode); in srp_cm_rep_handler()
2377 ret = -ECONNRESET; in srp_cm_rep_handler()
2381 if (!ch->rx_ring) { in srp_cm_rep_handler()
2387 for (i = 0; i < target->queue_size; i++) { in srp_cm_rep_handler()
2388 struct srp_iu *iu = ch->rx_ring[i]; in srp_cm_rep_handler()
2395 if (!target->using_rdma_cm) { in srp_cm_rep_handler()
2396 ret = -ENOMEM; in srp_cm_rep_handler()
2401 qp_attr->qp_state = IB_QPS_RTR; in srp_cm_rep_handler()
2406 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask); in srp_cm_rep_handler()
2410 qp_attr->qp_state = IB_QPS_RTS; in srp_cm_rep_handler()
2415 target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask); in srp_cm_rep_handler()
2417 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask); in srp_cm_rep_handler()
2428 ch->status = ret; in srp_cm_rep_handler()
2435 struct srp_target_port *target = ch->target; in srp_ib_cm_rej_handler()
2436 struct Scsi_Host *shost = target->scsi_host; in srp_ib_cm_rej_handler()
2441 switch (event->param.rej_rcvd.reason) { in srp_ib_cm_rej_handler()
2443 cpi = event->param.rej_rcvd.ari; in srp_ib_cm_rej_handler()
2444 dlid = be16_to_cpu(cpi->redirect_lid); in srp_ib_cm_rej_handler()
2445 sa_path_set_dlid(&ch->ib_cm.path, dlid); in srp_ib_cm_rej_handler()
2446 ch->ib_cm.path.pkey = cpi->redirect_pkey; in srp_ib_cm_rej_handler()
2447 cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff; in srp_ib_cm_rej_handler()
2448 memcpy(ch->ib_cm.path.dgid.raw, cpi->redirect_gid, 16); in srp_ib_cm_rej_handler()
2450 ch->status = dlid ? SRP_DLID_REDIRECT : SRP_PORT_REDIRECT; in srp_ib_cm_rej_handler()
2455 union ib_gid *dgid = &ch->ib_cm.path.dgid; in srp_ib_cm_rej_handler()
2462 memcpy(dgid->raw, event->param.rej_rcvd.ari, 16); in srp_ib_cm_rej_handler()
2466 be64_to_cpu(dgid->global.subnet_prefix), in srp_ib_cm_rej_handler()
2467 be64_to_cpu(dgid->global.interface_id)); in srp_ib_cm_rej_handler()
2469 ch->status = SRP_PORT_REDIRECT; in srp_ib_cm_rej_handler()
2473 ch->status = -ECONNRESET; in srp_ib_cm_rej_handler()
2480 ch->status = -ECONNRESET; in srp_ib_cm_rej_handler()
2484 opcode = *(u8 *) event->private_data; in srp_ib_cm_rej_handler()
2486 struct srp_login_rej *rej = event->private_data; in srp_ib_cm_rej_handler()
2487 u32 reason = be32_to_cpu(rej->reason); in srp_ib_cm_rej_handler()
2495 target->sgid.raw, in srp_ib_cm_rej_handler()
2496 target->ib_cm.orig_dgid.raw, in srp_ib_cm_rej_handler()
2502 ch->status = -ECONNRESET; in srp_ib_cm_rej_handler()
2507 ch->status = SRP_STALE_CONN; in srp_ib_cm_rej_handler()
2512 event->param.rej_rcvd.reason); in srp_ib_cm_rej_handler()
2513 ch->status = -ECONNRESET; in srp_ib_cm_rej_handler()
2520 struct srp_rdma_ch *ch = cm_id->context; in srp_ib_cm_handler()
2521 struct srp_target_port *target = ch->target; in srp_ib_cm_handler()
2524 switch (event->event) { in srp_ib_cm_handler()
2526 shost_printk(KERN_DEBUG, target->scsi_host, in srp_ib_cm_handler()
2529 ch->status = -ECONNRESET; in srp_ib_cm_handler()
2534 srp_cm_rep_handler(cm_id, event->private_data, ch); in srp_ib_cm_handler()
2538 shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n"); in srp_ib_cm_handler()
2545 shost_printk(KERN_WARNING, target->scsi_host, in srp_ib_cm_handler()
2546 PFX "DREQ received - connection closed\n"); in srp_ib_cm_handler()
2547 ch->connected = false; in srp_ib_cm_handler()
2549 shost_printk(KERN_ERR, target->scsi_host, in srp_ib_cm_handler()
2551 queue_work(system_long_wq, &target->tl_err_work); in srp_ib_cm_handler()
2555 shost_printk(KERN_ERR, target->scsi_host, in srp_ib_cm_handler()
2559 ch->status = 0; in srp_ib_cm_handler()
2568 shost_printk(KERN_WARNING, target->scsi_host, in srp_ib_cm_handler()
2569 PFX "Unhandled CM event %d\n", event->event); in srp_ib_cm_handler()
2574 complete(&ch->done); in srp_ib_cm_handler()
2582 struct srp_target_port *target = ch->target; in srp_rdma_cm_rej_handler()
2583 struct Scsi_Host *shost = target->scsi_host; in srp_rdma_cm_rej_handler()
2586 switch (event->status) { in srp_rdma_cm_rej_handler()
2590 ch->status = -ECONNRESET; in srp_rdma_cm_rej_handler()
2594 opcode = *(u8 *) event->param.conn.private_data; in srp_rdma_cm_rej_handler()
2598 event->param.conn.private_data; in srp_rdma_cm_rej_handler()
2599 u32 reason = be32_to_cpu(rej->reason); in srp_rdma_cm_rej_handler()
2612 ch->status = -ECONNRESET; in srp_rdma_cm_rej_handler()
2618 ch->status = SRP_STALE_CONN; in srp_rdma_cm_rej_handler()
2623 event->status); in srp_rdma_cm_rej_handler()
2624 ch->status = -ECONNRESET; in srp_rdma_cm_rej_handler()
2632 struct srp_rdma_ch *ch = cm_id->context; in srp_rdma_cm_handler()
2633 struct srp_target_port *target = ch->target; in srp_rdma_cm_handler()
2636 switch (event->event) { in srp_rdma_cm_handler()
2638 ch->status = 0; in srp_rdma_cm_handler()
2643 ch->status = -ENXIO; in srp_rdma_cm_handler()
2648 ch->status = 0; in srp_rdma_cm_handler()
2654 ch->status = -EHOSTUNREACH; in srp_rdma_cm_handler()
2659 shost_printk(KERN_DEBUG, target->scsi_host, in srp_rdma_cm_handler()
2662 ch->status = -ECONNRESET; in srp_rdma_cm_handler()
2667 srp_cm_rep_handler(NULL, event->param.conn.private_data, ch); in srp_rdma_cm_handler()
2671 shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n"); in srp_rdma_cm_handler()
2678 if (ch->connected) { in srp_rdma_cm_handler()
2679 shost_printk(KERN_WARNING, target->scsi_host, in srp_rdma_cm_handler()
2681 rdma_disconnect(ch->rdma_cm.cm_id); in srp_rdma_cm_handler()
2683 ch->status = 0; in srp_rdma_cm_handler()
2684 queue_work(system_long_wq, &target->tl_err_work); in srp_rdma_cm_handler()
2689 shost_printk(KERN_ERR, target->scsi_host, in srp_rdma_cm_handler()
2693 ch->status = 0; in srp_rdma_cm_handler()
2697 shost_printk(KERN_WARNING, target->scsi_host, in srp_rdma_cm_handler()
2698 PFX "Unhandled CM event %d\n", event->event); in srp_rdma_cm_handler()
2703 complete(&ch->done); in srp_rdma_cm_handler()
2709 * srp_change_queue_depth - setting device queue depth
2718 if (!sdev->tagged_supported) in srp_change_queue_depth()
2726 struct srp_target_port *target = ch->target; in srp_send_tsk_mgmt()
2727 struct srp_rport *rport = target->rport; in srp_send_tsk_mgmt()
2728 struct ib_device *dev = target->srp_host->srp_dev->dev; in srp_send_tsk_mgmt()
2733 if (!ch->connected || target->qp_in_error) in srp_send_tsk_mgmt()
2734 return -1; in srp_send_tsk_mgmt()
2740 mutex_lock(&rport->mutex); in srp_send_tsk_mgmt()
2741 spin_lock_irq(&ch->lock); in srp_send_tsk_mgmt()
2743 spin_unlock_irq(&ch->lock); in srp_send_tsk_mgmt()
2746 mutex_unlock(&rport->mutex); in srp_send_tsk_mgmt()
2748 return -1; in srp_send_tsk_mgmt()
2751 iu->num_sge = 1; in srp_send_tsk_mgmt()
2753 ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt, in srp_send_tsk_mgmt()
2755 tsk_mgmt = iu->buf; in srp_send_tsk_mgmt()
2758 tsk_mgmt->opcode = SRP_TSK_MGMT; in srp_send_tsk_mgmt()
2759 int_to_scsilun(lun, &tsk_mgmt->lun); in srp_send_tsk_mgmt()
2760 tsk_mgmt->tsk_mgmt_func = func; in srp_send_tsk_mgmt()
2761 tsk_mgmt->task_tag = req_tag; in srp_send_tsk_mgmt()
2763 spin_lock_irq(&ch->lock); in srp_send_tsk_mgmt()
2764 ch->tsk_mgmt_tag = (ch->tsk_mgmt_tag + 1) | SRP_TAG_TSK_MGMT; in srp_send_tsk_mgmt()
2765 tsk_mgmt->tag = ch->tsk_mgmt_tag; in srp_send_tsk_mgmt()
2766 spin_unlock_irq(&ch->lock); in srp_send_tsk_mgmt()
2768 init_completion(&ch->tsk_mgmt_done); in srp_send_tsk_mgmt()
2770 ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt, in srp_send_tsk_mgmt()
2774 mutex_unlock(&rport->mutex); in srp_send_tsk_mgmt()
2776 return -1; in srp_send_tsk_mgmt()
2778 res = wait_for_completion_timeout(&ch->tsk_mgmt_done, in srp_send_tsk_mgmt()
2781 *status = ch->tsk_mgmt_status; in srp_send_tsk_mgmt()
2782 mutex_unlock(&rport->mutex); in srp_send_tsk_mgmt()
2786 return res > 0 ? 0 : -1; in srp_send_tsk_mgmt()
2791 struct srp_target_port *target = host_to_target(scmnd->device->host); in srp_abort()
2798 shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n"); in srp_abort()
2802 if (WARN_ON_ONCE(ch_idx >= target->ch_count)) in srp_abort()
2804 ch = &target->ch[ch_idx]; in srp_abort()
2807 shost_printk(KERN_ERR, target->scsi_host, in srp_abort()
2809 if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun, in srp_abort()
2812 else if (target->rport->state == SRP_RPORT_LOST) in srp_abort()
2818 scmnd->result = DID_ABORT << 16; in srp_abort()
2827 struct srp_target_port *target = host_to_target(scmnd->device->host); in srp_reset_device()
2831 shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n"); in srp_reset_device()
2833 ch = &target->ch[0]; in srp_reset_device()
2834 if (srp_send_tsk_mgmt(ch, SRP_TAG_NO_REQ, scmnd->device->lun, in srp_reset_device()
2845 struct srp_target_port *target = host_to_target(scmnd->device->host); in srp_reset_host()
2847 shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n"); in srp_reset_host()
2849 return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED; in srp_reset_host()
2854 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); in srp_target_alloc()
2857 if (target->target_can_queue) in srp_target_alloc()
2858 starget->can_queue = target->target_can_queue; in srp_target_alloc()
2864 struct Scsi_Host *shost = sdev->host; in srp_slave_configure()
2866 struct request_queue *q = sdev->request_queue; in srp_slave_configure()
2869 if (sdev->type == TYPE_DISK) { in srp_slave_configure()
2870 timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies); in srp_slave_configure()
2882 return sysfs_emit(buf, "0x%016llx\n", be64_to_cpu(target->id_ext)); in id_ext_show()
2892 return sysfs_emit(buf, "0x%016llx\n", be64_to_cpu(target->ioc_guid)); in ioc_guid_show()
2902 if (target->using_rdma_cm) in service_id_show()
2903 return -ENOENT; in service_id_show()
2905 be64_to_cpu(target->ib_cm.service_id)); in service_id_show()
2915 if (target->using_rdma_cm) in pkey_show()
2916 return -ENOENT; in pkey_show()
2918 return sysfs_emit(buf, "0x%04x\n", be16_to_cpu(target->ib_cm.pkey)); in pkey_show()
2928 return sysfs_emit(buf, "%pI6\n", target->sgid.raw); in sgid_show()
2937 struct srp_rdma_ch *ch = &target->ch[0]; in dgid_show()
2939 if (target->using_rdma_cm) in dgid_show()
2940 return -ENOENT; in dgid_show()
2942 return sysfs_emit(buf, "%pI6\n", ch->ib_cm.path.dgid.raw); in dgid_show()
2952 if (target->using_rdma_cm) in orig_dgid_show()
2953 return -ENOENT; in orig_dgid_show()
2955 return sysfs_emit(buf, "%pI6\n", target->ib_cm.orig_dgid.raw); in orig_dgid_show()
2967 for (i = 0; i < target->ch_count; i++) { in req_lim_show()
2968 ch = &target->ch[i]; in req_lim_show()
2969 req_lim = min(req_lim, ch->req_lim); in req_lim_show()
2982 return sysfs_emit(buf, "%d\n", target->zero_req_lim); in zero_req_lim_show()
2992 return sysfs_emit(buf, "%u\n", target->srp_host->port); in local_ib_port_show()
3003 dev_name(&target->srp_host->srp_dev->dev->dev)); in local_ib_device_show()
3013 return sysfs_emit(buf, "%d\n", target->ch_count); in ch_count_show()
3023 return sysfs_emit(buf, "%d\n", target->comp_vector); in comp_vector_show()
3033 return sysfs_emit(buf, "%d\n", target->tl_retry_count); in tl_retry_count_show()
3043 return sysfs_emit(buf, "%u\n", target->cmd_sg_cnt); in cmd_sg_entries_show()
3053 return sysfs_emit(buf, "%s\n", target->allow_ext_sg ? "true" : "false"); in allow_ext_sg_show()
3098 .this_id = -1,
3119 * 0 and target->state == SRP_TARGET_REMOVED if asynchronous target port
3121 * 0 and target->state != SRP_TARGET_REMOVED upon success.
/* in srp_add_target() */
	target->state = SRP_TARGET_SCANNING;
	sprintf(target->target_name, "SRP.T10:%016llX",
		be64_to_cpu(target->id_ext));
	if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dev.parent))
		return -ENODEV;
	memcpy(ids.port_id, &target->id_ext, 8);
	memcpy(ids.port_id + 8, &target->ioc_guid, 8);
	rport = srp_rport_add(target->scsi_host, &ids);
		scsi_remove_host(target->scsi_host);
	rport->lld_data = target;
	target->rport = rport;
	spin_lock(&host->target_lock);
	list_add_tail(&target->list, &host->target_list);
	spin_unlock(&host->target_lock);
	scsi_scan_target(&target->scsi_host->shost_gendev,
			 0, target->scsi_id, SCAN_WILD_CARD, SCSI_SCAN_INITIAL);
	if (srp_connected_ch(target) < target->ch_count ||
	    target->qp_in_error) {
		shost_printk(KERN_INFO, target->scsi_host,
			     PFX "SCSI scan failed - removing SCSI host\n");
	pr_debug("%s: SCSI scan succeeded - detected %d LUNs\n",
		 dev_name(&target->scsi_host->shost_gendev),
		 srp_sdev_count(target->scsi_host));
	spin_lock_irq(&target->lock);
	if (target->state == SRP_TARGET_SCANNING)
		target->state = SRP_TARGET_LIVE;
	spin_unlock_irq(&target->lock);
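The two memcpy() calls earlier in srp_add_target() compose the 16-byte SRP transport port identifier: big-endian id_ext followed by big-endian ioc_guid. In the driver both fields are already stored as __be64, so the copies need no byte swap; the standalone userspace check below mimics that layout with htobe64() (glibc) and made-up values:

#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <endian.h>	/* htobe64() */

int main(void)
{
	uint64_t id_ext   = 0x1122334455667788ULL;	/* hypothetical */
	uint64_t ioc_guid = 0x99aabbccddeeff00ULL;	/* hypothetical */
	uint64_t be_id    = htobe64(id_ext);
	uint64_t be_guid  = htobe64(ioc_guid);
	unsigned char port_id[16];
	int i;

	memcpy(port_id, &be_id, 8);		/* bytes 0..7:  id_ext   */
	memcpy(port_id + 8, &be_guid, 8);	/* bytes 8..15: ioc_guid */

	for (i = 0; i < 16; i++)
		printf("%02x", port_id[i]);	/* 1122...7788 99aa...ff00 */
	putchar('\n');
	return 0;
}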
/**
 * srp_conn_unique() - check whether the connection to a target is unique
 */
/* in srp_conn_unique() */
	if (target->state == SRP_TARGET_REMOVED)
	spin_lock(&host->target_lock);
	list_for_each_entry(t, &host->target_list, list) {
		    target->id_ext == t->id_ext &&
		    target->ioc_guid == t->ioc_guid &&
		    target->initiator_ext == t->initiator_ext) {
	spin_unlock(&host->target_lock);
/**
 * srp_parse_in - parse an IP address and port number combination
 * - IPv4: <ip_address>:<port>, e.g. 1.2.3.4:5.
 * - IPv6: \[<ipv6_address>\]:<port>, e.g. [1::2:3%4]:5.
 */
/* in srp_parse_in() */
		return -ENOMEM;
	addr_end = addr + strlen(addr) - 1;
	pr_debug("%s -> %pISpfsc\n", addr_port_str, sa);
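srp_parse_in() accepts both formats listed in the comment, stripping the square brackets around IPv6 literals before handing the address to the kernel's resolver helper. The bracket handling can be illustrated with a small standalone splitter (a hypothetical userspace helper, not the kernel code):

#include <stdio.h>
#include <string.h>

/* Split "1.2.3.4:5" or "[1::2%4]:5" into address and port parts. */
static int split_addr_port(char *s, char **addr, char **port)
{
	char *p;

	if (*s == '[') {		/* IPv6 literal in brackets */
		p = strchr(s, ']');
		if (!p || p[1] != ':')
			return -1;
		*addr = s + 1;
		*p = '\0';
		*port = p + 2;
	} else {			/* IPv4: split at the last ':' */
		p = strrchr(s, ':');
		if (!p)
			return -1;
		*p = '\0';
		*addr = s;
		*port = p + 1;
	}
	return 0;
}

int main(void)
{
	char v4[] = "1.2.3.4:5", v6[] = "[1::2%4]:5";
	char *a, *p;

	if (!split_addr_port(v4, &a, &p))
		printf("addr=%s port=%s\n", a, p);
	if (!split_addr_port(v6, &a, &p))
		printf("addr=%s port=%s\n", a, p);
	return 0;
}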
/* in srp_parse_options(); comments name the option each fragment handles */
	int ret = -EINVAL;
		return -ENOMEM;

	/* id_ext= */
			ret = -ENOMEM;
			target->id_ext = cpu_to_be64(ull);

	/* ioc_guid= */
			ret = -ENOMEM;
			target->ioc_guid = cpu_to_be64(ull);

	/* dgid= */
			ret = -ENOMEM;
			ret = hex2bin(target->ib_cm.orig_dgid.raw, p, 16);

	/* pkey= */
			target->ib_cm.pkey = cpu_to_be16(token);

	/* service_id= */
			ret = -ENOMEM;
			target->ib_cm.service_id = cpu_to_be64(ull);

	/* src= */
			ret = -ENOMEM;
			ret = srp_parse_in(net, &target->rdma_cm.src.ss, p,
			target->rdma_cm.src_specified = true;

	/* dest= */
			ret = -ENOMEM;
			ret = srp_parse_in(net, &target->rdma_cm.dst.ss, p,
				ret = -EINVAL;
			target->using_rdma_cm = true;

	/* max_sect= */
			target->scsi_host->max_sectors = token;

	/* queue_size= */
			target->scsi_host->can_queue = token;
			target->queue_size = token + SRP_RSP_SQ_SIZE +
			target->scsi_host->cmd_per_lun = token;

	/* max_cmd_per_lun= */
			target->scsi_host->cmd_per_lun = token;

	/* target_can_queue= */
			target->target_can_queue = token;

	/* io_class= */
			target->io_class = token;

	/* initiator_ext= */
			ret = -ENOMEM;
			target->initiator_ext = cpu_to_be64(ull);

	/* cmd_sg_entries= */
			target->cmd_sg_cnt = token;

	/* allow_ext_sg= */
			target->allow_ext_sg = !!token;

	/* sg_tablesize= */
			target->sg_tablesize = token;

	/* comp_vector= */
			target->comp_vector = token;

	/* tl_retry_count= */
			target->tl_retry_count = token;

	/* max_it_iu_size= */
			target->max_it_iu_size = token;

	/* ch_count= */
			target->ch_count = token;

	/* final sanity check */
	if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue
		target->scsi_host->cmd_per_lun,
		target->scsi_host->can_queue);
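srp_parse_options() is built around a strsep()/match_token() loop over the comma-separated "name=value" string written to the add_target attribute; each case arm above stores one parsed value into *target. A condensed kernel-style sketch of that loop, with a hypothetical two-entry token table (the real table is much larger):

#include <linux/parser.h>
#include <linux/string.h>
#include <linux/errno.h>

enum { EX_OPT_ID_EXT, EX_OPT_IOC_GUID, EX_OPT_ERR };

static const match_table_t example_tokens = {
	{ EX_OPT_ID_EXT,   "id_ext=%s" },
	{ EX_OPT_IOC_GUID, "ioc_guid=%s" },
	{ EX_OPT_ERR,      NULL }
};

static int example_parse(char *options)
{
	substring_t args[MAX_OPT_ARGS];
	char *p;

	while ((p = strsep(&options, ",\n")) != NULL) {
		if (!*p)
			continue;
		switch (match_token(p, example_tokens, args)) {
		case EX_OPT_ID_EXT:
		case EX_OPT_IOC_GUID:
			/* real driver: match_strdup() + kstrtoull(..., 16, ...) */
			break;
		default:
			return -EINVAL;	/* unknown option */
		}
	}
	return 0;
}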
/* in add_target_store() */
	struct srp_device *srp_dev = host->srp_dev;
	struct ib_device *ibdev = srp_dev->dev;
		return -ENOMEM;
	target_host->transportt = ib_srp_transport_template;
	target_host->max_channel = 0;
	target_host->max_id = 1;
	target_host->max_lun = -1LL;
	target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;
	target_host->max_segment_size = ib_dma_max_seg_size(ibdev);
	if (!(ibdev->attrs.kernel_cap_flags & IBK_SG_GAPS_REG))
		target_host->virt_boundary_mask = ~srp_dev->mr_page_mask;
	target->net = kobj_ns_grab_current(KOBJ_NS_TYPE_NET);
	target->io_class = SRP_REV16A_IB_IO_CLASS;
	target->scsi_host = target_host;
	target->srp_host = host;
	target->lkey = host->srp_dev->pd->local_dma_lkey;
	target->global_rkey = host->srp_dev->global_rkey;
	target->cmd_sg_cnt = cmd_sg_entries;
	target->sg_tablesize = indirect_sg_entries ? : cmd_sg_entries;
	target->allow_ext_sg = allow_ext_sg;
	target->tl_retry_count = 7;
	target->queue_size = SRP_DEFAULT_QUEUE_SIZE;
	scsi_host_get(target->scsi_host);
	ret = mutex_lock_interruptible(&host->add_target_mutex);
	ret = srp_parse_options(target->net, buf, target);
	if (!srp_conn_unique(target->srp_host, target)) {
		if (target->using_rdma_cm) {
			shost_printk(KERN_INFO, target->scsi_host,
				     be64_to_cpu(target->id_ext),
				     be64_to_cpu(target->ioc_guid),
				     &target->rdma_cm.dst);
			shost_printk(KERN_INFO, target->scsi_host,
				     be64_to_cpu(target->id_ext),
				     be64_to_cpu(target->ioc_guid),
				     be64_to_cpu(target->initiator_ext));
		ret = -EEXIST;
	if (!srp_dev->has_fr && !target->allow_ext_sg &&
	    target->cmd_sg_cnt < target->sg_tablesize) {
		target->sg_tablesize = target->cmd_sg_cnt;
	if (srp_dev->use_fast_reg) {
		bool gaps_reg = ibdev->attrs.kernel_cap_flags &
		max_sectors_per_mr = srp_dev->max_pages_per_mr <<
			(ilog2(srp_dev->mr_page_size) - 9);
			(target->scsi_host->max_sectors + 1 +
			 max_sectors_per_mr - 1) / max_sectors_per_mr;
			(target->sg_tablesize +
			 srp_dev->max_pages_per_mr - 1) /
			srp_dev->max_pages_per_mr;
		target->scsi_host->max_sectors, srp_dev->max_pages_per_mr, srp_dev->mr_page_size,
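The two divisions above are ceiling divisions: one computes how many memory regions are needed to cover max_sectors worth of data, the other how many are needed to cover sg_tablesize pages; which applies depends on whether the HCA can register gapped scatterlists. A standalone arithmetic check with typical (made-up) values, 4 KiB MR pages and 256 pages per MR:

#include <stdio.h>

int main(void)
{
	unsigned int max_pages_per_mr = 256, mr_page_shift = 12;
	unsigned int max_sectors = 1024, sg_tablesize = 256;
	/* 512-byte sectors covered by one MR: pages << (page_shift - 9) */
	unsigned int max_sectors_per_mr =
		max_pages_per_mr << (mr_page_shift - 9);
	unsigned int mr_per_io = (max_sectors + 1 + max_sectors_per_mr - 1) /
				 max_sectors_per_mr;
	unsigned int mr_per_sg = (sg_tablesize + max_pages_per_mr - 1) /
				 max_pages_per_mr;

	printf("sectors per MR: %u\n", max_sectors_per_mr);	/* 2048 */
	printf("MRs per I/O:    %u\n", mr_per_io);		/* 1 */
	printf("MRs per SG:     %u\n", mr_per_sg);		/* 1 */
	return 0;
}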
	target_host->sg_tablesize = target->sg_tablesize;
	target->mr_pool_size = target->scsi_host->can_queue * mr_per_cmd;
	target->mr_per_cmd = mr_per_cmd;
	target->indirect_size = target->sg_tablesize *
	max_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt,
				       target->max_it_iu_size);
	INIT_WORK(&target->tl_err_work, srp_tl_err_work);
	INIT_WORK(&target->remove_work, srp_remove_work);
	spin_lock_init(&target->lock);
	ret = rdma_query_gid(ibdev, host->port, 0, &target->sgid);
	ret = -ENOMEM;
	if (target->ch_count == 0) {
		target->ch_count =
				ibdev->num_comp_vectors),
	target->ch = kcalloc(target->ch_count, sizeof(*target->ch),
	if (!target->ch)
	for (ch_idx = 0; ch_idx < target->ch_count; ++ch_idx) {
		ch = &target->ch[ch_idx];
		ch->target = target;
		ch->comp_vector = ch_idx % ibdev->num_comp_vectors;
		spin_lock_init(&ch->lock);
		INIT_LIST_HEAD(&ch->free_tx);
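Channels are spread across the device's completion vectors round-robin (the ch_idx % num_comp_vectors assignment above), so completion interrupt load is balanced even when there are more channels than vectors. A trivial standalone illustration with hypothetical counts:

#include <stdio.h>

int main(void)
{
	int num_comp_vectors = 4, ch_count = 6, i;

	for (i = 0; i < ch_count; i++)
		printf("ch[%d] -> comp_vector %d\n", i, i % num_comp_vectors);
	return 0;
}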
	if (target->using_rdma_cm)
			 &target->rdma_cm.dst);
			 target->ib_cm.orig_dgid.raw);
	shost_printk(KERN_ERR, target->scsi_host,
		     target->ch_count, dst);
	target->ch_count = ch - target->ch;
	target->scsi_host->nr_hw_queues = target->ch_count;
	if (target->state != SRP_TARGET_REMOVED) {
		if (target->using_rdma_cm) {
			shost_printk(KERN_DEBUG, target->scsi_host, PFX
				     be64_to_cpu(target->id_ext),
				     be64_to_cpu(target->ioc_guid),
				     target->sgid.raw, &target->rdma_cm.dst);
			shost_printk(KERN_DEBUG, target->scsi_host, PFX
				     be64_to_cpu(target->id_ext),
				     be64_to_cpu(target->ioc_guid),
				     be16_to_cpu(target->ib_cm.pkey),
				     be64_to_cpu(target->ib_cm.service_id),
				     target->sgid.raw,
				     target->ib_cm.orig_dgid.raw);
	mutex_unlock(&host->add_target_mutex);
	scsi_host_put(target->scsi_host);
	if (target->state != SRP_TARGET_REMOVED)
		kobj_ns_drop(KOBJ_NS_TYPE_NET, target->net);
	scsi_host_put(target->scsi_host);
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
	kfree(target->ch);
/* in ibdev_show() */
	return sysfs_emit(buf, "%s\n", dev_name(&host->srp_dev->dev->dev));

/* in port_show() */
	return sysfs_emit(buf, "%u\n", host->port);
/* in srp_add_port() */
	INIT_LIST_HEAD(&host->target_list);
	spin_lock_init(&host->target_lock);
	mutex_init(&host->add_target_mutex);
	host->srp_dev = device;
	host->port = port;
	device_initialize(&host->dev);
	host->dev.class = &srp_class;
	host->dev.parent = device->dev->dev.parent;
	if (dev_set_name(&host->dev, "srp-%s-%u", dev_name(&device->dev->dev),
	if (device_add(&host->dev))
	device_del(&host->dev);
	put_device(&host->dev);
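Note the error-path ordering in srp_add_port(): once device_initialize() has run, failures must be unwound with put_device() (preceded by device_del() only if device_add() succeeded), so that the class release callback frees the host rather than a direct kfree(). A hypothetical condensed skeleton of that pattern:

#include <linux/device.h>
#include <linux/errno.h>

static int example_add_port(struct srp_host *host, struct device *parent)
{
	device_initialize(&host->dev);	/* refcounted from here on */
	host->dev.parent = parent;
	if (dev_set_name(&host->dev, "srp-example"))
		goto err;
	if (device_add(&host->dev))
		goto err;
	return 0;
err:
	put_device(&host->dev);		/* invokes ->release */
	return -ENODEV;
}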
/* in srp_rename_dev() */
	list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
		snprintf(name, sizeof(name), "srp-%s-%u",
			 dev_name(&device->dev), host->port);
		device_rename(&host->dev, name);
/* in srp_add_one() */
	struct ib_device_attr *attr = &device->attrs;
		return -ENOMEM;
	mr_page_shift = max(12, ffs(attr->page_size_cap) - 1);
	srp_dev->mr_page_size = 1 << mr_page_shift;
	srp_dev->mr_page_mask = ~((u64) srp_dev->mr_page_size - 1);
	max_pages_per_mr = attr->max_mr_size;
	do_div(max_pages_per_mr, srp_dev->mr_page_size);
		 attr->max_mr_size, srp_dev->mr_page_size,
	srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,
	srp_dev->has_fr = (attr->device_cap_flags &
	if (!never_register && !srp_dev->has_fr)
		dev_warn(&device->dev, "FR is not supported\n");
	    attr->max_mr_size >= 2 * srp_dev->mr_page_size)
		srp_dev->use_fast_reg = srp_dev->has_fr;
	if (never_register || !register_always || !srp_dev->has_fr)
	if (srp_dev->use_fast_reg) {
		srp_dev->max_pages_per_mr =
			min_t(u32, srp_dev->max_pages_per_mr,
			      attr->max_fast_reg_page_list_len);
	srp_dev->mr_max_size = srp_dev->mr_page_size *
		srp_dev->max_pages_per_mr;
	pr_debug("%s: mr_page_shift = %d, device->max_mr_size = %#llx, device->max_fast_reg_page_list_len …
		 dev_name(&device->dev), mr_page_shift, attr->max_mr_size,
		 attr->max_fast_reg_page_list_len,
		 srp_dev->max_pages_per_mr, srp_dev->mr_max_size);
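The page-size derivation above picks the smallest MR page size the HCA supports, but never below 4 KiB: ffs(page_size_cap) - 1 yields the lowest set bit of the capability mask, and max_pages_per_mr is max_mr_size divided by that page size. A standalone userspace check with a hypothetical page_size_cap of 0x1000 and a 2 GiB max_mr_size:

#include <stdio.h>
#include <stdint.h>
#include <strings.h>	/* ffs() */

int main(void)
{
	unsigned int page_size_cap = 0x1000;		/* 4 KiB only */
	uint64_t max_mr_size = 1ULL << 31;		/* 2 GiB */
	int mr_page_shift = ffs(page_size_cap) - 1;	/* -> 12 */

	if (mr_page_shift < 12)
		mr_page_shift = 12;	/* mirrors max(12, ...) above */

	uint64_t mr_page_size = 1ULL << mr_page_shift;
	uint64_t mr_page_mask = ~(mr_page_size - 1);
	uint64_t max_pages_per_mr = max_mr_size / mr_page_size;

	printf("shift=%d size=%llu mask=%#llx pages/MR=%llu\n",
	       mr_page_shift,
	       (unsigned long long)mr_page_size,
	       (unsigned long long)mr_page_mask,
	       (unsigned long long)max_pages_per_mr);	/* 524288 */
	return 0;
}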
	INIT_LIST_HEAD(&srp_dev->dev_list);
	srp_dev->dev = device;
	srp_dev->pd = ib_alloc_pd(device, flags);
	if (IS_ERR(srp_dev->pd)) {
		int ret = PTR_ERR(srp_dev->pd);
	srp_dev->global_rkey = srp_dev->pd->unsafe_global_rkey;
	WARN_ON_ONCE(srp_dev->global_rkey == 0);
	list_add_tail(&host->list, &srp_dev->dev_list);
/* in srp_remove_one() */
	list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
		device_del(&host->dev);
		spin_lock(&host->target_lock);
		list_for_each_entry(target, &host->target_list, list)
		spin_unlock(&host->target_lock);
		 * target->tl_err_work so waiting for the remove works to
		put_device(&host->dev);
	ib_dealloc_pd(srp_dev->pd);
/* in srp_init_module() (two distinct allocation-failure paths) */
	ret = -ENOMEM;
	ret = -ENOMEM;