Lines Matching full:target
95 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");
142 …"Number of RDMA channels to use for communication with an SRP target. Using more than one channel …
216 static int srp_target_is_topspin(struct srp_target_port *target) in srp_target_is_topspin() argument
222 (!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) || in srp_target_is_topspin()
223 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui)); in srp_target_is_topspin()
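The srp_target_is_topspin() hits above show the driver comparing the leading bytes of the target's 8-byte ioc_guid against vendor OUI prefixes before enabling the Topspin/Cisco workarounds. A minimal userspace sketch of that prefix test follows; treat the OUI byte values and the GUID in main() as placeholders for illustration, not values taken from the listing.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Placeholder 3-byte vendor OUIs; the real driver keeps its own tables. */
    static const uint8_t topspin_oui[3] = { 0x00, 0x05, 0xad };
    static const uint8_t cisco_oui[3]   = { 0x00, 0x1b, 0x0d };

    /* Nonzero when the first three bytes of an 8-byte IOC GUID match either
     * OUI, mirroring the two memcmp() calls in the matched lines. */
    static int guid_has_known_oui(const uint8_t ioc_guid[8])
    {
        return !memcmp(ioc_guid, topspin_oui, sizeof(topspin_oui)) ||
               !memcmp(ioc_guid, cisco_oui, sizeof(cisco_oui));
    }

    int main(void)
    {
        const uint8_t guid[8] = { 0x00, 0x05, 0xad, 0x12, 0x34, 0x56, 0x78, 0x9a };

        printf("workaround applies: %d\n", guid_has_known_oui(guid));
        return 0;
    }

In the driver the result is additionally gated by the topspin_workarounds module parameter shown at the top of the listing.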
275 static int srp_init_ib_qp(struct srp_target_port *target, in srp_init_ib_qp() argument
285 ret = ib_find_cached_pkey(target->srp_host->srp_dev->dev, in srp_init_ib_qp()
286 target->srp_host->port, in srp_init_ib_qp()
287 be16_to_cpu(target->ib_cm.pkey), in srp_init_ib_qp()
295 attr->port_num = target->srp_host->port; in srp_init_ib_qp()
310 struct srp_target_port *target = ch->target; in srp_new_ib_cm_id() local
313 new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev, in srp_new_ib_cm_id()
321 if (rdma_cap_opa_ah(target->srp_host->srp_dev->dev, in srp_new_ib_cm_id()
322 target->srp_host->port)) in srp_new_ib_cm_id()
326 ch->ib_cm.path.sgid = target->sgid; in srp_new_ib_cm_id()
327 ch->ib_cm.path.dgid = target->ib_cm.orig_dgid; in srp_new_ib_cm_id()
328 ch->ib_cm.path.pkey = target->ib_cm.pkey; in srp_new_ib_cm_id()
329 ch->ib_cm.path.service_id = target->ib_cm.service_id; in srp_new_ib_cm_id()
336 struct srp_target_port *target = ch->target; in srp_new_rdma_cm_id() local
340 new_cm_id = rdma_create_id(target->net, srp_rdma_cm_handler, ch, in srp_new_rdma_cm_id()
349 ret = rdma_resolve_addr(new_cm_id, target->rdma_cm.src_specified ? in srp_new_rdma_cm_id()
350 &target->rdma_cm.src.sa : NULL, in srp_new_rdma_cm_id()
351 &target->rdma_cm.dst.sa, in srp_new_rdma_cm_id()
355 &target->rdma_cm.src, &target->rdma_cm.dst, ret); in srp_new_rdma_cm_id()
365 &target->rdma_cm.dst, ret); in srp_new_rdma_cm_id()
380 struct srp_target_port *target = ch->target; in srp_new_cm_id() local
382 return target->using_rdma_cm ? srp_new_rdma_cm_id(ch) : in srp_new_cm_id()
502 static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target) in srp_alloc_fr_pool() argument
504 struct srp_device *dev = target->srp_host->srp_dev; in srp_alloc_fr_pool()
506 return srp_create_fr_pool(dev->dev, dev->pd, target->mr_pool_size, in srp_alloc_fr_pool()
530 struct srp_target_port *target = ch->target; in srp_create_ch_ib() local
531 struct srp_device *dev = target->srp_host->srp_dev; in srp_create_ch_ib()
537 const int m = 1 + dev->use_fast_reg * target->mr_per_cmd * 2; in srp_create_ch_ib()
545 recv_cq = ib_alloc_cq(dev->dev, ch, target->queue_size + 1, in srp_create_ch_ib()
552 send_cq = ib_alloc_cq(dev->dev, ch, m * target->queue_size, in srp_create_ch_ib()
560 init_attr->cap.max_send_wr = m * target->queue_size; in srp_create_ch_ib()
561 init_attr->cap.max_recv_wr = target->queue_size + 1; in srp_create_ch_ib()
571 if (target->using_rdma_cm) { in srp_create_ch_ib()
577 ret = srp_init_ib_qp(target, qp); in srp_create_ch_ib()
591 fr_pool = srp_alloc_fr_pool(target); in srp_create_ch_ib()
594 shost_printk(KERN_WARNING, target->scsi_host, PFX in srp_create_ch_ib()
621 if (target->using_rdma_cm) in srp_create_ch_ib()
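In the srp_create_ch_ib() lines above, the send completion queue and QP are sized as m * queue_size with m = 1 + use_fast_reg * mr_per_cmd * 2, while the receive side gets queue_size + 1 entries. The factor of two per MR presumably covers a registration and a later invalidation work request for each fast-registration MR a command may consume; that interpretation is mine, not stated in the listing. A small sketch of the arithmetic, with made-up parameter values, is below.

    #include <stdio.h>

    /* Hypothetical parameters; the real values come from the device and
     * target configuration referenced in the listing. */
    struct ch_params {
        int use_fast_reg;   /* 0 or 1 */
        int mr_per_cmd;     /* fast-reg MRs one command may consume */
        int queue_size;     /* commands outstanding per channel */
    };

    /* Work-request budget mirroring m = 1 + use_fast_reg * mr_per_cmd * 2. */
    static int max_send_wr(const struct ch_params *p)
    {
        int m = 1 + p->use_fast_reg * p->mr_per_cmd * 2;

        return m * p->queue_size;
    }

    int main(void)
    {
        struct ch_params p = { .use_fast_reg = 1, .mr_per_cmd = 2, .queue_size = 64 };

        /* 1 send + 2 MRs * (reg + inv) = 5 WRs per command -> 320 total. */
        printf("max_send_wr = %d\n", max_send_wr(&p));
        printf("max_recv_wr = %d\n", p.queue_size + 1);
        return 0;
    }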
641 static void srp_free_ch_ib(struct srp_target_port *target, in srp_free_ch_ib() argument
644 struct srp_device *dev = target->srp_host->srp_dev; in srp_free_ch_ib()
647 if (!ch->target) in srp_free_ch_ib()
650 if (target->using_rdma_cm) { in srp_free_ch_ib()
681 ch->target = NULL; in srp_free_ch_ib()
687 for (i = 0; i < target->queue_size; ++i) in srp_free_ch_ib()
688 srp_free_iu(target->srp_host, ch->rx_ring[i]); in srp_free_ch_ib()
693 for (i = 0; i < target->queue_size; ++i) in srp_free_ch_ib()
694 srp_free_iu(target->srp_host, ch->tx_ring[i]); in srp_free_ch_ib()
705 struct srp_target_port *target = ch->target; in srp_path_rec_completion() local
709 shost_printk(KERN_ERR, target->scsi_host, in srp_path_rec_completion()
718 struct srp_target_port *target = ch->target; in srp_ib_lookup_path() local
726 target->srp_host->srp_dev->dev, in srp_ib_lookup_path()
727 target->srp_host->port, in srp_ib_lookup_path()
746 shost_printk(KERN_WARNING, target->scsi_host, in srp_ib_lookup_path()
749 be16_to_cpu(target->ib_cm.pkey), in srp_ib_lookup_path()
750 be64_to_cpu(target->ib_cm.service_id)); in srp_ib_lookup_path()
757 struct srp_target_port *target = ch->target; in srp_rdma_lookup_path() local
769 shost_printk(KERN_WARNING, target->scsi_host, in srp_rdma_lookup_path()
777 struct srp_target_port *target = ch->target; in srp_lookup_path() local
779 return target->using_rdma_cm ? srp_rdma_lookup_path(ch) : in srp_lookup_path()
803 struct srp_target_port *target = ch->target; in srp_send_req() local
818 req->ib_param.retry_count = target->tl_retry_count; in srp_send_req()
840 if (target->using_rdma_cm) { in srp_send_req()
862 subnet_timeout = srp_get_subnet_timeout(target->srp_host); in srp_send_req()
866 req->ib_param.service_id = target->ib_cm.service_id; in srp_send_req()
889 if (target->io_class == SRP_REV10_IB_IO_CLASS) { in srp_send_req()
890 memcpy(ipi, &target->sgid.global.interface_id, 8); in srp_send_req()
891 memcpy(ipi + 8, &target->initiator_ext, 8); in srp_send_req()
892 memcpy(tpi, &target->ioc_guid, 8); in srp_send_req()
893 memcpy(tpi + 8, &target->id_ext, 8); in srp_send_req()
895 memcpy(ipi, &target->initiator_ext, 8); in srp_send_req()
896 memcpy(ipi + 8, &target->sgid.global.interface_id, 8); in srp_send_req()
897 memcpy(tpi, &target->id_ext, 8); in srp_send_req()
898 memcpy(tpi + 8, &target->ioc_guid, 8); in srp_send_req()
906 if (srp_target_is_topspin(target)) { in srp_send_req()
907 shost_printk(KERN_DEBUG, target->scsi_host, in srp_send_req()
909 "activated for target GUID %016llx\n", in srp_send_req()
910 be64_to_cpu(target->ioc_guid)); in srp_send_req()
912 memcpy(ipi + 8, &target->srp_host->srp_dev->dev->node_guid, 8); in srp_send_req()
915 if (target->using_rdma_cm) in srp_send_req()
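The srp_send_req() hits above build the 16-byte SRP initiator (ipi) and target (tpi) port identifiers from the 8-byte initiator_ext, source GID interface ID, id_ext and ioc_guid, swapping the order of the two halves when the target uses the older SRP_REV10_IB_IO_CLASS. The sketch below reproduces that byte layout in plain C; the numeric value assigned to SRP_REV10_IB_IO_CLASS and the identifiers in main() are assumptions for illustration.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define SRP_REV10_IB_IO_CLASS 0xff00 /* assumed value, illustration only */

    /* Assemble the port identifiers the way the matched lines do; all
     * eight-byte inputs are treated as opaque big-endian blobs. */
    static void build_port_ids(uint16_t io_class,
                               const uint8_t sgid_interface_id[8],
                               const uint8_t initiator_ext[8],
                               const uint8_t id_ext[8],
                               const uint8_t ioc_guid[8],
                               uint8_t ipi[16], uint8_t tpi[16])
    {
        if (io_class == SRP_REV10_IB_IO_CLASS) {
            memcpy(ipi, sgid_interface_id, 8);
            memcpy(ipi + 8, initiator_ext, 8);
            memcpy(tpi, ioc_guid, 8);
            memcpy(tpi + 8, id_ext, 8);
        } else {
            memcpy(ipi, initiator_ext, 8);
            memcpy(ipi + 8, sgid_interface_id, 8);
            memcpy(tpi, id_ext, 8);
            memcpy(tpi + 8, ioc_guid, 8);
        }
    }

    int main(void)
    {
        /* Dummy identifiers just to exercise the layout. */
        uint8_t ifid[8] = {1}, iext[8] = {2}, idext[8] = {3}, guid[8] = {4};
        uint8_t ipi[16], tpi[16];

        build_port_ids(0x0100, ifid, iext, idext, guid, ipi, tpi);
        printf("ipi[0]=%u tpi[0]=%u\n", ipi[0], tpi[0]);
        return 0;
    }

The Topspin workaround lines that follow then overwrite the second half of ipi with the local HCA node GUID.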
925 static bool srp_queue_remove_work(struct srp_target_port *target) in srp_queue_remove_work() argument
929 spin_lock_irq(&target->lock); in srp_queue_remove_work()
930 if (target->state != SRP_TARGET_REMOVED) { in srp_queue_remove_work()
931 target->state = SRP_TARGET_REMOVED; in srp_queue_remove_work()
934 spin_unlock_irq(&target->lock); in srp_queue_remove_work()
937 queue_work(srp_remove_wq, &target->remove_work); in srp_queue_remove_work()
942 static void srp_disconnect_target(struct srp_target_port *target) in srp_disconnect_target() argument
949 for (i = 0; i < target->ch_count; i++) { in srp_disconnect_target()
950 ch = &target->ch[i]; in srp_disconnect_target()
953 if (target->using_rdma_cm) { in srp_disconnect_target()
962 shost_printk(KERN_DEBUG, target->scsi_host, in srp_disconnect_target()
970 struct srp_target_port *target = host_to_target(shost); in srp_exit_cmd_priv() local
971 struct srp_device *dev = target->srp_host->srp_dev; in srp_exit_cmd_priv()
978 target->indirect_size, in srp_exit_cmd_priv()
988 struct srp_target_port *target = host_to_target(shost); in srp_init_cmd_priv() local
989 struct srp_device *srp_dev = target->srp_host->srp_dev; in srp_init_cmd_priv()
996 req->fr_list = kmalloc_array(target->mr_per_cmd, sizeof(void *), in srp_init_cmd_priv()
1001 req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL); in srp_init_cmd_priv()
1006 target->indirect_size, in srp_init_cmd_priv()
1035 static void srp_remove_target(struct srp_target_port *target) in srp_remove_target() argument
1040 WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED); in srp_remove_target()
1042 srp_del_scsi_host_attr(target->scsi_host); in srp_remove_target()
1043 srp_rport_get(target->rport); in srp_remove_target()
1044 srp_remove_host(target->scsi_host); in srp_remove_target()
1045 scsi_remove_host(target->scsi_host); in srp_remove_target()
1046 srp_stop_rport_timers(target->rport); in srp_remove_target()
1047 srp_disconnect_target(target); in srp_remove_target()
1048 kobj_ns_drop(KOBJ_NS_TYPE_NET, target->net); in srp_remove_target()
1049 for (i = 0; i < target->ch_count; i++) { in srp_remove_target()
1050 ch = &target->ch[i]; in srp_remove_target()
1051 srp_free_ch_ib(target, ch); in srp_remove_target()
1053 cancel_work_sync(&target->tl_err_work); in srp_remove_target()
1054 srp_rport_put(target->rport); in srp_remove_target()
1055 kfree(target->ch); in srp_remove_target()
1056 target->ch = NULL; in srp_remove_target()
1058 spin_lock(&target->srp_host->target_lock); in srp_remove_target()
1059 list_del(&target->list); in srp_remove_target()
1060 spin_unlock(&target->srp_host->target_lock); in srp_remove_target()
1062 scsi_host_put(target->scsi_host); in srp_remove_target()
1067 struct srp_target_port *target = in srp_remove_work() local
1070 WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED); in srp_remove_work()
1072 srp_remove_target(target); in srp_remove_work()
1077 struct srp_target_port *target = rport->lld_data; in srp_rport_delete() local
1079 srp_queue_remove_work(target); in srp_rport_delete()
1084 * @target: SRP target port.
1086 static int srp_connected_ch(struct srp_target_port *target) in srp_connected_ch() argument
1090 for (i = 0; i < target->ch_count; i++) in srp_connected_ch()
1091 c += target->ch[i].connected; in srp_connected_ch()
1099 struct srp_target_port *target = ch->target; in srp_connect_ch() local
1102 WARN_ON_ONCE(!multich && srp_connected_ch(target) > 0); in srp_connect_ch()
1139 shost_printk(KERN_ERR, target->scsi_host, PFX in srp_connect_ch()
1178 struct srp_target_port *target = ch->target; in srp_unmap_data() local
1179 struct srp_device *dev = target->srp_host->srp_dev; in srp_unmap_data()
1194 shost_printk(KERN_ERR, target->scsi_host, PFX in srp_unmap_data()
1198 &target->tl_err_work); in srp_unmap_data()
1247 * @req_lim_delta: Amount to be added to @target->req_lim.
1282 struct srp_target_port *target = context->srp_target; in srp_terminate_cmd() local
1284 struct srp_rdma_ch *ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)]; in srp_terminate_cmd()
1294 struct srp_target_port *target = rport->lld_data; in srp_terminate_io() local
1295 struct srp_terminate_context context = { .srp_target = target, in srp_terminate_io()
1298 scsi_host_busy_iter(target->scsi_host, srp_terminate_cmd, &context); in srp_terminate_io()
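srp_terminate_cmd() above (and srp_queuecommand() later in the listing) selects the RDMA channel by passing the command's block-layer tag to blk_mq_unique_tag_to_hwq() and indexing target->ch[] with the result. As far as I know the unique tag simply packs the hardware-queue index into its upper 16 bits; the sketch below models that decoding with local stand-in helpers rather than the kernel API.

    #include <stdint.h>
    #include <stdio.h>

    /* Local stand-ins: the unique tag is assumed to be
     * (hwq << 16) | per-queue tag. */
    #define UNIQUE_TAG_BITS 16
    #define UNIQUE_TAG_MASK 0xffffu

    static uint32_t make_unique_tag(uint16_t hwq, uint16_t tag)
    {
        return ((uint32_t)hwq << UNIQUE_TAG_BITS) | tag;
    }

    static uint16_t unique_tag_to_hwq(uint32_t unique_tag)
    {
        return unique_tag >> UNIQUE_TAG_BITS;
    }

    static uint16_t unique_tag_to_tag(uint32_t unique_tag)
    {
        return unique_tag & UNIQUE_TAG_MASK;
    }

    int main(void)
    {
        uint32_t t = make_unique_tag(3, 42);

        /* The driver would index target->ch[hwq] with the decoded value. */
        printf("hwq=%u tag=%u\n", unique_tag_to_hwq(t), unique_tag_to_tag(t));
        return 0;
    }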
1301 /* Calculate maximum initiator to target information unit length. */
1332 struct srp_target_port *target = rport->lld_data; in srp_rport_reconnect() local
1334 uint32_t max_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt, in srp_rport_reconnect()
1336 target->max_it_iu_size); in srp_rport_reconnect()
1340 srp_disconnect_target(target); in srp_rport_reconnect()
1342 if (target->state == SRP_TARGET_SCANNING) in srp_rport_reconnect()
1346 * Now get a new local CM ID so that we avoid confusing the target in in srp_rport_reconnect()
1350 for (i = 0; i < target->ch_count; i++) { in srp_rport_reconnect()
1351 ch = &target->ch[i]; in srp_rport_reconnect()
1356 .srp_target = target, .scsi_result = DID_RESET << 16}; in srp_rport_reconnect()
1358 scsi_host_busy_iter(target->scsi_host, srp_terminate_cmd, in srp_rport_reconnect()
1361 for (i = 0; i < target->ch_count; i++) { in srp_rport_reconnect()
1362 ch = &target->ch[i]; in srp_rport_reconnect()
1371 for (j = 0; j < target->queue_size; ++j) in srp_rport_reconnect()
1375 target->qp_in_error = false; in srp_rport_reconnect()
1377 for (i = 0; i < target->ch_count; i++) { in srp_rport_reconnect()
1378 ch = &target->ch[i]; in srp_rport_reconnect()
1386 shost_printk(KERN_INFO, target->scsi_host, in srp_rport_reconnect()
1424 struct srp_target_port *target = ch->target; in srp_map_finish_fr() local
1425 struct srp_device *dev = target->srp_host->srp_dev; in srp_map_finish_fr()
1432 shost_printk(KERN_ERR, ch->target->scsi_host, in srp_map_finish_fr()
1434 ch->target->mr_per_cmd); in srp_map_finish_fr()
1440 if (sg_nents == 1 && target->global_rkey) { in srp_map_finish_fr()
1445 target->global_rkey); in srp_map_finish_fr()
1505 state->fr.end = req->fr_list + ch->target->mr_per_cmd; in srp_map_sg_fr()
1530 struct srp_target_port *target = ch->target; in srp_map_sg_dma() local
1536 target->global_rkey); in srp_map_sg_dma()
1553 struct srp_target_port *target = ch->target; in srp_map_idb() local
1554 struct srp_device *dev = target->srp_host->srp_dev; in srp_map_idb()
1592 struct srp_device *dev = ch->target->srp_host->srp_dev; in srp_check_mapping()
1622 struct srp_target_port *target = ch->target; in srp_map_data() local
1642 shost_printk(KERN_WARNING, target->scsi_host, in srp_map_data()
1652 dev = target->srp_host->srp_dev; in srp_map_data()
1675 sge[i].lkey = target->lkey; in srp_map_data()
1685 if (count == 1 && target->global_rkey) { in srp_map_data()
1696 buf->key = cpu_to_be32(target->global_rkey); in srp_map_data()
1710 target->indirect_size, DMA_TO_DEVICE); in srp_map_data()
1731 * target is not using an external indirect table, we are in srp_map_data()
1747 if (unlikely(target->cmd_sg_cnt < state.ndesc && in srp_map_data()
1748 !target->allow_ext_sg)) { in srp_map_data()
1749 shost_printk(KERN_ERR, target->scsi_host, in srp_map_data()
1755 count = min(state.ndesc, target->cmd_sg_cnt); in srp_map_data()
1767 if (!target->global_rkey) { in srp_map_data()
1774 idb_rkey = cpu_to_be32(target->global_rkey); in srp_map_data()
1800 if (ret == -ENOMEM && req->nmdesc >= target->mr_pool_size) in srp_map_data()
1830 * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
1836 struct srp_target_port *target = ch->target; in __srp_get_tx_iu() local
1847 /* Initiator responses to target requests do not consume credits */ in __srp_get_tx_iu()
1850 ++target->zero_req_lim; in __srp_get_tx_iu()
1890 struct srp_target_port *target = ch->target; in srp_post_send() local
1898 iu->sge[0].lkey = target->lkey; in srp_post_send()
1914 struct srp_target_port *target = ch->target; in srp_post_recv() local
1920 list.lkey = target->lkey; in srp_post_recv()
1934 struct srp_target_port *target = ch->target; in srp_process_rsp() local
1948 shost_printk(KERN_ERR, target->scsi_host, in srp_process_rsp()
1954 scmnd = scsi_host_find_tag(target->scsi_host, rsp->tag); in srp_process_rsp()
1959 shost_printk(KERN_ERR, target->scsi_host, in srp_process_rsp()
1961 rsp->tag, ch - target->ch, ch->qp->qp_num); in srp_process_rsp()
1997 struct srp_target_port *target = ch->target; in srp_response_common() local
1998 struct ib_device *dev = target->srp_host->srp_dev->dev; in srp_response_common()
2009 shost_printk(KERN_ERR, target->scsi_host, PFX in srp_response_common()
2021 shost_printk(KERN_ERR, target->scsi_host, PFX in srp_response_common()
2039 shost_printk(KERN_ERR, ch->target->scsi_host, PFX in srp_process_cred_req()
2046 struct srp_target_port *target = ch->target; in srp_process_aer_req() local
2053 shost_printk(KERN_ERR, target->scsi_host, PFX in srp_process_aer_req()
2057 shost_printk(KERN_ERR, target->scsi_host, PFX in srp_process_aer_req()
2065 struct srp_target_port *target = ch->target; in srp_recv_done() local
2066 struct ib_device *dev = target->srp_host->srp_dev->dev; in srp_recv_done()
2081 shost_printk(KERN_ERR, target->scsi_host, in srp_recv_done()
2101 /* XXX Handle target logout */ in srp_recv_done()
2102 shost_printk(KERN_WARNING, target->scsi_host, in srp_recv_done()
2103 PFX "Got target logout request\n"); in srp_recv_done()
2107 shost_printk(KERN_WARNING, target->scsi_host, in srp_recv_done()
2117 shost_printk(KERN_ERR, target->scsi_host, in srp_recv_done()
2123 * @work: Work structure embedded in an SRP target port.
2126 * hence the target->rport test.
2130 struct srp_target_port *target; in srp_tl_err_work() local
2132 target = container_of(work, struct srp_target_port, tl_err_work); in srp_tl_err_work()
2133 if (target->rport) in srp_tl_err_work()
2134 srp_start_tl_fail_timers(target->rport); in srp_tl_err_work()
2141 struct srp_target_port *target = ch->target; in srp_handle_qp_err() local
2143 if (ch->connected && !target->qp_in_error) { in srp_handle_qp_err()
2144 shost_printk(KERN_ERR, target->scsi_host, in srp_handle_qp_err()
2148 queue_work(system_long_wq, &target->tl_err_work); in srp_handle_qp_err()
2150 target->qp_in_error = true; in srp_handle_qp_err()
2156 struct srp_target_port *target = host_to_target(shost); in srp_queuecommand() local
2166 scmnd->result = srp_chkready(target->rport); in srp_queuecommand()
2172 ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)]; in srp_queuecommand()
2181 dev = target->srp_host->srp_dev->dev; in srp_queuecommand()
2204 shost_printk(KERN_ERR, target->scsi_host, in srp_queuecommand()
2221 shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n"); in srp_queuecommand()
2257 struct srp_target_port *target = ch->target; in srp_alloc_iu_bufs() local
2260 ch->rx_ring = kcalloc(target->queue_size, sizeof(*ch->rx_ring), in srp_alloc_iu_bufs()
2264 ch->tx_ring = kcalloc(target->queue_size, sizeof(*ch->tx_ring), in srp_alloc_iu_bufs()
2269 for (i = 0; i < target->queue_size; ++i) { in srp_alloc_iu_bufs()
2270 ch->rx_ring[i] = srp_alloc_iu(target->srp_host, in srp_alloc_iu_bufs()
2277 for (i = 0; i < target->queue_size; ++i) { in srp_alloc_iu_bufs()
2278 ch->tx_ring[i] = srp_alloc_iu(target->srp_host, in srp_alloc_iu_bufs()
2290 for (i = 0; i < target->queue_size; ++i) { in srp_alloc_iu_bufs()
2291 srp_free_iu(target->srp_host, ch->rx_ring[i]); in srp_alloc_iu_bufs()
2292 srp_free_iu(target->srp_host, ch->tx_ring[i]); in srp_alloc_iu_bufs()
2319 * Set target->rq_tmo_jiffies to one second more than the largest time in srp_compute_rq_tmo()
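The srp_compute_rq_tmo() comment above says the request-queue timeout is set to one second more than the largest time derived from the negotiated QP attributes. The sketch below shows only the usual InfiniBand conversion, 4.096 us * 2^timeout, rounded up to whole seconds plus one; the real function's inputs and rounding details may differ, so take this as a simplified model of what the comment describes.

    #include <stdint.h>
    #include <stdio.h>

    /* Convert an IB local-ACK-timeout exponent to nanoseconds with the
     * standard 4.096 us * 2^exp formula, then report one second more than
     * that transport time (rounded up to whole seconds). */
    static unsigned int rq_tmo_seconds(uint8_t ib_timeout_exp)
    {
        uint64_t t_ns = 4096ULL << ib_timeout_exp;      /* 4.096 us * 2^exp */
        uint64_t t_s  = (t_ns + 999999999ULL) / 1000000000ULL;

        return (unsigned int)(t_s + 1);
    }

    int main(void)
    {
        /* Exponent 19: roughly 2.15 s of transport time -> 4 s queue timeout. */
        printf("rq timeout: %u s\n", rq_tmo_seconds(19));
        return 0;
    }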
2336 struct srp_target_port *target = ch->target; in srp_cm_rep_handler() local
2347 ch->max_it_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt, in srp_cm_rep_handler()
2349 target->max_it_iu_size); in srp_cm_rep_handler()
2354 shost_printk(KERN_DEBUG, target->scsi_host, in srp_cm_rep_handler()
2361 target->scsi_host->can_queue in srp_cm_rep_handler()
2363 target->scsi_host->can_queue); in srp_cm_rep_handler()
2364 target->scsi_host->cmd_per_lun in srp_cm_rep_handler()
2365 = min_t(int, target->scsi_host->can_queue, in srp_cm_rep_handler()
2366 target->scsi_host->cmd_per_lun); in srp_cm_rep_handler()
2368 shost_printk(KERN_WARNING, target->scsi_host, in srp_cm_rep_handler()
2380 for (i = 0; i < target->queue_size; i++) { in srp_cm_rep_handler()
2388 if (!target->using_rdma_cm) { in srp_cm_rep_handler()
2408 target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask); in srp_cm_rep_handler()
2428 struct srp_target_port *target = ch->target; in srp_ib_cm_rej_handler() local
2429 struct Scsi_Host *shost = target->scsi_host; in srp_ib_cm_rej_handler()
2447 if (srp_target_is_topspin(target)) { in srp_ib_cm_rej_handler()
2458 PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n", in srp_ib_cm_rej_handler()
2488 target->sgid.raw, in srp_ib_cm_rej_handler()
2489 target->ib_cm.orig_dgid.raw, in srp_ib_cm_rej_handler()
2514 struct srp_target_port *target = ch->target; in srp_ib_cm_handler() local
2519 shost_printk(KERN_DEBUG, target->scsi_host, in srp_ib_cm_handler()
2531 shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n"); in srp_ib_cm_handler()
2538 shost_printk(KERN_WARNING, target->scsi_host, in srp_ib_cm_handler()
2542 shost_printk(KERN_ERR, target->scsi_host, in srp_ib_cm_handler()
2544 queue_work(system_long_wq, &target->tl_err_work); in srp_ib_cm_handler()
2548 shost_printk(KERN_ERR, target->scsi_host, in srp_ib_cm_handler()
2561 shost_printk(KERN_WARNING, target->scsi_host, in srp_ib_cm_handler()
2575 struct srp_target_port *target = ch->target; in srp_rdma_cm_rej_handler() local
2576 struct Scsi_Host *shost = target->scsi_host; in srp_rdma_cm_rej_handler()
2626 struct srp_target_port *target = ch->target; in srp_rdma_cm_handler() local
2652 shost_printk(KERN_DEBUG, target->scsi_host, in srp_rdma_cm_handler()
2664 shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n"); in srp_rdma_cm_handler()
2672 shost_printk(KERN_WARNING, target->scsi_host, in srp_rdma_cm_handler()
2677 queue_work(system_long_wq, &target->tl_err_work); in srp_rdma_cm_handler()
2682 shost_printk(KERN_ERR, target->scsi_host, in srp_rdma_cm_handler()
2690 shost_printk(KERN_WARNING, target->scsi_host, in srp_rdma_cm_handler()
2719 struct srp_target_port *target = ch->target; in srp_send_tsk_mgmt() local
2720 struct srp_rport *rport = target->rport; in srp_send_tsk_mgmt()
2721 struct ib_device *dev = target->srp_host->srp_dev->dev; in srp_send_tsk_mgmt()
2726 if (!ch->connected || target->qp_in_error) in srp_send_tsk_mgmt()
2784 struct srp_target_port *target = host_to_target(scmnd->device->host); in srp_abort() local
2791 shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n"); in srp_abort()
2797 if (WARN_ON_ONCE(ch_idx >= target->ch_count)) in srp_abort()
2799 ch = &target->ch[ch_idx]; in srp_abort()
2802 shost_printk(KERN_ERR, target->scsi_host, in srp_abort()
2807 else if (target->rport->state == SRP_RPORT_LOST) in srp_abort()
2822 struct srp_target_port *target = host_to_target(scmnd->device->host); in srp_reset_device() local
2826 shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n"); in srp_reset_device()
2828 ch = &target->ch[0]; in srp_reset_device()
2840 struct srp_target_port *target = host_to_target(scmnd->device->host); in srp_reset_host() local
2842 shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n"); in srp_reset_host()
2844 return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED; in srp_reset_host()
2850 struct srp_target_port *target = host_to_target(shost); in srp_target_alloc() local
2852 if (target->target_can_queue) in srp_target_alloc()
2853 starget->can_queue = target->target_can_queue; in srp_target_alloc()
2860 struct srp_target_port *target = host_to_target(shost); in srp_slave_configure() local
2865 timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies); in srp_slave_configure()
2875 struct srp_target_port *target = host_to_target(class_to_shost(dev)); in id_ext_show() local
2877 return sysfs_emit(buf, "0x%016llx\n", be64_to_cpu(target->id_ext)); in id_ext_show()
2885 struct srp_target_port *target = host_to_target(class_to_shost(dev)); in ioc_guid_show() local
2887 return sysfs_emit(buf, "0x%016llx\n", be64_to_cpu(target->ioc_guid)); in ioc_guid_show()
2895 struct srp_target_port *target = host_to_target(class_to_shost(dev)); in service_id_show() local
2897 if (target->using_rdma_cm) in service_id_show()
2900 be64_to_cpu(target->ib_cm.service_id)); in service_id_show()
2908 struct srp_target_port *target = host_to_target(class_to_shost(dev)); in pkey_show() local
2910 if (target->using_rdma_cm) in pkey_show()
2913 return sysfs_emit(buf, "0x%04x\n", be16_to_cpu(target->ib_cm.pkey)); in pkey_show()
2921 struct srp_target_port *target = host_to_target(class_to_shost(dev)); in sgid_show() local
2923 return sysfs_emit(buf, "%pI6\n", target->sgid.raw); in sgid_show()
2931 struct srp_target_port *target = host_to_target(class_to_shost(dev)); in dgid_show() local
2932 struct srp_rdma_ch *ch = &target->ch[0]; in dgid_show()
2934 if (target->using_rdma_cm) in dgid_show()
2945 struct srp_target_port *target = host_to_target(class_to_shost(dev)); in orig_dgid_show() local
2947 if (target->using_rdma_cm) in orig_dgid_show()
2950 return sysfs_emit(buf, "%pI6\n", target->ib_cm.orig_dgid.raw); in orig_dgid_show()
2958 struct srp_target_port *target = host_to_target(class_to_shost(dev)); in req_lim_show() local
2962 for (i = 0; i < target->ch_count; i++) { in req_lim_show()
2963 ch = &target->ch[i]; in req_lim_show()
2975 struct srp_target_port *target = host_to_target(class_to_shost(dev)); in zero_req_lim_show() local
2977 return sysfs_emit(buf, "%d\n", target->zero_req_lim); in zero_req_lim_show()
2985 struct srp_target_port *target = host_to_target(class_to_shost(dev)); in local_ib_port_show() local
2987 return sysfs_emit(buf, "%d\n", target->srp_host->port); in local_ib_port_show()
2995 struct srp_target_port *target = host_to_target(class_to_shost(dev)); in local_ib_device_show() local
2998 dev_name(&target->srp_host->srp_dev->dev->dev)); in local_ib_device_show()
3006 struct srp_target_port *target = host_to_target(class_to_shost(dev)); in ch_count_show() local
3008 return sysfs_emit(buf, "%d\n", target->ch_count); in ch_count_show()
3016 struct srp_target_port *target = host_to_target(class_to_shost(dev)); in comp_vector_show() local
3018 return sysfs_emit(buf, "%d\n", target->comp_vector); in comp_vector_show()
3026 struct srp_target_port *target = host_to_target(class_to_shost(dev)); in tl_retry_count_show() local
3028 return sysfs_emit(buf, "%d\n", target->tl_retry_count); in tl_retry_count_show()
3036 struct srp_target_port *target = host_to_target(class_to_shost(dev)); in cmd_sg_entries_show() local
3038 return sysfs_emit(buf, "%u\n", target->cmd_sg_cnt); in cmd_sg_entries_show()
3046 struct srp_target_port *target = host_to_target(class_to_shost(dev)); in allow_ext_sg_show() local
3048 return sysfs_emit(buf, "%s\n", target->allow_ext_sg ? "true" : "false"); in allow_ext_sg_show()
3111 * < 0 upon failure. Caller is responsible for SRP target port cleanup.
3112 * 0 and target->state == SRP_TARGET_REMOVED if asynchronous target port
3114 * 0 and target->state != SRP_TARGET_REMOVED upon success.
3116 static int srp_add_target(struct srp_host *host, struct srp_target_port *target) in srp_add_target() argument
3121 target->state = SRP_TARGET_SCANNING; in srp_add_target()
3122 sprintf(target->target_name, "SRP.T10:%016llX", in srp_add_target()
3123 be64_to_cpu(target->id_ext)); in srp_add_target()
3125 if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dev.parent)) in srp_add_target()
3128 memcpy(ids.port_id, &target->id_ext, 8); in srp_add_target()
3129 memcpy(ids.port_id + 8, &target->ioc_guid, 8); in srp_add_target()
3131 rport = srp_rport_add(target->scsi_host, &ids); in srp_add_target()
3133 scsi_remove_host(target->scsi_host); in srp_add_target()
3137 rport->lld_data = target; in srp_add_target()
3138 target->rport = rport; in srp_add_target()
3141 list_add_tail(&target->list, &host->target_list); in srp_add_target()
3144 scsi_scan_target(&target->scsi_host->shost_gendev, in srp_add_target()
3145 0, target->scsi_id, SCAN_WILD_CARD, SCSI_SCAN_INITIAL); in srp_add_target()
3147 if (srp_connected_ch(target) < target->ch_count || in srp_add_target()
3148 target->qp_in_error) { in srp_add_target()
3149 shost_printk(KERN_INFO, target->scsi_host, in srp_add_target()
3151 srp_queue_remove_work(target); in srp_add_target()
3156 dev_name(&target->scsi_host->shost_gendev), in srp_add_target()
3157 srp_sdev_count(target->scsi_host)); in srp_add_target()
3159 spin_lock_irq(&target->lock); in srp_add_target()
3160 if (target->state == SRP_TARGET_SCANNING) in srp_add_target()
3161 target->state = SRP_TARGET_LIVE; in srp_add_target()
3162 spin_unlock_irq(&target->lock); in srp_add_target()
3182 * srp_conn_unique() - check whether the connection to a target is unique
3184 * @target: SRP target port.
3187 struct srp_target_port *target) in srp_conn_unique() argument
3192 if (target->state == SRP_TARGET_REMOVED) in srp_conn_unique()
3199 if (t != target && in srp_conn_unique()
3200 target->id_ext == t->id_ext && in srp_conn_unique()
3201 target->ioc_guid == t->ioc_guid && in srp_conn_unique()
3202 target->initiator_ext == t->initiator_ext) { in srp_conn_unique()
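The srp_conn_unique() lines above reject a new login when another, non-removed target port on the same host already carries the same id_ext, ioc_guid and initiator_ext triple. A minimal sketch of that comparison over a plain array, with invented identifiers, is shown below.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Cut-down view of a target port: just the fields the uniqueness test
     * in the listing compares, plus a removed flag. */
    struct port_id {
        uint64_t id_ext;
        uint64_t ioc_guid;
        uint64_t initiator_ext;
        bool removed;
    };

    static bool conn_unique(const struct port_id *ports, int n,
                            const struct port_id *cand)
    {
        for (int i = 0; i < n; i++) {
            const struct port_id *t = &ports[i];

            if (t == cand || t->removed)
                continue;
            if (t->id_ext == cand->id_ext &&
                t->ioc_guid == cand->ioc_guid &&
                t->initiator_ext == cand->initiator_ext)
                return false;
        }
        return true;
    }

    int main(void)
    {
        struct port_id existing[] = {
            { .id_ext = 0x1, .ioc_guid = 0x2, .initiator_ext = 0x3 },
        };
        struct port_id cand = { .id_ext = 0x1, .ioc_guid = 0x2, .initiator_ext = 0x3 };

        printf("unique: %d\n", conn_unique(existing, 1, &cand));
        return 0;
    }

add_target_store(), further down in the listing, calls the real check before creating the channels and logs the duplicate connection otherwise.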
3214 * Target ports are added by writing
3325 struct srp_target_port *target) in srp_parse_options() argument
3362 target->id_ext = cpu_to_be64(ull); in srp_parse_options()
3378 target->ioc_guid = cpu_to_be64(ull); in srp_parse_options()
3394 ret = hex2bin(target->ib_cm.orig_dgid.raw, p, 16); in srp_parse_options()
3405 target->ib_cm.pkey = cpu_to_be16(token); in srp_parse_options()
3420 target->ib_cm.service_id = cpu_to_be64(ull); in srp_parse_options()
3430 ret = srp_parse_in(net, &target->rdma_cm.src.ss, p, in srp_parse_options()
3437 target->rdma_cm.src_specified = true; in srp_parse_options()
3447 ret = srp_parse_in(net, &target->rdma_cm.dst.ss, p, in srp_parse_options()
3456 target->using_rdma_cm = true; in srp_parse_options()
3465 target->scsi_host->max_sectors = token; in srp_parse_options()
3473 target->scsi_host->can_queue = token; in srp_parse_options()
3474 target->queue_size = token + SRP_RSP_SQ_SIZE + in srp_parse_options()
3477 target->scsi_host->cmd_per_lun = token; in srp_parse_options()
3486 target->scsi_host->cmd_per_lun = token; in srp_parse_options()
3495 target->target_can_queue = token; in srp_parse_options()
3510 target->io_class = token; in srp_parse_options()
3525 target->initiator_ext = cpu_to_be64(ull); in srp_parse_options()
3535 target->cmd_sg_cnt = token; in srp_parse_options()
3543 target->allow_ext_sg = !!token; in srp_parse_options()
3553 target->sg_tablesize = token; in srp_parse_options()
3561 target->comp_vector = token; in srp_parse_options()
3570 target->tl_retry_count = token; in srp_parse_options()
3575 pr_warn("bad maximum initiator to target IU size '%s'\n", p); in srp_parse_options()
3578 target->max_it_iu_size = token; in srp_parse_options()
3586 target->ch_count = token; in srp_parse_options()
3590 pr_warn("unknown parameter or missing value '%s' in target creation request\n", in srp_parse_options()
3603 pr_warn("target creation request is missing one or more parameters\n"); in srp_parse_options()
3605 if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue in srp_parse_options()
3608 target->scsi_host->cmd_per_lun, in srp_parse_options()
3609 target->scsi_host->can_queue); in srp_parse_options()
3623 struct srp_target_port *target; in add_target_store() local
3647 target = host_to_target(target_host); in add_target_store()
3649 target->net = kobj_ns_grab_current(KOBJ_NS_TYPE_NET); in add_target_store()
3650 target->io_class = SRP_REV16A_IB_IO_CLASS; in add_target_store()
3651 target->scsi_host = target_host; in add_target_store()
3652 target->srp_host = host; in add_target_store()
3653 target->lkey = host->srp_dev->pd->local_dma_lkey; in add_target_store()
3654 target->global_rkey = host->srp_dev->global_rkey; in add_target_store()
3655 target->cmd_sg_cnt = cmd_sg_entries; in add_target_store()
3656 target->sg_tablesize = indirect_sg_entries ? : cmd_sg_entries; in add_target_store()
3657 target->allow_ext_sg = allow_ext_sg; in add_target_store()
3658 target->tl_retry_count = 7; in add_target_store()
3659 target->queue_size = SRP_DEFAULT_QUEUE_SIZE; in add_target_store()
3665 scsi_host_get(target->scsi_host); in add_target_store()
3671 ret = srp_parse_options(target->net, buf, target); in add_target_store()
3675 if (!srp_conn_unique(target->srp_host, target)) { in add_target_store()
3676 if (target->using_rdma_cm) { in add_target_store()
3677 shost_printk(KERN_INFO, target->scsi_host, in add_target_store()
3678 PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;dest=%pIS\n", in add_target_store()
3679 be64_to_cpu(target->id_ext), in add_target_store()
3680 be64_to_cpu(target->ioc_guid), in add_target_store()
3681 &target->rdma_cm.dst); in add_target_store()
3683 shost_printk(KERN_INFO, target->scsi_host, in add_target_store()
3684 …PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n… in add_target_store()
3685 be64_to_cpu(target->id_ext), in add_target_store()
3686 be64_to_cpu(target->ioc_guid), in add_target_store()
3687 be64_to_cpu(target->initiator_ext)); in add_target_store()
3693 if (!srp_dev->has_fr && !target->allow_ext_sg && in add_target_store()
3694 target->cmd_sg_cnt < target->sg_tablesize) { in add_target_store()
3696 target->sg_tablesize = target->cmd_sg_cnt; in add_target_store()
3720 (target->scsi_host->max_sectors + 1 + in add_target_store()
3724 (target->sg_tablesize + in add_target_store()
3729 target->scsi_host->max_sectors, srp_dev->max_pages_per_mr, srp_dev->mr_page_size, in add_target_store()
3733 target_host->sg_tablesize = target->sg_tablesize; in add_target_store()
3734 target->mr_pool_size = target->scsi_host->can_queue * mr_per_cmd; in add_target_store()
3735 target->mr_per_cmd = mr_per_cmd; in add_target_store()
3736 target->indirect_size = target->sg_tablesize * in add_target_store()
3738 max_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt, in add_target_store()
3740 target->max_it_iu_size); in add_target_store()
3742 INIT_WORK(&target->tl_err_work, srp_tl_err_work); in add_target_store()
3743 INIT_WORK(&target->remove_work, srp_remove_work); in add_target_store()
3744 spin_lock_init(&target->lock); in add_target_store()
3745 ret = rdma_query_gid(ibdev, host->port, 0, &target->sgid); in add_target_store()
3750 if (target->ch_count == 0) { in add_target_store()
3751 target->ch_count = in add_target_store()
3758 target->ch = kcalloc(target->ch_count, sizeof(*target->ch), in add_target_store()
3760 if (!target->ch) in add_target_store()
3763 for (ch_idx = 0; ch_idx < target->ch_count; ++ch_idx) { in add_target_store()
3764 ch = &target->ch[ch_idx]; in add_target_store()
3765 ch->target = target; in add_target_store()
3781 if (target->using_rdma_cm) in add_target_store()
3783 &target->rdma_cm.dst); in add_target_store()
3786 target->ib_cm.orig_dgid.raw); in add_target_store()
3787 shost_printk(KERN_ERR, target->scsi_host, in add_target_store()
3790 target->ch_count, dst); in add_target_store()
3794 srp_free_ch_ib(target, ch); in add_target_store()
3795 target->ch_count = ch - target->ch; in add_target_store()
3803 target->scsi_host->nr_hw_queues = target->ch_count; in add_target_store()
3805 ret = srp_add_target(host, target); in add_target_store()
3809 if (target->state != SRP_TARGET_REMOVED) { in add_target_store()
3810 if (target->using_rdma_cm) { in add_target_store()
3811 shost_printk(KERN_DEBUG, target->scsi_host, PFX in add_target_store()
3812 "new target: id_ext %016llx ioc_guid %016llx sgid %pI6 dest %pIS\n", in add_target_store()
3813 be64_to_cpu(target->id_ext), in add_target_store()
3814 be64_to_cpu(target->ioc_guid), in add_target_store()
3815 target->sgid.raw, &target->rdma_cm.dst); in add_target_store()
3817 shost_printk(KERN_DEBUG, target->scsi_host, PFX in add_target_store()
3818 … "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n", in add_target_store()
3819 be64_to_cpu(target->id_ext), in add_target_store()
3820 be64_to_cpu(target->ioc_guid), in add_target_store()
3821 be16_to_cpu(target->ib_cm.pkey), in add_target_store()
3822 be64_to_cpu(target->ib_cm.service_id), in add_target_store()
3823 target->sgid.raw, in add_target_store()
3824 target->ib_cm.orig_dgid.raw); in add_target_store()
3834 scsi_host_put(target->scsi_host); in add_target_store()
3841 if (target->state != SRP_TARGET_REMOVED) in add_target_store()
3842 kobj_ns_drop(KOBJ_NS_TYPE_NET, target->net); in add_target_store()
3843 scsi_host_put(target->scsi_host); in add_target_store()
3849 srp_disconnect_target(target); in add_target_store()
3852 for (i = 0; i < target->ch_count; i++) { in add_target_store()
3853 ch = &target->ch[i]; in add_target_store()
3854 srp_free_ch_ib(target, ch); in add_target_store()
3857 kfree(target->ch); in add_target_store()
4020 struct srp_target_port *target; in srp_remove_one() local
4028 * target ports can be created. in srp_remove_one()
4033 * Remove all target ports. in srp_remove_one()
4036 list_for_each_entry(target, &host->target_list, list) in srp_remove_one()
4037 srp_queue_remove_work(target); in srp_remove_one()
4041 * Wait for tl_err and target port removal tasks. in srp_remove_one()