Lines matching refs: shost
61 int scsi_init_sense_cache(struct Scsi_Host *shost) in scsi_init_sense_cache() argument
274 static void scsi_dec_host_busy(struct Scsi_Host *shost, struct scsi_cmnd *cmd) in scsi_dec_host_busy() argument
280 if (unlikely(scsi_host_in_recovery(shost))) { in scsi_dec_host_busy()
281 spin_lock_irqsave(shost->host_lock, flags); in scsi_dec_host_busy()
282 if (shost->host_failed || shost->host_eh_scheduled) in scsi_dec_host_busy()
283 scsi_eh_wakeup(shost); in scsi_dec_host_busy()
284 spin_unlock_irqrestore(shost->host_lock, flags); in scsi_dec_host_busy()
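
The block above (lines 274-284) is a decrement-and-maybe-wake pattern: the per-host busy count drops lock-free, and host_lock is taken only on the unlikely recovery path to decide whether the error handler needs waking. A minimal userspace sketch of that shape, with illustrative names (struct host, dec_host_busy) standing in for the kernel types:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct host {
        atomic_int      busy;         /* in-flight commands */
        bool            in_recovery;  /* models scsi_host_in_recovery() */
        int             host_failed;  /* failed commands awaiting the EH */
        pthread_mutex_t lock;         /* models shost->host_lock */
    };

    static void eh_wakeup(struct host *h)
    {
        printf("waking error handler, failed=%d\n", h->host_failed);
    }

    /* Common path is lock-free; the lock guards only the decision
     * to wake the error handler during recovery. */
    static void dec_host_busy(struct host *h)
    {
        atomic_fetch_sub(&h->busy, 1);
        if (h->in_recovery) {
            pthread_mutex_lock(&h->lock);
            if (h->host_failed)
                eh_wakeup(h);
            pthread_mutex_unlock(&h->lock);
        }
    }

    int main(void)
    {
        struct host h = { .busy = 1, .in_recovery = true, .host_failed = 1,
                          .lock = PTHREAD_MUTEX_INITIALIZER };
        dec_host_busy(&h);
        return 0;
    }
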
291 struct Scsi_Host *shost = sdev->host; in scsi_device_unbusy() local
294 scsi_dec_host_busy(shost, cmd); in scsi_device_unbusy()
324 struct Scsi_Host *shost = current_sdev->host; in scsi_single_lun_run() local
328 spin_lock_irqsave(shost->host_lock, flags); in scsi_single_lun_run()
330 spin_unlock_irqrestore(shost->host_lock, flags); in scsi_single_lun_run()
339 shost->queuecommand_may_block); in scsi_single_lun_run()
341 spin_lock_irqsave(shost->host_lock, flags); in scsi_single_lun_run()
345 spin_unlock_irqrestore(shost->host_lock, flags); in scsi_single_lun_run()
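
Lines 328-345 show the lock/drop/relock shape of scsi_single_lun_run(): state is chosen under host_lock, the queue is run with the lock dropped (queuecommand may block, per line 339), then the lock is retaken to scan for more work. A hypothetical model using a pthread mutex for host_lock:

    #include <pthread.h>
    #include <stdio.h>

    struct host { pthread_mutex_t lock; };

    static void run_queue(struct host *h)
    {
        (void)h;
        printf("running device queue\n");  /* may block in real life */
    }

    /* Decide under the lock, run the queue outside it, relock to rescan. */
    static void single_lun_run(struct host *h)
    {
        pthread_mutex_lock(&h->lock);
        /* ... make this LUN the target's active one ... */
        pthread_mutex_unlock(&h->lock);

        run_queue(h);                      /* must not hold host_lock here */

        pthread_mutex_lock(&h->lock);
        /* ... walk sibling LUNs starved in the meantime ... */
        pthread_mutex_unlock(&h->lock);
    }

    int main(void)
    {
        struct host h = { PTHREAD_MUTEX_INITIALIZER };
        single_lun_run(&h);
        return 0;
    }
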
368 static inline bool scsi_host_is_busy(struct Scsi_Host *shost) in scsi_host_is_busy() argument
370 if (atomic_read(&shost->host_blocked) > 0) in scsi_host_is_busy()
372 if (shost->host_self_blocked) in scsi_host_is_busy()
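
The predicate at 368-372 combines an atomic transient-block count with the driver's self-block flag. A userspace rendering, assuming a plain struct host in place of Scsi_Host:

    #include <stdatomic.h>
    #include <stdbool.h>

    struct host {
        atomic_int host_blocked;      /* transient: retryable dispatch errors */
        bool       host_self_blocked; /* sticky: scsi_block_requests() */
    };

    /* Busy if either a transient block is pending or the driver
     * blocked itself; both must clear before dispatch resumes. */
    static bool host_is_busy(struct host *h)
    {
        if (atomic_load(&h->host_blocked) > 0)
            return true;
        if (h->host_self_blocked)
            return true;
        return false;
    }

    int main(void)
    {
        struct host h = { .host_blocked = 0, .host_self_blocked = false };
        return host_is_busy(&h);      /* exits 0: host is not busy */
    }
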
377 static void scsi_starved_list_run(struct Scsi_Host *shost) in scsi_starved_list_run() argument
383 spin_lock_irqsave(shost->host_lock, flags); in scsi_starved_list_run()
384 list_splice_init(&shost->starved_list, &starved_list); in scsi_starved_list_run()
399 if (scsi_host_is_busy(shost)) in scsi_starved_list_run()
407 &shost->starved_list); in scsi_starved_list_run()
424 spin_unlock_irqrestore(shost->host_lock, flags); in scsi_starved_list_run()
429 spin_lock_irqsave(shost->host_lock, flags); in scsi_starved_list_run()
432 list_splice(&starved_list, &shost->starved_list); in scsi_starved_list_run()
433 spin_unlock_irqrestore(shost->host_lock, flags); in scsi_starved_list_run()
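
Lines 383-433 are the splice-and-drain idiom: the shared starved_list is detached onto a local list in one locked step, drained with the lock held as little as possible, and whatever could not be serviced is spliced back (line 432). A simplified singly-linked model; unlike the kernel's list_splice(), this version does not preserve order, and the busy check (line 399) is done outside the lock for brevity:

    #include <pthread.h>
    #include <stddef.h>
    #include <stdio.h>

    struct sdev { const char *name; struct sdev *next; };

    struct host {
        pthread_mutex_t lock;    /* models shost->host_lock */
        struct sdev *starved;    /* models shost->starved_list */
        int busy;                /* stand-in for scsi_host_is_busy() */
    };

    static void starved_list_run(struct host *h)
    {
        pthread_mutex_lock(&h->lock);
        struct sdev *list = h->starved;  /* detach everything at once */
        h->starved = NULL;
        pthread_mutex_unlock(&h->lock);

        struct sdev *leftover = NULL;
        while (list) {
            struct sdev *sdev = list;
            list = sdev->next;
            if (h->busy) {               /* host saturated: keep for later */
                sdev->next = leftover;
                leftover = sdev;
                continue;
            }
            printf("kicking queue of %s\n", sdev->name);
        }

        pthread_mutex_lock(&h->lock);
        while (leftover) {               /* put survivors back */
            struct sdev *sdev = leftover;
            leftover = sdev->next;
            sdev->next = h->starved;
            h->starved = sdev;
        }
        pthread_mutex_unlock(&h->lock);
    }

    int main(void)
    {
        struct sdev a = { "sda", NULL }, b = { "sdb", &a };
        struct host h = { PTHREAD_MUTEX_INITIALIZER, &b, 0 };
        starved_list_run(&h);
        return 0;
    }
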
465 void scsi_run_host_queues(struct Scsi_Host *shost) in scsi_run_host_queues() argument
469 shost_for_each_device(sdev, shost) in scsi_run_host_queues()
1281 static inline int scsi_target_queue_ready(struct Scsi_Host *shost, in scsi_target_queue_ready() argument
1288 spin_lock_irq(shost->host_lock); in scsi_target_queue_ready()
1291 spin_unlock_irq(shost->host_lock); in scsi_target_queue_ready()
1295 spin_unlock_irq(shost->host_lock); in scsi_target_queue_ready()
1322 spin_lock_irq(shost->host_lock); in scsi_target_queue_ready()
1323 list_move_tail(&sdev->starved_entry, &shost->starved_list); in scsi_target_queue_ready()
1324 spin_unlock_irq(shost->host_lock); in scsi_target_queue_ready()
1337 struct Scsi_Host *shost, in scsi_host_queue_ready() argument
1341 if (atomic_read(&shost->host_blocked) > 0) { in scsi_host_queue_ready()
1342 if (scsi_host_busy(shost) > 0) in scsi_host_queue_ready()
1348 if (atomic_dec_return(&shost->host_blocked) > 0) in scsi_host_queue_ready()
1352 shost_printk(KERN_INFO, shost, in scsi_host_queue_ready()
1356 if (shost->host_self_blocked) in scsi_host_queue_ready()
1361 spin_lock_irq(shost->host_lock); in scsi_host_queue_ready()
1364 spin_unlock_irq(shost->host_lock); in scsi_host_queue_ready()
1372 spin_lock_irq(shost->host_lock); in scsi_host_queue_ready()
1374 list_add_tail(&sdev->starved_entry, &shost->starved_list); in scsi_host_queue_ready()
1375 spin_unlock_irq(shost->host_lock); in scsi_host_queue_ready()
1377 scsi_dec_host_busy(shost, cmd); in scsi_host_queue_ready()
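
Lines 1341-1356 gate dispatch on host_blocked: while the count is raised, commands are refused until the host drains, then each probe consumes one unit and only the probe that takes the count to zero proceeds (the "unblocking host at zero depth" message at 1352). A hedged userspace model; note that C11 atomic_fetch_sub returns the old value where the kernel's atomic_dec_return returns the new one:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct host {
        atomic_int host_blocked;  /* transient block count */
        atomic_int in_flight;     /* stand-in for scsi_host_busy() */
        bool       self_blocked;  /* scsi_block_requests() flag */
    };

    static bool host_queue_ready(struct host *h)
    {
        if (atomic_load(&h->host_blocked) > 0) {
            if (atomic_load(&h->in_flight) > 0)
                return false;             /* wait for the host to drain */
            /* fetch_sub returns the old value: old > 1 means still
             * blocked, matching atomic_dec_return(...) > 0 */
            if (atomic_fetch_sub(&h->host_blocked, 1) > 1)
                return false;
            printf("unblocking host at zero depth\n");
        }
        return !h->self_blocked;
    }

    int main(void)
    {
        struct host h = { .host_blocked = 2 };
        while (!host_queue_ready(&h))
            ;                             /* refused until the count drains */
        return 0;
    }
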
1396 struct Scsi_Host *shost; in scsi_mq_lld_busy() local
1401 shost = sdev->host; in scsi_mq_lld_busy()
1409 if (scsi_host_in_recovery(shost) || scsi_device_is_busy(sdev)) in scsi_mq_lld_busy()
1536 static unsigned int scsi_mq_inline_sgl_size(struct Scsi_Host *shost) in scsi_mq_inline_sgl_size() argument
1538 return min_t(unsigned int, shost->sg_tablesize, SCSI_INLINE_SG_CNT) * in scsi_mq_inline_sgl_size()
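
scsi_mq_inline_sgl_size() (1536-1538) caps how many SG entries are embedded in each request; larger lists are allocated separately at dispatch time. A sketch assuming SCSI_INLINE_SG_CNT is 2, as in current scsi_lib.c:

    #include <stdio.h>

    struct scatterlist { void *page; unsigned int length, offset; };

    #define SCSI_INLINE_SG_CNT 2    /* assumed, per current scsi_lib.c */

    static unsigned int inline_sgl_size(unsigned short sg_tablesize)
    {
        unsigned int n = sg_tablesize < SCSI_INLINE_SG_CNT ?
                         sg_tablesize : SCSI_INLINE_SG_CNT;
        return n * sizeof(struct scatterlist);
    }

    int main(void)
    {
        /* a host advertising 128 SG entries still embeds only 2 */
        printf("inline sgl bytes: %u\n", inline_sgl_size(128));
        return 0;
    }
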
1546 struct Scsi_Host *shost = sdev->host; in scsi_prepare_cmd() local
1570 if (!shost->hostt->init_cmd_priv) in scsi_prepare_cmd()
1571 memset(cmd + 1, 0, shost->hostt->cmd_size); in scsi_prepare_cmd()
1579 sg = (void *)cmd + sizeof(struct scsi_cmnd) + shost->hostt->cmd_size; in scsi_prepare_cmd()
1582 if (scsi_host_get_prot(shost)) { in scsi_prepare_cmd()
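
scsi_prepare_cmd() (1546-1582) relies on one allocation laid out as [scsi_cmnd][driver private][scatterlists]: "cmd + 1" is the private area zeroed at 1571, and the SG table sits past it at 1579. A hypothetical userspace sketch of that pointer arithmetic (the kernel spells it with GNU void * arithmetic; plain C needs a char * cast):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct cmd { int tag; };                   /* stands in for scsi_cmnd */
    struct sg  { void *page; unsigned int len; };

    #define CMD_PRIV_SIZE 64    /* models shost->hostt->cmd_size */
    #define INLINE_SG_CNT  2    /* models SCSI_INLINE_SG_CNT */

    int main(void)
    {
        /* one allocation: command, then driver private area, then SGs */
        void *buf = calloc(1, sizeof(struct cmd) + CMD_PRIV_SIZE +
                              INLINE_SG_CNT * sizeof(struct sg));
        if (!buf)
            return 1;

        struct cmd *cmd = buf;

        /* "cmd + 1" is the driver-private area (the memset at 1571) */
        memset(cmd + 1, 0, CMD_PRIV_SIZE);

        /* the inline SG table follows the private area (line 1579) */
        struct sg *sgl = (struct sg *)((char *)cmd +
                         sizeof(struct cmd) + CMD_PRIV_SIZE);

        printf("cmd=%p priv=%p sgl=%p\n",
               (void *)cmd, (void *)(cmd + 1), (void *)sgl);
        free(buf);
        return 0;
    }
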
1711 struct Scsi_Host *shost = sdev->host; in scsi_queue_rq() local
1729 if (!scsi_target_queue_ready(shost, sdev)) in scsi_queue_rq()
1731 if (unlikely(scsi_host_in_recovery(shost))) { in scsi_queue_rq()
1736 if (!scsi_host_queue_ready(q, shost, sdev, cmd)) in scsi_queue_rq()
1769 scsi_dec_host_busy(shost, cmd); in scsi_queue_rq()
1810 struct Scsi_Host *shost = set->driver_data; in scsi_mq_init_request() local
1820 if (scsi_host_get_prot(shost)) { in scsi_mq_init_request()
1822 shost->hostt->cmd_size; in scsi_mq_init_request()
1823 cmd->prot_sdb = (void *)sg + scsi_mq_inline_sgl_size(shost); in scsi_mq_init_request()
1826 if (shost->hostt->init_cmd_priv) { in scsi_mq_init_request()
1827 ret = shost->hostt->init_cmd_priv(shost, cmd); in scsi_mq_init_request()
1838 struct Scsi_Host *shost = set->driver_data; in scsi_mq_exit_request() local
1841 if (shost->hostt->exit_cmd_priv) in scsi_mq_exit_request()
1842 shost->hostt->exit_cmd_priv(shost, cmd); in scsi_mq_exit_request()
1849 struct Scsi_Host *shost = hctx->driver_data; in scsi_mq_poll() local
1851 if (shost->hostt->mq_poll) in scsi_mq_poll()
1852 return shost->hostt->mq_poll(shost, hctx->queue_num); in scsi_mq_poll()
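
The mq_poll and map_queues references (1851-1852, 1870-1871) dispatch through optional host-template ops, NULL-checked before the indirect call. A minimal model of that convention:

    #include <stdio.h>

    struct host;
    struct host_template {
        int (*mq_poll)(struct host *h, unsigned int queue_num); /* optional */
    };
    struct host { const struct host_template *hostt; };

    /* Optional ops get a neutral default when the driver leaves
     * them unimplemented. */
    static int mq_poll(struct host *h, unsigned int queue_num)
    {
        if (h->hostt->mq_poll)
            return h->hostt->mq_poll(h, queue_num);
        return 0;
    }

    int main(void)
    {
        static const struct host_template no_poll = { 0 };
        struct host h = { &no_poll };
        return mq_poll(&h, 0);
    }
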
1860 struct Scsi_Host *shost = data; in scsi_init_hctx() local
1862 hctx->driver_data = shost; in scsi_init_hctx()
1868 struct Scsi_Host *shost = container_of(set, struct Scsi_Host, tag_set); in scsi_map_queues() local
1870 if (shost->hostt->map_queues) in scsi_map_queues()
1871 return shost->hostt->map_queues(shost); in scsi_map_queues()
1875 void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q) in __scsi_init_queue() argument
1877 struct device *dev = shost->dma_dev; in __scsi_init_queue()
1882 blk_queue_max_segments(q, min_t(unsigned short, shost->sg_tablesize, in __scsi_init_queue()
1885 if (scsi_host_prot_dma(shost)) { in __scsi_init_queue()
1886 shost->sg_prot_tablesize = in __scsi_init_queue()
1887 min_not_zero(shost->sg_prot_tablesize, in __scsi_init_queue()
1889 BUG_ON(shost->sg_prot_tablesize < shost->sg_tablesize); in __scsi_init_queue()
1890 blk_queue_max_integrity_segments(q, shost->sg_prot_tablesize); in __scsi_init_queue()
1893 blk_queue_max_hw_sectors(q, shost->max_sectors); in __scsi_init_queue()
1894 blk_queue_segment_boundary(q, shost->dma_boundary); in __scsi_init_queue()
1895 dma_set_seg_boundary(dev, shost->dma_boundary); in __scsi_init_queue()
1897 blk_queue_max_segment_size(q, shost->max_segment_size); in __scsi_init_queue()
1898 blk_queue_virt_boundary(q, shost->virt_boundary_mask); in __scsi_init_queue()
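
__scsi_init_queue() (1875-1898) copies the host's DMA capabilities into the block queue's limits, clamping sg_tablesize against the SG chunk maximum and propagating boundaries to both the queue and the DMA device. A simplified model with an illustrative cap constant:

    #include <stdio.h>

    struct queue_limits {           /* subset of the block layer's limits */
        unsigned short max_segments;
        unsigned int   max_hw_sectors;
        unsigned long  seg_boundary;
        unsigned int   max_segment_size;
    };

    struct host_caps {              /* subset of Scsi_Host used here */
        unsigned short sg_tablesize;
        unsigned int   max_sectors;
        unsigned long  dma_boundary;
        unsigned int   max_segment_size;
    };

    #define SG_CHUNK_MAX 128        /* illustrative cap, not the kernel constant */

    /* Copy host DMA capabilities into the queue limits, clamping the
     * segment count the way line 1882 clamps sg_tablesize. */
    static void init_queue(struct queue_limits *q, const struct host_caps *h)
    {
        q->max_segments = h->sg_tablesize < SG_CHUNK_MAX ?
                          h->sg_tablesize : SG_CHUNK_MAX;
        q->max_hw_sectors   = h->max_sectors;
        q->seg_boundary     = h->dma_boundary;
        q->max_segment_size = h->max_segment_size;
    }

    int main(void)
    {
        struct host_caps h = { 168, 1024, 0xffffffffUL, 65536 };
        struct queue_limits q;
        init_queue(&q, &h);
        printf("segs=%u sectors=%u\n", q.max_segments, q.max_hw_sectors);
        return 0;
    }
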
1935 struct Scsi_Host *shost = hctx->driver_data; in scsi_commit_rqs() local
1937 shost->hostt->commit_rqs(shost, hctx->queue_num); in scsi_commit_rqs()
1961 int scsi_mq_setup_tags(struct Scsi_Host *shost) in scsi_mq_setup_tags() argument
1964 struct blk_mq_tag_set *tag_set = &shost->tag_set; in scsi_mq_setup_tags()
1967 scsi_mq_inline_sgl_size(shost)); in scsi_mq_setup_tags()
1968 cmd_size = sizeof(struct scsi_cmnd) + shost->hostt->cmd_size + sgl_size; in scsi_mq_setup_tags()
1969 if (scsi_host_get_prot(shost)) in scsi_mq_setup_tags()
1974 if (shost->hostt->commit_rqs) in scsi_mq_setup_tags()
1978 tag_set->nr_hw_queues = shost->nr_hw_queues ? : 1; in scsi_mq_setup_tags()
1979 tag_set->nr_maps = shost->nr_maps ? : 1; in scsi_mq_setup_tags()
1980 tag_set->queue_depth = shost->can_queue; in scsi_mq_setup_tags()
1982 tag_set->numa_node = dev_to_node(shost->dma_dev); in scsi_mq_setup_tags()
1985 BLK_ALLOC_POLICY_TO_MQ_FLAG(shost->hostt->tag_alloc_policy); in scsi_mq_setup_tags()
1986 if (shost->queuecommand_may_block) in scsi_mq_setup_tags()
1988 tag_set->driver_data = shost; in scsi_mq_setup_tags()
1989 if (shost->host_tagset) in scsi_mq_setup_tags()
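
scsi_mq_setup_tags() (1961-1989) sizes each tag's payload as command plus driver-private area plus inline SG list, with unset queue counts defaulting to 1 via the "?:"s at 1978-1979. A hedged sketch with stand-in types:

    #include <stdio.h>

    struct cmd { char bytes[64]; };    /* stands in for struct scsi_cmnd */

    struct tag_set {                   /* stands in for blk_mq_tag_set */
        unsigned int nr_hw_queues, nr_maps, queue_depth, cmd_size;
        void *driver_data;
    };

    struct host {
        unsigned int nr_hw_queues, nr_maps, can_queue;
        unsigned int cmd_priv_size;    /* models hostt->cmd_size */
        unsigned int sgl_size;         /* models scsi_mq_inline_sgl_size() */
        struct tag_set tag_set;
    };

    /* Payload per tag: command + driver private + inline SGLs;
     * unset counts fall back to 1. */
    static void setup_tags(struct host *h)
    {
        struct tag_set *t = &h->tag_set;

        t->cmd_size     = sizeof(struct cmd) + h->cmd_priv_size + h->sgl_size;
        t->nr_hw_queues = h->nr_hw_queues ? h->nr_hw_queues : 1;
        t->nr_maps      = h->nr_maps ? h->nr_maps : 1;
        t->queue_depth  = h->can_queue;
        t->driver_data  = h;
    }

    int main(void)
    {
        struct host h = { .can_queue = 32, .cmd_priv_size = 64,
                          .sgl_size = 64 };
        setup_tags(&h);
        printf("cmd_size=%u queues=%u\n", h.tag_set.cmd_size,
               h.tag_set.nr_hw_queues);
        return 0;
    }
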
1997 struct Scsi_Host *shost = container_of(kref, typeof(*shost), in scsi_mq_free_tags() local
2000 blk_mq_free_tag_set(&shost->tag_set); in scsi_mq_free_tags()
2001 complete(&shost->tagset_freed); in scsi_mq_free_tags()
2040 void scsi_block_requests(struct Scsi_Host *shost) in scsi_block_requests() argument
2042 shost->host_self_blocked = 1; in scsi_block_requests()
2056 void scsi_unblock_requests(struct Scsi_Host *shost) in scsi_unblock_requests() argument
2058 shost->host_self_blocked = 0; in scsi_unblock_requests()
2059 scsi_run_host_queues(shost); in scsi_unblock_requests()
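
scsi_block_requests()/scsi_unblock_requests() (2040-2059) are a bare flag flip: blocking takes effect only when later queue_ready checks observe host_self_blocked, and unblocking must rerun the host queues or blocked commands would sit forever. A tiny model:

    #include <stdbool.h>
    #include <stdio.h>

    struct host { bool host_self_blocked; };

    static void run_host_queues(struct host *h)
    {
        (void)h;
        printf("rerunning all device queues\n");
    }

    static void block_requests(struct host *h)
    {
        h->host_self_blocked = true;   /* observed by later ready checks */
    }

    static void unblock_requests(struct host *h)
    {
        h->host_self_blocked = false;
        run_host_queues(h);            /* kick queues or commands stall */
    }

    int main(void)
    {
        struct host h = { false };
        block_requests(&h);
        unblock_requests(&h);
        return 0;
    }
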
2913 scsi_block_targets(struct Scsi_Host *shost, struct device *dev) in scsi_block_targets() argument
2917 blk_mq_wait_quiesce_done(&shost->tag_set); in scsi_block_targets()
2957 scsi_host_block(struct Scsi_Host *shost) in scsi_host_block() argument
2966 shost_for_each_device(sdev, shost) { in scsi_host_block()
2977 blk_mq_wait_quiesce_done(&shost->tag_set); in scsi_host_block()
2984 scsi_host_unblock(struct Scsi_Host *shost, int new_state) in scsi_host_unblock() argument
2989 shost_for_each_device(sdev, shost) { in scsi_host_unblock()
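
scsi_host_block()/scsi_host_unblock() (2957-2989) walk every device on the host, flip its state, and on the blocking side wait once for the whole tag set to quiesce (line 2977) rather than per device. A rough model with a fixed device array:

    #include <stdio.h>

    enum sdev_state { SDEV_RUNNING, SDEV_BLOCK };

    struct sdev { enum sdev_state state; };
    struct host { struct sdev devs[2]; int ndev; };

    static void wait_quiesce_done(struct host *h)
    {
        (void)h;
        printf("all queues quiesced\n"); /* one wait for the whole tag set */
    }

    static void host_block(struct host *h)
    {
        for (int i = 0; i < h->ndev; i++)
            h->devs[i].state = SDEV_BLOCK;
        wait_quiesce_done(h);
    }

    static void host_unblock(struct host *h, enum sdev_state new_state)
    {
        for (int i = 0; i < h->ndev; i++)
            h->devs[i].state = new_state;
    }

    int main(void)
    {
        struct host h = { { { SDEV_RUNNING }, { SDEV_RUNNING } }, 2 };
        host_block(&h);
        host_unblock(&h, SDEV_RUNNING);
        return 0;
    }
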