Searched refs:queue_max_hw_sectors (Results 1 – 15 of 15) sorted by relevance
107 if (queue_max_hw_sectors(sdev->request_queue) > max_sectors) in slave_configure()
130 min_t(size_t, queue_max_hw_sectors(sdev->request_queue), in slave_configure()
572 return sprintf(buf, "%u\n", queue_max_hw_sectors(sdev->request_queue)); in max_sectors_show()
230 if (len > (queue_max_hw_sectors(q) << 9)) in blk_rq_map_kern()
225 max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1, in queue_max_sectors_store()
248 int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1; in queue_max_hw_sectors_show()
292 if (hdr->dxfer_len > (queue_max_hw_sectors(q) << 9)) in sg_io()
693 if (((bio->bi_iter.bi_size + len) >> 9) > queue_max_hw_sectors(q)) in __bio_add_pc_page()
144 queue_max_hw_sectors(q) << SECTOR_SHIFT); in sd_zbc_alloc_report_buffer()
3167 rw_max = min(rw_max, queue_max_hw_sectors(q)); in sd_revalidate_disk()
392 queue_max_hw_sectors(dev->q) / (geo->csecs >> SECTOR_SHIFT)); in pblk_core_init()
1243 blk_queue_max_hw_sectors(tqueue, queue_max_hw_sectors(bqueue)); in pblk_init()
109 dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q); in iblock_configure_device()
306 min_not_zero(sd->host->max_sectors, queue_max_hw_sectors(q)); in pscsi_add_device_to_list()
1277 static inline unsigned int queue_max_hw_sectors(const struct request_queue *q) in queue_max_hw_sectors() function
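
For reference, the hit above is the helper's definition (in include/linux/blkdev.h). A minimal sketch of its body, which simply reads the hardware limit out of the queue's limits structure:

    static inline unsigned int queue_max_hw_sectors(const struct request_queue *q)
    {
            /* max_hw_sectors is the hard per-request limit set by the driver/controller */
            return q->limits.max_hw_sectors;
    }
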
1346 max_hw_sectors = min(queue_max_hw_sectors(b), max_bio_size >> 9); in drbd_setup_queue_param()
1384 now = queue_max_hw_sectors(device->rq_queue) << 9; in drbd_reconsider_queue_parameters()
1389 local = queue_max_hw_sectors(bdev->backing_bdev->bd_disk->queue) << 9; in drbd_reconsider_queue_parameters()
624 max_bio_size = queue_max_hw_sectors(device->rq_queue) << 9; in make_resync_request()
967 max_bio_size = queue_max_hw_sectors(q) << 9; in drbd_send_sizes()
5162 q->limits.max_sectors = queue_max_hw_sectors(q); in rbd_init_disk()
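
Most of the hits above follow one of two patterns: converting the limit between 512-byte sectors and bytes (the << 9 / >> 9 and SECTOR_SHIFT shifts), or propagating it to a stacked queue via blk_queue_max_hw_sectors(). A hedged sketch of the byte-conversion pattern; clamp_to_hw_limit() is a hypothetical helper for illustration, not a kernel API:

    #include <linux/blkdev.h>

    /* Hypothetical helper: cap a transfer length (in bytes) to the
     * queue's hardware limit, which is reported in 512-byte sectors. */
    static unsigned int clamp_to_hw_limit(struct request_queue *q,
                                          unsigned int len)
    {
            unsigned int max_bytes = queue_max_hw_sectors(q) << 9;

            return min(len, max_bytes);
    }
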