Cross-references to bdev_get_queue() in Linux v5.4

/Linux-v5.4/block/
blk-zoned.c
      91  struct request_queue *q = bdev_get_queue(bdev);  in blkdev_nr_zones()
     166  struct request_queue *q = bdev_get_queue(bdev);  in blkdev_report_zones()
     227  if (!blk_queue_zone_resetall(bdev_get_queue(bdev)))  in blkdev_allow_reset_all_zones()
     257  struct request_queue *q = bdev_get_queue(bdev);  in blkdev_reset_zones()
     326  q = bdev_get_queue(bdev);  in blkdev_report_zones_ioctl()
     384  q = bdev_get_queue(bdev);  in blkdev_reset_zones_ioctl()

blk-lib.c
      29  struct request_queue *q = bdev_get_queue(bdev);  in __blkdev_issue_discard()
     135  struct request_queue *q = bdev_get_queue(bdev);  in __blkdev_issue_write_same()
     218  struct request_queue *q = bdev_get_queue(bdev);  in __blkdev_issue_write_zeroes()
     272  struct request_queue *q = bdev_get_queue(bdev);  in __blkdev_issue_zero_pages()

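Every helper in blk-lib.c opens the same way: resolve the queue once, then refuse the request if the device is gone or the queue lacks the capability. A minimal sketch of that guard, modeled on __blkdev_issue_discard() at line 29 above (the function name sketch_discard_guard is ours, not from the file):

#include <linux/blkdev.h>
#include <linux/errno.h>

/* Opening guard shared by the __blkdev_issue_*() helpers (v5.4). */
static int sketch_discard_guard(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (!q)
		return -ENXIO;		/* device has gone away */

	if (!blk_queue_discard(q))
		return -EOPNOTSUPP;	/* queue does not advertise discard */

	return 0;			/* caller may build and submit discard bios */
}
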
ioctl.c
     206  struct request_queue *q = bdev_get_queue(bdev);  in blk_ioctl_discard()
     565  queue_max_sectors(bdev_get_queue(bdev)));  in blkdev_ioctl()
     568  return put_ushort(arg, !blk_queue_nonrot(bdev_get_queue(bdev)));  in blkdev_ioctl()

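The two blkdev_ioctl() hits at lines 565/568 are one-line queue queries surfaced to user space. A hedged reconstruction of what they compute (the ioctl names BLKSECTGET and BLKROTATIONAL are our reading of that part of the switch, and put_user() stands in for the file's put_ushort() helper):

#include <linux/blkdev.h>
#include <linux/uaccess.h>

/* BLKSECTGET: largest transfer, in 512-byte sectors, the queue accepts. */
static int sketch_sectget(struct block_device *bdev, unsigned short __user *argp)
{
	return put_user((unsigned short)queue_max_sectors(bdev_get_queue(bdev)),
			argp);
}

/* BLKROTATIONAL: the inverse of the queue's QUEUE_FLAG_NONROT bit. */
static int sketch_rotational(struct block_device *bdev, unsigned short __user *argp)
{
	return put_user((unsigned short)!blk_queue_nonrot(bdev_get_queue(bdev)),
			argp);
}
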
compat_ioctl.c
     378  queue_max_sectors(bdev_get_queue(bdev)));  in compat_blkdev_ioctl()
     382  !blk_queue_nonrot(bdev_get_queue(bdev)));  in compat_blkdev_ioctl()

blk-flush.c
     442  q = bdev_get_queue(bdev);  in blkdev_issue_flush()

blk-settings.c
     635  struct request_queue *bq = bdev_get_queue(bdev);  in bdev_stack_limits()

/Linux-v5.4/include/linux/
blkdev.h
     898  static inline struct request_queue *bdev_get_queue(struct block_device *bdev)  in bdev_get_queue() (definition)
    1309  return queue_logical_block_size(bdev_get_queue(bdev));  in bdev_logical_block_size()
    1319  return queue_physical_block_size(bdev_get_queue(bdev));  in bdev_physical_block_size()
    1329  return queue_io_min(bdev_get_queue(bdev));  in bdev_io_min()
    1339  return queue_io_opt(bdev_get_queue(bdev));  in bdev_io_opt()
    1361  struct request_queue *q = bdev_get_queue(bdev);  in bdev_alignment_offset()
    1405  struct request_queue *q = bdev_get_queue(bdev);  in bdev_discard_alignment()
    1415  struct request_queue *q = bdev_get_queue(bdev);  in bdev_write_same()
    1425  struct request_queue *q = bdev_get_queue(bdev);  in bdev_write_zeroes_sectors()
    1435  struct request_queue *q = bdev_get_queue(bdev);  in bdev_zoned_model()
    [all …]

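Line 898 is the definition every other hit in this list funnels through, and the bdev_*() helpers beneath it are thin wrappers that resolve the queue and delegate to the matching queue_*() accessor. As it reads in v5.4 (the "never NULL" comment is the kernel's own; the wrapper's unsigned short return type is our recollection of this era of the tree):

static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
{
	return bdev->bd_disk->queue;	/* this is never NULL */
}

/* Wrapper shape shared by the helpers listed above, e.g. line 1309: */
static inline unsigned short bdev_logical_block_size(struct block_device *bdev)
{
	return queue_logical_block_size(bdev_get_queue(bdev));
}
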
/Linux-v5.4/drivers/md/
dm-table.c
     296  q = bdev_get_queue(bdev);  in device_area_is_invalid()
     472  struct request_queue *q = bdev_get_queue(bdev);  in dm_set_device_limits()
     929  struct request_queue *q = bdev_get_queue(dev->bdev);  in device_is_rq_based()
    1422  struct request_queue *q = bdev_get_queue(dev->bdev);  in device_is_zoned_model()
    1452  struct request_queue *q = bdev_get_queue(dev->bdev);  in device_matches_zone_sectors()
    1635  struct request_queue *q = bdev_get_queue(dev->bdev);  in device_flush_capable()
    1702  struct request_queue *q = bdev_get_queue(dev->bdev);  in device_is_nonrot()
    1710  struct request_queue *q = bdev_get_queue(dev->bdev);  in device_is_not_random()
    1749  struct request_queue *q = bdev_get_queue(dev->bdev);  in device_not_write_same_capable()
    1776  struct request_queue *q = bdev_get_queue(dev->bdev);  in device_not_write_zeroes_capable()
    [all …]

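Most of the dm-table.c hits share one shape: an iterate_devices_callout_fn that dm-table runs over every underlying device to answer a yes/no question about its queue. A sketch modeled on device_is_nonrot() at line 1702 above (the signature is the callback type from include/linux/device-mapper.h; the name sketch_device_is_nonrot is ours):

#include <linux/device-mapper.h>
#include <linux/blkdev.h>

/* Per-device predicate: nonzero iff this device's queue is non-rotational.
 * dm-table combines the results across all devices backing the target. */
static int sketch_device_is_nonrot(struct dm_target *ti, struct dm_dev *dev,
				   sector_t start, sector_t len, void *data)
{
	struct request_queue *q = bdev_get_queue(dev->bdev);

	return q && blk_queue_nonrot(q);
}
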
md-linear.c
      64  struct request_queue *q = bdev_get_queue(conf->disks[i].rdev->bdev);  in linear_congested()
     123  if (blk_queue_discard(bdev_get_queue(rdev->bdev)))  in linear_conf()

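linear_congested() at line 64 is the classic pre-5.9 congestion poll: walk the member devices and OR together the congestion bits of each queue's backing_dev_info. A condensed, self-contained sketch (the array-of-bdevs interface is ours; the real code walks conf->disks under RCU):

#include <linux/blkdev.h>
#include <linux/backing-dev.h>

/* Report congestion if any member device's bdi is congested. */
static int sketch_members_congested(struct block_device **bdevs, int nr, int bits)
{
	int i, ret = 0;

	for (i = 0; i < nr && !ret; i++) {
		struct request_queue *q = bdev_get_queue(bdevs[i]);

		ret |= bdi_congested(q->backing_dev_info, bits);
	}
	return ret;
}
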
raid0.c
      40  struct request_queue *q = bdev_get_queue(devlist[i]->bdev);  in raid0_congested()
     412  if (blk_queue_discard(bdev_get_queue(rdev->bdev)))  in raid0_run()
     559  trace_block_bio_remap(bdev_get_queue(rdev->bdev),  in raid0_handle_discard()

dm-clone-target.c
    1471  source_q = bdev_get_queue(clone->source_dev->bdev);  in clone_is_congested()
    1472  dest_q = bdev_get_queue(clone->dest_dev->bdev);  in clone_is_congested()
    1977  struct request_queue *q = bdev_get_queue(bdev);  in bdev_supports_discards()
    1989  struct queue_limits *dest_limits = &bdev_get_queue(dest_dev)->limits;  in disable_passdown_if_not_supported()
    2011  struct queue_limits *dest_limits = &bdev_get_queue(dest_bdev)->limits;  in set_discard_limits()

dm-mpath.c
     515  q = bdev_get_queue(bdev);  in multipath_clone_and_map()
     816  struct request_queue *q = bdev_get_queue(bdev);  in setup_scsi_dh()
     895  q = bdev_get_queue(p->path.dev->bdev);  in parse_path()
    1538  struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);  in activate_or_offline_path()
    1952  struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);  in pgpath_busy()

dm-zoned-target.c
     578  if (blk_queue_dying(bdev_get_queue(dmz_dev->bdev))) {  in dmz_bdev_is_dying()
     691  q = bdev_get_queue(dev->bdev);  in dmz_get_zoned_device()

md-multipath.c
     164  struct request_queue *q = bdev_get_queue(rdev->bdev);  in multipath_congested()

dm-io.c
     306  struct request_queue *q = bdev_get_queue(where->bdev);  in do_region()

/Linux-v5.4/drivers/target/
target_core_iblock.c
     106  q = bdev_get_queue(bd);  in iblock_configure_device()
     695  struct request_queue *q = bdev_get_queue(ib_dev->ibd_bd);  in iblock_execute_rw()
     794  struct request_queue *q = bdev_get_queue(bd);  in iblock_get_blocks()
     855  struct request_queue *q = bdev_get_queue(bd);  in iblock_get_write_cache()

/Linux-v5.4/fs/jfs/
ioctl.c
     125  struct request_queue *q = bdev_get_queue(sb->s_bdev);  in jfs_ioctl()

super.c
     377  struct request_queue *q = bdev_get_queue(sb->s_bdev);  in parse_options()
     396  struct request_queue *q = bdev_get_queue(sb->s_bdev);  in parse_options()

/Linux-v5.4/fs/xfs/
xfs_discard.c
     150  struct request_queue *q = bdev_get_queue(mp->m_ddev_targp->bt_bdev);  in xfs_ioc_trim()

/Linux-v5.4/drivers/md/bcache/
request.c
    1017  !blk_queue_discard(bdev_get_queue(dc->bdev)))  in cached_dev_write()
    1123  !blk_queue_discard(bdev_get_queue(dc->bdev)))  in detached_dev_do_request()
    1249  struct request_queue *q = bdev_get_queue(dc->bdev);  in cached_dev_congested()
    1260  q = bdev_get_queue(ca->bdev);  in cached_dev_congested()
    1372  q = bdev_get_queue(ca->bdev);  in flash_dev_congested()

/Linux-v5.4/fs/iomap/
direct-io.c
      69  dio->submit.last_queue = bdev_get_queue(iomap->bdev);  in iomap_dio_submit_bio()
     234  blk_queue_fua(bdev_get_queue(iomap->bdev)))  in iomap_dio_bio_actor()

/Linux-v5.4/drivers/nvme/target/
io-cmd-bdev.c
      13  const struct queue_limits *ql = &bdev_get_queue(bdev)->limits;  in nvmet_bdev_set_limits()

/Linux-v5.4/drivers/block/xen-blkback/
xenbus.c
     481  q = bdev_get_queue(bdev);  in xen_vbd_create()
     540  struct request_queue *q = bdev_get_queue(bdev);  in xen_blkbk_discard()

/Linux-v5.4/fs/
block_dev.c
     257  !blk_poll(bdev_get_queue(bdev), qc, true))  in __blkdev_direct_IO_simple()
     293  struct request_queue *q = bdev_get_queue(bdev);  in blkdev_iopoll()
     451  !blk_poll(bdev_get_queue(bdev), qc, true))  in __blkdev_direct_IO()

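The blk_poll() hits at lines 257 and 451 are the synchronous tail of polled direct I/O: for IOCB_HIPRI requests the submitting task spins on the queue instead of sleeping until the bio completes. A hedged, excerpt-style sketch of that loop from __blkdev_direct_IO_simple() (bio, iocb, bdev, and the cookie qc are locals of that function; completion is signalled by the endio handler clearing bio.bi_private):

	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (!READ_ONCE(bio.bi_private))
			break;		/* endio ran: the bio is done */
		if (!(iocb->ki_flags & IOCB_HIPRI) ||
		    !blk_poll(bdev_get_queue(bdev), qc, true))
			io_schedule();	/* polling made no progress: sleep */
	}
	__set_current_state(TASK_RUNNING);
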
/Linux-v5.4/fs/fat/
file.c
     130  struct request_queue *q = bdev_get_queue(sb->s_bdev);  in fat_ioctl_fitrim()