
Searched refs:bdev_get_queue (Results 1 – 25 of 61) sorted by relevance


/Linux-v5.4/block/
blk-zoned.c:91 struct request_queue *q = bdev_get_queue(bdev); in blkdev_nr_zones()
166 struct request_queue *q = bdev_get_queue(bdev); in blkdev_report_zones()
227 if (!blk_queue_zone_resetall(bdev_get_queue(bdev))) in blkdev_allow_reset_all_zones()
257 struct request_queue *q = bdev_get_queue(bdev); in blkdev_reset_zones()
326 q = bdev_get_queue(bdev); in blkdev_report_zones_ioctl()
384 q = bdev_get_queue(bdev); in blkdev_reset_zones_ioctl()
blk-lib.c:29 struct request_queue *q = bdev_get_queue(bdev); in __blkdev_issue_discard()
135 struct request_queue *q = bdev_get_queue(bdev); in __blkdev_issue_write_same()
218 struct request_queue *q = bdev_get_queue(bdev); in __blkdev_issue_write_zeroes()
272 struct request_queue *q = bdev_get_queue(bdev); in __blkdev_issue_zero_pages()
ioctl.c:206 struct request_queue *q = bdev_get_queue(bdev); in blk_ioctl_discard()
565 queue_max_sectors(bdev_get_queue(bdev))); in blkdev_ioctl()
568 return put_ushort(arg, !blk_queue_nonrot(bdev_get_queue(bdev))); in blkdev_ioctl()
compat_ioctl.c:378 queue_max_sectors(bdev_get_queue(bdev))); in compat_blkdev_ioctl()
382 !blk_queue_nonrot(bdev_get_queue(bdev))); in compat_blkdev_ioctl()
blk-flush.c:442 q = bdev_get_queue(bdev); in blkdev_issue_flush()
blk-settings.c:635 struct request_queue *bq = bdev_get_queue(bdev); in bdev_stack_limits()
/Linux-v5.4/include/linux/
blkdev.h:898 static inline struct request_queue *bdev_get_queue(struct block_device *bdev) in bdev_get_queue() (function definition)
1309 return queue_logical_block_size(bdev_get_queue(bdev)); in bdev_logical_block_size()
1319 return queue_physical_block_size(bdev_get_queue(bdev)); in bdev_physical_block_size()
1329 return queue_io_min(bdev_get_queue(bdev)); in bdev_io_min()
1339 return queue_io_opt(bdev_get_queue(bdev)); in bdev_io_opt()
1361 struct request_queue *q = bdev_get_queue(bdev); in bdev_alignment_offset()
1405 struct request_queue *q = bdev_get_queue(bdev); in bdev_discard_alignment()
1415 struct request_queue *q = bdev_get_queue(bdev); in bdev_write_same()
1425 struct request_queue *q = bdev_get_queue(bdev); in bdev_write_zeroes_sectors()
1435 struct request_queue *q = bdev_get_queue(bdev); in bdev_zoned_model()
[all …]
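
The blkdev.h hits above show that bdev_get_queue() is a small inline accessor and that most per-device queue properties (block sizes, I/O hints, discard and write-zeroes support) are queried through it. The following is a minimal, hypothetical sketch rather than code from the tree: the helper name report_bdev_queue() and the pr_info output are assumptions, but every call it makes (bdev_get_queue, queue_logical_block_size, queue_max_sectors, blk_queue_discard, blk_queue_nonrot) appears in the results on this page.

    #include <linux/blkdev.h>
    #include <linux/printk.h>

    /* Hypothetical helper, not part of the v5.4 tree: dump a few
     * request_queue properties of an already-opened block device. */
    static void report_bdev_queue(struct block_device *bdev)
    {
            /* Inline accessor defined at blkdev.h:898; in v5.4 it
             * returns bdev->bd_disk->queue and is never NULL. */
            struct request_queue *q = bdev_get_queue(bdev);

            pr_info("logical block size: %u\n",
                    (unsigned int)queue_logical_block_size(q));
            pr_info("max sectors per request: %u\n", queue_max_sectors(q));
            pr_info("discard supported: %d\n", blk_queue_discard(q) ? 1 : 0);
            pr_info("non-rotational: %d\n", blk_queue_nonrot(q) ? 1 : 0);
    }
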
/Linux-v5.4/drivers/md/
dm-table.c:296 q = bdev_get_queue(bdev); in device_area_is_invalid()
472 struct request_queue *q = bdev_get_queue(bdev); in dm_set_device_limits()
929 struct request_queue *q = bdev_get_queue(dev->bdev); in device_is_rq_based()
1422 struct request_queue *q = bdev_get_queue(dev->bdev); in device_is_zoned_model()
1452 struct request_queue *q = bdev_get_queue(dev->bdev); in device_matches_zone_sectors()
1635 struct request_queue *q = bdev_get_queue(dev->bdev); in device_flush_capable()
1702 struct request_queue *q = bdev_get_queue(dev->bdev); in device_is_nonrot()
1710 struct request_queue *q = bdev_get_queue(dev->bdev); in device_is_not_random()
1749 struct request_queue *q = bdev_get_queue(dev->bdev); in device_not_write_same_capable()
1776 struct request_queue *q = bdev_get_queue(dev->bdev); in device_not_write_zeroes_capable()
[all …]
md-linear.c:64 struct request_queue *q = bdev_get_queue(conf->disks[i].rdev->bdev); in linear_congested()
123 if (blk_queue_discard(bdev_get_queue(rdev->bdev))) in linear_conf()
raid0.c:40 struct request_queue *q = bdev_get_queue(devlist[i]->bdev); in raid0_congested()
412 if (blk_queue_discard(bdev_get_queue(rdev->bdev))) in raid0_run()
559 trace_block_bio_remap(bdev_get_queue(rdev->bdev), in raid0_handle_discard()
dm-clone-target.c:1471 source_q = bdev_get_queue(clone->source_dev->bdev); in clone_is_congested()
1472 dest_q = bdev_get_queue(clone->dest_dev->bdev); in clone_is_congested()
1977 struct request_queue *q = bdev_get_queue(bdev); in bdev_supports_discards()
1989 struct queue_limits *dest_limits = &bdev_get_queue(dest_dev)->limits; in disable_passdown_if_not_supported()
2011 struct queue_limits *dest_limits = &bdev_get_queue(dest_bdev)->limits; in set_discard_limits()
dm-mpath.c:515 q = bdev_get_queue(bdev); in multipath_clone_and_map()
816 struct request_queue *q = bdev_get_queue(bdev); in setup_scsi_dh()
895 q = bdev_get_queue(p->path.dev->bdev); in parse_path()
1538 struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev); in activate_or_offline_path()
1952 struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev); in pgpath_busy()
dm-zoned-target.c:578 if (blk_queue_dying(bdev_get_queue(dmz_dev->bdev))) { in dmz_bdev_is_dying()
691 q = bdev_get_queue(dev->bdev); in dmz_get_zoned_device()
md-multipath.c:164 struct request_queue *q = bdev_get_queue(rdev->bdev); in multipath_congested()
dm-io.c:306 struct request_queue *q = bdev_get_queue(where->bdev); in do_region()
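
Many of the dm-table.c hits above are iterate_devices callbacks that answer a yes/no question about each underlying device. The sketch below illustrates that pattern, assuming the v5.4 iterate_devices_callout_fn signature from include/linux/device-mapper.h; it mirrors the shape of the device_is_nonrot() hit at dm-table.c:1702 but is written here as an illustration, not copied from the tree.

    #include <linux/device-mapper.h>
    #include <linux/blkdev.h>

    /* Illustrative iterate_devices callback: returns nonzero when the
     * underlying device's queue is flagged non-rotational. */
    static int sketch_device_is_nonrot(struct dm_target *ti, struct dm_dev *dev,
                                       sector_t start, sector_t len, void *data)
    {
            struct request_queue *q = bdev_get_queue(dev->bdev);

            return q && blk_queue_nonrot(q);
    }

A table-wide property such as "every device is non-rotational" is then derived by running a callback like this over all devices referenced by the table.
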
/Linux-v5.4/drivers/target/
target_core_iblock.c:106 q = bdev_get_queue(bd); in iblock_configure_device()
695 struct request_queue *q = bdev_get_queue(ib_dev->ibd_bd); in iblock_execute_rw()
794 struct request_queue *q = bdev_get_queue(bd); in iblock_get_blocks()
855 struct request_queue *q = bdev_get_queue(bd); in iblock_get_write_cache()
/Linux-v5.4/fs/jfs/
ioctl.c:125 struct request_queue *q = bdev_get_queue(sb->s_bdev); in jfs_ioctl()
super.c:377 struct request_queue *q = bdev_get_queue(sb->s_bdev); in parse_options()
396 struct request_queue *q = bdev_get_queue(sb->s_bdev); in parse_options()
/Linux-v5.4/fs/xfs/
xfs_discard.c:150 struct request_queue *q = bdev_get_queue(mp->m_ddev_targp->bt_bdev); in xfs_ioc_trim()
/Linux-v5.4/drivers/md/bcache/
request.c:1017 !blk_queue_discard(bdev_get_queue(dc->bdev))) in cached_dev_write()
1123 !blk_queue_discard(bdev_get_queue(dc->bdev))) in detached_dev_do_request()
1249 struct request_queue *q = bdev_get_queue(dc->bdev); in cached_dev_congested()
1260 q = bdev_get_queue(ca->bdev); in cached_dev_congested()
1372 q = bdev_get_queue(ca->bdev); in flash_dev_congested()
/Linux-v5.4/fs/iomap/
direct-io.c:69 dio->submit.last_queue = bdev_get_queue(iomap->bdev); in iomap_dio_submit_bio()
234 blk_queue_fua(bdev_get_queue(iomap->bdev))) in iomap_dio_bio_actor()
/Linux-v5.4/drivers/nvme/target/
io-cmd-bdev.c:13 const struct queue_limits *ql = &bdev_get_queue(bdev)->limits; in nvmet_bdev_set_limits()
/Linux-v5.4/drivers/block/xen-blkback/
xenbus.c:481 q = bdev_get_queue(bdev); in xen_vbd_create()
540 struct request_queue *q = bdev_get_queue(bdev); in xen_blkbk_discard()
/Linux-v5.4/fs/
block_dev.c:257 !blk_poll(bdev_get_queue(bdev), qc, true)) in __blkdev_direct_IO_simple()
293 struct request_queue *q = bdev_get_queue(bdev); in blkdev_iopoll()
451 !blk_poll(bdev_get_queue(bdev), qc, true)) in __blkdev_direct_IO()
/Linux-v5.4/fs/fat/
file.c:130 struct request_queue *q = bdev_get_queue(sb->s_bdev); in fat_ioctl_fitrim()
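
The filesystem-side hits (jfs_ioctl(), xfs_ioc_trim(), fat_ioctl_fitrim()) share the same FITRIM preamble: fetch the queue of the superblock's block device and refuse the ioctl if the device cannot discard. A hedged sketch of that check follows; the helper name fitrim_precheck() is hypothetical, while the calls themselves match the hits listed above.

    #include <linux/fs.h>
    #include <linux/blkdev.h>
    #include <linux/errno.h>

    /* Hypothetical precheck mirroring the FITRIM handlers listed above. */
    static int fitrim_precheck(struct super_block *sb)
    {
            struct request_queue *q = bdev_get_queue(sb->s_bdev);

            if (!blk_queue_discard(q))
                    return -EOPNOTSUPP;     /* device does not support discard */

            return 0;
    }
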
