
Searched refs:bdev_get_queue (Results 1 – 25 of 69) sorted by relevance


/Linux-v5.15/drivers/block/rnbd/
rnbd-srv-dev.h
51 return queue_max_segments(bdev_get_queue(dev->bdev)); in rnbd_dev_get_max_segs()
56 return queue_max_hw_sectors(bdev_get_queue(dev->bdev)); in rnbd_dev_get_max_hw_sects()
61 return blk_queue_secure_erase(bdev_get_queue(dev->bdev)); in rnbd_dev_get_secure_discard()
66 if (!blk_queue_discard(bdev_get_queue(dev->bdev))) in rnbd_dev_get_max_discard_sects()
69 return blk_queue_get_max_sectors(bdev_get_queue(dev->bdev), in rnbd_dev_get_max_discard_sects()
75 return bdev_get_queue(dev->bdev)->limits.discard_granularity; in rnbd_dev_get_discard_granularity()
80 return bdev_get_queue(dev->bdev)->limits.discard_alignment; in rnbd_dev_get_discard_alignment()
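Note: the rnbd server helpers above all follow the same shape - resolve the backing device's request_queue once with bdev_get_queue() and read a limit or a field of its queue_limits from it. A minimal sketch of that pattern, illustrative only and not verbatim rnbd-srv-dev.h (the example_* names are made up):

    #include <linux/blkdev.h>

    /* read transfer limits straight off the device's queue */
    static inline u32 example_get_max_segs(struct block_device *bdev)
    {
            return queue_max_segments(bdev_get_queue(bdev));
    }

    static inline u32 example_get_max_hw_sects(struct block_device *bdev)
    {
            return queue_max_hw_sectors(bdev_get_queue(bdev));
    }

    /* discard parameters live in the embedded struct queue_limits */
    static inline u32 example_get_discard_granularity(struct block_device *bdev)
    {
            return bdev_get_queue(bdev)->limits.discard_granularity;
    }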
/Linux-v5.15/include/linux/
blkdev.h
897 static inline struct request_queue *bdev_get_queue(struct block_device *bdev) in bdev_get_queue() function
967 return blk_queue_zone_no(bdev_get_queue(bio->bi_bdev), in bio_zone_no()
973 return blk_queue_zone_is_seq(bdev_get_queue(bio->bi_bdev), in bio_zone_is_seq()
1397 return queue_logical_block_size(bdev_get_queue(bdev)); in bdev_logical_block_size()
1407 return queue_physical_block_size(bdev_get_queue(bdev)); in bdev_physical_block_size()
1417 return queue_io_min(bdev_get_queue(bdev)); in bdev_io_min()
1427 return queue_io_opt(bdev_get_queue(bdev)); in bdev_io_opt()
1439 return queue_zone_write_granularity(bdev_get_queue(bdev)); in bdev_zone_write_granularity()
1461 struct request_queue *q = bdev_get_queue(bdev); in bdev_alignment_offset()
1520 struct request_queue *q = bdev_get_queue(bdev); in bdev_discard_alignment()
[all …]
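For reference, the accessor itself is a one-line inline in include/linux/blkdev.h, and the bdev_* helpers listed above are thin wrappers over it. A paraphrased sketch of the v5.15 shape (check the tree for the authoritative definitions):

    /* the queue is reached through the gendisk; for an opened bdev it is non-NULL */
    static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
    {
            return bdev->bd_disk->queue;
    }

    /* typical wrapper layered on top of the accessor */
    static inline unsigned int bdev_logical_block_size(struct block_device *bdev)
    {
            return queue_logical_block_size(bdev_get_queue(bdev));
    }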
/Linux-v5.15/block/
blk-lib.c
30 struct request_queue *q = bdev_get_queue(bdev); in __blkdev_issue_discard()
170 struct request_queue *q = bdev_get_queue(bdev); in __blkdev_issue_write_same()
253 struct request_queue *q = bdev_get_queue(bdev); in __blkdev_issue_write_zeroes()
307 struct request_queue *q = bdev_get_queue(bdev); in __blkdev_issue_zero_pages()
blk-zoned.c
153 if (!blk_queue_is_zoned(bdev_get_queue(bdev)) || in blkdev_report_zones()
193 struct request_queue *q = bdev_get_queue(bdev); in blkdev_zone_reset_all_emulated()
269 struct request_queue *q = bdev_get_queue(bdev); in blkdev_zone_mgmt()
356 q = bdev_get_queue(bdev); in blkdev_report_zones_ioctl()
414 q = bdev_get_queue(bdev); in blkdev_zone_mgmt_ioctl()
fops.c
111 !blk_poll(bdev_get_queue(bdev), qc, true)) in __blkdev_direct_IO_simple()
147 struct request_queue *q = bdev_get_queue(bdev); in blkdev_iopoll()
307 !blk_poll(bdev_get_queue(bdev), qc, true)) in __blkdev_direct_IO()
ioctl.c
115 struct request_queue *q = bdev_get_queue(bdev); in blk_ioctl_discard()
502 queue_max_sectors(bdev_get_queue(bdev))); in blkdev_common_ioctl()
505 return put_ushort(argp, !blk_queue_nonrot(bdev_get_queue(bdev))); in blkdev_common_ioctl()
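The block/ call sites share a recurring check-and-bail pattern: resolve the queue, verify the relevant capability flag, then build and submit bios within the queue's limits. A simplified sketch of that gate, illustrative rather than verbatim blk-lib.c (example_check_discard is a made-up name):

    #include <linux/blkdev.h>

    static int example_check_discard(struct block_device *bdev)
    {
            struct request_queue *q = bdev_get_queue(bdev);

            if (!q)
                    return -ENXIO;
            if (!blk_queue_discard(q))
                    return -EOPNOTSUPP;     /* device cannot discard */
            return 0;
    }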
/Linux-v5.15/drivers/md/
dm-table.c
402 struct request_queue *q = bdev_get_queue(bdev); in dm_set_device_limits()
849 struct request_queue *q = bdev_get_queue(bdev); in device_is_rq_stackable()
1205 err = blk_crypto_evict_key(bdev_get_queue(dev->bdev), args->key); in dm_keyslot_evict_callback()
1251 struct blk_keyslot_manager *child = bdev_get_queue(dev->bdev)->ksm; in device_intersect_crypto_modes()
1568 struct request_queue *q = bdev_get_queue(dev->bdev); in device_not_zoned_model()
1607 struct request_queue *q = bdev_get_queue(dev->bdev); in device_not_matches_zone_sectors()
1765 struct request_queue *q = bdev_get_queue(dev->bdev); in device_flush_capable()
1815 struct request_queue *q = bdev_get_queue(dev->bdev); in device_is_rotational()
1823 struct request_queue *q = bdev_get_queue(dev->bdev); in device_is_not_random()
1831 struct request_queue *q = bdev_get_queue(dev->bdev); in device_not_write_same_capable()
[all …]
md-linear.c
100 if (blk_queue_discard(bdev_get_queue(rdev->bdev))) in linear_conf()
dm-zoned-target.c
590 if (blk_queue_dying(bdev_get_queue(dmz_dev->bdev))) { in dmz_bdev_is_dying()
790 q = bdev_get_queue(zoned_dev->bdev); in dmz_fixup_devices()
808 q = bdev_get_queue(zoned_dev->bdev); in dmz_fixup_devices()
dm-mpath.c
532 q = bdev_get_queue(bdev); in multipath_clone_and_map()
873 struct request_queue *q = bdev_get_queue(bdev); in setup_scsi_dh()
952 q = bdev_get_queue(p->path.dev->bdev); in parse_path()
1618 struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev); in activate_or_offline_path()
2091 struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev); in pgpath_busy()
dm-zone.c
274 return !blk_queue_is_zoned(bdev_get_queue(dev->bdev)); in device_not_zone_append_capable()
dm-clone-target.c
2022 struct request_queue *q = bdev_get_queue(bdev); in bdev_supports_discards()
2034 struct queue_limits *dest_limits = &bdev_get_queue(dest_dev)->limits; in disable_passdown_if_not_supported()
2056 struct queue_limits *dest_limits = &bdev_get_queue(dest_bdev)->limits; in set_discard_limits()
dm-io.c
306 struct request_queue *q = bdev_get_queue(where->bdev); in do_region()
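Most of the dm-table.c hits above are iterate_devices callbacks: device-mapper walks every underlying device of a target and inspects its queue to decide whether a feature can be stacked onto the mapped device. A simplified sketch of such a callback, illustrative rather than verbatim dm-table.c (example_device_is_rotational is a made-up name):

    #include <linux/device-mapper.h>
    #include <linux/blkdev.h>

    static int example_device_is_rotational(struct dm_target *ti, struct dm_dev *dev,
                                            sector_t start, sector_t len, void *data)
    {
            struct request_queue *q = bdev_get_queue(dev->bdev);

            /* non-zero means "this underlying device is rotational" */
            return !blk_queue_nonrot(q);
    }

Callbacks with this signature are handed to ti->type->iterate_devices() when dm computes the queue limits and feature flags of the mapped device.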
/Linux-v5.15/drivers/target/
target_core_iblock.c
115 q = bdev_get_queue(bd); in iblock_configure_device()
734 struct request_queue *q = bdev_get_queue(ib_dev->ibd_bd); in iblock_execute_rw()
833 struct request_queue *q = bdev_get_queue(bd); in iblock_get_blocks()
895 struct request_queue *q = bdev_get_queue(bd); in iblock_get_write_cache()
/Linux-v5.15/fs/jfs/
ioctl.c
113 struct request_queue *q = bdev_get_queue(sb->s_bdev); in jfs_ioctl()
super.c
377 struct request_queue *q = bdev_get_queue(sb->s_bdev); in parse_options()
396 struct request_queue *q = bdev_get_queue(sb->s_bdev); in parse_options()
/Linux-v5.15/fs/xfs/
xfs_discard.c
155 struct request_queue *q = bdev_get_queue(mp->m_ddev_targp->bt_bdev); in xfs_ioc_trim()
/Linux-v5.15/fs/iomap/
direct-io.c
70 dio->submit.last_queue = bdev_get_queue(iter->iomap.bdev); in iomap_dio_submit_bio()
270 blk_queue_fua(bdev_get_queue(iomap->bdev))) in iomap_dio_bio_iter()
/Linux-v5.15/fs/crypto/
inline_crypt.c
40 devs[0] = bdev_get_queue(sb->s_bdev); in fscrypt_get_devices()
/Linux-v5.15/fs/exfat/
file.c
356 struct request_queue *q = bdev_get_queue(inode->i_sb->s_bdev); in exfat_ioctl_fitrim()
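The filesystem hits here (jfs, xfs, exfat, plus fat and ext4 further down) are mostly FITRIM handlers, which gate the ioctl on the backing device's discard support before filling a struct fstrim_range and calling the fs-specific trim routine. A simplified sketch of that gate, illustrative only (example_fitrim_supported is a made-up name):

    #include <linux/fs.h>
    #include <linux/blkdev.h>

    static int example_fitrim_supported(struct super_block *sb)
    {
            struct request_queue *q = bdev_get_queue(sb->s_bdev);

            if (!blk_queue_discard(q))
                    return -EOPNOTSUPP;     /* no discard support, no FITRIM */
            return 0;
    }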
/Linux-v5.15/drivers/nvme/target/
io-cmd-bdev.c
13 const struct queue_limits *ql = &bdev_get_queue(bdev)->limits; in nvmet_bdev_set_limits()
/Linux-v5.15/drivers/block/xen-blkback/
xenbus.c
518 q = bdev_get_queue(bdev); in xen_vbd_create()
580 struct request_queue *q = bdev_get_queue(bdev); in xen_blkbk_discard()
/Linux-v5.15/kernel/trace/
blktrace.c
721 q = bdev_get_queue(bdev); in blk_trace_ioctl()
1766 struct request_queue *q = bdev_get_queue(bdev); in sysfs_blk_trace_attr_show()
1800 struct request_queue *q = bdev_get_queue(bdev); in sysfs_blk_trace_attr_store()
/Linux-v5.15/fs/fat/
file.c
130 struct request_queue *q = bdev_get_queue(sb->s_bdev); in fat_ioctl_fitrim()
/Linux-v5.15/fs/ext4/
ioctl.c
829 q = bdev_get_queue(EXT4_SB(sb)->s_journal->j_dev); in ext4_ioctl_checkpoint()
1099 struct request_queue *q = bdev_get_queue(sb->s_bdev); in __ext4_ioctl()
