| /Linux-v5.15/block/ |
| D | elevator.c |
    63 struct elevator_queue *e = q->elevator; in elv_iosched_allow_bio_merge()
    215 struct elevator_queue *e = q->elevator; in elv_rqhash_add()
    231 struct elevator_queue *e = q->elevator; in elv_rqhash_find()
    306 struct elevator_queue *e = q->elevator; in elv_merge()
    400 struct elevator_queue *e = q->elevator; in elv_merged_request()
    414 struct elevator_queue *e = q->elevator; in elv_merge_requests()
    425 struct elevator_queue *e = q->elevator; in elv_latter_request()
    435 struct elevator_queue *e = q->elevator; in elv_former_request()
    492 struct elevator_queue *e = q->elevator; in elv_register_queue()
    520 struct elevator_queue *e = q->elevator; in elv_unregister_queue()
    [all …]
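The hits above all share one shape: the elevator core fetches the attached scheduler from q->elevator and, when the active elevator_type provides the matching hook, forwards the call through e->type->ops. A minimal sketch of that pattern, loosely modeled on the elv_latter_request() hit at line 425 (the next_request hook name is recalled from v5.15's struct elevator_mq_ops and should be checked against include/linux/elevator.h):

```c
/* Sketch of the dispatch-through-ops pattern used throughout elevator.c. */
struct request *elv_latter_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	/* Only forward if the active scheduler implements the hook. */
	if (e->type->ops.next_request)
		return e->type->ops.next_request(q, rq);

	return NULL;
}
```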
|
| D | mq-deadline.c |
    212 struct deadline_data *dd = q->elevator->elevator_data; in dd_request_merged()
    232 struct deadline_data *dd = q->elevator->elevator_data; in dd_merged_requests()
    479 struct deadline_data *dd = hctx->queue->elevator->elevator_data; in dd_dispatch_request()
    500 struct deadline_data *dd = data->q->elevator->elevator_data; in dd_limit_depth()
    517 struct deadline_data *dd = q->elevator->elevator_data; in dd_depth_updated()
    592 q->elevator = eq; in dd_init_sched()
    610 struct deadline_data *dd = q->elevator->elevator_data; in dd_request_merge()
    642 struct deadline_data *dd = q->elevator->elevator_data; in dd_bio_merge()
    663 struct deadline_data *dd = q->elevator->elevator_data; in dd_insert_request()
    717 struct deadline_data *dd = q->elevator->elevator_data; in dd_insert_requests()
    [all …]
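mq-deadline shows the other side of the contract: the scheduler's init_sched hook allocates an elevator_queue, hangs its private state off elevator_data, and publishes it via q->elevator (the `q->elevator = eq;` hit at line 592); every later hook then recovers that state through q->elevator->elevator_data. Kyber and BFQ below follow the same pattern. A simplified sketch assuming v5.15's elevator_alloc() helper, with the list/lock initialization of the real dd_init_sched() trimmed:

```c
/* Simplified init_sched modeled on dd_init_sched(); setup details trimmed. */
static int dd_init_sched(struct request_queue *q, struct elevator_type *e)
{
	struct deadline_data *dd;
	struct elevator_queue *eq;

	eq = elevator_alloc(q, e);           /* allocate the elevator_queue shell */
	if (!eq)
		return -ENOMEM;

	dd = kzalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
	if (!dd) {
		kobject_put(&eq->kobj);
		return -ENOMEM;
	}

	eq->elevator_data = dd;              /* scheduler-private state */
	q->elevator = eq;                    /* later hooks read it back via
	                                        q->elevator->elevator_data */
	return 0;
}
```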
|
| D | blk-mq-sched.h |
    47 struct elevator_queue *e = q->elevator; in blk_mq_sched_allow_merge()
    57 struct elevator_queue *e = rq->q->elevator; in blk_mq_sched_completed_request()
    66 struct elevator_queue *e = q->elevator; in blk_mq_sched_requeue_request()
    74 struct elevator_queue *e = hctx->queue->elevator; in blk_mq_sched_has_work()
|
| D | blk-mq-sched.c |
    121 struct elevator_queue *e = q->elevator; in __blk_mq_do_dispatch_sched()
    297 const bool has_sched = q->elevator; in __blk_mq_sched_dispatch_requests()
    369 struct elevator_queue *e = q->elevator; in __blk_mq_sched_bio_merge()
    433 struct elevator_queue *e = q->elevator; in blk_mq_sched_insert_request()
    496 e = hctx->queue->elevator; in blk_mq_sched_insert_requests()
    598 q->elevator = NULL; in blk_mq_init_sched()
    633 eq = q->elevator; in blk_mq_init_sched()
    651 q->elevator = NULL; in blk_mq_init_sched()
    690 q->elevator = NULL; in blk_mq_exit_sched()
|
| D | kyber-iosched.c |
    425 q->elevator = eq; in kyber_init_sched()
    454 struct kyber_queue_data *kqd = hctx->queue->elevator->elevator_data; in kyber_depth_updated()
    560 struct kyber_queue_data *kqd = data->q->elevator->elevator_data; in kyber_limit_depth()
    614 struct kyber_queue_data *kqd = rq->q->elevator->elevator_data; in kyber_finish_request()
    639 struct kyber_queue_data *kqd = rq->q->elevator->elevator_data; in kyber_completed_request()
    803 struct kyber_queue_data *kqd = hctx->queue->elevator->elevator_data; in kyber_dispatch_request()
    902 struct kyber_queue_data *kqd = q->elevator->elevator_data; \
    961 struct kyber_queue_data *kqd = q->elevator->elevator_data; in KYBER_DEBUGFS_DOMAIN_ATTRS()
|
| D | blk-ioc.c |
    45 struct elevator_type *et = icq->q->elevator->type; in ioc_exit_icq()
    64 struct elevator_type *et = q->elevator->type; in ioc_destroy_icq()
    375 struct elevator_type *et = q->elevator->type; in ioc_create_icq()
|
| D | blk-flush.c |
    241 if (!q->elevator) { in flush_end_io()
    320 if (!q->elevator) { in blk_kick_flush()
    357 if (q->elevator) { in mq_flush_data_end_io()
|
| D | blk-sysfs.c |
    762 if (q->elevator) { in blk_exit_queue()
    764 __elevator_exit(q, q->elevator); in blk_exit_queue()
    890 if (q->elevator) { in blk_register_queue()
    908 if (q->elevator) in blk_register_queue()
    909 kobject_uevent(&q->elevator->kobj, KOBJ_ADD); in blk_register_queue()
    973 if (q->elevator) in blk_unregister_queue()
|
| D | blk-mq.c |
    281 return (rq->rq_flags & (RQF_IO_STAT | RQF_STATS)) || rq->q->elevator; in blk_mq_need_time_stamp()
    290 if (data->q->elevator) { in blk_mq_rq_ctx_init()
    339 struct elevator_queue *e = data->q->elevator; in blk_mq_rq_ctx_init()
    358 struct elevator_queue *e = q->elevator; in __blk_mq_alloc_request()
    481 if (!q->elevator) in blk_mq_alloc_request_hctx()
    517 struct elevator_queue *e = q->elevator; in blk_mq_free_request()
    1632 struct elevator_queue *e = q->elevator; in blk_mq_has_sqsched()
    2032 if (q->elevator && !bypass_insert) in __blk_mq_try_issue_directly()
    2265 } else if (q->elevator) { in blk_mq_submit_bio()
    3645 if (q->elevator && q->elevator->type->ops.depth_updated) in blk_mq_update_nr_requests()
    [all …]
|
| D | Makefile | 6 obj-$(CONFIG_BLOCK) := bdev.o fops.o bio.o elevator.o blk-core.o blk-sysfs.o \
|
| D | bfq-iosched.c |
    421 return bic->icq.q->elevator->elevator_data; in bic_to_bfqd()
    577 struct bfq_data *bfqd = data->q->elevator->elevator_data; in bfq_limit_depth()
    2249 struct bfq_data *bfqd = q->elevator->elevator_data;
    2256 struct bfq_data *bfqd = q->elevator->elevator_data;
    2326 struct bfq_data *bfqd = q->elevator->elevator_data; in bfq_bio_merge()
    2358 struct bfq_data *bfqd = q->elevator->elevator_data; in bfq_request_merge()
    3050 struct bfq_data *bfqd = q->elevator->elevator_data; in bfq_allow_bio_merge()
    3490 bfq_update_peak_rate(q->elevator->elevator_data, rq); in bfq_dispatch_remove()
    4909 struct bfq_data *bfqd = hctx->queue->elevator->elevator_data; in bfq_has_work()
    4921 struct bfq_data *bfqd = hctx->queue->elevator->elevator_data; in __bfq_dispatch_request()
    [all …]
|
| D | blk-mq-debugfs.c |
    837 if (q->elevator && !q->sched_debugfs_dir) in blk_mq_debugfs_register()
    844 if (q->elevator && !hctx->sched_debugfs_dir) in blk_mq_debugfs_register()
    918 struct elevator_type *e = q->elevator->type; in blk_mq_debugfs_register_sched()
    989 struct elevator_type *e = q->elevator->type; in blk_mq_debugfs_register_sched_hctx()
|
| D | blk-mq.h | 170 if (data->q->elevator) in blk_mq_tags_from_data()
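The single blk-mq.h hit is a compact illustration of what the q->elevator test buys throughout blk-mq: with a scheduler attached, request allocation draws from the per-hctx sched_tags set rather than the driver tags. Roughly (a sketch of the v5.15 inline helper; field names recalled from memory and worth verifying against the tree):

```c
/*
 * Sketch: with an I/O scheduler attached, allocation uses the scheduler's
 * sched_tags; without one, the driver tag set is used directly.
 */
static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data)
{
	if (data->q->elevator)
		return data->hctx->sched_tags;

	return data->hctx->tags;
}
```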
|
| D | blk-mq-tag.c | 80 if (!data->q->elevator && !(data->flags & BLK_MQ_REQ_RESERVED) && in __blk_mq_get_tag()
|
| D | blk-core.c | 404 if (q->elevator) in blk_cleanup_queue()
|
| D | bfq-cgroup.c | 544 struct bfq_data *bfqd = blkg->q->elevator->elevator_data; in bfq_pd_init()
|
| /Linux-v5.15/tools/cgroup/ |
| D | iocost_coef_gen.py |
    107 global elevator_path, nomerges_path, elevator, nomerges
    111 f.write(elevator)
    143 elevator = re.sub(r'.*\[(.*)\].*', r'\1', f.read().strip()) variable
|
| /Linux-v5.15/Documentation/block/ |
| D | biodoc.rst |
    306 interfaces would typically use, and the elevator add_request routine
    832 I/O scheduler, a.k.a. elevator, is implemented in two layers. Generic dispatch
    833 queue and specific I/O schedulers. Unless stated otherwise, elevator is used
    848 calls elevator_xxx_fn in the elevator switch (block/elevator.c). Oh, xxx
    849 and xxx might not match exactly, but use your imagination. If an elevator
    856 The functions an elevator may implement are: (* are mandatory)
    899 elevator_put_req_fn Must be used to allocate and free any elevator
    910 elevator_exit_fn Allocate and free any elevator specific storage
    966 advantage of the sorting/merging logic in the elevator. If the
    1112 - elevator support for kiobuf request merging (axboe)
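biodoc.rst still uses the legacy elevator_*_fn naming, but in v5.15 the same idea lives on as the ops table in struct elevator_type that schedulers like mq-deadline.c and kyber-iosched.c above register with elv_register(). A skeletal, hypothetical registration to make the shape concrete; hook signatures are quoted from memory of include/linux/elevator.h and the do-nothing stubs are obviously not a working scheduler:

```c
#include <linux/module.h>
#include <linux/elevator.h>
#include <linux/blk-mq.h>

/* Hypothetical hooks: they only mark where a real policy would plug in. */
static int demo_init_sched(struct request_queue *q, struct elevator_type *e)
{
	struct elevator_queue *eq = elevator_alloc(q, e);

	if (!eq)
		return -ENOMEM;
	q->elevator = eq;	/* same publish step seen in dd_init_sched() */
	return 0;
}

static void demo_exit_sched(struct elevator_queue *eq)
{
	/* No private state here; the elevator core drops eq's kobject. */
}

static void demo_insert_requests(struct blk_mq_hw_ctx *hctx,
				 struct list_head *list, bool at_head)
{
	/* A real elevator queues these internally for later dispatch. */
}

static struct request *demo_dispatch_request(struct blk_mq_hw_ctx *hctx)
{
	return NULL;		/* nothing queued, nothing to dispatch */
}

static bool demo_has_work(struct blk_mq_hw_ctx *hctx)
{
	return false;
}

static struct elevator_type demo_sched = {
	.ops = {
		.init_sched		= demo_init_sched,
		.exit_sched		= demo_exit_sched,
		.insert_requests	= demo_insert_requests,
		.dispatch_request	= demo_dispatch_request,
		.has_work		= demo_has_work,
	},
	.elevator_name	= "demo-sched",
	.elevator_owner	= THIS_MODULE,
};

static int __init demo_sched_init(void)
{
	return elv_register(&demo_sched);
}

static void __exit demo_sched_exit(void)
{
	elv_unregister(&demo_sched);
}

module_init(demo_sched_init);
module_exit(demo_sched_exit);
MODULE_LICENSE("GPL");
```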
|
| /Linux-v5.15/Documentation/filesystems/ |
| D | zonefs.rst |
    148 implemented by the block layer elevator. An elevator implementing the sequential
    149 write feature for zoned block device (ELEVATOR_F_ZBD_SEQ_WRITE elevator feature)
    150 must be used. This type of elevator (e.g. mq-deadline) is set by default
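The ELEVATOR_F_ZBD_SEQ_WRITE feature mentioned here is advertised by the scheduler itself through the elevator_features field of its struct elevator_type; mq-deadline sets it, which is why it is the one picked for zoned block devices. A trimmed sketch based on the v5.15 mq-deadline.c definition, reduced to the fields relevant to this point:

```c
/*
 * Trimmed sketch of mq-deadline's registration: the elevator_features bit
 * is what lets the block layer select it for zoned, sequential-write devices.
 */
static struct elevator_type mq_deadline = {
	.ops = {
		/* ... scheduling hooks elided ... */
	},
	.elevator_features = ELEVATOR_F_ZBD_SEQ_WRITE,
	.elevator_name	= "mq-deadline",
	.elevator_alias	= "deadline",
	.elevator_owner	= THIS_MODULE,
};
```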
|
| /Linux-v5.15/Documentation/ABI/testing/ |
| D | sysfs-block | 173 Standard I/O elevator operations include attempts to
|
| /Linux-v5.15/include/linux/ |
| D | blkdev.h | 375 struct elevator_queue *elevator; member
|
| /Linux-v5.15/Documentation/admin-guide/LSM/ |
| D | Smack.rst | 6 "Good for you, you've decided to clean the elevator!"
|
| /Linux-v5.15/ |
| D | CREDITS | 158 D: elevator + block layer rewrites
|