Lines matching references to 'q' in block/blk-sysfs.c. Each entry gives the source line number, the matching line, and the enclosing function; "argument" marks uses of the function parameter and "local" marks a local variable of that name.
60 static ssize_t queue_requests_show(struct request_queue *q, char *page) in queue_requests_show() argument
62 return queue_var_show(q->nr_requests, (page)); in queue_requests_show()
66 queue_requests_store(struct request_queue *q, const char *page, size_t count) in queue_requests_store() argument
71 if (!queue_is_mq(q)) in queue_requests_store()
81 err = blk_mq_update_nr_requests(q, nr); in queue_requests_store()
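The pair above backs the nr_requests attribute: the show side prints q->nr_requests, and the store side rejects non-blk-mq queues before resizing via blk_mq_update_nr_requests(). From userspace the attribute is the text file /sys/block/<dev>/queue/nr_requests; a minimal sketch (the device name "sda" is a placeholder, and the write needs root):

    #include <stdio.h>

    int main(void)
    {
        const char *path = "/sys/block/sda/queue/nr_requests";
        char buf[64];
        FILE *f = fopen(path, "r");

        if (!f) {
            perror(path);
            return 1;
        }
        if (fgets(buf, sizeof(buf), f))          /* queue_requests_show() */
            printf("nr_requests: %s", buf);
        fclose(f);

        f = fopen(path, "w");                    /* queue_requests_store() */
        if (f) {
            fputs("128\n", f);
            fclose(f);
        }
        return 0;
    }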
88 static ssize_t queue_ra_show(struct request_queue *q, char *page) in queue_ra_show() argument
90 unsigned long ra_kb = q->backing_dev_info->ra_pages << in queue_ra_show()
97 queue_ra_store(struct request_queue *q, const char *page, size_t count) in queue_ra_store() argument
105 q->backing_dev_info->ra_pages = ra_kb >> (PAGE_SHIFT - 10); in queue_ra_store()
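queue_ra_show()/queue_ra_store() convert between the page-granular ra_pages field and the KiB-granular read_ahead_kb file: a page is 1 << PAGE_SHIFT bytes, so shifting by (PAGE_SHIFT - 10) moves between pages and KiB. The same arithmetic in isolation (the ra_pages value is made up):

    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        long page_size = sysconf(_SC_PAGESIZE);
        int page_shift = 0;

        while ((1L << page_shift) < page_size)
            page_shift++;

        unsigned long ra_pages = 32;    /* example value */
        unsigned long ra_kb = ra_pages << (page_shift - 10);

        printf("%lu pages of %ld bytes = %lu KiB read-ahead\n",
               ra_pages, page_size, ra_kb);
        printf("round trip: %lu KiB -> %lu pages\n",
               ra_kb, ra_kb >> (page_shift - 10));
        return 0;
    }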
110 static ssize_t queue_max_sectors_show(struct request_queue *q, char *page) in queue_max_sectors_show() argument
112 int max_sectors_kb = queue_max_sectors(q) >> 1; in queue_max_sectors_show()
117 static ssize_t queue_max_segments_show(struct request_queue *q, char *page) in queue_max_segments_show() argument
119 return queue_var_show(queue_max_segments(q), (page)); in queue_max_segments_show()
122 static ssize_t queue_max_discard_segments_show(struct request_queue *q, in queue_max_discard_segments_show() argument
125 return queue_var_show(queue_max_discard_segments(q), (page)); in queue_max_discard_segments_show()
128 static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page) in queue_max_integrity_segments_show() argument
130 return queue_var_show(q->limits.max_integrity_segments, (page)); in queue_max_integrity_segments_show()
133 static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page) in queue_max_segment_size_show() argument
135 return queue_var_show(queue_max_segment_size(q), (page)); in queue_max_segment_size_show()
138 static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page) in queue_logical_block_size_show() argument
140 return queue_var_show(queue_logical_block_size(q), page); in queue_logical_block_size_show()
143 static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page) in queue_physical_block_size_show() argument
145 return queue_var_show(queue_physical_block_size(q), page); in queue_physical_block_size_show()
148 static ssize_t queue_chunk_sectors_show(struct request_queue *q, char *page) in queue_chunk_sectors_show() argument
150 return queue_var_show(q->limits.chunk_sectors, page); in queue_chunk_sectors_show()
153 static ssize_t queue_io_min_show(struct request_queue *q, char *page) in queue_io_min_show() argument
155 return queue_var_show(queue_io_min(q), page); in queue_io_min_show()
158 static ssize_t queue_io_opt_show(struct request_queue *q, char *page) in queue_io_opt_show() argument
160 return queue_var_show(queue_io_opt(q), page); in queue_io_opt_show()
163 static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page) in queue_discard_granularity_show() argument
165 return queue_var_show(q->limits.discard_granularity, page); in queue_discard_granularity_show()
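Lines 110-165 are read-only limit attributes that simply funnel queue_limits fields through queue_var_show(). They surface under /sys/block/<dev>/queue/ with slightly different file names; a sketch that reads a few of them (device name is a placeholder):

    #include <stdio.h>

    /* Read one long from a queue attribute file; -1 on failure. */
    static long read_attr(const char *name)
    {
        char path[256];
        long val = -1;
        FILE *f;

        snprintf(path, sizeof(path), "/sys/block/sda/queue/%s", name);
        f = fopen(path, "r");
        if (f) {
            if (fscanf(f, "%ld", &val) != 1)
                val = -1;
            fclose(f);
        }
        return val;
    }

    int main(void)
    {
        const char *attrs[] = {
            "logical_block_size",   /* queue_logical_block_size_show() */
            "physical_block_size",  /* queue_physical_block_size_show() */
            "minimum_io_size",      /* queue_io_min_show() */
            "optimal_io_size",      /* queue_io_opt_show() */
            "discard_granularity",  /* queue_discard_granularity_show() */
        };
        for (unsigned i = 0; i < sizeof(attrs) / sizeof(attrs[0]); i++)
            printf("%-20s %ld\n", attrs[i], read_attr(attrs[i]));
        return 0;
    }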
168 static ssize_t queue_discard_max_hw_show(struct request_queue *q, char *page) in queue_discard_max_hw_show() argument
172 (unsigned long long)q->limits.max_hw_discard_sectors << 9); in queue_discard_max_hw_show()
175 static ssize_t queue_discard_max_show(struct request_queue *q, char *page) in queue_discard_max_show() argument
178 (unsigned long long)q->limits.max_discard_sectors << 9); in queue_discard_max_show()
181 static ssize_t queue_discard_max_store(struct request_queue *q, in queue_discard_max_store() argument
190 if (max_discard & (q->limits.discard_granularity - 1)) in queue_discard_max_store()
197 if (max_discard > q->limits.max_hw_discard_sectors) in queue_discard_max_store()
198 max_discard = q->limits.max_hw_discard_sectors; in queue_discard_max_store()
200 q->limits.max_discard_sectors = max_discard; in queue_discard_max_store()
204 static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page) in queue_discard_zeroes_data_show() argument
209 static ssize_t queue_write_same_max_show(struct request_queue *q, char *page) in queue_write_same_max_show() argument
212 (unsigned long long)q->limits.max_write_same_sectors << 9); in queue_write_same_max_show()
215 static ssize_t queue_write_zeroes_max_show(struct request_queue *q, char *page) in queue_write_zeroes_max_show() argument
218 (unsigned long long)q->limits.max_write_zeroes_sectors << 9); in queue_write_zeroes_max_show()
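The discard, write-same and write-zeroes show handlers all convert 512-byte sectors to bytes with << 9, and queue_discard_max_store() (line 190) enforces granularity alignment with a power-of-two mask test. The same arithmetic standalone (all values made up; the mask test assumes a power-of-two granularity, as the kernel's expression implies):

    #include <stdio.h>

    int main(void)
    {
        unsigned long long max_discard_sectors = 8192;       /* example */
        unsigned long long bytes = max_discard_sectors << 9; /* 512 B sectors */
        unsigned long granularity = 4096;                    /* bytes */
        unsigned long long req = 1 << 20;                    /* 1 MiB request */

        printf("%llu sectors = %llu bytes\n", max_discard_sectors, bytes);

        /* Mirrors the check on line 190: a value is aligned to a
         * power-of-two granularity iff the low bits are all zero. */
        if (req & (granularity - 1))
            printf("%llu is NOT %lu-byte aligned\n", req, granularity);
        else
            printf("%llu is %lu-byte aligned\n", req, granularity);
        return 0;
    }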
222 queue_max_sectors_store(struct request_queue *q, const char *page, size_t count) in queue_max_sectors_store() argument
225 max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1, in queue_max_sectors_store()
233 q->limits.max_dev_sectors >> 1); in queue_max_sectors_store()
238 spin_lock_irq(&q->queue_lock); in queue_max_sectors_store()
239 q->limits.max_sectors = max_sectors_kb << 1; in queue_max_sectors_store()
240 q->backing_dev_info->io_pages = max_sectors_kb >> (PAGE_SHIFT - 10); in queue_max_sectors_store()
241 spin_unlock_irq(&q->queue_lock); in queue_max_sectors_store()
246 static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page) in queue_max_hw_sectors_show() argument
248 int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1; in queue_max_hw_sectors_show()
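queue_max_sectors_store() clamps the requested max_sectors_kb between one page and the hardware/device cap before publishing the new limit under queue_lock. A userspace analogue that reads the cap first so the write cannot bounce with -EINVAL (placeholder device; needs root):

    #include <stdio.h>

    int main(void)
    {
        unsigned long hw_kb = 0, want_kb = 1024;
        FILE *f = fopen("/sys/block/sda/queue/max_hw_sectors_kb", "r");

        if (!f || fscanf(f, "%lu", &hw_kb) != 1) {
            perror("max_hw_sectors_kb");
            return 1;
        }
        fclose(f);

        if (want_kb > hw_kb)    /* the kernel rejects values past the cap */
            want_kb = hw_kb;

        f = fopen("/sys/block/sda/queue/max_sectors_kb", "w");
        if (f) {
            fprintf(f, "%lu\n", want_kb);   /* queue_max_sectors_store() */
            fclose(f);
        }
        printf("requested max_sectors_kb = %lu (hw cap %lu)\n",
               want_kb, hw_kb);
        return 0;
    }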
255 queue_show_##name(struct request_queue *q, char *page) \
258 bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags); \
262 queue_store_##name(struct request_queue *q, const char *page, size_t count) \
273 blk_queue_flag_set(QUEUE_FLAG_##flag, q); \
275 blk_queue_flag_clear(QUEUE_FLAG_##flag, q); \
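Lines 255-275 are fragments of the QUEUE_SYSFS_BIT_FNS() macro, which stamps out a show/store pair per queue flag (used for attributes such as rotational, iostats and add_random). A reconstruction consistent with the fragments above; treat the exact boilerplate between them as an assumption:

    #define QUEUE_SYSFS_BIT_FNS(name, flag, neg)                        \
    static ssize_t                                                      \
    queue_show_##name(struct request_queue *q, char *page)              \
    {                                                                   \
        int bit;                                                        \
        bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags);             \
        return queue_var_show(neg ? !bit : bit, page);                  \
    }                                                                   \
    static ssize_t                                                      \
    queue_store_##name(struct request_queue *q, const char *page,       \
                       size_t count)                                    \
    {                                                                   \
        unsigned long val;                                              \
        ssize_t ret;                                                    \
        ret = queue_var_store(&val, page, count);                       \
        if (ret < 0)                                                    \
            return ret;                                                 \
        if (neg)                                                        \
            val = !val;                                                 \
        if (val)                                                        \
            blk_queue_flag_set(QUEUE_FLAG_##flag, q);                   \
        else                                                            \
            blk_queue_flag_clear(QUEUE_FLAG_##flag, q);                 \
        return ret;                                                     \
    }

    /* e.g. QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1) yields the handlers
     * behind the "rotational" attribute, with the sense inverted. */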
284 static ssize_t queue_zoned_show(struct request_queue *q, char *page) in queue_zoned_show() argument
286 switch (blk_queue_zoned_model(q)) { in queue_zoned_show()
296 static ssize_t queue_nr_zones_show(struct request_queue *q, char *page) in queue_nr_zones_show() argument
298 return queue_var_show(blk_queue_nr_zones(q), page); in queue_nr_zones_show()
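queue_zoned_show() reports the zoned model as a string and queue_nr_zones_show() the zone count. Reading both from userspace (placeholder device; a non-zoned disk reports "none" and 0):

    #include <stdio.h>

    int main(void)
    {
        char model[32] = "?";
        unsigned long nr_zones = 0;
        FILE *f = fopen("/sys/block/sda/queue/zoned", "r");

        if (f) {
            if (fscanf(f, "%31s", model) != 1)  /* none | host-aware |
                                                   host-managed */
                model[0] = '?';
            fclose(f);
        }
        f = fopen("/sys/block/sda/queue/nr_zones", "r");
        if (f) {
            if (fscanf(f, "%lu", &nr_zones) != 1)
                nr_zones = 0;
            fclose(f);
        }
        printf("zoned model: %s, zones: %lu\n", model, nr_zones);
        return 0;
    }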
301 static ssize_t queue_nomerges_show(struct request_queue *q, char *page) in queue_nomerges_show() argument
303 return queue_var_show((blk_queue_nomerges(q) << 1) | in queue_nomerges_show()
304 blk_queue_noxmerges(q), page); in queue_nomerges_show()
307 static ssize_t queue_nomerges_store(struct request_queue *q, const char *page, in queue_nomerges_store() argument
316 blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, q); in queue_nomerges_store()
317 blk_queue_flag_clear(QUEUE_FLAG_NOXMERGES, q); in queue_nomerges_store()
319 blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q); in queue_nomerges_store()
321 blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q); in queue_nomerges_store()
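nomerges encodes two flags in one integer: the show side returns (NOMERGES << 1) | NOXMERGES, so valid values are 0 (all merges enabled), 1 (only simple one-shot merges, NOXMERGES) and 2 (no merging at all, NOMERGES). Setting it (placeholder device; needs root):

    #include <stdio.h>

    int main(void)
    {
        FILE *f = fopen("/sys/block/sda/queue/nomerges", "w");

        if (!f) {
            perror("nomerges");
            return 1;
        }
        fputs("2\n", f);    /* queue_nomerges_store() sets NOMERGES */
        fclose(f);
        return 0;
    }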
326 static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page) in queue_rq_affinity_show() argument
328 bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags); in queue_rq_affinity_show()
329 bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags); in queue_rq_affinity_show()
335 queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count) in queue_rq_affinity_store() argument
346 blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q); in queue_rq_affinity_store()
347 blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, q); in queue_rq_affinity_store()
349 blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q); in queue_rq_affinity_store()
350 blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q); in queue_rq_affinity_store()
352 blk_queue_flag_clear(QUEUE_FLAG_SAME_COMP, q); in queue_rq_affinity_store()
353 blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q); in queue_rq_affinity_store()
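rq_affinity similarly folds QUEUE_FLAG_SAME_COMP and QUEUE_FLAG_SAME_FORCE into 0/1/2: 1 completes a request on a CPU in the submitter's group, 2 forces completion on the exact submitting CPU. Decoding the current value (placeholder device):

    #include <stdio.h>

    int main(void)
    {
        int val = -1;
        FILE *f = fopen("/sys/block/sda/queue/rq_affinity", "r");

        if (f) {
            if (fscanf(f, "%d", &val) != 1)
                val = -1;
            fclose(f);
        }
        switch (val) {
        case 0: puts("0: complete anywhere"); break;
        case 1: puts("1: SAME_COMP - complete in submitter's group"); break;
        case 2: puts("2: SAME_FORCE - force the submitting CPU"); break;
        default: puts("unreadable"); break;
        }
        return 0;
    }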
359 static ssize_t queue_poll_delay_show(struct request_queue *q, char *page) in queue_poll_delay_show() argument
363 if (q->poll_nsec == BLK_MQ_POLL_CLASSIC) in queue_poll_delay_show()
366 val = q->poll_nsec / 1000; in queue_poll_delay_show()
371 static ssize_t queue_poll_delay_store(struct request_queue *q, const char *page, in queue_poll_delay_store() argument
376 if (!q->mq_ops || !q->mq_ops->poll) in queue_poll_delay_store()
384 q->poll_nsec = BLK_MQ_POLL_CLASSIC; in queue_poll_delay_store()
386 q->poll_nsec = val * 1000; in queue_poll_delay_store()
393 static ssize_t queue_poll_show(struct request_queue *q, char *page) in queue_poll_show() argument
395 return queue_var_show(test_bit(QUEUE_FLAG_POLL, &q->queue_flags), page); in queue_poll_show()
398 static ssize_t queue_poll_store(struct request_queue *q, const char *page, in queue_poll_store() argument
404 if (!q->tag_set || q->tag_set->nr_maps <= HCTX_TYPE_POLL || in queue_poll_store()
405 !q->tag_set->map[HCTX_TYPE_POLL].nr_queues) in queue_poll_store()
413 blk_queue_flag_set(QUEUE_FLAG_POLL, q); in queue_poll_store()
415 blk_queue_flag_clear(QUEUE_FLAG_POLL, q); in queue_poll_store()
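The polling attributes: io_poll reflects QUEUE_FLAG_POLL (the store above refuses to enable it without poll-capable hardware queues), and io_poll_delay reports -1 for classic busy polling (BLK_MQ_POLL_CLASSIC), 0 for hybrid polling, or a fixed sleep in microseconds. Reading both (nvme0n1 is a placeholder; polling is typically an NVMe feature):

    #include <stdio.h>

    int main(void)
    {
        int poll = 0, delay = 0;
        FILE *f = fopen("/sys/block/nvme0n1/queue/io_poll", "r");

        if (f) {
            if (fscanf(f, "%d", &poll) != 1)
                poll = 0;
            fclose(f);
        }
        f = fopen("/sys/block/nvme0n1/queue/io_poll_delay", "r");
        if (f) {
            if (fscanf(f, "%d", &delay) != 1)
                delay = 0;
            fclose(f);
        }
        printf("io_poll=%d io_poll_delay=%d (-1 classic, 0 hybrid, >0 usec)\n",
               poll, delay);
        return 0;
    }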
420 static ssize_t queue_io_timeout_show(struct request_queue *q, char *page) in queue_io_timeout_show() argument
422 return sprintf(page, "%u\n", jiffies_to_msecs(q->rq_timeout)); in queue_io_timeout_show()
425 static ssize_t queue_io_timeout_store(struct request_queue *q, const char *page, in queue_io_timeout_store() argument
435 blk_queue_rq_timeout(q, msecs_to_jiffies(val)); in queue_io_timeout_store()
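io_timeout is exchanged in milliseconds; the kernel converts with jiffies_to_msecs()/msecs_to_jiffies() as the fragments show, and queue_attr_visible() (line 779 below) hides the file when the driver has no timeout handler. Setting a 60-second timeout (placeholder device; needs root):

    #include <stdio.h>

    int main(void)
    {
        FILE *f = fopen("/sys/block/sda/queue/io_timeout", "w");

        if (!f) {
            perror("io_timeout");
            return 1;
        }
        fputs("60000\n", f);    /* 60 s; kernel stores it as jiffies */
        fclose(f);
        return 0;
    }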
440 static ssize_t queue_wb_lat_show(struct request_queue *q, char *page) in queue_wb_lat_show() argument
442 if (!wbt_rq_qos(q)) in queue_wb_lat_show()
445 return sprintf(page, "%llu\n", div_u64(wbt_get_min_lat(q), 1000)); in queue_wb_lat_show()
448 static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page, in queue_wb_lat_store() argument
461 rqos = wbt_rq_qos(q); in queue_wb_lat_store()
463 ret = wbt_init(q); in queue_wb_lat_store()
469 val = wbt_default_latency_nsec(q); in queue_wb_lat_store()
473 if (wbt_get_min_lat(q) == val) in queue_wb_lat_store()
481 blk_mq_freeze_queue(q); in queue_wb_lat_store()
482 blk_mq_quiesce_queue(q); in queue_wb_lat_store()
484 wbt_set_min_lat(q, val); in queue_wb_lat_store()
486 blk_mq_unquiesce_queue(q); in queue_wb_lat_store()
487 blk_mq_unfreeze_queue(q); in queue_wb_lat_store()
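wbt_lat_usec exposes the writeback-throttling latency target in microseconds (the show side divides the nanosecond value by 1000); writing -1 restores the device default, 0 disables throttling, and the fragments show the update bracketed by queue freeze/quiesce so in-flight requests drain first. A sketch (placeholder device; the write needs root):

    #include <stdio.h>

    int main(void)
    {
        const char *path = "/sys/block/sda/queue/wbt_lat_usec";
        long usec = 0;
        FILE *f = fopen(path, "r");

        if (f) {
            if (fscanf(f, "%ld", &usec) == 1)
                printf("wbt target latency: %ld usec\n", usec);
            fclose(f);
        }

        f = fopen(path, "w");
        if (f) {
            fputs("-1\n", f);   /* -1 = device default, 0 = disable */
            fclose(f);
        }
        return 0;
    }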
492 static ssize_t queue_wc_show(struct request_queue *q, char *page) in queue_wc_show() argument
494 if (test_bit(QUEUE_FLAG_WC, &q->queue_flags)) in queue_wc_show()
500 static ssize_t queue_wc_store(struct request_queue *q, const char *page, in queue_wc_store() argument
515 blk_queue_flag_set(QUEUE_FLAG_WC, q); in queue_wc_store()
517 blk_queue_flag_clear(QUEUE_FLAG_WC, q); in queue_wc_store()
522 static ssize_t queue_fua_show(struct request_queue *q, char *page) in queue_fua_show() argument
524 return sprintf(page, "%u\n", test_bit(QUEUE_FLAG_FUA, &q->queue_flags)); in queue_fua_show()
527 static ssize_t queue_dax_show(struct request_queue *q, char *page) in queue_dax_show() argument
529 return queue_var_show(blk_queue_dax(q), page); in queue_dax_show()
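write_cache reports and flips QUEUE_FLAG_WC using the strings "write back" and "write through"; note the store only changes the kernel's view, not the device's own cache setting. fua and dax are read-only flags alongside it. A sketch (placeholder device; the write needs root):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        const char *path = "/sys/block/sda/queue/write_cache";
        char mode[32] = "?";
        FILE *f = fopen(path, "r");

        if (f) {
            if (fgets(mode, sizeof(mode), f))
                mode[strcspn(mode, "\n")] = '\0';
            fclose(f);
        }
        printf("write_cache: %s\n", mode);  /* "write back"/"write through" */

        f = fopen(path, "w");
        if (f) {
            fputs("write through\n", f);    /* clears QUEUE_FLAG_WC */
            fclose(f);
        }
        return 0;
    }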
775 struct request_queue *q = in queue_attr_visible() local
779 (!q->mq_ops || !q->mq_ops->timeout)) in queue_attr_visible()
797 struct request_queue *q = in queue_attr_show() local
803 mutex_lock(&q->sysfs_lock); in queue_attr_show()
804 if (blk_queue_dying(q)) { in queue_attr_show()
805 mutex_unlock(&q->sysfs_lock); in queue_attr_show()
808 res = entry->show(q, page); in queue_attr_show()
809 mutex_unlock(&q->sysfs_lock); in queue_attr_show()
818 struct request_queue *q; in queue_attr_store() local
824 q = container_of(kobj, struct request_queue, kobj); in queue_attr_store()
825 mutex_lock(&q->sysfs_lock); in queue_attr_store()
826 if (blk_queue_dying(q)) { in queue_attr_store()
827 mutex_unlock(&q->sysfs_lock); in queue_attr_store()
830 res = entry->store(q, page, length); in queue_attr_store()
831 mutex_unlock(&q->sysfs_lock); in queue_attr_store()
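queue_attr_show()/queue_attr_store() are the sysfs_ops entry points: each resolves the queue from the kobject, takes sysfs_lock, bails out on a dying queue, and dispatches to the per-attribute handler, while queue_attr_visible() hides inapplicable attributes at registration time. The wiring in the same file plausibly looks like this; a reconstruction, so treat the exact layout as an assumption:

    static struct attribute_group queue_attr_group = {
        .attrs      = queue_attrs,
        .is_visible = queue_attr_visible,
    };

    static const struct sysfs_ops queue_sysfs_ops = {
        .show  = queue_attr_show,
        .store = queue_attr_store,
    };

    struct kobj_type blk_queue_ktype = {
        .sysfs_ops = &queue_sysfs_ops,
        .release   = blk_release_queue,
    };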
837 struct request_queue *q = container_of(rcu_head, struct request_queue, in blk_free_queue_rcu() local
839 kmem_cache_free(blk_requestq_cachep, q); in blk_free_queue_rcu()
843 static void blk_exit_queue(struct request_queue *q) in blk_exit_queue() argument
850 if (q->elevator) { in blk_exit_queue()
851 ioc_clear_queue(q); in blk_exit_queue()
852 __elevator_exit(q, q->elevator); in blk_exit_queue()
853 q->elevator = NULL; in blk_exit_queue()
861 blkcg_exit_queue(q); in blk_exit_queue()
868 bdi_put(q->backing_dev_info); in blk_exit_queue()
886 struct request_queue *q = container_of(work, typeof(*q), release_work); in __blk_release_queue() local
888 if (test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags)) in __blk_release_queue()
889 blk_stat_remove_callback(q, q->poll_cb); in __blk_release_queue()
890 blk_stat_free_callback(q->poll_cb); in __blk_release_queue()
892 blk_free_queue_stats(q->stats); in __blk_release_queue()
894 if (queue_is_mq(q)) in __blk_release_queue()
895 cancel_delayed_work_sync(&q->requeue_work); in __blk_release_queue()
897 blk_exit_queue(q); in __blk_release_queue()
899 blk_queue_free_zone_bitmaps(q); in __blk_release_queue()
901 if (queue_is_mq(q)) in __blk_release_queue()
902 blk_mq_release(q); in __blk_release_queue()
904 blk_trace_shutdown(q); in __blk_release_queue()
906 if (queue_is_mq(q)) in __blk_release_queue()
907 blk_mq_debugfs_unregister(q); in __blk_release_queue()
909 bioset_exit(&q->bio_split); in __blk_release_queue()
911 ida_simple_remove(&blk_queue_ida, q->id); in __blk_release_queue()
912 call_rcu(&q->rcu_head, blk_free_queue_rcu); in __blk_release_queue()
917 struct request_queue *q = in blk_release_queue() local
920 INIT_WORK(&q->release_work, __blk_release_queue); in blk_release_queue()
921 schedule_work(&q->release_work); in blk_release_queue()
942 struct request_queue *q = disk->queue; in blk_register_queue() local
945 if (WARN_ON(!q)) in blk_register_queue()
948 WARN_ONCE(blk_queue_registered(q), in blk_register_queue()
961 if (!blk_queue_init_done(q)) { in blk_register_queue()
962 blk_queue_flag_set(QUEUE_FLAG_INIT_DONE, q); in blk_register_queue()
963 percpu_ref_switch_to_percpu(&q->q_usage_counter); in blk_register_queue()
970 mutex_lock(&q->sysfs_dir_lock); in blk_register_queue()
972 ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue"); in blk_register_queue()
978 ret = sysfs_create_group(&q->kobj, &queue_attr_group); in blk_register_queue()
981 kobject_del(&q->kobj); in blk_register_queue()
986 if (queue_is_mq(q)) { in blk_register_queue()
987 __blk_mq_register_dev(dev, q); in blk_register_queue()
988 blk_mq_debugfs_register(q); in blk_register_queue()
991 mutex_lock(&q->sysfs_lock); in blk_register_queue()
992 if (q->elevator) { in blk_register_queue()
993 ret = elv_register_queue(q, false); in blk_register_queue()
995 mutex_unlock(&q->sysfs_lock); in blk_register_queue()
996 mutex_unlock(&q->sysfs_dir_lock); in blk_register_queue()
997 kobject_del(&q->kobj); in blk_register_queue()
1005 blk_queue_flag_set(QUEUE_FLAG_REGISTERED, q); in blk_register_queue()
1006 wbt_enable_default(q); in blk_register_queue()
1007 blk_throtl_register_queue(q); in blk_register_queue()
1010 kobject_uevent(&q->kobj, KOBJ_ADD); in blk_register_queue()
1012 kobject_uevent(&q->elevator->kobj, KOBJ_ADD); in blk_register_queue()
1013 mutex_unlock(&q->sysfs_lock); in blk_register_queue()
1017 mutex_unlock(&q->sysfs_dir_lock); in blk_register_queue()
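blk_register_queue() adds the "queue" kobject under the disk device, creates the attribute group, registers the blk-mq sysfs/debugfs entries and the elevator, then sets QUEUE_FLAG_REGISTERED and emits KOBJ_ADD. The visible result is a queue/ directory under every registered disk; a quick check (read-only, no root needed):

    #include <stdio.h>
    #include <dirent.h>
    #include <unistd.h>

    int main(void)
    {
        DIR *d = opendir("/sys/block");
        struct dirent *de;
        char path[512];

        if (!d) {
            perror("/sys/block");
            return 1;
        }
        while ((de = readdir(d)) != NULL) {
            if (de->d_name[0] == '.')
                continue;
            snprintf(path, sizeof(path), "/sys/block/%s/queue",
                     de->d_name);
            /* Exists once blk_register_queue() has run for the disk. */
            if (access(path, F_OK) == 0)
                printf("%s\n", path);
        }
        closedir(d);
        return 0;
    }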
1031 struct request_queue *q = disk->queue; in blk_unregister_queue() local
1033 if (WARN_ON(!q)) in blk_unregister_queue()
1037 if (!blk_queue_registered(q)) in blk_unregister_queue()
1045 mutex_lock(&q->sysfs_lock); in blk_unregister_queue()
1046 blk_queue_flag_clear(QUEUE_FLAG_REGISTERED, q); in blk_unregister_queue()
1047 mutex_unlock(&q->sysfs_lock); in blk_unregister_queue()
1049 mutex_lock(&q->sysfs_dir_lock); in blk_unregister_queue()
1054 if (queue_is_mq(q)) in blk_unregister_queue()
1055 blk_mq_unregister_dev(disk_to_dev(disk), q); in blk_unregister_queue()
1057 kobject_uevent(&q->kobj, KOBJ_REMOVE); in blk_unregister_queue()
1058 kobject_del(&q->kobj); in blk_unregister_queue()
1061 mutex_lock(&q->sysfs_lock); in blk_unregister_queue()
1062 if (q->elevator) in blk_unregister_queue()
1063 elv_unregister_queue(q); in blk_unregister_queue()
1064 mutex_unlock(&q->sysfs_lock); in blk_unregister_queue()
1065 mutex_unlock(&q->sysfs_dir_lock); in blk_unregister_queue()