Lines matching full:q (whole-word references to the identifier q in block/blk-sysfs.c). Each entry gives the source line number, the matched line, the containing function, and whether q is an argument or a local there.

61 static ssize_t queue_requests_show(struct request_queue *q, char *page)  in queue_requests_show()  argument
63 return queue_var_show(q->nr_requests, (page)); in queue_requests_show()
67 queue_requests_store(struct request_queue *q, const char *page, size_t count) in queue_requests_store() argument
72 if (!queue_is_mq(q)) in queue_requests_store()
82 err = blk_mq_update_nr_requests(q, nr); in queue_requests_store()
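The fragments above are the show/store pair behind the nr_requests sysfs attribute. A minimal sketch of how such a pair typically reads, reconstructed around the matched lines (queue_var_store() and BLKDEV_MIN_RQ are standard helpers from this file and blkdev.h, assumed here rather than present in the match list):

static ssize_t queue_requests_show(struct request_queue *q, char *page)
{
        return queue_var_show(q->nr_requests, (page));
}

static ssize_t
queue_requests_store(struct request_queue *q, const char *page, size_t count)
{
        unsigned long nr;
        int ret, err;

        /* nr_requests is only adjustable for blk-mq queues */
        if (!queue_is_mq(q))
                return -EINVAL;

        ret = queue_var_store(&nr, page, count);        /* parse decimal input */
        if (ret < 0)
                return ret;

        if (nr < BLKDEV_MIN_RQ)                         /* clamp to the minimum depth */
                nr = BLKDEV_MIN_RQ;

        err = blk_mq_update_nr_requests(q, nr);         /* resize the tag sets */
        if (err)
                return err;

        return ret;                                     /* bytes consumed on success */
}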
89 static ssize_t queue_ra_show(struct request_queue *q, char *page) in queue_ra_show() argument
91 unsigned long ra_kb = q->backing_dev_info->ra_pages << in queue_ra_show()
98 queue_ra_store(struct request_queue *q, const char *page, size_t count) in queue_ra_store() argument
106 q->backing_dev_info->ra_pages = ra_kb >> (PAGE_SHIFT - 10); in queue_ra_store()
111 static ssize_t queue_max_sectors_show(struct request_queue *q, char *page) in queue_max_sectors_show() argument
113 int max_sectors_kb = queue_max_sectors(q) >> 1; in queue_max_sectors_show()
118 static ssize_t queue_max_segments_show(struct request_queue *q, char *page) in queue_max_segments_show() argument
120 return queue_var_show(queue_max_segments(q), (page)); in queue_max_segments_show()
123 static ssize_t queue_max_discard_segments_show(struct request_queue *q, in queue_max_discard_segments_show() argument
126 return queue_var_show(queue_max_discard_segments(q), (page)); in queue_max_discard_segments_show()
129 static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page) in queue_max_integrity_segments_show() argument
131 return queue_var_show(q->limits.max_integrity_segments, (page)); in queue_max_integrity_segments_show()
134 static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page) in queue_max_segment_size_show() argument
136 return queue_var_show(queue_max_segment_size(q), (page)); in queue_max_segment_size_show()
139 static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page) in queue_logical_block_size_show() argument
141 return queue_var_show(queue_logical_block_size(q), page); in queue_logical_block_size_show()
144 static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page) in queue_physical_block_size_show() argument
146 return queue_var_show(queue_physical_block_size(q), page); in queue_physical_block_size_show()
149 static ssize_t queue_chunk_sectors_show(struct request_queue *q, char *page) in queue_chunk_sectors_show() argument
151 return queue_var_show(q->limits.chunk_sectors, page); in queue_chunk_sectors_show()
154 static ssize_t queue_io_min_show(struct request_queue *q, char *page) in queue_io_min_show() argument
156 return queue_var_show(queue_io_min(q), page); in queue_io_min_show()
159 static ssize_t queue_io_opt_show(struct request_queue *q, char *page) in queue_io_opt_show() argument
161 return queue_var_show(queue_io_opt(q), page); in queue_io_opt_show()
164 static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page) in queue_discard_granularity_show() argument
166 return queue_var_show(q->limits.discard_granularity, page); in queue_discard_granularity_show()
169 static ssize_t queue_discard_max_hw_show(struct request_queue *q, char *page) in queue_discard_max_hw_show() argument
173 (unsigned long long)q->limits.max_hw_discard_sectors << 9); in queue_discard_max_hw_show()
176 static ssize_t queue_discard_max_show(struct request_queue *q, char *page) in queue_discard_max_show() argument
179 (unsigned long long)q->limits.max_discard_sectors << 9); in queue_discard_max_show()
182 static ssize_t queue_discard_max_store(struct request_queue *q, in queue_discard_max_store() argument
191 if (max_discard & (q->limits.discard_granularity - 1)) in queue_discard_max_store()
198 if (max_discard > q->limits.max_hw_discard_sectors) in queue_discard_max_store()
199 max_discard = q->limits.max_hw_discard_sectors; in queue_discard_max_store()
201 q->limits.max_discard_sectors = max_discard; in queue_discard_max_store()
205 static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page) in queue_discard_zeroes_data_show() argument
210 static ssize_t queue_write_same_max_show(struct request_queue *q, char *page) in queue_write_same_max_show() argument
213 (unsigned long long)q->limits.max_write_same_sectors << 9); in queue_write_same_max_show()
216 static ssize_t queue_write_zeroes_max_show(struct request_queue *q, char *page) in queue_write_zeroes_max_show() argument
219 (unsigned long long)q->limits.max_write_zeroes_sectors << 9); in queue_write_zeroes_max_show()
222 static ssize_t queue_zone_append_max_show(struct request_queue *q, char *page) in queue_zone_append_max_show() argument
224 unsigned long long max_sectors = q->limits.max_zone_append_sectors; in queue_zone_append_max_show()
230 queue_max_sectors_store(struct request_queue *q, const char *page, size_t count) in queue_max_sectors_store() argument
233 max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1, in queue_max_sectors_store()
241 q->limits.max_dev_sectors >> 1); in queue_max_sectors_store()
246 spin_lock_irq(&q->queue_lock); in queue_max_sectors_store()
247 q->limits.max_sectors = max_sectors_kb << 1; in queue_max_sectors_store()
248 q->backing_dev_info->io_pages = max_sectors_kb >> (PAGE_SHIFT - 10); in queue_max_sectors_store()
249 spin_unlock_irq(&q->queue_lock); in queue_max_sectors_store()
254 static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page) in queue_max_hw_sectors_show() argument
256 int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1; in queue_max_hw_sectors_show()
263 queue_##name##_show(struct request_queue *q, char *page) \
266 bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags); \
270 queue_##name##_store(struct request_queue *q, const char *page, size_t count) \
281 blk_queue_flag_set(QUEUE_FLAG_##flag, q); \
283 blk_queue_flag_clear(QUEUE_FLAG_##flag, q); \
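The backslash-continued fragments above (source lines 263-283) belong to a helper macro that stamps out a show/store pair for a single queue flag. A sketch of the whole macro, assuming the conventional QUEUE_SYSFS_BIT_FNS(name, flag, neg) shape used by this file (the neg argument inverts the exported value for attributes whose sysfs polarity is reversed, e.g. rotational vs. QUEUE_FLAG_NONROT; the instantiation names at the end are assumed from the usual set):

#define QUEUE_SYSFS_BIT_FNS(name, flag, neg)                                   \
static ssize_t                                                                 \
queue_##name##_show(struct request_queue *q, char *page)                      \
{                                                                              \
        int bit;                                                               \
        bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags);                    \
        return queue_var_show(neg ? !bit : bit, page);                         \
}                                                                              \
static ssize_t                                                                 \
queue_##name##_store(struct request_queue *q, const char *page, size_t count) \
{                                                                              \
        unsigned long val;                                                     \
        ssize_t ret;                                                           \
        ret = queue_var_store(&val, page, count);                              \
        if (ret < 0)                                                           \
                return ret;                                                    \
        if (neg)                                                               \
                val = !val;                                                    \
                                                                               \
        if (val)                                                               \
                blk_queue_flag_set(QUEUE_FLAG_##flag, q);                      \
        else                                                                   \
                blk_queue_flag_clear(QUEUE_FLAG_##flag, q);                    \
        return ret;                                                            \
}

QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1);
QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0);
QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0);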
293 static ssize_t queue_zoned_show(struct request_queue *q, char *page) in queue_zoned_show() argument
295 switch (blk_queue_zoned_model(q)) { in queue_zoned_show()
305 static ssize_t queue_nr_zones_show(struct request_queue *q, char *page) in queue_nr_zones_show() argument
307 return queue_var_show(blk_queue_nr_zones(q), page); in queue_nr_zones_show()
310 static ssize_t queue_max_open_zones_show(struct request_queue *q, char *page) in queue_max_open_zones_show() argument
312 return queue_var_show(queue_max_open_zones(q), page); in queue_max_open_zones_show()
315 static ssize_t queue_max_active_zones_show(struct request_queue *q, char *page) in queue_max_active_zones_show() argument
317 return queue_var_show(queue_max_active_zones(q), page); in queue_max_active_zones_show()
320 static ssize_t queue_nomerges_show(struct request_queue *q, char *page) in queue_nomerges_show() argument
322 return queue_var_show((blk_queue_nomerges(q) << 1) | in queue_nomerges_show()
323 blk_queue_noxmerges(q), page); in queue_nomerges_show()
326 static ssize_t queue_nomerges_store(struct request_queue *q, const char *page, in queue_nomerges_store() argument
335 blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, q); in queue_nomerges_store()
336 blk_queue_flag_clear(QUEUE_FLAG_NOXMERGES, q); in queue_nomerges_store()
338 blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q); in queue_nomerges_store()
340 blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q); in queue_nomerges_store()
345 static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page) in queue_rq_affinity_show() argument
347 bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags); in queue_rq_affinity_show()
348 bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags); in queue_rq_affinity_show()
354 queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count) in queue_rq_affinity_store() argument
365 blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q); in queue_rq_affinity_store()
366 blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, q); in queue_rq_affinity_store()
368 blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q); in queue_rq_affinity_store()
369 blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q); in queue_rq_affinity_store()
371 blk_queue_flag_clear(QUEUE_FLAG_SAME_COMP, q); in queue_rq_affinity_store()
372 blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q); in queue_rq_affinity_store()
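The rq_affinity store fragments above encode a tri-state: writing 2 forces completions onto the submitting CPU (SAME_COMP plus SAME_FORCE), 1 keeps them within the submitter's cache domain (SAME_COMP only), and 0 clears both flags. A sketch of the full store handler under that reading (the CONFIG_SMP guard and the queue_var_store() parsing are assumptions about the surrounding body):

static ssize_t
queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
{
        ssize_t ret = -EINVAL;
#ifdef CONFIG_SMP
        unsigned long val;

        ret = queue_var_store(&val, page, count);
        if (ret < 0)
                return ret;

        if (val == 2) {         /* force completion on the submitting CPU */
                blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
                blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
        } else if (val == 1) {  /* same cache domain is good enough */
                blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
                blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
        } else if (val == 0) {  /* complete wherever the interrupt lands */
                blk_queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
                blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
        }
#endif
        return ret;
}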
378 static ssize_t queue_poll_delay_show(struct request_queue *q, char *page) in queue_poll_delay_show() argument
382 if (q->poll_nsec == BLK_MQ_POLL_CLASSIC) in queue_poll_delay_show()
385 val = q->poll_nsec / 1000; in queue_poll_delay_show()
390 static ssize_t queue_poll_delay_store(struct request_queue *q, const char *page, in queue_poll_delay_store() argument
395 if (!q->mq_ops || !q->mq_ops->poll) in queue_poll_delay_store()
403 q->poll_nsec = BLK_MQ_POLL_CLASSIC; in queue_poll_delay_store()
405 q->poll_nsec = val * 1000; in queue_poll_delay_store()
412 static ssize_t queue_poll_show(struct request_queue *q, char *page) in queue_poll_show() argument
414 return queue_var_show(test_bit(QUEUE_FLAG_POLL, &q->queue_flags), page); in queue_poll_show()
417 static ssize_t queue_poll_store(struct request_queue *q, const char *page, in queue_poll_store() argument
423 if (!q->tag_set || q->tag_set->nr_maps <= HCTX_TYPE_POLL || in queue_poll_store()
424 !q->tag_set->map[HCTX_TYPE_POLL].nr_queues) in queue_poll_store()
432 blk_queue_flag_set(QUEUE_FLAG_POLL, q); in queue_poll_store()
434 blk_queue_flag_clear(QUEUE_FLAG_POLL, q); in queue_poll_store()
439 static ssize_t queue_io_timeout_show(struct request_queue *q, char *page) in queue_io_timeout_show() argument
441 return sprintf(page, "%u\n", jiffies_to_msecs(q->rq_timeout)); in queue_io_timeout_show()
444 static ssize_t queue_io_timeout_store(struct request_queue *q, const char *page, in queue_io_timeout_store() argument
454 blk_queue_rq_timeout(q, msecs_to_jiffies(val)); in queue_io_timeout_store()
459 static ssize_t queue_wb_lat_show(struct request_queue *q, char *page) in queue_wb_lat_show() argument
461 if (!wbt_rq_qos(q)) in queue_wb_lat_show()
464 return sprintf(page, "%llu\n", div_u64(wbt_get_min_lat(q), 1000)); in queue_wb_lat_show()
467 static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page, in queue_wb_lat_store() argument
480 rqos = wbt_rq_qos(q); in queue_wb_lat_store()
482 ret = wbt_init(q); in queue_wb_lat_store()
488 val = wbt_default_latency_nsec(q); in queue_wb_lat_store()
492 if (wbt_get_min_lat(q) == val) in queue_wb_lat_store()
500 blk_mq_freeze_queue(q); in queue_wb_lat_store()
501 blk_mq_quiesce_queue(q); in queue_wb_lat_store()
503 wbt_set_min_lat(q, val); in queue_wb_lat_store()
505 blk_mq_unquiesce_queue(q); in queue_wb_lat_store()
506 blk_mq_unfreeze_queue(q); in queue_wb_lat_store()
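Source lines 500-506 show the ordering used when the writeback-throttling latency target changes: freeze the queue so no new requests enter, quiesce it so no dispatch is running, apply the new value, then undo both in reverse order. A minimal sketch of that update sequence lifted from the matched lines (the wrapper function name is hypothetical, and the error handling plus the microsecond-to-nanosecond conversion earlier in queue_wb_lat_store() are omitted):

/* Sketch: how queue_wb_lat_store() applies a new latency target (val in ns). */
static void queue_apply_wbt_lat(struct request_queue *q, u64 val)
{
        blk_mq_freeze_queue(q);         /* drain: no new requests can enter */
        blk_mq_quiesce_queue(q);        /* no dispatch running concurrently */

        wbt_set_min_lat(q, val);        /* safe to switch the target now */

        blk_mq_unquiesce_queue(q);
        blk_mq_unfreeze_queue(q);
}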
511 static ssize_t queue_wc_show(struct request_queue *q, char *page) in queue_wc_show() argument
513 if (test_bit(QUEUE_FLAG_WC, &q->queue_flags)) in queue_wc_show()
519 static ssize_t queue_wc_store(struct request_queue *q, const char *page, in queue_wc_store() argument
534 blk_queue_flag_set(QUEUE_FLAG_WC, q); in queue_wc_store()
536 blk_queue_flag_clear(QUEUE_FLAG_WC, q); in queue_wc_store()
541 static ssize_t queue_fua_show(struct request_queue *q, char *page) in queue_fua_show() argument
543 return sprintf(page, "%u\n", test_bit(QUEUE_FLAG_FUA, &q->queue_flags)); in queue_fua_show()
546 static ssize_t queue_dax_show(struct request_queue *q, char *page) in queue_dax_show() argument
548 return queue_var_show(blk_queue_dax(q), page); in queue_dax_show()
668 struct request_queue *q = in queue_attr_visible() local
672 (!q->mq_ops || !q->mq_ops->timeout)) in queue_attr_visible()
677 !blk_queue_is_zoned(q)) in queue_attr_visible()
695 struct request_queue *q = in queue_attr_show() local
701 mutex_lock(&q->sysfs_lock); in queue_attr_show()
702 res = entry->show(q, page); in queue_attr_show()
703 mutex_unlock(&q->sysfs_lock); in queue_attr_show()
712 struct request_queue *q; in queue_attr_store() local
718 q = container_of(kobj, struct request_queue, kobj); in queue_attr_store()
719 mutex_lock(&q->sysfs_lock); in queue_attr_store()
720 res = entry->store(q, page, length); in queue_attr_store()
721 mutex_unlock(&q->sysfs_lock); in queue_attr_store()
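queue_attr_show() and queue_attr_store() above are the sysfs_ops dispatchers for every attribute in this listing: the kobject embedded in struct request_queue is mapped back to the queue with container_of(), and the per-attribute callback runs under q->sysfs_lock. A sketch of the show side, assuming the file's usual struct queue_sysfs_entry and to_queue() wrappers:

static ssize_t
queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
        struct queue_sysfs_entry *entry = to_queue(attr);       /* container_of() on attr */
        struct request_queue *q =
                container_of(kobj, struct request_queue, kobj);  /* kobj back to its queue */
        ssize_t res;

        if (!entry->show)
                return -EIO;
        mutex_lock(&q->sysfs_lock);
        res = entry->show(q, page);
        mutex_unlock(&q->sysfs_lock);
        return res;
}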
727 struct request_queue *q = container_of(rcu_head, struct request_queue, in blk_free_queue_rcu() local
729 kmem_cache_free(blk_requestq_cachep, q); in blk_free_queue_rcu()
733 static void blk_exit_queue(struct request_queue *q) in blk_exit_queue() argument
740 if (q->elevator) { in blk_exit_queue()
741 ioc_clear_queue(q); in blk_exit_queue()
742 __elevator_exit(q, q->elevator); in blk_exit_queue()
746 * Remove all references to @q from the block cgroup controller before in blk_exit_queue()
747 * restoring @q->queue_lock to avoid that restoring this pointer causes in blk_exit_queue()
750 blkcg_exit_queue(q); in blk_exit_queue()
753 * Since the cgroup code may dereference the @q->backing_dev_info in blk_exit_queue()
757 bdi_put(q->backing_dev_info); in blk_exit_queue()
782 struct request_queue *q = in blk_release_queue() local
787 if (test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags)) in blk_release_queue()
788 blk_stat_remove_callback(q, q->poll_cb); in blk_release_queue()
789 blk_stat_free_callback(q->poll_cb); in blk_release_queue()
791 blk_free_queue_stats(q->stats); in blk_release_queue()
793 if (queue_is_mq(q)) { in blk_release_queue()
797 cancel_delayed_work_sync(&q->requeue_work); in blk_release_queue()
799 queue_for_each_hw_ctx(q, hctx, i) in blk_release_queue()
803 blk_exit_queue(q); in blk_release_queue()
805 blk_queue_free_zone_bitmaps(q); in blk_release_queue()
807 if (queue_is_mq(q)) in blk_release_queue()
808 blk_mq_release(q); in blk_release_queue()
810 blk_trace_shutdown(q); in blk_release_queue()
811 mutex_lock(&q->debugfs_mutex); in blk_release_queue()
812 debugfs_remove_recursive(q->debugfs_dir); in blk_release_queue()
813 mutex_unlock(&q->debugfs_mutex); in blk_release_queue()
815 if (queue_is_mq(q)) in blk_release_queue()
816 blk_mq_debugfs_unregister(q); in blk_release_queue()
818 bioset_exit(&q->bio_split); in blk_release_queue()
820 ida_simple_remove(&blk_queue_ida, q->id); in blk_release_queue()
821 call_rcu(&q->rcu_head, blk_free_queue_rcu); in blk_release_queue()
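The last fragments of blk_release_queue() hand the structure to RCU rather than freeing it directly: the queue id goes back to blk_queue_ida immediately, but the memory is only returned to blk_requestq_cachep after a grace period, the usual way to let any remaining lockless readers of the queue pointer finish. Stitching the matched lines back together:

static void blk_free_queue_rcu(struct rcu_head *rcu_head)
{
        struct request_queue *q = container_of(rcu_head, struct request_queue,
                                               rcu_head);
        kmem_cache_free(blk_requestq_cachep, q);        /* actual free, after the grace period */
}

/* ...tail of blk_release_queue(): */
        ida_simple_remove(&blk_queue_ida, q->id);
        call_rcu(&q->rcu_head, blk_free_queue_rcu);     /* defer the free past RCU readers */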
842 struct request_queue *q = disk->queue; in blk_register_queue() local
844 if (WARN_ON(!q)) in blk_register_queue()
847 WARN_ONCE(blk_queue_registered(q), in blk_register_queue()
860 if (!blk_queue_init_done(q)) { in blk_register_queue()
861 blk_queue_flag_set(QUEUE_FLAG_INIT_DONE, q); in blk_register_queue()
862 percpu_ref_switch_to_percpu(&q->q_usage_counter); in blk_register_queue()
865 blk_queue_update_readahead(q); in blk_register_queue()
871 mutex_lock(&q->sysfs_dir_lock); in blk_register_queue()
873 ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue"); in blk_register_queue()
879 ret = sysfs_create_group(&q->kobj, &queue_attr_group); in blk_register_queue()
882 kobject_del(&q->kobj); in blk_register_queue()
887 mutex_lock(&q->debugfs_mutex); in blk_register_queue()
888 q->debugfs_dir = debugfs_create_dir(kobject_name(q->kobj.parent), in blk_register_queue()
890 mutex_unlock(&q->debugfs_mutex); in blk_register_queue()
892 if (queue_is_mq(q)) { in blk_register_queue()
893 __blk_mq_register_dev(dev, q); in blk_register_queue()
894 blk_mq_debugfs_register(q); in blk_register_queue()
897 mutex_lock(&q->sysfs_lock); in blk_register_queue()
898 if (q->elevator) { in blk_register_queue()
899 ret = elv_register_queue(q, false); in blk_register_queue()
901 mutex_unlock(&q->sysfs_lock); in blk_register_queue()
902 mutex_unlock(&q->sysfs_dir_lock); in blk_register_queue()
903 kobject_del(&q->kobj); in blk_register_queue()
910 blk_queue_flag_set(QUEUE_FLAG_REGISTERED, q); in blk_register_queue()
911 wbt_enable_default(q); in blk_register_queue()
912 blk_throtl_register_queue(q); in blk_register_queue()
915 kobject_uevent(&q->kobj, KOBJ_ADD); in blk_register_queue()
916 if (q->elevator) in blk_register_queue()
917 kobject_uevent(&q->elevator->kobj, KOBJ_ADD); in blk_register_queue()
918 mutex_unlock(&q->sysfs_lock); in blk_register_queue()
922 mutex_unlock(&q->sysfs_dir_lock); in blk_register_queue()
936 struct request_queue *q = disk->queue; in blk_unregister_queue() local
938 if (WARN_ON(!q)) in blk_unregister_queue()
942 if (!blk_queue_registered(q)) in blk_unregister_queue()
950 mutex_lock(&q->sysfs_lock); in blk_unregister_queue()
951 blk_queue_flag_clear(QUEUE_FLAG_REGISTERED, q); in blk_unregister_queue()
952 mutex_unlock(&q->sysfs_lock); in blk_unregister_queue()
954 mutex_lock(&q->sysfs_dir_lock); in blk_unregister_queue()
959 if (queue_is_mq(q)) in blk_unregister_queue()
960 blk_mq_unregister_dev(disk_to_dev(disk), q); in blk_unregister_queue()
962 kobject_uevent(&q->kobj, KOBJ_REMOVE); in blk_unregister_queue()
963 kobject_del(&q->kobj); in blk_unregister_queue()
966 mutex_lock(&q->sysfs_lock); in blk_unregister_queue()
967 if (q->elevator) in blk_unregister_queue()
968 elv_unregister_queue(q); in blk_unregister_queue()
969 mutex_unlock(&q->sysfs_lock); in blk_unregister_queue()
970 mutex_unlock(&q->sysfs_dir_lock); in blk_unregister_queue()