Lines matching full:q in block/blk-sysfs.c
63 static ssize_t queue_requests_show(struct request_queue *q, char *page) in queue_requests_show() argument
65 return queue_var_show(q->nr_requests, page); in queue_requests_show()
69 queue_requests_store(struct request_queue *q, const char *page, size_t count) in queue_requests_store() argument
74 if (!queue_is_mq(q)) in queue_requests_store()
84 err = blk_mq_update_nr_requests(q, nr); in queue_requests_store()
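
Context for the listing: nearly every show/store handler excerpted here funnels through two small helpers defined earlier in blk-sysfs.c. The sketch below is a close paraphrase of those helpers (the exact bounds check may differ between kernel versions): queue_var_show() prints an unsigned long, queue_var_store() parses one with kstrtoul() and rejects out-of-range input.

        /* Sketch of the helpers used throughout this listing. */
        static ssize_t queue_var_show(unsigned long var, char *page)
        {
                return sprintf(page, "%lu\n", var);
        }

        static ssize_t
        queue_var_store(unsigned long *var, const char *page, size_t count)
        {
                int err;
                unsigned long v;

                err = kstrtoul(page, 10, &v);
                if (err || v > UINT_MAX)
                        return -EINVAL;

                *var = v;

                return count;   /* sysfs convention: return bytes consumed */
        }
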
91 static ssize_t queue_ra_show(struct request_queue *q, char *page) in queue_ra_show() argument
95 if (!q->disk) in queue_ra_show()
97 ra_kb = q->disk->bdi->ra_pages << (PAGE_SHIFT - 10); in queue_ra_show()
102 queue_ra_store(struct request_queue *q, const char *page, size_t count) in queue_ra_store() argument
107 if (!q->disk) in queue_ra_store()
112 q->disk->bdi->ra_pages = ra_kb >> (PAGE_SHIFT - 10); in queue_ra_store()
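
The read_ahead_kb attribute is stored internally in pages (bdi->ra_pages); the show/store pair above converts with a shift of PAGE_SHIFT - 10. A standalone illustration of the arithmetic, assuming 4 KiB pages (PAGE_SHIFT == 12), purely for clarity:

        #include <stdio.h>

        #define PAGE_SHIFT 12   /* assumption: 4 KiB pages */

        int main(void)
        {
                unsigned long ra_pages = 32;                            /* kernel-internal unit */
                unsigned long ra_kb = ra_pages << (PAGE_SHIFT - 10);    /* queue_ra_show(): 128 KiB */
                unsigned long back  = ra_kb >> (PAGE_SHIFT - 10);       /* queue_ra_store(): 32 pages */

                printf("ra_pages=%lu -> read_ahead_kb=%lu -> ra_pages=%lu\n",
                       ra_pages, ra_kb, back);
                return 0;
        }
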
116 static ssize_t queue_max_sectors_show(struct request_queue *q, char *page) in queue_max_sectors_show() argument
118 int max_sectors_kb = queue_max_sectors(q) >> 1; in queue_max_sectors_show()
123 static ssize_t queue_max_segments_show(struct request_queue *q, char *page) in queue_max_segments_show() argument
125 return queue_var_show(queue_max_segments(q), page); in queue_max_segments_show()
128 static ssize_t queue_max_discard_segments_show(struct request_queue *q, in queue_max_discard_segments_show() argument
131 return queue_var_show(queue_max_discard_segments(q), page); in queue_max_discard_segments_show()
134 static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page) in queue_max_integrity_segments_show() argument
136 return queue_var_show(q->limits.max_integrity_segments, page); in queue_max_integrity_segments_show()
139 static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page) in queue_max_segment_size_show() argument
141 return queue_var_show(queue_max_segment_size(q), page); in queue_max_segment_size_show()
144 static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page) in queue_logical_block_size_show() argument
146 return queue_var_show(queue_logical_block_size(q), page); in queue_logical_block_size_show()
149 static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page) in queue_physical_block_size_show() argument
151 return queue_var_show(queue_physical_block_size(q), page); in queue_physical_block_size_show()
154 static ssize_t queue_chunk_sectors_show(struct request_queue *q, char *page) in queue_chunk_sectors_show() argument
156 return queue_var_show(q->limits.chunk_sectors, page); in queue_chunk_sectors_show()
159 static ssize_t queue_io_min_show(struct request_queue *q, char *page) in queue_io_min_show() argument
161 return queue_var_show(queue_io_min(q), page); in queue_io_min_show()
164 static ssize_t queue_io_opt_show(struct request_queue *q, char *page) in queue_io_opt_show() argument
166 return queue_var_show(queue_io_opt(q), page); in queue_io_opt_show()
169 static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page) in queue_discard_granularity_show() argument
171 return queue_var_show(q->limits.discard_granularity, page); in queue_discard_granularity_show()
174 static ssize_t queue_discard_max_hw_show(struct request_queue *q, char *page) in queue_discard_max_hw_show() argument
178 (unsigned long long)q->limits.max_hw_discard_sectors << 9); in queue_discard_max_hw_show()
181 static ssize_t queue_discard_max_show(struct request_queue *q, char *page) in queue_discard_max_show() argument
184 (unsigned long long)q->limits.max_discard_sectors << 9); in queue_discard_max_show()
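
Several attributes in this listing convert between the block layer's 512-byte sector unit and user-visible units: ">> 1" turns sectors into KiB (max_sectors_kb, max_hw_sectors_kb) and "<< 9" turns sectors into bytes (the discard and write-zeroes limits). A minimal standalone illustration:

        #include <stdio.h>

        int main(void)
        {
                unsigned long long sectors = 2048;              /* 512-byte sectors */
                unsigned long long kb    = sectors >> 1;        /* 1024 KiB, as in queue_max_sectors_show() */
                unsigned long long bytes = sectors << 9;        /* 1048576 bytes, as in queue_discard_max_show() */

                printf("%llu sectors = %llu KiB = %llu bytes\n", sectors, kb, bytes);
                return 0;
        }
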
187 static ssize_t queue_discard_max_store(struct request_queue *q, in queue_discard_max_store() argument
196 if (max_discard & (q->limits.discard_granularity - 1)) in queue_discard_max_store()
203 if (max_discard > q->limits.max_hw_discard_sectors) in queue_discard_max_store()
204 max_discard = q->limits.max_hw_discard_sectors; in queue_discard_max_store()
206 q->limits.max_discard_sectors = max_discard; in queue_discard_max_store()
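
How the excerpted queue_discard_max_store() lines fit together, as a hedged reconstruction (the local variable name, return handling, and the byte-to-sector shift are filled in from context; exact checks vary between kernel versions): the value is parsed in bytes, must be a multiple of the discard granularity, is converted to sectors, and is clamped to the hardware limit before being stored.

        static ssize_t queue_discard_max_store(struct request_queue *q,
                                               const char *page, size_t count)
        {
                unsigned long max_discard;
                ssize_t ret = queue_var_store(&max_discard, page, count);

                if (ret < 0)
                        return ret;

                /* must be aligned to the discard granularity (a power of two, in bytes) */
                if (max_discard & (q->limits.discard_granularity - 1))
                        return -EINVAL;

                /* user value is in bytes, the limit is kept in 512-byte sectors */
                max_discard >>= 9;
                if (max_discard > q->limits.max_hw_discard_sectors)
                        max_discard = q->limits.max_hw_discard_sectors;

                q->limits.max_discard_sectors = max_discard;
                return ret;
        }
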
210 static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page) in queue_discard_zeroes_data_show() argument
215 static ssize_t queue_write_same_max_show(struct request_queue *q, char *page) in queue_write_same_max_show() argument
220 static ssize_t queue_write_zeroes_max_show(struct request_queue *q, char *page) in queue_write_zeroes_max_show() argument
223 (unsigned long long)q->limits.max_write_zeroes_sectors << 9); in queue_write_zeroes_max_show()
226 static ssize_t queue_zone_write_granularity_show(struct request_queue *q, in queue_zone_write_granularity_show() argument
229 return queue_var_show(queue_zone_write_granularity(q), page); in queue_zone_write_granularity_show()
232 static ssize_t queue_zone_append_max_show(struct request_queue *q, char *page) in queue_zone_append_max_show() argument
234 unsigned long long max_sectors = q->limits.max_zone_append_sectors; in queue_zone_append_max_show()
240 queue_max_sectors_store(struct request_queue *q, const char *page, size_t count) in queue_max_sectors_store() argument
243 max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1, in queue_max_sectors_store()
251 q->limits.max_dev_sectors >> 1); in queue_max_sectors_store()
256 spin_lock_irq(&q->queue_lock); in queue_max_sectors_store()
257 q->limits.max_sectors = max_sectors_kb << 1; in queue_max_sectors_store()
258 if (q->disk) in queue_max_sectors_store()
259 q->disk->bdi->io_pages = max_sectors_kb >> (PAGE_SHIFT - 10); in queue_max_sectors_store()
260 spin_unlock_irq(&q->queue_lock); in queue_max_sectors_store()
265 static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page) in queue_max_hw_sectors_show() argument
267 int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1; in queue_max_hw_sectors_show()
272 static ssize_t queue_virt_boundary_mask_show(struct request_queue *q, char *page) in queue_virt_boundary_mask_show() argument
274 return queue_var_show(q->limits.virt_boundary_mask, page); in queue_virt_boundary_mask_show()
277 static ssize_t queue_dma_alignment_show(struct request_queue *q, char *page) in queue_dma_alignment_show() argument
279 return queue_var_show(queue_dma_alignment(q), page); in queue_dma_alignment_show()
284 queue_##name##_show(struct request_queue *q, char *page) \
287 bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags); \
291 queue_##name##_store(struct request_queue *q, const char *page, size_t count) \
302 blk_queue_flag_set(QUEUE_FLAG_##flag, q); \
304 blk_queue_flag_clear(QUEUE_FLAG_##flag, q); \
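
Lines 284-304 belong to the QUEUE_SYSFS_BIT_FNS() macro, which stamps out a matching show/store pair for a single queue flag. A sketch of the macro and how it is typically instantiated; the "neg" parameter inverts the bit, e.g. for the "rotational" attribute backed by QUEUE_FLAG_NONROT. Treat this as illustrative; the authoritative body is in blk-sysfs.c.

        #define QUEUE_SYSFS_BIT_FNS(name, flag, neg)                            \
        static ssize_t                                                          \
        queue_##name##_show(struct request_queue *q, char *page)               \
        {                                                                       \
                int bit;                                                        \
                bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags);             \
                return queue_var_show(neg ? !bit : bit, page);                  \
        }                                                                       \
        static ssize_t                                                          \
        queue_##name##_store(struct request_queue *q, const char *page, size_t count) \
        {                                                                       \
                unsigned long val;                                              \
                ssize_t ret;                                                    \
                ret = queue_var_store(&val, page, count);                       \
                if (ret < 0)                                                    \
                        return ret;                                             \
                if (neg)                                                        \
                        val = !val;                                             \
                                                                                \
                if (val)                                                        \
                        blk_queue_flag_set(QUEUE_FLAG_##flag, q);               \
                else                                                            \
                        blk_queue_flag_clear(QUEUE_FLAG_##flag, q);             \
                return ret;                                                     \
        }

        /* typical instantiations */
        QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1);  /* sysfs name "rotational", hence neg=1 */
        QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0);
        QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0);
        QUEUE_SYSFS_BIT_FNS(stable_writes, STABLE_WRITES, 0);
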
314 static ssize_t queue_zoned_show(struct request_queue *q, char *page) in queue_zoned_show() argument
316 switch (blk_queue_zoned_model(q)) { in queue_zoned_show()
326 static ssize_t queue_nr_zones_show(struct request_queue *q, char *page) in queue_nr_zones_show() argument
328 return queue_var_show(disk_nr_zones(q->disk), page); in queue_nr_zones_show()
331 static ssize_t queue_max_open_zones_show(struct request_queue *q, char *page) in queue_max_open_zones_show() argument
333 return queue_var_show(bdev_max_open_zones(q->disk->part0), page); in queue_max_open_zones_show()
336 static ssize_t queue_max_active_zones_show(struct request_queue *q, char *page) in queue_max_active_zones_show() argument
338 return queue_var_show(bdev_max_active_zones(q->disk->part0), page); in queue_max_active_zones_show()
341 static ssize_t queue_nomerges_show(struct request_queue *q, char *page) in queue_nomerges_show() argument
343 return queue_var_show((blk_queue_nomerges(q) << 1) | in queue_nomerges_show()
344 blk_queue_noxmerges(q), page); in queue_nomerges_show()
347 static ssize_t queue_nomerges_store(struct request_queue *q, const char *page, in queue_nomerges_store() argument
356 blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, q); in queue_nomerges_store()
357 blk_queue_flag_clear(QUEUE_FLAG_NOXMERGES, q); in queue_nomerges_store()
359 blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q); in queue_nomerges_store()
361 blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q); in queue_nomerges_store()
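
queue_nomerges_show()/queue_nomerges_store() encode two flags as one small integer: 0 means merging is fully enabled, 1 sets QUEUE_FLAG_NOXMERGES (disable only the more expensive extended merge lookups), 2 sets QUEUE_FLAG_NOMERGES (disable merging entirely). A hedged reconstruction of the store side from the excerpted lines:

        static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
                                            size_t count)
        {
                unsigned long nm;
                ssize_t ret = queue_var_store(&nm, page, count);

                if (ret < 0)
                        return ret;

                /* start from "merges allowed", then set the requested level */
                blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
                blk_queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
                if (nm == 2)
                        blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
                else if (nm)
                        blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);

                return ret;
        }
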
366 static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page) in queue_rq_affinity_show() argument
368 bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags); in queue_rq_affinity_show()
369 bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags); in queue_rq_affinity_show()
375 queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count) in queue_rq_affinity_store() argument
386 blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q); in queue_rq_affinity_store()
387 blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, q); in queue_rq_affinity_store()
389 blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q); in queue_rq_affinity_store()
390 blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q); in queue_rq_affinity_store()
392 blk_queue_flag_clear(QUEUE_FLAG_SAME_COMP, q); in queue_rq_affinity_store()
393 blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q); in queue_rq_affinity_store()
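
rq_affinity uses the same single-integer encoding for QUEUE_FLAG_SAME_COMP and QUEUE_FLAG_SAME_FORCE: 0 disables completion affinity, 1 completes on the submitting CPU's group, 2 forces completion on the exact submitting CPU. A sketch of the store path consistent with the excerpted lines (the CONFIG_SMP guard around the body is omitted):

        static ssize_t
        queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
        {
                unsigned long val;
                ssize_t ret = queue_var_store(&val, page, count);

                if (ret < 0)
                        return ret;

                if (val == 2) {
                        blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
                        blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
                } else if (val == 1) {
                        blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
                        blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
                } else if (val == 0) {
                        blk_queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
                        blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
                }
                return ret;
        }
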
399 static ssize_t queue_poll_delay_show(struct request_queue *q, char *page) in queue_poll_delay_show() argument
403 if (q->poll_nsec == BLK_MQ_POLL_CLASSIC) in queue_poll_delay_show()
406 val = q->poll_nsec / 1000; in queue_poll_delay_show()
411 static ssize_t queue_poll_delay_store(struct request_queue *q, const char *page, in queue_poll_delay_store() argument
416 if (!q->mq_ops || !q->mq_ops->poll) in queue_poll_delay_store()
424 q->poll_nsec = BLK_MQ_POLL_CLASSIC; in queue_poll_delay_store()
426 q->poll_nsec = val * 1000; in queue_poll_delay_store()
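
io_poll_delay accepts -1 to select classic (busy) polling and a non-negative value in microseconds for hybrid polling; internally q->poll_nsec holds either BLK_MQ_POLL_CLASSIC or the value converted to nanoseconds. A hedged reconstruction of the store handler around the excerpted lines:

        static ssize_t queue_poll_delay_store(struct request_queue *q, const char *page,
                                              size_t count)
        {
                int err, val;

                if (!q->mq_ops || !q->mq_ops->poll)
                        return -EINVAL;

                err = kstrtoint(page, 10, &val);
                if (err < 0)
                        return err;

                if (val == -1)
                        q->poll_nsec = BLK_MQ_POLL_CLASSIC;     /* classic busy polling */
                else if (val >= 0)
                        q->poll_nsec = val * 1000;              /* hybrid poll, usec -> nsec */
                else
                        return -EINVAL;

                return count;
        }
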
433 static ssize_t queue_poll_show(struct request_queue *q, char *page) in queue_poll_show() argument
435 return queue_var_show(test_bit(QUEUE_FLAG_POLL, &q->queue_flags), page); in queue_poll_show()
438 static ssize_t queue_poll_store(struct request_queue *q, const char *page, in queue_poll_store() argument
441 if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags)) in queue_poll_store()
448 static ssize_t queue_io_timeout_show(struct request_queue *q, char *page) in queue_io_timeout_show() argument
450 return sprintf(page, "%u\n", jiffies_to_msecs(q->rq_timeout)); in queue_io_timeout_show()
453 static ssize_t queue_io_timeout_store(struct request_queue *q, const char *page, in queue_io_timeout_store() argument
463 blk_queue_rq_timeout(q, msecs_to_jiffies(val)); in queue_io_timeout_store()
468 static ssize_t queue_wb_lat_show(struct request_queue *q, char *page) in queue_wb_lat_show() argument
470 if (!wbt_rq_qos(q)) in queue_wb_lat_show()
473 return sprintf(page, "%llu\n", div_u64(wbt_get_min_lat(q), 1000)); in queue_wb_lat_show()
476 static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page, in queue_wb_lat_store() argument
489 rqos = wbt_rq_qos(q); in queue_wb_lat_store()
491 ret = wbt_init(q); in queue_wb_lat_store()
497 val = wbt_default_latency_nsec(q); in queue_wb_lat_store()
501 if (wbt_get_min_lat(q) == val) in queue_wb_lat_store()
509 blk_mq_freeze_queue(q); in queue_wb_lat_store()
510 blk_mq_quiesce_queue(q); in queue_wb_lat_store()
512 wbt_set_min_lat(q, val); in queue_wb_lat_store()
514 blk_mq_unquiesce_queue(q); in queue_wb_lat_store()
515 blk_mq_unfreeze_queue(q); in queue_wb_lat_store()
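
The wbt latency store shows the standard pattern for changing a value that the I/O fast path reads locklessly: freeze the queue so outstanding requests drain and new submitters block, quiesce it so in-flight dispatch callbacks finish, apply the change, then undo both in reverse order. Schematically (comments added for clarity; the excerpted lines 509-515 are the real sequence):

        blk_mq_freeze_queue(q);         /* drain requests, block new submitters */
        blk_mq_quiesce_queue(q);        /* wait for ongoing dispatches to finish */

        wbt_set_min_lat(q, val);        /* safe: nothing is reading the old value */

        blk_mq_unquiesce_queue(q);
        blk_mq_unfreeze_queue(q);
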
520 static ssize_t queue_wc_show(struct request_queue *q, char *page) in queue_wc_show() argument
522 if (test_bit(QUEUE_FLAG_WC, &q->queue_flags)) in queue_wc_show()
528 static ssize_t queue_wc_store(struct request_queue *q, const char *page, in queue_wc_store() argument
543 blk_queue_flag_set(QUEUE_FLAG_WC, q); in queue_wc_store()
545 blk_queue_flag_clear(QUEUE_FLAG_WC, q); in queue_wc_store()
550 static ssize_t queue_fua_show(struct request_queue *q, char *page) in queue_fua_show() argument
552 return sprintf(page, "%u\n", test_bit(QUEUE_FLAG_FUA, &q->queue_flags)); in queue_fua_show()
555 static ssize_t queue_dax_show(struct request_queue *q, char *page) in queue_dax_show() argument
557 return queue_var_show(blk_queue_dax(q), page); in queue_dax_show()
683 struct request_queue *q = in queue_attr_visible() local
687 (!q->mq_ops || !q->mq_ops->timeout)) in queue_attr_visible()
692 !blk_queue_is_zoned(q)) in queue_attr_visible()
710 struct request_queue *q = in queue_attr_show() local
716 mutex_lock(&q->sysfs_lock); in queue_attr_show()
717 res = entry->show(q, page); in queue_attr_show()
718 mutex_unlock(&q->sysfs_lock); in queue_attr_show()
727 struct request_queue *q; in queue_attr_store() local
733 q = container_of(kobj, struct request_queue, kobj); in queue_attr_store()
734 mutex_lock(&q->sysfs_lock); in queue_attr_store()
735 res = entry->store(q, page, length); in queue_attr_store()
736 mutex_unlock(&q->sysfs_lock); in queue_attr_store()
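
queue_attr_show()/queue_attr_store() are the generic sysfs_ops callbacks; each queue/<name> file is a queue_sysfs_entry whose show/store pointers reference the per-attribute handlers above. A sketch of the wiring used in blk-sysfs.c (macro and field names as in recent kernels; details may vary by version):

        struct queue_sysfs_entry {
                struct attribute attr;
                ssize_t (*show)(struct request_queue *, char *);
                ssize_t (*store)(struct request_queue *, const char *, size_t);
        };

        #define QUEUE_RO_ENTRY(_prefix, _name)                  \
        static struct queue_sysfs_entry _prefix##_entry = {     \
                .attr   = { .name = _name, .mode = 0444 },      \
                .show   = _prefix##_show,                       \
        }

        #define QUEUE_RW_ENTRY(_prefix, _name)                  \
        static struct queue_sysfs_entry _prefix##_entry = {     \
                .attr   = { .name = _name, .mode = 0644 },      \
                .show   = _prefix##_show,                       \
                .store  = _prefix##_store,                      \
        }

        QUEUE_RW_ENTRY(queue_requests, "nr_requests");
        QUEUE_RW_ENTRY(queue_ra, "read_ahead_kb");
        QUEUE_RO_ENTRY(queue_max_hw_sectors, "max_hw_sectors_kb");

        static const struct sysfs_ops queue_sysfs_ops = {
                .show   = queue_attr_show,
                .store  = queue_attr_store,
        };
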
742 struct request_queue *q = container_of(rcu_head, struct request_queue, in blk_free_queue_rcu() local
745 kmem_cache_free(blk_get_queue_kmem_cache(blk_queue_has_srcu(q)), q); in blk_free_queue_rcu()
765 struct request_queue *q = in blk_release_queue() local
770 percpu_ref_exit(&q->q_usage_counter); in blk_release_queue()
772 if (q->poll_stat) in blk_release_queue()
773 blk_stat_remove_callback(q, q->poll_cb); in blk_release_queue()
774 blk_stat_free_callback(q->poll_cb); in blk_release_queue()
776 blk_free_queue_stats(q->stats); in blk_release_queue()
777 kfree(q->poll_stat); in blk_release_queue()
779 if (queue_is_mq(q)) in blk_release_queue()
780 blk_mq_release(q); in blk_release_queue()
782 if (blk_queue_has_srcu(q)) in blk_release_queue()
783 cleanup_srcu_struct(q->srcu); in blk_release_queue()
785 ida_free(&blk_queue_ida, q->id); in blk_release_queue()
786 call_rcu(&q->rcu_head, blk_free_queue_rcu); in blk_release_queue()
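
blk_free_queue_rcu() follows the classic call_rcu() idiom: blk_release_queue() schedules the final free via call_rcu(&q->rcu_head, ...), and the callback recovers the enclosing request_queue from the embedded rcu_head with container_of(), so the memory is only reused after a grace period and readers still holding an RCU-protected pointer stay safe. A generic sketch of the idiom with a hypothetical struct foo:

        struct foo {
                /* ... payload ... */
                struct rcu_head rcu_head;       /* embedded, like request_queue::rcu_head */
        };

        static void foo_free_rcu(struct rcu_head *head)
        {
                /* recover the enclosing object from the embedded rcu_head */
                struct foo *f = container_of(head, struct foo, rcu_head);

                kfree(f);
        }

        static void foo_release(struct foo *f)
        {
                /* defer the free until all pre-existing RCU readers are done */
                call_rcu(&f->rcu_head, foo_free_rcu);
        }
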
811 struct request_queue *q = disk->queue; in blk_register_queue() local
814 mutex_lock(&q->sysfs_dir_lock); in blk_register_queue()
816 ret = kobject_add(&q->kobj, &disk_to_dev(disk)->kobj, "queue"); in blk_register_queue()
820 if (queue_is_mq(q)) in blk_register_queue()
822 mutex_lock(&q->sysfs_lock); in blk_register_queue()
824 mutex_lock(&q->debugfs_mutex); in blk_register_queue()
825 q->debugfs_dir = debugfs_create_dir(kobject_name(q->kobj.parent), in blk_register_queue()
827 if (queue_is_mq(q)) in blk_register_queue()
828 blk_mq_debugfs_register(q); in blk_register_queue()
829 mutex_unlock(&q->debugfs_mutex); in blk_register_queue()
835 if (q->elevator) { in blk_register_queue()
836 ret = elv_register_queue(q, false); in blk_register_queue()
841 ret = blk_crypto_sysfs_register(q); in blk_register_queue()
845 blk_queue_flag_set(QUEUE_FLAG_REGISTERED, q); in blk_register_queue()
846 wbt_enable_default(q); in blk_register_queue()
850 kobject_uevent(&q->kobj, KOBJ_ADD); in blk_register_queue()
851 if (q->elevator) in blk_register_queue()
852 kobject_uevent(&q->elevator->kobj, KOBJ_ADD); in blk_register_queue()
853 mutex_unlock(&q->sysfs_lock); in blk_register_queue()
856 mutex_unlock(&q->sysfs_dir_lock); in blk_register_queue()
867 if (!blk_queue_init_done(q)) { in blk_register_queue()
868 blk_queue_flag_set(QUEUE_FLAG_INIT_DONE, q); in blk_register_queue()
869 percpu_ref_switch_to_percpu(&q->q_usage_counter); in blk_register_queue()
875 elv_unregister_queue(q); in blk_register_queue()
877 mutex_unlock(&q->sysfs_lock); in blk_register_queue()
878 mutex_unlock(&q->sysfs_dir_lock); in blk_register_queue()
879 kobject_del(&q->kobj); in blk_register_queue()
893 struct request_queue *q = disk->queue; in blk_unregister_queue() local
895 if (WARN_ON(!q)) in blk_unregister_queue()
899 if (!blk_queue_registered(q)) in blk_unregister_queue()
907 mutex_lock(&q->sysfs_lock); in blk_unregister_queue()
908 blk_queue_flag_clear(QUEUE_FLAG_REGISTERED, q); in blk_unregister_queue()
909 mutex_unlock(&q->sysfs_lock); in blk_unregister_queue()
911 mutex_lock(&q->sysfs_dir_lock); in blk_unregister_queue()
916 if (queue_is_mq(q)) in blk_unregister_queue()
918 blk_crypto_sysfs_unregister(q); in blk_unregister_queue()
920 mutex_lock(&q->sysfs_lock); in blk_unregister_queue()
921 elv_unregister_queue(q); in blk_unregister_queue()
923 mutex_unlock(&q->sysfs_lock); in blk_unregister_queue()
926 kobject_uevent(&q->kobj, KOBJ_REMOVE); in blk_unregister_queue()
927 kobject_del(&q->kobj); in blk_unregister_queue()
928 mutex_unlock(&q->sysfs_dir_lock); in blk_unregister_queue()
930 mutex_lock(&q->debugfs_mutex); in blk_unregister_queue()
931 blk_trace_shutdown(q); in blk_unregister_queue()
932 debugfs_remove_recursive(q->debugfs_dir); in blk_unregister_queue()
933 q->debugfs_dir = NULL; in blk_unregister_queue()
934 q->sched_debugfs_dir = NULL; in blk_unregister_queue()
935 q->rqos_debugfs_dir = NULL; in blk_unregister_queue()
936 mutex_unlock(&q->debugfs_mutex); in blk_unregister_queue()
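
For completeness, these handlers back the files under /sys/block/<disk>/queue/. A small userspace C sketch that reads one attribute and writes another; the device name and values are examples only, and writing requires appropriate privileges:

        #include <stdio.h>

        int main(void)
        {
                char buf[64];
                FILE *f;

                /* read_ahead_kb is served by queue_ra_show() */
                f = fopen("/sys/block/sda/queue/read_ahead_kb", "r");   /* example device */
                if (f) {
                        if (fgets(buf, sizeof(buf), f))
                                printf("read_ahead_kb = %s", buf);
                        fclose(f);
                }

                /* nr_requests is handled by queue_requests_store(); needs root */
                f = fopen("/sys/block/sda/queue/nr_requests", "w");
                if (f) {
                        fputs("256\n", f);
                        fclose(f);
                }
                return 0;
        }
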