Lines matching refs: q

60 static ssize_t queue_requests_show(struct request_queue *q, char *page) in queue_requests_show() argument
62 return queue_var_show(q->nr_requests, (page)); in queue_requests_show()
66 queue_requests_store(struct request_queue *q, const char *page, size_t count) in queue_requests_store() argument
71 if (!q->request_fn && !q->mq_ops) in queue_requests_store()
81 if (q->request_fn) in queue_requests_store()
82 err = blk_update_nr_requests(q, nr); in queue_requests_store()
84 err = blk_mq_update_nr_requests(q, nr); in queue_requests_store()
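The fragments above (lines 60–84) are the show/store pair for nr_requests; the listing keeps only lines that mention q. Below is a sketch of the full pair with the omitted scaffolding filled in from context — the BLKDEV_MIN_RQ clamp, the locals, and the error handling are reconstructions, not part of the listing:

static ssize_t queue_requests_show(struct request_queue *q, char *page)
{
        return queue_var_show(q->nr_requests, (page));
}

static ssize_t
queue_requests_store(struct request_queue *q, const char *page, size_t count)
{
        unsigned long nr;
        int ret, err;

        /* Neither a legacy ->request_fn nor blk-mq ops: nothing to resize. */
        if (!q->request_fn && !q->mq_ops)
                return -EINVAL;

        ret = queue_var_store(&nr, page, count);
        if (ret < 0)
                return ret;

        if (nr < BLKDEV_MIN_RQ)
                nr = BLKDEV_MIN_RQ;

        /* Legacy and blk-mq queues resize their request pools differently. */
        if (q->request_fn)
                err = blk_update_nr_requests(q, nr);
        else
                err = blk_mq_update_nr_requests(q, nr);
        if (err)
                return err;

        return ret;
}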
92 static ssize_t queue_ra_show(struct request_queue *q, char *page) in queue_ra_show() argument
94 unsigned long ra_kb = q->backing_dev_info->ra_pages << in queue_ra_show()
101 queue_ra_store(struct request_queue *q, const char *page, size_t count) in queue_ra_store() argument
109 q->backing_dev_info->ra_pages = ra_kb >> (PAGE_SHIFT - 10); in queue_ra_store()
114 static ssize_t queue_max_sectors_show(struct request_queue *q, char *page) in queue_max_sectors_show() argument
116 int max_sectors_kb = queue_max_sectors(q) >> 1; in queue_max_sectors_show()
121 static ssize_t queue_max_segments_show(struct request_queue *q, char *page) in queue_max_segments_show() argument
123 return queue_var_show(queue_max_segments(q), (page)); in queue_max_segments_show()
126 static ssize_t queue_max_discard_segments_show(struct request_queue *q, in queue_max_discard_segments_show() argument
129 return queue_var_show(queue_max_discard_segments(q), (page)); in queue_max_discard_segments_show()
132 static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page) in queue_max_integrity_segments_show() argument
134 return queue_var_show(q->limits.max_integrity_segments, (page)); in queue_max_integrity_segments_show()
137 static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page) in queue_max_segment_size_show() argument
139 if (blk_queue_cluster(q)) in queue_max_segment_size_show()
140 return queue_var_show(queue_max_segment_size(q), (page)); in queue_max_segment_size_show()
145 static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page) in queue_logical_block_size_show() argument
147 return queue_var_show(queue_logical_block_size(q), page); in queue_logical_block_size_show()
150 static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page) in queue_physical_block_size_show() argument
152 return queue_var_show(queue_physical_block_size(q), page); in queue_physical_block_size_show()
155 static ssize_t queue_chunk_sectors_show(struct request_queue *q, char *page) in queue_chunk_sectors_show() argument
157 return queue_var_show(q->limits.chunk_sectors, page); in queue_chunk_sectors_show()
160 static ssize_t queue_io_min_show(struct request_queue *q, char *page) in queue_io_min_show() argument
162 return queue_var_show(queue_io_min(q), page); in queue_io_min_show()
165 static ssize_t queue_io_opt_show(struct request_queue *q, char *page) in queue_io_opt_show() argument
167 return queue_var_show(queue_io_opt(q), page); in queue_io_opt_show()
170 static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page) in queue_discard_granularity_show() argument
172 return queue_var_show(q->limits.discard_granularity, page); in queue_discard_granularity_show()
175 static ssize_t queue_discard_max_hw_show(struct request_queue *q, char *page) in queue_discard_max_hw_show() argument
179 (unsigned long long)q->limits.max_hw_discard_sectors << 9); in queue_discard_max_hw_show()
182 static ssize_t queue_discard_max_show(struct request_queue *q, char *page) in queue_discard_max_show() argument
185 (unsigned long long)q->limits.max_discard_sectors << 9); in queue_discard_max_show()
188 static ssize_t queue_discard_max_store(struct request_queue *q, in queue_discard_max_store() argument
197 if (max_discard & (q->limits.discard_granularity - 1)) in queue_discard_max_store()
204 if (max_discard > q->limits.max_hw_discard_sectors) in queue_discard_max_store()
205 max_discard = q->limits.max_hw_discard_sectors; in queue_discard_max_store()
207 q->limits.max_discard_sectors = max_discard; in queue_discard_max_store()
211 static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page) in queue_discard_zeroes_data_show() argument
216 static ssize_t queue_write_same_max_show(struct request_queue *q, char *page) in queue_write_same_max_show() argument
219 (unsigned long long)q->limits.max_write_same_sectors << 9); in queue_write_same_max_show()
222 static ssize_t queue_write_zeroes_max_show(struct request_queue *q, char *page) in queue_write_zeroes_max_show() argument
225 (unsigned long long)q->limits.max_write_zeroes_sectors << 9); in queue_write_zeroes_max_show()
229 queue_max_sectors_store(struct request_queue *q, const char *page, size_t count) in queue_max_sectors_store() argument
232 max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1, in queue_max_sectors_store()
240 q->limits.max_dev_sectors >> 1); in queue_max_sectors_store()
245 spin_lock_irq(q->queue_lock); in queue_max_sectors_store()
246 q->limits.max_sectors = max_sectors_kb << 1; in queue_max_sectors_store()
247 q->backing_dev_info->io_pages = max_sectors_kb >> (PAGE_SHIFT - 10); in queue_max_sectors_store()
248 spin_unlock_irq(q->queue_lock); in queue_max_sectors_store()
253 static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page) in queue_max_hw_sectors_show() argument
255 int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1; in queue_max_hw_sectors_show()
262 queue_show_##name(struct request_queue *q, char *page) \
265 bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags); \
269 queue_store_##name(struct request_queue *q, const char *page, size_t count) \
280 blk_queue_flag_set(QUEUE_FLAG_##flag, q); \
282 blk_queue_flag_clear(QUEUE_FLAG_##flag, q); \
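Lines 262–282 are fragments of a generator macro that stamps out show/store handlers for boolean queue flags. A sketch of the whole macro follows; the macro name QUEUE_SYSFS_BIT_FNS, the neg inversion parameter, and the example instantiations are assumptions based on how this generation of the block layer usually spells them:

#define QUEUE_SYSFS_BIT_FNS(name, flag, neg) \
static ssize_t \
queue_show_##name(struct request_queue *q, char *page) \
{ \
        int bit; \
        bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags); \
        return queue_var_show(neg ? !bit : bit, page); \
} \
static ssize_t \
queue_store_##name(struct request_queue *q, const char *page, size_t count) \
{ \
        unsigned long val; \
        ssize_t ret; \
        ret = queue_var_store(&val, page, count); \
        if (ret < 0) \
                return ret; \
        if (neg) \
                val = !val; \
        if (val) \
                blk_queue_flag_set(QUEUE_FLAG_##flag, q); \
        else \
                blk_queue_flag_clear(QUEUE_FLAG_##flag, q); \
        return ret; \
}

/* Example instantiations (assumed): rotational, add_random, iostats. */
QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1);
QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0);
QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0);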
291 static ssize_t queue_zoned_show(struct request_queue *q, char *page) in queue_zoned_show() argument
293 switch (blk_queue_zoned_model(q)) { in queue_zoned_show()
303 static ssize_t queue_nomerges_show(struct request_queue *q, char *page) in queue_nomerges_show() argument
305 return queue_var_show((blk_queue_nomerges(q) << 1) | in queue_nomerges_show()
306 blk_queue_noxmerges(q), page); in queue_nomerges_show()
309 static ssize_t queue_nomerges_store(struct request_queue *q, const char *page, in queue_nomerges_store() argument
318 spin_lock_irq(q->queue_lock); in queue_nomerges_store()
319 queue_flag_clear(QUEUE_FLAG_NOMERGES, q); in queue_nomerges_store()
320 queue_flag_clear(QUEUE_FLAG_NOXMERGES, q); in queue_nomerges_store()
322 queue_flag_set(QUEUE_FLAG_NOMERGES, q); in queue_nomerges_store()
324 queue_flag_set(QUEUE_FLAG_NOXMERGES, q); in queue_nomerges_store()
325 spin_unlock_irq(q->queue_lock); in queue_nomerges_store()
330 static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page) in queue_rq_affinity_show() argument
332 bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags); in queue_rq_affinity_show()
333 bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags); in queue_rq_affinity_show()
339 queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count) in queue_rq_affinity_store() argument
349 spin_lock_irq(q->queue_lock); in queue_rq_affinity_store()
351 queue_flag_set(QUEUE_FLAG_SAME_COMP, q); in queue_rq_affinity_store()
352 queue_flag_set(QUEUE_FLAG_SAME_FORCE, q); in queue_rq_affinity_store()
354 queue_flag_set(QUEUE_FLAG_SAME_COMP, q); in queue_rq_affinity_store()
355 queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q); in queue_rq_affinity_store()
357 queue_flag_clear(QUEUE_FLAG_SAME_COMP, q); in queue_rq_affinity_store()
358 queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q); in queue_rq_affinity_store()
360 spin_unlock_irq(q->queue_lock); in queue_rq_affinity_store()
365 static ssize_t queue_poll_delay_show(struct request_queue *q, char *page) in queue_poll_delay_show() argument
369 if (q->poll_nsec == -1) in queue_poll_delay_show()
372 val = q->poll_nsec / 1000; in queue_poll_delay_show()
377 static ssize_t queue_poll_delay_store(struct request_queue *q, const char *page, in queue_poll_delay_store() argument
382 if (!q->mq_ops || !q->mq_ops->poll) in queue_poll_delay_store()
390 q->poll_nsec = -1; in queue_poll_delay_store()
392 q->poll_nsec = val * 1000; in queue_poll_delay_store()
397 static ssize_t queue_poll_show(struct request_queue *q, char *page) in queue_poll_show() argument
399 return queue_var_show(test_bit(QUEUE_FLAG_POLL, &q->queue_flags), page); in queue_poll_show()
402 static ssize_t queue_poll_store(struct request_queue *q, const char *page, in queue_poll_store() argument
408 if (!q->mq_ops || !q->mq_ops->poll) in queue_poll_store()
416 blk_queue_flag_set(QUEUE_FLAG_POLL, q); in queue_poll_store()
418 blk_queue_flag_clear(QUEUE_FLAG_POLL, q); in queue_poll_store()
423 static ssize_t queue_wb_lat_show(struct request_queue *q, char *page) in queue_wb_lat_show() argument
425 if (!wbt_rq_qos(q)) in queue_wb_lat_show()
428 return sprintf(page, "%llu\n", div_u64(wbt_get_min_lat(q), 1000)); in queue_wb_lat_show()
431 static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page, in queue_wb_lat_store() argument
444 rqos = wbt_rq_qos(q); in queue_wb_lat_store()
446 ret = wbt_init(q); in queue_wb_lat_store()
452 val = wbt_default_latency_nsec(q); in queue_wb_lat_store()
461 if (q->mq_ops) { in queue_wb_lat_store()
462 blk_mq_freeze_queue(q); in queue_wb_lat_store()
463 blk_mq_quiesce_queue(q); in queue_wb_lat_store()
465 blk_queue_bypass_start(q); in queue_wb_lat_store()
467 wbt_set_min_lat(q, val); in queue_wb_lat_store()
468 wbt_update_limits(q); in queue_wb_lat_store()
470 if (q->mq_ops) { in queue_wb_lat_store()
471 blk_mq_unquiesce_queue(q); in queue_wb_lat_store()
472 blk_mq_unfreeze_queue(q); in queue_wb_lat_store()
474 blk_queue_bypass_end(q); in queue_wb_lat_store()
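Lines 461–474 bracket the writeback-throttling latency update with a freeze/quiesce pair on blk-mq queues, or bypass on the legacy path. A sketch of that bracket in isolation; the rationale in the comment is inferred, and val is whatever the store handler parsed from the sysfs write:

        /*
         * Idle the queue before changing the target latency: the update
         * may enable or disable wbt entirely, which must not race with
         * in-flight I/O (inferred rationale; the listing shows only the
         * calls themselves).
         */
        if (q->mq_ops) {
                blk_mq_freeze_queue(q);
                blk_mq_quiesce_queue(q);
        } else
                blk_queue_bypass_start(q);

        wbt_set_min_lat(q, val);
        wbt_update_limits(q);

        if (q->mq_ops) {
                blk_mq_unquiesce_queue(q);
                blk_mq_unfreeze_queue(q);
        } else
                blk_queue_bypass_end(q);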
479 static ssize_t queue_wc_show(struct request_queue *q, char *page) in queue_wc_show() argument
481 if (test_bit(QUEUE_FLAG_WC, &q->queue_flags)) in queue_wc_show()
487 static ssize_t queue_wc_store(struct request_queue *q, const char *page, in queue_wc_store() argument
502 blk_queue_flag_set(QUEUE_FLAG_WC, q); in queue_wc_store()
504 blk_queue_flag_clear(QUEUE_FLAG_WC, q); in queue_wc_store()
509 static ssize_t queue_fua_show(struct request_queue *q, char *page) in queue_fua_show() argument
511 return sprintf(page, "%u\n", test_bit(QUEUE_FLAG_FUA, &q->queue_flags)); in queue_fua_show()
514 static ssize_t queue_dax_show(struct request_queue *q, char *page) in queue_dax_show() argument
516 return queue_var_show(blk_queue_dax(q), page); in queue_dax_show()
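The listing jumps from line 516 to line 752 because the code in between — the attribute table — never dereferences q. Each show/store pair above is normally exposed as a file under /sys/block/<dev>/queue/ through an entry like the sketch below; the entry struct is standard for this file, but the specific names and modes shown are illustrative:

struct queue_sysfs_entry {
        struct attribute attr;
        ssize_t (*show)(struct request_queue *, char *);
        ssize_t (*store)(struct request_queue *, const char *, size_t);
};

static struct queue_sysfs_entry queue_requests_entry = {
        .attr = {.name = "nr_requests", .mode = 0644 },
        .show = queue_requests_show,
        .store = queue_requests_store,
};

static struct queue_sysfs_entry queue_ra_entry = {
        .attr = {.name = "read_ahead_kb", .mode = 0644 },
        .show = queue_ra_show,
        .store = queue_ra_store,
};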
752 struct request_queue *q = in queue_attr_show() local
758 mutex_lock(&q->sysfs_lock); in queue_attr_show()
759 if (blk_queue_dying(q)) { in queue_attr_show()
760 mutex_unlock(&q->sysfs_lock); in queue_attr_show()
763 res = entry->show(q, page); in queue_attr_show()
764 mutex_unlock(&q->sysfs_lock); in queue_attr_show()
773 struct request_queue *q; in queue_attr_store() local
779 q = container_of(kobj, struct request_queue, kobj); in queue_attr_store()
780 mutex_lock(&q->sysfs_lock); in queue_attr_store()
781 if (blk_queue_dying(q)) { in queue_attr_store()
782 mutex_unlock(&q->sysfs_lock); in queue_attr_store()
785 res = entry->store(q, page, length); in queue_attr_store()
786 mutex_unlock(&q->sysfs_lock); in queue_attr_store()
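queue_attr_show() and queue_attr_store() above are the generic kobject entry points: they resolve the struct attribute back to its queue_sysfs_entry, take q->sysfs_lock, and fail if the queue is already dying. They are plugged into the queue's kobject roughly as sketched here; default_attrs is assumed to be the table of &queue_*_entry.attr pointers built from the entries above:

static const struct sysfs_ops queue_sysfs_ops = {
        .show   = queue_attr_show,
        .store  = queue_attr_store,
};

struct kobj_type blk_queue_ktype = {
        .sysfs_ops      = &queue_sysfs_ops,
        .default_attrs  = default_attrs,   /* array of &queue_*_entry.attr */
        .release        = blk_release_queue,
};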
792 struct request_queue *q = container_of(rcu_head, struct request_queue, in blk_free_queue_rcu() local
794 kmem_cache_free(blk_requestq_cachep, q); in blk_free_queue_rcu()
816 struct request_queue *q = container_of(work, typeof(*q), release_work); in __blk_release_queue() local
818 if (test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags)) in __blk_release_queue()
819 blk_stat_remove_callback(q, q->poll_cb); in __blk_release_queue()
820 blk_stat_free_callback(q->poll_cb); in __blk_release_queue()
822 if (!blk_queue_dead(q)) { in __blk_release_queue()
827 WARN_ONCE(blk_queue_init_done(q), in __blk_release_queue()
829 q); in __blk_release_queue()
830 blk_exit_queue(q); in __blk_release_queue()
833 WARN(blk_queue_root_blkg(q), in __blk_release_queue()
835 q); in __blk_release_queue()
837 blk_free_queue_stats(q->stats); in __blk_release_queue()
839 blk_exit_rl(q, &q->root_rl); in __blk_release_queue()
841 if (q->queue_tags) in __blk_release_queue()
842 __blk_queue_free_tags(q); in __blk_release_queue()
844 if (!q->mq_ops) { in __blk_release_queue()
845 if (q->exit_rq_fn) in __blk_release_queue()
846 q->exit_rq_fn(q, q->fq->flush_rq); in __blk_release_queue()
847 blk_free_flush_queue(q->fq); in __blk_release_queue()
849 blk_mq_release(q); in __blk_release_queue()
852 blk_trace_shutdown(q); in __blk_release_queue()
854 if (q->mq_ops) in __blk_release_queue()
855 blk_mq_debugfs_unregister(q); in __blk_release_queue()
857 bioset_exit(&q->bio_split); in __blk_release_queue()
859 ida_simple_remove(&blk_queue_ida, q->id); in __blk_release_queue()
860 call_rcu(&q->rcu_head, blk_free_queue_rcu); in __blk_release_queue()
865 struct request_queue *q = in blk_release_queue() local
868 INIT_WORK(&q->release_work, __blk_release_queue); in blk_release_queue()
869 schedule_work(&q->release_work); in blk_release_queue()
891 struct request_queue *q = disk->queue; in blk_register_queue() local
893 if (WARN_ON(!q)) in blk_register_queue()
896 WARN_ONCE(test_bit(QUEUE_FLAG_REGISTERED, &q->queue_flags), in blk_register_queue()
899 queue_flag_set_unlocked(QUEUE_FLAG_REGISTERED, q); in blk_register_queue()
910 if (!blk_queue_init_done(q)) { in blk_register_queue()
911 queue_flag_set_unlocked(QUEUE_FLAG_INIT_DONE, q); in blk_register_queue()
912 percpu_ref_switch_to_percpu(&q->q_usage_counter); in blk_register_queue()
913 blk_queue_bypass_end(q); in blk_register_queue()
921 mutex_lock(&q->sysfs_lock); in blk_register_queue()
923 ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue"); in blk_register_queue()
929 if (q->mq_ops) { in blk_register_queue()
930 __blk_mq_register_dev(dev, q); in blk_register_queue()
931 blk_mq_debugfs_register(q); in blk_register_queue()
934 kobject_uevent(&q->kobj, KOBJ_ADD); in blk_register_queue()
936 wbt_enable_default(q); in blk_register_queue()
938 blk_throtl_register_queue(q); in blk_register_queue()
940 if (q->request_fn || (q->mq_ops && q->elevator)) { in blk_register_queue()
941 ret = elv_register_queue(q); in blk_register_queue()
943 mutex_unlock(&q->sysfs_lock); in blk_register_queue()
944 kobject_uevent(&q->kobj, KOBJ_REMOVE); in blk_register_queue()
945 kobject_del(&q->kobj); in blk_register_queue()
953 mutex_unlock(&q->sysfs_lock); in blk_register_queue()
967 struct request_queue *q = disk->queue; in blk_unregister_queue() local
969 if (WARN_ON(!q)) in blk_unregister_queue()
973 if (!test_bit(QUEUE_FLAG_REGISTERED, &q->queue_flags)) in blk_unregister_queue()
981 mutex_lock(&q->sysfs_lock); in blk_unregister_queue()
983 blk_queue_flag_clear(QUEUE_FLAG_REGISTERED, q); in blk_unregister_queue()
989 if (q->mq_ops) in blk_unregister_queue()
990 blk_mq_unregister_dev(disk_to_dev(disk), q); in blk_unregister_queue()
991 mutex_unlock(&q->sysfs_lock); in blk_unregister_queue()
993 kobject_uevent(&q->kobj, KOBJ_REMOVE); in blk_unregister_queue()
994 kobject_del(&q->kobj); in blk_unregister_queue()
997 rq_qos_exit(q); in blk_unregister_queue()
999 mutex_lock(&q->sysfs_lock); in blk_unregister_queue()
1000 if (q->request_fn || (q->mq_ops && q->elevator)) in blk_unregister_queue()
1001 elv_unregister_queue(q); in blk_unregister_queue()
1002 mutex_unlock(&q->sysfs_lock); in blk_unregister_queue()
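blk_register_queue() and blk_unregister_queue() are rarely called by drivers directly: registration normally happens inside device_add_disk(), and unregistration inside del_gendisk(). A hypothetical teardown fragment for illustration (the mydrv_* names are invented):

#include <linux/blkdev.h>
#include <linux/genhd.h>

struct mydrv {
        struct gendisk *disk;
        struct request_queue *queue;
};

static void mydrv_remove(struct mydrv *drv)
{
        del_gendisk(drv->disk);        /* tears down sysfs via blk_unregister_queue() */
        blk_cleanup_queue(drv->queue); /* final queue teardown and release */
        put_disk(drv->disk);
}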