// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to sysfs handling
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/blktrace_api.h>
#include <linux/blk-mq.h>
#include <linux/blk-cgroup.h>
#include <linux/debugfs.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
#include "blk-wbt.h"
struct queue_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct request_queue *, char *);
	ssize_t (*store)(struct request_queue *, const char *, size_t);
};
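
/*
 * Each entry of this type describes one attribute under
 * /sys/block/<disk>/queue/; its show()/store() callbacks run with
 * q->sysfs_lock held (see queue_attr_show() and queue_attr_store() below).
 */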
static ssize_t
queue_var_show(unsigned long var, char *page)
{
	return sprintf(page, "%lu\n", var);
}
static ssize_t
queue_var_store(unsigned long *var, const char *page, size_t count)
{
	int err;
	unsigned long v;

	err = kstrtoul(page, 10, &v);
	if (err || v > UINT_MAX)
		return -EINVAL;

	*var = v;

	return count;
}
static ssize_t queue_var_store64(s64 *var, const char *page)
{
	int err;
	s64 v;

	err = kstrtos64(page, 10, &v);
	if (err < 0)
		return err;

	*var = v;
	return 0;
}
static ssize_t queue_requests_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->nr_requests, (page));
}
static ssize_t
queue_requests_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long nr;
	int ret, err;

	if (!queue_is_mq(q))
		return -EINVAL;

	ret = queue_var_store(&nr, page, count);
	if (ret < 0)
		return ret;

	if (nr < BLKDEV_MIN_RQ)
		nr = BLKDEV_MIN_RQ;

	err = blk_mq_update_nr_requests(q, nr);
	if (err)
		return err;

	return ret;
}
static ssize_t queue_ra_show(struct request_queue *q, char *page)
{
	unsigned long ra_kb = q->backing_dev_info->ra_pages <<
					(PAGE_SHIFT - 10);

	return queue_var_show(ra_kb, (page));
}
static ssize_t
queue_ra_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long ra_kb;
	ssize_t ret = queue_var_store(&ra_kb, page, count);

	if (ret < 0)
		return ret;

	q->backing_dev_info->ra_pages = ra_kb >> (PAGE_SHIFT - 10);

	return ret;
}
static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
{
	int max_sectors_kb = queue_max_sectors(q) >> 1;

	return queue_var_show(max_sectors_kb, (page));
}

static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_segments(q), (page));
}

static ssize_t queue_max_discard_segments_show(struct request_queue *q,
		char *page)
{
	return queue_var_show(queue_max_discard_segments(q), (page));
}

static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.max_integrity_segments, (page));
}

static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_segment_size(q), (page));
}

static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_logical_block_size(q), page);
}

static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_physical_block_size(q), page);
}
static ssize_t queue_chunk_sectors_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.chunk_sectors, page);
}

static ssize_t queue_io_min_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_min(q), page);
}

static ssize_t queue_io_opt_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_opt(q), page);
}

static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.discard_granularity, page);
}

static ssize_t queue_discard_max_hw_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		(unsigned long long)q->limits.max_hw_discard_sectors << 9);
}

static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		(unsigned long long)q->limits.max_discard_sectors << 9);
}
static ssize_t queue_discard_max_store(struct request_queue *q,
				       const char *page, size_t count)
{
	unsigned long max_discard;
	ssize_t ret = queue_var_store(&max_discard, page, count);

	if (ret < 0)
		return ret;

	if (max_discard & (q->limits.discard_granularity - 1))
		return -EINVAL;

	max_discard >>= 9;
	if (max_discard > UINT_MAX)
		return -EINVAL;

	if (max_discard > q->limits.max_hw_discard_sectors)
		max_discard = q->limits.max_hw_discard_sectors;

	q->limits.max_discard_sectors = max_discard;
	return ret;
}
static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)
{
	return queue_var_show(0, page);
}

static ssize_t queue_write_same_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		(unsigned long long)q->limits.max_write_same_sectors << 9);
}

static ssize_t queue_write_zeroes_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		(unsigned long long)q->limits.max_write_zeroes_sectors << 9);
}

static ssize_t queue_zone_write_granularity_show(struct request_queue *q,
						 char *page)
{
	return queue_var_show(queue_zone_write_granularity(q), page);
}

static ssize_t queue_zone_append_max_show(struct request_queue *q, char *page)
{
	unsigned long long max_sectors = q->limits.max_zone_append_sectors;

	return sprintf(page, "%llu\n", max_sectors << SECTOR_SHIFT);
}
static ssize_t
queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long max_sectors_kb,
		max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
			page_kb = 1 << (PAGE_SHIFT - 10);
	ssize_t ret = queue_var_store(&max_sectors_kb, page, count);

	if (ret < 0)
		return ret;

	max_hw_sectors_kb = min_not_zero(max_hw_sectors_kb, (unsigned long)
					 q->limits.max_dev_sectors >> 1);

	if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
		return -EINVAL;

	spin_lock_irq(&q->queue_lock);
	q->limits.max_sectors = max_sectors_kb << 1;
	q->backing_dev_info->io_pages = max_sectors_kb >> (PAGE_SHIFT - 10);
	spin_unlock_irq(&q->queue_lock);

	return ret;
}
static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
{
	int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;

	return queue_var_show(max_hw_sectors_kb, (page));
}
#define QUEUE_SYSFS_BIT_FNS(name, flag, neg)				\
static ssize_t								\
queue_##name##_show(struct request_queue *q, char *page)		\
{									\
	int bit;							\
	bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags);		\
	return queue_var_show(neg ? !bit : bit, page);			\
}									\
static ssize_t								\
queue_##name##_store(struct request_queue *q, const char *page, size_t count) \
{									\
	unsigned long val;						\
	ssize_t ret;							\
	ret = queue_var_store(&val, page, count);			\
	if (ret < 0)							\
		return ret;						\
	if (neg)							\
		val = !val;						\
									\
	if (val)							\
		blk_queue_flag_set(QUEUE_FLAG_##flag, q);		\
	else								\
		blk_queue_flag_clear(QUEUE_FLAG_##flag, q);		\
	return ret;							\
}

QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1);
QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0);
QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0);
QUEUE_SYSFS_BIT_FNS(stable_writes, STABLE_WRITES, 0);
#undef QUEUE_SYSFS_BIT_FNS
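
/*
 * For illustration: QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1) above expands to
 * queue_nonrot_show()/queue_nonrot_store(), which report and update
 * QUEUE_FLAG_NONROT with the value inverted (neg == 1), so the "rotational"
 * attribute reads 1 for rotational devices even though the flag itself marks
 * non-rotational ones.
 */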
static ssize_t queue_zoned_show(struct request_queue *q, char *page)
{
	switch (blk_queue_zoned_model(q)) {
	case BLK_ZONED_HA:
		return sprintf(page, "host-aware\n");
	case BLK_ZONED_HM:
		return sprintf(page, "host-managed\n");
	default:
		return sprintf(page, "none\n");
	}
}
static ssize_t queue_nr_zones_show(struct request_queue *q, char *page)
{
	return queue_var_show(blk_queue_nr_zones(q), page);
}

static ssize_t queue_max_open_zones_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_open_zones(q), page);
}

static ssize_t queue_max_active_zones_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_active_zones(q), page);
}

static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
{
	return queue_var_show((blk_queue_nomerges(q) << 1) |
			       blk_queue_noxmerges(q), page);
}
static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
				    size_t count)
{
	unsigned long nm;
	ssize_t ret = queue_var_store(&nm, page, count);

	if (ret < 0)
		return ret;

	blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
	blk_queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
	if (nm == 2)
		blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	else if (nm)
		blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);

	return ret;
}
static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
{
	bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);
	bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags);

	return queue_var_show(set << force, page);
}
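
/*
 * Note: the value read back is set << force (0, 1 or 2); writing 1 steers
 * completions to the submitting CPU's group (QUEUE_FLAG_SAME_COMP), writing 2
 * additionally forces completion on the exact submitting CPU
 * (QUEUE_FLAG_SAME_FORCE), as handled by the store function below.
 */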
static ssize_t
queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
{
	ssize_t ret = -EINVAL;
#ifdef CONFIG_SMP
	unsigned long val;

	ret = queue_var_store(&val, page, count);
	if (ret < 0)
		return ret;

	if (val == 2) {
		blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 1) {
		blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 0) {
		blk_queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	}
#endif
	return ret;
}
static ssize_t queue_poll_delay_show(struct request_queue *q, char *page)
{
	int val;

	if (q->poll_nsec == BLK_MQ_POLL_CLASSIC)
		val = BLK_MQ_POLL_CLASSIC;
	else
		val = q->poll_nsec / 1000;

	return sprintf(page, "%d\n", val);
}
static ssize_t queue_poll_delay_store(struct request_queue *q, const char *page,
				size_t count)
{
	int err, val;

	if (!q->mq_ops || !q->mq_ops->poll)
		return -EINVAL;

	err = kstrtoint(page, 10, &val);
	if (err < 0)
		return err;

	if (val == BLK_MQ_POLL_CLASSIC)
		q->poll_nsec = BLK_MQ_POLL_CLASSIC;
	else if (val >= 0)
		q->poll_nsec = val * 1000;
	else
		return -EINVAL;

	return count;
}
static ssize_t queue_poll_show(struct request_queue *q, char *page)
{
	return queue_var_show(test_bit(QUEUE_FLAG_POLL, &q->queue_flags), page);
}
static ssize_t queue_poll_store(struct request_queue *q, const char *page,
				size_t count)
{
	unsigned long poll_on;
	ssize_t ret;

	if (!q->tag_set || q->tag_set->nr_maps <= HCTX_TYPE_POLL ||
	    !q->tag_set->map[HCTX_TYPE_POLL].nr_queues)
		return -EINVAL;

	ret = queue_var_store(&poll_on, page, count);
	if (ret < 0)
		return ret;

	if (poll_on) {
		blk_queue_flag_set(QUEUE_FLAG_POLL, q);
	} else {
		blk_mq_freeze_queue(q);
		blk_queue_flag_clear(QUEUE_FLAG_POLL, q);
		blk_mq_unfreeze_queue(q);
	}

	return ret;
}
static ssize_t queue_io_timeout_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%u\n", jiffies_to_msecs(q->rq_timeout));
}
static ssize_t queue_io_timeout_store(struct request_queue *q, const char *page,
				  size_t count)
{
	unsigned int val;
	int err;

	err = kstrtou32(page, 10, &val);
	if (err || val == 0)
		return -EINVAL;

	blk_queue_rq_timeout(q, msecs_to_jiffies(val));

	return count;
}
static ssize_t queue_wb_lat_show(struct request_queue *q, char *page)
{
	if (!wbt_rq_qos(q))
		return -EINVAL;

	return sprintf(page, "%llu\n", div_u64(wbt_get_min_lat(q), 1000));
}
static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
				  size_t count)
{
	struct rq_qos *rqos;
	ssize_t ret;
	s64 val;

	ret = queue_var_store64(&val, page);
	if (ret < 0)
		return ret;
	if (val < -1)
		return -EINVAL;

	rqos = wbt_rq_qos(q);
	if (!rqos) {
		ret = wbt_init(q);
		if (ret)
			return ret;
	}

	if (val == -1)
		val = wbt_default_latency_nsec(q);
	else if (val >= 0)
		val *= 1000ULL;

	if (wbt_get_min_lat(q) == val)
		return count;

	/*
	 * Ensure that the queue is idled, in case the latency update
	 * ends up either enabling or disabling wbt completely. We can't
	 * have IO inflight if that happens.
	 */
	blk_mq_freeze_queue(q);
	blk_mq_quiesce_queue(q);

	wbt_set_min_lat(q, val);

	blk_mq_unquiesce_queue(q);
	blk_mq_unfreeze_queue(q);

	return count;
}
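
/*
 * Note: wbt_lat_usec is exchanged with user space in microseconds while wbt
 * tracks nanoseconds internally, hence the val * 1000 and div_u64(..., 1000)
 * conversions above; writing -1 restores the device default latency.
 */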
static ssize_t queue_wc_show(struct request_queue *q, char *page)
{
	if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
		return sprintf(page, "write back\n");

	return sprintf(page, "write through\n");
}
static ssize_t queue_wc_store(struct request_queue *q, const char *page,
			      size_t count)
{
	int set = -1;

	if (!strncmp(page, "write back", 10))
		set = 1;
	else if (!strncmp(page, "write through", 13) ||
		 !strncmp(page, "none", 4))
		set = 0;

	if (set == -1)
		return -EINVAL;

	if (set)
		blk_queue_flag_set(QUEUE_FLAG_WC, q);
	else
		blk_queue_flag_clear(QUEUE_FLAG_WC, q);

	return count;
}
static ssize_t queue_fua_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%u\n", test_bit(QUEUE_FLAG_FUA, &q->queue_flags));
}

static ssize_t queue_dax_show(struct request_queue *q, char *page)
{
	return queue_var_show(blk_queue_dax(q), page);
}
#define QUEUE_RO_ENTRY(_prefix, _name)			\
static struct queue_sysfs_entry _prefix##_entry = {	\
	.attr	= { .name = _name, .mode = 0444 },	\
	.show	= _prefix##_show,			\
};

#define QUEUE_RW_ENTRY(_prefix, _name)			\
static struct queue_sysfs_entry _prefix##_entry = {	\
	.attr	= { .name = _name, .mode = 0644 },	\
	.show	= _prefix##_show,			\
	.store	= _prefix##_store,			\
};
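
/*
 * Example expansion: QUEUE_RW_ENTRY(queue_requests, "nr_requests") below
 * defines queue_requests_entry, a 0644 "nr_requests" attribute backed by
 * queue_requests_show() and queue_requests_store().
 */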
QUEUE_RW_ENTRY(queue_requests, "nr_requests");
QUEUE_RW_ENTRY(queue_ra, "read_ahead_kb");
QUEUE_RW_ENTRY(queue_max_sectors, "max_sectors_kb");
QUEUE_RO_ENTRY(queue_max_hw_sectors, "max_hw_sectors_kb");
QUEUE_RO_ENTRY(queue_max_segments, "max_segments");
QUEUE_RO_ENTRY(queue_max_integrity_segments, "max_integrity_segments");
QUEUE_RO_ENTRY(queue_max_segment_size, "max_segment_size");
QUEUE_RW_ENTRY(elv_iosched, "scheduler");

QUEUE_RO_ENTRY(queue_logical_block_size, "logical_block_size");
QUEUE_RO_ENTRY(queue_physical_block_size, "physical_block_size");
QUEUE_RO_ENTRY(queue_chunk_sectors, "chunk_sectors");
QUEUE_RO_ENTRY(queue_io_min, "minimum_io_size");
QUEUE_RO_ENTRY(queue_io_opt, "optimal_io_size");

QUEUE_RO_ENTRY(queue_max_discard_segments, "max_discard_segments");
QUEUE_RO_ENTRY(queue_discard_granularity, "discard_granularity");
QUEUE_RO_ENTRY(queue_discard_max_hw, "discard_max_hw_bytes");
QUEUE_RW_ENTRY(queue_discard_max, "discard_max_bytes");
QUEUE_RO_ENTRY(queue_discard_zeroes_data, "discard_zeroes_data");

QUEUE_RO_ENTRY(queue_write_same_max, "write_same_max_bytes");
QUEUE_RO_ENTRY(queue_write_zeroes_max, "write_zeroes_max_bytes");
QUEUE_RO_ENTRY(queue_zone_append_max, "zone_append_max_bytes");
QUEUE_RO_ENTRY(queue_zone_write_granularity, "zone_write_granularity");

QUEUE_RO_ENTRY(queue_zoned, "zoned");
QUEUE_RO_ENTRY(queue_nr_zones, "nr_zones");
QUEUE_RO_ENTRY(queue_max_open_zones, "max_open_zones");
QUEUE_RO_ENTRY(queue_max_active_zones, "max_active_zones");

QUEUE_RW_ENTRY(queue_nomerges, "nomerges");
QUEUE_RW_ENTRY(queue_rq_affinity, "rq_affinity");
QUEUE_RW_ENTRY(queue_poll, "io_poll");
QUEUE_RW_ENTRY(queue_poll_delay, "io_poll_delay");
QUEUE_RW_ENTRY(queue_wc, "write_cache");
QUEUE_RO_ENTRY(queue_fua, "fua");
QUEUE_RO_ENTRY(queue_dax, "dax");
QUEUE_RW_ENTRY(queue_io_timeout, "io_timeout");
QUEUE_RW_ENTRY(queue_wb_lat, "wbt_lat_usec");
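
/*
 * The queue_*_entry attributes defined in this file are collected into
 * queue_attrs[] below and exposed under /sys/block/<disk>/queue/, subject to
 * queue_attr_visible().
 */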
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
QUEUE_RW_ENTRY(blk_throtl_sample_time, "throttle_sample_time");
#endif
/* legacy alias for logical_block_size: */
static struct queue_sysfs_entry queue_hw_sector_size_entry = {
	.attr = {.name = "hw_sector_size", .mode = 0444 },
	.show = queue_logical_block_size_show,
};

QUEUE_RW_ENTRY(queue_nonrot, "rotational");
QUEUE_RW_ENTRY(queue_iostats, "iostats");
QUEUE_RW_ENTRY(queue_random, "add_random");
QUEUE_RW_ENTRY(queue_stable_writes, "stable_writes");
static struct attribute *queue_attrs[] = {
	&queue_requests_entry.attr,
	&queue_ra_entry.attr,
	&queue_max_hw_sectors_entry.attr,
	&queue_max_sectors_entry.attr,
	&queue_max_segments_entry.attr,
	&queue_max_discard_segments_entry.attr,
	&queue_max_integrity_segments_entry.attr,
	&queue_max_segment_size_entry.attr,
	&elv_iosched_entry.attr,
	&queue_hw_sector_size_entry.attr,
	&queue_logical_block_size_entry.attr,
	&queue_physical_block_size_entry.attr,
	&queue_chunk_sectors_entry.attr,
	&queue_io_min_entry.attr,
	&queue_io_opt_entry.attr,
	&queue_discard_granularity_entry.attr,
	&queue_discard_max_entry.attr,
	&queue_discard_max_hw_entry.attr,
	&queue_discard_zeroes_data_entry.attr,
	&queue_write_same_max_entry.attr,
	&queue_write_zeroes_max_entry.attr,
	&queue_zone_append_max_entry.attr,
	&queue_zone_write_granularity_entry.attr,
	&queue_nonrot_entry.attr,
	&queue_zoned_entry.attr,
	&queue_nr_zones_entry.attr,
	&queue_max_open_zones_entry.attr,
	&queue_max_active_zones_entry.attr,
	&queue_nomerges_entry.attr,
	&queue_rq_affinity_entry.attr,
	&queue_iostats_entry.attr,
	&queue_stable_writes_entry.attr,
	&queue_random_entry.attr,
	&queue_poll_entry.attr,
	&queue_wc_entry.attr,
	&queue_fua_entry.attr,
	&queue_dax_entry.attr,
	&queue_wb_lat_entry.attr,
	&queue_poll_delay_entry.attr,
	&queue_io_timeout_entry.attr,
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
	&blk_throtl_sample_time_entry.attr,
#endif
	NULL,
};
static umode_t queue_attr_visible(struct kobject *kobj, struct attribute *attr,
				int n)
{
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);

	if (attr == &queue_io_timeout_entry.attr &&
		(!q->mq_ops || !q->mq_ops->timeout))
		return 0;

	if ((attr == &queue_max_open_zones_entry.attr ||
	     attr == &queue_max_active_zones_entry.attr) &&
	    !blk_queue_is_zoned(q))
		return 0;

	return attr->mode;
}

static struct attribute_group queue_attr_group = {
	.attrs = queue_attrs,
	.is_visible = queue_attr_visible,
};
#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)

static ssize_t
queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);
	ssize_t res;

	if (!entry->show)
		return -EIO;
	mutex_lock(&q->sysfs_lock);
	res = entry->show(q, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}
static ssize_t
queue_attr_store(struct kobject *kobj, struct attribute *attr,
		    const char *page, size_t length)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct request_queue *q;
	ssize_t res;

	if (!entry->store)
		return -EIO;

	q = container_of(kobj, struct request_queue, kobj);
	mutex_lock(&q->sysfs_lock);
	res = entry->store(q, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}
static void blk_free_queue_rcu(struct rcu_head *rcu_head)
{
	struct request_queue *q = container_of(rcu_head, struct request_queue,
					       rcu_head);
	kmem_cache_free(blk_requestq_cachep, q);
}
/* Unconfigure the I/O scheduler and dissociate from the cgroup controller. */
static void blk_exit_queue(struct request_queue *q)
{
	/*
	 * Since the I/O scheduler exit code may access cgroup information,
	 * perform I/O scheduler exit before disassociating from the block
	 * cgroup controller.
	 */
	if (q->elevator) {
		ioc_clear_queue(q);
		__elevator_exit(q, q->elevator);
	}

	/*
	 * Remove all references to @q from the block cgroup controller before
	 * restoring @q->queue_lock to avoid that restoring this pointer causes
	 * e.g. blkcg_print_blkgs() to crash.
	 */
	blkcg_exit_queue(q);

	/*
	 * Since the cgroup code may dereference the @q->backing_dev_info
	 * pointer, only decrease its reference count after having removed the
	 * association with the block cgroup controller.
	 */
	bdi_put(q->backing_dev_info);
}
/**
 * blk_release_queue - releases all allocated resources of the request_queue
 * @kobj: pointer to a kobject, whose container is a request_queue
 *
 * This function releases all allocated resources of the request queue.
 *
 * The struct request_queue refcount is incremented with blk_get_queue() and
 * decremented with blk_put_queue(). Once the refcount reaches 0 this function
 * is called.
 *
 * For drivers that have a request_queue on a gendisk and added with
 * __device_add_disk() the refcount to request_queue will reach 0 with
 * the last put_disk() called by the driver. For drivers which don't use
 * __device_add_disk() this happens with blk_cleanup_queue().
 *
 * Drivers exist which depend on the release of the request_queue to be
 * synchronous, it should not be deferred.
 *
 * Context: can sleep
 */
static void blk_release_queue(struct kobject *kobj)
{
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);

	might_sleep();

	if (test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags))
		blk_stat_remove_callback(q, q->poll_cb);
	blk_stat_free_callback(q->poll_cb);

	blk_free_queue_stats(q->stats);

	if (queue_is_mq(q)) {
		struct blk_mq_hw_ctx *hctx;
		int i;

		cancel_delayed_work_sync(&q->requeue_work);

		queue_for_each_hw_ctx(q, hctx, i)
			cancel_delayed_work_sync(&hctx->run_work);
	}

	blk_exit_queue(q);

	blk_queue_free_zone_bitmaps(q);

	if (queue_is_mq(q))
		blk_mq_release(q);

	blk_trace_shutdown(q);
	mutex_lock(&q->debugfs_mutex);
	debugfs_remove_recursive(q->debugfs_dir);
	mutex_unlock(&q->debugfs_mutex);

	if (queue_is_mq(q))
		blk_mq_debugfs_unregister(q);

	bioset_exit(&q->bio_split);

	ida_simple_remove(&blk_queue_ida, q->id);
	call_rcu(&q->rcu_head, blk_free_queue_rcu);
}
static const struct sysfs_ops queue_sysfs_ops = {
	.show	= queue_attr_show,
	.store	= queue_attr_store,
};

struct kobj_type blk_queue_ktype = {
	.sysfs_ops	= &queue_sysfs_ops,
	.release	= blk_release_queue,
};
/**
 * blk_register_queue - register a block layer queue with sysfs
 * @disk: Disk of which the request queue should be registered with sysfs.
 */
int blk_register_queue(struct gendisk *disk)
{
	int ret;
	struct device *dev = disk_to_dev(disk);
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return -ENXIO;

	WARN_ONCE(blk_queue_registered(q),
		  "%s is registering an already registered queue\n",
		  kobject_name(&dev->kobj));

	/*
	 * SCSI probing may synchronously create and destroy a lot of
	 * request_queues for non-existent devices. Shutting down a fully
	 * functional queue takes measurable wallclock time as RCU grace
	 * periods are involved. To avoid excessive latency in these
	 * cases, a request_queue starts out in a degraded mode which is
	 * faster to shut down and is made fully functional here as
	 * request_queues for non-existent devices never get registered.
	 */
	if (!blk_queue_init_done(q)) {
		blk_queue_flag_set(QUEUE_FLAG_INIT_DONE, q);
		percpu_ref_switch_to_percpu(&q->q_usage_counter);
	}

	blk_queue_update_readahead(q);

	ret = blk_trace_init_sysfs(dev);
	if (ret)
		return ret;

	mutex_lock(&q->sysfs_dir_lock);

	ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
	if (ret < 0) {
		blk_trace_remove_sysfs(dev);
		goto unlock;
	}

	ret = sysfs_create_group(&q->kobj, &queue_attr_group);
	if (ret) {
		blk_trace_remove_sysfs(dev);
		kobject_del(&q->kobj);
		kobject_put(&dev->kobj);
		goto unlock;
	}

	mutex_lock(&q->debugfs_mutex);
	q->debugfs_dir = debugfs_create_dir(kobject_name(q->kobj.parent),
					    blk_debugfs_root);
	mutex_unlock(&q->debugfs_mutex);

	if (queue_is_mq(q)) {
		__blk_mq_register_dev(dev, q);
		blk_mq_debugfs_register(q);
	}

	mutex_lock(&q->sysfs_lock);
	if (q->elevator) {
		ret = elv_register_queue(q, false);
		if (ret) {
			mutex_unlock(&q->sysfs_lock);
			mutex_unlock(&q->sysfs_dir_lock);
			kobject_del(&q->kobj);
			blk_trace_remove_sysfs(dev);
			kobject_put(&dev->kobj);
			return ret;
		}
	}

	blk_queue_flag_set(QUEUE_FLAG_REGISTERED, q);
	wbt_enable_default(q);
	blk_throtl_register_queue(q);

	/* Now everything is ready and send out KOBJ_ADD uevent */
	kobject_uevent(&q->kobj, KOBJ_ADD);
	if (q->elevator)
		kobject_uevent(&q->elevator->kobj, KOBJ_ADD);
	mutex_unlock(&q->sysfs_lock);

	ret = 0;
unlock:
	mutex_unlock(&q->sysfs_dir_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(blk_register_queue);
/**
 * blk_unregister_queue - counterpart of blk_register_queue()
 * @disk: Disk of which the request queue should be unregistered from sysfs.
 *
 * Note: the caller is responsible for guaranteeing that this function is called
 * after blk_register_queue() has finished.
 */
void blk_unregister_queue(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return;

	/* Return early if disk->queue was never registered. */
	if (!blk_queue_registered(q))
		return;

	/*
	 * Since sysfs_remove_dir() prevents adding new directory entries
	 * before removal of existing entries starts, protect against
	 * concurrent elv_iosched_store() calls.
	 */
	mutex_lock(&q->sysfs_lock);
	blk_queue_flag_clear(QUEUE_FLAG_REGISTERED, q);
	mutex_unlock(&q->sysfs_lock);

	mutex_lock(&q->sysfs_dir_lock);
	/*
	 * Remove the sysfs attributes before unregistering the queue data
	 * structures that can be modified through sysfs.
	 */
	if (queue_is_mq(q))
		blk_mq_unregister_dev(disk_to_dev(disk), q);

	kobject_uevent(&q->kobj, KOBJ_REMOVE);
	kobject_del(&q->kobj);
	blk_trace_remove_sysfs(disk_to_dev(disk));

	mutex_lock(&q->sysfs_lock);
	if (q->elevator)
		elv_unregister_queue(q);
	mutex_unlock(&q->sysfs_lock);
	mutex_unlock(&q->sysfs_dir_lock);

	kobject_put(&disk_to_dev(disk)->kobj);
}