// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to sysfs handling
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/blktrace_api.h>
#include <linux/blk-mq.h>
#include <linux/blk-cgroup.h>
#include <linux/debugfs.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-wbt.h"

struct queue_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct request_queue *, char *);
	ssize_t (*store)(struct request_queue *, const char *, size_t);
};

static ssize_t
queue_var_show(unsigned long var, char *page)
{
	return sprintf(page, "%lu\n", var);
}

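/*
 * queue_var_store() parses an unsigned decimal value from a sysfs write.
 * Values above UINT_MAX are rejected, and on success the number of bytes
 * consumed is returned so it can be passed straight back to sysfs.
 */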
static ssize_t
queue_var_store(unsigned long *var, const char *page, size_t count)
{
	int err;
	unsigned long v;

	err = kstrtoul(page, 10, &v);
	if (err || v > UINT_MAX)
		return -EINVAL;

	*var = v;

	return count;
}

static ssize_t queue_var_store64(s64 *var, const char *page)
{
	int err;
	s64 v;

	err = kstrtos64(page, 10, &v);
	if (err < 0)
		return err;

	*var = v;
	return 0;
}

static ssize_t queue_requests_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->nr_requests, page);
}

static ssize_t
queue_requests_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long nr;
	int ret, err;

	if (!queue_is_mq(q))
		return -EINVAL;

	ret = queue_var_store(&nr, page, count);
	if (ret < 0)
		return ret;

	if (nr < BLKDEV_MIN_RQ)
		nr = BLKDEV_MIN_RQ;

	err = blk_mq_update_nr_requests(q, nr);
	if (err)
		return err;

	return ret;
}

static ssize_t queue_ra_show(struct request_queue *q, char *page)
{
	unsigned long ra_kb;

	if (!q->disk)
		return -EINVAL;
	ra_kb = q->disk->bdi->ra_pages << (PAGE_SHIFT - 10);
	return queue_var_show(ra_kb, page);
}

static ssize_t
queue_ra_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long ra_kb;
	ssize_t ret;

	if (!q->disk)
		return -EINVAL;
	ret = queue_var_store(&ra_kb, page, count);
	if (ret < 0)
		return ret;
	q->disk->bdi->ra_pages = ra_kb >> (PAGE_SHIFT - 10);
	return ret;
}

static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
{
	int max_sectors_kb = queue_max_sectors(q) >> 1;

	return queue_var_show(max_sectors_kb, page);
}

static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_segments(q), page);
}

static ssize_t queue_max_discard_segments_show(struct request_queue *q,
		char *page)
{
	return queue_var_show(queue_max_discard_segments(q), page);
}

static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.max_integrity_segments, page);
}

static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_segment_size(q), page);
}

static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_logical_block_size(q), page);
}

static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_physical_block_size(q), page);
}

static ssize_t queue_chunk_sectors_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.chunk_sectors, page);
}

static ssize_t queue_io_min_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_min(q), page);
}

static ssize_t queue_io_opt_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_opt(q), page);
}

static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.discard_granularity, page);
}

static ssize_t queue_discard_max_hw_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long)q->limits.max_hw_discard_sectors << 9);
}

static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long)q->limits.max_discard_sectors << 9);
}

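/*
 * The value written to discard_max_bytes is in bytes.  It must be aligned
 * to the discard granularity and is capped by the hardware limit exposed
 * in discard_max_hw_bytes.
 */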
static ssize_t queue_discard_max_store(struct request_queue *q,
				       const char *page, size_t count)
{
	unsigned long max_discard;
	ssize_t ret = queue_var_store(&max_discard, page, count);

	if (ret < 0)
		return ret;

	if (max_discard & (q->limits.discard_granularity - 1))
		return -EINVAL;

	max_discard >>= 9;
	if (max_discard > UINT_MAX)
		return -EINVAL;

	if (max_discard > q->limits.max_hw_discard_sectors)
		max_discard = q->limits.max_hw_discard_sectors;

	q->limits.max_discard_sectors = max_discard;
	return ret;
}

static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)
{
	return queue_var_show(0, page);
}

static ssize_t queue_write_same_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long)q->limits.max_write_same_sectors << 9);
}

static ssize_t queue_write_zeroes_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long)q->limits.max_write_zeroes_sectors << 9);
}

static ssize_t queue_zone_write_granularity_show(struct request_queue *q,
						 char *page)
{
	return queue_var_show(queue_zone_write_granularity(q), page);
}

static ssize_t queue_zone_append_max_show(struct request_queue *q, char *page)
{
	unsigned long long max_sectors = q->limits.max_zone_append_sectors;

	return sprintf(page, "%llu\n", max_sectors << SECTOR_SHIFT);
}

static ssize_t
queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long max_sectors_kb,
		max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
			page_kb = 1 << (PAGE_SHIFT - 10);
	ssize_t ret = queue_var_store(&max_sectors_kb, page, count);

	if (ret < 0)
		return ret;

	max_hw_sectors_kb = min_not_zero(max_hw_sectors_kb, (unsigned long)
					 q->limits.max_dev_sectors >> 1);

	if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
		return -EINVAL;

	spin_lock_irq(&q->queue_lock);
	q->limits.max_sectors = max_sectors_kb << 1;
	if (q->disk)
		q->disk->bdi->io_pages = max_sectors_kb >> (PAGE_SHIFT - 10);
	spin_unlock_irq(&q->queue_lock);

	return ret;
}

static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
{
	int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;

	return queue_var_show(max_hw_sectors_kb, page);
}

static ssize_t queue_virt_boundary_mask_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.virt_boundary_mask, page);
}

#define QUEUE_SYSFS_BIT_FNS(name, flag, neg)				\
static ssize_t								\
queue_##name##_show(struct request_queue *q, char *page)		\
{									\
	int bit;							\
	bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags);		\
	return queue_var_show(neg ? !bit : bit, page);			\
}									\
static ssize_t								\
queue_##name##_store(struct request_queue *q, const char *page, size_t count) \
{									\
	unsigned long val;						\
	ssize_t ret;							\
	ret = queue_var_store(&val, page, count);			\
	if (ret < 0)							\
		return ret;						\
	if (neg)							\
		val = !val;						\
									\
	if (val)							\
		blk_queue_flag_set(QUEUE_FLAG_##flag, q);		\
	else								\
		blk_queue_flag_clear(QUEUE_FLAG_##flag, q);		\
	return ret;							\
}

QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1);
QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0);
QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0);
QUEUE_SYSFS_BIT_FNS(stable_writes, STABLE_WRITES, 0);
#undef QUEUE_SYSFS_BIT_FNS

static ssize_t queue_zoned_show(struct request_queue *q, char *page)
{
	switch (blk_queue_zoned_model(q)) {
	case BLK_ZONED_HA:
		return sprintf(page, "host-aware\n");
	case BLK_ZONED_HM:
		return sprintf(page, "host-managed\n");
	default:
		return sprintf(page, "none\n");
	}
}

static ssize_t queue_nr_zones_show(struct request_queue *q, char *page)
{
	return queue_var_show(blk_queue_nr_zones(q), page);
}

static ssize_t queue_max_open_zones_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_open_zones(q), page);
}

static ssize_t queue_max_active_zones_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_active_zones(q), page);
}

static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
{
	return queue_var_show((blk_queue_nomerges(q) << 1) |
			       blk_queue_noxmerges(q), page);
}

static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
				    size_t count)
{
	unsigned long nm;
	ssize_t ret = queue_var_store(&nm, page, count);

	if (ret < 0)
		return ret;

	blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
	blk_queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
	if (nm == 2)
		blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	else if (nm)
		blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);

	return ret;
}

static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
{
	bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);
	bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags);

	return queue_var_show(set << force, page);
}

static ssize_t
queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
{
	ssize_t ret = -EINVAL;
#ifdef CONFIG_SMP
	unsigned long val;

	ret = queue_var_store(&val, page, count);
	if (ret < 0)
		return ret;

	if (val == 2) {
		blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 1) {
		blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 0) {
		blk_queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	}
#endif
	return ret;
}

static ssize_t queue_poll_delay_show(struct request_queue *q, char *page)
{
	int val;

	if (q->poll_nsec == BLK_MQ_POLL_CLASSIC)
		val = BLK_MQ_POLL_CLASSIC;
	else
		val = q->poll_nsec / 1000;

	return sprintf(page, "%d\n", val);
}

static ssize_t queue_poll_delay_store(struct request_queue *q, const char *page,
				size_t count)
{
	int err, val;

	if (!q->mq_ops || !q->mq_ops->poll)
		return -EINVAL;

	err = kstrtoint(page, 10, &val);
	if (err < 0)
		return err;

	if (val == BLK_MQ_POLL_CLASSIC)
		q->poll_nsec = BLK_MQ_POLL_CLASSIC;
	else if (val >= 0)
		q->poll_nsec = val * 1000;
	else
		return -EINVAL;

	return count;
}

static ssize_t queue_poll_show(struct request_queue *q, char *page)
{
	return queue_var_show(test_bit(QUEUE_FLAG_POLL, &q->queue_flags), page);
}

static ssize_t queue_poll_store(struct request_queue *q, const char *page,
				size_t count)
{
	unsigned long poll_on;
	ssize_t ret;

	if (!q->tag_set || q->tag_set->nr_maps <= HCTX_TYPE_POLL ||
	    !q->tag_set->map[HCTX_TYPE_POLL].nr_queues)
		return -EINVAL;

	ret = queue_var_store(&poll_on, page, count);
	if (ret < 0)
		return ret;

	if (poll_on) {
		blk_queue_flag_set(QUEUE_FLAG_POLL, q);
	} else {
		blk_mq_freeze_queue(q);
		blk_queue_flag_clear(QUEUE_FLAG_POLL, q);
		blk_mq_unfreeze_queue(q);
	}

	return ret;
}

static ssize_t queue_io_timeout_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%u\n", jiffies_to_msecs(q->rq_timeout));
}

static ssize_t queue_io_timeout_store(struct request_queue *q, const char *page,
				  size_t count)
{
	unsigned int val;
	int err;

	err = kstrtou32(page, 10, &val);
	if (err || val == 0)
		return -EINVAL;

	blk_queue_rq_timeout(q, msecs_to_jiffies(val));

	return count;
}

static ssize_t queue_wb_lat_show(struct request_queue *q, char *page)
{
	if (!wbt_rq_qos(q))
		return -EINVAL;

	return sprintf(page, "%llu\n", div_u64(wbt_get_min_lat(q), 1000));
}

static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
				  size_t count)
{
	struct rq_qos *rqos;
	ssize_t ret;
	s64 val;

	ret = queue_var_store64(&val, page);
	if (ret < 0)
		return ret;
	if (val < -1)
		return -EINVAL;

	rqos = wbt_rq_qos(q);
	if (!rqos) {
		ret = wbt_init(q);
		if (ret)
			return ret;
	}

	if (val == -1)
		val = wbt_default_latency_nsec(q);
	else if (val >= 0)
		val *= 1000ULL;

	if (wbt_get_min_lat(q) == val)
		return count;

	/*
	 * Ensure that the queue is idled, in case the latency update
	 * ends up either enabling or disabling wbt completely. We can't
	 * have IO inflight if that happens.
	 */
	blk_mq_freeze_queue(q);
	blk_mq_quiesce_queue(q);

	wbt_set_min_lat(q, val);

	blk_mq_unquiesce_queue(q);
	blk_mq_unfreeze_queue(q);

	return count;
}

static ssize_t queue_wc_show(struct request_queue *q, char *page)
{
	if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
		return sprintf(page, "write back\n");

	return sprintf(page, "write through\n");
}

static ssize_t queue_wc_store(struct request_queue *q, const char *page,
			      size_t count)
{
	int set = -1;

	if (!strncmp(page, "write back", 10))
		set = 1;
	else if (!strncmp(page, "write through", 13) ||
		 !strncmp(page, "none", 4))
		set = 0;

	if (set == -1)
		return -EINVAL;

	if (set)
		blk_queue_flag_set(QUEUE_FLAG_WC, q);
	else
		blk_queue_flag_clear(QUEUE_FLAG_WC, q);

	return count;
}

static ssize_t queue_fua_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%u\n", test_bit(QUEUE_FLAG_FUA, &q->queue_flags));
}

static ssize_t queue_dax_show(struct request_queue *q, char *page)
{
	return queue_var_show(blk_queue_dax(q), page);
}

#define QUEUE_RO_ENTRY(_prefix, _name)			\
static struct queue_sysfs_entry _prefix##_entry = {	\
	.attr	= { .name = _name, .mode = 0444 },	\
	.show	= _prefix##_show,			\
};

#define QUEUE_RW_ENTRY(_prefix, _name)			\
static struct queue_sysfs_entry _prefix##_entry = {	\
	.attr	= { .name = _name, .mode = 0644 },	\
	.show	= _prefix##_show,			\
	.store	= _prefix##_store,			\
};

QUEUE_RW_ENTRY(queue_requests, "nr_requests");
QUEUE_RW_ENTRY(queue_ra, "read_ahead_kb");
QUEUE_RW_ENTRY(queue_max_sectors, "max_sectors_kb");
QUEUE_RO_ENTRY(queue_max_hw_sectors, "max_hw_sectors_kb");
QUEUE_RO_ENTRY(queue_max_segments, "max_segments");
QUEUE_RO_ENTRY(queue_max_integrity_segments, "max_integrity_segments");
QUEUE_RO_ENTRY(queue_max_segment_size, "max_segment_size");
QUEUE_RW_ENTRY(elv_iosched, "scheduler");

QUEUE_RO_ENTRY(queue_logical_block_size, "logical_block_size");
QUEUE_RO_ENTRY(queue_physical_block_size, "physical_block_size");
QUEUE_RO_ENTRY(queue_chunk_sectors, "chunk_sectors");
QUEUE_RO_ENTRY(queue_io_min, "minimum_io_size");
QUEUE_RO_ENTRY(queue_io_opt, "optimal_io_size");

QUEUE_RO_ENTRY(queue_max_discard_segments, "max_discard_segments");
QUEUE_RO_ENTRY(queue_discard_granularity, "discard_granularity");
QUEUE_RO_ENTRY(queue_discard_max_hw, "discard_max_hw_bytes");
QUEUE_RW_ENTRY(queue_discard_max, "discard_max_bytes");
QUEUE_RO_ENTRY(queue_discard_zeroes_data, "discard_zeroes_data");

QUEUE_RO_ENTRY(queue_write_same_max, "write_same_max_bytes");
QUEUE_RO_ENTRY(queue_write_zeroes_max, "write_zeroes_max_bytes");
QUEUE_RO_ENTRY(queue_zone_append_max, "zone_append_max_bytes");
QUEUE_RO_ENTRY(queue_zone_write_granularity, "zone_write_granularity");

QUEUE_RO_ENTRY(queue_zoned, "zoned");
QUEUE_RO_ENTRY(queue_nr_zones, "nr_zones");
QUEUE_RO_ENTRY(queue_max_open_zones, "max_open_zones");
QUEUE_RO_ENTRY(queue_max_active_zones, "max_active_zones");

QUEUE_RW_ENTRY(queue_nomerges, "nomerges");
QUEUE_RW_ENTRY(queue_rq_affinity, "rq_affinity");
QUEUE_RW_ENTRY(queue_poll, "io_poll");
QUEUE_RW_ENTRY(queue_poll_delay, "io_poll_delay");
QUEUE_RW_ENTRY(queue_wc, "write_cache");
QUEUE_RO_ENTRY(queue_fua, "fua");
QUEUE_RO_ENTRY(queue_dax, "dax");
QUEUE_RW_ENTRY(queue_io_timeout, "io_timeout");
QUEUE_RW_ENTRY(queue_wb_lat, "wbt_lat_usec");
QUEUE_RO_ENTRY(queue_virt_boundary_mask, "virt_boundary_mask");

#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
QUEUE_RW_ENTRY(blk_throtl_sample_time, "throttle_sample_time");
#endif

/* legacy alias for logical_block_size: */
static struct queue_sysfs_entry queue_hw_sector_size_entry = {
	.attr = {.name = "hw_sector_size", .mode = 0444 },
	.show = queue_logical_block_size_show,
};

QUEUE_RW_ENTRY(queue_nonrot, "rotational");
QUEUE_RW_ENTRY(queue_iostats, "iostats");
QUEUE_RW_ENTRY(queue_random, "add_random");
QUEUE_RW_ENTRY(queue_stable_writes, "stable_writes");

static struct attribute *queue_attrs[] = {
	&queue_requests_entry.attr,
	&queue_ra_entry.attr,
	&queue_max_hw_sectors_entry.attr,
	&queue_max_sectors_entry.attr,
	&queue_max_segments_entry.attr,
	&queue_max_discard_segments_entry.attr,
	&queue_max_integrity_segments_entry.attr,
	&queue_max_segment_size_entry.attr,
	&elv_iosched_entry.attr,
	&queue_hw_sector_size_entry.attr,
	&queue_logical_block_size_entry.attr,
	&queue_physical_block_size_entry.attr,
	&queue_chunk_sectors_entry.attr,
	&queue_io_min_entry.attr,
	&queue_io_opt_entry.attr,
	&queue_discard_granularity_entry.attr,
	&queue_discard_max_entry.attr,
	&queue_discard_max_hw_entry.attr,
	&queue_discard_zeroes_data_entry.attr,
	&queue_write_same_max_entry.attr,
	&queue_write_zeroes_max_entry.attr,
	&queue_zone_append_max_entry.attr,
	&queue_zone_write_granularity_entry.attr,
	&queue_nonrot_entry.attr,
	&queue_zoned_entry.attr,
	&queue_nr_zones_entry.attr,
	&queue_max_open_zones_entry.attr,
	&queue_max_active_zones_entry.attr,
	&queue_nomerges_entry.attr,
	&queue_rq_affinity_entry.attr,
	&queue_iostats_entry.attr,
	&queue_stable_writes_entry.attr,
	&queue_random_entry.attr,
	&queue_poll_entry.attr,
	&queue_wc_entry.attr,
	&queue_fua_entry.attr,
	&queue_dax_entry.attr,
	&queue_wb_lat_entry.attr,
	&queue_poll_delay_entry.attr,
	&queue_io_timeout_entry.attr,
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
	&blk_throtl_sample_time_entry.attr,
#endif
	&queue_virt_boundary_mask_entry.attr,
	NULL,
};

static umode_t queue_attr_visible(struct kobject *kobj, struct attribute *attr,
				int n)
{
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);

	if (attr == &queue_io_timeout_entry.attr &&
		(!q->mq_ops || !q->mq_ops->timeout))
		return 0;

	if ((attr == &queue_max_open_zones_entry.attr ||
	     attr == &queue_max_active_zones_entry.attr) &&
	    !blk_queue_is_zoned(q))
		return 0;

	return attr->mode;
}

static struct attribute_group queue_attr_group = {
	.attrs = queue_attrs,
	.is_visible = queue_attr_visible,
};

#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)

static ssize_t
queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);
	ssize_t res;

	if (!entry->show)
		return -EIO;
	mutex_lock(&q->sysfs_lock);
	res = entry->show(q, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t
queue_attr_store(struct kobject *kobj, struct attribute *attr,
		    const char *page, size_t length)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct request_queue *q;
	ssize_t res;

	if (!entry->store)
		return -EIO;

	q = container_of(kobj, struct request_queue, kobj);
	mutex_lock(&q->sysfs_lock);
	res = entry->store(q, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static void blk_free_queue_rcu(struct rcu_head *rcu_head)
{
	struct request_queue *q = container_of(rcu_head, struct request_queue,
					       rcu_head);

	kmem_cache_free(blk_requestq_cachep, q);
}

/* Unconfigure the I/O scheduler and dissociate from the cgroup controller. */
static void blk_exit_queue(struct request_queue *q)
{
	/*
	 * Since the I/O scheduler exit code may access cgroup information,
	 * perform I/O scheduler exit before disassociating from the block
	 * cgroup controller.
	 */
	if (q->elevator) {
		ioc_clear_queue(q);
		__elevator_exit(q, q->elevator);
	}

	/*
	 * Remove all references to @q from the block cgroup controller before
	 * restoring @q->queue_lock to avoid that restoring this pointer causes
	 * e.g. blkcg_print_blkgs() to crash.
	 */
	blkcg_exit_queue(q);
}

/**
 * blk_release_queue - releases all allocated resources of the request_queue
 * @kobj: pointer to a kobject, whose container is a request_queue
 *
 * This function releases all allocated resources of the request queue.
 *
 * The struct request_queue refcount is incremented with blk_get_queue() and
 * decremented with blk_put_queue(). Once the refcount reaches 0 this function
 * is called.
 *
 * For drivers that have a request_queue on a gendisk and added with
 * __device_add_disk() the refcount to request_queue will reach 0 with
 * the last put_disk() called by the driver. For drivers which don't use
 * __device_add_disk() this happens with blk_cleanup_queue().
 *
 * Drivers exist which depend on the release of the request_queue to be
 * synchronous, it should not be deferred.
 *
 * Context: can sleep
 */
static void blk_release_queue(struct kobject *kobj)
{
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);

	might_sleep();

	if (test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags))
		blk_stat_remove_callback(q, q->poll_cb);
	blk_stat_free_callback(q->poll_cb);

	blk_free_queue_stats(q->stats);

	blk_exit_queue(q);

	blk_queue_free_zone_bitmaps(q);

	if (queue_is_mq(q))
		blk_mq_release(q);

	blk_trace_shutdown(q);
	mutex_lock(&q->debugfs_mutex);
	debugfs_remove_recursive(q->debugfs_dir);
	mutex_unlock(&q->debugfs_mutex);

	if (queue_is_mq(q))
		blk_mq_debugfs_unregister(q);

	bioset_exit(&q->bio_split);

	ida_simple_remove(&blk_queue_ida, q->id);
	call_rcu(&q->rcu_head, blk_free_queue_rcu);
}

static const struct sysfs_ops queue_sysfs_ops = {
	.show	= queue_attr_show,
	.store	= queue_attr_store,
};

struct kobj_type blk_queue_ktype = {
	.sysfs_ops	= &queue_sysfs_ops,
	.release	= blk_release_queue,
};

/**
 * blk_register_queue - register a block layer queue with sysfs
 * @disk: Disk of which the request queue should be registered with sysfs.
 */
int blk_register_queue(struct gendisk *disk)
{
	int ret;
	struct device *dev = disk_to_dev(disk);
	struct request_queue *q = disk->queue;

	ret = blk_trace_init_sysfs(dev);
	if (ret)
		return ret;

	mutex_lock(&q->sysfs_dir_lock);

	ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
	if (ret < 0) {
		blk_trace_remove_sysfs(dev);
		goto unlock;
	}

	ret = sysfs_create_group(&q->kobj, &queue_attr_group);
	if (ret) {
		blk_trace_remove_sysfs(dev);
		kobject_del(&q->kobj);
		kobject_put(&dev->kobj);
		goto unlock;
	}

	mutex_lock(&q->debugfs_mutex);
	q->debugfs_dir = debugfs_create_dir(kobject_name(q->kobj.parent),
					    blk_debugfs_root);
	mutex_unlock(&q->debugfs_mutex);

	if (queue_is_mq(q)) {
		__blk_mq_register_dev(dev, q);
		blk_mq_debugfs_register(q);
	}

	mutex_lock(&q->sysfs_lock);
	if (q->elevator) {
		ret = elv_register_queue(q, false);
		if (ret) {
			mutex_unlock(&q->sysfs_lock);
			mutex_unlock(&q->sysfs_dir_lock);
			kobject_del(&q->kobj);
			blk_trace_remove_sysfs(dev);
			kobject_put(&dev->kobj);
			return ret;
		}
	}

	blk_queue_flag_set(QUEUE_FLAG_REGISTERED, q);
	wbt_enable_default(q);
	blk_throtl_register_queue(q);

	/* Now everything is ready and send out KOBJ_ADD uevent */
	kobject_uevent(&q->kobj, KOBJ_ADD);
	if (q->elevator)
		kobject_uevent(&q->elevator->kobj, KOBJ_ADD);
	mutex_unlock(&q->sysfs_lock);
	ret = 0;
unlock:
	mutex_unlock(&q->sysfs_dir_lock);

	/*
	 * SCSI probing may synchronously create and destroy a lot of
	 * request_queues for non-existent devices. Shutting down a fully
	 * functional queue takes measurable wallclock time as RCU grace
	 * periods are involved. To avoid excessive latency in these
	 * cases, a request_queue starts out in a degraded mode which is
	 * faster to shut down and is made fully functional here as
	 * request_queues for non-existent devices never get registered.
	 */
	if (!blk_queue_init_done(q)) {
		blk_queue_flag_set(QUEUE_FLAG_INIT_DONE, q);
		percpu_ref_switch_to_percpu(&q->q_usage_counter);
	}

	return ret;
}

/**
 * blk_unregister_queue - counterpart of blk_register_queue()
 * @disk: Disk of which the request queue should be unregistered from sysfs.
 *
 * Note: the caller is responsible for guaranteeing that this function is called
 * after blk_register_queue() has finished.
 */
void blk_unregister_queue(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return;

	/* Return early if disk->queue was never registered. */
	if (!blk_queue_registered(q))
		return;

	/*
	 * Since sysfs_remove_dir() prevents adding new directory entries
	 * before removal of existing entries starts, protect against
	 * concurrent elv_iosched_store() calls.
	 */
	mutex_lock(&q->sysfs_lock);
	blk_queue_flag_clear(QUEUE_FLAG_REGISTERED, q);
	mutex_unlock(&q->sysfs_lock);

	mutex_lock(&q->sysfs_dir_lock);
	/*
	 * Remove the sysfs attributes before unregistering the queue data
	 * structures that can be modified through sysfs.
	 */
	if (queue_is_mq(q))
		blk_mq_unregister_dev(disk_to_dev(disk), q);
	blk_trace_remove_sysfs(disk_to_dev(disk));

	mutex_lock(&q->sysfs_lock);
	if (q->elevator)
		elv_unregister_queue(q);
	mutex_unlock(&q->sysfs_lock);

	/* Now that we've deleted all child objects, we can delete the queue. */
	kobject_uevent(&q->kobj, KOBJ_REMOVE);
	kobject_del(&q->kobj);

	mutex_unlock(&q->sysfs_dir_lock);

	kobject_put(&disk_to_dev(disk)->kobj);
}