/*
 * Functions related to sysfs handling
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/blktrace_api.h>
#include <linux/blk-mq.h>
#include <linux/blk-cgroup.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-wbt.h"
struct queue_sysfs_entry {
        struct attribute attr;
        ssize_t (*show)(struct request_queue *, char *);
        ssize_t (*store)(struct request_queue *, const char *, size_t);
};
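
/*
 * Each queue_sysfs_entry backs one attribute file under
 * /sys/block/<disk>/queue/.  Illustrative example (device name is an
 * assumption, not taken from this file): reading
 * /sys/block/sda/queue/nr_requests invokes queue_requests_show(), and
 * "echo 256 > /sys/block/sda/queue/nr_requests" invokes
 * queue_requests_store() with page pointing at the user-supplied string.
 */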
static ssize_t
queue_var_show(unsigned long var, char *page)
{
        return sprintf(page, "%lu\n", var);
}
static ssize_t
queue_var_store(unsigned long *var, const char *page, size_t count)
{
        int err;
        unsigned long v;

        err = kstrtoul(page, 10, &v);
        if (err || v > UINT_MAX)
                return -EINVAL;

        *var = v;

        return count;
}
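
/*
 * Sketch of the helper contract used throughout this file: store helpers
 * return the byte count consumed on success and a negative errno (here
 * -EINVAL) on a malformed or out-of-range value, so a failed write such
 * as "echo junk > /sys/block/sda/queue/read_ahead_kb" (device name
 * illustrative) surfaces as a write error to the calling process.
 */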
static ssize_t queue_var_store64(s64 *var, const char *page)
{
        int err;
        s64 v;

        err = kstrtos64(page, 10, &v);
        if (err < 0)
                return err;

        *var = v;
        return 0;
}
static ssize_t queue_requests_show(struct request_queue *q, char *page)
{
        return queue_var_show(q->nr_requests, page);
}
static ssize_t
queue_requests_store(struct request_queue *q, const char *page, size_t count)
{
        unsigned long nr;
        int ret, err;

        if (!q->request_fn && !q->mq_ops)
                return -EINVAL;

        ret = queue_var_store(&nr, page, count);
        if (ret < 0)
                return ret;

        if (nr < BLKDEV_MIN_RQ)
                nr = BLKDEV_MIN_RQ;

        if (q->request_fn)
                err = blk_update_nr_requests(q, nr);
        else
                err = blk_mq_update_nr_requests(q, nr);

        if (err)
                return err;

        return ret;
}
static ssize_t queue_ra_show(struct request_queue *q, char *page)
{
        unsigned long ra_kb = q->backing_dev_info.ra_pages <<
                                        (PAGE_SHIFT - 10);

        return queue_var_show(ra_kb, page);
}
static ssize_t
queue_ra_store(struct request_queue *q, const char *page, size_t count)
{
        unsigned long ra_kb;
        ssize_t ret = queue_var_store(&ra_kb, page, count);

        if (ret < 0)
                return ret;

        q->backing_dev_info.ra_pages = ra_kb >> (PAGE_SHIFT - 10);

        return ret;
}
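
/*
 * read_ahead_kb is stored internally in pages.  Worked example, assuming
 * 4 KiB pages (PAGE_SHIFT == 12, so the shift above is 12 - 10 = 2):
 * writing 128 (KiB) yields ra_pages = 128 >> 2 = 32 pages, and reading
 * the attribute back computes 32 << 2 = 128 KiB.
 */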
static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
{
        int max_sectors_kb = queue_max_sectors(q) >> 1;

        return queue_var_show(max_sectors_kb, page);
}

static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_max_segments(q), page);
}

static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page)
{
        return queue_var_show(q->limits.max_integrity_segments, page);
}

static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
{
        if (blk_queue_cluster(q))
                return queue_var_show(queue_max_segment_size(q), page);

        return queue_var_show(PAGE_SIZE, page);
}
static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_logical_block_size(q), page);
}

static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_physical_block_size(q), page);
}

static ssize_t queue_chunk_sectors_show(struct request_queue *q, char *page)
{
        return queue_var_show(q->limits.chunk_sectors, page);
}

static ssize_t queue_io_min_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_io_min(q), page);
}

static ssize_t queue_io_opt_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_io_opt(q), page);
}

static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page)
{
        return queue_var_show(q->limits.discard_granularity, page);
}
static ssize_t queue_discard_max_hw_show(struct request_queue *q, char *page)
{
        return sprintf(page, "%llu\n",
                       (unsigned long long)q->limits.max_hw_discard_sectors << 9);
}

static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
{
        return sprintf(page, "%llu\n",
                       (unsigned long long)q->limits.max_discard_sectors << 9);
}
static ssize_t queue_discard_max_store(struct request_queue *q,
                                       const char *page, size_t count)
{
        unsigned long max_discard;
        ssize_t ret = queue_var_store(&max_discard, page, count);

        if (ret < 0)
                return ret;

        if (max_discard & (q->limits.discard_granularity - 1))
                return -EINVAL;

        max_discard >>= 9;
        if (max_discard > UINT_MAX)
                return -EINVAL;

        if (max_discard > q->limits.max_hw_discard_sectors)
                max_discard = q->limits.max_hw_discard_sectors;

        q->limits.max_discard_sectors = max_discard;
        return ret;
}
static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_discard_zeroes_data(q), page);
}

static ssize_t queue_write_same_max_show(struct request_queue *q, char *page)
{
        return sprintf(page, "%llu\n",
                       (unsigned long long)q->limits.max_write_same_sectors << 9);
}

static ssize_t queue_write_zeroes_max_show(struct request_queue *q, char *page)
{
        return sprintf(page, "%llu\n",
                       (unsigned long long)q->limits.max_write_zeroes_sectors << 9);
}
static ssize_t
queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
{
        unsigned long max_sectors_kb,
                max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
                        page_kb = 1 << (PAGE_SHIFT - 10);
        ssize_t ret = queue_var_store(&max_sectors_kb, page, count);

        if (ret < 0)
                return ret;

        max_hw_sectors_kb = min_not_zero(max_hw_sectors_kb, (unsigned long)
                                         q->limits.max_dev_sectors >> 1);

        if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
                return -EINVAL;

        spin_lock_irq(q->queue_lock);
        q->limits.max_sectors = max_sectors_kb << 1;
        q->backing_dev_info.io_pages = max_sectors_kb >> (PAGE_SHIFT - 10);
        spin_unlock_irq(q->queue_lock);

        return ret;
}
static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
{
        int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;

        return queue_var_show(max_hw_sectors_kb, page);
}
#define QUEUE_SYSFS_BIT_FNS(name, flag, neg)                            \
static ssize_t                                                          \
queue_show_##name(struct request_queue *q, char *page)                  \
{                                                                       \
        int bit;                                                        \
        bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags);             \
        return queue_var_show(neg ? !bit : bit, page);                  \
}                                                                       \
static ssize_t                                                          \
queue_store_##name(struct request_queue *q, const char *page, size_t count) \
{                                                                       \
        unsigned long val;                                              \
        ssize_t ret;                                                    \
        ret = queue_var_store(&val, page, count);                       \
        if (ret < 0)                                                    \
                return ret;                                             \
        if (neg)                                                        \
                val = !val;                                             \
                                                                        \
        spin_lock_irq(q->queue_lock);                                   \
        if (val)                                                        \
                queue_flag_set(QUEUE_FLAG_##flag, q);                   \
        else                                                            \
                queue_flag_clear(QUEUE_FLAG_##flag, q);                 \
        spin_unlock_irq(q->queue_lock);                                 \
        return ret;                                                     \
}

QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1);
QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0);
QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0);
#undef QUEUE_SYSFS_BIT_FNS
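
/*
 * For reference, QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1) above expands to
 * queue_show_nonrot()/queue_store_nonrot() operating on QUEUE_FLAG_NONROT.
 * Because neg is 1 the sysfs value is inverted: the "rotational" file
 * reads 0 when the NONROT flag is set, so (illustrative)
 * "echo 0 > /sys/block/sda/queue/rotational" sets QUEUE_FLAG_NONROT.
 */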
static ssize_t queue_zoned_show(struct request_queue *q, char *page)
{
        switch (blk_queue_zoned_model(q)) {
        case BLK_ZONED_HA:
                return sprintf(page, "host-aware\n");
        case BLK_ZONED_HM:
                return sprintf(page, "host-managed\n");
        default:
                return sprintf(page, "none\n");
        }
}
static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
{
        return queue_var_show((blk_queue_nomerges(q) << 1) |
                               blk_queue_noxmerges(q), page);
}
static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
                                    size_t count)
{
        unsigned long nm;
        ssize_t ret = queue_var_store(&nm, page, count);

        if (ret < 0)
                return ret;

        spin_lock_irq(q->queue_lock);
        queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
        queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
        if (nm == 2)
                queue_flag_set(QUEUE_FLAG_NOMERGES, q);
        else if (nm)
                queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
        spin_unlock_irq(q->queue_lock);

        return ret;
}
static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
{
        bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);
        bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags);

        return queue_var_show(set << force, page);
}
static ssize_t
queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
{
        ssize_t ret = -EINVAL;
#ifdef CONFIG_SMP
        unsigned long val;

        ret = queue_var_store(&val, page, count);
        if (ret < 0)
                return ret;

        spin_lock_irq(q->queue_lock);
        if (val == 2) {
                queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
                queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
        } else if (val == 1) {
                queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
                queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
        } else if (val == 0) {
                queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
                queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
        }
        spin_unlock_irq(q->queue_lock);
#endif
        return ret;
}
static ssize_t queue_poll_delay_show(struct request_queue *q, char *page)
{
        int val;

        if (q->poll_nsec == -1)
                val = -1;
        else
                val = q->poll_nsec / 1000;

        return sprintf(page, "%d\n", val);
}
static ssize_t queue_poll_delay_store(struct request_queue *q, const char *page,
                                      size_t count)
{
        int err, val;

        if (!q->mq_ops || !q->mq_ops->poll)
                return -EINVAL;

        err = kstrtoint(page, 10, &val);
        if (err < 0)
                return err;

        if (val == -1)
                q->poll_nsec = -1;
        else
                q->poll_nsec = val * 1000;

        return count;
}
static ssize_t queue_poll_show(struct request_queue *q, char *page)
{
        return queue_var_show(test_bit(QUEUE_FLAG_POLL, &q->queue_flags), page);
}
static ssize_t queue_poll_store(struct request_queue *q, const char *page,
                                size_t count)
{
        unsigned long poll_on;
        ssize_t ret;

        if (!q->mq_ops || !q->mq_ops->poll)
                return -EINVAL;

        ret = queue_var_store(&poll_on, page, count);
        if (ret < 0)
                return ret;

        spin_lock_irq(q->queue_lock);
        if (poll_on)
                queue_flag_set(QUEUE_FLAG_POLL, q);
        else
                queue_flag_clear(QUEUE_FLAG_POLL, q);
        spin_unlock_irq(q->queue_lock);

        return ret;
}
static ssize_t queue_wb_lat_show(struct request_queue *q, char *page)
{
        if (!q->rq_wb)
                return -EINVAL;

        return sprintf(page, "%llu\n", div_u64(q->rq_wb->min_lat_nsec, 1000));
}
static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
                                  size_t count)
{
        struct rq_wb *rwb;
        ssize_t ret;
        s64 val;

        rwb = q->rq_wb;
        if (!rwb)
                return -EINVAL;

        ret = queue_var_store64(&val, page);
        if (ret < 0)
                return ret;
        if (val < -1)
                return -EINVAL;

        if (val == -1)
                rwb->min_lat_nsec = wbt_default_latency_nsec(q);
        else if (val >= 0)
                rwb->min_lat_nsec = val * 1000ULL;

        if (rwb->enable_state == WBT_STATE_ON_DEFAULT)
                rwb->enable_state = WBT_STATE_ON_MANUAL;

        wbt_update_limits(rwb);
        return count;
}
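
/*
 * wbt_lat_usec semantics, as implemented above: writing -1 restores the
 * device-type default via wbt_default_latency_nsec(), any value >= 0 is
 * taken as microseconds and converted to nanoseconds (val * 1000ULL),
 * and a manual write flips enable_state from WBT_STATE_ON_DEFAULT to
 * WBT_STATE_ON_MANUAL so later heuristics won't override it.
 */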
static ssize_t queue_wc_show(struct request_queue *q, char *page)
{
        if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
                return sprintf(page, "write back\n");

        return sprintf(page, "write through\n");
}
static ssize_t queue_wc_store(struct request_queue *q, const char *page,
                              size_t count)
{
        int set = -1;

        if (!strncmp(page, "write back", 10))
                set = 1;
        else if (!strncmp(page, "write through", 13) ||
                 !strncmp(page, "none", 4))
                set = 0;

        if (set == -1)
                return -EINVAL;

        spin_lock_irq(q->queue_lock);
        if (set)
                queue_flag_set(QUEUE_FLAG_WC, q);
        else
                queue_flag_clear(QUEUE_FLAG_WC, q);
        spin_unlock_irq(q->queue_lock);

        return count;
}
static ssize_t queue_dax_show(struct request_queue *q, char *page)
{
        return queue_var_show(blk_queue_dax(q), page);
}
static ssize_t print_stat(char *page, struct blk_rq_stat *stat, const char *pre)
{
        return sprintf(page, "%s samples=%llu, mean=%lld, min=%lld, max=%lld\n",
                        pre, (unsigned long long) stat->nr_samples,
                        (long long) stat->mean, (long long) stat->min,
                        (long long) stat->max);
}
static ssize_t queue_stats_show(struct request_queue *q, char *page)
{
        struct blk_rq_stat stat[2];
        ssize_t ret;

        blk_queue_stat_get(q, stat);

        ret = print_stat(page, &stat[BLK_STAT_READ], "read :");
        ret += print_stat(page + ret, &stat[BLK_STAT_WRITE], "write:");

        return ret;
}
static struct queue_sysfs_entry queue_requests_entry = {
        .attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
        .show = queue_requests_show,
        .store = queue_requests_store,
};

static struct queue_sysfs_entry queue_ra_entry = {
        .attr = {.name = "read_ahead_kb", .mode = S_IRUGO | S_IWUSR },
        .show = queue_ra_show,
        .store = queue_ra_store,
};

static struct queue_sysfs_entry queue_max_sectors_entry = {
        .attr = {.name = "max_sectors_kb", .mode = S_IRUGO | S_IWUSR },
        .show = queue_max_sectors_show,
        .store = queue_max_sectors_store,
};

static struct queue_sysfs_entry queue_max_hw_sectors_entry = {
        .attr = {.name = "max_hw_sectors_kb", .mode = S_IRUGO },
        .show = queue_max_hw_sectors_show,
};

static struct queue_sysfs_entry queue_max_segments_entry = {
        .attr = {.name = "max_segments", .mode = S_IRUGO },
        .show = queue_max_segments_show,
};

static struct queue_sysfs_entry queue_max_integrity_segments_entry = {
        .attr = {.name = "max_integrity_segments", .mode = S_IRUGO },
        .show = queue_max_integrity_segments_show,
};

static struct queue_sysfs_entry queue_max_segment_size_entry = {
        .attr = {.name = "max_segment_size", .mode = S_IRUGO },
        .show = queue_max_segment_size_show,
};

static struct queue_sysfs_entry queue_iosched_entry = {
        .attr = {.name = "scheduler", .mode = S_IRUGO | S_IWUSR },
        .show = elv_iosched_show,
        .store = elv_iosched_store,
};

static struct queue_sysfs_entry queue_hw_sector_size_entry = {
        .attr = {.name = "hw_sector_size", .mode = S_IRUGO },
        .show = queue_logical_block_size_show,
};

static struct queue_sysfs_entry queue_logical_block_size_entry = {
        .attr = {.name = "logical_block_size", .mode = S_IRUGO },
        .show = queue_logical_block_size_show,
};

static struct queue_sysfs_entry queue_physical_block_size_entry = {
        .attr = {.name = "physical_block_size", .mode = S_IRUGO },
        .show = queue_physical_block_size_show,
};

static struct queue_sysfs_entry queue_chunk_sectors_entry = {
        .attr = {.name = "chunk_sectors", .mode = S_IRUGO },
        .show = queue_chunk_sectors_show,
};

static struct queue_sysfs_entry queue_io_min_entry = {
        .attr = {.name = "minimum_io_size", .mode = S_IRUGO },
        .show = queue_io_min_show,
};

static struct queue_sysfs_entry queue_io_opt_entry = {
        .attr = {.name = "optimal_io_size", .mode = S_IRUGO },
        .show = queue_io_opt_show,
};

static struct queue_sysfs_entry queue_discard_granularity_entry = {
        .attr = {.name = "discard_granularity", .mode = S_IRUGO },
        .show = queue_discard_granularity_show,
};

static struct queue_sysfs_entry queue_discard_max_hw_entry = {
        .attr = {.name = "discard_max_hw_bytes", .mode = S_IRUGO },
        .show = queue_discard_max_hw_show,
};

static struct queue_sysfs_entry queue_discard_max_entry = {
        .attr = {.name = "discard_max_bytes", .mode = S_IRUGO | S_IWUSR },
        .show = queue_discard_max_show,
        .store = queue_discard_max_store,
};

static struct queue_sysfs_entry queue_discard_zeroes_data_entry = {
        .attr = {.name = "discard_zeroes_data", .mode = S_IRUGO },
        .show = queue_discard_zeroes_data_show,
};

static struct queue_sysfs_entry queue_write_same_max_entry = {
        .attr = {.name = "write_same_max_bytes", .mode = S_IRUGO },
        .show = queue_write_same_max_show,
};

static struct queue_sysfs_entry queue_write_zeroes_max_entry = {
        .attr = {.name = "write_zeroes_max_bytes", .mode = S_IRUGO },
        .show = queue_write_zeroes_max_show,
};

static struct queue_sysfs_entry queue_nonrot_entry = {
        .attr = {.name = "rotational", .mode = S_IRUGO | S_IWUSR },
        .show = queue_show_nonrot,
        .store = queue_store_nonrot,
};

static struct queue_sysfs_entry queue_zoned_entry = {
        .attr = {.name = "zoned", .mode = S_IRUGO },
        .show = queue_zoned_show,
};

static struct queue_sysfs_entry queue_nomerges_entry = {
        .attr = {.name = "nomerges", .mode = S_IRUGO | S_IWUSR },
        .show = queue_nomerges_show,
        .store = queue_nomerges_store,
};

static struct queue_sysfs_entry queue_rq_affinity_entry = {
        .attr = {.name = "rq_affinity", .mode = S_IRUGO | S_IWUSR },
        .show = queue_rq_affinity_show,
        .store = queue_rq_affinity_store,
};

static struct queue_sysfs_entry queue_iostats_entry = {
        .attr = {.name = "iostats", .mode = S_IRUGO | S_IWUSR },
        .show = queue_show_iostats,
        .store = queue_store_iostats,
};

static struct queue_sysfs_entry queue_random_entry = {
        .attr = {.name = "add_random", .mode = S_IRUGO | S_IWUSR },
        .show = queue_show_random,
        .store = queue_store_random,
};

static struct queue_sysfs_entry queue_poll_entry = {
        .attr = {.name = "io_poll", .mode = S_IRUGO | S_IWUSR },
        .show = queue_poll_show,
        .store = queue_poll_store,
};

static struct queue_sysfs_entry queue_poll_delay_entry = {
        .attr = {.name = "io_poll_delay", .mode = S_IRUGO | S_IWUSR },
        .show = queue_poll_delay_show,
        .store = queue_poll_delay_store,
};

static struct queue_sysfs_entry queue_wc_entry = {
        .attr = {.name = "write_cache", .mode = S_IRUGO | S_IWUSR },
        .show = queue_wc_show,
        .store = queue_wc_store,
};

static struct queue_sysfs_entry queue_dax_entry = {
        .attr = {.name = "dax", .mode = S_IRUGO },
        .show = queue_dax_show,
};

static struct queue_sysfs_entry queue_stats_entry = {
        .attr = {.name = "stats", .mode = S_IRUGO },
        .show = queue_stats_show,
};

static struct queue_sysfs_entry queue_wb_lat_entry = {
        .attr = {.name = "wbt_lat_usec", .mode = S_IRUGO | S_IWUSR },
        .show = queue_wb_lat_show,
        .store = queue_wb_lat_store,
};
static struct attribute *default_attrs[] = {
        &queue_requests_entry.attr,
        &queue_ra_entry.attr,
        &queue_max_hw_sectors_entry.attr,
        &queue_max_sectors_entry.attr,
        &queue_max_segments_entry.attr,
        &queue_max_integrity_segments_entry.attr,
        &queue_max_segment_size_entry.attr,
        &queue_iosched_entry.attr,
        &queue_hw_sector_size_entry.attr,
        &queue_logical_block_size_entry.attr,
        &queue_physical_block_size_entry.attr,
        &queue_chunk_sectors_entry.attr,
        &queue_io_min_entry.attr,
        &queue_io_opt_entry.attr,
        &queue_discard_granularity_entry.attr,
        &queue_discard_max_entry.attr,
        &queue_discard_max_hw_entry.attr,
        &queue_discard_zeroes_data_entry.attr,
        &queue_write_same_max_entry.attr,
        &queue_write_zeroes_max_entry.attr,
        &queue_nonrot_entry.attr,
        &queue_zoned_entry.attr,
        &queue_nomerges_entry.attr,
        &queue_rq_affinity_entry.attr,
        &queue_iostats_entry.attr,
        &queue_random_entry.attr,
        &queue_poll_entry.attr,
        &queue_wc_entry.attr,
        &queue_dax_entry.attr,
        &queue_stats_entry.attr,
        &queue_wb_lat_entry.attr,
        &queue_poll_delay_entry.attr,
        NULL,
};

#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)
static ssize_t
queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
        struct queue_sysfs_entry *entry = to_queue(attr);
        struct request_queue *q =
                container_of(kobj, struct request_queue, kobj);
        ssize_t res;

        if (!entry->show)
                return -EIO;
        mutex_lock(&q->sysfs_lock);
        if (blk_queue_dying(q)) {
                mutex_unlock(&q->sysfs_lock);
                return -ENOENT;
        }
        res = entry->show(q, page);
        mutex_unlock(&q->sysfs_lock);
        return res;
}
static ssize_t
queue_attr_store(struct kobject *kobj, struct attribute *attr,
                 const char *page, size_t length)
{
        struct queue_sysfs_entry *entry = to_queue(attr);
        struct request_queue *q;
        ssize_t res;

        if (!entry->store)
                return -EIO;

        q = container_of(kobj, struct request_queue, kobj);
        mutex_lock(&q->sysfs_lock);
        if (blk_queue_dying(q)) {
                mutex_unlock(&q->sysfs_lock);
                return -ENOENT;
        }
        res = entry->store(q, page, length);
        mutex_unlock(&q->sysfs_lock);
        return res;
}
static void blk_free_queue_rcu(struct rcu_head *rcu_head)
{
        struct request_queue *q = container_of(rcu_head, struct request_queue,
                                               rcu_head);
        kmem_cache_free(blk_requestq_cachep, q);
}
/**
 * blk_release_queue: - release a &struct request_queue when it is no longer needed
 * @kobj:	the kobj belonging to the request queue to be released
 *
 * Description:
 *     blk_release_queue is the pair to blk_init_queue() or
 *     blk_queue_make_request().  It should be called when a request queue is
 *     being released; typically when a block device is being de-registered.
 *     Currently, its primary task is to free all the &struct request
 *     structures that were allocated to the queue and the queue itself.
 *
 * Note:
 *     The low level driver must have finished any outstanding requests first
 *     via blk_cleanup_queue().
 **/
static void blk_release_queue(struct kobject *kobj)
{
        struct request_queue *q =
                container_of(kobj, struct request_queue, kobj);

        wbt_exit(q);
        bdi_exit(&q->backing_dev_info);
        blkcg_exit_queue(q);

        if (q->elevator) {
                spin_lock_irq(q->queue_lock);
                ioc_clear_queue(q);
                spin_unlock_irq(q->queue_lock);
                elevator_exit(q->elevator);
        }

        blk_exit_rl(&q->root_rl);

        if (q->queue_tags)
                __blk_queue_free_tags(q);

        if (!q->mq_ops)
                blk_free_flush_queue(q->fq);
        else
                blk_mq_release(q);

        blk_trace_shutdown(q);

        if (q->bio_split)
                bioset_free(q->bio_split);

        ida_simple_remove(&blk_queue_ida, q->id);
        call_rcu(&q->rcu_head, blk_free_queue_rcu);
}
static const struct sysfs_ops queue_sysfs_ops = {
        .show   = queue_attr_show,
        .store  = queue_attr_store,
};

struct kobj_type blk_queue_ktype = {
        .sysfs_ops      = &queue_sysfs_ops,
        .default_attrs  = default_attrs,
        .release        = blk_release_queue,
};
static void blk_wb_init(struct request_queue *q)
{
#ifndef CONFIG_BLK_WBT_MQ
        if (q->mq_ops)
                return;
#endif
#ifndef CONFIG_BLK_WBT_SQ
        if (q->request_fn)
                return;
#endif

        /*
         * If this fails, we don't get throttling
         */
        wbt_init(q);
}
int blk_register_queue(struct gendisk *disk)
{
        int ret;
        struct device *dev = disk_to_dev(disk);
        struct request_queue *q = disk->queue;

        if (WARN_ON(!q))
                return -ENXIO;

        /*
         * SCSI probing may synchronously create and destroy a lot of
         * request_queues for non-existent devices.  Shutting down a fully
         * functional queue takes measurable wallclock time as RCU grace
         * periods are involved.  To avoid excessive latency in these
         * cases, a request_queue starts out in a degraded mode which is
         * faster to shut down and is made fully functional here as
         * request_queues for non-existent devices never get registered.
         */
        if (!blk_queue_init_done(q)) {
                queue_flag_set_unlocked(QUEUE_FLAG_INIT_DONE, q);
                percpu_ref_switch_to_percpu(&q->q_usage_counter);
                blk_queue_bypass_end(q);
        }

        ret = blk_trace_init_sysfs(dev);
        if (ret)
                return ret;

        ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
        if (ret < 0) {
                blk_trace_remove_sysfs(dev);
                return ret;
        }

        kobject_uevent(&q->kobj, KOBJ_ADD);

        if (q->mq_ops)
                blk_mq_register_dev(dev, q);

        blk_wb_init(q);

        if (!q->request_fn)
                return 0;

        ret = elv_register_queue(q);
        if (ret) {
                kobject_uevent(&q->kobj, KOBJ_REMOVE);
                kobject_del(&q->kobj);
                blk_trace_remove_sysfs(dev);
                kobject_put(&dev->kobj);
                return ret;
        }

        return 0;
}
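
/*
 * Usage sketch: blk_register_queue() is invoked once a disk is added
 * (via device_add_disk() in this kernel's core; treat the exact call
 * site as an assumption if your tree differs), creating the "queue"
 * directory and all of the attributes declared above under
 * /sys/block/<disk>/.
 */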
void blk_unregister_queue(struct gendisk *disk)
{
        struct request_queue *q = disk->queue;

        if (WARN_ON(!q))
                return;

        if (q->mq_ops)
                blk_mq_unregister_dev(disk_to_dev(disk), q);

        if (q->request_fn)
                elv_unregister_queue(q);

        kobject_uevent(&q->kobj, KOBJ_REMOVE);
        kobject_del(&q->kobj);
        blk_trace_remove_sysfs(disk_to_dev(disk));
        kobject_put(&disk_to_dev(disk)->kobj);
}