/*
 * Functions related to sysfs handling
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/blktrace_api.h>
#include <linux/blk-mq.h>
#include <linux/blk-cgroup.h>

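/*
 * Each entry below describes one file in the per-queue sysfs directory
 * (typically /sys/block/<disk>/queue/): ->show formats the current value
 * into the page buffer, ->store parses and applies a value written by the
 * user.
 */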
struct queue_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct request_queue *, char *);
	ssize_t (*store)(struct request_queue *, const char *, size_t);
};

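/*
 * Most attributes are plain decimal numbers; these two helpers do the
 * common sprintf()/kstrtoul() work for the show/store methods below.
 */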
static ssize_t
queue_var_show(unsigned long var, char *page)
{
	return sprintf(page, "%lu\n", var);
}

static ssize_t
queue_var_store(unsigned long *var, const char *page, size_t count)
{
	int err;
	unsigned long v;

	err = kstrtoul(page, 10, &v);
	if (err || v > UINT_MAX)
		return -EINVAL;

	*var = v;

	return count;
}

static ssize_t queue_requests_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->nr_requests, (page));
}

static ssize_t
queue_requests_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long nr;
	int ret, err;

	if (!q->request_fn && !q->mq_ops)
		return -EINVAL;

	ret = queue_var_store(&nr, page, count);
	if (ret < 0)
		return ret;

	if (nr < BLKDEV_MIN_RQ)
		nr = BLKDEV_MIN_RQ;

	if (q->request_fn)
		err = blk_update_nr_requests(q, nr);
	else
		err = blk_mq_update_nr_requests(q, nr);

	if (err)
		return err;

	return ret;
}

static ssize_t queue_ra_show(struct request_queue *q, char *page)
{
	unsigned long ra_kb = q->backing_dev_info.ra_pages <<
					(PAGE_SHIFT - 10);

	return queue_var_show(ra_kb, (page));
}

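/*
 * read_ahead_kb is exposed in kilobytes while backing_dev_info.ra_pages is
 * kept in pages, hence the shifts by (PAGE_SHIFT - 10) in both directions.
 */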
static ssize_t
queue_ra_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long ra_kb;
	ssize_t ret = queue_var_store(&ra_kb, page, count);

	if (ret < 0)
		return ret;

	q->backing_dev_info.ra_pages = ra_kb >> (PAGE_SHIFT - 10);

	return ret;
}

static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
{
	int max_sectors_kb = queue_max_sectors(q) >> 1;

	return queue_var_show(max_sectors_kb, (page));
}

static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_segments(q), (page));
}

static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.max_integrity_segments, (page));
}

static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
{
	if (blk_queue_cluster(q))
		return queue_var_show(queue_max_segment_size(q), (page));

	return queue_var_show(PAGE_SIZE, (page));
}

static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_logical_block_size(q), page);
}

static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_physical_block_size(q), page);
}

static ssize_t queue_chunk_sectors_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.chunk_sectors, page);
}

static ssize_t queue_io_min_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_min(q), page);
}

static ssize_t queue_io_opt_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_opt(q), page);
}

static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.discard_granularity, page);
}

static ssize_t queue_discard_max_hw_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		(unsigned long long)q->limits.max_hw_discard_sectors << 9);
}

static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		(unsigned long long)q->limits.max_discard_sectors << 9);
}

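/*
 * Values written to discard_max_bytes must be a multiple of the discard
 * granularity and are clamped to the hardware limit reported via
 * discard_max_hw_bytes; internally the limit is kept in 512-byte sectors.
 */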
static ssize_t queue_discard_max_store(struct request_queue *q,
				       const char *page, size_t count)
{
	unsigned long max_discard;
	ssize_t ret = queue_var_store(&max_discard, page, count);

	if (ret < 0)
		return ret;

	if (max_discard & (q->limits.discard_granularity - 1))
		return -EINVAL;

	/* convert the byte count written by the user to sectors */
	max_discard >>= 9;
	if (max_discard > UINT_MAX)
		return -EINVAL;

	if (max_discard > q->limits.max_hw_discard_sectors)
		max_discard = q->limits.max_hw_discard_sectors;

	q->limits.max_discard_sectors = max_discard;
	return ret;
}

static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_discard_zeroes_data(q), page);
}

static ssize_t queue_write_same_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		(unsigned long long)q->limits.max_write_same_sectors << 9);
}

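/*
 * max_sectors_kb may only be set between one page worth of data and the
 * hardware maximum (further capped by the device's max_dev_sectors limit);
 * the queue limit itself is stored in 512-byte sectors.
 */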
static ssize_t
queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long max_sectors_kb,
		max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
			page_kb = 1 << (PAGE_SHIFT - 10);
	ssize_t ret = queue_var_store(&max_sectors_kb, page, count);

	if (ret < 0)
		return ret;

	max_hw_sectors_kb = min_not_zero(max_hw_sectors_kb, (unsigned long)
					 q->limits.max_dev_sectors >> 1);

	if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
		return -EINVAL;

	spin_lock_irq(q->queue_lock);
	q->limits.max_sectors = max_sectors_kb << 1;
	spin_unlock_irq(q->queue_lock);

	return ret;
}

static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
{
	int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;

	return queue_var_show(max_hw_sectors_kb, (page));
}

#define QUEUE_SYSFS_BIT_FNS(name, flag, neg)				\
static ssize_t								\
queue_show_##name(struct request_queue *q, char *page)			\
{									\
	int bit;							\
	bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags);		\
	return queue_var_show(neg ? !bit : bit, page);			\
}									\
static ssize_t								\
queue_store_##name(struct request_queue *q, const char *page, size_t count) \
{									\
	unsigned long val;						\
	ssize_t ret;							\
	ret = queue_var_store(&val, page, count);			\
	if (ret < 0)							\
		return ret;						\
	if (neg)							\
		val = !val;						\
									\
	spin_lock_irq(q->queue_lock);					\
	if (val)							\
		queue_flag_set(QUEUE_FLAG_##flag, q);			\
	else								\
		queue_flag_clear(QUEUE_FLAG_##flag, q);			\
	spin_unlock_irq(q->queue_lock);					\
	return ret;							\
}

QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1);
QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0);
QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0);
#undef QUEUE_SYSFS_BIT_FNS

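/*
 * The expansions above provide queue_show_nonrot()/queue_store_nonrot(),
 * queue_show_random()/queue_store_random() and
 * queue_show_iostats()/queue_store_iostats(), which back the "rotational",
 * "add_random" and "iostats" attributes defined further down.  Note that
 * "rotational" is the negated view of QUEUE_FLAG_NONROT (neg == 1).
 */
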
static ssize_t queue_zoned_show(struct request_queue *q, char *page)
{
	switch (blk_queue_zoned_model(q)) {
	case BLK_ZONED_HA:
		return sprintf(page, "host-aware\n");
	case BLK_ZONED_HM:
		return sprintf(page, "host-managed\n");
	default:
		return sprintf(page, "none\n");
	}
}

static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
{
	return queue_var_show((blk_queue_nomerges(q) << 1) |
			       blk_queue_noxmerges(q), page);
}

static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
				    size_t count)
{
	unsigned long nm;
	ssize_t ret = queue_var_store(&nm, page, count);

	if (ret < 0)
		return ret;

	spin_lock_irq(q->queue_lock);
	queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
	queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
	if (nm == 2)
		queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	else if (nm)
		queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
	spin_unlock_irq(q->queue_lock);

	return ret;
}

static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
{
	bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);
	bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags);

	return queue_var_show(set << force, page);
}

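/*
 * Writing to rq_affinity: 0 clears both completion-affinity flags, 1 sets
 * only QUEUE_FLAG_SAME_COMP, and 2 sets both QUEUE_FLAG_SAME_COMP and
 * QUEUE_FLAG_SAME_FORCE, as handled in the store method below.
 */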
static ssize_t
queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
{
	ssize_t ret = -EINVAL;
#ifdef CONFIG_SMP
	unsigned long val;

	ret = queue_var_store(&val, page, count);
	if (ret < 0)
		return ret;

	spin_lock_irq(q->queue_lock);
	if (val == 2) {
		queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 1) {
		queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 0) {
		queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
		queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	}
	spin_unlock_irq(q->queue_lock);
#endif
	return ret;
}

static ssize_t queue_poll_show(struct request_queue *q, char *page)
{
	return queue_var_show(test_bit(QUEUE_FLAG_POLL, &q->queue_flags), page);
}

static ssize_t queue_poll_store(struct request_queue *q, const char *page,
				size_t count)
{
	unsigned long poll_on;
	ssize_t ret;

	if (!q->mq_ops || !q->mq_ops->poll)
		return -EINVAL;

	ret = queue_var_store(&poll_on, page, count);
	if (ret < 0)
		return ret;

	spin_lock_irq(q->queue_lock);
	if (poll_on)
		queue_flag_set(QUEUE_FLAG_POLL, q);
	else
		queue_flag_clear(QUEUE_FLAG_POLL, q);
	spin_unlock_irq(q->queue_lock);

	return ret;
}

static ssize_t queue_wc_show(struct request_queue *q, char *page)
{
	if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
		return sprintf(page, "write back\n");

	return sprintf(page, "write through\n");
}

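/*
 * write_cache accepts the strings "write back", "write through" or "none"
 * ("none" is treated like "write through"); anything else is rejected with
 * -EINVAL.
 */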
static ssize_t queue_wc_store(struct request_queue *q, const char *page,
			      size_t count)
{
	int set = -1;

	if (!strncmp(page, "write back", 10))
		set = 1;
	else if (!strncmp(page, "write through", 13) ||
		 !strncmp(page, "none", 4))
		set = 0;

	if (set == -1)
		return -EINVAL;

	spin_lock_irq(q->queue_lock);
	if (set)
		queue_flag_set(QUEUE_FLAG_WC, q);
	else
		queue_flag_clear(QUEUE_FLAG_WC, q);
	spin_unlock_irq(q->queue_lock);

	return count;
}

static ssize_t queue_dax_show(struct request_queue *q, char *page)
{
	return queue_var_show(blk_queue_dax(q), page);
}

static ssize_t print_stat(char *page, struct blk_rq_stat *stat, const char *pre)
{
	return sprintf(page, "%s samples=%llu, mean=%lld, min=%lld, max=%lld\n",
			pre, (long long) stat->nr_samples,
			(long long) stat->mean, (long long) stat->min,
			(long long) stat->max);
}

static ssize_t queue_stats_show(struct request_queue *q, char *page)
{
	struct blk_rq_stat stat[2];
	ssize_t ret;

	blk_queue_stat_get(q, stat);

	ret = print_stat(page, &stat[BLK_STAT_READ], "read :");
	ret += print_stat(page + ret, &stat[BLK_STAT_WRITE], "write:");

	return ret;
}

static struct queue_sysfs_entry queue_requests_entry = {
	.attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
	.show = queue_requests_show,
	.store = queue_requests_store,
};

static struct queue_sysfs_entry queue_ra_entry = {
	.attr = {.name = "read_ahead_kb", .mode = S_IRUGO | S_IWUSR },
	.show = queue_ra_show,
	.store = queue_ra_store,
};

static struct queue_sysfs_entry queue_max_sectors_entry = {
	.attr = {.name = "max_sectors_kb", .mode = S_IRUGO | S_IWUSR },
	.show = queue_max_sectors_show,
	.store = queue_max_sectors_store,
};

static struct queue_sysfs_entry queue_max_hw_sectors_entry = {
	.attr = {.name = "max_hw_sectors_kb", .mode = S_IRUGO },
	.show = queue_max_hw_sectors_show,
};

static struct queue_sysfs_entry queue_max_segments_entry = {
	.attr = {.name = "max_segments", .mode = S_IRUGO },
	.show = queue_max_segments_show,
};

static struct queue_sysfs_entry queue_max_integrity_segments_entry = {
	.attr = {.name = "max_integrity_segments", .mode = S_IRUGO },
	.show = queue_max_integrity_segments_show,
};

static struct queue_sysfs_entry queue_max_segment_size_entry = {
	.attr = {.name = "max_segment_size", .mode = S_IRUGO },
	.show = queue_max_segment_size_show,
};

static struct queue_sysfs_entry queue_iosched_entry = {
	.attr = {.name = "scheduler", .mode = S_IRUGO | S_IWUSR },
	.show = elv_iosched_show,
	.store = elv_iosched_store,
};

static struct queue_sysfs_entry queue_hw_sector_size_entry = {
	.attr = {.name = "hw_sector_size", .mode = S_IRUGO },
	.show = queue_logical_block_size_show,
};

static struct queue_sysfs_entry queue_logical_block_size_entry = {
	.attr = {.name = "logical_block_size", .mode = S_IRUGO },
	.show = queue_logical_block_size_show,
};

static struct queue_sysfs_entry queue_physical_block_size_entry = {
	.attr = {.name = "physical_block_size", .mode = S_IRUGO },
	.show = queue_physical_block_size_show,
};

static struct queue_sysfs_entry queue_chunk_sectors_entry = {
	.attr = {.name = "chunk_sectors", .mode = S_IRUGO },
	.show = queue_chunk_sectors_show,
};

static struct queue_sysfs_entry queue_io_min_entry = {
	.attr = {.name = "minimum_io_size", .mode = S_IRUGO },
	.show = queue_io_min_show,
};

static struct queue_sysfs_entry queue_io_opt_entry = {
	.attr = {.name = "optimal_io_size", .mode = S_IRUGO },
	.show = queue_io_opt_show,
};

static struct queue_sysfs_entry queue_discard_granularity_entry = {
	.attr = {.name = "discard_granularity", .mode = S_IRUGO },
	.show = queue_discard_granularity_show,
};

static struct queue_sysfs_entry queue_discard_max_hw_entry = {
	.attr = {.name = "discard_max_hw_bytes", .mode = S_IRUGO },
	.show = queue_discard_max_hw_show,
};

static struct queue_sysfs_entry queue_discard_max_entry = {
	.attr = {.name = "discard_max_bytes", .mode = S_IRUGO | S_IWUSR },
	.show = queue_discard_max_show,
	.store = queue_discard_max_store,
};

static struct queue_sysfs_entry queue_discard_zeroes_data_entry = {
	.attr = {.name = "discard_zeroes_data", .mode = S_IRUGO },
	.show = queue_discard_zeroes_data_show,
};

static struct queue_sysfs_entry queue_write_same_max_entry = {
	.attr = {.name = "write_same_max_bytes", .mode = S_IRUGO },
	.show = queue_write_same_max_show,
};

static struct queue_sysfs_entry queue_nonrot_entry = {
	.attr = {.name = "rotational", .mode = S_IRUGO | S_IWUSR },
	.show = queue_show_nonrot,
	.store = queue_store_nonrot,
};

static struct queue_sysfs_entry queue_zoned_entry = {
	.attr = {.name = "zoned", .mode = S_IRUGO },
	.show = queue_zoned_show,
};

static struct queue_sysfs_entry queue_nomerges_entry = {
	.attr = {.name = "nomerges", .mode = S_IRUGO | S_IWUSR },
	.show = queue_nomerges_show,
	.store = queue_nomerges_store,
};

static struct queue_sysfs_entry queue_rq_affinity_entry = {
	.attr = {.name = "rq_affinity", .mode = S_IRUGO | S_IWUSR },
	.show = queue_rq_affinity_show,
	.store = queue_rq_affinity_store,
};

static struct queue_sysfs_entry queue_iostats_entry = {
	.attr = {.name = "iostats", .mode = S_IRUGO | S_IWUSR },
	.show = queue_show_iostats,
	.store = queue_store_iostats,
};

static struct queue_sysfs_entry queue_random_entry = {
	.attr = {.name = "add_random", .mode = S_IRUGO | S_IWUSR },
	.show = queue_show_random,
	.store = queue_store_random,
};

static struct queue_sysfs_entry queue_poll_entry = {
	.attr = {.name = "io_poll", .mode = S_IRUGO | S_IWUSR },
	.show = queue_poll_show,
	.store = queue_poll_store,
};

static struct queue_sysfs_entry queue_wc_entry = {
	.attr = {.name = "write_cache", .mode = S_IRUGO | S_IWUSR },
	.show = queue_wc_show,
	.store = queue_wc_store,
};

static struct queue_sysfs_entry queue_dax_entry = {
	.attr = {.name = "dax", .mode = S_IRUGO },
	.show = queue_dax_show,
};

static struct queue_sysfs_entry queue_stats_entry = {
	.attr = {.name = "stats", .mode = S_IRUGO },
	.show = queue_stats_show,
};

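/*
 * The default attribute table below gathers every entry defined above; it
 * must stay NULL-terminated and is wired into blk_queue_ktype as the
 * default attribute set for each request queue's "queue" kobject.
 */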
static struct attribute *default_attrs[] = {
	&queue_requests_entry.attr,
	&queue_ra_entry.attr,
	&queue_max_hw_sectors_entry.attr,
	&queue_max_sectors_entry.attr,
	&queue_max_segments_entry.attr,
	&queue_max_integrity_segments_entry.attr,
	&queue_max_segment_size_entry.attr,
	&queue_iosched_entry.attr,
	&queue_hw_sector_size_entry.attr,
	&queue_logical_block_size_entry.attr,
	&queue_physical_block_size_entry.attr,
	&queue_chunk_sectors_entry.attr,
	&queue_io_min_entry.attr,
	&queue_io_opt_entry.attr,
	&queue_discard_granularity_entry.attr,
	&queue_discard_max_entry.attr,
	&queue_discard_max_hw_entry.attr,
	&queue_discard_zeroes_data_entry.attr,
	&queue_write_same_max_entry.attr,
	&queue_nonrot_entry.attr,
	&queue_zoned_entry.attr,
	&queue_nomerges_entry.attr,
	&queue_rq_affinity_entry.attr,
	&queue_iostats_entry.attr,
	&queue_random_entry.attr,
	&queue_poll_entry.attr,
	&queue_wc_entry.attr,
	&queue_dax_entry.attr,
	&queue_stats_entry.attr,
	NULL,
};

#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)

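/*
 * to_queue() maps the generic struct attribute handed to us by the kobject
 * layer back to its containing queue_sysfs_entry.  Both wrappers below take
 * q->sysfs_lock and bail out if the queue is already dying, so the
 * individual show/store methods never run against a dead queue.
 */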
static ssize_t
queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);
	ssize_t res;

	if (!entry->show)
		return -EIO;
	mutex_lock(&q->sysfs_lock);
	if (blk_queue_dying(q)) {
		mutex_unlock(&q->sysfs_lock);
		return -ENOENT;
	}
	res = entry->show(q, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t
queue_attr_store(struct kobject *kobj, struct attribute *attr,
		 const char *page, size_t length)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct request_queue *q;
	ssize_t res;

	if (!entry->store)
		return -EIO;

	q = container_of(kobj, struct request_queue, kobj);
	mutex_lock(&q->sysfs_lock);
	if (blk_queue_dying(q)) {
		mutex_unlock(&q->sysfs_lock);
		return -ENOENT;
	}
	res = entry->store(q, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static void blk_free_queue_rcu(struct rcu_head *rcu_head)
{
	struct request_queue *q = container_of(rcu_head, struct request_queue,
					       rcu_head);
	kmem_cache_free(blk_requestq_cachep, q);
}

/**
 * blk_release_queue: - release a &struct request_queue when it is no longer needed
 * @kobj: the kobj belonging to the request queue to be released
 *
 * Description:
 *     blk_release_queue is the pair to blk_init_queue() or
 *     blk_queue_make_request().  It should be called when a request queue is
 *     being released; typically when a block device is being de-registered.
 *     Currently, its primary task is to free all the &struct request
 *     structures that were allocated to the queue and the queue itself.
 *
 * Note:
 *     The low level driver must have finished any outstanding requests first
 *     via blk_cleanup_queue().
 **/
static void blk_release_queue(struct kobject *kobj)
{
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);

	bdi_exit(&q->backing_dev_info);

	if (q->elevator) {
		spin_lock_irq(q->queue_lock);
		ioc_clear_queue(q);
		spin_unlock_irq(q->queue_lock);
		elevator_exit(q->elevator);
	}

	blk_exit_rl(&q->root_rl);

	if (q->queue_tags)
		__blk_queue_free_tags(q);

	if (!q->mq_ops)
		blk_free_flush_queue(q->fq);

	blk_trace_shutdown(q);

	if (q->bio_split)
		bioset_free(q->bio_split);

	ida_simple_remove(&blk_queue_ida, q->id);
	call_rcu(&q->rcu_head, blk_free_queue_rcu);
}

static const struct sysfs_ops queue_sysfs_ops = {
	.show	= queue_attr_show,
	.store	= queue_attr_store,
};

struct kobj_type blk_queue_ktype = {
	.sysfs_ops	= &queue_sysfs_ops,
	.default_attrs	= default_attrs,
	.release	= blk_release_queue,
};

int blk_register_queue(struct gendisk *disk)
{
	int ret;
	struct device *dev = disk_to_dev(disk);
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return -ENXIO;

	/*
	 * SCSI probing may synchronously create and destroy a lot of
	 * request_queues for non-existent devices.  Shutting down a fully
	 * functional queue takes measurable wallclock time as RCU grace
	 * periods are involved.  To avoid excessive latency in these
	 * cases, a request_queue starts out in a degraded mode which is
	 * faster to shut down and is made fully functional here as
	 * request_queues for non-existent devices never get registered.
	 */
	if (!blk_queue_init_done(q)) {
		queue_flag_set_unlocked(QUEUE_FLAG_INIT_DONE, q);
		percpu_ref_switch_to_percpu(&q->q_usage_counter);
		blk_queue_bypass_end(q);
	}

	ret = blk_trace_init_sysfs(dev);
	if (ret)
		return ret;

	ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
	if (ret < 0) {
		blk_trace_remove_sysfs(dev);
		return ret;
	}

	kobject_uevent(&q->kobj, KOBJ_ADD);

	if (q->mq_ops)
		blk_mq_register_dev(dev, q);

	if (!q->request_fn)
		return 0;

	ret = elv_register_queue(q);
	if (ret) {
		kobject_uevent(&q->kobj, KOBJ_REMOVE);
		kobject_del(&q->kobj);
		blk_trace_remove_sysfs(dev);
		kobject_put(&dev->kobj);
		return ret;
	}

	return 0;
}

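/*
 * blk_unregister_queue() undoes blk_register_queue() in reverse: the blk-mq
 * and elevator sysfs entries are torn down first, then the "queue" kobject
 * itself, and finally the reference taken on the disk's device kobject is
 * dropped.
 */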
void blk_unregister_queue(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return;

	if (q->mq_ops)
		blk_mq_unregister_dev(disk_to_dev(disk), q);

	if (q->request_fn)
		elv_unregister_queue(q);

	kobject_uevent(&q->kobj, KOBJ_REMOVE);
	kobject_del(&q->kobj);
	blk_trace_remove_sysfs(disk_to_dev(disk));
	kobject_put(&disk_to_dev(disk)->kobj);
}