// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to sysfs handling
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/blktrace_api.h>
#include <linux/blk-mq.h>
#include <linux/blk-cgroup.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-wbt.h"

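/*
 * One entry per file under /sys/block/<disk>/queue/.  An attribute is
 * read-only when no ->store() callback is provided.
 */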
struct queue_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct request_queue *, char *);
	ssize_t (*store)(struct request_queue *, const char *, size_t);
};

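/*
 * Helpers for formatting and parsing the unsigned long values used by
 * most of the attributes below.  queue_var_store() rejects anything
 * that does not fit in an unsigned int.
 */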
static ssize_t
queue_var_show(unsigned long var, char *page)
{
	return sprintf(page, "%lu\n", var);
}

static ssize_t
queue_var_store(unsigned long *var, const char *page, size_t count)
{
	int err;
	unsigned long v;

	err = kstrtoul(page, 10, &v);
	if (err || v > UINT_MAX)
		return -EINVAL;

	*var = v;

	return count;
}

static ssize_t queue_var_store64(s64 *var, const char *page)
{
	int err;
	s64 v;

	err = kstrtos64(page, 10, &v);
	if (err < 0)
		return err;

	*var = v;
	return 0;
}

static ssize_t queue_requests_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->nr_requests, (page));
}

static ssize_t
queue_requests_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long nr;
	int ret, err;

	if (!q->request_fn && !q->mq_ops)
		return -EINVAL;

	ret = queue_var_store(&nr, page, count);
	if (ret < 0)
		return ret;

	if (nr < BLKDEV_MIN_RQ)
		nr = BLKDEV_MIN_RQ;

	if (q->request_fn)
		err = blk_update_nr_requests(q, nr);
	else
		err = blk_mq_update_nr_requests(q, nr);

	if (err)
		return err;

	return ret;
}

static ssize_t queue_ra_show(struct request_queue *q, char *page)
{
	unsigned long ra_kb = q->backing_dev_info->ra_pages <<
					(PAGE_SHIFT - 10);

	return queue_var_show(ra_kb, (page));
}

static ssize_t
queue_ra_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long ra_kb;
	ssize_t ret = queue_var_store(&ra_kb, page, count);

	if (ret < 0)
		return ret;

	q->backing_dev_info->ra_pages = ra_kb >> (PAGE_SHIFT - 10);

	return ret;
}

static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
{
	int max_sectors_kb = queue_max_sectors(q) >> 1;

	return queue_var_show(max_sectors_kb, (page));
}

static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_segments(q), (page));
}

static ssize_t queue_max_discard_segments_show(struct request_queue *q,
		char *page)
{
	return queue_var_show(queue_max_discard_segments(q), (page));
}

static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.max_integrity_segments, (page));
}

static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
{
	if (blk_queue_cluster(q))
		return queue_var_show(queue_max_segment_size(q), (page));

	return queue_var_show(PAGE_SIZE, (page));
}

static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_logical_block_size(q), page);
}

static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_physical_block_size(q), page);
}

static ssize_t queue_chunk_sectors_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.chunk_sectors, page);
}

static ssize_t queue_io_min_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_min(q), page);
}

static ssize_t queue_io_opt_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_opt(q), page);
}

static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.discard_granularity, page);
}

static ssize_t queue_discard_max_hw_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long)q->limits.max_hw_discard_sectors << 9);
}

static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long)q->limits.max_discard_sectors << 9);
}

static ssize_t queue_discard_max_store(struct request_queue *q,
				       const char *page, size_t count)
{
	unsigned long max_discard;
	ssize_t ret = queue_var_store(&max_discard, page, count);

	if (ret < 0)
		return ret;

	if (max_discard & (q->limits.discard_granularity - 1))
		return -EINVAL;

	max_discard >>= 9;
	if (max_discard > UINT_MAX)
		return -EINVAL;

	if (max_discard > q->limits.max_hw_discard_sectors)
		max_discard = q->limits.max_hw_discard_sectors;

	q->limits.max_discard_sectors = max_discard;
	return ret;
}

static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)
{
	return queue_var_show(0, page);
}

static ssize_t queue_write_same_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		(unsigned long long)q->limits.max_write_same_sectors << 9);
}

static ssize_t queue_write_zeroes_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		(unsigned long long)q->limits.max_write_zeroes_sectors << 9);
}

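/*
 * max_sectors_kb may be lowered by the user, but never below one page
 * and never above the hardware (or device) limit.
 */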
static ssize_t
queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long max_sectors_kb,
		max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
			page_kb = 1 << (PAGE_SHIFT - 10);
	ssize_t ret = queue_var_store(&max_sectors_kb, page, count);

	if (ret < 0)
		return ret;

	max_hw_sectors_kb = min_not_zero(max_hw_sectors_kb, (unsigned long)
					 q->limits.max_dev_sectors >> 1);

	if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
		return -EINVAL;

	spin_lock_irq(q->queue_lock);
	q->limits.max_sectors = max_sectors_kb << 1;
	q->backing_dev_info->io_pages = max_sectors_kb >> (PAGE_SHIFT - 10);
	spin_unlock_irq(q->queue_lock);

	return ret;
}

static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
{
	int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;

	return queue_var_show(max_hw_sectors_kb, (page));
}

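/*
 * Generate show/store pairs for attributes that map directly onto a
 * queue flag.  When 'neg' is set the exported value is the inverse of
 * the flag, e.g. "rotational" is the inverse of QUEUE_FLAG_NONROT.
 */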
#define QUEUE_SYSFS_BIT_FNS(name, flag, neg)				\
static ssize_t								\
queue_show_##name(struct request_queue *q, char *page)			\
{									\
	int bit;							\
	bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags);		\
	return queue_var_show(neg ? !bit : bit, page);			\
}									\
static ssize_t								\
queue_store_##name(struct request_queue *q, const char *page, size_t count) \
{									\
	unsigned long val;						\
	ssize_t ret;							\
	ret = queue_var_store(&val, page, count);			\
	if (ret < 0)							\
		return ret;						\
	if (neg)							\
		val = !val;						\
									\
	spin_lock_irq(q->queue_lock);					\
	if (val)							\
		queue_flag_set(QUEUE_FLAG_##flag, q);			\
	else								\
		queue_flag_clear(QUEUE_FLAG_##flag, q);			\
	spin_unlock_irq(q->queue_lock);					\
	return ret;							\
}

QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1);
QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0);
QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0);
#undef QUEUE_SYSFS_BIT_FNS

static ssize_t queue_zoned_show(struct request_queue *q, char *page)
{
	switch (blk_queue_zoned_model(q)) {
	case BLK_ZONED_HA:
		return sprintf(page, "host-aware\n");
	case BLK_ZONED_HM:
		return sprintf(page, "host-managed\n");
	default:
		return sprintf(page, "none\n");
	}
}

static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
{
	return queue_var_show((blk_queue_nomerges(q) << 1) |
			       blk_queue_noxmerges(q), page);
}

static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
				    size_t count)
{
	unsigned long nm;
	ssize_t ret = queue_var_store(&nm, page, count);

	if (ret < 0)
		return ret;

	spin_lock_irq(q->queue_lock);
	queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
	queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
	if (nm == 2)
		queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	else if (nm)
		queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
	spin_unlock_irq(q->queue_lock);

	return ret;
}

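/*
 * rq_affinity: 0 disables completion affinity, 1 completes a request on
 * a CPU in the same group as the submitter (QUEUE_FLAG_SAME_COMP), and
 * 2 forces completion on the exact submitting CPU (QUEUE_FLAG_SAME_FORCE).
 */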
static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
{
	bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);
	bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags);

	return queue_var_show(set << force, page);
}

static ssize_t
queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
{
	ssize_t ret = -EINVAL;
#ifdef CONFIG_SMP
	unsigned long val;

	ret = queue_var_store(&val, page, count);
	if (ret < 0)
		return ret;

	spin_lock_irq(q->queue_lock);
	if (val == 2) {
		queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 1) {
		queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 0) {
		queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
		queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	}
	spin_unlock_irq(q->queue_lock);
#endif
	return ret;
}

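/*
 * io_poll_delay: -1 selects classic (busy-wait) polling, 0 enables hybrid
 * polling with an auto-estimated sleep time, and a positive value is a
 * fixed hybrid-poll sleep time in microseconds.
 */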
static ssize_t queue_poll_delay_show(struct request_queue *q, char *page)
{
	int val;

	if (q->poll_nsec == -1)
		val = -1;
	else
		val = q->poll_nsec / 1000;

	return sprintf(page, "%d\n", val);
}

static ssize_t queue_poll_delay_store(struct request_queue *q, const char *page,
				size_t count)
{
	int err, val;

	if (!q->mq_ops || !q->mq_ops->poll)
		return -EINVAL;

	err = kstrtoint(page, 10, &val);
	if (err < 0)
		return err;

	if (val == -1)
		q->poll_nsec = -1;
	else
		q->poll_nsec = val * 1000;

	return count;
}

static ssize_t queue_poll_show(struct request_queue *q, char *page)
{
	return queue_var_show(test_bit(QUEUE_FLAG_POLL, &q->queue_flags), page);
}

static ssize_t queue_poll_store(struct request_queue *q, const char *page,
				size_t count)
{
	unsigned long poll_on;
	ssize_t ret;

	if (!q->mq_ops || !q->mq_ops->poll)
		return -EINVAL;

	ret = queue_var_store(&poll_on, page, count);
	if (ret < 0)
		return ret;

	spin_lock_irq(q->queue_lock);
	if (poll_on)
		queue_flag_set(QUEUE_FLAG_POLL, q);
	else
		queue_flag_clear(QUEUE_FLAG_POLL, q);
	spin_unlock_irq(q->queue_lock);

	return ret;
}

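/*
 * wbt_lat_usec: writeback throttling latency target in microseconds;
 * writing -1 restores the device-default target.
 */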
static ssize_t queue_wb_lat_show(struct request_queue *q, char *page)
{
	if (!q->rq_wb)
		return -EINVAL;

	return sprintf(page, "%llu\n", div_u64(q->rq_wb->min_lat_nsec, 1000));
}

static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
				  size_t count)
{
	struct rq_wb *rwb;
	ssize_t ret;
	s64 val;

	ret = queue_var_store64(&val, page);
	if (ret < 0)
		return ret;
	if (val < -1)
		return -EINVAL;

	rwb = q->rq_wb;
	if (!rwb) {
		ret = wbt_init(q);
		if (ret)
			return ret;
	}

	rwb = q->rq_wb;
	if (val == -1)
		rwb->min_lat_nsec = wbt_default_latency_nsec(q);
	else if (val >= 0)
		rwb->min_lat_nsec = val * 1000ULL;

	if (rwb->enable_state == WBT_STATE_ON_DEFAULT)
		rwb->enable_state = WBT_STATE_ON_MANUAL;

	wbt_update_limits(rwb);
	return count;
}

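/*
 * write_cache: "write back" when QUEUE_FLAG_WC is set, otherwise
 * "write through".  The store side also accepts "none" as an alias
 * for "write through".
 */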
static ssize_t queue_wc_show(struct request_queue *q, char *page)
{
	if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
		return sprintf(page, "write back\n");

	return sprintf(page, "write through\n");
}

static ssize_t queue_wc_store(struct request_queue *q, const char *page,
			      size_t count)
{
	int set = -1;

	if (!strncmp(page, "write back", 10))
		set = 1;
	else if (!strncmp(page, "write through", 13) ||
		 !strncmp(page, "none", 4))
		set = 0;

	if (set == -1)
		return -EINVAL;

	spin_lock_irq(q->queue_lock);
	if (set)
		queue_flag_set(QUEUE_FLAG_WC, q);
	else
		queue_flag_clear(QUEUE_FLAG_WC, q);
	spin_unlock_irq(q->queue_lock);

	return count;
}

static ssize_t queue_dax_show(struct request_queue *q, char *page)
{
	return queue_var_show(blk_queue_dax(q), page);
}

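/*
 * Attribute definitions: each one names a sysfs file and wires it to
 * the show/store handlers above.
 */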
static struct queue_sysfs_entry queue_requests_entry = {
	.attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
	.show = queue_requests_show,
	.store = queue_requests_store,
};

static struct queue_sysfs_entry queue_ra_entry = {
	.attr = {.name = "read_ahead_kb", .mode = S_IRUGO | S_IWUSR },
	.show = queue_ra_show,
	.store = queue_ra_store,
};

static struct queue_sysfs_entry queue_max_sectors_entry = {
	.attr = {.name = "max_sectors_kb", .mode = S_IRUGO | S_IWUSR },
	.show = queue_max_sectors_show,
	.store = queue_max_sectors_store,
};

static struct queue_sysfs_entry queue_max_hw_sectors_entry = {
	.attr = {.name = "max_hw_sectors_kb", .mode = S_IRUGO },
	.show = queue_max_hw_sectors_show,
};

static struct queue_sysfs_entry queue_max_segments_entry = {
	.attr = {.name = "max_segments", .mode = S_IRUGO },
	.show = queue_max_segments_show,
};

static struct queue_sysfs_entry queue_max_discard_segments_entry = {
	.attr = {.name = "max_discard_segments", .mode = S_IRUGO },
	.show = queue_max_discard_segments_show,
};

static struct queue_sysfs_entry queue_max_integrity_segments_entry = {
	.attr = {.name = "max_integrity_segments", .mode = S_IRUGO },
	.show = queue_max_integrity_segments_show,
};

static struct queue_sysfs_entry queue_max_segment_size_entry = {
	.attr = {.name = "max_segment_size", .mode = S_IRUGO },
	.show = queue_max_segment_size_show,
};

static struct queue_sysfs_entry queue_iosched_entry = {
	.attr = {.name = "scheduler", .mode = S_IRUGO | S_IWUSR },
	.show = elv_iosched_show,
	.store = elv_iosched_store,
};

static struct queue_sysfs_entry queue_hw_sector_size_entry = {
	.attr = {.name = "hw_sector_size", .mode = S_IRUGO },
	.show = queue_logical_block_size_show,
};

static struct queue_sysfs_entry queue_logical_block_size_entry = {
	.attr = {.name = "logical_block_size", .mode = S_IRUGO },
	.show = queue_logical_block_size_show,
};

static struct queue_sysfs_entry queue_physical_block_size_entry = {
	.attr = {.name = "physical_block_size", .mode = S_IRUGO },
	.show = queue_physical_block_size_show,
};

static struct queue_sysfs_entry queue_chunk_sectors_entry = {
	.attr = {.name = "chunk_sectors", .mode = S_IRUGO },
	.show = queue_chunk_sectors_show,
};

static struct queue_sysfs_entry queue_io_min_entry = {
	.attr = {.name = "minimum_io_size", .mode = S_IRUGO },
	.show = queue_io_min_show,
};

static struct queue_sysfs_entry queue_io_opt_entry = {
	.attr = {.name = "optimal_io_size", .mode = S_IRUGO },
	.show = queue_io_opt_show,
};

static struct queue_sysfs_entry queue_discard_granularity_entry = {
	.attr = {.name = "discard_granularity", .mode = S_IRUGO },
	.show = queue_discard_granularity_show,
};

static struct queue_sysfs_entry queue_discard_max_hw_entry = {
	.attr = {.name = "discard_max_hw_bytes", .mode = S_IRUGO },
	.show = queue_discard_max_hw_show,
};

static struct queue_sysfs_entry queue_discard_max_entry = {
	.attr = {.name = "discard_max_bytes", .mode = S_IRUGO | S_IWUSR },
	.show = queue_discard_max_show,
	.store = queue_discard_max_store,
};

static struct queue_sysfs_entry queue_discard_zeroes_data_entry = {
	.attr = {.name = "discard_zeroes_data", .mode = S_IRUGO },
	.show = queue_discard_zeroes_data_show,
};

static struct queue_sysfs_entry queue_write_same_max_entry = {
	.attr = {.name = "write_same_max_bytes", .mode = S_IRUGO },
	.show = queue_write_same_max_show,
};

static struct queue_sysfs_entry queue_write_zeroes_max_entry = {
	.attr = {.name = "write_zeroes_max_bytes", .mode = S_IRUGO },
	.show = queue_write_zeroes_max_show,
};

static struct queue_sysfs_entry queue_nonrot_entry = {
	.attr = {.name = "rotational", .mode = S_IRUGO | S_IWUSR },
	.show = queue_show_nonrot,
	.store = queue_store_nonrot,
};

static struct queue_sysfs_entry queue_zoned_entry = {
	.attr = {.name = "zoned", .mode = S_IRUGO },
	.show = queue_zoned_show,
};

static struct queue_sysfs_entry queue_nomerges_entry = {
	.attr = {.name = "nomerges", .mode = S_IRUGO | S_IWUSR },
	.show = queue_nomerges_show,
	.store = queue_nomerges_store,
};

static struct queue_sysfs_entry queue_rq_affinity_entry = {
	.attr = {.name = "rq_affinity", .mode = S_IRUGO | S_IWUSR },
	.show = queue_rq_affinity_show,
	.store = queue_rq_affinity_store,
};

static struct queue_sysfs_entry queue_iostats_entry = {
	.attr = {.name = "iostats", .mode = S_IRUGO | S_IWUSR },
	.show = queue_show_iostats,
	.store = queue_store_iostats,
};

static struct queue_sysfs_entry queue_random_entry = {
	.attr = {.name = "add_random", .mode = S_IRUGO | S_IWUSR },
	.show = queue_show_random,
	.store = queue_store_random,
};

static struct queue_sysfs_entry queue_poll_entry = {
	.attr = {.name = "io_poll", .mode = S_IRUGO | S_IWUSR },
	.show = queue_poll_show,
	.store = queue_poll_store,
};

static struct queue_sysfs_entry queue_poll_delay_entry = {
	.attr = {.name = "io_poll_delay", .mode = S_IRUGO | S_IWUSR },
	.show = queue_poll_delay_show,
	.store = queue_poll_delay_store,
};

static struct queue_sysfs_entry queue_wc_entry = {
	.attr = {.name = "write_cache", .mode = S_IRUGO | S_IWUSR },
	.show = queue_wc_show,
	.store = queue_wc_store,
};

static struct queue_sysfs_entry queue_dax_entry = {
	.attr = {.name = "dax", .mode = S_IRUGO },
	.show = queue_dax_show,
};

static struct queue_sysfs_entry queue_wb_lat_entry = {
	.attr = {.name = "wbt_lat_usec", .mode = S_IRUGO | S_IWUSR },
	.show = queue_wb_lat_show,
	.store = queue_wb_lat_store,
};

#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
static struct queue_sysfs_entry throtl_sample_time_entry = {
	.attr = {.name = "throttle_sample_time", .mode = S_IRUGO | S_IWUSR },
	.show = blk_throtl_sample_time_show,
	.store = blk_throtl_sample_time_store,
};
#endif

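/* Attributes installed for every registered queue. */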
static struct attribute *default_attrs[] = {
	&queue_requests_entry.attr,
	&queue_ra_entry.attr,
	&queue_max_hw_sectors_entry.attr,
	&queue_max_sectors_entry.attr,
	&queue_max_segments_entry.attr,
	&queue_max_discard_segments_entry.attr,
	&queue_max_integrity_segments_entry.attr,
	&queue_max_segment_size_entry.attr,
	&queue_iosched_entry.attr,
	&queue_hw_sector_size_entry.attr,
	&queue_logical_block_size_entry.attr,
	&queue_physical_block_size_entry.attr,
	&queue_chunk_sectors_entry.attr,
	&queue_io_min_entry.attr,
	&queue_io_opt_entry.attr,
	&queue_discard_granularity_entry.attr,
	&queue_discard_max_entry.attr,
	&queue_discard_max_hw_entry.attr,
	&queue_discard_zeroes_data_entry.attr,
	&queue_write_same_max_entry.attr,
	&queue_write_zeroes_max_entry.attr,
	&queue_nonrot_entry.attr,
	&queue_zoned_entry.attr,
	&queue_nomerges_entry.attr,
	&queue_rq_affinity_entry.attr,
	&queue_iostats_entry.attr,
	&queue_random_entry.attr,
	&queue_poll_entry.attr,
	&queue_wc_entry.attr,
	&queue_dax_entry.attr,
	&queue_wb_lat_entry.attr,
	&queue_poll_delay_entry.attr,
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
	&throtl_sample_time_entry.attr,
#endif
	NULL,
};

#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)

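/*
 * sysfs entry points: both paths take q->sysfs_lock and bail out with
 * -ENOENT once the queue has started dying.
 */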
static ssize_t
queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);
	ssize_t res;

	if (!entry->show)
		return -EIO;
	mutex_lock(&q->sysfs_lock);
	if (blk_queue_dying(q)) {
		mutex_unlock(&q->sysfs_lock);
		return -ENOENT;
	}
	res = entry->show(q, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t
queue_attr_store(struct kobject *kobj, struct attribute *attr,
		    const char *page, size_t length)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct request_queue *q;
	ssize_t res;

	if (!entry->store)
		return -EIO;

	q = container_of(kobj, struct request_queue, kobj);
	mutex_lock(&q->sysfs_lock);
	if (blk_queue_dying(q)) {
		mutex_unlock(&q->sysfs_lock);
		return -ENOENT;
	}
	res = entry->store(q, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static void blk_free_queue_rcu(struct rcu_head *rcu_head)
{
	struct request_queue *q = container_of(rcu_head, struct request_queue,
					       rcu_head);
	kmem_cache_free(blk_requestq_cachep, q);
}

/**
 * __blk_release_queue - release a request queue when it is no longer needed
 * @work: pointer to the release_work member of the request queue to be released
 *
 * Description:
 *     blk_release_queue is the counterpart of blk_init_queue(). It should be
 *     called when a request queue is being released; typically when a block
 *     device is being de-registered. Its primary task is to free the queue
 *     itself.
 *
 * Notes:
 *     The low level driver must have finished any outstanding requests first
 *     via blk_cleanup_queue().
 *
 *     Although blk_release_queue() may be called with preemption disabled,
 *     __blk_release_queue() may sleep.
 */
static void __blk_release_queue(struct work_struct *work)
{
	struct request_queue *q = container_of(work, typeof(*q), release_work);

	if (test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags))
		blk_stat_remove_callback(q, q->poll_cb);
	blk_stat_free_callback(q->poll_cb);
	bdi_put(q->backing_dev_info);
	blkcg_exit_queue(q);

	if (q->elevator) {
		ioc_clear_queue(q);
		elevator_exit(q, q->elevator);
	}

	blk_free_queue_stats(q->stats);

	blk_exit_rl(q, &q->root_rl);

	if (q->queue_tags)
		__blk_queue_free_tags(q);

	if (!q->mq_ops) {
		if (q->exit_rq_fn)
			q->exit_rq_fn(q, q->fq->flush_rq);
		blk_free_flush_queue(q->fq);
	} else {
		blk_mq_release(q);
	}

	blk_trace_shutdown(q);

	if (q->mq_ops)
		blk_mq_debugfs_unregister(q);

	if (q->bio_split)
		bioset_free(q->bio_split);

	ida_simple_remove(&blk_queue_ida, q->id);
	call_rcu(&q->rcu_head, blk_free_queue_rcu);
}

static void blk_release_queue(struct kobject *kobj)
{
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);

	INIT_WORK(&q->release_work, __blk_release_queue);
	schedule_work(&q->release_work);
}

static const struct sysfs_ops queue_sysfs_ops = {
	.show = queue_attr_show,
	.store = queue_attr_store,
};

struct kobj_type blk_queue_ktype = {
	.sysfs_ops = &queue_sysfs_ops,
	.default_attrs = default_attrs,
	.release = blk_release_queue,
};

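/**
 * blk_register_queue - register a block layer queue with sysfs
 * @disk: Disk of which the request queue should be registered with sysfs.
 */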
int blk_register_queue(struct gendisk *disk)
{
	int ret;
	struct device *dev = disk_to_dev(disk);
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return -ENXIO;

	WARN_ONCE(test_bit(QUEUE_FLAG_REGISTERED, &q->queue_flags),
		  "%s is registering an already registered queue\n",
		  kobject_name(&dev->kobj));
	queue_flag_set_unlocked(QUEUE_FLAG_REGISTERED, q);

	/*
	 * SCSI probing may synchronously create and destroy a lot of
	 * request_queues for non-existent devices.  Shutting down a fully
	 * functional queue takes measurable wallclock time as RCU grace
	 * periods are involved.  To avoid excessive latency in these
	 * cases, a request_queue starts out in a degraded mode which is
	 * faster to shut down and is made fully functional here as
	 * request_queues for non-existent devices never get registered.
	 */
	if (!blk_queue_init_done(q)) {
		queue_flag_set_unlocked(QUEUE_FLAG_INIT_DONE, q);
		percpu_ref_switch_to_percpu(&q->q_usage_counter);
		blk_queue_bypass_end(q);
	}

	ret = blk_trace_init_sysfs(dev);
	if (ret)
		return ret;

	/* Prevent changes through sysfs until registration is completed. */
	mutex_lock(&q->sysfs_lock);

	ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
	if (ret < 0) {
		blk_trace_remove_sysfs(dev);
		goto unlock;
	}

	if (q->mq_ops) {
		__blk_mq_register_dev(dev, q);
		blk_mq_debugfs_register(q);
	}

	kobject_uevent(&q->kobj, KOBJ_ADD);

	wbt_enable_default(q);

	blk_throtl_register_queue(q);

	if (q->request_fn || (q->mq_ops && q->elevator)) {
		ret = elv_register_queue(q);
		if (ret) {
			kobject_uevent(&q->kobj, KOBJ_REMOVE);
			kobject_del(&q->kobj);
			blk_trace_remove_sysfs(dev);
			kobject_put(&dev->kobj);
			goto unlock;
		}
	}
	ret = 0;
unlock:
	mutex_unlock(&q->sysfs_lock);
	return ret;
}

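/**
 * blk_unregister_queue - counterpart of blk_register_queue()
 * @disk: Disk of which the request queue should be unregistered from sysfs.
 */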
void blk_unregister_queue(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return;

	mutex_lock(&q->sysfs_lock);
	queue_flag_clear_unlocked(QUEUE_FLAG_REGISTERED, q);
	mutex_unlock(&q->sysfs_lock);

	wbt_exit(q);

	if (q->mq_ops)
		blk_mq_unregister_dev(disk_to_dev(disk), q);

	if (q->request_fn || (q->mq_ops && q->elevator))
		elv_unregister_queue(q);

	kobject_uevent(&q->kobj, KOBJ_REMOVE);
	kobject_del(&q->kobj);
	blk_trace_remove_sysfs(disk_to_dev(disk));
	kobject_put(&disk_to_dev(disk)->kobj);
}