/*
 * Functions related to sysfs handling
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/blktrace_api.h>
#include <linux/blk-mq.h>
#include <linux/blk-cgroup.h>

#include "blk.h"
#include "blk-mq.h"

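/*
 * Each queue sysfs attribute maps a file under /sys/block/<disk>/queue/
 * to a show/store pair operating on the owning request_queue.
 */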
struct queue_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct request_queue *, char *);
	ssize_t (*store)(struct request_queue *, const char *, size_t);
};

static ssize_t
queue_var_show(unsigned long var, char *page)
{
	return sprintf(page, "%lu\n", var);
}

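/*
 * Parse a decimal value written through sysfs.  Values that do not fit
 * in an unsigned int are rejected, since the queue limits updated by
 * the stores below are 32-bit.
 */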
static ssize_t
queue_var_store(unsigned long *var, const char *page, size_t count)
{
	int err;
	unsigned long v;

	err = kstrtoul(page, 10, &v);
	if (err || v > UINT_MAX)
		return -EINVAL;

	*var = v;

	return count;
}

static ssize_t queue_requests_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->nr_requests, (page));
}

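/*
 * Writing nr_requests resizes the request pool on both the legacy and
 * blk-mq paths; values below BLKDEV_MIN_RQ are silently raised to the
 * minimum.  E.g. (device name illustrative):
 *
 *	echo 256 > /sys/block/sda/queue/nr_requests
 */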
static ssize_t
queue_requests_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long nr;
	int ret, err;

	if (!q->request_fn && !q->mq_ops)
		return -EINVAL;

	ret = queue_var_store(&nr, page, count);
	if (ret < 0)
		return ret;

	if (nr < BLKDEV_MIN_RQ)
		nr = BLKDEV_MIN_RQ;

	if (q->request_fn)
		err = blk_update_nr_requests(q, nr);
	else
		err = blk_mq_update_nr_requests(q, nr);

	if (err)
		return err;

	return ret;
}

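/*
 * read_ahead_kb is stored internally in pages; the shifts convert
 * between kilobytes and PAGE_CACHE_SIZE units.
 */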
static ssize_t queue_ra_show(struct request_queue *q, char *page)
{
	unsigned long ra_kb = q->backing_dev_info.ra_pages <<
					(PAGE_CACHE_SHIFT - 10);

	return queue_var_show(ra_kb, (page));
}

static ssize_t
queue_ra_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long ra_kb;
	ssize_t ret = queue_var_store(&ra_kb, page, count);

	if (ret < 0)
		return ret;

	q->backing_dev_info.ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10);

	return ret;
}

static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
{
	int max_sectors_kb = queue_max_sectors(q) >> 1;

	return queue_var_show(max_sectors_kb, (page));
}

static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_segments(q), (page));
}

static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.max_integrity_segments, (page));
}

static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
{
	if (blk_queue_cluster(q))
		return queue_var_show(queue_max_segment_size(q), (page));

	return queue_var_show(PAGE_CACHE_SIZE, (page));
}

static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_logical_block_size(q), page);
}

static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_physical_block_size(q), page);
}

static ssize_t queue_io_min_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_min(q), page);
}

static ssize_t queue_io_opt_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_opt(q), page);
}

static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.discard_granularity, page);
}

static ssize_t queue_discard_max_hw_show(struct request_queue *q, char *page)
{
	unsigned long long val;

	val = (unsigned long long)q->limits.max_hw_discard_sectors << 9;
	return sprintf(page, "%llu\n", val);
}

static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long)q->limits.max_discard_sectors << 9);
}

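/*
 * discard_max_bytes may be lowered below the hardware limit; the value
 * must be a multiple of discard_granularity and is capped at
 * max_hw_discard_sectors.
 */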
static ssize_t queue_discard_max_store(struct request_queue *q,
				       const char *page, size_t count)
{
	unsigned long max_discard;
	ssize_t ret = queue_var_store(&max_discard, page, count);

	if (ret < 0)
		return ret;

	if (max_discard & (q->limits.discard_granularity - 1))
		return -EINVAL;

	max_discard >>= 9;
	if (max_discard > UINT_MAX)
		return -EINVAL;

	if (max_discard > q->limits.max_hw_discard_sectors)
		max_discard = q->limits.max_hw_discard_sectors;

	q->limits.max_discard_sectors = max_discard;
	return ret;
}

static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_discard_zeroes_data(q), page);
}

static ssize_t queue_write_same_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long)q->limits.max_write_same_sectors << 9);
}

static ssize_t
queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long max_sectors_kb,
		max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
			page_kb = 1 << (PAGE_CACHE_SHIFT - 10);
	ssize_t ret = queue_var_store(&max_sectors_kb, page, count);

	if (ret < 0)
		return ret;

	if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
		return -EINVAL;

	spin_lock_irq(q->queue_lock);
	q->limits.max_sectors = max_sectors_kb << 1;
	spin_unlock_irq(q->queue_lock);

	return ret;
}

static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
{
	int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;

	return queue_var_show(max_hw_sectors_kb, (page));
}

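/*
 * Generate show/store helpers for a boolean queue flag.  When @neg is
 * set the exported value is inverted, e.g. "rotational" is the
 * negation of QUEUE_FLAG_NONROT.
 */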
#define QUEUE_SYSFS_BIT_FNS(name, flag, neg)				\
static ssize_t								\
queue_show_##name(struct request_queue *q, char *page)			\
{									\
	int bit;							\
	bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags);		\
	return queue_var_show(neg ? !bit : bit, page);			\
}									\
static ssize_t								\
queue_store_##name(struct request_queue *q, const char *page, size_t count) \
{									\
	unsigned long val;						\
	ssize_t ret;							\
	ret = queue_var_store(&val, page, count);			\
	if (ret < 0)							\
		return ret;						\
	if (neg)							\
		val = !val;						\
									\
	spin_lock_irq(q->queue_lock);					\
	if (val)							\
		queue_flag_set(QUEUE_FLAG_##flag, q);			\
	else								\
		queue_flag_clear(QUEUE_FLAG_##flag, q);			\
	spin_unlock_irq(q->queue_lock);					\
	return ret;							\
}

QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1);
QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0);
QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0);
#undef QUEUE_SYSFS_BIT_FNS

static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
{
	return queue_var_show((blk_queue_nomerges(q) << 1) |
			       blk_queue_noxmerges(q), page);
}

static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
				    size_t count)
{
	unsigned long nm;
	ssize_t ret = queue_var_store(&nm, page, count);

	if (ret < 0)
		return ret;

	spin_lock_irq(q->queue_lock);
	queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
	queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
	if (nm == 2)
		queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	else if (nm)
		queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
	spin_unlock_irq(q->queue_lock);

	return ret;
}

static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
{
	bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);
	bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags);

	return queue_var_show(set << force, page);
}

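/*
 * rq_affinity: 0 leaves completions wherever they land, 1 sets
 * QUEUE_FLAG_SAME_COMP so completions are steered back toward the
 * submitting CPU, and 2 additionally sets QUEUE_FLAG_SAME_FORCE to
 * force completion on the exact submitting CPU.
 */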
static ssize_t
queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
{
	ssize_t ret = -EINVAL;
#ifdef CONFIG_SMP
	unsigned long val;

	ret = queue_var_store(&val, page, count);
	if (ret < 0)
		return ret;

	spin_lock_irq(q->queue_lock);
	if (val == 2) {
		queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 1) {
		queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 0) {
		queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
		queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	}
	spin_unlock_irq(q->queue_lock);
#endif
	return ret;
}

static ssize_t queue_poll_show(struct request_queue *q, char *page)
{
	return queue_var_show(test_bit(QUEUE_FLAG_POLL, &q->queue_flags), page);
}

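/*
 * io_poll can only be toggled when the driver implements ->poll, i.e.
 * on blk-mq devices that support polled completions.
 */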
static ssize_t queue_poll_store(struct request_queue *q, const char *page,
				size_t count)
{
	unsigned long poll_on;
	ssize_t ret;

	if (!q->mq_ops || !q->mq_ops->poll)
		return -EINVAL;

	ret = queue_var_store(&poll_on, page, count);
	if (ret < 0)
		return ret;

	spin_lock_irq(q->queue_lock);
	if (poll_on)
		queue_flag_set(QUEUE_FLAG_POLL, q);
	else
		queue_flag_clear(QUEUE_FLAG_POLL, q);
	spin_unlock_irq(q->queue_lock);

	return ret;
}

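/*
 * Attribute table: each entry ties a sysfs file name and mode to the
 * show/store helpers above.
 */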
static struct queue_sysfs_entry queue_requests_entry = {
	.attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
	.show = queue_requests_show,
	.store = queue_requests_store,
};

static struct queue_sysfs_entry queue_ra_entry = {
	.attr = {.name = "read_ahead_kb", .mode = S_IRUGO | S_IWUSR },
	.show = queue_ra_show,
	.store = queue_ra_store,
};

static struct queue_sysfs_entry queue_max_sectors_entry = {
	.attr = {.name = "max_sectors_kb", .mode = S_IRUGO | S_IWUSR },
	.show = queue_max_sectors_show,
	.store = queue_max_sectors_store,
};

static struct queue_sysfs_entry queue_max_hw_sectors_entry = {
	.attr = {.name = "max_hw_sectors_kb", .mode = S_IRUGO },
	.show = queue_max_hw_sectors_show,
};

static struct queue_sysfs_entry queue_max_segments_entry = {
	.attr = {.name = "max_segments", .mode = S_IRUGO },
	.show = queue_max_segments_show,
};

static struct queue_sysfs_entry queue_max_integrity_segments_entry = {
	.attr = {.name = "max_integrity_segments", .mode = S_IRUGO },
	.show = queue_max_integrity_segments_show,
};

static struct queue_sysfs_entry queue_max_segment_size_entry = {
	.attr = {.name = "max_segment_size", .mode = S_IRUGO },
	.show = queue_max_segment_size_show,
};

static struct queue_sysfs_entry queue_iosched_entry = {
	.attr = {.name = "scheduler", .mode = S_IRUGO | S_IWUSR },
	.show = elv_iosched_show,
	.store = elv_iosched_store,
};

static struct queue_sysfs_entry queue_hw_sector_size_entry = {
	.attr = {.name = "hw_sector_size", .mode = S_IRUGO },
	.show = queue_logical_block_size_show,
};

static struct queue_sysfs_entry queue_logical_block_size_entry = {
	.attr = {.name = "logical_block_size", .mode = S_IRUGO },
	.show = queue_logical_block_size_show,
};

static struct queue_sysfs_entry queue_physical_block_size_entry = {
	.attr = {.name = "physical_block_size", .mode = S_IRUGO },
	.show = queue_physical_block_size_show,
};

static struct queue_sysfs_entry queue_io_min_entry = {
	.attr = {.name = "minimum_io_size", .mode = S_IRUGO },
	.show = queue_io_min_show,
};

static struct queue_sysfs_entry queue_io_opt_entry = {
	.attr = {.name = "optimal_io_size", .mode = S_IRUGO },
	.show = queue_io_opt_show,
};

static struct queue_sysfs_entry queue_discard_granularity_entry = {
	.attr = {.name = "discard_granularity", .mode = S_IRUGO },
	.show = queue_discard_granularity_show,
};

static struct queue_sysfs_entry queue_discard_max_hw_entry = {
	.attr = {.name = "discard_max_hw_bytes", .mode = S_IRUGO },
	.show = queue_discard_max_hw_show,
};

static struct queue_sysfs_entry queue_discard_max_entry = {
	.attr = {.name = "discard_max_bytes", .mode = S_IRUGO | S_IWUSR },
	.show = queue_discard_max_show,
	.store = queue_discard_max_store,
};

static struct queue_sysfs_entry queue_discard_zeroes_data_entry = {
	.attr = {.name = "discard_zeroes_data", .mode = S_IRUGO },
	.show = queue_discard_zeroes_data_show,
};

static struct queue_sysfs_entry queue_write_same_max_entry = {
	.attr = {.name = "write_same_max_bytes", .mode = S_IRUGO },
	.show = queue_write_same_max_show,
};

static struct queue_sysfs_entry queue_nonrot_entry = {
	.attr = {.name = "rotational", .mode = S_IRUGO | S_IWUSR },
	.show = queue_show_nonrot,
	.store = queue_store_nonrot,
};

static struct queue_sysfs_entry queue_nomerges_entry = {
	.attr = {.name = "nomerges", .mode = S_IRUGO | S_IWUSR },
	.show = queue_nomerges_show,
	.store = queue_nomerges_store,
};

static struct queue_sysfs_entry queue_rq_affinity_entry = {
	.attr = {.name = "rq_affinity", .mode = S_IRUGO | S_IWUSR },
	.show = queue_rq_affinity_show,
	.store = queue_rq_affinity_store,
};

static struct queue_sysfs_entry queue_iostats_entry = {
	.attr = {.name = "iostats", .mode = S_IRUGO | S_IWUSR },
	.show = queue_show_iostats,
	.store = queue_store_iostats,
};

static struct queue_sysfs_entry queue_random_entry = {
	.attr = {.name = "add_random", .mode = S_IRUGO | S_IWUSR },
	.show = queue_show_random,
	.store = queue_store_random,
};

static struct queue_sysfs_entry queue_poll_entry = {
	.attr = {.name = "io_poll", .mode = S_IRUGO | S_IWUSR },
	.show = queue_poll_show,
	.store = queue_poll_store,
};

static struct attribute *default_attrs[] = {
	&queue_requests_entry.attr,
	&queue_ra_entry.attr,
	&queue_max_hw_sectors_entry.attr,
	&queue_max_sectors_entry.attr,
	&queue_max_segments_entry.attr,
	&queue_max_integrity_segments_entry.attr,
	&queue_max_segment_size_entry.attr,
	&queue_iosched_entry.attr,
	&queue_hw_sector_size_entry.attr,
	&queue_logical_block_size_entry.attr,
	&queue_physical_block_size_entry.attr,
	&queue_io_min_entry.attr,
	&queue_io_opt_entry.attr,
	&queue_discard_granularity_entry.attr,
	&queue_discard_max_entry.attr,
	&queue_discard_max_hw_entry.attr,
	&queue_discard_zeroes_data_entry.attr,
	&queue_write_same_max_entry.attr,
	&queue_nonrot_entry.attr,
	&queue_nomerges_entry.attr,
	&queue_rq_affinity_entry.attr,
	&queue_iostats_entry.attr,
	&queue_random_entry.attr,
	&queue_poll_entry.attr,
	NULL,
};

#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)

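/*
 * sysfs show/store entry points.  Both serialize on sysfs_lock and
 * refuse access once the queue has been marked dying.
 */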
static ssize_t
queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);
	ssize_t res;

	if (!entry->show)
		return -EIO;
	mutex_lock(&q->sysfs_lock);
	if (blk_queue_dying(q)) {
		mutex_unlock(&q->sysfs_lock);
		return -ENOENT;
	}
	res = entry->show(q, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t
queue_attr_store(struct kobject *kobj, struct attribute *attr,
		 const char *page, size_t length)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct request_queue *q;
	ssize_t res;

	if (!entry->store)
		return -EIO;

	q = container_of(kobj, struct request_queue, kobj);
	mutex_lock(&q->sysfs_lock);
	if (blk_queue_dying(q)) {
		mutex_unlock(&q->sysfs_lock);
		return -ENOENT;
	}
	res = entry->store(q, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

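/*
 * The queue structure is freed via RCU so that code still dereferencing
 * it under rcu_read_lock() (e.g. io context teardown) stays safe until
 * a grace period has elapsed.
 */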
static void blk_free_queue_rcu(struct rcu_head *rcu_head)
{
	struct request_queue *q = container_of(rcu_head, struct request_queue,
					       rcu_head);
	kmem_cache_free(blk_requestq_cachep, q);
}

/**
 * blk_release_queue - release a &struct request_queue when it is no longer needed
 * @kobj: the kobj belonging to the request queue to be released
 *
 * Description:
 *     blk_release_queue is the pair to blk_init_queue() or
 *     blk_queue_make_request().  It should be called when a request queue is
 *     being released; typically when a block device is being de-registered.
 *     Currently, its primary task is to free all the &struct request
 *     structures that were allocated to the queue and the queue itself.
 *
 * Note:
 *     The low level driver must have finished any outstanding requests first
 *     via blk_cleanup_queue().
 **/
static void blk_release_queue(struct kobject *kobj)
{
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);

	bdi_exit(&q->backing_dev_info);
	blkcg_exit_queue(q);

	if (q->elevator) {
		spin_lock_irq(q->queue_lock);
		ioc_clear_queue(q);
		spin_unlock_irq(q->queue_lock);
		elevator_exit(q->elevator);
	}

	blk_exit_rl(&q->root_rl);

	if (q->queue_tags)
		__blk_queue_free_tags(q);

	if (!q->mq_ops)
		blk_free_flush_queue(q->fq);
	else
		blk_mq_release(q);

	blk_trace_shutdown(q);

	if (q->bio_split)
		bioset_free(q->bio_split);

	ida_simple_remove(&blk_queue_ida, q->id);
	call_rcu(&q->rcu_head, blk_free_queue_rcu);
}

static const struct sysfs_ops queue_sysfs_ops = {
	.show	= queue_attr_show,
	.store	= queue_attr_store,
};

struct kobj_type blk_queue_ktype = {
	.sysfs_ops	= &queue_sysfs_ops,
	.default_attrs	= default_attrs,
	.release	= blk_release_queue,
};

int blk_register_queue(struct gendisk *disk)
{
	int ret;
	struct device *dev = disk_to_dev(disk);
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return -ENXIO;

	/*
	 * SCSI probing may synchronously create and destroy a lot of
	 * request_queues for non-existent devices.  Shutting down a fully
	 * functional queue takes measurable wallclock time as RCU grace
	 * periods are involved.  To avoid excessive latency in these
	 * cases, a request_queue starts out in a degraded mode which is
	 * faster to shut down and is made fully functional here as
	 * request_queues for non-existent devices never get registered.
	 */
	if (!blk_queue_init_done(q)) {
		queue_flag_set_unlocked(QUEUE_FLAG_INIT_DONE, q);
		percpu_ref_switch_to_percpu(&q->q_usage_counter);
		blk_queue_bypass_end(q);
	}

	ret = blk_trace_init_sysfs(dev);
	if (ret)
		return ret;

	ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
	if (ret < 0) {
		blk_trace_remove_sysfs(dev);
		return ret;
	}

	kobject_uevent(&q->kobj, KOBJ_ADD);

	if (q->mq_ops)
		blk_mq_register_disk(disk);

	if (!q->request_fn)
		return 0;

	ret = elv_register_queue(q);
	if (ret) {
		kobject_uevent(&q->kobj, KOBJ_REMOVE);
		kobject_del(&q->kobj);
		blk_trace_remove_sysfs(dev);
		kobject_put(&dev->kobj);
		return ret;
	}

	return 0;
}

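/*
 * Undo blk_register_queue(): tear the sysfs pieces down in the reverse
 * order of registration.
 */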
void blk_unregister_queue(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return;

	if (q->mq_ops)
		blk_mq_unregister_disk(disk);

	if (q->request_fn)
		elv_unregister_queue(q);

	kobject_uevent(&q->kobj, KOBJ_REMOVE);
	kobject_del(&q->kobj);
	blk_trace_remove_sysfs(disk_to_dev(disk));
	kobject_put(&disk_to_dev(disk)->kobj);
}