// SPDX-License-Identifier: GPL-2.0
/*
 *  Block device elevator/IO-scheduler.
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *
 * 30042000 Jens Axboe <axboe@kernel.dk> :
 *
 * Split the elevator a bit so that it is possible to choose a different
 * one or even write a new "plug in". There are three pieces:
 * - elevator_fn, inserts a new request in the queue list
 * - elevator_merge_fn, decides whether a new buffer can be merged with
 *   an existing request
 * - elevator_dequeue_fn, called when a request is taken off the active list
 *
 * 20082000 Dave Jones <davej@suse.de> :
 * Removed tests for max-bomb-segments, which was breaking elvtune
 *  when run without -bN
 *
 * Jens:
 * - Rework again to work with bio instead of buffer_heads
 * - loose bi_dev comparisons, partition handling is right now
 * - completely modularize elevator setup and teardown
 *
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/blktrace_api.h>
#include <linux/hash.h>
#include <linux/uaccess.h>
#include <linux/pm_runtime.h>
#include <linux/blk-cgroup.h>

#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq-sched.h"
#include "blk-pm.h"
#include "blk-wbt.h"

static DEFINE_SPINLOCK(elv_list_lock);
static LIST_HEAD(elv_list);

/*
 * Merge hash stuff.
 */
#define rq_hash_key(rq)		(blk_rq_pos(rq) + blk_rq_sectors(rq))

/*
 * Query io scheduler to see if the current process issuing bio may be
 * merged with rq.
 */
static int elv_iosched_allow_bio_merge(struct request *rq, struct bio *bio)
{
	struct request_queue *q = rq->q;
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.allow_merge)
		return e->type->ops.allow_merge(q, rq, bio);

	return 1;
}

/*
 * can we safely merge with this request?
 */
bool elv_bio_merge_ok(struct request *rq, struct bio *bio)
{
	if (!blk_rq_merge_ok(rq, bio))
		return false;

	if (!elv_iosched_allow_bio_merge(rq, bio))
		return false;

	return true;
}
EXPORT_SYMBOL(elv_bio_merge_ok);

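/*
 * Return true if 'name' matches the scheduler's canonical name or its
 * optional alias (mq-deadline, for instance, also registers the alias
 * "deadline").
 */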
static bool elevator_match(const struct elevator_type *e, const char *name)
{
	if (!strcmp(e->elevator_name, name))
		return true;
	if (e->elevator_alias && !strcmp(e->elevator_alias, name))
		return true;

	return false;
}

/*
 * Return scheduler with name 'name'
 */
static struct elevator_type *elevator_find(const char *name)
{
	struct elevator_type *e;

	list_for_each_entry(e, &elv_list, list) {
		if (elevator_match(e, name))
			return e;
	}

	return NULL;
}

static void elevator_put(struct elevator_type *e)
{
	module_put(e->elevator_owner);
}

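/*
 * Look up an elevator type by name and take a reference on its module.
 * If it is not registered yet and @try_loading is true, try to load the
 * "<name>-iosched" module and look again.
 */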
static struct elevator_type *elevator_get(struct request_queue *q,
					  const char *name, bool try_loading)
{
	struct elevator_type *e;

	spin_lock(&elv_list_lock);

	e = elevator_find(name);
	if (!e && try_loading) {
		spin_unlock(&elv_list_lock);
		request_module("%s-iosched", name);
		spin_lock(&elv_list_lock);
		e = elevator_find(name);
	}

	if (e && !try_module_get(e->elevator_owner))
		e = NULL;

	spin_unlock(&elv_list_lock);
	return e;
}

static struct kobj_type elv_ktype;

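/*
 * Allocate and initialise an elevator_queue for @q on its home NUMA node.
 * The embedded kobject is released through elevator_release().
 */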
struct elevator_queue *elevator_alloc(struct request_queue *q,
				  struct elevator_type *e)
{
	struct elevator_queue *eq;

	eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, q->node);
	if (unlikely(!eq))
		return NULL;

	eq->type = e;
	kobject_init(&eq->kobj, &elv_ktype);
	mutex_init(&eq->sysfs_lock);
	hash_init(eq->hash);

	return eq;
}
EXPORT_SYMBOL(elevator_alloc);

static void elevator_release(struct kobject *kobj)
{
	struct elevator_queue *e;

	e = container_of(kobj, struct elevator_queue, kobj);
	elevator_put(e->type);
	kfree(e);
}

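/*
 * Tear down the scheduler instance attached to @q and drop our reference
 * on its kobject; once the last reference goes away the elevator_queue is
 * freed via elevator_release().
 */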
void __elevator_exit(struct request_queue *q, struct elevator_queue *e)
{
	mutex_lock(&e->sysfs_lock);
	if (e->type->ops.exit_sched)
		blk_mq_exit_sched(q, e);
	mutex_unlock(&e->sysfs_lock);

	kobject_put(&e->kobj);
}

static inline void __elv_rqhash_del(struct request *rq)
{
	hash_del(&rq->hash);
	rq->rq_flags &= ~RQF_HASHED;
}

void elv_rqhash_del(struct request_queue *q, struct request *rq)
{
	if (ELV_ON_HASH(rq))
		__elv_rqhash_del(rq);
}
EXPORT_SYMBOL_GPL(elv_rqhash_del);

void elv_rqhash_add(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	BUG_ON(ELV_ON_HASH(rq));
	hash_add(e->hash, &rq->hash, rq_hash_key(rq));
	rq->rq_flags |= RQF_HASHED;
}
EXPORT_SYMBOL_GPL(elv_rqhash_add);

void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
{
	__elv_rqhash_del(rq);
	elv_rqhash_add(q, rq);
}

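/*
 * Find a request whose hash key (its end sector) equals @offset, i.e. a
 * candidate that a new bio starting at @offset could be back merged into.
 * Entries that are no longer mergeable are pruned from the hash on the way.
 */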
struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
{
	struct elevator_queue *e = q->elevator;
	struct hlist_node *next;
	struct request *rq;

	hash_for_each_possible_safe(e->hash, rq, next, hash, offset) {
		BUG_ON(!ELV_ON_HASH(rq));

		if (unlikely(!rq_mergeable(rq))) {
			__elv_rqhash_del(rq);
			continue;
		}

		if (rq_hash_key(rq) == offset)
			return rq;
	}

	return NULL;
}

/*
 * RB-tree support functions for inserting/lookup/removal of requests
 * in a sorted RB tree.
 */
void elv_rb_add(struct rb_root *root, struct request *rq)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct request *__rq;

	while (*p) {
		parent = *p;
		__rq = rb_entry(parent, struct request, rb_node);

		if (blk_rq_pos(rq) < blk_rq_pos(__rq))
			p = &(*p)->rb_left;
		else if (blk_rq_pos(rq) >= blk_rq_pos(__rq))
			p = &(*p)->rb_right;
	}

	rb_link_node(&rq->rb_node, parent, p);
	rb_insert_color(&rq->rb_node, root);
}
EXPORT_SYMBOL(elv_rb_add);

void elv_rb_del(struct rb_root *root, struct request *rq)
{
	BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
	rb_erase(&rq->rb_node, root);
	RB_CLEAR_NODE(&rq->rb_node);
}
EXPORT_SYMBOL(elv_rb_del);

struct request *elv_rb_find(struct rb_root *root, sector_t sector)
{
	struct rb_node *n = root->rb_node;
	struct request *rq;

	while (n) {
		rq = rb_entry(n, struct request, rb_node);

		if (sector < blk_rq_pos(rq))
			n = n->rb_left;
		else if (sector > blk_rq_pos(rq))
			n = n->rb_right;
		else
			return rq;
	}

	return NULL;
}
EXPORT_SYMBOL(elv_rb_find);

enum elv_merge elv_merge(struct request_queue *q, struct request **req,
		struct bio *bio)
{
	struct elevator_queue *e = q->elevator;
	struct request *__rq;

	/*
	 * Levels of merges:
	 *	nomerges:  No merges at all attempted
	 *	noxmerges: Only simple one-hit cache try
	 *	merges:	   All merge tries attempted
	 */
	if (blk_queue_nomerges(q) || !bio_mergeable(bio))
		return ELEVATOR_NO_MERGE;

	/*
	 * First try one-hit cache.
	 */
	if (q->last_merge && elv_bio_merge_ok(q->last_merge, bio)) {
		enum elv_merge ret = blk_try_merge(q->last_merge, bio);

		if (ret != ELEVATOR_NO_MERGE) {
			*req = q->last_merge;
			return ret;
		}
	}

	if (blk_queue_noxmerges(q))
		return ELEVATOR_NO_MERGE;

	/*
	 * See if our hash lookup can find a potential backmerge.
	 */
	__rq = elv_rqhash_find(q, bio->bi_iter.bi_sector);
	if (__rq && elv_bio_merge_ok(__rq, bio)) {
		*req = __rq;
		return ELEVATOR_BACK_MERGE;
	}

	if (e->type->ops.request_merge)
		return e->type->ops.request_merge(q, req, bio);

	return ELEVATOR_NO_MERGE;
}

/*
 * Attempt to do an insertion back merge. Only check for the case where
 * we can append 'rq' to an existing request, so we can throw 'rq' away
 * afterwards.
 *
 * Returns true if we merged, false otherwise
 */
bool elv_attempt_insert_merge(struct request_queue *q, struct request *rq)
{
	struct request *__rq;
	bool ret;

	if (blk_queue_nomerges(q))
		return false;

	/*
	 * First try one-hit cache.
	 */
	if (q->last_merge && blk_attempt_req_merge(q, q->last_merge, rq))
		return true;

	if (blk_queue_noxmerges(q))
		return false;

	ret = false;
	/*
	 * See if our hash lookup can find a potential backmerge.
	 */
	while (1) {
		__rq = elv_rqhash_find(q, blk_rq_pos(rq));
		if (!__rq || !blk_attempt_req_merge(q, __rq, rq))
			break;

		/* The merged request could be merged with others, try again */
		ret = true;
		rq = __rq;
	}

	return ret;
}

void elv_merged_request(struct request_queue *q, struct request *rq,
		enum elv_merge type)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.request_merged)
		e->type->ops.request_merged(q, rq, type);

	if (type == ELEVATOR_BACK_MERGE)
		elv_rqhash_reposition(q, rq);

	q->last_merge = rq;
}

void elv_merge_requests(struct request_queue *q, struct request *rq,
			     struct request *next)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.requests_merged)
		e->type->ops.requests_merged(q, rq, next);

	elv_rqhash_reposition(q, rq);
	q->last_merge = rq;
}

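/*
 * elv_latter_request() and elv_former_request() ask the scheduler for the
 * request sorted after/before @rq, if it maintains such an ordering.
 */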
struct request *elv_latter_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.next_request)
		return e->type->ops.next_request(q, rq);

	return NULL;
}

struct request *elv_former_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.former_request)
		return e->type->ops.former_request(q, rq);

	return NULL;
}

#define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)

static ssize_t
elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct elv_fs_entry *entry = to_elv(attr);
	struct elevator_queue *e;
	ssize_t error;

	if (!entry->show)
		return -EIO;

	e = container_of(kobj, struct elevator_queue, kobj);
	mutex_lock(&e->sysfs_lock);
	error = e->type ? entry->show(e, page) : -ENOENT;
	mutex_unlock(&e->sysfs_lock);
	return error;
}

static ssize_t
elv_attr_store(struct kobject *kobj, struct attribute *attr,
	       const char *page, size_t length)
{
	struct elv_fs_entry *entry = to_elv(attr);
	struct elevator_queue *e;
	ssize_t error;

	if (!entry->store)
		return -EIO;

	e = container_of(kobj, struct elevator_queue, kobj);
	mutex_lock(&e->sysfs_lock);
	error = e->type ? entry->store(e, page, length) : -ENOENT;
	mutex_unlock(&e->sysfs_lock);
	return error;
}

static const struct sysfs_ops elv_sysfs_ops = {
	.show	= elv_attr_show,
	.store	= elv_attr_store,
};

static struct kobj_type elv_ktype = {
	.sysfs_ops	= &elv_sysfs_ops,
	.release	= elevator_release,
};

/*
 * elv_register_queue is called from either blk_register_queue or
 * elevator_switch; an elevator switch is prevented from happening in
 * these two paths, so it is safe to not hold q->sysfs_lock.
 */
int elv_register_queue(struct request_queue *q, bool uevent)
{
	struct elevator_queue *e = q->elevator;
	int error;

	error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched");
	if (!error) {
		struct elv_fs_entry *attr = e->type->elevator_attrs;
		if (attr) {
			while (attr->attr.name) {
				if (sysfs_create_file(&e->kobj, &attr->attr))
					break;
				attr++;
			}
		}
		if (uevent)
			kobject_uevent(&e->kobj, KOBJ_ADD);

		mutex_lock(&q->sysfs_lock);
		e->registered = 1;
		mutex_unlock(&q->sysfs_lock);
	}
	return error;
}

/*
 * elv_unregister_queue is called from either blk_unregister_queue or
 * elevator_switch; an elevator switch is prevented from happening in
 * these two paths, so it is safe to not hold q->sysfs_lock.
 */
void elv_unregister_queue(struct request_queue *q)
{
	if (q) {
		struct elevator_queue *e = q->elevator;

		kobject_uevent(&e->kobj, KOBJ_REMOVE);
		kobject_del(&e->kobj);

		mutex_lock(&q->sysfs_lock);
		e->registered = 0;
		/* Re-enable throttling in case elevator disabled it */
		wbt_enable_default(q);
		mutex_unlock(&q->sysfs_lock);
	}
}

int elv_register(struct elevator_type *e)
{
	/* create icq_cache if requested */
	if (e->icq_size) {
		if (WARN_ON(e->icq_size < sizeof(struct io_cq)) ||
		    WARN_ON(e->icq_align < __alignof__(struct io_cq)))
			return -EINVAL;

		snprintf(e->icq_cache_name, sizeof(e->icq_cache_name),
			 "%s_io_cq", e->elevator_name);
		e->icq_cache = kmem_cache_create(e->icq_cache_name, e->icq_size,
						 e->icq_align, 0, NULL);
		if (!e->icq_cache)
			return -ENOMEM;
	}

	/* register, don't allow duplicate names */
	spin_lock(&elv_list_lock);
	if (elevator_find(e->elevator_name)) {
		spin_unlock(&elv_list_lock);
		kmem_cache_destroy(e->icq_cache);
		return -EBUSY;
	}
	list_add_tail(&e->list, &elv_list);
	spin_unlock(&elv_list_lock);

	printk(KERN_INFO "io scheduler %s registered\n", e->elevator_name);

	return 0;
}
EXPORT_SYMBOL_GPL(elv_register);

void elv_unregister(struct elevator_type *e)
{
	/* unregister */
	spin_lock(&elv_list_lock);
	list_del_init(&e->list);
	spin_unlock(&elv_list_lock);

	/*
	 * Destroy icq_cache if it exists.  icq's are RCU managed.  Make
	 * sure all RCU operations are complete before proceeding.
	 */
	if (e->icq_cache) {
		rcu_barrier();
		kmem_cache_destroy(e->icq_cache);
		e->icq_cache = NULL;
	}
}
EXPORT_SYMBOL_GPL(elv_unregister);

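/*
 * Switch @q to the new scheduler type (or to "none" when @new_e is NULL):
 * unregister and tear down the old instance, then initialise and register
 * the new one. Called with q->sysfs_lock held; the lock is dropped and
 * re-acquired around the sysfs register/unregister steps.
 */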
int elevator_switch_mq(struct request_queue *q,
			      struct elevator_type *new_e)
{
	int ret;

	lockdep_assert_held(&q->sysfs_lock);

	if (q->elevator) {
		if (q->elevator->registered) {
			mutex_unlock(&q->sysfs_lock);

			/*
			 * Concurrent elevator switch can't happen because
			 * sysfs write is always exclusively on same file.
			 *
			 * Also the elevator queue won't be freed after
			 * sysfs_lock is released because kobject_del() in
			 * blk_unregister_queue() waits for completion of
			 * .store & .show on its attributes.
			 */
			elv_unregister_queue(q);

			mutex_lock(&q->sysfs_lock);
		}
		ioc_clear_queue(q);
		elevator_exit(q, q->elevator);

		/*
		 * sysfs_lock may be dropped, so re-check if queue is
		 * unregistered. If yes, don't switch to new elevator
		 * any more.
		 */
		if (!blk_queue_registered(q))
			return 0;
	}

	ret = blk_mq_init_sched(q, new_e);
	if (ret)
		goto out;

	if (new_e) {
		mutex_unlock(&q->sysfs_lock);

		ret = elv_register_queue(q, true);

		mutex_lock(&q->sysfs_lock);
		if (ret) {
			elevator_exit(q, q->elevator);
			goto out;
		}
	}

	if (new_e)
		blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);
	else
		blk_add_trace_msg(q, "elv switch: none");

out:
	return ret;
}

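/*
 * A queue only supports I/O scheduling if its tag set does not carry
 * BLK_MQ_F_NO_SCHED, which is set by drivers whose queues must not have
 * a scheduler attached.
 */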
static inline bool elv_support_iosched(struct request_queue *q)
{
	if (q->tag_set && (q->tag_set->flags & BLK_MQ_F_NO_SCHED))
		return false;
	return true;
}

/*
 * For blk-mq devices supporting IO scheduling, we default to using mq-deadline,
 * if available, for single queue devices. If deadline isn't available OR we
 * have multiple queues, default to "none".
 */
int elevator_init_mq(struct request_queue *q)
{
	struct elevator_type *e;
	int err = 0;

	if (!elv_support_iosched(q))
		return 0;

	if (q->nr_hw_queues != 1)
		return 0;

	WARN_ON_ONCE(test_bit(QUEUE_FLAG_REGISTERED, &q->queue_flags));

	if (unlikely(q->elevator))
		goto out;

	e = elevator_get(q, "mq-deadline", false);
	if (!e)
		goto out;

	err = blk_mq_init_sched(q, e);
	if (err)
		elevator_put(e);
out:
	return err;
}

/*
 * Switch to new_e io scheduler. Be careful not to introduce deadlocks -
 * we don't free the old io scheduler before we have allocated what we
 * need for the new one. This way we have a chance of going back to the
 * old one if the new one fails init for some reason.
 */
static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
{
	int err;

	lockdep_assert_held(&q->sysfs_lock);

	blk_mq_freeze_queue(q);
	blk_mq_quiesce_queue(q);

	err = elevator_switch_mq(q, new_e);

	blk_mq_unquiesce_queue(q);
	blk_mq_unfreeze_queue(q);

	return err;
}

/*
 * Switch this queue to the given IO scheduler.
 */
static int __elevator_change(struct request_queue *q, const char *name)
{
	char elevator_name[ELV_NAME_MAX];
	struct elevator_type *e;

	/* Make sure queue is not in the middle of being removed */
	if (!blk_queue_registered(q))
		return -ENOENT;

	/*
	 * Special case for mq, turn off scheduling
	 */
	if (!strncmp(name, "none", 4)) {
		if (!q->elevator)
			return 0;
		return elevator_switch(q, NULL);
	}

	strlcpy(elevator_name, name, sizeof(elevator_name));
	e = elevator_get(q, strstrip(elevator_name), true);
	if (!e)
		return -EINVAL;

	if (q->elevator && elevator_match(q->elevator->type, elevator_name)) {
		elevator_put(e);
		return 0;
	}

	return elevator_switch(q, e);
}

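/*
 * sysfs "scheduler" store handler: writing a scheduler name to
 * /sys/block/<dev>/queue/scheduler (e.g. "echo kyber > .../scheduler")
 * switches @q to that scheduler, and writing "none" detaches it.
 */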
ssize_t elv_iosched_store(struct request_queue *q, const char *name,
			  size_t count)
{
	int ret;

	if (!queue_is_mq(q) || !elv_support_iosched(q))
		return count;

	ret = __elevator_change(q, name);
	if (!ret)
		return count;

	return ret;
}

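/*
 * sysfs "scheduler" show handler: list the registered schedulers with the
 * active one in brackets, e.g. "[mq-deadline] kyber bfq none".
 */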
ssize_t elv_iosched_show(struct request_queue *q, char *name)
{
	struct elevator_queue *e = q->elevator;
	struct elevator_type *elv = NULL;
	struct elevator_type *__e;
	int len = 0;

	if (!queue_is_mq(q))
		return sprintf(name, "none\n");

	if (!q->elevator)
		len += sprintf(name+len, "[none] ");
	else
		elv = e->type;

	spin_lock(&elv_list_lock);
	list_for_each_entry(__e, &elv_list, list) {
		if (elv && elevator_match(elv, __e->elevator_name)) {
			len += sprintf(name+len, "[%s] ", elv->elevator_name);
			continue;
		}
		if (elv_support_iosched(q))
			len += sprintf(name+len, "%s ", __e->elevator_name);
	}
	spin_unlock(&elv_list_lock);

	if (q->elevator)
		len += sprintf(name+len, "none");

	len += sprintf(len+name, "\n");
	return len;
}

struct request *elv_rb_former_request(struct request_queue *q,
				      struct request *rq)
{
	struct rb_node *rbprev = rb_prev(&rq->rb_node);

	if (rbprev)
		return rb_entry_rq(rbprev);

	return NULL;
}
EXPORT_SYMBOL(elv_rb_former_request);

struct request *elv_rb_latter_request(struct request_queue *q,
				      struct request *rq)
{
	struct rb_node *rbnext = rb_next(&rq->rb_node);

	if (rbnext)
		return rb_entry_rq(rbnext);

	return NULL;
}
EXPORT_SYMBOL(elv_rb_latter_request);