/*
 * Block device elevator/IO-scheduler.
 *
 * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *
 * 30042000 Jens Axboe <axboe@kernel.dk> :
 *
 * Split the elevator a bit so that it is possible to choose a different
 * one or even write a new "plug in". There are three pieces:
 * - elevator_fn, inserts a new request in the queue list
 * - elevator_merge_fn, decides whether a new buffer can be merged with
 *   an existing request
 * - elevator_dequeue_fn, called when a request is taken off the active list
 *
 * 20082000 Dave Jones <davej@suse.de> :
 * Removed tests for max-bomb-segments, which was breaking elvtune
 * when run without -bN
 *
 * Jens:
 * - Rework again to work with bio instead of buffer_heads
 * - lose bi_dev comparisons, partition handling is right now
 * - completely modularize elevator setup and teardown
 *
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/blktrace_api.h>
#include <linux/hash.h>
#include <linux/uaccess.h>
#include <linux/pm_runtime.h>
#include <linux/blk-cgroup.h>

#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq-sched.h"

static DEFINE_SPINLOCK(elv_list_lock);
static LIST_HEAD(elv_list);

/*
 * Merge hash stuff.
 */
#define rq_hash_key(rq)		(blk_rq_pos(rq) + blk_rq_sectors(rq))

/*
 * Query the io scheduler to see if a bio issued by the current process
 * may be merged with rq.
 */
static int elv_iosched_allow_bio_merge(struct request *rq, struct bio *bio)
{
	struct request_queue *q = rq->q;
	struct elevator_queue *e = q->elevator;

	if (e->uses_mq && e->type->ops.mq.allow_merge)
		return e->type->ops.mq.allow_merge(q, rq, bio);
	else if (!e->uses_mq && e->type->ops.sq.elevator_allow_bio_merge_fn)
		return e->type->ops.sq.elevator_allow_bio_merge_fn(q, rq, bio);

	return 1;
}

/*
 * can we safely merge with this request?
 */
bool elv_bio_merge_ok(struct request *rq, struct bio *bio)
{
	if (!blk_rq_merge_ok(rq, bio))
		return false;

	if (!elv_iosched_allow_bio_merge(rq, bio))
		return false;

	return true;
}
EXPORT_SYMBOL(elv_bio_merge_ok);

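/*
 * For illustration only: cfq, for instance, refuses to merge a bio into
 * a request that was issued by a different process.  A minimal
 * allow_merge hook in that spirit (hypothetical "foo" scheduler, not
 * part of this file) could be as simple as:
 *
 *	static int foo_allow_bio_merge(struct request_queue *q,
 *				       struct request *rq, struct bio *bio)
 *	{
 *		return rq_data_dir(rq) == bio_data_dir(bio);
 *	}
 *
 * elv_bio_merge_ok() only consults such a hook after the generic
 * blk_rq_merge_ok() checks have already passed.
 */
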
static struct elevator_type *elevator_find(const char *name)
{
	struct elevator_type *e;

	list_for_each_entry(e, &elv_list, list) {
		if (!strcmp(e->elevator_name, name))
			return e;
	}

	return NULL;
}

static void elevator_put(struct elevator_type *e)
{
	module_put(e->elevator_owner);
}

static struct elevator_type *elevator_get(const char *name, bool try_loading)
{
	struct elevator_type *e;

	spin_lock(&elv_list_lock);

	e = elevator_find(name);
	if (!e && try_loading) {
		spin_unlock(&elv_list_lock);
		request_module("%s-iosched", name);
		spin_lock(&elv_list_lock);
		e = elevator_find(name);
	}

	if (e && !try_module_get(e->elevator_owner))
		e = NULL;

	spin_unlock(&elv_list_lock);

	return e;
}
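
/*
 * Note that elevator_get() pins the scheduler module via
 * try_module_get() while holding elv_list_lock, so the type cannot be
 * unloaded between lookup and taking the reference; every successful
 * elevator_get() must be paired with an elevator_put().
 */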

static char chosen_elevator[ELV_NAME_MAX];

static int __init elevator_setup(char *str)
{
	/*
	 * Be backwards-compatible with previous kernels, so users
	 * won't get the wrong elevator.
	 */
	strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
	return 1;
}

__setup("elevator=", elevator_setup);

/* called during boot to load the elevator chosen by the elevator param */
void __init load_default_elevator_module(void)
{
	struct elevator_type *e;

	if (!chosen_elevator[0])
		return;

	spin_lock(&elv_list_lock);
	e = elevator_find(chosen_elevator);
	spin_unlock(&elv_list_lock);

	if (!e)
		request_module("%s-iosched", chosen_elevator);
}

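/*
 * In practice the two hooks above mean that booting with, say,
 * "elevator=deadline" both records the choice for elevator_init() to
 * fall back on and triggers an early request_module() so the scheduler
 * is already available when queues are set up.
 */
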
static struct kobj_type elv_ktype;

struct elevator_queue *elevator_alloc(struct request_queue *q,
				  struct elevator_type *e)
{
	struct elevator_queue *eq;

	eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, q->node);
	if (unlikely(!eq))
		return NULL;

	eq->type = e;
	kobject_init(&eq->kobj, &elv_ktype);
	mutex_init(&eq->sysfs_lock);
	hash_init(eq->hash);
	eq->uses_mq = e->uses_mq;

	return eq;
}
EXPORT_SYMBOL(elevator_alloc);

static void elevator_release(struct kobject *kobj)
{
	struct elevator_queue *e;

	e = container_of(kobj, struct elevator_queue, kobj);
	elevator_put(e->type);
	kfree(e);
}

int elevator_init(struct request_queue *q, char *name)
{
	struct elevator_type *e = NULL;
	int err;

	/*
	 * q->sysfs_lock must be held to provide mutual exclusion between
	 * elevator_switch() and here.
	 */
	lockdep_assert_held(&q->sysfs_lock);

	if (unlikely(q->elevator))
		return 0;

	INIT_LIST_HEAD(&q->queue_head);
	q->last_merge = NULL;
	q->end_sector = 0;
	q->boundary_rq = NULL;

	if (name) {
		e = elevator_get(name, true);
		if (!e)
			return -EINVAL;
	}

	/*
	 * Use the default elevator specified by config boot param or
	 * config option. Don't try to load modules as we could be running
	 * off async and request_module() isn't allowed from async.
	 */
	if (!e && *chosen_elevator) {
		e = elevator_get(chosen_elevator, false);
		if (!e)
			printk(KERN_ERR "I/O scheduler %s not found\n",
							chosen_elevator);
	}

	if (!e) {
		if (q->mq_ops && q->nr_hw_queues == 1)
			e = elevator_get(CONFIG_DEFAULT_SQ_IOSCHED, false);
		else if (q->mq_ops)
			e = elevator_get(CONFIG_DEFAULT_MQ_IOSCHED, false);
		else
			e = elevator_get(CONFIG_DEFAULT_IOSCHED, false);

		if (!e) {
			printk(KERN_ERR
				"Default I/O scheduler not found. "
				"Using noop/none.\n");
			e = elevator_get("noop", false);
		}
	}

	if (e->uses_mq) {
		err = blk_mq_sched_setup(q);
		if (!err)
			err = e->ops.mq.init_sched(q, e);
	} else
		err = e->ops.sq.elevator_init_fn(q, e);
	if (err) {
		if (e->uses_mq)
			blk_mq_sched_teardown(q);
		elevator_put(e);
	}
	return err;
}
EXPORT_SYMBOL(elevator_init);

void elevator_exit(struct elevator_queue *e)
{
	mutex_lock(&e->sysfs_lock);
	if (e->uses_mq && e->type->ops.mq.exit_sched)
		e->type->ops.mq.exit_sched(e);
	else if (!e->uses_mq && e->type->ops.sq.elevator_exit_fn)
		e->type->ops.sq.elevator_exit_fn(e);
	mutex_unlock(&e->sysfs_lock);

	kobject_put(&e->kobj);
}
EXPORT_SYMBOL(elevator_exit);

static inline void __elv_rqhash_del(struct request *rq)
{
	hash_del(&rq->hash);
	rq->rq_flags &= ~RQF_HASHED;
}

void elv_rqhash_del(struct request_queue *q, struct request *rq)
{
	if (ELV_ON_HASH(rq))
		__elv_rqhash_del(rq);
}
EXPORT_SYMBOL_GPL(elv_rqhash_del);

void elv_rqhash_add(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	BUG_ON(ELV_ON_HASH(rq));
	hash_add(e->hash, &rq->hash, rq_hash_key(rq));
	rq->rq_flags |= RQF_HASHED;
}
EXPORT_SYMBOL_GPL(elv_rqhash_add);

void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
{
	__elv_rqhash_del(rq);
	elv_rqhash_add(q, rq);
}

struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
{
	struct elevator_queue *e = q->elevator;
	struct hlist_node *next;
	struct request *rq;

	hash_for_each_possible_safe(e->hash, rq, next, hash, offset) {
		BUG_ON(!ELV_ON_HASH(rq));

		if (unlikely(!rq_mergeable(rq))) {
			__elv_rqhash_del(rq);
			continue;
		}

		if (rq_hash_key(rq) == offset)
			return rq;
	}

	return NULL;
}

/*
 * RB-tree support functions for inserting/lookup/removal of requests
 * in a sorted RB tree.
 */
void elv_rb_add(struct rb_root *root, struct request *rq)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct request *__rq;

	while (*p) {
		parent = *p;
		__rq = rb_entry(parent, struct request, rb_node);

		if (blk_rq_pos(rq) < blk_rq_pos(__rq))
			p = &(*p)->rb_left;
		else if (blk_rq_pos(rq) >= blk_rq_pos(__rq))
			p = &(*p)->rb_right;
	}

	rb_link_node(&rq->rb_node, parent, p);
	rb_insert_color(&rq->rb_node, root);
}
EXPORT_SYMBOL(elv_rb_add);

void elv_rb_del(struct rb_root *root, struct request *rq)
{
	BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
	rb_erase(&rq->rb_node, root);
	RB_CLEAR_NODE(&rq->rb_node);
}
EXPORT_SYMBOL(elv_rb_del);

struct request *elv_rb_find(struct rb_root *root, sector_t sector)
{
	struct rb_node *n = root->rb_node;
	struct request *rq;

	while (n) {
		rq = rb_entry(n, struct request, rb_node);

		if (sector < blk_rq_pos(rq))
			n = n->rb_left;
		else if (sector > blk_rq_pos(rq))
			n = n->rb_right;
		else
			return rq;
	}

	return NULL;
}
EXPORT_SYMBOL(elv_rb_find);
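
/*
 * Together, elv_rb_add/del/find give single-queue schedulers a
 * sector-sorted index of pending requests.  deadline, for example,
 * keeps one tree per data direction and does, roughly:
 *
 *	elv_rb_add(&dd->sort_list[rq_data_dir(rq)], rq);
 *	...
 *	__rq = elv_rb_find(&dd->sort_list[bio_data_dir(bio)], sector);
 */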

/*
 * Insert rq into dispatch queue of q.  Queue lock must be held on
 * entry.  rq is sorted into the dispatch queue.  To be used by
 * specific elevators.
 */
void elv_dispatch_sort(struct request_queue *q, struct request *rq)
{
	sector_t boundary;
	struct list_head *entry;

	if (q->last_merge == rq)
		q->last_merge = NULL;

	elv_rqhash_del(q, rq);

	q->nr_sorted--;

	boundary = q->end_sector;
	list_for_each_prev(entry, &q->queue_head) {
		struct request *pos = list_entry_rq(entry);

		if (req_op(rq) != req_op(pos))
			break;
		if (rq_data_dir(rq) != rq_data_dir(pos))
			break;
		if (pos->rq_flags & (RQF_STARTED | RQF_SOFTBARRIER))
			break;
		if (blk_rq_pos(rq) >= boundary) {
			if (blk_rq_pos(pos) < boundary)
				continue;
		} else {
			if (blk_rq_pos(pos) >= boundary)
				break;
		}
		if (blk_rq_pos(rq) >= blk_rq_pos(pos))
			break;
	}

	list_add(&rq->queuelist, entry);
}
EXPORT_SYMBOL(elv_dispatch_sort);
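
/*
 * The scan above walks the dispatch list from the tail, so the common
 * case of roughly ascending sector order terminates after a couple of
 * comparisons; requests below q->end_sector sort behind the boundary
 * instead of forcing a backwards seek.
 */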

/*
 * Insert rq into dispatch queue of q.  Queue lock must be held on
 * entry.  rq is added to the back of the dispatch queue.  To be used by
 * specific elevators.
 */
void elv_dispatch_add_tail(struct request_queue *q, struct request *rq)
{
	if (q->last_merge == rq)
		q->last_merge = NULL;

	elv_rqhash_del(q, rq);

	q->nr_sorted--;

	q->end_sector = rq_end_sector(rq);
	q->boundary_rq = rq;
	list_add_tail(&rq->queuelist, &q->queue_head);
}
EXPORT_SYMBOL(elv_dispatch_add_tail);

int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
{
	struct elevator_queue *e = q->elevator;
	struct request *__rq;
	int ret;

	/*
	 * Levels of merges:
	 *	nomerges:  No merges at all attempted
	 *	noxmerges: Only simple one-hit cache try
	 *	merges:	   All merge tries attempted
	 */
	if (blk_queue_nomerges(q) || !bio_mergeable(bio))
		return ELEVATOR_NO_MERGE;

	/*
	 * First try one-hit cache.
	 */
	if (q->last_merge && elv_bio_merge_ok(q->last_merge, bio)) {
		ret = blk_try_merge(q->last_merge, bio);
		if (ret != ELEVATOR_NO_MERGE) {
			*req = q->last_merge;
			return ret;
		}
	}

	if (blk_queue_noxmerges(q))
		return ELEVATOR_NO_MERGE;

	/*
	 * See if our hash lookup can find a potential backmerge.
	 */
	__rq = elv_rqhash_find(q, bio->bi_iter.bi_sector);
	if (__rq && elv_bio_merge_ok(__rq, bio)) {
		*req = __rq;
		return ELEVATOR_BACK_MERGE;
	}

	if (e->uses_mq && e->type->ops.mq.request_merge)
		return e->type->ops.mq.request_merge(q, req, bio);
	else if (!e->uses_mq && e->type->ops.sq.elevator_merge_fn)
		return e->type->ops.sq.elevator_merge_fn(q, req, bio);

	return ELEVATOR_NO_MERGE;
}
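
/*
 * Callers act on the returned merge type together with *req; the
 * single-queue submission path does, in simplified form:
 *
 *	el_ret = elv_merge(q, &rq, bio);
 *	if (el_ret == ELEVATOR_BACK_MERGE)
 *		bio_attempt_back_merge(q, rq, bio);
 *	else if (el_ret == ELEVATOR_FRONT_MERGE)
 *		bio_attempt_front_merge(q, rq, bio);
 */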

/*
 * Attempt to do an insertion back merge. Only check for the case where
 * we can append 'rq' to an existing request, so we can throw 'rq' away
 * afterwards.
 *
 * Returns true if we merged, false otherwise
 */
bool elv_attempt_insert_merge(struct request_queue *q, struct request *rq)
{
	struct request *__rq;
	bool ret;

	if (blk_queue_nomerges(q))
		return false;

	/*
	 * First try one-hit cache.
	 */
	if (q->last_merge && blk_attempt_req_merge(q, q->last_merge, rq))
		return true;

	if (blk_queue_noxmerges(q))
		return false;

	ret = false;
	/*
	 * See if our hash lookup can find a potential backmerge.
	 */
	while (1) {
		__rq = elv_rqhash_find(q, blk_rq_pos(rq));
		if (!__rq || !blk_attempt_req_merge(q, __rq, rq))
			break;

		/* The merged request could be merged with others, try again */
		ret = true;
		rq = __rq;
	}

	return ret;
}

void elv_merged_request(struct request_queue *q, struct request *rq, int type)
{
	struct elevator_queue *e = q->elevator;

	if (e->uses_mq && e->type->ops.mq.request_merged)
		e->type->ops.mq.request_merged(q, rq, type);
	else if (!e->uses_mq && e->type->ops.sq.elevator_merged_fn)
		e->type->ops.sq.elevator_merged_fn(q, rq, type);

	if (type == ELEVATOR_BACK_MERGE)
		elv_rqhash_reposition(q, rq);

	q->last_merge = rq;
}

void elv_merge_requests(struct request_queue *q, struct request *rq,
			     struct request *next)
{
	struct elevator_queue *e = q->elevator;
	bool next_sorted = false;

	if (e->uses_mq && e->type->ops.mq.requests_merged)
		e->type->ops.mq.requests_merged(q, rq, next);
	else if (e->type->ops.sq.elevator_merge_req_fn) {
		next_sorted = next->rq_flags & RQF_SORTED;
		if (next_sorted)
			e->type->ops.sq.elevator_merge_req_fn(q, rq, next);
	}

	elv_rqhash_reposition(q, rq);

	if (next_sorted) {
		elv_rqhash_del(q, next);
		q->nr_sorted--;
	}

	q->last_merge = rq;
}

void elv_bio_merged(struct request_queue *q, struct request *rq,
			struct bio *bio)
{
	struct elevator_queue *e = q->elevator;

	if (WARN_ON_ONCE(e->uses_mq))
		return;

	if (e->type->ops.sq.elevator_bio_merged_fn)
		e->type->ops.sq.elevator_bio_merged_fn(q, rq, bio);
}

#ifdef CONFIG_PM
static void blk_pm_requeue_request(struct request *rq)
{
	if (rq->q->dev && !(rq->rq_flags & RQF_PM))
		rq->q->nr_pending--;
}

static void blk_pm_add_request(struct request_queue *q, struct request *rq)
{
	if (q->dev && !(rq->rq_flags & RQF_PM) && q->nr_pending++ == 0 &&
	    (q->rpm_status == RPM_SUSPENDED || q->rpm_status == RPM_SUSPENDING))
		pm_request_resume(q->dev);
}
#else
static inline void blk_pm_requeue_request(struct request *rq) {}
static inline void blk_pm_add_request(struct request_queue *q,
				      struct request *rq)
{
}
#endif
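
/*
 * q->nr_pending counts requests that should keep the device awake.
 * RQF_PM marks the PM requests themselves (suspend/resume), which are
 * excluded from the count so they can still reach a suspending device.
 */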

void elv_requeue_request(struct request_queue *q, struct request *rq)
{
	/*
	 * it already went through dequeue, we need to decrement the
	 * in_flight count again
	 */
	if (blk_account_rq(rq)) {
		q->in_flight[rq_is_sync(rq)]--;
		if (rq->rq_flags & RQF_SORTED)
			elv_deactivate_rq(q, rq);
	}

	rq->rq_flags &= ~RQF_STARTED;

	blk_pm_requeue_request(rq);

	__elv_add_request(q, rq, ELEVATOR_INSERT_REQUEUE);
}

void elv_drain_elevator(struct request_queue *q)
{
	struct elevator_queue *e = q->elevator;
	static int printed;

	if (WARN_ON_ONCE(e->uses_mq))
		return;

	lockdep_assert_held(q->queue_lock);

	while (e->type->ops.sq.elevator_dispatch_fn(q, 1))
		;
	if (q->nr_sorted && printed++ < 10) {
		printk(KERN_ERR "%s: forced dispatching is broken "
		       "(nr_sorted=%u), please report this\n",
		       q->elevator->type->elevator_name, q->nr_sorted);
	}
}
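
/*
 * Insertion points, in brief: FRONT and REQUEUE go to the head of the
 * dispatch list, BACK drains the elevator and appends, SORT hands the
 * request to the scheduler proper, SORT_MERGE first tries
 * elv_attempt_insert_merge(), and FLUSH is routed through
 * blk_insert_flush().
 */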

void __elv_add_request(struct request_queue *q, struct request *rq, int where)
{
	trace_block_rq_insert(q, rq);

	blk_pm_add_request(q, rq);

	rq->q = q;

	if (rq->rq_flags & RQF_SOFTBARRIER) {
		/* barriers are scheduling boundary, update end_sector */
		if (!blk_rq_is_passthrough(rq)) {
			q->end_sector = rq_end_sector(rq);
			q->boundary_rq = rq;
		}
	} else if (!(rq->rq_flags & RQF_ELVPRIV) &&
		    (where == ELEVATOR_INSERT_SORT ||
		     where == ELEVATOR_INSERT_SORT_MERGE))
		where = ELEVATOR_INSERT_BACK;

	switch (where) {
	case ELEVATOR_INSERT_REQUEUE:
	case ELEVATOR_INSERT_FRONT:
		rq->rq_flags |= RQF_SOFTBARRIER;
		list_add(&rq->queuelist, &q->queue_head);
		break;

	case ELEVATOR_INSERT_BACK:
		rq->rq_flags |= RQF_SOFTBARRIER;
		elv_drain_elevator(q);
		list_add_tail(&rq->queuelist, &q->queue_head);
		/*
		 * We kick the queue here for the following reasons.
		 * - The elevator might have returned NULL previously
		 *   to delay requests and returned them now.  As the
		 *   queue wasn't empty before this request, ll_rw_blk
		 *   won't run the queue on return, resulting in a hang.
		 * - Usually, back inserted requests won't be merged
		 *   with anything.  There's no point in delaying queue
		 *   processing.
		 */
		__blk_run_queue(q);
		break;

	case ELEVATOR_INSERT_SORT_MERGE:
		/*
		 * If we succeed in merging this request with one in the
		 * queue already, we are done - rq has now been freed,
		 * so no need to do anything further.
		 */
		if (elv_attempt_insert_merge(q, rq))
			break;
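		/* fall through */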
	case ELEVATOR_INSERT_SORT:
		BUG_ON(blk_rq_is_passthrough(rq));
		rq->rq_flags |= RQF_SORTED;
		q->nr_sorted++;
		if (rq_mergeable(rq)) {
			elv_rqhash_add(q, rq);
			if (!q->last_merge)
				q->last_merge = rq;
		}

		/*
		 * Some ioscheds (cfq) run q->request_fn directly, so
		 * rq cannot be accessed after calling
		 * elevator_add_req_fn.
		 */
		q->elevator->type->ops.sq.elevator_add_req_fn(q, rq);
		break;

	case ELEVATOR_INSERT_FLUSH:
		rq->rq_flags |= RQF_SOFTBARRIER;
		blk_insert_flush(rq);
		break;
	default:
		printk(KERN_ERR "%s: bad insertion point %d\n",
		       __func__, where);
		BUG();
	}
}
EXPORT_SYMBOL(__elv_add_request);

void elv_add_request(struct request_queue *q, struct request *rq, int where)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__elv_add_request(q, rq, where);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(elv_add_request);

struct request *elv_latter_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->uses_mq && e->type->ops.mq.next_request)
		return e->type->ops.mq.next_request(q, rq);
	else if (!e->uses_mq && e->type->ops.sq.elevator_latter_req_fn)
		return e->type->ops.sq.elevator_latter_req_fn(q, rq);

	return NULL;
}

struct request *elv_former_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->uses_mq && e->type->ops.mq.former_request)
		return e->type->ops.mq.former_request(q, rq);
	if (!e->uses_mq && e->type->ops.sq.elevator_former_req_fn)
		return e->type->ops.sq.elevator_former_req_fn(q, rq);
	return NULL;
}

int elv_set_request(struct request_queue *q, struct request *rq,
		    struct bio *bio, gfp_t gfp_mask)
{
	struct elevator_queue *e = q->elevator;

	if (WARN_ON_ONCE(e->uses_mq))
		return 0;

	if (e->type->ops.sq.elevator_set_req_fn)
		return e->type->ops.sq.elevator_set_req_fn(q, rq, bio, gfp_mask);
	return 0;
}

void elv_put_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (WARN_ON_ONCE(e->uses_mq))
		return;

	if (e->type->ops.sq.elevator_put_req_fn)
		e->type->ops.sq.elevator_put_req_fn(rq);
}

int elv_may_queue(struct request_queue *q, unsigned int op)
{
	struct elevator_queue *e = q->elevator;

	if (WARN_ON_ONCE(e->uses_mq))
		return 0;

	if (e->type->ops.sq.elevator_may_queue_fn)
		return e->type->ops.sq.elevator_may_queue_fn(q, op);

	return ELV_MQUEUE_MAY;
}

void elv_completed_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (WARN_ON_ONCE(e->uses_mq))
		return;

	/*
	 * request is released from the driver, io must be done
	 */
	if (blk_account_rq(rq)) {
		q->in_flight[rq_is_sync(rq)]--;
		if ((rq->rq_flags & RQF_SORTED) &&
		    e->type->ops.sq.elevator_completed_req_fn)
			e->type->ops.sq.elevator_completed_req_fn(q, rq);
	}
}
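
/*
 * The WARN_ON_ONCE(e->uses_mq) checks in the hooks above reflect that
 * set_request/put_request/may_queue/completed_request are legacy
 * request_fn-path concepts; blk-mq schedulers are never called through
 * them.
 */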

#define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)

static ssize_t
elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct elv_fs_entry *entry = to_elv(attr);
	struct elevator_queue *e;
	ssize_t error;

	if (!entry->show)
		return -EIO;

	e = container_of(kobj, struct elevator_queue, kobj);
	mutex_lock(&e->sysfs_lock);
	error = e->type ? entry->show(e, page) : -ENOENT;
	mutex_unlock(&e->sysfs_lock);
	return error;
}

static ssize_t
elv_attr_store(struct kobject *kobj, struct attribute *attr,
	       const char *page, size_t length)
{
	struct elv_fs_entry *entry = to_elv(attr);
	struct elevator_queue *e;
	ssize_t error;

	if (!entry->store)
		return -EIO;

	e = container_of(kobj, struct elevator_queue, kobj);
	mutex_lock(&e->sysfs_lock);
	error = e->type ? entry->store(e, page, length) : -ENOENT;
	mutex_unlock(&e->sysfs_lock);
	return error;
}

static const struct sysfs_ops elv_sysfs_ops = {
	.show	= elv_attr_show,
	.store	= elv_attr_store,
};

static struct kobj_type elv_ktype = {
	.sysfs_ops	= &elv_sysfs_ops,
	.release	= elevator_release,
};

int elv_register_queue(struct request_queue *q)
{
	struct elevator_queue *e = q->elevator;
	int error;

	error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched");
	if (!error) {
		struct elv_fs_entry *attr = e->type->elevator_attrs;
		if (attr) {
			while (attr->attr.name) {
				if (sysfs_create_file(&e->kobj, &attr->attr))
					break;
				attr++;
			}
		}
		kobject_uevent(&e->kobj, KOBJ_ADD);
		e->registered = 1;
		if (!e->uses_mq && e->type->ops.sq.elevator_registered_fn)
			e->type->ops.sq.elevator_registered_fn(q);
	}
	return error;
}
EXPORT_SYMBOL(elv_register_queue);

void elv_unregister_queue(struct request_queue *q)
{
	if (q) {
		struct elevator_queue *e = q->elevator;

		kobject_uevent(&e->kobj, KOBJ_REMOVE);
		kobject_del(&e->kobj);
		e->registered = 0;
	}
}
EXPORT_SYMBOL(elv_unregister_queue);

int elv_register(struct elevator_type *e)
{
	char *def = "";

	/* create icq_cache if requested */
	if (e->icq_size) {
		if (WARN_ON(e->icq_size < sizeof(struct io_cq)) ||
		    WARN_ON(e->icq_align < __alignof__(struct io_cq)))
			return -EINVAL;

		snprintf(e->icq_cache_name, sizeof(e->icq_cache_name),
			 "%s_io_cq", e->elevator_name);
		e->icq_cache = kmem_cache_create(e->icq_cache_name, e->icq_size,
						 e->icq_align, 0, NULL);
		if (!e->icq_cache)
			return -ENOMEM;
	}

	/* register, don't allow duplicate names */
	spin_lock(&elv_list_lock);
	if (elevator_find(e->elevator_name)) {
		spin_unlock(&elv_list_lock);
		if (e->icq_cache)
			kmem_cache_destroy(e->icq_cache);
		return -EBUSY;
	}
	list_add_tail(&e->list, &elv_list);
	spin_unlock(&elv_list_lock);

	/* print pretty message */
	if (!strcmp(e->elevator_name, chosen_elevator) ||
	    (!*chosen_elevator &&
	     !strcmp(e->elevator_name, CONFIG_DEFAULT_IOSCHED)))
		def = " (default)";

	printk(KERN_INFO "io scheduler %s registered%s\n", e->elevator_name,
								def);
	return 0;
}
EXPORT_SYMBOL_GPL(elv_register);
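
/*
 * A minimal (hypothetical) single-queue scheduler would register itself
 * from its module init roughly like this:
 *
 *	static struct elevator_type elevator_foo = {
 *		.ops.sq = {
 *			.elevator_dispatch_fn	= foo_dispatch,
 *			.elevator_add_req_fn	= foo_add_request,
 *			.elevator_init_fn	= foo_init_queue,
 *			.elevator_exit_fn	= foo_exit_queue,
 *		},
 *		.elevator_name	= "foo",
 *		.elevator_owner	= THIS_MODULE,
 *	};
 *
 *	static int __init foo_init(void)
 *	{
 *		return elv_register(&elevator_foo);
 *	}
 */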

void elv_unregister(struct elevator_type *e)
{
	/* unregister */
	spin_lock(&elv_list_lock);
	list_del_init(&e->list);
	spin_unlock(&elv_list_lock);

	/*
	 * Destroy icq_cache if it exists.  icq's are RCU managed.  Make
	 * sure all RCU operations are complete before proceeding.
	 */
	if (e->icq_cache) {
		rcu_barrier();
		kmem_cache_destroy(e->icq_cache);
		e->icq_cache = NULL;
	}
}
EXPORT_SYMBOL_GPL(elv_unregister);

/*
 * switch to new_e io scheduler. be careful not to introduce deadlocks -
 * we don't free the old io scheduler before we have allocated what we
 * need for the new one. this way we have a chance of going back to the
 * old one, if the new one fails init for some reason.
 */
static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
{
	struct elevator_queue *old = q->elevator;
	bool old_registered = false;
	int err;

	if (q->mq_ops) {
		blk_mq_freeze_queue(q);
		blk_mq_quiesce_queue(q);
	}

	/*
	 * Turn on BYPASS and drain all requests w/ elevator private data.
	 * Block layer doesn't call into a quiesced elevator - all requests
	 * are directly put on the dispatch list without elevator data
	 * using INSERT_BACK.  All requests have SOFTBARRIER set and no
	 * merge happens either.
	 */
	if (old) {
		old_registered = old->registered;

		if (old->uses_mq)
			blk_mq_sched_teardown(q);

		if (!q->mq_ops)
			blk_queue_bypass_start(q);

		/* unregister and clear all auxiliary data of the old elevator */
		if (old_registered)
			elv_unregister_queue(q);

		spin_lock_irq(q->queue_lock);
		ioc_clear_queue(q);
		spin_unlock_irq(q->queue_lock);
	}

	/* allocate, init and register new elevator */
	if (new_e) {
		if (new_e->uses_mq) {
			err = blk_mq_sched_setup(q);
			if (!err)
				err = new_e->ops.mq.init_sched(q, new_e);
		} else
			err = new_e->ops.sq.elevator_init_fn(q, new_e);
		if (err)
			goto fail_init;

		err = elv_register_queue(q);
		if (err)
			goto fail_register;
	} else
		q->elevator = NULL;

	/* done, kill the old one and finish */
	if (old) {
		elevator_exit(old);
		if (!q->mq_ops)
			blk_queue_bypass_end(q);
	}

	if (q->mq_ops) {
		blk_mq_unfreeze_queue(q);
		blk_mq_start_stopped_hw_queues(q, true);
	}

	if (new_e)
		blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);
	else
		blk_add_trace_msg(q, "elv switch: none");

	return 0;

fail_register:
	if (q->mq_ops)
		blk_mq_sched_teardown(q);
	elevator_exit(q->elevator);
fail_init:
	/* switch failed, restore and re-register old elevator */
	if (old) {
		q->elevator = old;
		elv_register_queue(q);
		if (!q->mq_ops)
			blk_queue_bypass_end(q);
	}
	if (q->mq_ops) {
		blk_mq_unfreeze_queue(q);
		blk_mq_start_stopped_hw_queues(q, true);
	}

	return err;
}
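
/*
 * On the failure paths above the old elevator is put back in
 * q->elevator and re-registered before the error is returned, so a
 * failed switch leaves the queue on its previous scheduler.
 */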

/*
 * Switch this queue to the given IO scheduler.
 */
static int __elevator_change(struct request_queue *q, const char *name)
{
	char elevator_name[ELV_NAME_MAX];
	struct elevator_type *e;

	/*
	 * Special case for mq, turn off scheduling
	 */
	if (q->mq_ops && !strncmp(name, "none", 4))
		return elevator_switch(q, NULL);

	strlcpy(elevator_name, name, sizeof(elevator_name));
	e = elevator_get(strstrip(elevator_name), true);
	if (!e) {
		printk(KERN_ERR "elevator: type %s not found\n", elevator_name);
		return -EINVAL;
	}

	if (q->elevator &&
	    !strcmp(elevator_name, q->elevator->type->elevator_name)) {
		elevator_put(e);
		return 0;
	}

	if (!e->uses_mq && q->mq_ops) {
		elevator_put(e);
		return -EINVAL;
	}
	if (e->uses_mq && !q->mq_ops) {
		elevator_put(e);
		return -EINVAL;
	}

	return elevator_switch(q, e);
}

int elevator_change(struct request_queue *q, const char *name)
{
	int ret;

	/* Protect q->elevator from elevator_init() */
	mutex_lock(&q->sysfs_lock);
	ret = __elevator_change(q, name);
	mutex_unlock(&q->sysfs_lock);

	return ret;
}
EXPORT_SYMBOL(elevator_change);

ssize_t elv_iosched_store(struct request_queue *q, const char *name,
			  size_t count)
{
	int ret;

	if (!(q->mq_ops || q->request_fn))
		return count;

	ret = __elevator_change(q, name);
	if (!ret)
		return count;

	printk(KERN_ERR "elevator: switch to %s failed\n", name);
	return ret;
}

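/*
 * elv_iosched_store() backs the sysfs "scheduler" attribute, so e.g.
 *
 *	# echo deadline > /sys/block/sda/queue/scheduler
 *
 * ends up in __elevator_change() above.
 */
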
ssize_t elv_iosched_show(struct request_queue *q, char *name)
{
	struct elevator_queue *e = q->elevator;
	struct elevator_type *elv = NULL;
	struct elevator_type *__e;
	int len = 0;

	if (!blk_queue_stackable(q))
		return sprintf(name, "none\n");

	if (!q->elevator)
		len += sprintf(name+len, "[none] ");
	else
		elv = e->type;

	spin_lock(&elv_list_lock);
	list_for_each_entry(__e, &elv_list, list) {
		if (elv && !strcmp(elv->elevator_name, __e->elevator_name)) {
			len += sprintf(name+len, "[%s] ", elv->elevator_name);
			continue;
		}
		if (__e->uses_mq && q->mq_ops)
			len += sprintf(name+len, "%s ", __e->elevator_name);
		else if (!__e->uses_mq && !q->mq_ops)
			len += sprintf(name+len, "%s ", __e->elevator_name);
	}
	spin_unlock(&elv_list_lock);

	if (q->mq_ops && q->elevator)
		len += sprintf(name+len, "none");

	len += sprintf(len+name, "\n");
	return len;
}

struct request *elv_rb_former_request(struct request_queue *q,
				      struct request *rq)
{
	struct rb_node *rbprev = rb_prev(&rq->rb_node);

	if (rbprev)
		return rb_entry_rq(rbprev);

	return NULL;
}
EXPORT_SYMBOL(elv_rb_former_request);

struct request *elv_rb_latter_request(struct request_queue *q,
				      struct request *rq)
{
	struct rb_node *rbnext = rb_next(&rq->rb_node);

	if (rbnext)
		return rb_entry_rq(rbnext);

	return NULL;
}
EXPORT_SYMBOL(elv_rb_latter_request);