block/elevator.c
1 /*
2 * Block device elevator/IO-scheduler.
3 *
4 * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
5 *
6 * 30042000 Jens Axboe <axboe@kernel.dk> :
7 *
8 * Split the elevator a bit so that it is possible to choose a different
9 * one or even write a new "plug in". There are three pieces:
10 * - elevator_fn, inserts a new request in the queue list
11 * - elevator_merge_fn, decides whether a new buffer can be merged with
12 * an existing request
13 * - elevator_dequeue_fn, called when a request is taken off the active list
14 *
15 * 20082000 Dave Jones <davej@suse.de> :
16 * Removed tests for max-bomb-segments, which was breaking elvtune
17 * when run without -bN
18 *
19 * Jens:
20 * - Rework again to work with bio instead of buffer_heads
21  *	- lose bi_dev comparisons, partition handling is right now
22 * - completely modularize elevator setup and teardown
23 *
24 */
25 #include <linux/kernel.h>
26 #include <linux/fs.h>
27 #include <linux/blkdev.h>
28 #include <linux/elevator.h>
29 #include <linux/bio.h>
30 #include <linux/module.h>
31 #include <linux/slab.h>
32 #include <linux/init.h>
33 #include <linux/compiler.h>
34 #include <linux/blktrace_api.h>
35 #include <linux/hash.h>
36 #include <linux/uaccess.h>
37 #include <linux/pm_runtime.h>
38 #include <linux/blk-cgroup.h>
39
40 #include <trace/events/block.h>
41
42 #include "blk.h"
43 #include "blk-mq-sched.h"
44
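/*
 * All registered elevator types are kept on elv_list, protected by
 * elv_list_lock.
 */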
45 static DEFINE_SPINLOCK(elv_list_lock);
46 static LIST_HEAD(elv_list);
47
48 /*
49 * Merge hash stuff.
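 * The hash is keyed on a request's end sector (rq_hash_key()), so
 * elv_rqhash_find(q, bio->bi_iter.bi_sector) returns a request that a
 * bio starting at that sector could be back-merged into.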
50 */
51 #define rq_hash_key(rq) (blk_rq_pos(rq) + blk_rq_sectors(rq))
52
53 /*
54  * Query the io scheduler to see if a bio being issued by the current
55  * process may be merged with rq.
56 */
57 static int elv_iosched_allow_bio_merge(struct request *rq, struct bio *bio)
58 {
59 struct request_queue *q = rq->q;
60 struct elevator_queue *e = q->elevator;
61
62 if (e->uses_mq && e->type->ops.mq.allow_merge)
63 return e->type->ops.mq.allow_merge(q, rq, bio);
64 else if (!e->uses_mq && e->type->ops.sq.elevator_allow_bio_merge_fn)
65 return e->type->ops.sq.elevator_allow_bio_merge_fn(q, rq, bio);
66
67 return 1;
68 }
69
70 /*
71 * can we safely merge with this request?
72 */
73 bool elv_bio_merge_ok(struct request *rq, struct bio *bio)
74 {
75 if (!blk_rq_merge_ok(rq, bio))
76 return false;
77
78 if (!elv_iosched_allow_bio_merge(rq, bio))
79 return false;
80
81 return true;
82 }
83 EXPORT_SYMBOL(elv_bio_merge_ok);
84
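/* Caller must hold elv_list_lock. */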
85 static struct elevator_type *elevator_find(const char *name)
86 {
87 struct elevator_type *e;
88
89 list_for_each_entry(e, &elv_list, list) {
90 if (!strcmp(e->elevator_name, name))
91 return e;
92 }
93
94 return NULL;
95 }
96
97 static void elevator_put(struct elevator_type *e)
98 {
99 module_put(e->elevator_owner);
100 }
101
102 static struct elevator_type *elevator_get(const char *name, bool try_loading)
103 {
104 struct elevator_type *e;
105
106 spin_lock(&elv_list_lock);
107
108 e = elevator_find(name);
109 if (!e && try_loading) {
110 spin_unlock(&elv_list_lock);
111 request_module("%s-iosched", name);
112 spin_lock(&elv_list_lock);
113 e = elevator_find(name);
114 }
115
116 if (e && !try_module_get(e->elevator_owner))
117 e = NULL;
118
119 spin_unlock(&elv_list_lock);
120
121 return e;
122 }
123
124 static char chosen_elevator[ELV_NAME_MAX];
125
126 static int __init elevator_setup(char *str)
127 {
128 /*
129 * Be backwards-compatible with previous kernels, so users
130 * won't get the wrong elevator.
131 */
132 strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
133 return 1;
134 }
135
136 __setup("elevator=", elevator_setup);
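/*
 * e.g. booting with "elevator=deadline" records "deadline" in
 * chosen_elevator; load_default_elevator_module() and elevator_init()
 * below then prefer it when a queue picks its default scheduler.
 */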
137
138 /* called during boot to load the elevator chosen by the elevator param */
139 void __init load_default_elevator_module(void)
140 {
141 struct elevator_type *e;
142
143 if (!chosen_elevator[0])
144 return;
145
146 spin_lock(&elv_list_lock);
147 e = elevator_find(chosen_elevator);
148 spin_unlock(&elv_list_lock);
149
150 if (!e)
151 request_module("%s-iosched", chosen_elevator);
152 }
153
154 static struct kobj_type elv_ktype;
155
156 struct elevator_queue *elevator_alloc(struct request_queue *q,
157 struct elevator_type *e)
158 {
159 struct elevator_queue *eq;
160
161 eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, q->node);
162 if (unlikely(!eq))
163 return NULL;
164
165 eq->type = e;
166 kobject_init(&eq->kobj, &elv_ktype);
167 mutex_init(&eq->sysfs_lock);
168 hash_init(eq->hash);
169 eq->uses_mq = e->uses_mq;
170
171 return eq;
172 }
173 EXPORT_SYMBOL(elevator_alloc);
174
175 static void elevator_release(struct kobject *kobj)
176 {
177 struct elevator_queue *e;
178
179 e = container_of(kobj, struct elevator_queue, kobj);
180 elevator_put(e->type);
181 kfree(e);
182 }
183
184 int elevator_init(struct request_queue *q, char *name)
185 {
186 struct elevator_type *e = NULL;
187 int err;
188
189 /*
190 * q->sysfs_lock must be held to provide mutual exclusion between
191 * elevator_switch() and here.
192 */
193 lockdep_assert_held(&q->sysfs_lock);
194
195 if (unlikely(q->elevator))
196 return 0;
197
198 INIT_LIST_HEAD(&q->queue_head);
199 q->last_merge = NULL;
200 q->end_sector = 0;
201 q->boundary_rq = NULL;
202
203 if (name) {
204 e = elevator_get(name, true);
205 if (!e)
206 return -EINVAL;
207 }
208
209 /*
210	 * Use the default elevator specified by the "elevator=" boot param or
211	 * the config option. Don't try to load modules, as we could be running
212	 * off async and request_module() isn't allowed from async.
213 */
214 if (!e && *chosen_elevator) {
215 e = elevator_get(chosen_elevator, false);
216 if (!e)
217 printk(KERN_ERR "I/O scheduler %s not found\n",
218 chosen_elevator);
219 }
220
221 if (!e) {
222 if (q->mq_ops && q->nr_hw_queues == 1)
223 e = elevator_get(CONFIG_DEFAULT_SQ_IOSCHED, false);
224 else if (q->mq_ops)
225 e = elevator_get(CONFIG_DEFAULT_MQ_IOSCHED, false);
226 else
227 e = elevator_get(CONFIG_DEFAULT_IOSCHED, false);
228
229 if (!e) {
230 printk(KERN_ERR
231 "Default I/O scheduler not found. " \
232 "Using noop/none.\n");
233 e = elevator_get("noop", false);
234 }
235 }
236
237 if (e->uses_mq) {
238 err = blk_mq_sched_setup(q);
239 if (!err)
240 err = e->ops.mq.init_sched(q, e);
241 } else
242 err = e->ops.sq.elevator_init_fn(q, e);
243 if (err) {
244 if (e->uses_mq)
245 blk_mq_sched_teardown(q);
246 elevator_put(e);
247 }
248 return err;
249 }
250 EXPORT_SYMBOL(elevator_init);
251
252 void elevator_exit(struct elevator_queue *e)
253 {
254 mutex_lock(&e->sysfs_lock);
255 if (e->uses_mq && e->type->ops.mq.exit_sched)
256 e->type->ops.mq.exit_sched(e);
257 else if (!e->uses_mq && e->type->ops.sq.elevator_exit_fn)
258 e->type->ops.sq.elevator_exit_fn(e);
259 mutex_unlock(&e->sysfs_lock);
260
261 kobject_put(&e->kobj);
262 }
263 EXPORT_SYMBOL(elevator_exit);
264
265 static inline void __elv_rqhash_del(struct request *rq)
266 {
267 hash_del(&rq->hash);
268 rq->rq_flags &= ~RQF_HASHED;
269 }
270
271 void elv_rqhash_del(struct request_queue *q, struct request *rq)
272 {
273 if (ELV_ON_HASH(rq))
274 __elv_rqhash_del(rq);
275 }
276 EXPORT_SYMBOL_GPL(elv_rqhash_del);
277
278 void elv_rqhash_add(struct request_queue *q, struct request *rq)
279 {
280 struct elevator_queue *e = q->elevator;
281
282 BUG_ON(ELV_ON_HASH(rq));
283 hash_add(e->hash, &rq->hash, rq_hash_key(rq));
284 rq->rq_flags |= RQF_HASHED;
285 }
286 EXPORT_SYMBOL_GPL(elv_rqhash_add);
287
288 void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
289 {
290 __elv_rqhash_del(rq);
291 elv_rqhash_add(q, rq);
292 }
293
294 struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
295 {
296 struct elevator_queue *e = q->elevator;
297 struct hlist_node *next;
298 struct request *rq;
299
300 hash_for_each_possible_safe(e->hash, rq, next, hash, offset) {
301 BUG_ON(!ELV_ON_HASH(rq));
302
303 if (unlikely(!rq_mergeable(rq))) {
304 __elv_rqhash_del(rq);
305 continue;
306 }
307
308 if (rq_hash_key(rq) == offset)
309 return rq;
310 }
311
312 return NULL;
313 }
314
315 /*
316 * RB-tree support functions for inserting/lookup/removal of requests
317 * in a sorted RB tree.
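 * The tree is keyed on blk_rq_pos(); requests with equal start sectors
 * are linked to the right, and elv_rb_find() returns a request starting
 * exactly at the given sector.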
318 */
319 void elv_rb_add(struct rb_root *root, struct request *rq)
320 {
321 struct rb_node **p = &root->rb_node;
322 struct rb_node *parent = NULL;
323 struct request *__rq;
324
325 while (*p) {
326 parent = *p;
327 __rq = rb_entry(parent, struct request, rb_node);
328
329 if (blk_rq_pos(rq) < blk_rq_pos(__rq))
330 p = &(*p)->rb_left;
331 else if (blk_rq_pos(rq) >= blk_rq_pos(__rq))
332 p = &(*p)->rb_right;
333 }
334
335 rb_link_node(&rq->rb_node, parent, p);
336 rb_insert_color(&rq->rb_node, root);
337 }
338 EXPORT_SYMBOL(elv_rb_add);
339
340 void elv_rb_del(struct rb_root *root, struct request *rq)
341 {
342 BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
343 rb_erase(&rq->rb_node, root);
344 RB_CLEAR_NODE(&rq->rb_node);
345 }
346 EXPORT_SYMBOL(elv_rb_del);
347
348 struct request *elv_rb_find(struct rb_root *root, sector_t sector)
349 {
350 struct rb_node *n = root->rb_node;
351 struct request *rq;
352
353 while (n) {
354 rq = rb_entry(n, struct request, rb_node);
355
356 if (sector < blk_rq_pos(rq))
357 n = n->rb_left;
358 else if (sector > blk_rq_pos(rq))
359 n = n->rb_right;
360 else
361 return rq;
362 }
363
364 return NULL;
365 }
366 EXPORT_SYMBOL(elv_rb_find);
367
368 /*
369 * Insert rq into dispatch queue of q. Queue lock must be held on
370  * entry.  rq is sorted into the dispatch queue.  To be used by
371 * specific elevators.
372 */
373 void elv_dispatch_sort(struct request_queue *q, struct request *rq)
374 {
375 sector_t boundary;
376 struct list_head *entry;
377
378 if (q->last_merge == rq)
379 q->last_merge = NULL;
380
381 elv_rqhash_del(q, rq);
382
383 q->nr_sorted--;
384
385 boundary = q->end_sector;
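	/*
	 * Scan backwards from the tail for the insertion point: stop at a
	 * change of operation or data direction, at started/softbarrier
	 * requests, and otherwise keep the list sector-sorted relative to
	 * the current boundary sector.
	 */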
386 list_for_each_prev(entry, &q->queue_head) {
387 struct request *pos = list_entry_rq(entry);
388
389 if (req_op(rq) != req_op(pos))
390 break;
391 if (rq_data_dir(rq) != rq_data_dir(pos))
392 break;
393 if (pos->rq_flags & (RQF_STARTED | RQF_SOFTBARRIER))
394 break;
395 if (blk_rq_pos(rq) >= boundary) {
396 if (blk_rq_pos(pos) < boundary)
397 continue;
398 } else {
399 if (blk_rq_pos(pos) >= boundary)
400 break;
401 }
402 if (blk_rq_pos(rq) >= blk_rq_pos(pos))
403 break;
404 }
405
406 list_add(&rq->queuelist, entry);
407 }
408 EXPORT_SYMBOL(elv_dispatch_sort);
409
410 /*
411 * Insert rq into dispatch queue of q. Queue lock must be held on
412 * entry. rq is added to the back of the dispatch queue. To be used by
413 * specific elevators.
414 */
415 void elv_dispatch_add_tail(struct request_queue *q, struct request *rq)
416 {
417 if (q->last_merge == rq)
418 q->last_merge = NULL;
419
420 elv_rqhash_del(q, rq);
421
422 q->nr_sorted--;
423
424 q->end_sector = rq_end_sector(rq);
425 q->boundary_rq = rq;
426 list_add_tail(&rq->queuelist, &q->queue_head);
427 }
428 EXPORT_SYMBOL(elv_dispatch_add_tail);
429
430 int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
431 {
432 struct elevator_queue *e = q->elevator;
433 struct request *__rq;
434 int ret;
435
436 /*
437 * Levels of merges:
438 * nomerges: No merges at all attempted
439 * noxmerges: Only simple one-hit cache try
440 * merges: All merge tries attempted
441 */
442 if (blk_queue_nomerges(q) || !bio_mergeable(bio))
443 return ELEVATOR_NO_MERGE;
444
445 /*
446 * First try one-hit cache.
447 */
448 if (q->last_merge && elv_bio_merge_ok(q->last_merge, bio)) {
449 ret = blk_try_merge(q->last_merge, bio);
450 if (ret != ELEVATOR_NO_MERGE) {
451 *req = q->last_merge;
452 return ret;
453 }
454 }
455
456 if (blk_queue_noxmerges(q))
457 return ELEVATOR_NO_MERGE;
458
459 /*
460 * See if our hash lookup can find a potential backmerge.
461 */
462 __rq = elv_rqhash_find(q, bio->bi_iter.bi_sector);
463 if (__rq && elv_bio_merge_ok(__rq, bio)) {
464 *req = __rq;
465 return ELEVATOR_BACK_MERGE;
466 }
467
468 if (e->uses_mq && e->type->ops.mq.request_merge)
469 return e->type->ops.mq.request_merge(q, req, bio);
470 else if (!e->uses_mq && e->type->ops.sq.elevator_merge_fn)
471 return e->type->ops.sq.elevator_merge_fn(q, req, bio);
472
473 return ELEVATOR_NO_MERGE;
474 }
475
476 /*
477 * Attempt to do an insertion back merge. Only check for the case where
478 * we can append 'rq' to an existing request, so we can throw 'rq' away
479 * afterwards.
480 *
481 * Returns true if we merged, false otherwise
482 */
483 bool elv_attempt_insert_merge(struct request_queue *q, struct request *rq)
484 {
485 struct request *__rq;
486 bool ret;
487
488 if (blk_queue_nomerges(q))
489 return false;
490
491 /*
492 * First try one-hit cache.
493 */
494 if (q->last_merge && blk_attempt_req_merge(q, q->last_merge, rq))
495 return true;
496
497 if (blk_queue_noxmerges(q))
498 return false;
499
500 ret = false;
501 /*
502 * See if our hash lookup can find a potential backmerge.
503 */
504 while (1) {
505 __rq = elv_rqhash_find(q, blk_rq_pos(rq));
506 if (!__rq || !blk_attempt_req_merge(q, __rq, rq))
507 break;
508
509 /* The merged request could be merged with others, try again */
510 ret = true;
511 rq = __rq;
512 }
513
514 return ret;
515 }
516
517 void elv_merged_request(struct request_queue *q, struct request *rq, int type)
518 {
519 struct elevator_queue *e = q->elevator;
520
521 if (e->uses_mq && e->type->ops.mq.request_merged)
522 e->type->ops.mq.request_merged(q, rq, type);
523 else if (!e->uses_mq && e->type->ops.sq.elevator_merged_fn)
524 e->type->ops.sq.elevator_merged_fn(q, rq, type);
525
526 if (type == ELEVATOR_BACK_MERGE)
527 elv_rqhash_reposition(q, rq);
528
529 q->last_merge = rq;
530 }
531
532 void elv_merge_requests(struct request_queue *q, struct request *rq,
533 struct request *next)
534 {
535 struct elevator_queue *e = q->elevator;
536 bool next_sorted = false;
537
538 if (e->uses_mq && e->type->ops.mq.requests_merged)
539 e->type->ops.mq.requests_merged(q, rq, next);
540 else if (e->type->ops.sq.elevator_merge_req_fn) {
541 next_sorted = next->rq_flags & RQF_SORTED;
542 if (next_sorted)
543 e->type->ops.sq.elevator_merge_req_fn(q, rq, next);
544 }
545
546 elv_rqhash_reposition(q, rq);
547
548 if (next_sorted) {
549 elv_rqhash_del(q, next);
550 q->nr_sorted--;
551 }
552
553 q->last_merge = rq;
554 }
555
556 void elv_bio_merged(struct request_queue *q, struct request *rq,
557 struct bio *bio)
558 {
559 struct elevator_queue *e = q->elevator;
560
561 if (WARN_ON_ONCE(e->uses_mq))
562 return;
563
564 if (e->type->ops.sq.elevator_bio_merged_fn)
565 e->type->ops.sq.elevator_bio_merged_fn(q, rq, bio);
566 }
567
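/*
 * Runtime-PM bookkeeping: non-PM requests are counted in q->nr_pending,
 * and the first one queued against a suspended (or suspending) device
 * triggers a runtime resume.
 */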
568 #ifdef CONFIG_PM
569 static void blk_pm_requeue_request(struct request *rq)
570 {
571 if (rq->q->dev && !(rq->rq_flags & RQF_PM))
572 rq->q->nr_pending--;
573 }
574
575 static void blk_pm_add_request(struct request_queue *q, struct request *rq)
576 {
577 if (q->dev && !(rq->rq_flags & RQF_PM) && q->nr_pending++ == 0 &&
578 (q->rpm_status == RPM_SUSPENDED || q->rpm_status == RPM_SUSPENDING))
579 pm_request_resume(q->dev);
580 }
581 #else
582 static inline void blk_pm_requeue_request(struct request *rq) {}
583 static inline void blk_pm_add_request(struct request_queue *q,
584 struct request *rq)
585 {
586 }
587 #endif
588
589 void elv_requeue_request(struct request_queue *q, struct request *rq)
590 {
591 /*
592	 * it already went through dequeue, so we need to decrement the
593 * in_flight count again
594 */
595 if (blk_account_rq(rq)) {
596 q->in_flight[rq_is_sync(rq)]--;
597 if (rq->rq_flags & RQF_SORTED)
598 elv_deactivate_rq(q, rq);
599 }
600
601 rq->rq_flags &= ~RQF_STARTED;
602
603 blk_pm_requeue_request(rq);
604
605 __elv_add_request(q, rq, ELEVATOR_INSERT_REQUEUE);
606 }
607
608 void elv_drain_elevator(struct request_queue *q)
609 {
610 struct elevator_queue *e = q->elevator;
611 static int printed;
612
613 if (WARN_ON_ONCE(e->uses_mq))
614 return;
615
616 lockdep_assert_held(q->queue_lock);
617
618 while (e->type->ops.sq.elevator_dispatch_fn(q, 1))
619 ;
620 if (q->nr_sorted && printed++ < 10) {
621 printk(KERN_ERR "%s: forced dispatching is broken "
622 "(nr_sorted=%u), please report this\n",
623 q->elevator->type->elevator_name, q->nr_sorted);
624 }
625 }
626
627 void __elv_add_request(struct request_queue *q, struct request *rq, int where)
628 {
629 trace_block_rq_insert(q, rq);
630
631 blk_pm_add_request(q, rq);
632
633 rq->q = q;
634
635 if (rq->rq_flags & RQF_SOFTBARRIER) {
636 /* barriers are scheduling boundary, update end_sector */
637 if (rq->cmd_type == REQ_TYPE_FS) {
638 q->end_sector = rq_end_sector(rq);
639 q->boundary_rq = rq;
640 }
641 } else if (!(rq->rq_flags & RQF_ELVPRIV) &&
642 (where == ELEVATOR_INSERT_SORT ||
643 where == ELEVATOR_INSERT_SORT_MERGE))
644 where = ELEVATOR_INSERT_BACK;
645
646 switch (where) {
647 case ELEVATOR_INSERT_REQUEUE:
648 case ELEVATOR_INSERT_FRONT:
649 rq->rq_flags |= RQF_SOFTBARRIER;
650 list_add(&rq->queuelist, &q->queue_head);
651 break;
652
653 case ELEVATOR_INSERT_BACK:
654 rq->rq_flags |= RQF_SOFTBARRIER;
655 elv_drain_elevator(q);
656 list_add_tail(&rq->queuelist, &q->queue_head);
657 /*
658 * We kick the queue here for the following reasons.
659 * - The elevator might have returned NULL previously
660 * to delay requests and returned them now. As the
661 * queue wasn't empty before this request, ll_rw_blk
662		 *   won't run the queue on return, resulting in a hang.
663 * - Usually, back inserted requests won't be merged
664 * with anything. There's no point in delaying queue
665 * processing.
666 */
667 __blk_run_queue(q);
668 break;
669
670 case ELEVATOR_INSERT_SORT_MERGE:
671 /*
672 * If we succeed in merging this request with one in the
673 * queue already, we are done - rq has now been freed,
674 * so no need to do anything further.
675 */
676 if (elv_attempt_insert_merge(q, rq))
677 break;
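		/* fall through - if the merge fails, sort rq in as usual */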
678 case ELEVATOR_INSERT_SORT:
679 BUG_ON(rq->cmd_type != REQ_TYPE_FS);
680 rq->rq_flags |= RQF_SORTED;
681 q->nr_sorted++;
682 if (rq_mergeable(rq)) {
683 elv_rqhash_add(q, rq);
684 if (!q->last_merge)
685 q->last_merge = rq;
686 }
687
688 /*
689 * Some ioscheds (cfq) run q->request_fn directly, so
690 * rq cannot be accessed after calling
691 * elevator_add_req_fn.
692 */
693 q->elevator->type->ops.sq.elevator_add_req_fn(q, rq);
694 break;
695
696 case ELEVATOR_INSERT_FLUSH:
697 rq->rq_flags |= RQF_SOFTBARRIER;
698 blk_insert_flush(rq);
699 break;
700 default:
701 printk(KERN_ERR "%s: bad insertion point %d\n",
702 __func__, where);
703 BUG();
704 }
705 }
706 EXPORT_SYMBOL(__elv_add_request);
707
708 void elv_add_request(struct request_queue *q, struct request *rq, int where)
709 {
710 unsigned long flags;
711
712 spin_lock_irqsave(q->queue_lock, flags);
713 __elv_add_request(q, rq, where);
714 spin_unlock_irqrestore(q->queue_lock, flags);
715 }
716 EXPORT_SYMBOL(elv_add_request);
717
718 struct request *elv_latter_request(struct request_queue *q, struct request *rq)
719 {
720 struct elevator_queue *e = q->elevator;
721
722 if (e->uses_mq && e->type->ops.mq.next_request)
723 return e->type->ops.mq.next_request(q, rq);
724 else if (!e->uses_mq && e->type->ops.sq.elevator_latter_req_fn)
725 return e->type->ops.sq.elevator_latter_req_fn(q, rq);
726
727 return NULL;
728 }
729
730 struct request *elv_former_request(struct request_queue *q, struct request *rq)
731 {
732 struct elevator_queue *e = q->elevator;
733
734 if (e->uses_mq && e->type->ops.mq.former_request)
735 return e->type->ops.mq.former_request(q, rq);
736 if (!e->uses_mq && e->type->ops.sq.elevator_former_req_fn)
737 return e->type->ops.sq.elevator_former_req_fn(q, rq);
738 return NULL;
739 }
740
741 int elv_set_request(struct request_queue *q, struct request *rq,
742 struct bio *bio, gfp_t gfp_mask)
743 {
744 struct elevator_queue *e = q->elevator;
745
746 if (WARN_ON_ONCE(e->uses_mq))
747 return 0;
748
749 if (e->type->ops.sq.elevator_set_req_fn)
750 return e->type->ops.sq.elevator_set_req_fn(q, rq, bio, gfp_mask);
751 return 0;
752 }
753
754 void elv_put_request(struct request_queue *q, struct request *rq)
755 {
756 struct elevator_queue *e = q->elevator;
757
758 if (WARN_ON_ONCE(e->uses_mq))
759 return;
760
761 if (e->type->ops.sq.elevator_put_req_fn)
762 e->type->ops.sq.elevator_put_req_fn(rq);
763 }
764
765 int elv_may_queue(struct request_queue *q, unsigned int op)
766 {
767 struct elevator_queue *e = q->elevator;
768
769 if (WARN_ON_ONCE(e->uses_mq))
770 return 0;
771
772 if (e->type->ops.sq.elevator_may_queue_fn)
773 return e->type->ops.sq.elevator_may_queue_fn(q, op);
774
775 return ELV_MQUEUE_MAY;
776 }
777
778 void elv_completed_request(struct request_queue *q, struct request *rq)
779 {
780 struct elevator_queue *e = q->elevator;
781
782 if (WARN_ON_ONCE(e->uses_mq))
783 return;
784
785 /*
786	 * the request has been released by the driver, so its io must be done
787 */
788 if (blk_account_rq(rq)) {
789 q->in_flight[rq_is_sync(rq)]--;
790 if ((rq->rq_flags & RQF_SORTED) &&
791 e->type->ops.sq.elevator_completed_req_fn)
792 e->type->ops.sq.elevator_completed_req_fn(q, rq);
793 }
794 }
795
796 #define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)
797
798 static ssize_t
799 elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
800 {
801 struct elv_fs_entry *entry = to_elv(attr);
802 struct elevator_queue *e;
803 ssize_t error;
804
805 if (!entry->show)
806 return -EIO;
807
808 e = container_of(kobj, struct elevator_queue, kobj);
809 mutex_lock(&e->sysfs_lock);
810 error = e->type ? entry->show(e, page) : -ENOENT;
811 mutex_unlock(&e->sysfs_lock);
812 return error;
813 }
814
815 static ssize_t
816 elv_attr_store(struct kobject *kobj, struct attribute *attr,
817 const char *page, size_t length)
818 {
819 struct elv_fs_entry *entry = to_elv(attr);
820 struct elevator_queue *e;
821 ssize_t error;
822
823 if (!entry->store)
824 return -EIO;
825
826 e = container_of(kobj, struct elevator_queue, kobj);
827 mutex_lock(&e->sysfs_lock);
828 error = e->type ? entry->store(e, page, length) : -ENOENT;
829 mutex_unlock(&e->sysfs_lock);
830 return error;
831 }
832
833 static const struct sysfs_ops elv_sysfs_ops = {
834 .show = elv_attr_show,
835 .store = elv_attr_store,
836 };
837
838 static struct kobj_type elv_ktype = {
839 .sysfs_ops = &elv_sysfs_ops,
840 .release = elevator_release,
841 };
842
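/*
 * The elevator's attributes are registered under the request queue's
 * kobject, typically /sys/block/<disk>/queue/iosched/.
 */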
843 int elv_register_queue(struct request_queue *q)
844 {
845 struct elevator_queue *e = q->elevator;
846 int error;
847
848 error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched");
849 if (!error) {
850 struct elv_fs_entry *attr = e->type->elevator_attrs;
851 if (attr) {
852 while (attr->attr.name) {
853 if (sysfs_create_file(&e->kobj, &attr->attr))
854 break;
855 attr++;
856 }
857 }
858 kobject_uevent(&e->kobj, KOBJ_ADD);
859 e->registered = 1;
860 if (!e->uses_mq && e->type->ops.sq.elevator_registered_fn)
861 e->type->ops.sq.elevator_registered_fn(q);
862 }
863 return error;
864 }
865 EXPORT_SYMBOL(elv_register_queue);
866
867 void elv_unregister_queue(struct request_queue *q)
868 {
869 if (q) {
870 struct elevator_queue *e = q->elevator;
871
872 kobject_uevent(&e->kobj, KOBJ_REMOVE);
873 kobject_del(&e->kobj);
874 e->registered = 0;
875 }
876 }
877 EXPORT_SYMBOL(elv_unregister_queue);
878
879 int elv_register(struct elevator_type *e)
880 {
881 char *def = "";
882
883 /* create icq_cache if requested */
884 if (e->icq_size) {
885 if (WARN_ON(e->icq_size < sizeof(struct io_cq)) ||
886 WARN_ON(e->icq_align < __alignof__(struct io_cq)))
887 return -EINVAL;
888
889 snprintf(e->icq_cache_name, sizeof(e->icq_cache_name),
890 "%s_io_cq", e->elevator_name);
891 e->icq_cache = kmem_cache_create(e->icq_cache_name, e->icq_size,
892 e->icq_align, 0, NULL);
893 if (!e->icq_cache)
894 return -ENOMEM;
895 }
896
897 /* register, don't allow duplicate names */
898 spin_lock(&elv_list_lock);
899 if (elevator_find(e->elevator_name)) {
900 spin_unlock(&elv_list_lock);
901 if (e->icq_cache)
902 kmem_cache_destroy(e->icq_cache);
903 return -EBUSY;
904 }
905 list_add_tail(&e->list, &elv_list);
906 spin_unlock(&elv_list_lock);
907
908 /* print pretty message */
909 if (!strcmp(e->elevator_name, chosen_elevator) ||
910 (!*chosen_elevator &&
911 !strcmp(e->elevator_name, CONFIG_DEFAULT_IOSCHED)))
912 def = " (default)";
913
914 printk(KERN_INFO "io scheduler %s registered%s\n", e->elevator_name,
915 def);
916 return 0;
917 }
918 EXPORT_SYMBOL_GPL(elv_register);
919
920 void elv_unregister(struct elevator_type *e)
921 {
922 /* unregister */
923 spin_lock(&elv_list_lock);
924 list_del_init(&e->list);
925 spin_unlock(&elv_list_lock);
926
927 /*
928 * Destroy icq_cache if it exists. icq's are RCU managed. Make
929 * sure all RCU operations are complete before proceeding.
930 */
931 if (e->icq_cache) {
932 rcu_barrier();
933 kmem_cache_destroy(e->icq_cache);
934 e->icq_cache = NULL;
935 }
936 }
937 EXPORT_SYMBOL_GPL(elv_unregister);
938
939 /*
940  * Switch to new_e io scheduler.  Be careful not to introduce deadlocks -
941  * we don't free the old io scheduler before we have allocated what we
942  * need for the new one.  This way we have a chance of going back to the
943  * old one if the new one fails init for some reason.
944 */
945 static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
946 {
947 struct elevator_queue *old = q->elevator;
948 bool old_registered = false;
949 int err;
950
951 if (q->mq_ops) {
952 blk_mq_freeze_queue(q);
953 blk_mq_quiesce_queue(q);
954 }
955
956 /*
957 * Turn on BYPASS and drain all requests w/ elevator private data.
958 * Block layer doesn't call into a quiesced elevator - all requests
959 * are directly put on the dispatch list without elevator data
960 * using INSERT_BACK. All requests have SOFTBARRIER set and no
961 * merge happens either.
962 */
963 if (old) {
964 old_registered = old->registered;
965
966 if (old->uses_mq)
967 blk_mq_sched_teardown(q);
968
969 if (!q->mq_ops)
970 blk_queue_bypass_start(q);
971
972 /* unregister and clear all auxiliary data of the old elevator */
973 if (old_registered)
974 elv_unregister_queue(q);
975
976 spin_lock_irq(q->queue_lock);
977 ioc_clear_queue(q);
978 spin_unlock_irq(q->queue_lock);
979 }
980
981 /* allocate, init and register new elevator */
982 if (new_e) {
983 if (new_e->uses_mq) {
984 err = blk_mq_sched_setup(q);
985 if (!err)
986 err = new_e->ops.mq.init_sched(q, new_e);
987 } else
988 err = new_e->ops.sq.elevator_init_fn(q, new_e);
989 if (err)
990 goto fail_init;
991
992 err = elv_register_queue(q);
993 if (err)
994 goto fail_register;
995 } else
996 q->elevator = NULL;
997
998 /* done, kill the old one and finish */
999 if (old) {
1000 elevator_exit(old);
1001 if (!q->mq_ops)
1002 blk_queue_bypass_end(q);
1003 }
1004
1005 if (q->mq_ops) {
1006 blk_mq_unfreeze_queue(q);
1007 blk_mq_start_stopped_hw_queues(q, true);
1008 }
1009
1010 if (new_e)
1011 blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);
1012 else
1013 blk_add_trace_msg(q, "elv switch: none");
1014
1015 return 0;
1016
1017 fail_register:
1018 if (q->mq_ops)
1019 blk_mq_sched_teardown(q);
1020 elevator_exit(q->elevator);
1021 fail_init:
1022 /* switch failed, restore and re-register old elevator */
1023 if (old) {
1024 q->elevator = old;
1025 elv_register_queue(q);
1026 if (!q->mq_ops)
1027 blk_queue_bypass_end(q);
1028 }
1029 if (q->mq_ops) {
1030 blk_mq_unfreeze_queue(q);
1031 blk_mq_start_stopped_hw_queues(q, true);
1032 }
1033
1034 return err;
1035 }
1036
1037 /*
1038 * Switch this queue to the given IO scheduler.
1039 */
1040 static int __elevator_change(struct request_queue *q, const char *name)
1041 {
1042 char elevator_name[ELV_NAME_MAX];
1043 struct elevator_type *e;
1044
1045 /*
1046 * Special case for mq, turn off scheduling
1047 */
1048 if (q->mq_ops && !strncmp(name, "none", 4))
1049 return elevator_switch(q, NULL);
1050
1051 strlcpy(elevator_name, name, sizeof(elevator_name));
1052 e = elevator_get(strstrip(elevator_name), true);
1053 if (!e) {
1054 printk(KERN_ERR "elevator: type %s not found\n", elevator_name);
1055 return -EINVAL;
1056 }
1057
1058 if (q->elevator &&
1059 !strcmp(elevator_name, q->elevator->type->elevator_name)) {
1060 elevator_put(e);
1061 return 0;
1062 }
1063
1064 if (!e->uses_mq && q->mq_ops) {
1065 elevator_put(e);
1066 return -EINVAL;
1067 }
1068 if (e->uses_mq && !q->mq_ops) {
1069 elevator_put(e);
1070 return -EINVAL;
1071 }
1072
1073 return elevator_switch(q, e);
1074 }
1075
1076 int elevator_change(struct request_queue *q, const char *name)
1077 {
1078 int ret;
1079
1080 /* Protect q->elevator from elevator_init() */
1081 mutex_lock(&q->sysfs_lock);
1082 ret = __elevator_change(q, name);
1083 mutex_unlock(&q->sysfs_lock);
1084
1085 return ret;
1086 }
1087 EXPORT_SYMBOL(elevator_change);
1088
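/*
 * This backs writes to the queue's "scheduler" sysfs attribute, e.g.
 * "echo deadline > /sys/block/<disk>/queue/scheduler" (or "none" on a
 * blk-mq queue to turn scheduling off).
 */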
1089 ssize_t elv_iosched_store(struct request_queue *q, const char *name,
1090 size_t count)
1091 {
1092 int ret;
1093
1094 if (!(q->mq_ops || q->request_fn))
1095 return count;
1096
1097 ret = __elevator_change(q, name);
1098 if (!ret)
1099 return count;
1100
1101 printk(KERN_ERR "elevator: switch to %s failed\n", name);
1102 return ret;
1103 }
1104
1105 ssize_t elv_iosched_show(struct request_queue *q, char *name)
1106 {
1107 struct elevator_queue *e = q->elevator;
1108 struct elevator_type *elv = NULL;
1109 struct elevator_type *__e;
1110 int len = 0;
1111
1112 if (!blk_queue_stackable(q))
1113 return sprintf(name, "none\n");
1114
1115 if (!q->elevator)
1116 len += sprintf(name+len, "[none] ");
1117 else
1118 elv = e->type;
1119
1120 spin_lock(&elv_list_lock);
1121 list_for_each_entry(__e, &elv_list, list) {
1122 if (elv && !strcmp(elv->elevator_name, __e->elevator_name)) {
1123 len += sprintf(name+len, "[%s] ", elv->elevator_name);
1124 continue;
1125 }
1126 if (__e->uses_mq && q->mq_ops)
1127 len += sprintf(name+len, "%s ", __e->elevator_name);
1128 else if (!__e->uses_mq && !q->mq_ops)
1129 len += sprintf(name+len, "%s ", __e->elevator_name);
1130 }
1131 spin_unlock(&elv_list_lock);
1132
1133 if (q->mq_ops && q->elevator)
1134 len += sprintf(name+len, "none");
1135
1136 len += sprintf(len+name, "\n");
1137 return len;
1138 }
1139
1140 struct request *elv_rb_former_request(struct request_queue *q,
1141 struct request *rq)
1142 {
1143 struct rb_node *rbprev = rb_prev(&rq->rb_node);
1144
1145 if (rbprev)
1146 return rb_entry_rq(rbprev);
1147
1148 return NULL;
1149 }
1150 EXPORT_SYMBOL(elv_rb_former_request);
1151
1152 struct request *elv_rb_latter_request(struct request_queue *q,
1153 struct request *rq)
1154 {
1155 struct rb_node *rbnext = rb_next(&rq->rb_node);
1156
1157 if (rbnext)
1158 return rb_entry_rq(rbnext);
1159
1160 return NULL;
1161 }
1162 EXPORT_SYMBOL(elv_rb_latter_request);