/*
 * Block device elevator/IO-scheduler.
 *
 * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *
 * 30042000 Jens Axboe <axboe@kernel.dk> :
 *
 * Split the elevator a bit so that it is possible to choose a different
 * one or even write a new "plug in". There are three pieces:
 * - elevator_fn, inserts a new request in the queue list
 * - elevator_merge_fn, decides whether a new buffer can be merged with
 *   an existing request
 * - elevator_dequeue_fn, called when a request is taken off the active list
 *
 * 20082000 Dave Jones <davej@suse.de> :
 * Removed tests for max-bomb-segments, which was breaking elvtune
 *  when run without -bN
 *
 * Jens:
 * - Rework again to work with bio instead of buffer_heads
 * - loose bi_dev comparisons, partition handling is right now
 * - completely modularize elevator setup and teardown
 *
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/blktrace_api.h>
#include <linux/hash.h>
#include <linux/uaccess.h>
#include <linux/pm_runtime.h>
#include <linux/blk-cgroup.h>

#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq-sched.h"

static DEFINE_SPINLOCK(elv_list_lock);
static LIST_HEAD(elv_list);

/*
 * Merge hash stuff.
 */
#define rq_hash_key(rq)		(blk_rq_pos(rq) + blk_rq_sectors(rq))
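
/*
 * Note on the hash key (added for clarity): a request is hashed on the
 * sector immediately past its end (start + size).  elv_merge() can then
 * look up bio->bi_iter.bi_sector in this hash to find a request that the
 * new bio could be back-merged onto.
 */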
/*
 * Query io scheduler to see if the current process issuing bio may be
 * merged with rq.
 */
static int elv_iosched_allow_bio_merge(struct request *rq, struct bio *bio)
{
	struct request_queue *q = rq->q;
	struct elevator_queue *e = q->elevator;

	if (e->uses_mq && e->type->ops.mq.allow_merge)
		return e->type->ops.mq.allow_merge(q, rq, bio);
	else if (!e->uses_mq && e->type->ops.sq.elevator_allow_bio_merge_fn)
		return e->type->ops.sq.elevator_allow_bio_merge_fn(q, rq, bio);

	return 1;
}

/*
 * can we safely merge with this request?
 */
bool elv_bio_merge_ok(struct request *rq, struct bio *bio)
{
	if (!blk_rq_merge_ok(rq, bio))
		return false;

	if (!elv_iosched_allow_bio_merge(rq, bio))
		return false;

	return true;
}
EXPORT_SYMBOL(elv_bio_merge_ok);

static struct elevator_type *elevator_find(const char *name)
{
	struct elevator_type *e;

	list_for_each_entry(e, &elv_list, list) {
		if (!strcmp(e->elevator_name, name))
			return e;
	}

	return NULL;
}

static void elevator_put(struct elevator_type *e)
{
	module_put(e->elevator_owner);
}

static struct elevator_type *elevator_get(const char *name, bool try_loading)
{
	struct elevator_type *e;

	spin_lock(&elv_list_lock);

	e = elevator_find(name);
	if (!e && try_loading) {
		spin_unlock(&elv_list_lock);
		request_module("%s-iosched", name);
		spin_lock(&elv_list_lock);
		e = elevator_find(name);
	}

	if (e && !try_module_get(e->elevator_owner))
		e = NULL;

	spin_unlock(&elv_list_lock);

	return e;
}

static char chosen_elevator[ELV_NAME_MAX];

static int __init elevator_setup(char *str)
{
	/*
	 * Be backwards-compatible with previous kernels, so users
	 * won't get the wrong elevator.
	 */
	strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
	return 1;
}

__setup("elevator=", elevator_setup);
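
/*
 * Example (not from this file): passing "elevator=deadline" on the kernel
 * command line stores "deadline" in chosen_elevator, which is then used as
 * the default scheduler for legacy (non-mq) request queues.
 */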
/* called during boot to load the elevator chosen by the elevator param */
void __init load_default_elevator_module(void)
{
	struct elevator_type *e;

	if (!chosen_elevator[0])
		return;

	spin_lock(&elv_list_lock);
	e = elevator_find(chosen_elevator);
	spin_unlock(&elv_list_lock);

	if (!e)
		request_module("%s-iosched", chosen_elevator);
}

static struct kobj_type elv_ktype;

struct elevator_queue *elevator_alloc(struct request_queue *q,
				  struct elevator_type *e)
{
	struct elevator_queue *eq;

	eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, q->node);
	if (unlikely(!eq))
		return NULL;

	eq->type = e;
	kobject_init(&eq->kobj, &elv_ktype);
	mutex_init(&eq->sysfs_lock);
	hash_init(eq->hash);
	eq->uses_mq = e->uses_mq;

	return eq;
}
EXPORT_SYMBOL(elevator_alloc);

static void elevator_release(struct kobject *kobj)
{
	struct elevator_queue *e;

	e = container_of(kobj, struct elevator_queue, kobj);
	elevator_put(e->type);
	kfree(e);
}

int elevator_init(struct request_queue *q, char *name)
{
	struct elevator_type *e = NULL;
	int err;

	/*
	 * q->sysfs_lock must be held to provide mutual exclusion between
	 * elevator_switch() and here.
	 */
	lockdep_assert_held(&q->sysfs_lock);

	if (unlikely(q->elevator))
		return 0;

	INIT_LIST_HEAD(&q->queue_head);
	q->last_merge = NULL;
	q->end_sector = 0;
	q->boundary_rq = NULL;

	if (name) {
		e = elevator_get(name, true);
		if (!e)
			return -EINVAL;
	}

	/*
	 * Use the default elevator specified by config boot param for
	 * non-mq devices, or by config option. Don't try to load modules
	 * as we could be running off async and request_module() isn't
	 * allowed from async.
	 */
	if (!e && !q->mq_ops && *chosen_elevator) {
		e = elevator_get(chosen_elevator, false);
		if (!e)
			printk(KERN_ERR "I/O scheduler %s not found\n",
							chosen_elevator);
	}

	if (!e) {
		if (q->mq_ops && q->nr_hw_queues == 1)
			e = elevator_get(CONFIG_DEFAULT_SQ_IOSCHED, false);
		else if (q->mq_ops)
			e = elevator_get(CONFIG_DEFAULT_MQ_IOSCHED, false);
		else
			e = elevator_get(CONFIG_DEFAULT_IOSCHED, false);

		if (!e) {
			printk(KERN_ERR
				"Default I/O scheduler not found. " \
				"Using noop/none.\n");
			e = elevator_get("noop", false);
		}
	}

	if (e->uses_mq) {
		err = blk_mq_sched_setup(q);
		if (!err)
			err = e->ops.mq.init_sched(q, e);
	} else
		err = e->ops.sq.elevator_init_fn(q, e);
	if (err) {
		if (e->uses_mq)
			blk_mq_sched_teardown(q);
		elevator_put(e);
	}
	return err;
}
EXPORT_SYMBOL(elevator_init);

void elevator_exit(struct elevator_queue *e)
{
	mutex_lock(&e->sysfs_lock);
	if (e->uses_mq && e->type->ops.mq.exit_sched)
		e->type->ops.mq.exit_sched(e);
	else if (!e->uses_mq && e->type->ops.sq.elevator_exit_fn)
		e->type->ops.sq.elevator_exit_fn(e);
	mutex_unlock(&e->sysfs_lock);

	kobject_put(&e->kobj);
}
EXPORT_SYMBOL(elevator_exit);

static inline void __elv_rqhash_del(struct request *rq)
{
	hash_del(&rq->hash);
	rq->rq_flags &= ~RQF_HASHED;
}

void elv_rqhash_del(struct request_queue *q, struct request *rq)
{
	if (ELV_ON_HASH(rq))
		__elv_rqhash_del(rq);
}
EXPORT_SYMBOL_GPL(elv_rqhash_del);

void elv_rqhash_add(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	BUG_ON(ELV_ON_HASH(rq));
	hash_add(e->hash, &rq->hash, rq_hash_key(rq));
	rq->rq_flags |= RQF_HASHED;
}
EXPORT_SYMBOL_GPL(elv_rqhash_add);

void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
{
	__elv_rqhash_del(rq);
	elv_rqhash_add(q, rq);
}

struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
{
	struct elevator_queue *e = q->elevator;
	struct hlist_node *next;
	struct request *rq;

	hash_for_each_possible_safe(e->hash, rq, next, hash, offset) {
		BUG_ON(!ELV_ON_HASH(rq));

		if (unlikely(!rq_mergeable(rq))) {
			__elv_rqhash_del(rq);
			continue;
		}

		if (rq_hash_key(rq) == offset)
			return rq;
	}

	return NULL;
}

/*
 * RB-tree support functions for inserting/lookup/removal of requests
 * in a sorted RB tree.
 */
void elv_rb_add(struct rb_root *root, struct request *rq)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct request *__rq;

	while (*p) {
		parent = *p;
		__rq = rb_entry(parent, struct request, rb_node);

		if (blk_rq_pos(rq) < blk_rq_pos(__rq))
			p = &(*p)->rb_left;
		else if (blk_rq_pos(rq) >= blk_rq_pos(__rq))
			p = &(*p)->rb_right;
	}

	rb_link_node(&rq->rb_node, parent, p);
	rb_insert_color(&rq->rb_node, root);
}
EXPORT_SYMBOL(elv_rb_add);

void elv_rb_del(struct rb_root *root, struct request *rq)
{
	BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
	rb_erase(&rq->rb_node, root);
	RB_CLEAR_NODE(&rq->rb_node);
}
EXPORT_SYMBOL(elv_rb_del);

struct request *elv_rb_find(struct rb_root *root, sector_t sector)
{
	struct rb_node *n = root->rb_node;
	struct request *rq;

	while (n) {
		rq = rb_entry(n, struct request, rb_node);

		if (sector < blk_rq_pos(rq))
			n = n->rb_left;
		else if (sector > blk_rq_pos(rq))
			n = n->rb_right;
		else
			return rq;
	}

	return NULL;
}
EXPORT_SYMBOL(elv_rb_find);
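
/*
 * Illustration (sketch, not code from this file): a sector-sorted scheduler
 * such as deadline typically keeps one rb tree per data direction and uses
 * the helpers above roughly like this:
 *
 *	elv_rb_add(&dd->sort_list[data_dir], rq);	  // on insert
 *	__rq = elv_rb_find(&dd->sort_list[bio_data_dir(bio)],
 *			   bio_end_sector(bio));	  // front-merge probe
 *	elv_rb_del(&dd->sort_list[data_dir], rq);	  // on dispatch/removal
 *
 * The field names are illustrative; see the individual schedulers for the
 * real usage.
 */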
/*
 * Insert rq into dispatch queue of q.  Queue lock must be held on
 * entry.  rq is sorted into the dispatch queue. To be used by
 * specific elevators.
 */
void elv_dispatch_sort(struct request_queue *q, struct request *rq)
{
	sector_t boundary;
	struct list_head *entry;

	if (q->last_merge == rq)
		q->last_merge = NULL;

	elv_rqhash_del(q, rq);

	q->nr_sorted--;

	boundary = q->end_sector;
	list_for_each_prev(entry, &q->queue_head) {
		struct request *pos = list_entry_rq(entry);

		if (req_op(rq) != req_op(pos))
			break;
		if (rq_data_dir(rq) != rq_data_dir(pos))
			break;
		if (pos->rq_flags & (RQF_STARTED | RQF_SOFTBARRIER))
			break;
		if (blk_rq_pos(rq) >= boundary) {
			if (blk_rq_pos(pos) < boundary)
				continue;
		} else {
			if (blk_rq_pos(pos) >= boundary)
				break;
		}
		if (blk_rq_pos(rq) >= blk_rq_pos(pos))
			break;
	}

	list_add(&rq->queuelist, entry);
}
EXPORT_SYMBOL(elv_dispatch_sort);

/*
 * Insert rq into dispatch queue of q.  Queue lock must be held on
 * entry.  rq is added to the back of the dispatch queue. To be used by
 * specific elevators.
 */
void elv_dispatch_add_tail(struct request_queue *q, struct request *rq)
{
	if (q->last_merge == rq)
		q->last_merge = NULL;

	elv_rqhash_del(q, rq);

	q->nr_sorted--;

	q->end_sector = rq_end_sector(rq);
	q->boundary_rq = rq;
	list_add_tail(&rq->queuelist, &q->queue_head);
}
EXPORT_SYMBOL(elv_dispatch_add_tail);
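
/*
 * The three merge levels tested below map onto the per-queue sysfs
 * attribute "nomerges" (e.g. /sys/block/sda/queue/nomerges):
 *	0 - all merge attempts (one-hit cache, hash and scheduler lookups)
 *	1 - only the simple one-hit cache is tried (noxmerges)
 *	2 - no merging at all (nomerges)
 */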
int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
{
	struct elevator_queue *e = q->elevator;
	struct request *__rq;
	int ret;

	/*
	 * Levels of merges:
	 * 	nomerges:  No merges at all attempted
	 * 	noxmerges: Only simple one-hit cache try
	 * 	merges:	   All merge tries attempted
	 */
	if (blk_queue_nomerges(q) || !bio_mergeable(bio))
		return ELEVATOR_NO_MERGE;

	/*
	 * First try one-hit cache.
	 */
	if (q->last_merge && elv_bio_merge_ok(q->last_merge, bio)) {
		ret = blk_try_merge(q->last_merge, bio);
		if (ret != ELEVATOR_NO_MERGE) {
			*req = q->last_merge;
			return ret;
		}
	}

	if (blk_queue_noxmerges(q))
		return ELEVATOR_NO_MERGE;

	/*
	 * See if our hash lookup can find a potential backmerge.
	 */
	__rq = elv_rqhash_find(q, bio->bi_iter.bi_sector);
	if (__rq && elv_bio_merge_ok(__rq, bio)) {
		*req = __rq;
		return ELEVATOR_BACK_MERGE;
	}

	if (e->uses_mq && e->type->ops.mq.request_merge)
		return e->type->ops.mq.request_merge(q, req, bio);
	else if (!e->uses_mq && e->type->ops.sq.elevator_merge_fn)
		return e->type->ops.sq.elevator_merge_fn(q, req, bio);

	return ELEVATOR_NO_MERGE;
}

/*
 * Attempt to do an insertion back merge. Only check for the case where
 * we can append 'rq' to an existing request, so we can throw 'rq' away
 * afterwards.
 *
 * Returns true if we merged, false otherwise
 */
bool elv_attempt_insert_merge(struct request_queue *q, struct request *rq)
{
	struct request *__rq;
	bool ret;

	if (blk_queue_nomerges(q))
		return false;

	/*
	 * First try one-hit cache.
	 */
	if (q->last_merge && blk_attempt_req_merge(q, q->last_merge, rq))
		return true;

	if (blk_queue_noxmerges(q))
		return false;

	ret = false;
	/*
	 * See if our hash lookup can find a potential backmerge.
	 */
	while (1) {
		__rq = elv_rqhash_find(q, blk_rq_pos(rq));
		if (!__rq || !blk_attempt_req_merge(q, __rq, rq))
			break;

		/* The merged request could be merged with others, try again */
		ret = true;
		rq = __rq;
	}

	return ret;
}

void elv_merged_request(struct request_queue *q, struct request *rq, int type)
{
	struct elevator_queue *e = q->elevator;

	if (e->uses_mq && e->type->ops.mq.request_merged)
		e->type->ops.mq.request_merged(q, rq, type);
	else if (!e->uses_mq && e->type->ops.sq.elevator_merged_fn)
		e->type->ops.sq.elevator_merged_fn(q, rq, type);

	if (type == ELEVATOR_BACK_MERGE)
		elv_rqhash_reposition(q, rq);

	q->last_merge = rq;
}

void elv_merge_requests(struct request_queue *q, struct request *rq,
			     struct request *next)
{
	struct elevator_queue *e = q->elevator;
	bool next_sorted = false;

	if (e->uses_mq && e->type->ops.mq.requests_merged)
		e->type->ops.mq.requests_merged(q, rq, next);
	else if (e->type->ops.sq.elevator_merge_req_fn) {
		next_sorted = next->rq_flags & RQF_SORTED;
		if (next_sorted)
			e->type->ops.sq.elevator_merge_req_fn(q, rq, next);
	}

	elv_rqhash_reposition(q, rq);

	if (next_sorted) {
		elv_rqhash_del(q, next);
		q->nr_sorted--;
	}

	q->last_merge = rq;
}

void elv_bio_merged(struct request_queue *q, struct request *rq,
			struct bio *bio)
{
	struct elevator_queue *e = q->elevator;

	if (WARN_ON_ONCE(e->uses_mq))
		return;

	if (e->type->ops.sq.elevator_bio_merged_fn)
		e->type->ops.sq.elevator_bio_merged_fn(q, rq, bio);
}
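
/*
 * Runtime PM glue: the helpers below are called on every add/requeue so
 * that queueing the first non-PM request against a runtime-suspended
 * device triggers an asynchronous resume, and so that requeues keep the
 * nr_pending bookkeeping balanced.
 */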
#ifdef CONFIG_PM
static void blk_pm_requeue_request(struct request *rq)
{
	if (rq->q->dev && !(rq->rq_flags & RQF_PM))
		rq->q->nr_pending--;
}

static void blk_pm_add_request(struct request_queue *q, struct request *rq)
{
	if (q->dev && !(rq->rq_flags & RQF_PM) && q->nr_pending++ == 0 &&
	    (q->rpm_status == RPM_SUSPENDED || q->rpm_status == RPM_SUSPENDING))
		pm_request_resume(q->dev);
}
#else
static inline void blk_pm_requeue_request(struct request *rq) {}
static inline void blk_pm_add_request(struct request_queue *q,
				      struct request *rq)
{
}
#endif

void elv_requeue_request(struct request_queue *q, struct request *rq)
{
	/*
	 * it already went through dequeue, we need to decrement the
	 * in_flight count again
	 */
	if (blk_account_rq(rq)) {
		q->in_flight[rq_is_sync(rq)]--;
		if (rq->rq_flags & RQF_SORTED)
			elv_deactivate_rq(q, rq);
	}

	rq->rq_flags &= ~RQF_STARTED;

	blk_pm_requeue_request(rq);

	__elv_add_request(q, rq, ELEVATOR_INSERT_REQUEUE);
}

void elv_drain_elevator(struct request_queue *q)
{
	struct elevator_queue *e = q->elevator;
	static int printed;

	if (WARN_ON_ONCE(e->uses_mq))
		return;

	lockdep_assert_held(q->queue_lock);

	while (e->type->ops.sq.elevator_dispatch_fn(q, 1))
		;
	if (q->nr_sorted && printed++ < 10) {
		printk(KERN_ERR "%s: forced dispatching is broken "
		       "(nr_sorted=%u), please report this\n",
		       q->elevator->type->elevator_name, q->nr_sorted);
	}
}
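
/*
 * Quick map of the insertion points handled below (descriptive only):
 *	ELEVATOR_INSERT_FRONT/REQUEUE - head of the dispatch list
 *	ELEVATOR_INSERT_BACK          - drain the elevator, tail of the list,
 *					then kick the queue
 *	ELEVATOR_INSERT_SORT(_MERGE)  - hand the request to the elevator,
 *					optionally trying an insertion merge
 *	ELEVATOR_INSERT_FLUSH         - route through the flush machinery
 */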
void __elv_add_request(struct request_queue *q, struct request *rq, int where)
{
	trace_block_rq_insert(q, rq);

	blk_pm_add_request(q, rq);

	rq->q = q;

	if (rq->rq_flags & RQF_SOFTBARRIER) {
		/* barriers are scheduling boundary, update end_sector */
		if (rq->cmd_type == REQ_TYPE_FS) {
			q->end_sector = rq_end_sector(rq);
			q->boundary_rq = rq;
		}
	} else if (!(rq->rq_flags & RQF_ELVPRIV) &&
		    (where == ELEVATOR_INSERT_SORT ||
		     where == ELEVATOR_INSERT_SORT_MERGE))
		where = ELEVATOR_INSERT_BACK;

	switch (where) {
	case ELEVATOR_INSERT_REQUEUE:
	case ELEVATOR_INSERT_FRONT:
		rq->rq_flags |= RQF_SOFTBARRIER;
		list_add(&rq->queuelist, &q->queue_head);
		break;

	case ELEVATOR_INSERT_BACK:
		rq->rq_flags |= RQF_SOFTBARRIER;
		elv_drain_elevator(q);
		list_add_tail(&rq->queuelist, &q->queue_head);
		/*
		 * We kick the queue here for the following reasons.
		 * - The elevator might have returned NULL previously
		 *   to delay requests and returned them now.  As the
		 *   queue wasn't empty before this request, ll_rw_blk
		 *   won't run the queue on return, resulting in hang.
		 * - Usually, back inserted requests won't be merged
		 *   with anything.  There's no point in delaying queue
		 *   processing.
		 */
		__blk_run_queue(q);
		break;

	case ELEVATOR_INSERT_SORT_MERGE:
		/*
		 * If we succeed in merging this request with one in the
		 * queue already, we are done - rq has now been freed,
		 * so no need to do anything further.
		 */
		if (elv_attempt_insert_merge(q, rq))
			break;
	case ELEVATOR_INSERT_SORT:
		BUG_ON(rq->cmd_type != REQ_TYPE_FS);
		rq->rq_flags |= RQF_SORTED;
		q->nr_sorted++;
		if (rq_mergeable(rq)) {
			elv_rqhash_add(q, rq);
			if (!q->last_merge)
				q->last_merge = rq;
		}

		/*
		 * Some ioscheds (cfq) run q->request_fn directly, so
		 * rq cannot be accessed after calling
		 * elevator_add_req_fn.
		 */
		q->elevator->type->ops.sq.elevator_add_req_fn(q, rq);
		break;

	case ELEVATOR_INSERT_FLUSH:
		rq->rq_flags |= RQF_SOFTBARRIER;
		blk_insert_flush(rq);
		break;
	default:
		printk(KERN_ERR "%s: bad insertion point %d\n",
		       __func__, where);
		BUG();
	}
}
EXPORT_SYMBOL(__elv_add_request);

void elv_add_request(struct request_queue *q, struct request *rq, int where)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__elv_add_request(q, rq, where);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(elv_add_request);

struct request *elv_latter_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->uses_mq && e->type->ops.mq.next_request)
		return e->type->ops.mq.next_request(q, rq);
	else if (!e->uses_mq && e->type->ops.sq.elevator_latter_req_fn)
		return e->type->ops.sq.elevator_latter_req_fn(q, rq);

	return NULL;
}

struct request *elv_former_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->uses_mq && e->type->ops.mq.former_request)
		return e->type->ops.mq.former_request(q, rq);
	if (!e->uses_mq && e->type->ops.sq.elevator_former_req_fn)
		return e->type->ops.sq.elevator_former_req_fn(q, rq);

	return NULL;
}

int elv_set_request(struct request_queue *q, struct request *rq,
		    struct bio *bio, gfp_t gfp_mask)
{
	struct elevator_queue *e = q->elevator;

	if (WARN_ON_ONCE(e->uses_mq))
		return 0;

	if (e->type->ops.sq.elevator_set_req_fn)
		return e->type->ops.sq.elevator_set_req_fn(q, rq, bio, gfp_mask);
	return 0;
}

void elv_put_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (WARN_ON_ONCE(e->uses_mq))
		return;

	if (e->type->ops.sq.elevator_put_req_fn)
		e->type->ops.sq.elevator_put_req_fn(rq);
}

int elv_may_queue(struct request_queue *q, unsigned int op)
{
	struct elevator_queue *e = q->elevator;

	if (WARN_ON_ONCE(e->uses_mq))
		return 0;

	if (e->type->ops.sq.elevator_may_queue_fn)
		return e->type->ops.sq.elevator_may_queue_fn(q, op);

	return ELV_MQUEUE_MAY;
}

void elv_completed_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (WARN_ON_ONCE(e->uses_mq))
		return;

	/*
	 * request is released from the driver, io must be done
	 */
	if (blk_account_rq(rq)) {
		q->in_flight[rq_is_sync(rq)]--;
		if ((rq->rq_flags & RQF_SORTED) &&
		    e->type->ops.sq.elevator_completed_req_fn)
			e->type->ops.sq.elevator_completed_req_fn(q, rq);
	}
}

#define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)

static ssize_t
elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct elv_fs_entry *entry = to_elv(attr);
	struct elevator_queue *e;
	ssize_t error;

	if (!entry->show)
		return -EIO;

	e = container_of(kobj, struct elevator_queue, kobj);
	mutex_lock(&e->sysfs_lock);
	error = e->type ? entry->show(e, page) : -ENOENT;
	mutex_unlock(&e->sysfs_lock);
	return error;
}

static ssize_t
elv_attr_store(struct kobject *kobj, struct attribute *attr,
	       const char *page, size_t length)
{
	struct elv_fs_entry *entry = to_elv(attr);
	struct elevator_queue *e;
	ssize_t error;

	if (!entry->store)
		return -EIO;

	e = container_of(kobj, struct elevator_queue, kobj);
	mutex_lock(&e->sysfs_lock);
	error = e->type ? entry->store(e, page, length) : -ENOENT;
	mutex_unlock(&e->sysfs_lock);
	return error;
}

static const struct sysfs_ops elv_sysfs_ops = {
	.show	= elv_attr_show,
	.store	= elv_attr_store,
};

static struct kobj_type elv_ktype = {
	.sysfs_ops	= &elv_sysfs_ops,
	.release	= elevator_release,
};

int elv_register_queue(struct request_queue *q)
{
	struct elevator_queue *e = q->elevator;
	int error;

	error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched");
	if (!error) {
		struct elv_fs_entry *attr = e->type->elevator_attrs;
		if (attr) {
			while (attr->attr.name) {
				if (sysfs_create_file(&e->kobj, &attr->attr))
					break;
				attr++;
			}
		}
		kobject_uevent(&e->kobj, KOBJ_ADD);
		e->registered = 1;
		if (!e->uses_mq && e->type->ops.sq.elevator_registered_fn)
			e->type->ops.sq.elevator_registered_fn(q);
	}
	return error;
}
EXPORT_SYMBOL(elv_register_queue);

void elv_unregister_queue(struct request_queue *q)
{
	if (q) {
		struct elevator_queue *e = q->elevator;

		kobject_uevent(&e->kobj, KOBJ_REMOVE);
		kobject_del(&e->kobj);
		e->registered = 0;
	}
}
EXPORT_SYMBOL(elv_unregister_queue);
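
/*
 * For reference, a legacy (single-queue) scheduler module registers itself
 * roughly as below.  This is a sketch with made-up names, not code from
 * this file; see the in-tree schedulers for working examples.
 *
 *	static struct elevator_type elevator_example = {
 *		.ops.sq = {
 *			.elevator_merge_fn	= example_merge,
 *			.elevator_dispatch_fn	= example_dispatch,
 *			.elevator_add_req_fn	= example_add_request,
 *		},
 *		.elevator_name		= "example",
 *		.elevator_owner		= THIS_MODULE,
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return elv_register(&elevator_example);
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		elv_unregister(&elevator_example);
 *	}
 */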
int elv_register(struct elevator_type *e)
{
	char *def = "";

	/* create icq_cache if requested */
	if (e->icq_size) {
		if (WARN_ON(e->icq_size < sizeof(struct io_cq)) ||
		    WARN_ON(e->icq_align < __alignof__(struct io_cq)))
			return -EINVAL;

		snprintf(e->icq_cache_name, sizeof(e->icq_cache_name),
			 "%s_io_cq", e->elevator_name);
		e->icq_cache = kmem_cache_create(e->icq_cache_name, e->icq_size,
						 e->icq_align, 0, NULL);
		if (!e->icq_cache)
			return -ENOMEM;
	}

	/* register, don't allow duplicate names */
	spin_lock(&elv_list_lock);
	if (elevator_find(e->elevator_name)) {
		spin_unlock(&elv_list_lock);
		if (e->icq_cache)
			kmem_cache_destroy(e->icq_cache);
		return -EBUSY;
	}
	list_add_tail(&e->list, &elv_list);
	spin_unlock(&elv_list_lock);

	/* print pretty message */
	if (!strcmp(e->elevator_name, chosen_elevator) ||
	    (!*chosen_elevator &&
	     !strcmp(e->elevator_name, CONFIG_DEFAULT_IOSCHED)))
		def = " (default)";

	printk(KERN_INFO "io scheduler %s registered%s\n", e->elevator_name,
								def);
	return 0;
}
EXPORT_SYMBOL_GPL(elv_register);

void elv_unregister(struct elevator_type *e)
{
	/* unregister */
	spin_lock(&elv_list_lock);
	list_del_init(&e->list);
	spin_unlock(&elv_list_lock);

	/*
	 * Destroy icq_cache if it exists.  icq's are RCU managed.  Make
	 * sure all RCU operations are complete before proceeding.
	 */
	if (e->icq_cache) {
		rcu_barrier();
		kmem_cache_destroy(e->icq_cache);
		e->icq_cache = NULL;
	}
}
EXPORT_SYMBOL_GPL(elv_unregister);

/*
 * switch to new_e io scheduler. be careful not to introduce deadlocks -
 * we don't free the old io scheduler, before we have allocated what we
 * need for the new one. this way we have a chance of going back to the old
 * one, if the new one fails init for some reason.
 */
static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
{
	struct elevator_queue *old = q->elevator;
	bool old_registered = false;
	int err;

	if (q->mq_ops) {
		blk_mq_freeze_queue(q);
		blk_mq_quiesce_queue(q);
	}

	/*
	 * Turn on BYPASS and drain all requests w/ elevator private data.
	 * Block layer doesn't call into a quiesced elevator - all requests
	 * are directly put on the dispatch list without elevator data
	 * using INSERT_BACK.  All requests have SOFTBARRIER set and no
	 * merge happens either.
	 */
	if (old) {
		old_registered = old->registered;

		if (old->uses_mq)
			blk_mq_sched_teardown(q);

		if (!q->mq_ops)
			blk_queue_bypass_start(q);

		/* unregister and clear all auxiliary data of the old elevator */
		if (old_registered)
			elv_unregister_queue(q);

		spin_lock_irq(q->queue_lock);
		ioc_clear_queue(q);
		spin_unlock_irq(q->queue_lock);
	}

	/* allocate, init and register new elevator */
	if (new_e) {
		if (new_e->uses_mq) {
			err = blk_mq_sched_setup(q);
			if (!err)
				err = new_e->ops.mq.init_sched(q, new_e);
		} else
			err = new_e->ops.sq.elevator_init_fn(q, new_e);
		if (err)
			goto fail_init;

		err = elv_register_queue(q);
		if (err)
			goto fail_register;
	} else
		q->elevator = NULL;

	/* done, kill the old one and finish */
	if (old) {
		elevator_exit(old);
		if (!q->mq_ops)
			blk_queue_bypass_end(q);
	}

	if (q->mq_ops) {
		blk_mq_unfreeze_queue(q);
		blk_mq_start_stopped_hw_queues(q, true);
	}

	if (new_e)
		blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);
	else
		blk_add_trace_msg(q, "elv switch: none");

	return 0;

fail_register:
	if (q->mq_ops)
		blk_mq_sched_teardown(q);
	elevator_exit(q->elevator);
fail_init:
	/* switch failed, restore and re-register old elevator */
	if (old) {
		q->elevator = old;
		elv_register_queue(q);
		if (!q->mq_ops)
			blk_queue_bypass_end(q);
	}
	if (q->mq_ops) {
		blk_mq_unfreeze_queue(q);
		blk_mq_start_stopped_hw_queues(q, true);
	}

	return err;
}

/*
 * Switch this queue to the given IO scheduler.
 */
static int __elevator_change(struct request_queue *q, const char *name)
{
	char elevator_name[ELV_NAME_MAX];
	struct elevator_type *e;

	/*
	 * Special case for mq, turn off scheduling
	 */
	if (q->mq_ops && !strncmp(name, "none", 4))
		return elevator_switch(q, NULL);

	strlcpy(elevator_name, name, sizeof(elevator_name));
	e = elevator_get(strstrip(elevator_name), true);
	if (!e) {
		printk(KERN_ERR "elevator: type %s not found\n", elevator_name);
		return -EINVAL;
	}

	if (q->elevator &&
	    !strcmp(elevator_name, q->elevator->type->elevator_name)) {
		elevator_put(e);
		return 0;
	}

	if (!e->uses_mq && q->mq_ops) {
		elevator_put(e);
		return -EINVAL;
	}
	if (e->uses_mq && !q->mq_ops) {
		elevator_put(e);
		return -EINVAL;
	}

	return elevator_switch(q, e);
}

int elevator_change(struct request_queue *q, const char *name)
{
	int ret;

	/* Protect q->elevator from elevator_init() */
	mutex_lock(&q->sysfs_lock);
	ret = __elevator_change(q, name);
	mutex_unlock(&q->sysfs_lock);

	return ret;
}
EXPORT_SYMBOL(elevator_change);
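
/*
 * The two helpers below back the per-queue "scheduler" sysfs attribute.
 * For example:
 *
 *	# cat /sys/block/sda/queue/scheduler
 *	noop deadline [cfq]
 *	# echo deadline > /sys/block/sda/queue/scheduler
 *
 * Reading lists the registered schedulers compatible with the queue, with
 * the active one in brackets; writing a name switches to it ("none" turns
 * scheduling off for blk-mq queues).
 */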
ssize_t elv_iosched_store(struct request_queue *q, const char *name,
			  size_t count)
{
	int ret;

	if (!(q->mq_ops || q->request_fn))
		return count;

	ret = __elevator_change(q, name);
	if (!ret)
		return count;

	printk(KERN_ERR "elevator: switch to %s failed\n", name);
	return ret;
}

ssize_t elv_iosched_show(struct request_queue *q, char *name)
{
	struct elevator_queue *e = q->elevator;
	struct elevator_type *elv = NULL;
	struct elevator_type *__e;
	int len = 0;

	if (!blk_queue_stackable(q))
		return sprintf(name, "none\n");

	if (!q->elevator)
		len += sprintf(name+len, "[none] ");
	else
		elv = e->type;

	spin_lock(&elv_list_lock);
	list_for_each_entry(__e, &elv_list, list) {
		if (elv && !strcmp(elv->elevator_name, __e->elevator_name)) {
			len += sprintf(name+len, "[%s] ", elv->elevator_name);
			continue;
		}
		if (__e->uses_mq && q->mq_ops)
			len += sprintf(name+len, "%s ", __e->elevator_name);
		else if (!__e->uses_mq && !q->mq_ops)
			len += sprintf(name+len, "%s ", __e->elevator_name);
	}
	spin_unlock(&elv_list_lock);

	if (q->mq_ops && q->elevator)
		len += sprintf(name+len, "none");

	len += sprintf(len+name, "\n");
	return len;
}

struct request *elv_rb_former_request(struct request_queue *q,
				      struct request *rq)
{
	struct rb_node *rbprev = rb_prev(&rq->rb_node);

	if (rbprev)
		return rb_entry_rq(rbprev);

	return NULL;
}
EXPORT_SYMBOL(elv_rb_former_request);

struct request *elv_rb_latter_request(struct request_queue *q,
				      struct request *rq)
{
	struct rb_node *rbnext = rb_next(&rq->rb_node);

	if (rbnext)
		return rb_entry_rq(rbnext);

	return NULL;
}
EXPORT_SYMBOL(elv_rb_latter_request);