block/cfq-iosched.c (mirror_ubuntu-bionic-kernel.git blob, at commit "cfq-iosched: get rid of the coop_preempt flag")
1 /*
2 * CFQ, or complete fairness queueing, disk scheduler.
3 *
4 * Based on ideas from a previously unfinished I/O
5 * scheduler (round-robin per-process disk scheduling) and from Andrea Arcangeli.
6 *
7 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
8 */
9 #include <linux/module.h>
10 #include <linux/blkdev.h>
11 #include <linux/elevator.h>
12 #include <linux/rbtree.h>
13 #include <linux/ioprio.h>
14 #include <linux/blktrace_api.h>
15
16 /*
17 * tunables
18 */
19 /* max requests dispatched from a queue in one round of service */
20 static const int cfq_quantum = 4;
21 static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
22 /* maximum backwards seek, in KiB */
23 static const int cfq_back_max = 16 * 1024;
24 /* penalty of a backwards seek */
25 static const int cfq_back_penalty = 2;
26 static const int cfq_slice_sync = HZ / 10;
27 static int cfq_slice_async = HZ / 25;
28 static const int cfq_slice_async_rq = 2;
29 static int cfq_slice_idle = HZ / 125;
30 static const int cfq_target_latency = HZ * 3/10; /* 300 ms */
31 static const int cfq_hist_divisor = 4;
32
33 /*
34 * offset from end of service tree
35 */
36 #define CFQ_IDLE_DELAY (HZ / 5)
37
38 /*
39 * below this threshold, we consider thinktime immediate
40 */
41 #define CFQ_MIN_TT (2)
42
43 /*
44 * Allow merged cfqqs to perform this amount of seeky I/O before
45 * deciding to break the queues up again.
46 */
47 #define CFQQ_COOP_TOUT (HZ)
48
49 #define CFQ_SLICE_SCALE (5)
50 #define CFQ_HW_QUEUE_MIN (5)
51
52 #define RQ_CIC(rq) \
53 ((struct cfq_io_context *) (rq)->elevator_private)
54 #define RQ_CFQQ(rq) (struct cfq_queue *) ((rq)->elevator_private2)
55
56 static struct kmem_cache *cfq_pool;
57 static struct kmem_cache *cfq_ioc_pool;
58
59 static DEFINE_PER_CPU(unsigned long, cfq_ioc_count);
60 static struct completion *ioc_gone;
61 static DEFINE_SPINLOCK(ioc_gone_lock);
62
63 #define CFQ_PRIO_LISTS IOPRIO_BE_NR
64 #define cfq_class_idle(cfqq) ((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
65 #define cfq_class_rt(cfqq) ((cfqq)->ioprio_class == IOPRIO_CLASS_RT)
66
67 #define sample_valid(samples) ((samples) > 80)
68
69 /*
70 * Most of our rbtree usage is for sorting with min extraction, so
71 * if we cache the leftmost node we don't have to walk down the tree
72 * to find it. Idea borrowed from Ingo Molnar's CFS scheduler. We should
73 * move this into the elevator for the rq sorting as well.
74 */
75 struct cfq_rb_root {
76 struct rb_root rb;
77 struct rb_node *left;
78 unsigned count;
79 };
80 #define CFQ_RB_ROOT (struct cfq_rb_root) { RB_ROOT, NULL, 0, }
81
82 /*
83 * Per process-grouping structure
84 */
85 struct cfq_queue {
86 /* reference count */
87 atomic_t ref;
88 /* various state flags, see below */
89 unsigned int flags;
90 /* parent cfq_data */
91 struct cfq_data *cfqd;
92 /* service_tree member */
93 struct rb_node rb_node;
94 /* service_tree key */
95 unsigned long rb_key;
96 /* prio tree member */
97 struct rb_node p_node;
98 /* prio tree root we belong to, if any */
99 struct rb_root *p_root;
100 /* sorted list of pending requests */
101 struct rb_root sort_list;
102 /* if fifo isn't expired, next request to serve */
103 struct request *next_rq;
104 /* requests queued in sort_list */
105 int queued[2];
106 /* currently allocated requests */
107 int allocated[2];
108 /* fifo list of requests in sort_list */
109 struct list_head fifo;
110
111 unsigned long slice_end;
112 long slice_resid;
113 unsigned int slice_dispatch;
114
115 /* pending metadata requests */
116 int meta_pending;
117 /* number of requests that are on the dispatch list or inside driver */
118 int dispatched;
119
120 /* io prio of this group */
121 unsigned short ioprio, org_ioprio;
122 unsigned short ioprio_class, org_ioprio_class;
123
124 unsigned int seek_samples;
125 u64 seek_total;
126 sector_t seek_mean;
127 sector_t last_request_pos;
128 unsigned long seeky_start;
129
130 pid_t pid;
131
132 struct cfq_rb_root *service_tree;
133 struct cfq_queue *new_cfqq;
134 };
135
136 /*
137 * First index in the service_trees.
138 * IDLE is handled separately, so it has negative index
139 */
140 enum wl_prio_t {
141 IDLE_WORKLOAD = -1,
142 BE_WORKLOAD = 0,
143 RT_WORKLOAD = 1
144 };
145
146 /*
147 * Second index in the service_trees.
148 */
149 enum wl_type_t {
150 ASYNC_WORKLOAD = 0,
151 SYNC_NOIDLE_WORKLOAD = 1,
152 SYNC_WORKLOAD = 2
153 };
154
155
156 /*
157 * Per block device queue structure
158 */
159 struct cfq_data {
160 struct request_queue *queue;
161
162 /*
163 * rr lists of queues with requests, one rr for each priority class.
164 * Counts are embedded in the cfq_rb_root
165 */
166 struct cfq_rb_root service_trees[2][3];
167 struct cfq_rb_root service_tree_idle;
168 /*
169 * The priority currently being served
170 */
171 enum wl_prio_t serving_prio;
172 enum wl_type_t serving_type;
173 unsigned long workload_expires;
174
175 /*
176 * Each priority tree is sorted by next_request position. These
177 * trees are used when determining if two or more queues are
178 * interleaving requests (see cfq_close_cooperator).
179 */
180 struct rb_root prio_trees[CFQ_PRIO_LISTS];
181
182 unsigned int busy_queues;
183 unsigned int busy_queues_avg[2];
184
185 int rq_in_driver[2];
186 int sync_flight;
187
188 /*
189 * queue-depth detection
190 */
191 int rq_queued;
192 int hw_tag;
193 int hw_tag_samples;
194 int rq_in_driver_peak;
195
196 /*
197 * idle window management
198 */
199 struct timer_list idle_slice_timer;
200 struct work_struct unplug_work;
201
202 struct cfq_queue *active_queue;
203 struct cfq_io_context *active_cic;
204
205 /*
206 * async queue for each priority case
207 */
208 struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR];
209 struct cfq_queue *async_idle_cfqq;
210
211 sector_t last_position;
212
213 /*
214 * tunables, see top of file
215 */
216 unsigned int cfq_quantum;
217 unsigned int cfq_fifo_expire[2];
218 unsigned int cfq_back_penalty;
219 unsigned int cfq_back_max;
220 unsigned int cfq_slice[2];
221 unsigned int cfq_slice_async_rq;
222 unsigned int cfq_slice_idle;
223 unsigned int cfq_latency;
224
225 struct list_head cic_list;
226
227 /*
228 * Fallback dummy cfqq for extreme OOM conditions
229 */
230 struct cfq_queue oom_cfqq;
231
232 unsigned long last_end_sync_rq;
233 };
234
235 static struct cfq_rb_root *service_tree_for(enum wl_prio_t prio,
236 enum wl_type_t type,
237 struct cfq_data *cfqd)
238 {
239 if (prio == IDLE_WORKLOAD)
240 return &cfqd->service_tree_idle;
241
242 return &cfqd->service_trees[prio][type];
243 }
244
245 enum cfqq_state_flags {
246 CFQ_CFQQ_FLAG_on_rr = 0, /* on round-robin busy list */
247 CFQ_CFQQ_FLAG_wait_request, /* waiting for a request */
248 CFQ_CFQQ_FLAG_must_dispatch, /* must be allowed a dispatch */
249 CFQ_CFQQ_FLAG_must_alloc_slice, /* per-slice must_alloc flag */
250 CFQ_CFQQ_FLAG_fifo_expire, /* FIFO checked in this slice */
251 CFQ_CFQQ_FLAG_idle_window, /* slice idling enabled */
252 CFQ_CFQQ_FLAG_prio_changed, /* task priority has changed */
253 CFQ_CFQQ_FLAG_slice_new, /* no requests dispatched in slice */
254 CFQ_CFQQ_FLAG_sync, /* synchronous queue */
255 CFQ_CFQQ_FLAG_coop, /* cfqq is shared */
256 };
257
258 #define CFQ_CFQQ_FNS(name) \
259 static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq) \
260 { \
261 (cfqq)->flags |= (1 << CFQ_CFQQ_FLAG_##name); \
262 } \
263 static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq) \
264 { \
265 (cfqq)->flags &= ~(1 << CFQ_CFQQ_FLAG_##name); \
266 } \
267 static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq) \
268 { \
269 return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0; \
270 }
271
272 CFQ_CFQQ_FNS(on_rr);
273 CFQ_CFQQ_FNS(wait_request);
274 CFQ_CFQQ_FNS(must_dispatch);
275 CFQ_CFQQ_FNS(must_alloc_slice);
276 CFQ_CFQQ_FNS(fifo_expire);
277 CFQ_CFQQ_FNS(idle_window);
278 CFQ_CFQQ_FNS(prio_changed);
279 CFQ_CFQQ_FNS(slice_new);
280 CFQ_CFQQ_FNS(sync);
281 CFQ_CFQQ_FNS(coop);
282 #undef CFQ_CFQQ_FNS
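/*
 * Editor's note: for a given flag name, the CFQ_CFQQ_FNS() macro above
 * expands to a mark/clear/test helper triple. For example,
 * CFQ_CFQQ_FNS(on_rr) generates (sketch of the preprocessor output):
 *
 *	static inline void cfq_mark_cfqq_on_rr(struct cfq_queue *cfqq)
 *	{
 *		(cfqq)->flags |= (1 << CFQ_CFQQ_FLAG_on_rr);
 *	}
 *	static inline void cfq_clear_cfqq_on_rr(struct cfq_queue *cfqq)
 *	{
 *		(cfqq)->flags &= ~(1 << CFQ_CFQQ_FLAG_on_rr);
 *	}
 *	static inline int cfq_cfqq_on_rr(const struct cfq_queue *cfqq)
 *	{
 *		return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_on_rr)) != 0;
 *	}
 */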
283
284 #define cfq_log_cfqq(cfqd, cfqq, fmt, args...) \
285 blk_add_trace_msg((cfqd)->queue, "cfq%d " fmt, (cfqq)->pid, ##args)
286 #define cfq_log(cfqd, fmt, args...) \
287 blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)
288
289 static inline enum wl_prio_t cfqq_prio(struct cfq_queue *cfqq)
290 {
291 if (cfq_class_idle(cfqq))
292 return IDLE_WORKLOAD;
293 if (cfq_class_rt(cfqq))
294 return RT_WORKLOAD;
295 return BE_WORKLOAD;
296 }
297
298
299 static enum wl_type_t cfqq_type(struct cfq_queue *cfqq)
300 {
301 if (!cfq_cfqq_sync(cfqq))
302 return ASYNC_WORKLOAD;
303 if (!cfq_cfqq_idle_window(cfqq))
304 return SYNC_NOIDLE_WORKLOAD;
305 return SYNC_WORKLOAD;
306 }
307
308 static inline int cfq_busy_queues_wl(enum wl_prio_t wl, struct cfq_data *cfqd)
309 {
310 if (wl == IDLE_WORKLOAD)
311 return cfqd->service_tree_idle.count;
312
313 return cfqd->service_trees[wl][ASYNC_WORKLOAD].count
314 + cfqd->service_trees[wl][SYNC_NOIDLE_WORKLOAD].count
315 + cfqd->service_trees[wl][SYNC_WORKLOAD].count;
316 }
317
318 static void cfq_dispatch_insert(struct request_queue *, struct request *);
319 static struct cfq_queue *cfq_get_queue(struct cfq_data *, bool,
320 struct io_context *, gfp_t);
321 static struct cfq_io_context *cfq_cic_lookup(struct cfq_data *,
322 struct io_context *);
323
324 static inline int rq_in_driver(struct cfq_data *cfqd)
325 {
326 return cfqd->rq_in_driver[0] + cfqd->rq_in_driver[1];
327 }
328
329 static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_context *cic,
330 bool is_sync)
331 {
332 return cic->cfqq[is_sync];
333 }
334
335 static inline void cic_set_cfqq(struct cfq_io_context *cic,
336 struct cfq_queue *cfqq, bool is_sync)
337 {
338 cic->cfqq[is_sync] = cfqq;
339 }
340
341 /*
342 * We regard a request as SYNC if it's either a read or has the SYNC bit
343 * set (in which case it could also be a direct WRITE).
344 */
345 static inline bool cfq_bio_sync(struct bio *bio)
346 {
347 return bio_data_dir(bio) == READ || bio_rw_flagged(bio, BIO_RW_SYNCIO);
348 }
349
350 /*
351 * scheduler run of queue, if there are requests pending and no one in the
352 * driver that will restart queueing
353 */
354 static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
355 {
356 if (cfqd->busy_queues) {
357 cfq_log(cfqd, "schedule dispatch");
358 kblockd_schedule_work(cfqd->queue, &cfqd->unplug_work);
359 }
360 }
361
362 static int cfq_queue_empty(struct request_queue *q)
363 {
364 struct cfq_data *cfqd = q->elevator->elevator_data;
365
366 return !cfqd->busy_queues;
367 }
368
369 /*
370 * Scale schedule slice based on io priority. Use the sync time slice only
371 * if a queue is marked sync and has sync io queued. A sync queue with async
372 * io only should not get the full sync slice length.
373 */
374 static inline int cfq_prio_slice(struct cfq_data *cfqd, bool sync,
375 unsigned short prio)
376 {
377 const int base_slice = cfqd->cfq_slice[sync];
378
379 WARN_ON(prio >= IOPRIO_BE_NR);
380
381 return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - prio));
382 }
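/*
 * Editor's note, a worked example of the scaling above (assuming HZ=1000,
 * so base_slice = cfq_slice[1] = HZ/10 = 100 jiffies for a sync queue,
 * and CFQ_SLICE_SCALE = 5):
 *
 *	prio 4 (default): 100 + (100/5) * (4 - 4) = 100 jiffies
 *	prio 0 (highest): 100 + (100/5) * (4 - 0) = 180 jiffies
 *	prio 7 (lowest) : 100 + (100/5) * (4 - 7) =  40 jiffies
 *
 * i.e. each ioprio step is worth base_slice/5 of slice time around the
 * default priority of 4.
 */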
383
384 static inline int
385 cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
386 {
387 return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio);
388 }
389
390 /*
391 * get averaged number of queues of RT/BE priority.
392 * average is updated with a formula that gives more weight to higher numbers,
393 * so that it follows sudden increases quickly and decreases slowly
394 */
395
396 static inline unsigned cfq_get_avg_queues(struct cfq_data *cfqd, bool rt)
397 {
398 unsigned min_q, max_q;
399 unsigned mult = cfq_hist_divisor - 1;
400 unsigned round = cfq_hist_divisor / 2;
401 unsigned busy = cfq_busy_queues_wl(rt, cfqd);
402
403 min_q = min(cfqd->busy_queues_avg[rt], busy);
404 max_q = max(cfqd->busy_queues_avg[rt], busy);
405 cfqd->busy_queues_avg[rt] = (mult * max_q + min_q + round) /
406 cfq_hist_divisor;
407 return cfqd->busy_queues_avg[rt];
408 }
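/*
 * Editor's note, a numeric sketch of the asymmetric average above
 * (cfq_hist_divisor = 4, so mult = 3 and round = 2):
 *
 *	avg = 1, busy jumps to 9: (3*9 + 1 + 2) / 4 = 7  (fast ramp up)
 *	avg = 7, busy drops to 1: (3*7 + 1 + 2) / 4 = 6  (slow decay)
 *
 * Because max_q always carries the weight of 3, increases are tracked
 * almost immediately while decreases bleed off one step at a time.
 */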
409
410 static inline void
411 cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
412 {
413 unsigned slice = cfq_prio_to_slice(cfqd, cfqq);
414 if (cfqd->cfq_latency) {
415 /* interested queues (we consider only the ones with the same
416 * priority class) */
417 unsigned iq = cfq_get_avg_queues(cfqd, cfq_class_rt(cfqq));
418 unsigned sync_slice = cfqd->cfq_slice[1];
419 unsigned expect_latency = sync_slice * iq;
420 if (expect_latency > cfq_target_latency) {
421 unsigned base_low_slice = 2 * cfqd->cfq_slice_idle;
422 /* scale low_slice according to IO priority
423 * and sync vs async */
424 unsigned low_slice =
425 min(slice, base_low_slice * slice / sync_slice);
426 /* the adapted slice value is scaled to fit all iqs
427 * into the target latency */
428 slice = max(slice * cfq_target_latency / expect_latency,
429 low_slice);
430 }
431 }
432 cfqq->slice_end = jiffies + slice;
433 cfq_log_cfqq(cfqd, cfqq, "set_slice=%lu", cfqq->slice_end - jiffies);
434 }
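/*
 * Editor's note, a sketch of the low-latency scaling above (assuming
 * HZ=1000, so cfq_target_latency = 300 jiffies, sync_slice = 100 and
 * cfq_slice_idle = 8): with iq = 6 same-class queues the expected
 * latency is 6 * 100 = 600 > 300, so a default-prio slice of 100 is
 * scaled down to max(100 * 300 / 600, low_slice) = 50 jiffies, where
 * low_slice = min(100, (2*8) * 100 / 100) = 16 acts as the floor.
 */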
435
436 /*
437 * We need to wrap this check in cfq_cfqq_slice_new(), since ->slice_end
438 * isn't valid until the first request from the dispatch is activated
439 * and the slice time set.
440 */
441 static inline bool cfq_slice_used(struct cfq_queue *cfqq)
442 {
443 if (cfq_cfqq_slice_new(cfqq))
444 return 0;
445 if (time_before(jiffies, cfqq->slice_end))
446 return 0;
447
448 return 1;
449 }
450
451 /*
452 * Lifted from AS - choose which of rq1 and rq2 is best served now.
453 * We choose the request that is closest to the head right now. Distance
454 * behind the head is penalized and only allowed to a certain extent.
455 */
456 static struct request *
457 cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2)
458 {
459 sector_t last, s1, s2, d1 = 0, d2 = 0;
460 unsigned long back_max;
461 #define CFQ_RQ1_WRAP 0x01 /* request 1 wraps */
462 #define CFQ_RQ2_WRAP 0x02 /* request 2 wraps */
463 unsigned wrap = 0; /* bit mask: requests behind the disk head? */
464
465 if (rq1 == NULL || rq1 == rq2)
466 return rq2;
467 if (rq2 == NULL)
468 return rq1;
469
470 if (rq_is_sync(rq1) && !rq_is_sync(rq2))
471 return rq1;
472 else if (rq_is_sync(rq2) && !rq_is_sync(rq1))
473 return rq2;
474 if (rq_is_meta(rq1) && !rq_is_meta(rq2))
475 return rq1;
476 else if (rq_is_meta(rq2) && !rq_is_meta(rq1))
477 return rq2;
478
479 s1 = blk_rq_pos(rq1);
480 s2 = blk_rq_pos(rq2);
481
482 last = cfqd->last_position;
483
484 /*
485 * by definition, 1KiB is 2 sectors
486 */
487 back_max = cfqd->cfq_back_max * 2;
488
489 /*
490 * Strict one way elevator _except_ in the case where we allow
491 * short backward seeks which are biased as twice the cost of a
492 * similar forward seek.
493 */
494 if (s1 >= last)
495 d1 = s1 - last;
496 else if (s1 + back_max >= last)
497 d1 = (last - s1) * cfqd->cfq_back_penalty;
498 else
499 wrap |= CFQ_RQ1_WRAP;
500
501 if (s2 >= last)
502 d2 = s2 - last;
503 else if (s2 + back_max >= last)
504 d2 = (last - s2) * cfqd->cfq_back_penalty;
505 else
506 wrap |= CFQ_RQ2_WRAP;
507
508 /* Found required data */
509
510 /*
511 * By doing switch() on the bit mask "wrap" we avoid having to
512 * check two variables for all permutations: --> faster!
513 */
514 switch (wrap) {
515 case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
516 if (d1 < d2)
517 return rq1;
518 else if (d2 < d1)
519 return rq2;
520 else {
521 if (s1 >= s2)
522 return rq1;
523 else
524 return rq2;
525 }
526
527 case CFQ_RQ2_WRAP:
528 return rq1;
529 case CFQ_RQ1_WRAP:
530 return rq2;
531 case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP): /* both rqs wrapped */
532 default:
533 /*
534 * Since both rqs are wrapped,
535 * start with the one that's further behind head
536 * (--> only *one* back seek required),
537 * since back seek takes more time than forward.
538 */
539 if (s1 <= s2)
540 return rq1;
541 else
542 return rq2;
543 }
544 }
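/*
 * Editor's note, a worked example of the distance logic above: with
 * last = 1000, a request at s1 = 1200 gets d1 = 200, while one at
 * s2 = 900 is 100 sectors behind the head and (being within back_max)
 * gets d2 = 100 * cfq_back_penalty = 200. The distances tie, so the
 * switch's case 0 falls to the s1 >= s2 tiebreak and picks rq1, the
 * forward request. A request more than back_max = 16*1024*2 = 32768
 * sectors behind would have wrapped instead and lost to any
 * non-wrapped request.
 */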
545
546 /*
547 * Below are the leftmost-cache rbtree helpers
548 */
549 static struct cfq_queue *cfq_rb_first(struct cfq_rb_root *root)
550 {
551 if (!root->left)
552 root->left = rb_first(&root->rb);
553
554 if (root->left)
555 return rb_entry(root->left, struct cfq_queue, rb_node);
556
557 return NULL;
558 }
559
560 static void rb_erase_init(struct rb_node *n, struct rb_root *root)
561 {
562 rb_erase(n, root);
563 RB_CLEAR_NODE(n);
564 }
565
566 static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root)
567 {
568 if (root->left == n)
569 root->left = NULL;
570 rb_erase_init(n, &root->rb);
571 --root->count;
572 }
573
574 /*
575 * would be nice to take fifo expire time into account as well
576 */
577 static struct request *
578 cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
579 struct request *last)
580 {
581 struct rb_node *rbnext = rb_next(&last->rb_node);
582 struct rb_node *rbprev = rb_prev(&last->rb_node);
583 struct request *next = NULL, *prev = NULL;
584
585 BUG_ON(RB_EMPTY_NODE(&last->rb_node));
586
587 if (rbprev)
588 prev = rb_entry_rq(rbprev);
589
590 if (rbnext)
591 next = rb_entry_rq(rbnext);
592 else {
593 rbnext = rb_first(&cfqq->sort_list);
594 if (rbnext && rbnext != &last->rb_node)
595 next = rb_entry_rq(rbnext);
596 }
597
598 return cfq_choose_req(cfqd, next, prev);
599 }
600
601 static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
602 struct cfq_queue *cfqq)
603 {
604 /*
605 * just an approximation, should be ok.
606 */
607 return (cfqd->busy_queues - 1) * (cfq_prio_slice(cfqd, 1, 0) -
608 cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio));
609 }
610
611 /*
612 * The cfqd->service_trees holds all pending cfq_queue's that have
613 * requests waiting to be processed. It is sorted in the order that
614 * we will service the queues.
615 */
616 static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
617 bool add_front)
618 {
619 struct rb_node **p, *parent;
620 struct cfq_queue *__cfqq;
621 unsigned long rb_key;
622 struct cfq_rb_root *service_tree;
623 int left;
624
625 service_tree = service_tree_for(cfqq_prio(cfqq), cfqq_type(cfqq), cfqd);
626 if (cfq_class_idle(cfqq)) {
627 rb_key = CFQ_IDLE_DELAY;
628 parent = rb_last(&service_tree->rb);
629 if (parent && parent != &cfqq->rb_node) {
630 __cfqq = rb_entry(parent, struct cfq_queue, rb_node);
631 rb_key += __cfqq->rb_key;
632 } else
633 rb_key += jiffies;
634 } else if (!add_front) {
635 /*
636 * Get our rb key offset. Subtract any residual slice
637 * value carried from last service. A negative resid
638 * count indicates slice overrun, and this should position
639 * the next service time further away in the tree.
640 */
641 rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies;
642 rb_key -= cfqq->slice_resid;
643 cfqq->slice_resid = 0;
644 } else {
645 rb_key = -HZ;
646 __cfqq = cfq_rb_first(service_tree);
647 rb_key += __cfqq ? __cfqq->rb_key : jiffies;
648 }
649
650 if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
651 /*
652 * same position, nothing more to do
653 */
654 if (rb_key == cfqq->rb_key &&
655 cfqq->service_tree == service_tree)
656 return;
657
658 cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
659 cfqq->service_tree = NULL;
660 }
661
662 left = 1;
663 parent = NULL;
664 cfqq->service_tree = service_tree;
665 p = &service_tree->rb.rb_node;
666 while (*p) {
667 struct rb_node **n;
668
669 parent = *p;
670 __cfqq = rb_entry(parent, struct cfq_queue, rb_node);
671
672 /*
673 * sort by key, that represents service time.
674 */
675 if (time_before(rb_key, __cfqq->rb_key))
676 n = &(*p)->rb_left;
677 else {
678 n = &(*p)->rb_right;
679 left = 0;
680 }
681
682 p = n;
683 }
684
685 if (left)
686 service_tree->left = &cfqq->rb_node;
687
688 cfqq->rb_key = rb_key;
689 rb_link_node(&cfqq->rb_node, parent, p);
690 rb_insert_color(&cfqq->rb_node, &service_tree->rb);
691 service_tree->count++;
692 }
693
694 static struct cfq_queue *
695 cfq_prio_tree_lookup(struct cfq_data *cfqd, struct rb_root *root,
696 sector_t sector, struct rb_node **ret_parent,
697 struct rb_node ***rb_link)
698 {
699 struct rb_node **p, *parent;
700 struct cfq_queue *cfqq = NULL;
701
702 parent = NULL;
703 p = &root->rb_node;
704 while (*p) {
705 struct rb_node **n;
706
707 parent = *p;
708 cfqq = rb_entry(parent, struct cfq_queue, p_node);
709
710 /*
711 * Sort strictly based on sector. Smallest to the left,
712 * largest to the right.
713 */
714 if (sector > blk_rq_pos(cfqq->next_rq))
715 n = &(*p)->rb_right;
716 else if (sector < blk_rq_pos(cfqq->next_rq))
717 n = &(*p)->rb_left;
718 else
719 break;
720 p = n;
721 cfqq = NULL;
722 }
723
724 *ret_parent = parent;
725 if (rb_link)
726 *rb_link = p;
727 return cfqq;
728 }
729
730 static void cfq_prio_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq)
731 {
732 struct rb_node **p, *parent;
733 struct cfq_queue *__cfqq;
734
735 if (cfqq->p_root) {
736 rb_erase(&cfqq->p_node, cfqq->p_root);
737 cfqq->p_root = NULL;
738 }
739
740 if (cfq_class_idle(cfqq))
741 return;
742 if (!cfqq->next_rq)
743 return;
744
745 cfqq->p_root = &cfqd->prio_trees[cfqq->org_ioprio];
746 __cfqq = cfq_prio_tree_lookup(cfqd, cfqq->p_root,
747 blk_rq_pos(cfqq->next_rq), &parent, &p);
748 if (!__cfqq) {
749 rb_link_node(&cfqq->p_node, parent, p);
750 rb_insert_color(&cfqq->p_node, cfqq->p_root);
751 } else
752 cfqq->p_root = NULL;
753 }
754
755 /*
756 * Update cfqq's position in the service tree.
757 */
758 static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq)
759 {
760 /*
761 * Resorting requires the cfqq to be on the RR list already.
762 */
763 if (cfq_cfqq_on_rr(cfqq)) {
764 cfq_service_tree_add(cfqd, cfqq, 0);
765 cfq_prio_tree_add(cfqd, cfqq);
766 }
767 }
768
769 /*
770 * add to busy list of queues for service, trying to be fair in ordering
771 * the pending list according to last request service
772 */
773 static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
774 {
775 cfq_log_cfqq(cfqd, cfqq, "add_to_rr");
776 BUG_ON(cfq_cfqq_on_rr(cfqq));
777 cfq_mark_cfqq_on_rr(cfqq);
778 cfqd->busy_queues++;
779
780 cfq_resort_rr_list(cfqd, cfqq);
781 }
782
783 /*
784 * Called when the cfqq no longer has requests pending, remove it from
785 * the service tree.
786 */
787 static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
788 {
789 cfq_log_cfqq(cfqd, cfqq, "del_from_rr");
790 BUG_ON(!cfq_cfqq_on_rr(cfqq));
791 cfq_clear_cfqq_on_rr(cfqq);
792
793 if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
794 cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
795 cfqq->service_tree = NULL;
796 }
797 if (cfqq->p_root) {
798 rb_erase(&cfqq->p_node, cfqq->p_root);
799 cfqq->p_root = NULL;
800 }
801
802 BUG_ON(!cfqd->busy_queues);
803 cfqd->busy_queues--;
804 }
805
806 /*
807 * rb tree support functions
808 */
809 static void cfq_del_rq_rb(struct request *rq)
810 {
811 struct cfq_queue *cfqq = RQ_CFQQ(rq);
812 struct cfq_data *cfqd = cfqq->cfqd;
813 const int sync = rq_is_sync(rq);
814
815 BUG_ON(!cfqq->queued[sync]);
816 cfqq->queued[sync]--;
817
818 elv_rb_del(&cfqq->sort_list, rq);
819
820 if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list))
821 cfq_del_cfqq_rr(cfqd, cfqq);
822 }
823
824 static void cfq_add_rq_rb(struct request *rq)
825 {
826 struct cfq_queue *cfqq = RQ_CFQQ(rq);
827 struct cfq_data *cfqd = cfqq->cfqd;
828 struct request *__alias, *prev;
829
830 cfqq->queued[rq_is_sync(rq)]++;
831
832 /*
833 * looks a little odd, but the first insert might return an alias.
834 * if that happens, put the alias on the dispatch list
835 */
836 while ((__alias = elv_rb_add(&cfqq->sort_list, rq)) != NULL)
837 cfq_dispatch_insert(cfqd->queue, __alias);
838
839 if (!cfq_cfqq_on_rr(cfqq))
840 cfq_add_cfqq_rr(cfqd, cfqq);
841
842 /*
843 * check if this request is a better next-serve candidate
844 */
845 prev = cfqq->next_rq;
846 cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq);
847
848 /*
849 * adjust priority tree position, if ->next_rq changes
850 */
851 if (prev != cfqq->next_rq)
852 cfq_prio_tree_add(cfqd, cfqq);
853
854 BUG_ON(!cfqq->next_rq);
855 }
856
857 static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
858 {
859 elv_rb_del(&cfqq->sort_list, rq);
860 cfqq->queued[rq_is_sync(rq)]--;
861 cfq_add_rq_rb(rq);
862 }
863
864 static struct request *
865 cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
866 {
867 struct task_struct *tsk = current;
868 struct cfq_io_context *cic;
869 struct cfq_queue *cfqq;
870
871 cic = cfq_cic_lookup(cfqd, tsk->io_context);
872 if (!cic)
873 return NULL;
874
875 cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
876 if (cfqq) {
877 sector_t sector = bio->bi_sector + bio_sectors(bio);
878
879 return elv_rb_find(&cfqq->sort_list, sector);
880 }
881
882 return NULL;
883 }
884
885 static void cfq_activate_request(struct request_queue *q, struct request *rq)
886 {
887 struct cfq_data *cfqd = q->elevator->elevator_data;
888
889 cfqd->rq_in_driver[rq_is_sync(rq)]++;
890 cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d",
891 rq_in_driver(cfqd));
892
893 cfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
894 }
895
896 static void cfq_deactivate_request(struct request_queue *q, struct request *rq)
897 {
898 struct cfq_data *cfqd = q->elevator->elevator_data;
899 const int sync = rq_is_sync(rq);
900
901 WARN_ON(!cfqd->rq_in_driver[sync]);
902 cfqd->rq_in_driver[sync]--;
903 cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "deactivate rq, drv=%d",
904 rq_in_driver(cfqd));
905 }
906
907 static void cfq_remove_request(struct request *rq)
908 {
909 struct cfq_queue *cfqq = RQ_CFQQ(rq);
910
911 if (cfqq->next_rq == rq)
912 cfqq->next_rq = cfq_find_next_rq(cfqq->cfqd, cfqq, rq);
913
914 list_del_init(&rq->queuelist);
915 cfq_del_rq_rb(rq);
916
917 cfqq->cfqd->rq_queued--;
918 if (rq_is_meta(rq)) {
919 WARN_ON(!cfqq->meta_pending);
920 cfqq->meta_pending--;
921 }
922 }
923
924 static int cfq_merge(struct request_queue *q, struct request **req,
925 struct bio *bio)
926 {
927 struct cfq_data *cfqd = q->elevator->elevator_data;
928 struct request *__rq;
929
930 __rq = cfq_find_rq_fmerge(cfqd, bio);
931 if (__rq && elv_rq_merge_ok(__rq, bio)) {
932 *req = __rq;
933 return ELEVATOR_FRONT_MERGE;
934 }
935
936 return ELEVATOR_NO_MERGE;
937 }
938
939 static void cfq_merged_request(struct request_queue *q, struct request *req,
940 int type)
941 {
942 if (type == ELEVATOR_FRONT_MERGE) {
943 struct cfq_queue *cfqq = RQ_CFQQ(req);
944
945 cfq_reposition_rq_rb(cfqq, req);
946 }
947 }
948
949 static void
950 cfq_merged_requests(struct request_queue *q, struct request *rq,
951 struct request *next)
952 {
953 /*
954 * reposition in fifo if next is older than rq
955 */
956 if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
957 time_before(rq_fifo_time(next), rq_fifo_time(rq))) {
958 list_move(&rq->queuelist, &next->queuelist);
959 rq_set_fifo_time(rq, rq_fifo_time(next));
960 }
961
962 cfq_remove_request(next);
963 }
964
965 static int cfq_allow_merge(struct request_queue *q, struct request *rq,
966 struct bio *bio)
967 {
968 struct cfq_data *cfqd = q->elevator->elevator_data;
969 struct cfq_io_context *cic;
970 struct cfq_queue *cfqq;
971
972 /*
973 * Disallow merge of a sync bio into an async request.
974 */
975 if (cfq_bio_sync(bio) && !rq_is_sync(rq))
976 return false;
977
978 /*
979 * Lookup the cfqq that this bio will be queued with. Allow
980 * merge only if rq is queued there.
981 */
982 cic = cfq_cic_lookup(cfqd, current->io_context);
983 if (!cic)
984 return false;
985
986 cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
987 return cfqq == RQ_CFQQ(rq);
988 }
989
990 static void __cfq_set_active_queue(struct cfq_data *cfqd,
991 struct cfq_queue *cfqq)
992 {
993 if (cfqq) {
994 cfq_log_cfqq(cfqd, cfqq, "set_active");
995 cfqq->slice_end = 0;
996 cfqq->slice_dispatch = 0;
997
998 cfq_clear_cfqq_wait_request(cfqq);
999 cfq_clear_cfqq_must_dispatch(cfqq);
1000 cfq_clear_cfqq_must_alloc_slice(cfqq);
1001 cfq_clear_cfqq_fifo_expire(cfqq);
1002 cfq_mark_cfqq_slice_new(cfqq);
1003
1004 del_timer(&cfqd->idle_slice_timer);
1005 }
1006
1007 cfqd->active_queue = cfqq;
1008 }
1009
1010 /*
1011 * current cfqq expired its slice (or was too idle), select new one
1012 */
1013 static void
1014 __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1015 bool timed_out)
1016 {
1017 cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out);
1018
1019 if (cfq_cfqq_wait_request(cfqq))
1020 del_timer(&cfqd->idle_slice_timer);
1021
1022 cfq_clear_cfqq_wait_request(cfqq);
1023
1024 /*
1025 * store what was left of this slice, if the queue idled/timed out
1026 */
1027 if (timed_out && !cfq_cfqq_slice_new(cfqq)) {
1028 cfqq->slice_resid = cfqq->slice_end - jiffies;
1029 cfq_log_cfqq(cfqd, cfqq, "resid=%ld", cfqq->slice_resid);
1030 }
1031
1032 cfq_resort_rr_list(cfqd, cfqq);
1033
1034 if (cfqq == cfqd->active_queue)
1035 cfqd->active_queue = NULL;
1036
1037 if (cfqd->active_cic) {
1038 put_io_context(cfqd->active_cic->ioc);
1039 cfqd->active_cic = NULL;
1040 }
1041 }
1042
1043 static inline void cfq_slice_expired(struct cfq_data *cfqd, bool timed_out)
1044 {
1045 struct cfq_queue *cfqq = cfqd->active_queue;
1046
1047 if (cfqq)
1048 __cfq_slice_expired(cfqd, cfqq, timed_out);
1049 }
1050
1051 /*
1052 * Get next queue for service. Unless we have a queue preemption,
1053 * we'll simply select the first cfqq in the service tree.
1054 */
1055 static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
1056 {
1057 struct cfq_rb_root *service_tree =
1058 service_tree_for(cfqd->serving_prio, cfqd->serving_type, cfqd);
1059
1060 if (RB_EMPTY_ROOT(&service_tree->rb))
1061 return NULL;
1062 return cfq_rb_first(service_tree);
1063 }
1064
1065 /*
1066 * Get and set a new active queue for service.
1067 */
1068 static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd,
1069 struct cfq_queue *cfqq)
1070 {
1071 if (!cfqq)
1072 cfqq = cfq_get_next_queue(cfqd);
1073
1074 __cfq_set_active_queue(cfqd, cfqq);
1075 return cfqq;
1076 }
1077
1078 static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
1079 struct request *rq)
1080 {
1081 if (blk_rq_pos(rq) >= cfqd->last_position)
1082 return blk_rq_pos(rq) - cfqd->last_position;
1083 else
1084 return cfqd->last_position - blk_rq_pos(rq);
1085 }
1086
1087 #define CFQQ_SEEK_THR (8 * 1024) /* sectors */
1088 #define CFQQ_SEEKY(cfqq) ((cfqq)->seek_mean > CFQQ_SEEK_THR)
1089
1090 static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1091 struct request *rq)
1092 {
1093 sector_t sdist = cfqq->seek_mean;
1094
1095 if (!sample_valid(cfqq->seek_samples))
1096 sdist = CFQQ_SEEK_THR;
1097
1098 return cfq_dist_from_last(cfqd, rq) <= sdist;
1099 }
1100
1101 static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
1102 struct cfq_queue *cur_cfqq)
1103 {
1104 struct rb_root *root = &cfqd->prio_trees[cur_cfqq->org_ioprio];
1105 struct rb_node *parent, *node;
1106 struct cfq_queue *__cfqq;
1107 sector_t sector = cfqd->last_position;
1108
1109 if (RB_EMPTY_ROOT(root))
1110 return NULL;
1111
1112 /*
1113 * First, if we find a request starting at the end of the last
1114 * request, choose it.
1115 */
1116 __cfqq = cfq_prio_tree_lookup(cfqd, root, sector, &parent, NULL);
1117 if (__cfqq)
1118 return __cfqq;
1119
1120 /*
1121 * If the exact sector wasn't found, the parent of the NULL leaf
1122 * will contain the closest sector.
1123 */
1124 __cfqq = rb_entry(parent, struct cfq_queue, p_node);
1125 if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
1126 return __cfqq;
1127
1128 if (blk_rq_pos(__cfqq->next_rq) < sector)
1129 node = rb_next(&__cfqq->p_node);
1130 else
1131 node = rb_prev(&__cfqq->p_node);
1132 if (!node)
1133 return NULL;
1134
1135 __cfqq = rb_entry(node, struct cfq_queue, p_node);
1136 if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
1137 return __cfqq;
1138
1139 return NULL;
1140 }
1141
1142 /*
1143 * cfqd - obvious
1144 * cur_cfqq - passed in so that we don't decide that the current queue is
1145 * closely cooperating with itself.
1146 *
1147 * So, basically we're assuming that cur_cfqq has dispatched at least
1148 * one request, and that cfqd->last_position reflects a position on the disk
1149 * associated with the I/O issued by cur_cfqq. I'm not sure this is a valid
1150 * assumption.
1151 */
1152 static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
1153 struct cfq_queue *cur_cfqq)
1154 {
1155 struct cfq_queue *cfqq;
1156
1157 if (!cfq_cfqq_sync(cur_cfqq))
1158 return NULL;
1159 if (CFQQ_SEEKY(cur_cfqq))
1160 return NULL;
1161
1162 /*
1163 * We should notice if some of the queues are cooperating, eg
1164 * working closely on the same area of the disk. In that case,
1165 * we can group them together and don't waste time idling.
1166 */
1167 cfqq = cfqq_close(cfqd, cur_cfqq);
1168 if (!cfqq)
1169 return NULL;
1170
1171 /*
1172 * It only makes sense to merge sync queues.
1173 */
1174 if (!cfq_cfqq_sync(cfqq))
1175 return NULL;
1176 if (CFQQ_SEEKY(cfqq))
1177 return NULL;
1178
1179 /*
1180 * Do not merge queues of different priority classes
1181 */
1182 if (cfq_class_rt(cfqq) != cfq_class_rt(cur_cfqq))
1183 return NULL;
1184
1185 return cfqq;
1186 }
1187
1188 /*
1189 * Determine whether we should enforce idle window for this queue.
1190 */
1191
1192 static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1193 {
1194 enum wl_prio_t prio = cfqq_prio(cfqq);
1195 struct cfq_rb_root *service_tree = cfqq->service_tree;
1196
1197 /* We never do for idle class queues. */
1198 if (prio == IDLE_WORKLOAD)
1199 return false;
1200
1201 /* We do for queues that were marked with idle window flag. */
1202 if (cfq_cfqq_idle_window(cfqq))
1203 return true;
1204
1205 /*
1206 * Otherwise, we do only if they are the last ones
1207 * in their service tree.
1208 */
1209 if (!service_tree)
1210 service_tree = service_tree_for(prio, cfqq_type(cfqq), cfqd);
1211
1212 if (service_tree->count == 0)
1213 return true;
1214
1215 return (service_tree->count == 1 && cfq_rb_first(service_tree) == cfqq);
1216 }
1217
1218 static void cfq_arm_slice_timer(struct cfq_data *cfqd)
1219 {
1220 struct cfq_queue *cfqq = cfqd->active_queue;
1221 struct cfq_io_context *cic;
1222 unsigned long sl;
1223
1224 /*
1225 * SSD device without seek penalty, disable idling. But only do so
1226 * for devices that support queuing, otherwise we still have a problem
1227 * with sync vs async workloads.
1228 */
1229 if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag)
1230 return;
1231
1232 WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
1233 WARN_ON(cfq_cfqq_slice_new(cfqq));
1234
1235 /*
1236 * idle is disabled, either manually or by past process history
1237 */
1238 if (!cfqd->cfq_slice_idle || !cfq_should_idle(cfqd, cfqq))
1239 return;
1240
1241 /*
1242 * still requests with the driver, don't idle
1243 */
1244 if (rq_in_driver(cfqd))
1245 return;
1246
1247 /*
1248 * task has exited, don't wait
1249 */
1250 cic = cfqd->active_cic;
1251 if (!cic || !atomic_read(&cic->ioc->nr_tasks))
1252 return;
1253
1254 /*
1255 * If our average think time is larger than the remaining time
1256 * slice, then don't idle. This avoids overrunning the allotted
1257 * time slice.
1258 */
1259 if (sample_valid(cic->ttime_samples) &&
1260 (cfqq->slice_end - jiffies < cic->ttime_mean))
1261 return;
1262
1263 cfq_mark_cfqq_wait_request(cfqq);
1264
1265 sl = cfqd->cfq_slice_idle;
1266 /* are we servicing noidle tree, and there are more queues?
1267 * non-rotational or NCQ: no idle
1268 * non-NCQ rotational: very small idle, to allow
1269 * fair distribution of slice time for a process doing back-to-back
1270 * seeks.
1271 */
1272 if (cfqd->serving_type == SYNC_NOIDLE_WORKLOAD &&
1273 service_tree_for(cfqd->serving_prio, SYNC_NOIDLE_WORKLOAD, cfqd)
1274 ->count > 0) {
1275 if (blk_queue_nonrot(cfqd->queue) || cfqd->hw_tag)
1276 return;
1277 sl = min(sl, msecs_to_jiffies(CFQ_MIN_TT));
1278 }
1279
1280 mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
1281 cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu", sl);
1282 }
1283
1284 /*
1285 * Move request from internal lists to the request queue dispatch list.
1286 */
1287 static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
1288 {
1289 struct cfq_data *cfqd = q->elevator->elevator_data;
1290 struct cfq_queue *cfqq = RQ_CFQQ(rq);
1291
1292 cfq_log_cfqq(cfqd, cfqq, "dispatch_insert");
1293
1294 cfqq->next_rq = cfq_find_next_rq(cfqd, cfqq, rq);
1295 cfq_remove_request(rq);
1296 cfqq->dispatched++;
1297 elv_dispatch_sort(q, rq);
1298
1299 if (cfq_cfqq_sync(cfqq))
1300 cfqd->sync_flight++;
1301 }
1302
1303 /*
1304 * return expired entry, or NULL to just start from scratch in rbtree
1305 */
1306 static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
1307 {
1308 struct request *rq = NULL;
1309
1310 if (cfq_cfqq_fifo_expire(cfqq))
1311 return NULL;
1312
1313 cfq_mark_cfqq_fifo_expire(cfqq);
1314
1315 if (list_empty(&cfqq->fifo))
1316 return NULL;
1317
1318 rq = rq_entry_fifo(cfqq->fifo.next);
1319 if (time_before(jiffies, rq_fifo_time(rq)))
1320 rq = NULL;
1321
1322 cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq);
1323 return rq;
1324 }
1325
1326 static inline int
1327 cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1328 {
1329 const int base_rq = cfqd->cfq_slice_async_rq;
1330
1331 WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);
1332
1333 return 2 * (base_rq + base_rq * (CFQ_PRIO_LISTS - 1 - cfqq->ioprio));
1334 }
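/*
 * Editor's note, with the default cfq_slice_async_rq = 2 and
 * CFQ_PRIO_LISTS = 8 the cap above works out to:
 *
 *	prio 0: 2 * (2 + 2 * 7) = 32 requests
 *	prio 4: 2 * (2 + 2 * 3) = 16 requests
 *	prio 7: 2 * (2 + 2 * 0) =  4 requests
 */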
1335
1336 /*
1337 * Must be called with the queue_lock held.
1338 */
1339 static int cfqq_process_refs(struct cfq_queue *cfqq)
1340 {
1341 int process_refs, io_refs;
1342
1343 io_refs = cfqq->allocated[READ] + cfqq->allocated[WRITE];
1344 process_refs = atomic_read(&cfqq->ref) - io_refs;
1345 BUG_ON(process_refs < 0);
1346 return process_refs;
1347 }
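/*
 * Editor's note: the queue's atomic refcount mixes process references
 * (tasks whose cic points at this cfqq) with one reference per
 * allocated request, so e.g. ref = 5 with allocated[READ] +
 * allocated[WRITE] = 3 means 2 process references remain. The merge
 * setup below only chains queues that still have process references.
 */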
1348
1349 static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq)
1350 {
1351 int process_refs, new_process_refs;
1352 struct cfq_queue *__cfqq;
1353
1354 /* Avoid a circular list and skip interim queue merges */
1355 while ((__cfqq = new_cfqq->new_cfqq)) {
1356 if (__cfqq == cfqq)
1357 return;
1358 new_cfqq = __cfqq;
1359 }
1360
1361 process_refs = cfqq_process_refs(cfqq);
1362 /*
1363 * If the process for the cfqq has gone away, there is no
1364 * sense in merging the queues.
1365 */
1366 if (process_refs == 0)
1367 return;
1368
1369 /*
1370 * Merge in the direction of the lesser amount of work.
1371 */
1372 new_process_refs = cfqq_process_refs(new_cfqq);
1373 if (new_process_refs >= process_refs) {
1374 cfqq->new_cfqq = new_cfqq;
1375 atomic_add(process_refs, &new_cfqq->ref);
1376 } else {
1377 new_cfqq->new_cfqq = cfqq;
1378 atomic_add(new_process_refs, &cfqq->ref);
1379 }
1380 }
1381
1382 static enum wl_type_t cfq_choose_wl(struct cfq_data *cfqd, enum wl_prio_t prio,
1383 bool prio_changed)
1384 {
1385 struct cfq_queue *queue;
1386 int i;
1387 bool key_valid = false;
1388 unsigned long lowest_key = 0;
1389 enum wl_type_t cur_best = SYNC_NOIDLE_WORKLOAD;
1390
1391 if (prio_changed) {
1392 /*
1393 * When priorities switched, we prefer starting
1394 * from SYNC_NOIDLE (first choice), or just SYNC
1395 * over ASYNC
1396 */
1397 if (service_tree_for(prio, cur_best, cfqd)->count)
1398 return cur_best;
1399 cur_best = SYNC_WORKLOAD;
1400 if (service_tree_for(prio, cur_best, cfqd)->count)
1401 return cur_best;
1402
1403 return ASYNC_WORKLOAD;
1404 }
1405
1406 for (i = 0; i < 3; ++i) {
1407 /* otherwise, select the one with lowest rb_key */
1408 queue = cfq_rb_first(service_tree_for(prio, i, cfqd));
1409 if (queue &&
1410 (!key_valid || time_before(queue->rb_key, lowest_key))) {
1411 lowest_key = queue->rb_key;
1412 cur_best = i;
1413 key_valid = true;
1414 }
1415 }
1416
1417 return cur_best;
1418 }
1419
1420 static void choose_service_tree(struct cfq_data *cfqd)
1421 {
1422 enum wl_prio_t previous_prio = cfqd->serving_prio;
1423 bool prio_changed;
1424 unsigned slice;
1425 unsigned count;
1426
1427 /* Choose next priority. RT > BE > IDLE */
1428 if (cfq_busy_queues_wl(RT_WORKLOAD, cfqd))
1429 cfqd->serving_prio = RT_WORKLOAD;
1430 else if (cfq_busy_queues_wl(BE_WORKLOAD, cfqd))
1431 cfqd->serving_prio = BE_WORKLOAD;
1432 else {
1433 cfqd->serving_prio = IDLE_WORKLOAD;
1434 cfqd->workload_expires = jiffies + 1;
1435 return;
1436 }
1437
1438 /*
1439 * For RT and BE, we also have to choose the type
1440 * (SYNC, SYNC_NOIDLE, ASYNC), and to compute a workload
1441 * expiration time
1442 */
1443 prio_changed = (cfqd->serving_prio != previous_prio);
1444 count = service_tree_for(cfqd->serving_prio, cfqd->serving_type, cfqd)
1445 ->count;
1446
1447 /*
1448 * If priority didn't change, check workload expiration,
1449 * and that we still have other queues ready
1450 */
1451 if (!prio_changed && count &&
1452 !time_after(jiffies, cfqd->workload_expires))
1453 return;
1454
1455 /* otherwise select new workload type */
1456 cfqd->serving_type =
1457 cfq_choose_wl(cfqd, cfqd->serving_prio, prio_changed);
1458 count = service_tree_for(cfqd->serving_prio, cfqd->serving_type, cfqd)
1459 ->count;
1460
1461 /*
1462 * the workload slice is computed as a fraction of target latency
1463 * proportional to the number of queues in that workload, over
1464 * all the queues in the same priority class
1465 */
1466 slice = cfq_target_latency * count /
1467 max_t(unsigned, cfqd->busy_queues_avg[cfqd->serving_prio],
1468 cfq_busy_queues_wl(cfqd->serving_prio, cfqd));
1469
1470 if (cfqd->serving_type == ASYNC_WORKLOAD)
1471 /* async workload slice is scaled down according to
1472 * the sync/async slice ratio. */
1473 slice = slice * cfqd->cfq_slice[0] / cfqd->cfq_slice[1];
1474 else
1475 /* sync workload slice is at least 2 * cfq_slice_idle */
1476 slice = max(slice, 2 * cfqd->cfq_slice_idle);
1477
1478 slice = max_t(unsigned, slice, CFQ_MIN_TT);
1479 cfqd->workload_expires = jiffies + slice;
1480 }
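/*
 * Editor's note, a sketch of the slice computation above (HZ=1000, so
 * cfq_target_latency = 300): with 4 busy BE queues of which count = 2
 * sit on the chosen tree, slice = 300 * 2 / 4 = 150 jiffies. A sync
 * workload keeps max(150, 2 * cfq_slice_idle = 16) = 150; an async one
 * is scaled by cfq_slice[0]/cfq_slice[1] = 40/100 down to 60 jiffies.
 */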
1481
1482 /*
1483 * Select a queue for service. If we have a current active queue,
1484 * check whether to continue servicing it, or retrieve and set a new one.
1485 */
1486 static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
1487 {
1488 struct cfq_queue *cfqq, *new_cfqq = NULL;
1489
1490 cfqq = cfqd->active_queue;
1491 if (!cfqq)
1492 goto new_queue;
1493
1494 /*
1495 * The active queue has run out of time, expire it and select new.
1496 */
1497 if (cfq_slice_used(cfqq) && !cfq_cfqq_must_dispatch(cfqq))
1498 goto expire;
1499
1500 /*
1501 * The active queue has requests and isn't expired, allow it to
1502 * dispatch.
1503 */
1504 if (!RB_EMPTY_ROOT(&cfqq->sort_list))
1505 goto keep_queue;
1506
1507 /*
1508 * If another queue has a request waiting within our mean seek
1509 * distance, let it run. The expire code will check for close
1510 * cooperators and put the close queue at the front of the service
1511 * tree. If possible, merge the expiring queue with the new cfqq.
1512 */
1513 new_cfqq = cfq_close_cooperator(cfqd, cfqq);
1514 if (new_cfqq) {
1515 if (!cfqq->new_cfqq)
1516 cfq_setup_merge(cfqq, new_cfqq);
1517 goto expire;
1518 }
1519
1520 /*
1521 * No requests pending. If the active queue still has requests in
1522 * flight or is idling for a new request, allow either of these
1523 * conditions to happen (or time out) before selecting a new queue.
1524 */
1525 if (timer_pending(&cfqd->idle_slice_timer) ||
1526 (cfqq->dispatched && cfq_should_idle(cfqd, cfqq))) {
1527 cfqq = NULL;
1528 goto keep_queue;
1529 }
1530
1531 expire:
1532 cfq_slice_expired(cfqd, 0);
1533 new_queue:
1534 /*
1535 * Current queue expired. Check if we have to switch to a new
1536 * service tree
1537 */
1538 if (!new_cfqq)
1539 choose_service_tree(cfqd);
1540
1541 cfqq = cfq_set_active_queue(cfqd, new_cfqq);
1542 keep_queue:
1543 return cfqq;
1544 }
1545
1546 static int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq)
1547 {
1548 int dispatched = 0;
1549
1550 while (cfqq->next_rq) {
1551 cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq);
1552 dispatched++;
1553 }
1554
1555 BUG_ON(!list_empty(&cfqq->fifo));
1556 return dispatched;
1557 }
1558
1559 /*
1560 * Drain our current requests. Used for barriers and when switching
1561 * io schedulers on-the-fly.
1562 */
1563 static int cfq_forced_dispatch(struct cfq_data *cfqd)
1564 {
1565 struct cfq_queue *cfqq;
1566 int dispatched = 0;
1567 int i, j;
1568 for (i = 0; i < 2; ++i)
1569 for (j = 0; j < 3; ++j)
1570 while ((cfqq = cfq_rb_first(&cfqd->service_trees[i][j]))
1571 != NULL)
1572 dispatched += __cfq_forced_dispatch_cfqq(cfqq);
1573
1574 while ((cfqq = cfq_rb_first(&cfqd->service_tree_idle)) != NULL)
1575 dispatched += __cfq_forced_dispatch_cfqq(cfqq);
1576
1577 cfq_slice_expired(cfqd, 0);
1578
1579 BUG_ON(cfqd->busy_queues);
1580
1581 cfq_log(cfqd, "forced_dispatch=%d", dispatched);
1582 return dispatched;
1583 }
1584
1585 static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1586 {
1587 unsigned int max_dispatch;
1588
1589 /*
1590 * Drain async requests before we start sync IO
1591 */
1592 if (cfq_should_idle(cfqd, cfqq) && cfqd->rq_in_driver[BLK_RW_ASYNC])
1593 return false;
1594
1595 /*
1596 * If this is an async queue and we have sync IO in flight, let it wait
1597 */
1598 if (cfqd->sync_flight && !cfq_cfqq_sync(cfqq))
1599 return false;
1600
1601 max_dispatch = cfqd->cfq_quantum;
1602 if (cfq_class_idle(cfqq))
1603 max_dispatch = 1;
1604
1605 /*
1606 * Does this cfqq already have too much IO in flight?
1607 */
1608 if (cfqq->dispatched >= max_dispatch) {
1609 /*
1610 * idle queue must always only have a single IO in flight
1611 */
1612 if (cfq_class_idle(cfqq))
1613 return false;
1614
1615 /*
1616 * We have other queues, don't allow more IO from this one
1617 */
1618 if (cfqd->busy_queues > 1)
1619 return false;
1620
1621 /*
1622 * Sole queue user, allow bigger slice
1623 */
1624 max_dispatch *= 4;
1625 }
1626
1627 /*
1628 * Async queues must wait a bit before being allowed dispatch.
1629 * We also ramp up the dispatch depth gradually for async IO,
1630 * based on the last sync IO we serviced
1631 */
1632 if (!cfq_cfqq_sync(cfqq) && cfqd->cfq_latency) {
1633 unsigned long last_sync = jiffies - cfqd->last_end_sync_rq;
1634 unsigned int depth;
1635
1636 depth = last_sync / cfqd->cfq_slice[1];
1637 if (!depth && !cfqq->dispatched)
1638 depth = 1;
1639 if (depth < max_dispatch)
1640 max_dispatch = depth;
1641 }
1642
1643 /*
1644 * If we're below the current max, allow a dispatch
1645 */
1646 return cfqq->dispatched < max_dispatch;
1647 }
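/*
 * Editor's note on the async ramp above (assuming HZ=1000, so
 * cfq_slice[1] = 100): depth = (jiffies since last sync completion)/100,
 * so within 100 jiffies of sync IO an async queue gets depth 0 (or 1 if
 * it has nothing in flight), 250 jiffies after it gets depth 2, and only
 * past 400 jiffies does the default cfq_quantum = 4 cap apply again.
 */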
1648
1649 /*
1650 * Dispatch a request from cfqq, moving it to the request queue
1651 * dispatch list.
1652 */
1653 static bool cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1654 {
1655 struct request *rq;
1656
1657 BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
1658
1659 if (!cfq_may_dispatch(cfqd, cfqq))
1660 return false;
1661
1662 /*
1663 * follow expired path, else get first next available
1664 */
1665 rq = cfq_check_fifo(cfqq);
1666 if (!rq)
1667 rq = cfqq->next_rq;
1668
1669 /*
1670 * insert request into driver dispatch list
1671 */
1672 cfq_dispatch_insert(cfqd->queue, rq);
1673
1674 if (!cfqd->active_cic) {
1675 struct cfq_io_context *cic = RQ_CIC(rq);
1676
1677 atomic_long_inc(&cic->ioc->refcount);
1678 cfqd->active_cic = cic;
1679 }
1680
1681 return true;
1682 }
1683
1684 /*
1685 * Find the cfqq that we need to service and move a request from that to the
1686 * dispatch list
1687 */
1688 static int cfq_dispatch_requests(struct request_queue *q, int force)
1689 {
1690 struct cfq_data *cfqd = q->elevator->elevator_data;
1691 struct cfq_queue *cfqq;
1692
1693 if (!cfqd->busy_queues)
1694 return 0;
1695
1696 if (unlikely(force))
1697 return cfq_forced_dispatch(cfqd);
1698
1699 cfqq = cfq_select_queue(cfqd);
1700 if (!cfqq)
1701 return 0;
1702
1703 /*
1704 * Dispatch a request from this cfqq, if it is allowed
1705 */
1706 if (!cfq_dispatch_request(cfqd, cfqq))
1707 return 0;
1708
1709 cfqq->slice_dispatch++;
1710 cfq_clear_cfqq_must_dispatch(cfqq);
1711
1712 /*
1713 * expire an async queue immediately if it has used up its slice. idle
1714 * queues always expire after 1 dispatch round.
1715 */
1716 if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
1717 cfqq->slice_dispatch >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
1718 cfq_class_idle(cfqq))) {
1719 cfqq->slice_end = jiffies + 1;
1720 cfq_slice_expired(cfqd, 0);
1721 }
1722
1723 cfq_log_cfqq(cfqd, cfqq, "dispatched a request");
1724 return 1;
1725 }
1726
1727 /*
1728 * task holds one reference to the queue, dropped when task exits. each rq
1729 * in-flight on this queue also holds a reference, dropped when rq is freed.
1730 *
1731 * queue lock must be held here.
1732 */
1733 static void cfq_put_queue(struct cfq_queue *cfqq)
1734 {
1735 struct cfq_data *cfqd = cfqq->cfqd;
1736
1737 BUG_ON(atomic_read(&cfqq->ref) <= 0);
1738
1739 if (!atomic_dec_and_test(&cfqq->ref))
1740 return;
1741
1742 cfq_log_cfqq(cfqd, cfqq, "put_queue");
1743 BUG_ON(rb_first(&cfqq->sort_list));
1744 BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
1745 BUG_ON(cfq_cfqq_on_rr(cfqq));
1746
1747 if (unlikely(cfqd->active_queue == cfqq)) {
1748 __cfq_slice_expired(cfqd, cfqq, 0);
1749 cfq_schedule_dispatch(cfqd);
1750 }
1751
1752 kmem_cache_free(cfq_pool, cfqq);
1753 }
1754
1755 /*
1756 * Must always be called with the rcu_read_lock() held
1757 */
1758 static void
1759 __call_for_each_cic(struct io_context *ioc,
1760 void (*func)(struct io_context *, struct cfq_io_context *))
1761 {
1762 struct cfq_io_context *cic;
1763 struct hlist_node *n;
1764
1765 hlist_for_each_entry_rcu(cic, n, &ioc->cic_list, cic_list)
1766 func(ioc, cic);
1767 }
1768
1769 /*
1770 * Call func for each cic attached to this ioc.
1771 */
1772 static void
1773 call_for_each_cic(struct io_context *ioc,
1774 void (*func)(struct io_context *, struct cfq_io_context *))
1775 {
1776 rcu_read_lock();
1777 __call_for_each_cic(ioc, func);
1778 rcu_read_unlock();
1779 }
1780
1781 static void cfq_cic_free_rcu(struct rcu_head *head)
1782 {
1783 struct cfq_io_context *cic;
1784
1785 cic = container_of(head, struct cfq_io_context, rcu_head);
1786
1787 kmem_cache_free(cfq_ioc_pool, cic);
1788 elv_ioc_count_dec(cfq_ioc_count);
1789
1790 if (ioc_gone) {
1791 /*
1792 * CFQ scheduler is exiting, grab exit lock and check
1793 * the pending io context count. If it hits zero,
1794 * complete ioc_gone and set it back to NULL
1795 */
1796 spin_lock(&ioc_gone_lock);
1797 if (ioc_gone && !elv_ioc_count_read(cfq_ioc_count)) {
1798 complete(ioc_gone);
1799 ioc_gone = NULL;
1800 }
1801 spin_unlock(&ioc_gone_lock);
1802 }
1803 }
1804
1805 static void cfq_cic_free(struct cfq_io_context *cic)
1806 {
1807 call_rcu(&cic->rcu_head, cfq_cic_free_rcu);
1808 }
1809
1810 static void cic_free_func(struct io_context *ioc, struct cfq_io_context *cic)
1811 {
1812 unsigned long flags;
1813
1814 BUG_ON(!cic->dead_key);
1815
1816 spin_lock_irqsave(&ioc->lock, flags);
1817 radix_tree_delete(&ioc->radix_root, cic->dead_key);
1818 hlist_del_rcu(&cic->cic_list);
1819 spin_unlock_irqrestore(&ioc->lock, flags);
1820
1821 cfq_cic_free(cic);
1822 }
1823
1824 /*
1825 * Must be called with rcu_read_lock() held or preemption otherwise disabled.
1826 * Only two callers of this - ->dtor() which is called with the rcu_read_lock(),
1827 * and ->trim() which is called with the task lock held
1828 */
1829 static void cfq_free_io_context(struct io_context *ioc)
1830 {
1831 /*
1832 * ioc->refcount is zero here, or we are called from elv_unregister(),
1833 * so no more cic's are allowed to be linked into this ioc. So it
1834 * should be ok to iterate over the known list, we will see all cic's
1835 * since no new ones are added.
1836 */
1837 __call_for_each_cic(ioc, cic_free_func);
1838 }
1839
1840 static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1841 {
1842 struct cfq_queue *__cfqq, *next;
1843
1844 if (unlikely(cfqq == cfqd->active_queue)) {
1845 __cfq_slice_expired(cfqd, cfqq, 0);
1846 cfq_schedule_dispatch(cfqd);
1847 }
1848
1849 /*
1850 * If this queue was scheduled to merge with another queue, be
1851 * sure to drop the reference taken on that queue (and others in
1852 * the merge chain). See cfq_setup_merge and cfq_merge_cfqqs.
1853 */
1854 __cfqq = cfqq->new_cfqq;
1855 while (__cfqq) {
1856 if (__cfqq == cfqq) {
1857 WARN(1, "cfqq->new_cfqq loop detected\n");
1858 break;
1859 }
1860 next = __cfqq->new_cfqq;
1861 cfq_put_queue(__cfqq);
1862 __cfqq = next;
1863 }
1864
1865 cfq_put_queue(cfqq);
1866 }
1867
1868 static void __cfq_exit_single_io_context(struct cfq_data *cfqd,
1869 struct cfq_io_context *cic)
1870 {
1871 struct io_context *ioc = cic->ioc;
1872
1873 list_del_init(&cic->queue_list);
1874
1875 /*
1876 * Make sure key == NULL is seen for dead queues
1877 */
1878 smp_wmb();
1879 cic->dead_key = (unsigned long) cic->key;
1880 cic->key = NULL;
1881
1882 if (ioc->ioc_data == cic)
1883 rcu_assign_pointer(ioc->ioc_data, NULL);
1884
1885 if (cic->cfqq[BLK_RW_ASYNC]) {
1886 cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_ASYNC]);
1887 cic->cfqq[BLK_RW_ASYNC] = NULL;
1888 }
1889
1890 if (cic->cfqq[BLK_RW_SYNC]) {
1891 cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_SYNC]);
1892 cic->cfqq[BLK_RW_SYNC] = NULL;
1893 }
1894 }
1895
1896 static void cfq_exit_single_io_context(struct io_context *ioc,
1897 struct cfq_io_context *cic)
1898 {
1899 struct cfq_data *cfqd = cic->key;
1900
1901 if (cfqd) {
1902 struct request_queue *q = cfqd->queue;
1903 unsigned long flags;
1904
1905 spin_lock_irqsave(q->queue_lock, flags);
1906
1907 /*
1908 * Ensure we get a fresh copy of the ->key to prevent
1909 * race between exiting task and queue
1910 */
1911 smp_read_barrier_depends();
1912 if (cic->key)
1913 __cfq_exit_single_io_context(cfqd, cic);
1914
1915 spin_unlock_irqrestore(q->queue_lock, flags);
1916 }
1917 }
1918
1919 /*
1920 * The process that ioc belongs to has exited; we need to clean up
1921 * and put the internal structures we have that belong to that process.
1922 */
1923 static void cfq_exit_io_context(struct io_context *ioc)
1924 {
1925 call_for_each_cic(ioc, cfq_exit_single_io_context);
1926 }
1927
1928 static struct cfq_io_context *
1929 cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
1930 {
1931 struct cfq_io_context *cic;
1932
1933 cic = kmem_cache_alloc_node(cfq_ioc_pool, gfp_mask | __GFP_ZERO,
1934 cfqd->queue->node);
1935 if (cic) {
1936 cic->last_end_request = jiffies;
1937 INIT_LIST_HEAD(&cic->queue_list);
1938 INIT_HLIST_NODE(&cic->cic_list);
1939 cic->dtor = cfq_free_io_context;
1940 cic->exit = cfq_exit_io_context;
1941 elv_ioc_count_inc(cfq_ioc_count);
1942 }
1943
1944 return cic;
1945 }
1946
1947 static void cfq_init_prio_data(struct cfq_queue *cfqq, struct io_context *ioc)
1948 {
1949 struct task_struct *tsk = current;
1950 int ioprio_class;
1951
1952 if (!cfq_cfqq_prio_changed(cfqq))
1953 return;
1954
1955 ioprio_class = IOPRIO_PRIO_CLASS(ioc->ioprio);
1956 switch (ioprio_class) {
1957 default:
1958 printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
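	/* fall through: treat a bad class like IOPRIO_CLASS_NONE */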
1959 case IOPRIO_CLASS_NONE:
1960 /*
1961 * no prio set, inherit CPU scheduling settings
1962 */
1963 cfqq->ioprio = task_nice_ioprio(tsk);
1964 cfqq->ioprio_class = task_nice_ioclass(tsk);
1965 break;
1966 case IOPRIO_CLASS_RT:
1967 cfqq->ioprio = task_ioprio(ioc);
1968 cfqq->ioprio_class = IOPRIO_CLASS_RT;
1969 break;
1970 case IOPRIO_CLASS_BE:
1971 cfqq->ioprio = task_ioprio(ioc);
1972 cfqq->ioprio_class = IOPRIO_CLASS_BE;
1973 break;
1974 case IOPRIO_CLASS_IDLE:
1975 cfqq->ioprio_class = IOPRIO_CLASS_IDLE;
1976 cfqq->ioprio = 7;
1977 cfq_clear_cfqq_idle_window(cfqq);
1978 break;
1979 }
1980
1981 /*
1982 * keep track of original prio settings in case we have to temporarily
1983 * elevate the priority of this queue
1984 */
1985 cfqq->org_ioprio = cfqq->ioprio;
1986 cfqq->org_ioprio_class = cfqq->ioprio_class;
1987 cfq_clear_cfqq_prio_changed(cfqq);
1988 }
1989
1990 static void changed_ioprio(struct io_context *ioc, struct cfq_io_context *cic)
1991 {
1992 struct cfq_data *cfqd = cic->key;
1993 struct cfq_queue *cfqq;
1994 unsigned long flags;
1995
1996 if (unlikely(!cfqd))
1997 return;
1998
1999 spin_lock_irqsave(cfqd->queue->queue_lock, flags);
2000
2001 cfqq = cic->cfqq[BLK_RW_ASYNC];
2002 if (cfqq) {
2003 struct cfq_queue *new_cfqq;
2004 new_cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic->ioc,
2005 GFP_ATOMIC);
2006 if (new_cfqq) {
2007 cic->cfqq[BLK_RW_ASYNC] = new_cfqq;
2008 cfq_put_queue(cfqq);
2009 }
2010 }
2011
2012 cfqq = cic->cfqq[BLK_RW_SYNC];
2013 if (cfqq)
2014 cfq_mark_cfqq_prio_changed(cfqq);
2015
2016 spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
2017 }
2018
2019 static void cfq_ioc_set_ioprio(struct io_context *ioc)
2020 {
2021 call_for_each_cic(ioc, changed_ioprio);
2022 ioc->ioprio_changed = 0;
2023 }
2024
2025 static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
2026 pid_t pid, bool is_sync)
2027 {
2028 RB_CLEAR_NODE(&cfqq->rb_node);
2029 RB_CLEAR_NODE(&cfqq->p_node);
2030 INIT_LIST_HEAD(&cfqq->fifo);
2031
2032 atomic_set(&cfqq->ref, 0);
2033 cfqq->cfqd = cfqd;
2034
2035 cfq_mark_cfqq_prio_changed(cfqq);
2036
2037 if (is_sync) {
2038 if (!cfq_class_idle(cfqq))
2039 cfq_mark_cfqq_idle_window(cfqq);
2040 cfq_mark_cfqq_sync(cfqq);
2041 }
2042 cfqq->pid = pid;
2043 }
2044
2045 static struct cfq_queue *
2046 cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync,
2047 struct io_context *ioc, gfp_t gfp_mask)
2048 {
2049 struct cfq_queue *cfqq, *new_cfqq = NULL;
2050 struct cfq_io_context *cic;
2051
2052 retry:
2053 cic = cfq_cic_lookup(cfqd, ioc);
2054 /* cic always exists here */
2055 cfqq = cic_to_cfqq(cic, is_sync);
2056
2057 /*
2058 * Always try a new alloc if we fell back to the OOM cfqq
2059 * originally, since it should just be a temporary situation.
2060 */
2061 if (!cfqq || cfqq == &cfqd->oom_cfqq) {
2062 cfqq = NULL;
2063 if (new_cfqq) {
2064 cfqq = new_cfqq;
2065 new_cfqq = NULL;
2066 } else if (gfp_mask & __GFP_WAIT) {
2067 spin_unlock_irq(cfqd->queue->queue_lock);
2068 new_cfqq = kmem_cache_alloc_node(cfq_pool,
2069 gfp_mask | __GFP_ZERO,
2070 cfqd->queue->node);
2071 spin_lock_irq(cfqd->queue->queue_lock);
2072 if (new_cfqq)
2073 goto retry;
2074 } else {
2075 cfqq = kmem_cache_alloc_node(cfq_pool,
2076 gfp_mask | __GFP_ZERO,
2077 cfqd->queue->node);
2078 }
2079
2080 if (cfqq) {
2081 cfq_init_cfqq(cfqd, cfqq, current->pid, is_sync);
2082 cfq_init_prio_data(cfqq, ioc);
2083 cfq_log_cfqq(cfqd, cfqq, "alloced");
2084 } else
2085 cfqq = &cfqd->oom_cfqq;
2086 }
2087
2088 if (new_cfqq)
2089 kmem_cache_free(cfq_pool, new_cfqq);
2090
2091 return cfqq;
2092 }
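
/*
 * Illustrative sketch, not part of this file: the retry loop above is the
 * classic "drop the lock, do a blocking allocation, retake the lock, recheck"
 * pattern - the queue may have been created by someone else while we slept.
 * A minimal userspace analogue with assumed names, using a pthread mutex in
 * place of queue_lock:
 */
#if 0
#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static void *cached_obj;		/* may change while the lock is dropped */

static void *get_obj_locked(void)	/* called with 'lock' held */
{
	void *fresh = NULL;

	while (!cached_obj) {
		if (fresh) {
			cached_obj = fresh;	/* install our allocation */
			fresh = NULL;
			continue;
		}
		pthread_mutex_unlock(&lock);
		fresh = malloc(128);		/* may block; lock not held */
		pthread_mutex_lock(&lock);
		if (!fresh)
			break;			/* allocation failed, give up */
		/* loop to recheck cached_obj under the lock */
	}
	free(fresh);			/* lost the race (or never allocated) */
	return cached_obj;		/* may be NULL on allocation failure */
}
#endif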
2093
2094 static struct cfq_queue **
2095 cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
2096 {
2097 switch (ioprio_class) {
2098 case IOPRIO_CLASS_RT:
2099 return &cfqd->async_cfqq[0][ioprio];
2100 case IOPRIO_CLASS_BE:
2101 return &cfqd->async_cfqq[1][ioprio];
2102 case IOPRIO_CLASS_IDLE:
2103 return &cfqd->async_idle_cfqq;
2104 default:
2105 BUG();
2106 }
2107 }
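
/*
 * Illustrative sketch, not part of this file: async queues are shared per
 * (class, priority) across all processes, since async writes cannot be
 * attributed to one task. The layout indexed above, in an assumed userspace
 * form (IOPRIO_BE_NR is 8 in this kernel):
 */
#if 0
#define NR_LEVELS 8			/* IOPRIO_BE_NR */

enum prio_class { CLS_RT, CLS_BE, CLS_IDLE };

static void *async_q[2][NR_LEVELS];	/* [0] = RT, [1] = BE, per level */
static void *async_idle_q;		/* all IDLE async I/O shares one */

static void **slot_for(enum prio_class cls, int level)
{
	switch (cls) {
	case CLS_RT:
		return &async_q[0][level];
	case CLS_BE:
		return &async_q[1][level];
	default:
		return &async_idle_q;
	}
}
#endif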
2108
2109 static struct cfq_queue *
2110 cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct io_context *ioc,
2111 gfp_t gfp_mask)
2112 {
2113 const int ioprio = task_ioprio(ioc);
2114 const int ioprio_class = task_ioprio_class(ioc);
2115 struct cfq_queue **async_cfqq = NULL;
2116 struct cfq_queue *cfqq = NULL;
2117
2118 if (!is_sync) {
2119 async_cfqq = cfq_async_queue_prio(cfqd, ioprio_class, ioprio);
2120 cfqq = *async_cfqq;
2121 }
2122
2123 if (!cfqq)
2124 cfqq = cfq_find_alloc_queue(cfqd, is_sync, ioc, gfp_mask);
2125
2126 /*
2127 * pin the queue now that it's allocated; scheduler exit will prune it
2128 */
2129 if (!is_sync && !(*async_cfqq)) {
2130 atomic_inc(&cfqq->ref);
2131 *async_cfqq = cfqq;
2132 }
2133
2134 atomic_inc(&cfqq->ref);
2135 return cfqq;
2136 }
2137
2138 /*
2139 * We drop cfq io contexts lazily, so we may find a dead one.
2140 */
2141 static void
2142 cfq_drop_dead_cic(struct cfq_data *cfqd, struct io_context *ioc,
2143 struct cfq_io_context *cic)
2144 {
2145 unsigned long flags;
2146
2147 WARN_ON(!list_empty(&cic->queue_list));
2148
2149 spin_lock_irqsave(&ioc->lock, flags);
2150
2151 BUG_ON(ioc->ioc_data == cic);
2152
2153 radix_tree_delete(&ioc->radix_root, (unsigned long) cfqd);
2154 hlist_del_rcu(&cic->cic_list);
2155 spin_unlock_irqrestore(&ioc->lock, flags);
2156
2157 cfq_cic_free(cic);
2158 }
2159
2160 static struct cfq_io_context *
2161 cfq_cic_lookup(struct cfq_data *cfqd, struct io_context *ioc)
2162 {
2163 struct cfq_io_context *cic;
2164 unsigned long flags;
2165 void *k;
2166
2167 if (unlikely(!ioc))
2168 return NULL;
2169
2170 rcu_read_lock();
2171
2172 /*
2173 * we maintain a last-hit cache, to avoid walking the tree on every lookup
2174 */
2175 cic = rcu_dereference(ioc->ioc_data);
2176 if (cic && cic->key == cfqd) {
2177 rcu_read_unlock();
2178 return cic;
2179 }
2180
2181 do {
2182 cic = radix_tree_lookup(&ioc->radix_root, (unsigned long) cfqd);
2183 rcu_read_unlock();
2184 if (!cic)
2185 break;
2186 /* ->key must be copied to avoid race with cfq_exit_queue() */
2187 k = cic->key;
2188 if (unlikely(!k)) {
2189 cfq_drop_dead_cic(cfqd, ioc, cic);
2190 rcu_read_lock();
2191 continue;
2192 }
2193
2194 spin_lock_irqsave(&ioc->lock, flags);
2195 rcu_assign_pointer(ioc->ioc_data, cic);
2196 spin_unlock_irqrestore(&ioc->lock, flags);
2197 break;
2198 } while (1);
2199
2200 return cic;
2201 }
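
/*
 * Illustrative sketch, not part of this file: the lookup above layers a
 * one-entry "last hit" cache (ioc->ioc_data) in front of the radix tree.
 * Stripped of the RCU and locking that make it safe concurrently, the shape
 * is simply (tree_lookup() is an assumed stand-in for radix_tree_lookup()):
 */
#if 0
struct entry {
	void *key;
	/* payload ... */
};

extern struct entry *tree_lookup(void *key);	/* assumed helper */

static struct entry *last_hit;			/* one-entry cache */

static struct entry *lookup(void *key)
{
	struct entry *e = last_hit;

	if (e && e->key == key)		/* fast path: cache hit */
		return e;

	e = tree_lookup(key);		/* slow path: walk the tree */
	if (e)
		last_hit = e;		/* refresh the cache */
	return e;
}
#endif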
2202
2203 /*
2204 * Add cic into ioc, using cfqd as the search key. This enables us to look up
2205 * the process-specific cfq io context when entered from the block layer.
2206 * Also adds the cic to a per-cfqd list, used when this queue is removed.
2207 */
2208 static int cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc,
2209 struct cfq_io_context *cic, gfp_t gfp_mask)
2210 {
2211 unsigned long flags;
2212 int ret;
2213
2214 ret = radix_tree_preload(gfp_mask);
2215 if (!ret) {
2216 cic->ioc = ioc;
2217 cic->key = cfqd;
2218
2219 spin_lock_irqsave(&ioc->lock, flags);
2220 ret = radix_tree_insert(&ioc->radix_root,
2221 (unsigned long) cfqd, cic);
2222 if (!ret)
2223 hlist_add_head_rcu(&cic->cic_list, &ioc->cic_list);
2224 spin_unlock_irqrestore(&ioc->lock, flags);
2225
2226 radix_tree_preload_end();
2227
2228 if (!ret) {
2229 spin_lock_irqsave(cfqd->queue->queue_lock, flags);
2230 list_add(&cic->queue_list, &cfqd->cic_list);
2231 spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
2232 }
2233 }
2234
2235 if (ret)
2236 printk(KERN_ERR "cfq: cic link failed!\n");
2237
2238 return ret;
2239 }
2240
2241 /*
2242 * Set up the general io context and cfq io context. There can be several cfq
2243 * io contexts per general io context, if this process is doing io to more
2244 * than one device managed by cfq.
2245 */
2246 static struct cfq_io_context *
2247 cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
2248 {
2249 struct io_context *ioc = NULL;
2250 struct cfq_io_context *cic;
2251
2252 might_sleep_if(gfp_mask & __GFP_WAIT);
2253
2254 ioc = get_io_context(gfp_mask, cfqd->queue->node);
2255 if (!ioc)
2256 return NULL;
2257
2258 cic = cfq_cic_lookup(cfqd, ioc);
2259 if (cic)
2260 goto out;
2261
2262 cic = cfq_alloc_io_context(cfqd, gfp_mask);
2263 if (cic == NULL)
2264 goto err;
2265
2266 if (cfq_cic_link(cfqd, ioc, cic, gfp_mask))
2267 goto err_free;
2268
2269 out:
2270 smp_read_barrier_depends();
2271 if (unlikely(ioc->ioprio_changed))
2272 cfq_ioc_set_ioprio(ioc);
2273
2274 return cic;
2275 err_free:
2276 cfq_cic_free(cic);
2277 err:
2278 put_io_context(ioc);
2279 return NULL;
2280 }
2281
2282 static void
2283 cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_io_context *cic)
2284 {
2285 unsigned long elapsed = jiffies - cic->last_end_request;
2286 unsigned long ttime = min(elapsed, 2UL * cfqd->cfq_slice_idle);
2287
2288 cic->ttime_samples = (7*cic->ttime_samples + 256) / 8;
2289 cic->ttime_total = (7*cic->ttime_total + 256*ttime) / 8;
2290 cic->ttime_mean = (cic->ttime_total + 128) / cic->ttime_samples;
2291 }
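
/*
 * Illustrative sketch, not part of this file: the three lines above are a
 * fixed-point exponentially decaying average. ttime_samples converges toward
 * 256 (so sample_valid()'s "> 80" check passes after roughly three requests),
 * ttime_total carries the think times scaled by 256, and ttime_mean divides
 * the two back out (the +128 is approximate rounding). A standalone userspace
 * version, with assumed example think times in jiffies:
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned long samples = 0, total = 0, mean;
	unsigned long ttimes[] = { 4, 4, 8, 2, 4 };	/* think times, jiffies */

	for (int i = 0; i < 5; i++) {
		samples = (7 * samples + 256) / 8;	/* -> 256 in the limit */
		total = (7 * total + 256 * ttimes[i]) / 8;
		mean = (total + 128) / samples;		/* back to jiffies */
		printf("sample %d: samples=%lu mean=%lu\n", i, samples, mean);
	}
	return 0;
}
#endif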
2292
2293 static void
2294 cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
2295 struct request *rq)
2296 {
2297 sector_t sdist;
2298 u64 total;
2299
2300 if (!cfqq->last_request_pos)
2301 sdist = 0;
2302 else if (cfqq->last_request_pos < blk_rq_pos(rq))
2303 sdist = blk_rq_pos(rq) - cfqq->last_request_pos;
2304 else
2305 sdist = cfqq->last_request_pos - blk_rq_pos(rq);
2306
2307 /*
2308 * Don't allow the seek distance to get too large from the
2309 * odd fragment, pagein, etc.
2310 */
2311 if (cfqq->seek_samples <= 60) /* second&third seek */
2312 sdist = min(sdist, (cfqq->seek_mean * 4) + 2*1024*1024);
2313 else
2314 sdist = min(sdist, (cfqq->seek_mean * 4) + 2*1024*64);
2315
2316 cfqq->seek_samples = (7*cfqq->seek_samples + 256) / 8;
2317 cfqq->seek_total = (7*cfqq->seek_total + (u64)256*sdist) / 8;
2318 total = cfqq->seek_total + (cfqq->seek_samples/2);
2319 do_div(total, cfqq->seek_samples);
2320 cfqq->seek_mean = (sector_t)total;
2321
2322 /*
2323 * If this cfqq is shared between multiple processes, check to
2324 * make sure that those processes are still issuing I/Os within
2325 * the mean seek distance. If not, it may be time to break the
2326 * queues apart again.
2327 */
2328 if (cfq_cfqq_coop(cfqq)) {
2329 if (CFQQ_SEEKY(cfqq) && !cfqq->seeky_start)
2330 cfqq->seeky_start = jiffies;
2331 else if (!CFQQ_SEEKY(cfqq))
2332 cfqq->seeky_start = 0;
2333 }
2334 }
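
/*
 * Illustrative sketch, not part of this file: the seek mean uses the same
 * decaying-average scheme as the think time above, with the extra clamp so a
 * single huge seek cannot blow up the mean. do_div() is the kernel's 64-bit
 * divide; plain C division stands in for it in this assumed userspace rework:
 */
#if 0
#include <stdio.h>
#include <stdint.h>

#define MIN(a, b)	((a) < (b) ? (a) : (b))

int main(void)
{
	uint64_t seek_total = 0, total;
	unsigned long seek_samples = 0, seek_mean = 0;
	unsigned long dists[] = { 8, 16, 1000000, 24 };	/* sectors */

	for (int i = 0; i < 4; i++) {
		uint64_t sdist = dists[i];

		/* clamp relative to the current mean; generous bound early on */
		if (seek_samples <= 60)
			sdist = MIN(sdist, seek_mean * 4ULL + 2*1024*1024);
		else
			sdist = MIN(sdist, seek_mean * 4ULL + 2*1024*64);

		seek_samples = (7 * seek_samples + 256) / 8;
		seek_total = (7 * seek_total + 256 * sdist) / 8;
		total = seek_total + seek_samples / 2;	/* round */
		seek_mean = (unsigned long)(total / seek_samples);
		printf("dist %lu -> mean %lu\n", dists[i], seek_mean);
	}
	return 0;
}
#endif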
2335
2336 /*
2337 * Disable idle window if the process thinks too long or seeks so much that
2338 * it doesn't matter
2339 */
2340 static void
2341 cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
2342 struct cfq_io_context *cic)
2343 {
2344 int old_idle, enable_idle;
2345
2346 /*
2347 * Don't idle for async or idle io prio class
2348 */
2349 if (!cfq_cfqq_sync(cfqq) || cfq_class_idle(cfqq))
2350 return;
2351
2352 enable_idle = old_idle = cfq_cfqq_idle_window(cfqq);
2353
2354 if (!atomic_read(&cic->ioc->nr_tasks) || !cfqd->cfq_slice_idle ||
2355 (sample_valid(cfqq->seek_samples) && CFQQ_SEEKY(cfqq)))
2356 enable_idle = 0;
2357 else if (sample_valid(cic->ttime_samples)) {
2358 if (cic->ttime_mean > cfqd->cfq_slice_idle)
2359 enable_idle = 0;
2360 else
2361 enable_idle = 1;
2362 }
2363
2364 if (old_idle != enable_idle) {
2365 cfq_log_cfqq(cfqd, cfqq, "idle=%d", enable_idle);
2366 if (enable_idle)
2367 cfq_mark_cfqq_idle_window(cfqq);
2368 else
2369 cfq_clear_cfqq_idle_window(cfqq);
2370 }
2371 }
2372
2373 /*
2374 * Check if new_cfqq should preempt the currently active queue. Returns false
2375 * for no (or if we aren't sure); returning true will cause a preempt.
2376 */
2377 static bool
2378 cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
2379 struct request *rq)
2380 {
2381 struct cfq_queue *cfqq;
2382
2383 cfqq = cfqd->active_queue;
2384 if (!cfqq)
2385 return false;
2386
2387 if (cfq_slice_used(cfqq))
2388 return true;
2389
2390 if (cfq_class_idle(new_cfqq))
2391 return false;
2392
2393 if (cfq_class_idle(cfqq))
2394 return true;
2395
2396 if (cfqd->serving_type == SYNC_NOIDLE_WORKLOAD
2397 && new_cfqq->service_tree == cfqq->service_tree)
2398 return true;
2399
2400 /*
2401 * if the new request is sync, but the currently running queue is
2402 * not, let the sync request have priority.
2403 */
2404 if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
2405 return true;
2406
2407 /*
2408 * So both queues are sync. Let the new request get disk time if
2409 * it's a metadata request and the current queue is doing regular IO.
2410 */
2411 if (rq_is_meta(rq) && !cfqq->meta_pending)
2412 return true;
2413
2414 /*
2415 * Allow an RT request to pre-empt an ongoing non-RT cfqq timeslice.
2416 */
2417 if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq))
2418 return true;
2419
2420 if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
2421 return false;
2422
2423 /*
2424 * if this request is as-good as one we would expect from the
2425 * current cfqq, let it preempt
2426 */
2427 if (cfq_rq_close(cfqd, cfqq, rq))
2428 return true;
2429
2430 return false;
2431 }
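
/*
 * Illustrative sketch, not part of this file: the rule ordering above
 * matters, since earlier checks win. A condensed restatement with simplified
 * field names (the active_cic test is folded into waiting_on_request here):
 */
#if 0
#include <stdbool.h>

struct qstate {
	bool slice_used, class_idle, class_rt, sync;
	bool meta_pending, waiting_on_request;
};

static bool should_preempt(const struct qstate *cur, const struct qstate *newq,
			   bool same_noidle_tree, bool rq_sync, bool rq_meta,
			   bool rq_close)
{
	if (!cur)
		return false;		/* nothing active to preempt */
	if (cur->slice_used)
		return true;		/* active slice already used up */
	if (newq->class_idle)
		return false;		/* idle class never preempts */
	if (cur->class_idle)
		return true;		/* anything preempts the idle class */
	if (same_noidle_tree)
		return true;		/* no-idle workload, same service tree */
	if (rq_sync && !cur->sync)
		return true;		/* sync beats async */
	if (rq_meta && !cur->meta_pending)
		return true;		/* metadata beats regular I/O */
	if (newq->class_rt && !cur->class_rt)
		return true;		/* RT beats non-RT */
	if (!cur->waiting_on_request)
		return false;
	return rq_close;		/* a close-by request may slip in */
}
#endif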
2432
2433 /*
2434 * cfqq preempts the active queue. If we allowed preempt with no slice left,
2435 * let it have half of its nominal slice.
2436 */
2437 static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2438 {
2439 cfq_log_cfqq(cfqd, cfqq, "preempt");
2440 cfq_slice_expired(cfqd, 1);
2441
2442 /*
2443 * Put the new queue at the front of the current list,
2444 * so we know that it will be selected next.
2445 */
2446 BUG_ON(!cfq_cfqq_on_rr(cfqq));
2447
2448 cfq_service_tree_add(cfqd, cfqq, 1);
2449
2450 cfqq->slice_end = 0;
2451 cfq_mark_cfqq_slice_new(cfqq);
2452 }
2453
2454 /*
2455 * Called when a new fs request (rq) is added (to cfqq). Check if there's
2456 * something we should do about it.
2457 */
2458 static void
2459 cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
2460 struct request *rq)
2461 {
2462 struct cfq_io_context *cic = RQ_CIC(rq);
2463
2464 cfqd->rq_queued++;
2465 if (rq_is_meta(rq))
2466 cfqq->meta_pending++;
2467
2468 cfq_update_io_thinktime(cfqd, cic);
2469 cfq_update_io_seektime(cfqd, cfqq, rq);
2470 cfq_update_idle_window(cfqd, cfqq, cic);
2471
2472 cfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
2473
2474 if (cfqq == cfqd->active_queue) {
2475 /*
2476 * Remember that we saw a request from this process, but
2477 * don't start queuing just yet. Otherwise we risk seeing lots
2478 * of tiny requests, because we disrupt the normal plugging
2479 * and merging. If the request is already larger than a single
2480 * page, let it rip immediately. For that case we assume that
2481 * merging is already done. Ditto for a busy system that
2482 * has other work pending, don't risk delaying until the
2483 * idle timer unplug to continue working.
2484 */
2485 if (cfq_cfqq_wait_request(cfqq)) {
2486 if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE ||
2487 cfqd->busy_queues > 1) {
2488 del_timer(&cfqd->idle_slice_timer);
2489 __blk_run_queue(cfqd->queue);
2490 }
2491 cfq_mark_cfqq_must_dispatch(cfqq);
2492 }
2493 } else if (cfq_should_preempt(cfqd, cfqq, rq)) {
2494 /*
2495 * not the active queue - expire current slice if it is
2496 * idle and has expired it's mean thinktime or this new queue
2497 * has some old slice time left and is of higher priority or
2498 * this new queue is RT and the current one is BE
2499 */
2500 cfq_preempt_queue(cfqd, cfqq);
2501 __blk_run_queue(cfqd->queue);
2502 }
2503 }
2504
2505 static void cfq_insert_request(struct request_queue *q, struct request *rq)
2506 {
2507 struct cfq_data *cfqd = q->elevator->elevator_data;
2508 struct cfq_queue *cfqq = RQ_CFQQ(rq);
2509
2510 cfq_log_cfqq(cfqd, cfqq, "insert_request");
2511 cfq_init_prio_data(cfqq, RQ_CIC(rq)->ioc);
2512
2513 rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]);
2514 list_add_tail(&rq->queuelist, &cfqq->fifo);
2515 cfq_add_rq_rb(rq);
2516
2517 cfq_rq_enqueued(cfqd, cfqq, rq);
2518 }
2519
2520 /*
2521 * Update hw_tag based on peak queue depth over 50 samples under
2522 * sufficient load.
2523 */
2524 static void cfq_update_hw_tag(struct cfq_data *cfqd)
2525 {
2526 struct cfq_queue *cfqq = cfqd->active_queue;
2527
2528 if (rq_in_driver(cfqd) > cfqd->rq_in_driver_peak)
2529 cfqd->rq_in_driver_peak = rq_in_driver(cfqd);
2530
2531 if (cfqd->rq_queued <= CFQ_HW_QUEUE_MIN &&
2532 rq_in_driver(cfqd) <= CFQ_HW_QUEUE_MIN)
2533 return;
2534
2535 /*
2536 * If active queue hasn't enough requests and can idle, cfq might not
2537 * dispatch sufficient requests to hardware. Don't zero hw_tag in this
2538 * case
2539 */
2540 if (cfqq && cfq_cfqq_idle_window(cfqq) &&
2541 cfqq->dispatched + cfqq->queued[0] + cfqq->queued[1] <
2542 CFQ_HW_QUEUE_MIN && rq_in_driver(cfqd) < CFQ_HW_QUEUE_MIN)
2543 return;
2544
2545 if (cfqd->hw_tag_samples++ < 50)
2546 return;
2547
2548 if (cfqd->rq_in_driver_peak >= CFQ_HW_QUEUE_MIN)
2549 cfqd->hw_tag = 1;
2550 else
2551 cfqd->hw_tag = 0;
2552
2553 cfqd->hw_tag_samples = 0;
2554 cfqd->rq_in_driver_peak = 0;
2555 }
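
/*
 * Illustrative sketch, not part of this file: stripped of the low-load
 * guards, the detector above is a windowed peak check - every 50 samples,
 * decide whether the device kept at least CFQ_HW_QUEUE_MIN requests in
 * flight, i.e. whether it appears to do its own command queueing:
 */
#if 0
static int hw_tag = 1, samples, peak;

static void sample_depth(int in_flight)
{
	if (in_flight > peak)
		peak = in_flight;
	if (++samples < 50)
		return;
	hw_tag = peak >= 5;		/* CFQ_HW_QUEUE_MIN */
	samples = peak = 0;		/* start a new window */
}
#endif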
2556
2557 static void cfq_completed_request(struct request_queue *q, struct request *rq)
2558 {
2559 struct cfq_queue *cfqq = RQ_CFQQ(rq);
2560 struct cfq_data *cfqd = cfqq->cfqd;
2561 const int sync = rq_is_sync(rq);
2562 unsigned long now;
2563
2564 now = jiffies;
2565 cfq_log_cfqq(cfqd, cfqq, "complete");
2566
2567 cfq_update_hw_tag(cfqd);
2568
2569 WARN_ON(!cfqd->rq_in_driver[sync]);
2570 WARN_ON(!cfqq->dispatched);
2571 cfqd->rq_in_driver[sync]--;
2572 cfqq->dispatched--;
2573
2574 if (cfq_cfqq_sync(cfqq))
2575 cfqd->sync_flight--;
2576
2577 if (sync) {
2578 RQ_CIC(rq)->last_end_request = now;
2579 cfqd->last_end_sync_rq = now;
2580 }
2581
2582 /*
2583 * If this is the active queue, check if it needs to be expired,
2584 * or if we want to idle in case it has no pending requests.
2585 */
2586 if (cfqd->active_queue == cfqq) {
2587 const bool cfqq_empty = RB_EMPTY_ROOT(&cfqq->sort_list);
2588
2589 if (cfq_cfqq_slice_new(cfqq)) {
2590 cfq_set_prio_slice(cfqd, cfqq);
2591 cfq_clear_cfqq_slice_new(cfqq);
2592 }
2593 /*
2594 * If there are no requests waiting in this queue, and
2595 * there are other queues ready to issue requests, AND
2596 * those other queues are issuing requests within our
2597 * mean seek distance, give them a chance to run instead
2598 * of idling.
2599 */
2600 if (cfq_slice_used(cfqq) || cfq_class_idle(cfqq))
2601 cfq_slice_expired(cfqd, 1);
2602 else if (cfqq_empty && !cfq_close_cooperator(cfqd, cfqq) &&
2603 sync && !rq_noidle(rq))
2604 cfq_arm_slice_timer(cfqd);
2605 }
2606
2607 if (!rq_in_driver(cfqd))
2608 cfq_schedule_dispatch(cfqd);
2609 }
2610
2611 /*
2612 * we temporarily boost lower-priority queues if they are holding fs-exclusive
2613 * resources; they are boosted to normal prio (CLASS_BE/4)
2614 */
2615 static void cfq_prio_boost(struct cfq_queue *cfqq)
2616 {
2617 if (has_fs_excl()) {
2618 /*
2619 * boost idle prio on transactions that would lock out other
2620 * users of the filesystem
2621 */
2622 if (cfq_class_idle(cfqq))
2623 cfqq->ioprio_class = IOPRIO_CLASS_BE;
2624 if (cfqq->ioprio > IOPRIO_NORM)
2625 cfqq->ioprio = IOPRIO_NORM;
2626 } else {
2627 /*
2628 * unboost the queue (if needed)
2629 */
2630 cfqq->ioprio_class = cfqq->org_ioprio_class;
2631 cfqq->ioprio = cfqq->org_ioprio;
2632 }
2633 }
2634
2635 static inline int __cfq_may_queue(struct cfq_queue *cfqq)
2636 {
2637 if (cfq_cfqq_wait_request(cfqq) && !cfq_cfqq_must_alloc_slice(cfqq)) {
2638 cfq_mark_cfqq_must_alloc_slice(cfqq);
2639 return ELV_MQUEUE_MUST;
2640 }
2641
2642 return ELV_MQUEUE_MAY;
2643 }
2644
2645 static int cfq_may_queue(struct request_queue *q, int rw)
2646 {
2647 struct cfq_data *cfqd = q->elevator->elevator_data;
2648 struct task_struct *tsk = current;
2649 struct cfq_io_context *cic;
2650 struct cfq_queue *cfqq;
2651
2652 /*
2653 * don't force setup of a queue from here, as a call to may_queue
2654 * does not necessarily imply that a request actually will be queued.
2655 * so just look up a possibly existing queue, or return 'may queue'
2656 * if that fails
2657 */
2658 cic = cfq_cic_lookup(cfqd, tsk->io_context);
2659 if (!cic)
2660 return ELV_MQUEUE_MAY;
2661
2662 cfqq = cic_to_cfqq(cic, rw_is_sync(rw));
2663 if (cfqq) {
2664 cfq_init_prio_data(cfqq, cic->ioc);
2665 cfq_prio_boost(cfqq);
2666
2667 return __cfq_may_queue(cfqq);
2668 }
2669
2670 return ELV_MQUEUE_MAY;
2671 }
2672
2673 /*
2674 * queue lock held here
2675 */
2676 static void cfq_put_request(struct request *rq)
2677 {
2678 struct cfq_queue *cfqq = RQ_CFQQ(rq);
2679
2680 if (cfqq) {
2681 const int rw = rq_data_dir(rq);
2682
2683 BUG_ON(!cfqq->allocated[rw]);
2684 cfqq->allocated[rw]--;
2685
2686 put_io_context(RQ_CIC(rq)->ioc);
2687
2688 rq->elevator_private = NULL;
2689 rq->elevator_private2 = NULL;
2690
2691 cfq_put_queue(cfqq);
2692 }
2693 }
2694
2695 static struct cfq_queue *
2696 cfq_merge_cfqqs(struct cfq_data *cfqd, struct cfq_io_context *cic,
2697 struct cfq_queue *cfqq)
2698 {
2699 cfq_log_cfqq(cfqd, cfqq, "merging with queue %p", cfqq->new_cfqq);
2700 cic_set_cfqq(cic, cfqq->new_cfqq, 1);
2701 cfq_mark_cfqq_coop(cfqq->new_cfqq);
2702 cfq_put_queue(cfqq);
2703 return cic_to_cfqq(cic, 1);
2704 }
2705
2706 static int should_split_cfqq(struct cfq_queue *cfqq)
2707 {
2708 if (cfqq->seeky_start &&
2709 time_after(jiffies, cfqq->seeky_start + CFQQ_COOP_TOUT))
2710 return 1;
2711 return 0;
2712 }
2713
2714 /*
2715 * Returns NULL if a new cfqq should be allocated, or the old cfqq if this
2716 * was the last process referring to said cfqq.
2717 */
2718 static struct cfq_queue *
2719 split_cfqq(struct cfq_io_context *cic, struct cfq_queue *cfqq)
2720 {
2721 if (cfqq_process_refs(cfqq) == 1) {
2722 cfqq->seeky_start = 0;
2723 cfqq->pid = current->pid;
2724 cfq_clear_cfqq_coop(cfqq);
2725 return cfqq;
2726 }
2727
2728 cic_set_cfqq(cic, NULL, 1);
2729 cfq_put_queue(cfqq);
2730 return NULL;
2731 }
2732 /*
2733 * Allocate cfq data structures associated with this request.
2734 */
2735 static int
2736 cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
2737 {
2738 struct cfq_data *cfqd = q->elevator->elevator_data;
2739 struct cfq_io_context *cic;
2740 const int rw = rq_data_dir(rq);
2741 const bool is_sync = rq_is_sync(rq);
2742 struct cfq_queue *cfqq;
2743 unsigned long flags;
2744
2745 might_sleep_if(gfp_mask & __GFP_WAIT);
2746
2747 cic = cfq_get_io_context(cfqd, gfp_mask);
2748
2749 spin_lock_irqsave(q->queue_lock, flags);
2750
2751 if (!cic)
2752 goto queue_fail;
2753
2754 new_queue:
2755 cfqq = cic_to_cfqq(cic, is_sync);
2756 if (!cfqq || cfqq == &cfqd->oom_cfqq) {
2757 cfqq = cfq_get_queue(cfqd, is_sync, cic->ioc, gfp_mask);
2758 cic_set_cfqq(cic, cfqq, is_sync);
2759 } else {
2760 /*
2761 * If the queue was seeky for too long, break it apart.
2762 */
2763 if (cfq_cfqq_coop(cfqq) && should_split_cfqq(cfqq)) {
2764 cfq_log_cfqq(cfqd, cfqq, "breaking apart cfqq");
2765 cfqq = split_cfqq(cic, cfqq);
2766 if (!cfqq)
2767 goto new_queue;
2768 }
2769
2770 /*
2771 * Check to see if this queue is scheduled to merge with
2772 * another, closely cooperating queue. The merging of
2773 * queues happens here as it must be done in process context.
2774 * The reference on new_cfqq was taken in merge_cfqqs.
2775 */
2776 if (cfqq->new_cfqq)
2777 cfqq = cfq_merge_cfqqs(cfqd, cic, cfqq);
2778 }
2779
2780 cfqq->allocated[rw]++;
2781 atomic_inc(&cfqq->ref);
2782
2783 spin_unlock_irqrestore(q->queue_lock, flags);
2784
2785 rq->elevator_private = cic;
2786 rq->elevator_private2 = cfqq;
2787 return 0;
2788
2789 queue_fail:
2790 if (cic)
2791 put_io_context(cic->ioc);
2792
2793 cfq_schedule_dispatch(cfqd);
2794 spin_unlock_irqrestore(q->queue_lock, flags);
2795 cfq_log(cfqd, "set_request fail");
2796 return 1;
2797 }
2798
2799 static void cfq_kick_queue(struct work_struct *work)
2800 {
2801 struct cfq_data *cfqd =
2802 container_of(work, struct cfq_data, unplug_work);
2803 struct request_queue *q = cfqd->queue;
2804
2805 spin_lock_irq(q->queue_lock);
2806 __blk_run_queue(cfqd->queue);
2807 spin_unlock_irq(q->queue_lock);
2808 }
2809
2810 /*
2811 * Timer running if the active_queue is currently idling inside its time slice
2812 */
2813 static void cfq_idle_slice_timer(unsigned long data)
2814 {
2815 struct cfq_data *cfqd = (struct cfq_data *) data;
2816 struct cfq_queue *cfqq;
2817 unsigned long flags;
2818 int timed_out = 1;
2819
2820 cfq_log(cfqd, "idle timer fired");
2821
2822 spin_lock_irqsave(cfqd->queue->queue_lock, flags);
2823
2824 cfqq = cfqd->active_queue;
2825 if (cfqq) {
2826 timed_out = 0;
2827
2828 /*
2829 * We saw a request before the queue expired, let it through
2830 */
2831 if (cfq_cfqq_must_dispatch(cfqq))
2832 goto out_kick;
2833
2834 /*
2835 * expired
2836 */
2837 if (cfq_slice_used(cfqq))
2838 goto expire;
2839
2840 /*
2841 * only expire and reinvoke the request handler if there are
2842 * other queues with pending requests
2843 */
2844 if (!cfqd->busy_queues)
2845 goto out_cont;
2846
2847 /*
2848 * not expired and it has a request pending, let it dispatch
2849 */
2850 if (!RB_EMPTY_ROOT(&cfqq->sort_list))
2851 goto out_kick;
2852 }
2853 expire:
2854 cfq_slice_expired(cfqd, timed_out);
2855 out_kick:
2856 cfq_schedule_dispatch(cfqd);
2857 out_cont:
2858 spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
2859 }
2860
2861 static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
2862 {
2863 del_timer_sync(&cfqd->idle_slice_timer);
2864 cancel_work_sync(&cfqd->unplug_work);
2865 }
2866
2867 static void cfq_put_async_queues(struct cfq_data *cfqd)
2868 {
2869 int i;
2870
2871 for (i = 0; i < IOPRIO_BE_NR; i++) {
2872 if (cfqd->async_cfqq[0][i])
2873 cfq_put_queue(cfqd->async_cfqq[0][i]);
2874 if (cfqd->async_cfqq[1][i])
2875 cfq_put_queue(cfqd->async_cfqq[1][i]);
2876 }
2877
2878 if (cfqd->async_idle_cfqq)
2879 cfq_put_queue(cfqd->async_idle_cfqq);
2880 }
2881
2882 static void cfq_exit_queue(struct elevator_queue *e)
2883 {
2884 struct cfq_data *cfqd = e->elevator_data;
2885 struct request_queue *q = cfqd->queue;
2886
2887 cfq_shutdown_timer_wq(cfqd);
2888
2889 spin_lock_irq(q->queue_lock);
2890
2891 if (cfqd->active_queue)
2892 __cfq_slice_expired(cfqd, cfqd->active_queue, 0);
2893
2894 while (!list_empty(&cfqd->cic_list)) {
2895 struct cfq_io_context *cic = list_entry(cfqd->cic_list.next,
2896 struct cfq_io_context,
2897 queue_list);
2898
2899 __cfq_exit_single_io_context(cfqd, cic);
2900 }
2901
2902 cfq_put_async_queues(cfqd);
2903
2904 spin_unlock_irq(q->queue_lock);
2905
2906 cfq_shutdown_timer_wq(cfqd);
2907
2908 kfree(cfqd);
2909 }
2910
2911 static void *cfq_init_queue(struct request_queue *q)
2912 {
2913 struct cfq_data *cfqd;
2914 int i, j;
2915
2916 cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
2917 if (!cfqd)
2918 return NULL;
2919
2920 for (i = 0; i < 2; ++i)
2921 for (j = 0; j < 3; ++j)
2922 cfqd->service_trees[i][j] = CFQ_RB_ROOT;
2923 cfqd->service_tree_idle = CFQ_RB_ROOT;
2924
2925 /*
2926 * Not strictly needed (since RB_ROOT just clears the node and we
2927 * zeroed cfqd on alloc), but better be safe in case someone decides
2928 * to add magic to the rb code
2929 */
2930 for (i = 0; i < CFQ_PRIO_LISTS; i++)
2931 cfqd->prio_trees[i] = RB_ROOT;
2932
2933 /*
2934 * Our fallback cfqq if cfq_find_alloc_queue() runs into OOM issues.
2935 * Grab a permanent reference to it, so that the normal code flow
2936 * will not attempt to free it.
2937 */
2938 cfq_init_cfqq(cfqd, &cfqd->oom_cfqq, 1, 0);
2939 atomic_inc(&cfqd->oom_cfqq.ref);
2940
2941 INIT_LIST_HEAD(&cfqd->cic_list);
2942
2943 cfqd->queue = q;
2944
2945 init_timer(&cfqd->idle_slice_timer);
2946 cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
2947 cfqd->idle_slice_timer.data = (unsigned long) cfqd;
2948
2949 INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);
2950
2951 cfqd->cfq_quantum = cfq_quantum;
2952 cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
2953 cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];
2954 cfqd->cfq_back_max = cfq_back_max;
2955 cfqd->cfq_back_penalty = cfq_back_penalty;
2956 cfqd->cfq_slice[0] = cfq_slice_async;
2957 cfqd->cfq_slice[1] = cfq_slice_sync;
2958 cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
2959 cfqd->cfq_slice_idle = cfq_slice_idle;
2960 cfqd->cfq_latency = 1;
2961 cfqd->hw_tag = 1;
2962 cfqd->last_end_sync_rq = jiffies;
2963 return cfqd;
2964 }
2965
2966 static void cfq_slab_kill(void)
2967 {
2968 /*
2969 * Caller already ensured that pending RCU callbacks are completed,
2970 * so we should have no busy allocations at this point.
2971 */
2972 if (cfq_pool)
2973 kmem_cache_destroy(cfq_pool);
2974 if (cfq_ioc_pool)
2975 kmem_cache_destroy(cfq_ioc_pool);
2976 }
2977
2978 static int __init cfq_slab_setup(void)
2979 {
2980 cfq_pool = KMEM_CACHE(cfq_queue, 0);
2981 if (!cfq_pool)
2982 goto fail;
2983
2984 cfq_ioc_pool = KMEM_CACHE(cfq_io_context, 0);
2985 if (!cfq_ioc_pool)
2986 goto fail;
2987
2988 return 0;
2989 fail:
2990 cfq_slab_kill();
2991 return -ENOMEM;
2992 }
2993
2994 /*
2995 * sysfs parts below -->
2996 */
2997 static ssize_t
2998 cfq_var_show(unsigned int var, char *page)
2999 {
3000 return sprintf(page, "%d\n", var);
3001 }
3002
3003 static ssize_t
3004 cfq_var_store(unsigned int *var, const char *page, size_t count)
3005 {
3006 char *p = (char *) page;
3007
3008 *var = simple_strtoul(p, &p, 10);
3009 return count;
3010 }
3011
3012 #define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \
3013 static ssize_t __FUNC(struct elevator_queue *e, char *page) \
3014 { \
3015 struct cfq_data *cfqd = e->elevator_data; \
3016 unsigned int __data = __VAR; \
3017 if (__CONV) \
3018 __data = jiffies_to_msecs(__data); \
3019 return cfq_var_show(__data, (page)); \
3020 }
3021 SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
3022 SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1);
3023 SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
3024 SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
3025 SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
3026 SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
3027 SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
3028 SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
3029 SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
3030 SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0);
3031 #undef SHOW_FUNCTION
3032
3033 #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \
3034 static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
3035 { \
3036 struct cfq_data *cfqd = e->elevator_data; \
3037 unsigned int __data; \
3038 int ret = cfq_var_store(&__data, (page), count); \
3039 if (__data < (MIN)) \
3040 __data = (MIN); \
3041 else if (__data > (MAX)) \
3042 __data = (MAX); \
3043 if (__CONV) \
3044 *(__PTR) = msecs_to_jiffies(__data); \
3045 else \
3046 *(__PTR) = __data; \
3047 return ret; \
3048 }
3049 STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
3050 STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1,
3051 UINT_MAX, 1);
3052 STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1,
3053 UINT_MAX, 1);
3054 STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
3055 STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1,
3056 UINT_MAX, 0);
3057 STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
3058 STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
3059 STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
3060 STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
3061 UINT_MAX, 0);
3062 STORE_FUNCTION(cfq_low_latency_store, &cfqd->cfq_latency, 0, 1, 0);
3063 #undef STORE_FUNCTION
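
/*
 * Illustrative expansion, not part of this file: these tunables appear under
 * /sys/block/<dev>/queue/iosched/ while cfq is the active elevator. Written
 * out long-hand for one of them (cfq_quantum_store: MIN=1, MAX=UINT_MAX, no
 * ms<->jiffies conversion; note the MAX clamp is dead code here, since an
 * unsigned int can never exceed UINT_MAX):
 */
#if 0
static ssize_t cfq_quantum_store(struct elevator_queue *e, const char *page,
				 size_t count)
{
	struct cfq_data *cfqd = e->elevator_data;
	unsigned int __data;
	int ret = cfq_var_store(&__data, page, count);

	if (__data < 1)
		__data = 1;
	else if (__data > UINT_MAX)
		__data = UINT_MAX;
	*(&cfqd->cfq_quantum) = __data;
	return ret;
}
#endif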
3064
3065 #define CFQ_ATTR(name) \
3066 __ATTR(name, S_IRUGO|S_IWUSR, cfq_##name##_show, cfq_##name##_store)
3067
3068 static struct elv_fs_entry cfq_attrs[] = {
3069 CFQ_ATTR(quantum),
3070 CFQ_ATTR(fifo_expire_sync),
3071 CFQ_ATTR(fifo_expire_async),
3072 CFQ_ATTR(back_seek_max),
3073 CFQ_ATTR(back_seek_penalty),
3074 CFQ_ATTR(slice_sync),
3075 CFQ_ATTR(slice_async),
3076 CFQ_ATTR(slice_async_rq),
3077 CFQ_ATTR(slice_idle),
3078 CFQ_ATTR(low_latency),
3079 __ATTR_NULL
3080 };
3081
3082 static struct elevator_type iosched_cfq = {
3083 .ops = {
3084 .elevator_merge_fn = cfq_merge,
3085 .elevator_merged_fn = cfq_merged_request,
3086 .elevator_merge_req_fn = cfq_merged_requests,
3087 .elevator_allow_merge_fn = cfq_allow_merge,
3088 .elevator_dispatch_fn = cfq_dispatch_requests,
3089 .elevator_add_req_fn = cfq_insert_request,
3090 .elevator_activate_req_fn = cfq_activate_request,
3091 .elevator_deactivate_req_fn = cfq_deactivate_request,
3092 .elevator_queue_empty_fn = cfq_queue_empty,
3093 .elevator_completed_req_fn = cfq_completed_request,
3094 .elevator_former_req_fn = elv_rb_former_request,
3095 .elevator_latter_req_fn = elv_rb_latter_request,
3096 .elevator_set_req_fn = cfq_set_request,
3097 .elevator_put_req_fn = cfq_put_request,
3098 .elevator_may_queue_fn = cfq_may_queue,
3099 .elevator_init_fn = cfq_init_queue,
3100 .elevator_exit_fn = cfq_exit_queue,
3101 .trim = cfq_free_io_context,
3102 },
3103 .elevator_attrs = cfq_attrs,
3104 .elevator_name = "cfq",
3105 .elevator_owner = THIS_MODULE,
3106 };
3107
3108 static int __init cfq_init(void)
3109 {
3110 /*
3111 * could be 0 on HZ < 1000 setups
3112 */
3113 if (!cfq_slice_async)
3114 cfq_slice_async = 1;
3115 if (!cfq_slice_idle)
3116 cfq_slice_idle = 1;
3117
3118 if (cfq_slab_setup())
3119 return -ENOMEM;
3120
3121 elv_register(&iosched_cfq);
3122
3123 return 0;
3124 }
3125
3126 static void __exit cfq_exit(void)
3127 {
3128 DECLARE_COMPLETION_ONSTACK(all_gone);
3129 elv_unregister(&iosched_cfq);
3130 ioc_gone = &all_gone;
3131 /* ioc_gone's update must be visible before reading ioc_count */
3132 smp_wmb();
3133
3134 /*
3135 * this also protects us from entering cfq_slab_kill() with
3136 * pending RCU callbacks
3137 */
3138 if (elv_ioc_count_read(cfq_ioc_count))
3139 wait_for_completion(&all_gone);
3140 cfq_slab_kill();
3141 }
3142
3143 module_init(cfq_init);
3144 module_exit(cfq_exit);
3145
3146 MODULE_AUTHOR("Jens Axboe");
3147 MODULE_LICENSE("GPL");
3148 MODULE_DESCRIPTION("Completely Fair Queueing IO scheduler");