]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blob - block/cfq-iosched.c
blkcg: move io_service_bytes and io_serviced stats into blkcg_gq
[mirror_ubuntu-bionic-kernel.git] / block / cfq-iosched.c
1 /*
2 * CFQ, or complete fairness queueing, disk scheduler.
3 *
4 * Based on ideas from a previously unfinished io
5 * scheduler (round robin per-process disk scheduling) and Andrea Arcangeli.
6 *
7 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
8 */
9 #include <linux/module.h>
10 #include <linux/slab.h>
11 #include <linux/blkdev.h>
12 #include <linux/elevator.h>
13 #include <linux/jiffies.h>
14 #include <linux/rbtree.h>
15 #include <linux/ioprio.h>
16 #include <linux/blktrace_api.h>
17 #include <linux/blk-cgroup.h>
18 #include "blk.h"
19
20 /*
21 * tunables
22 */
23 /* max queue in one round of service */
24 static const int cfq_quantum = 8;
25 static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
26 /* maximum backwards seek, in KiB */
27 static const int cfq_back_max = 16 * 1024;
28 /* penalty of a backwards seek */
29 static const int cfq_back_penalty = 2;
30 static const int cfq_slice_sync = HZ / 10;
31 static int cfq_slice_async = HZ / 25;
32 static const int cfq_slice_async_rq = 2;
33 static int cfq_slice_idle = HZ / 125;
34 static int cfq_group_idle = HZ / 125;
35 static const int cfq_target_latency = HZ * 3/10; /* 300 ms */
36 static const int cfq_hist_divisor = 4;
37
38 /*
39 * offset from end of service tree
40 */
41 #define CFQ_IDLE_DELAY (HZ / 5)
42
43 /*
44 * below this threshold, we consider thinktime immediate
45 */
46 #define CFQ_MIN_TT (2)
47
48 #define CFQ_SLICE_SCALE (5)
49 #define CFQ_HW_QUEUE_MIN (5)
50 #define CFQ_SERVICE_SHIFT 12
51
52 #define CFQQ_SEEK_THR (sector_t)(8 * 100)
53 #define CFQQ_CLOSE_THR (sector_t)(8 * 1024)
54 #define CFQQ_SECT_THR_NONROT (sector_t)(2 * 32)
55 #define CFQQ_SEEKY(cfqq) (hweight32(cfqq->seek_history) > 32/8)
56
57 #define RQ_CIC(rq) icq_to_cic((rq)->elv.icq)
58 #define RQ_CFQQ(rq) (struct cfq_queue *) ((rq)->elv.priv[0])
59 #define RQ_CFQG(rq) (struct cfq_group *) ((rq)->elv.priv[1])
60
61 static struct kmem_cache *cfq_pool;
62
63 #define CFQ_PRIO_LISTS IOPRIO_BE_NR
64 #define cfq_class_idle(cfqq) ((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
65 #define cfq_class_rt(cfqq) ((cfqq)->ioprio_class == IOPRIO_CLASS_RT)
66
67 #define sample_valid(samples) ((samples) > 80)
68 #define rb_entry_cfqg(node) rb_entry((node), struct cfq_group, rb_node)
69
70 /* blkio-related constants */
71 #define CFQ_WEIGHT_MIN 10
72 #define CFQ_WEIGHT_MAX 1000
73 #define CFQ_WEIGHT_DEFAULT 500
74
75 struct cfq_ttime {
76 unsigned long last_end_request;
77
78 unsigned long ttime_total;
79 unsigned long ttime_samples;
80 unsigned long ttime_mean;
81 };
82
83 /*
84 * Most of our rbtree usage is for sorting with min extraction, so
85 * if we cache the leftmost node we don't have to walk down the tree
86 * to find it. Idea borrowed from Ingo Molnars CFS scheduler. We should
87 * move this into the elevator for the rq sorting as well.
88 */
89 struct cfq_rb_root {
90 struct rb_root rb;
91 struct rb_node *left;
92 unsigned count;
93 u64 min_vdisktime;
94 struct cfq_ttime ttime;
95 };
96 #define CFQ_RB_ROOT (struct cfq_rb_root) { .rb = RB_ROOT, \
97 .ttime = {.last_end_request = jiffies,},}
98
99 /*
100 * Per process-grouping structure
101 */
102 struct cfq_queue {
103 /* reference count */
104 int ref;
105 /* various state flags, see below */
106 unsigned int flags;
107 /* parent cfq_data */
108 struct cfq_data *cfqd;
109 /* service_tree member */
110 struct rb_node rb_node;
111 /* service_tree key */
112 unsigned long rb_key;
113 /* prio tree member */
114 struct rb_node p_node;
115 /* prio tree root we belong to, if any */
116 struct rb_root *p_root;
117 /* sorted list of pending requests */
118 struct rb_root sort_list;
119 /* if fifo isn't expired, next request to serve */
120 struct request *next_rq;
121 /* requests queued in sort_list */
122 int queued[2];
123 /* currently allocated requests */
124 int allocated[2];
125 /* fifo list of requests in sort_list */
126 struct list_head fifo;
127
128 /* time when queue got scheduled in to dispatch first request. */
129 unsigned long dispatch_start;
130 unsigned int allocated_slice;
131 unsigned int slice_dispatch;
132 /* time when first request from queue completed and slice started. */
133 unsigned long slice_start;
134 unsigned long slice_end;
135 long slice_resid;
136
137 /* pending priority requests */
138 int prio_pending;
139 /* number of requests that are on the dispatch list or inside driver */
140 int dispatched;
141
142 /* io prio of this group */
143 unsigned short ioprio, org_ioprio;
144 unsigned short ioprio_class;
145
146 pid_t pid;
147
148 u32 seek_history;
149 sector_t last_request_pos;
150
151 struct cfq_rb_root *service_tree;
152 struct cfq_queue *new_cfqq;
153 struct cfq_group *cfqg;
154 /* Number of sectors dispatched from queue in single dispatch round */
155 unsigned long nr_sectors;
156 };
157
158 /*
159 * First index in the service_trees.
160 * IDLE is handled separately, so it has negative index
161 */
162 enum wl_class_t {
163 BE_WORKLOAD = 0,
164 RT_WORKLOAD = 1,
165 IDLE_WORKLOAD = 2,
166 CFQ_PRIO_NR,
167 };
168
169 /*
170 * Second index in the service_trees.
171 */
172 enum wl_type_t {
173 ASYNC_WORKLOAD = 0,
174 SYNC_NOIDLE_WORKLOAD = 1,
175 SYNC_WORKLOAD = 2
176 };
177
178 struct cfqg_stats {
179 #ifdef CONFIG_CFQ_GROUP_IOSCHED
180 /* number of ios merged */
181 struct blkg_rwstat merged;
182 /* total time spent on device in ns, may not be accurate w/ queueing */
183 struct blkg_rwstat service_time;
184 /* total time spent waiting in scheduler queue in ns */
185 struct blkg_rwstat wait_time;
186 /* number of IOs queued up */
187 struct blkg_rwstat queued;
188 /* total sectors transferred */
189 struct blkg_stat sectors;
190 /* total disk time and nr sectors dispatched by this group */
191 struct blkg_stat time;
192 #ifdef CONFIG_DEBUG_BLK_CGROUP
193 /* time not charged to this cgroup */
194 struct blkg_stat unaccounted_time;
195 /* sum of number of ios queued across all samples */
196 struct blkg_stat avg_queue_size_sum;
197 /* count of samples taken for average */
198 struct blkg_stat avg_queue_size_samples;
199 /* how many times this group has been removed from service tree */
200 struct blkg_stat dequeue;
201 /* total time spent waiting for it to be assigned a timeslice. */
202 struct blkg_stat group_wait_time;
203 /* time spent idling for this blkcg_gq */
204 struct blkg_stat idle_time;
205 /* total time with empty current active q with other requests queued */
206 struct blkg_stat empty_time;
207 /* fields after this shouldn't be cleared on stat reset */
208 uint64_t start_group_wait_time;
209 uint64_t start_idle_time;
210 uint64_t start_empty_time;
211 uint16_t flags;
212 #endif /* CONFIG_DEBUG_BLK_CGROUP */
213 #endif /* CONFIG_CFQ_GROUP_IOSCHED */
214 };
215
216 /* Per-cgroup data */
217 struct cfq_group_data {
218 /* must be the first member */
219 struct blkcg_policy_data cpd;
220
221 unsigned int weight;
222 unsigned int leaf_weight;
223 };
224
225 /* This is per cgroup per device grouping structure */
226 struct cfq_group {
227 /* must be the first member */
228 struct blkg_policy_data pd;
229
230 /* group service_tree member */
231 struct rb_node rb_node;
232
233 /* group service_tree key */
234 u64 vdisktime;
235
236 /*
237 * The number of active cfqgs and sum of their weights under this
238 * cfqg. This covers this cfqg's leaf_weight and all children's
239 * weights, but does not cover weights of further descendants.
240 *
241 * If a cfqg is on the service tree, it's active. An active cfqg
242 * also activates its parent and contributes to the children_weight
243 * of the parent.
244 */
245 int nr_active;
246 unsigned int children_weight;
247
248 /*
249 * vfraction is the fraction of vdisktime that the tasks in this
250 * cfqg are entitled to. This is determined by compounding the
251 * ratios walking up from this cfqg to the root.
252 *
253 * It is in fixed point w/ CFQ_SERVICE_SHIFT and the sum of all
254 * vfractions on a service tree is approximately 1. The sum may
255 * deviate a bit due to rounding errors and fluctuations caused by
256 * cfqgs entering and leaving the service tree.
257 */
258 unsigned int vfraction;
259
260 /*
261 * There are two weights - (internal) weight is the weight of this
262 * cfqg against the sibling cfqgs. leaf_weight is the wight of
263 * this cfqg against the child cfqgs. For the root cfqg, both
264 * weights are kept in sync for backward compatibility.
265 */
266 unsigned int weight;
267 unsigned int new_weight;
268 unsigned int dev_weight;
269
270 unsigned int leaf_weight;
271 unsigned int new_leaf_weight;
272 unsigned int dev_leaf_weight;
273
274 /* number of cfqq currently on this group */
275 int nr_cfqq;
276
277 /*
278 * Per group busy queues average. Useful for workload slice calc. We
279 * create the array for each prio class but at run time it is used
280 * only for RT and BE class and slot for IDLE class remains unused.
281 * This is primarily done to avoid confusion and a gcc warning.
282 */
283 unsigned int busy_queues_avg[CFQ_PRIO_NR];
284 /*
285 * rr lists of queues with requests. We maintain service trees for
286 * RT and BE classes. These trees are subdivided in subclasses
287 * of SYNC, SYNC_NOIDLE and ASYNC based on workload type. For IDLE
288 * class there is no subclassification and all the cfq queues go on
289 * a single tree service_tree_idle.
290 * Counts are embedded in the cfq_rb_root
291 */
292 struct cfq_rb_root service_trees[2][3];
293 struct cfq_rb_root service_tree_idle;
294
295 unsigned long saved_wl_slice;
296 enum wl_type_t saved_wl_type;
297 enum wl_class_t saved_wl_class;
298
299 /* number of requests that are on the dispatch list or inside driver */
300 int dispatched;
301 struct cfq_ttime ttime;
302 struct cfqg_stats stats; /* stats for this cfqg */
303
304 /* async queue for each priority case */
305 struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR];
306 struct cfq_queue *async_idle_cfqq;
307
308 };
309
310 struct cfq_io_cq {
311 struct io_cq icq; /* must be the first member */
312 struct cfq_queue *cfqq[2];
313 struct cfq_ttime ttime;
314 int ioprio; /* the current ioprio */
315 #ifdef CONFIG_CFQ_GROUP_IOSCHED
316 uint64_t blkcg_serial_nr; /* the current blkcg serial */
317 #endif
318 };
319
320 /*
321 * Per block device queue structure
322 */
323 struct cfq_data {
324 struct request_queue *queue;
325 /* Root service tree for cfq_groups */
326 struct cfq_rb_root grp_service_tree;
327 struct cfq_group *root_group;
328
329 /*
330 * The priority currently being served
331 */
332 enum wl_class_t serving_wl_class;
333 enum wl_type_t serving_wl_type;
334 unsigned long workload_expires;
335 struct cfq_group *serving_group;
336
337 /*
338 * Each priority tree is sorted by next_request position. These
339 * trees are used when determining if two or more queues are
340 * interleaving requests (see cfq_close_cooperator).
341 */
342 struct rb_root prio_trees[CFQ_PRIO_LISTS];
343
344 unsigned int busy_queues;
345 unsigned int busy_sync_queues;
346
347 int rq_in_driver;
348 int rq_in_flight[2];
349
350 /*
351 * queue-depth detection
352 */
353 int rq_queued;
354 int hw_tag;
355 /*
356 * hw_tag can be
357 * -1 => indeterminate, (cfq will behave as if NCQ is present, to allow better detection)
358 * 1 => NCQ is present (hw_tag_est_depth is the estimated max depth)
359 * 0 => no NCQ
360 */
361 int hw_tag_est_depth;
362 unsigned int hw_tag_samples;
363
364 /*
365 * idle window management
366 */
367 struct timer_list idle_slice_timer;
368 struct work_struct unplug_work;
369
370 struct cfq_queue *active_queue;
371 struct cfq_io_cq *active_cic;
372
373 sector_t last_position;
374
375 /*
376 * tunables, see top of file
377 */
378 unsigned int cfq_quantum;
379 unsigned int cfq_fifo_expire[2];
380 unsigned int cfq_back_penalty;
381 unsigned int cfq_back_max;
382 unsigned int cfq_slice[2];
383 unsigned int cfq_slice_async_rq;
384 unsigned int cfq_slice_idle;
385 unsigned int cfq_group_idle;
386 unsigned int cfq_latency;
387 unsigned int cfq_target_latency;
388
389 /*
390 * Fallback dummy cfqq for extreme OOM conditions
391 */
392 struct cfq_queue oom_cfqq;
393
394 unsigned long last_delayed_sync;
395 };
396
397 static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd);
398 static void cfq_put_queue(struct cfq_queue *cfqq);
399
400 static struct cfq_rb_root *st_for(struct cfq_group *cfqg,
401 enum wl_class_t class,
402 enum wl_type_t type)
403 {
404 if (!cfqg)
405 return NULL;
406
407 if (class == IDLE_WORKLOAD)
408 return &cfqg->service_tree_idle;
409
410 return &cfqg->service_trees[class][type];
411 }
412
413 enum cfqq_state_flags {
414 CFQ_CFQQ_FLAG_on_rr = 0, /* on round-robin busy list */
415 CFQ_CFQQ_FLAG_wait_request, /* waiting for a request */
416 CFQ_CFQQ_FLAG_must_dispatch, /* must be allowed a dispatch */
417 CFQ_CFQQ_FLAG_must_alloc_slice, /* per-slice must_alloc flag */
418 CFQ_CFQQ_FLAG_fifo_expire, /* FIFO checked in this slice */
419 CFQ_CFQQ_FLAG_idle_window, /* slice idling enabled */
420 CFQ_CFQQ_FLAG_prio_changed, /* task priority has changed */
421 CFQ_CFQQ_FLAG_slice_new, /* no requests dispatched in slice */
422 CFQ_CFQQ_FLAG_sync, /* synchronous queue */
423 CFQ_CFQQ_FLAG_coop, /* cfqq is shared */
424 CFQ_CFQQ_FLAG_split_coop, /* shared cfqq will be splitted */
425 CFQ_CFQQ_FLAG_deep, /* sync cfqq experienced large depth */
426 CFQ_CFQQ_FLAG_wait_busy, /* Waiting for next request */
427 };
428
429 #define CFQ_CFQQ_FNS(name) \
430 static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq) \
431 { \
432 (cfqq)->flags |= (1 << CFQ_CFQQ_FLAG_##name); \
433 } \
434 static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq) \
435 { \
436 (cfqq)->flags &= ~(1 << CFQ_CFQQ_FLAG_##name); \
437 } \
438 static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq) \
439 { \
440 return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0; \
441 }
442
443 CFQ_CFQQ_FNS(on_rr);
444 CFQ_CFQQ_FNS(wait_request);
445 CFQ_CFQQ_FNS(must_dispatch);
446 CFQ_CFQQ_FNS(must_alloc_slice);
447 CFQ_CFQQ_FNS(fifo_expire);
448 CFQ_CFQQ_FNS(idle_window);
449 CFQ_CFQQ_FNS(prio_changed);
450 CFQ_CFQQ_FNS(slice_new);
451 CFQ_CFQQ_FNS(sync);
452 CFQ_CFQQ_FNS(coop);
453 CFQ_CFQQ_FNS(split_coop);
454 CFQ_CFQQ_FNS(deep);
455 CFQ_CFQQ_FNS(wait_busy);
456 #undef CFQ_CFQQ_FNS
457
458 #if defined(CONFIG_CFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
459
460 /* cfqg stats flags */
461 enum cfqg_stats_flags {
462 CFQG_stats_waiting = 0,
463 CFQG_stats_idling,
464 CFQG_stats_empty,
465 };
466
467 #define CFQG_FLAG_FNS(name) \
468 static inline void cfqg_stats_mark_##name(struct cfqg_stats *stats) \
469 { \
470 stats->flags |= (1 << CFQG_stats_##name); \
471 } \
472 static inline void cfqg_stats_clear_##name(struct cfqg_stats *stats) \
473 { \
474 stats->flags &= ~(1 << CFQG_stats_##name); \
475 } \
476 static inline int cfqg_stats_##name(struct cfqg_stats *stats) \
477 { \
478 return (stats->flags & (1 << CFQG_stats_##name)) != 0; \
479 } \
480
481 CFQG_FLAG_FNS(waiting)
482 CFQG_FLAG_FNS(idling)
483 CFQG_FLAG_FNS(empty)
484 #undef CFQG_FLAG_FNS
485
486 /* This should be called with the queue_lock held. */
487 static void cfqg_stats_update_group_wait_time(struct cfqg_stats *stats)
488 {
489 unsigned long long now;
490
491 if (!cfqg_stats_waiting(stats))
492 return;
493
494 now = sched_clock();
495 if (time_after64(now, stats->start_group_wait_time))
496 blkg_stat_add(&stats->group_wait_time,
497 now - stats->start_group_wait_time);
498 cfqg_stats_clear_waiting(stats);
499 }
500
501 /* This should be called with the queue_lock held. */
502 static void cfqg_stats_set_start_group_wait_time(struct cfq_group *cfqg,
503 struct cfq_group *curr_cfqg)
504 {
505 struct cfqg_stats *stats = &cfqg->stats;
506
507 if (cfqg_stats_waiting(stats))
508 return;
509 if (cfqg == curr_cfqg)
510 return;
511 stats->start_group_wait_time = sched_clock();
512 cfqg_stats_mark_waiting(stats);
513 }
514
515 /* This should be called with the queue_lock held. */
516 static void cfqg_stats_end_empty_time(struct cfqg_stats *stats)
517 {
518 unsigned long long now;
519
520 if (!cfqg_stats_empty(stats))
521 return;
522
523 now = sched_clock();
524 if (time_after64(now, stats->start_empty_time))
525 blkg_stat_add(&stats->empty_time,
526 now - stats->start_empty_time);
527 cfqg_stats_clear_empty(stats);
528 }
529
530 static void cfqg_stats_update_dequeue(struct cfq_group *cfqg)
531 {
532 blkg_stat_add(&cfqg->stats.dequeue, 1);
533 }
534
535 static void cfqg_stats_set_start_empty_time(struct cfq_group *cfqg)
536 {
537 struct cfqg_stats *stats = &cfqg->stats;
538
539 if (blkg_rwstat_total(&stats->queued))
540 return;
541
542 /*
543 * group is already marked empty. This can happen if cfqq got new
544 * request in parent group and moved to this group while being added
545 * to service tree. Just ignore the event and move on.
546 */
547 if (cfqg_stats_empty(stats))
548 return;
549
550 stats->start_empty_time = sched_clock();
551 cfqg_stats_mark_empty(stats);
552 }
553
554 static void cfqg_stats_update_idle_time(struct cfq_group *cfqg)
555 {
556 struct cfqg_stats *stats = &cfqg->stats;
557
558 if (cfqg_stats_idling(stats)) {
559 unsigned long long now = sched_clock();
560
561 if (time_after64(now, stats->start_idle_time))
562 blkg_stat_add(&stats->idle_time,
563 now - stats->start_idle_time);
564 cfqg_stats_clear_idling(stats);
565 }
566 }
567
568 static void cfqg_stats_set_start_idle_time(struct cfq_group *cfqg)
569 {
570 struct cfqg_stats *stats = &cfqg->stats;
571
572 BUG_ON(cfqg_stats_idling(stats));
573
574 stats->start_idle_time = sched_clock();
575 cfqg_stats_mark_idling(stats);
576 }
577
578 static void cfqg_stats_update_avg_queue_size(struct cfq_group *cfqg)
579 {
580 struct cfqg_stats *stats = &cfqg->stats;
581
582 blkg_stat_add(&stats->avg_queue_size_sum,
583 blkg_rwstat_total(&stats->queued));
584 blkg_stat_add(&stats->avg_queue_size_samples, 1);
585 cfqg_stats_update_group_wait_time(stats);
586 }
587
588 #else /* CONFIG_CFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */
589
590 static inline void cfqg_stats_set_start_group_wait_time(struct cfq_group *cfqg, struct cfq_group *curr_cfqg) { }
591 static inline void cfqg_stats_end_empty_time(struct cfqg_stats *stats) { }
592 static inline void cfqg_stats_update_dequeue(struct cfq_group *cfqg) { }
593 static inline void cfqg_stats_set_start_empty_time(struct cfq_group *cfqg) { }
594 static inline void cfqg_stats_update_idle_time(struct cfq_group *cfqg) { }
595 static inline void cfqg_stats_set_start_idle_time(struct cfq_group *cfqg) { }
596 static inline void cfqg_stats_update_avg_queue_size(struct cfq_group *cfqg) { }
597
598 #endif /* CONFIG_CFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */
599
600 #ifdef CONFIG_CFQ_GROUP_IOSCHED
601
602 static inline struct cfq_group *pd_to_cfqg(struct blkg_policy_data *pd)
603 {
604 return pd ? container_of(pd, struct cfq_group, pd) : NULL;
605 }
606
607 static struct cfq_group_data
608 *cpd_to_cfqgd(struct blkcg_policy_data *cpd)
609 {
610 return cpd ? container_of(cpd, struct cfq_group_data, cpd) : NULL;
611 }
612
613 static inline struct blkcg_gq *cfqg_to_blkg(struct cfq_group *cfqg)
614 {
615 return pd_to_blkg(&cfqg->pd);
616 }
617
618 static struct blkcg_policy blkcg_policy_cfq;
619
620 static inline struct cfq_group *blkg_to_cfqg(struct blkcg_gq *blkg)
621 {
622 return pd_to_cfqg(blkg_to_pd(blkg, &blkcg_policy_cfq));
623 }
624
625 static struct cfq_group_data *blkcg_to_cfqgd(struct blkcg *blkcg)
626 {
627 return cpd_to_cfqgd(blkcg_to_cpd(blkcg, &blkcg_policy_cfq));
628 }
629
630 static inline struct cfq_group *cfqg_parent(struct cfq_group *cfqg)
631 {
632 struct blkcg_gq *pblkg = cfqg_to_blkg(cfqg)->parent;
633
634 return pblkg ? blkg_to_cfqg(pblkg) : NULL;
635 }
636
637 static inline void cfqg_get(struct cfq_group *cfqg)
638 {
639 return blkg_get(cfqg_to_blkg(cfqg));
640 }
641
642 static inline void cfqg_put(struct cfq_group *cfqg)
643 {
644 return blkg_put(cfqg_to_blkg(cfqg));
645 }
646
647 #define cfq_log_cfqq(cfqd, cfqq, fmt, args...) do { \
648 char __pbuf[128]; \
649 \
650 blkg_path(cfqg_to_blkg((cfqq)->cfqg), __pbuf, sizeof(__pbuf)); \
651 blk_add_trace_msg((cfqd)->queue, "cfq%d%c%c %s " fmt, (cfqq)->pid, \
652 cfq_cfqq_sync((cfqq)) ? 'S' : 'A', \
653 cfqq_type((cfqq)) == SYNC_NOIDLE_WORKLOAD ? 'N' : ' ',\
654 __pbuf, ##args); \
655 } while (0)
656
657 #define cfq_log_cfqg(cfqd, cfqg, fmt, args...) do { \
658 char __pbuf[128]; \
659 \
660 blkg_path(cfqg_to_blkg(cfqg), __pbuf, sizeof(__pbuf)); \
661 blk_add_trace_msg((cfqd)->queue, "%s " fmt, __pbuf, ##args); \
662 } while (0)
663
664 static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg,
665 struct cfq_group *curr_cfqg, int rw)
666 {
667 blkg_rwstat_add(&cfqg->stats.queued, rw, 1);
668 cfqg_stats_end_empty_time(&cfqg->stats);
669 cfqg_stats_set_start_group_wait_time(cfqg, curr_cfqg);
670 }
671
672 static inline void cfqg_stats_update_timeslice_used(struct cfq_group *cfqg,
673 unsigned long time, unsigned long unaccounted_time)
674 {
675 blkg_stat_add(&cfqg->stats.time, time);
676 #ifdef CONFIG_DEBUG_BLK_CGROUP
677 blkg_stat_add(&cfqg->stats.unaccounted_time, unaccounted_time);
678 #endif
679 }
680
681 static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg, int rw)
682 {
683 blkg_rwstat_add(&cfqg->stats.queued, rw, -1);
684 }
685
686 static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg, int rw)
687 {
688 blkg_rwstat_add(&cfqg->stats.merged, rw, 1);
689 }
690
691 static inline void cfqg_stats_update_dispatch(struct cfq_group *cfqg,
692 uint64_t bytes, int rw)
693 {
694 blkg_stat_add(&cfqg->stats.sectors, bytes >> 9);
695 }
696
697 static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
698 uint64_t start_time, uint64_t io_start_time, int rw)
699 {
700 struct cfqg_stats *stats = &cfqg->stats;
701 unsigned long long now = sched_clock();
702
703 if (time_after64(now, io_start_time))
704 blkg_rwstat_add(&stats->service_time, rw, now - io_start_time);
705 if (time_after64(io_start_time, start_time))
706 blkg_rwstat_add(&stats->wait_time, rw,
707 io_start_time - start_time);
708 }
709
710 /* @stats = 0 */
711 static void cfqg_stats_reset(struct cfqg_stats *stats)
712 {
713 /* queued stats shouldn't be cleared */
714 blkg_rwstat_reset(&stats->merged);
715 blkg_rwstat_reset(&stats->service_time);
716 blkg_rwstat_reset(&stats->wait_time);
717 blkg_stat_reset(&stats->time);
718 #ifdef CONFIG_DEBUG_BLK_CGROUP
719 blkg_stat_reset(&stats->unaccounted_time);
720 blkg_stat_reset(&stats->avg_queue_size_sum);
721 blkg_stat_reset(&stats->avg_queue_size_samples);
722 blkg_stat_reset(&stats->dequeue);
723 blkg_stat_reset(&stats->group_wait_time);
724 blkg_stat_reset(&stats->idle_time);
725 blkg_stat_reset(&stats->empty_time);
726 #endif
727 }
728
729 /* @to += @from */
730 static void cfqg_stats_add_aux(struct cfqg_stats *to, struct cfqg_stats *from)
731 {
732 /* queued stats shouldn't be cleared */
733 blkg_rwstat_add_aux(&to->merged, &from->merged);
734 blkg_rwstat_add_aux(&to->service_time, &from->service_time);
735 blkg_rwstat_add_aux(&to->wait_time, &from->wait_time);
736 blkg_stat_add_aux(&from->time, &from->time);
737 #ifdef CONFIG_DEBUG_BLK_CGROUP
738 blkg_stat_add_aux(&to->unaccounted_time, &from->unaccounted_time);
739 blkg_stat_add_aux(&to->avg_queue_size_sum, &from->avg_queue_size_sum);
740 blkg_stat_add_aux(&to->avg_queue_size_samples, &from->avg_queue_size_samples);
741 blkg_stat_add_aux(&to->dequeue, &from->dequeue);
742 blkg_stat_add_aux(&to->group_wait_time, &from->group_wait_time);
743 blkg_stat_add_aux(&to->idle_time, &from->idle_time);
744 blkg_stat_add_aux(&to->empty_time, &from->empty_time);
745 #endif
746 }
747
748 /*
749 * Transfer @cfqg's stats to its parent's aux counts so that the ancestors'
750 * recursive stats can still account for the amount used by this cfqg after
751 * it's gone.
752 */
753 static void cfqg_stats_xfer_dead(struct cfq_group *cfqg)
754 {
755 struct cfq_group *parent = cfqg_parent(cfqg);
756
757 lockdep_assert_held(cfqg_to_blkg(cfqg)->q->queue_lock);
758
759 if (unlikely(!parent))
760 return;
761
762 cfqg_stats_add_aux(&parent->stats, &cfqg->stats);
763 cfqg_stats_reset(&cfqg->stats);
764 }
765
766 #else /* CONFIG_CFQ_GROUP_IOSCHED */
767
768 static inline struct cfq_group *cfqg_parent(struct cfq_group *cfqg) { return NULL; }
769 static inline void cfqg_get(struct cfq_group *cfqg) { }
770 static inline void cfqg_put(struct cfq_group *cfqg) { }
771
772 #define cfq_log_cfqq(cfqd, cfqq, fmt, args...) \
773 blk_add_trace_msg((cfqd)->queue, "cfq%d%c%c " fmt, (cfqq)->pid, \
774 cfq_cfqq_sync((cfqq)) ? 'S' : 'A', \
775 cfqq_type((cfqq)) == SYNC_NOIDLE_WORKLOAD ? 'N' : ' ',\
776 ##args)
777 #define cfq_log_cfqg(cfqd, cfqg, fmt, args...) do {} while (0)
778
779 static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg,
780 struct cfq_group *curr_cfqg, int rw) { }
781 static inline void cfqg_stats_update_timeslice_used(struct cfq_group *cfqg,
782 unsigned long time, unsigned long unaccounted_time) { }
783 static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg, int rw) { }
784 static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg, int rw) { }
785 static inline void cfqg_stats_update_dispatch(struct cfq_group *cfqg,
786 uint64_t bytes, int rw) { }
787 static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
788 uint64_t start_time, uint64_t io_start_time, int rw) { }
789
790 #endif /* CONFIG_CFQ_GROUP_IOSCHED */
791
792 #define cfq_log(cfqd, fmt, args...) \
793 blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)
794
795 /* Traverses through cfq group service trees */
796 #define for_each_cfqg_st(cfqg, i, j, st) \
797 for (i = 0; i <= IDLE_WORKLOAD; i++) \
798 for (j = 0, st = i < IDLE_WORKLOAD ? &cfqg->service_trees[i][j]\
799 : &cfqg->service_tree_idle; \
800 (i < IDLE_WORKLOAD && j <= SYNC_WORKLOAD) || \
801 (i == IDLE_WORKLOAD && j == 0); \
802 j++, st = i < IDLE_WORKLOAD ? \
803 &cfqg->service_trees[i][j]: NULL) \
804
805 static inline bool cfq_io_thinktime_big(struct cfq_data *cfqd,
806 struct cfq_ttime *ttime, bool group_idle)
807 {
808 unsigned long slice;
809 if (!sample_valid(ttime->ttime_samples))
810 return false;
811 if (group_idle)
812 slice = cfqd->cfq_group_idle;
813 else
814 slice = cfqd->cfq_slice_idle;
815 return ttime->ttime_mean > slice;
816 }
817
818 static inline bool iops_mode(struct cfq_data *cfqd)
819 {
820 /*
821 * If we are not idling on queues and it is a NCQ drive, parallel
822 * execution of requests is on and measuring time is not possible
823 * in most of the cases until and unless we drive shallower queue
824 * depths and that becomes a performance bottleneck. In such cases
825 * switch to start providing fairness in terms of number of IOs.
826 */
827 if (!cfqd->cfq_slice_idle && cfqd->hw_tag)
828 return true;
829 else
830 return false;
831 }
832
833 static inline enum wl_class_t cfqq_class(struct cfq_queue *cfqq)
834 {
835 if (cfq_class_idle(cfqq))
836 return IDLE_WORKLOAD;
837 if (cfq_class_rt(cfqq))
838 return RT_WORKLOAD;
839 return BE_WORKLOAD;
840 }
841
842
843 static enum wl_type_t cfqq_type(struct cfq_queue *cfqq)
844 {
845 if (!cfq_cfqq_sync(cfqq))
846 return ASYNC_WORKLOAD;
847 if (!cfq_cfqq_idle_window(cfqq))
848 return SYNC_NOIDLE_WORKLOAD;
849 return SYNC_WORKLOAD;
850 }
851
852 static inline int cfq_group_busy_queues_wl(enum wl_class_t wl_class,
853 struct cfq_data *cfqd,
854 struct cfq_group *cfqg)
855 {
856 if (wl_class == IDLE_WORKLOAD)
857 return cfqg->service_tree_idle.count;
858
859 return cfqg->service_trees[wl_class][ASYNC_WORKLOAD].count +
860 cfqg->service_trees[wl_class][SYNC_NOIDLE_WORKLOAD].count +
861 cfqg->service_trees[wl_class][SYNC_WORKLOAD].count;
862 }
863
864 static inline int cfqg_busy_async_queues(struct cfq_data *cfqd,
865 struct cfq_group *cfqg)
866 {
867 return cfqg->service_trees[RT_WORKLOAD][ASYNC_WORKLOAD].count +
868 cfqg->service_trees[BE_WORKLOAD][ASYNC_WORKLOAD].count;
869 }
870
871 static void cfq_dispatch_insert(struct request_queue *, struct request *);
872 static struct cfq_queue *cfq_get_queue(struct cfq_data *cfqd, bool is_sync,
873 struct cfq_io_cq *cic, struct bio *bio);
874
875 static inline struct cfq_io_cq *icq_to_cic(struct io_cq *icq)
876 {
877 /* cic->icq is the first member, %NULL will convert to %NULL */
878 return container_of(icq, struct cfq_io_cq, icq);
879 }
880
881 static inline struct cfq_io_cq *cfq_cic_lookup(struct cfq_data *cfqd,
882 struct io_context *ioc)
883 {
884 if (ioc)
885 return icq_to_cic(ioc_lookup_icq(ioc, cfqd->queue));
886 return NULL;
887 }
888
889 static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_cq *cic, bool is_sync)
890 {
891 return cic->cfqq[is_sync];
892 }
893
894 static inline void cic_set_cfqq(struct cfq_io_cq *cic, struct cfq_queue *cfqq,
895 bool is_sync)
896 {
897 cic->cfqq[is_sync] = cfqq;
898 }
899
900 static inline struct cfq_data *cic_to_cfqd(struct cfq_io_cq *cic)
901 {
902 return cic->icq.q->elevator->elevator_data;
903 }
904
905 /*
906 * We regard a request as SYNC, if it's either a read or has the SYNC bit
907 * set (in which case it could also be direct WRITE).
908 */
909 static inline bool cfq_bio_sync(struct bio *bio)
910 {
911 return bio_data_dir(bio) == READ || (bio->bi_rw & REQ_SYNC);
912 }
913
914 /*
915 * scheduler run of queue, if there are requests pending and no one in the
916 * driver that will restart queueing
917 */
918 static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
919 {
920 if (cfqd->busy_queues) {
921 cfq_log(cfqd, "schedule dispatch");
922 kblockd_schedule_work(&cfqd->unplug_work);
923 }
924 }
925
926 /*
927 * Scale schedule slice based on io priority. Use the sync time slice only
928 * if a queue is marked sync and has sync io queued. A sync queue with async
929 * io only, should not get full sync slice length.
930 */
931 static inline int cfq_prio_slice(struct cfq_data *cfqd, bool sync,
932 unsigned short prio)
933 {
934 const int base_slice = cfqd->cfq_slice[sync];
935
936 WARN_ON(prio >= IOPRIO_BE_NR);
937
938 return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - prio));
939 }
940
941 static inline int
942 cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
943 {
944 return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio);
945 }
946
947 /**
948 * cfqg_scale_charge - scale disk time charge according to cfqg weight
949 * @charge: disk time being charged
950 * @vfraction: vfraction of the cfqg, fixed point w/ CFQ_SERVICE_SHIFT
951 *
952 * Scale @charge according to @vfraction, which is in range (0, 1]. The
953 * scaling is inversely proportional.
954 *
955 * scaled = charge / vfraction
956 *
957 * The result is also in fixed point w/ CFQ_SERVICE_SHIFT.
958 */
959 static inline u64 cfqg_scale_charge(unsigned long charge,
960 unsigned int vfraction)
961 {
962 u64 c = charge << CFQ_SERVICE_SHIFT; /* make it fixed point */
963
964 /* charge / vfraction */
965 c <<= CFQ_SERVICE_SHIFT;
966 do_div(c, vfraction);
967 return c;
968 }
969
970 static inline u64 max_vdisktime(u64 min_vdisktime, u64 vdisktime)
971 {
972 s64 delta = (s64)(vdisktime - min_vdisktime);
973 if (delta > 0)
974 min_vdisktime = vdisktime;
975
976 return min_vdisktime;
977 }
978
979 static inline u64 min_vdisktime(u64 min_vdisktime, u64 vdisktime)
980 {
981 s64 delta = (s64)(vdisktime - min_vdisktime);
982 if (delta < 0)
983 min_vdisktime = vdisktime;
984
985 return min_vdisktime;
986 }
987
988 static void update_min_vdisktime(struct cfq_rb_root *st)
989 {
990 struct cfq_group *cfqg;
991
992 if (st->left) {
993 cfqg = rb_entry_cfqg(st->left);
994 st->min_vdisktime = max_vdisktime(st->min_vdisktime,
995 cfqg->vdisktime);
996 }
997 }
998
999 /*
1000 * get averaged number of queues of RT/BE priority.
1001 * average is updated, with a formula that gives more weight to higher numbers,
1002 * to quickly follows sudden increases and decrease slowly
1003 */
1004
1005 static inline unsigned cfq_group_get_avg_queues(struct cfq_data *cfqd,
1006 struct cfq_group *cfqg, bool rt)
1007 {
1008 unsigned min_q, max_q;
1009 unsigned mult = cfq_hist_divisor - 1;
1010 unsigned round = cfq_hist_divisor / 2;
1011 unsigned busy = cfq_group_busy_queues_wl(rt, cfqd, cfqg);
1012
1013 min_q = min(cfqg->busy_queues_avg[rt], busy);
1014 max_q = max(cfqg->busy_queues_avg[rt], busy);
1015 cfqg->busy_queues_avg[rt] = (mult * max_q + min_q + round) /
1016 cfq_hist_divisor;
1017 return cfqg->busy_queues_avg[rt];
1018 }
1019
1020 static inline unsigned
1021 cfq_group_slice(struct cfq_data *cfqd, struct cfq_group *cfqg)
1022 {
1023 return cfqd->cfq_target_latency * cfqg->vfraction >> CFQ_SERVICE_SHIFT;
1024 }
1025
1026 static inline unsigned
1027 cfq_scaled_cfqq_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1028 {
1029 unsigned slice = cfq_prio_to_slice(cfqd, cfqq);
1030 if (cfqd->cfq_latency) {
1031 /*
1032 * interested queues (we consider only the ones with the same
1033 * priority class in the cfq group)
1034 */
1035 unsigned iq = cfq_group_get_avg_queues(cfqd, cfqq->cfqg,
1036 cfq_class_rt(cfqq));
1037 unsigned sync_slice = cfqd->cfq_slice[1];
1038 unsigned expect_latency = sync_slice * iq;
1039 unsigned group_slice = cfq_group_slice(cfqd, cfqq->cfqg);
1040
1041 if (expect_latency > group_slice) {
1042 unsigned base_low_slice = 2 * cfqd->cfq_slice_idle;
1043 /* scale low_slice according to IO priority
1044 * and sync vs async */
1045 unsigned low_slice =
1046 min(slice, base_low_slice * slice / sync_slice);
1047 /* the adapted slice value is scaled to fit all iqs
1048 * into the target latency */
1049 slice = max(slice * group_slice / expect_latency,
1050 low_slice);
1051 }
1052 }
1053 return slice;
1054 }
1055
1056 static inline void
1057 cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1058 {
1059 unsigned slice = cfq_scaled_cfqq_slice(cfqd, cfqq);
1060
1061 cfqq->slice_start = jiffies;
1062 cfqq->slice_end = jiffies + slice;
1063 cfqq->allocated_slice = slice;
1064 cfq_log_cfqq(cfqd, cfqq, "set_slice=%lu", cfqq->slice_end - jiffies);
1065 }
1066
1067 /*
1068 * We need to wrap this check in cfq_cfqq_slice_new(), since ->slice_end
1069 * isn't valid until the first request from the dispatch is activated
1070 * and the slice time set.
1071 */
1072 static inline bool cfq_slice_used(struct cfq_queue *cfqq)
1073 {
1074 if (cfq_cfqq_slice_new(cfqq))
1075 return false;
1076 if (time_before(jiffies, cfqq->slice_end))
1077 return false;
1078
1079 return true;
1080 }
1081
1082 /*
1083 * Lifted from AS - choose which of rq1 and rq2 that is best served now.
1084 * We choose the request that is closest to the head right now. Distance
1085 * behind the head is penalized and only allowed to a certain extent.
1086 */
1087 static struct request *
1088 cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2, sector_t last)
1089 {
1090 sector_t s1, s2, d1 = 0, d2 = 0;
1091 unsigned long back_max;
1092 #define CFQ_RQ1_WRAP 0x01 /* request 1 wraps */
1093 #define CFQ_RQ2_WRAP 0x02 /* request 2 wraps */
1094 unsigned wrap = 0; /* bit mask: requests behind the disk head? */
1095
1096 if (rq1 == NULL || rq1 == rq2)
1097 return rq2;
1098 if (rq2 == NULL)
1099 return rq1;
1100
1101 if (rq_is_sync(rq1) != rq_is_sync(rq2))
1102 return rq_is_sync(rq1) ? rq1 : rq2;
1103
1104 if ((rq1->cmd_flags ^ rq2->cmd_flags) & REQ_PRIO)
1105 return rq1->cmd_flags & REQ_PRIO ? rq1 : rq2;
1106
1107 s1 = blk_rq_pos(rq1);
1108 s2 = blk_rq_pos(rq2);
1109
1110 /*
1111 * by definition, 1KiB is 2 sectors
1112 */
1113 back_max = cfqd->cfq_back_max * 2;
1114
1115 /*
1116 * Strict one way elevator _except_ in the case where we allow
1117 * short backward seeks which are biased as twice the cost of a
1118 * similar forward seek.
1119 */
1120 if (s1 >= last)
1121 d1 = s1 - last;
1122 else if (s1 + back_max >= last)
1123 d1 = (last - s1) * cfqd->cfq_back_penalty;
1124 else
1125 wrap |= CFQ_RQ1_WRAP;
1126
1127 if (s2 >= last)
1128 d2 = s2 - last;
1129 else if (s2 + back_max >= last)
1130 d2 = (last - s2) * cfqd->cfq_back_penalty;
1131 else
1132 wrap |= CFQ_RQ2_WRAP;
1133
1134 /* Found required data */
1135
1136 /*
1137 * By doing switch() on the bit mask "wrap" we avoid having to
1138 * check two variables for all permutations: --> faster!
1139 */
1140 switch (wrap) {
1141 case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
1142 if (d1 < d2)
1143 return rq1;
1144 else if (d2 < d1)
1145 return rq2;
1146 else {
1147 if (s1 >= s2)
1148 return rq1;
1149 else
1150 return rq2;
1151 }
1152
1153 case CFQ_RQ2_WRAP:
1154 return rq1;
1155 case CFQ_RQ1_WRAP:
1156 return rq2;
1157 case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP): /* both rqs wrapped */
1158 default:
1159 /*
1160 * Since both rqs are wrapped,
1161 * start with the one that's further behind head
1162 * (--> only *one* back seek required),
1163 * since back seek takes more time than forward.
1164 */
1165 if (s1 <= s2)
1166 return rq1;
1167 else
1168 return rq2;
1169 }
1170 }
1171
1172 /*
1173 * The below is leftmost cache rbtree addon
1174 */
1175 static struct cfq_queue *cfq_rb_first(struct cfq_rb_root *root)
1176 {
1177 /* Service tree is empty */
1178 if (!root->count)
1179 return NULL;
1180
1181 if (!root->left)
1182 root->left = rb_first(&root->rb);
1183
1184 if (root->left)
1185 return rb_entry(root->left, struct cfq_queue, rb_node);
1186
1187 return NULL;
1188 }
1189
1190 static struct cfq_group *cfq_rb_first_group(struct cfq_rb_root *root)
1191 {
1192 if (!root->left)
1193 root->left = rb_first(&root->rb);
1194
1195 if (root->left)
1196 return rb_entry_cfqg(root->left);
1197
1198 return NULL;
1199 }
1200
1201 static void rb_erase_init(struct rb_node *n, struct rb_root *root)
1202 {
1203 rb_erase(n, root);
1204 RB_CLEAR_NODE(n);
1205 }
1206
1207 static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root)
1208 {
1209 if (root->left == n)
1210 root->left = NULL;
1211 rb_erase_init(n, &root->rb);
1212 --root->count;
1213 }
1214
1215 /*
1216 * would be nice to take fifo expire time into account as well
1217 */
1218 static struct request *
1219 cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1220 struct request *last)
1221 {
1222 struct rb_node *rbnext = rb_next(&last->rb_node);
1223 struct rb_node *rbprev = rb_prev(&last->rb_node);
1224 struct request *next = NULL, *prev = NULL;
1225
1226 BUG_ON(RB_EMPTY_NODE(&last->rb_node));
1227
1228 if (rbprev)
1229 prev = rb_entry_rq(rbprev);
1230
1231 if (rbnext)
1232 next = rb_entry_rq(rbnext);
1233 else {
1234 rbnext = rb_first(&cfqq->sort_list);
1235 if (rbnext && rbnext != &last->rb_node)
1236 next = rb_entry_rq(rbnext);
1237 }
1238
1239 return cfq_choose_req(cfqd, next, prev, blk_rq_pos(last));
1240 }
1241
1242 static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
1243 struct cfq_queue *cfqq)
1244 {
1245 /*
1246 * just an approximation, should be ok.
1247 */
1248 return (cfqq->cfqg->nr_cfqq - 1) * (cfq_prio_slice(cfqd, 1, 0) -
1249 cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio));
1250 }
1251
1252 static inline s64
1253 cfqg_key(struct cfq_rb_root *st, struct cfq_group *cfqg)
1254 {
1255 return cfqg->vdisktime - st->min_vdisktime;
1256 }
1257
1258 static void
1259 __cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
1260 {
1261 struct rb_node **node = &st->rb.rb_node;
1262 struct rb_node *parent = NULL;
1263 struct cfq_group *__cfqg;
1264 s64 key = cfqg_key(st, cfqg);
1265 int left = 1;
1266
1267 while (*node != NULL) {
1268 parent = *node;
1269 __cfqg = rb_entry_cfqg(parent);
1270
1271 if (key < cfqg_key(st, __cfqg))
1272 node = &parent->rb_left;
1273 else {
1274 node = &parent->rb_right;
1275 left = 0;
1276 }
1277 }
1278
1279 if (left)
1280 st->left = &cfqg->rb_node;
1281
1282 rb_link_node(&cfqg->rb_node, parent, node);
1283 rb_insert_color(&cfqg->rb_node, &st->rb);
1284 }
1285
1286 /*
1287 * This has to be called only on activation of cfqg
1288 */
1289 static void
1290 cfq_update_group_weight(struct cfq_group *cfqg)
1291 {
1292 if (cfqg->new_weight) {
1293 cfqg->weight = cfqg->new_weight;
1294 cfqg->new_weight = 0;
1295 }
1296 }
1297
1298 static void
1299 cfq_update_group_leaf_weight(struct cfq_group *cfqg)
1300 {
1301 BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
1302
1303 if (cfqg->new_leaf_weight) {
1304 cfqg->leaf_weight = cfqg->new_leaf_weight;
1305 cfqg->new_leaf_weight = 0;
1306 }
1307 }
1308
1309 static void
1310 cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
1311 {
1312 unsigned int vfr = 1 << CFQ_SERVICE_SHIFT; /* start with 1 */
1313 struct cfq_group *pos = cfqg;
1314 struct cfq_group *parent;
1315 bool propagate;
1316
1317 /* add to the service tree */
1318 BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
1319
1320 /*
1321 * Update leaf_weight. We cannot update weight at this point
1322 * because cfqg might already have been activated and is
1323 * contributing its current weight to the parent's child_weight.
1324 */
1325 cfq_update_group_leaf_weight(cfqg);
1326 __cfq_group_service_tree_add(st, cfqg);
1327
1328 /*
1329 * Activate @cfqg and calculate the portion of vfraction @cfqg is
1330 * entitled to. vfraction is calculated by walking the tree
1331 * towards the root calculating the fraction it has at each level.
1332 * The compounded ratio is how much vfraction @cfqg owns.
1333 *
1334 * Start with the proportion tasks in this cfqg has against active
1335 * children cfqgs - its leaf_weight against children_weight.
1336 */
1337 propagate = !pos->nr_active++;
1338 pos->children_weight += pos->leaf_weight;
1339 vfr = vfr * pos->leaf_weight / pos->children_weight;
1340
1341 /*
1342 * Compound ->weight walking up the tree. Both activation and
1343 * vfraction calculation are done in the same loop. Propagation
1344 * stops once an already activated node is met. vfraction
1345 * calculation should always continue to the root.
1346 */
1347 while ((parent = cfqg_parent(pos))) {
1348 if (propagate) {
1349 cfq_update_group_weight(pos);
1350 propagate = !parent->nr_active++;
1351 parent->children_weight += pos->weight;
1352 }
1353 vfr = vfr * pos->weight / parent->children_weight;
1354 pos = parent;
1355 }
1356
1357 cfqg->vfraction = max_t(unsigned, vfr, 1);
1358 }
1359
1360 static void
1361 cfq_group_notify_queue_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
1362 {
1363 struct cfq_rb_root *st = &cfqd->grp_service_tree;
1364 struct cfq_group *__cfqg;
1365 struct rb_node *n;
1366
1367 cfqg->nr_cfqq++;
1368 if (!RB_EMPTY_NODE(&cfqg->rb_node))
1369 return;
1370
1371 /*
1372 * Currently put the group at the end. Later implement something
1373 * so that groups get lesser vtime based on their weights, so that
1374 * if group does not loose all if it was not continuously backlogged.
1375 */
1376 n = rb_last(&st->rb);
1377 if (n) {
1378 __cfqg = rb_entry_cfqg(n);
1379 cfqg->vdisktime = __cfqg->vdisktime + CFQ_IDLE_DELAY;
1380 } else
1381 cfqg->vdisktime = st->min_vdisktime;
1382 cfq_group_service_tree_add(st, cfqg);
1383 }
1384
1385 static void
1386 cfq_group_service_tree_del(struct cfq_rb_root *st, struct cfq_group *cfqg)
1387 {
1388 struct cfq_group *pos = cfqg;
1389 bool propagate;
1390
1391 /*
1392 * Undo activation from cfq_group_service_tree_add(). Deactivate
1393 * @cfqg and propagate deactivation upwards.
1394 */
1395 propagate = !--pos->nr_active;
1396 pos->children_weight -= pos->leaf_weight;
1397
1398 while (propagate) {
1399 struct cfq_group *parent = cfqg_parent(pos);
1400
1401 /* @pos has 0 nr_active at this point */
1402 WARN_ON_ONCE(pos->children_weight);
1403 pos->vfraction = 0;
1404
1405 if (!parent)
1406 break;
1407
1408 propagate = !--parent->nr_active;
1409 parent->children_weight -= pos->weight;
1410 pos = parent;
1411 }
1412
1413 /* remove from the service tree */
1414 if (!RB_EMPTY_NODE(&cfqg->rb_node))
1415 cfq_rb_erase(&cfqg->rb_node, st);
1416 }
1417
1418 static void
1419 cfq_group_notify_queue_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
1420 {
1421 struct cfq_rb_root *st = &cfqd->grp_service_tree;
1422
1423 BUG_ON(cfqg->nr_cfqq < 1);
1424 cfqg->nr_cfqq--;
1425
1426 /* If there are other cfq queues under this group, don't delete it */
1427 if (cfqg->nr_cfqq)
1428 return;
1429
1430 cfq_log_cfqg(cfqd, cfqg, "del_from_rr group");
1431 cfq_group_service_tree_del(st, cfqg);
1432 cfqg->saved_wl_slice = 0;
1433 cfqg_stats_update_dequeue(cfqg);
1434 }
1435
1436 static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq,
1437 unsigned int *unaccounted_time)
1438 {
1439 unsigned int slice_used;
1440
1441 /*
1442 * Queue got expired before even a single request completed or
1443 * got expired immediately after first request completion.
1444 */
1445 if (!cfqq->slice_start || cfqq->slice_start == jiffies) {
1446 /*
1447 * Also charge the seek time incurred to the group, otherwise
1448 * if there are mutiple queues in the group, each can dispatch
1449 * a single request on seeky media and cause lots of seek time
1450 * and group will never know it.
1451 */
1452 slice_used = max_t(unsigned, (jiffies - cfqq->dispatch_start),
1453 1);
1454 } else {
1455 slice_used = jiffies - cfqq->slice_start;
1456 if (slice_used > cfqq->allocated_slice) {
1457 *unaccounted_time = slice_used - cfqq->allocated_slice;
1458 slice_used = cfqq->allocated_slice;
1459 }
1460 if (time_after(cfqq->slice_start, cfqq->dispatch_start))
1461 *unaccounted_time += cfqq->slice_start -
1462 cfqq->dispatch_start;
1463 }
1464
1465 return slice_used;
1466 }
1467
1468 static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
1469 struct cfq_queue *cfqq)
1470 {
1471 struct cfq_rb_root *st = &cfqd->grp_service_tree;
1472 unsigned int used_sl, charge, unaccounted_sl = 0;
1473 int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg)
1474 - cfqg->service_tree_idle.count;
1475 unsigned int vfr;
1476
1477 BUG_ON(nr_sync < 0);
1478 used_sl = charge = cfq_cfqq_slice_usage(cfqq, &unaccounted_sl);
1479
1480 if (iops_mode(cfqd))
1481 charge = cfqq->slice_dispatch;
1482 else if (!cfq_cfqq_sync(cfqq) && !nr_sync)
1483 charge = cfqq->allocated_slice;
1484
1485 /*
1486 * Can't update vdisktime while on service tree and cfqg->vfraction
1487 * is valid only while on it. Cache vfr, leave the service tree,
1488 * update vdisktime and go back on. The re-addition to the tree
1489 * will also update the weights as necessary.
1490 */
1491 vfr = cfqg->vfraction;
1492 cfq_group_service_tree_del(st, cfqg);
1493 cfqg->vdisktime += cfqg_scale_charge(charge, vfr);
1494 cfq_group_service_tree_add(st, cfqg);
1495
1496 /* This group is being expired. Save the context */
1497 if (time_after(cfqd->workload_expires, jiffies)) {
1498 cfqg->saved_wl_slice = cfqd->workload_expires
1499 - jiffies;
1500 cfqg->saved_wl_type = cfqd->serving_wl_type;
1501 cfqg->saved_wl_class = cfqd->serving_wl_class;
1502 } else
1503 cfqg->saved_wl_slice = 0;
1504
1505 cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime,
1506 st->min_vdisktime);
1507 cfq_log_cfqq(cfqq->cfqd, cfqq,
1508 "sl_used=%u disp=%u charge=%u iops=%u sect=%lu",
1509 used_sl, cfqq->slice_dispatch, charge,
1510 iops_mode(cfqd), cfqq->nr_sectors);
1511 cfqg_stats_update_timeslice_used(cfqg, used_sl, unaccounted_sl);
1512 cfqg_stats_set_start_empty_time(cfqg);
1513 }
1514
1515 /**
1516 * cfq_init_cfqg_base - initialize base part of a cfq_group
1517 * @cfqg: cfq_group to initialize
1518 *
1519 * Initialize the base part which is used whether %CONFIG_CFQ_GROUP_IOSCHED
1520 * is enabled or not.
1521 */
1522 static void cfq_init_cfqg_base(struct cfq_group *cfqg)
1523 {
1524 struct cfq_rb_root *st;
1525 int i, j;
1526
1527 for_each_cfqg_st(cfqg, i, j, st)
1528 *st = CFQ_RB_ROOT;
1529 RB_CLEAR_NODE(&cfqg->rb_node);
1530
1531 cfqg->ttime.last_end_request = jiffies;
1532 }
1533
1534 #ifdef CONFIG_CFQ_GROUP_IOSCHED
1535 static void cfqg_stats_exit(struct cfqg_stats *stats)
1536 {
1537 blkg_rwstat_exit(&stats->merged);
1538 blkg_rwstat_exit(&stats->service_time);
1539 blkg_rwstat_exit(&stats->wait_time);
1540 blkg_rwstat_exit(&stats->queued);
1541
1542 blkg_stat_exit(&stats->sectors);
1543 blkg_stat_exit(&stats->time);
1544 #ifdef CONFIG_DEBUG_BLK_CGROUP
1545 blkg_stat_exit(&stats->unaccounted_time);
1546 blkg_stat_exit(&stats->avg_queue_size_sum);
1547 blkg_stat_exit(&stats->avg_queue_size_samples);
1548 blkg_stat_exit(&stats->dequeue);
1549 blkg_stat_exit(&stats->group_wait_time);
1550 blkg_stat_exit(&stats->idle_time);
1551 blkg_stat_exit(&stats->empty_time);
1552 #endif
1553 }
1554
1555 static int cfqg_stats_init(struct cfqg_stats *stats, gfp_t gfp)
1556 {
1557 if (blkg_rwstat_init(&stats->merged, gfp) ||
1558 blkg_rwstat_init(&stats->service_time, gfp) ||
1559 blkg_rwstat_init(&stats->wait_time, gfp) ||
1560 blkg_rwstat_init(&stats->queued, gfp) ||
1561
1562 blkg_stat_init(&stats->sectors, gfp) ||
1563 blkg_stat_init(&stats->time, gfp))
1564 goto err;
1565
1566 #ifdef CONFIG_DEBUG_BLK_CGROUP
1567 if (blkg_stat_init(&stats->unaccounted_time, gfp) ||
1568 blkg_stat_init(&stats->avg_queue_size_sum, gfp) ||
1569 blkg_stat_init(&stats->avg_queue_size_samples, gfp) ||
1570 blkg_stat_init(&stats->dequeue, gfp) ||
1571 blkg_stat_init(&stats->group_wait_time, gfp) ||
1572 blkg_stat_init(&stats->idle_time, gfp) ||
1573 blkg_stat_init(&stats->empty_time, gfp))
1574 goto err;
1575 #endif
1576 return 0;
1577 err:
1578 cfqg_stats_exit(stats);
1579 return -ENOMEM;
1580 }
1581
1582 static struct blkcg_policy_data *cfq_cpd_alloc(gfp_t gfp)
1583 {
1584 struct cfq_group_data *cgd;
1585
1586 cgd = kzalloc(sizeof(*cgd), GFP_KERNEL);
1587 if (!cgd)
1588 return NULL;
1589 return &cgd->cpd;
1590 }
1591
1592 static void cfq_cpd_init(struct blkcg_policy_data *cpd)
1593 {
1594 struct cfq_group_data *cgd = cpd_to_cfqgd(cpd);
1595
1596 if (cpd_to_blkcg(cpd) == &blkcg_root) {
1597 cgd->weight = 2 * CFQ_WEIGHT_DEFAULT;
1598 cgd->leaf_weight = 2 * CFQ_WEIGHT_DEFAULT;
1599 } else {
1600 cgd->weight = CFQ_WEIGHT_DEFAULT;
1601 cgd->leaf_weight = CFQ_WEIGHT_DEFAULT;
1602 }
1603 }
1604
1605 static void cfq_cpd_free(struct blkcg_policy_data *cpd)
1606 {
1607 kfree(cpd_to_cfqgd(cpd));
1608 }
1609
1610 static struct blkg_policy_data *cfq_pd_alloc(gfp_t gfp, int node)
1611 {
1612 struct cfq_group *cfqg;
1613
1614 cfqg = kzalloc_node(sizeof(*cfqg), gfp, node);
1615 if (!cfqg)
1616 return NULL;
1617
1618 cfq_init_cfqg_base(cfqg);
1619 if (cfqg_stats_init(&cfqg->stats, gfp)) {
1620 kfree(cfqg);
1621 return NULL;
1622 }
1623
1624 return &cfqg->pd;
1625 }
1626
1627 static void cfq_pd_init(struct blkg_policy_data *pd)
1628 {
1629 struct cfq_group *cfqg = pd_to_cfqg(pd);
1630 struct cfq_group_data *cgd = blkcg_to_cfqgd(pd->blkg->blkcg);
1631
1632 cfqg->weight = cgd->weight;
1633 cfqg->leaf_weight = cgd->leaf_weight;
1634 }
1635
1636 static void cfq_pd_offline(struct blkg_policy_data *pd)
1637 {
1638 struct cfq_group *cfqg = pd_to_cfqg(pd);
1639 int i;
1640
1641 for (i = 0; i < IOPRIO_BE_NR; i++) {
1642 if (cfqg->async_cfqq[0][i])
1643 cfq_put_queue(cfqg->async_cfqq[0][i]);
1644 if (cfqg->async_cfqq[1][i])
1645 cfq_put_queue(cfqg->async_cfqq[1][i]);
1646 }
1647
1648 if (cfqg->async_idle_cfqq)
1649 cfq_put_queue(cfqg->async_idle_cfqq);
1650
1651 /*
1652 * @blkg is going offline and will be ignored by
1653 * blkg_[rw]stat_recursive_sum(). Transfer stats to the parent so
1654 * that they don't get lost. If IOs complete after this point, the
1655 * stats for them will be lost. Oh well...
1656 */
1657 cfqg_stats_xfer_dead(cfqg);
1658 }
1659
1660 static void cfq_pd_free(struct blkg_policy_data *pd)
1661 {
1662 struct cfq_group *cfqg = pd_to_cfqg(pd);
1663
1664 cfqg_stats_exit(&cfqg->stats);
1665 return kfree(cfqg);
1666 }
1667
1668 static void cfq_pd_reset_stats(struct blkg_policy_data *pd)
1669 {
1670 struct cfq_group *cfqg = pd_to_cfqg(pd);
1671
1672 cfqg_stats_reset(&cfqg->stats);
1673 }
1674
1675 static struct cfq_group *cfq_lookup_cfqg(struct cfq_data *cfqd,
1676 struct blkcg *blkcg)
1677 {
1678 struct blkcg_gq *blkg;
1679
1680 blkg = blkg_lookup(blkcg, cfqd->queue);
1681 if (likely(blkg))
1682 return blkg_to_cfqg(blkg);
1683 return NULL;
1684 }
1685
1686 static void cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg)
1687 {
1688 cfqq->cfqg = cfqg;
1689 /* cfqq reference on cfqg */
1690 cfqg_get(cfqg);
1691 }
1692
1693 static u64 cfqg_prfill_weight_device(struct seq_file *sf,
1694 struct blkg_policy_data *pd, int off)
1695 {
1696 struct cfq_group *cfqg = pd_to_cfqg(pd);
1697
1698 if (!cfqg->dev_weight)
1699 return 0;
1700 return __blkg_prfill_u64(sf, pd, cfqg->dev_weight);
1701 }
1702
1703 static int cfqg_print_weight_device(struct seq_file *sf, void *v)
1704 {
1705 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1706 cfqg_prfill_weight_device, &blkcg_policy_cfq,
1707 0, false);
1708 return 0;
1709 }
1710
1711 static u64 cfqg_prfill_leaf_weight_device(struct seq_file *sf,
1712 struct blkg_policy_data *pd, int off)
1713 {
1714 struct cfq_group *cfqg = pd_to_cfqg(pd);
1715
1716 if (!cfqg->dev_leaf_weight)
1717 return 0;
1718 return __blkg_prfill_u64(sf, pd, cfqg->dev_leaf_weight);
1719 }
1720
1721 static int cfqg_print_leaf_weight_device(struct seq_file *sf, void *v)
1722 {
1723 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1724 cfqg_prfill_leaf_weight_device, &blkcg_policy_cfq,
1725 0, false);
1726 return 0;
1727 }
1728
1729 static int cfq_print_weight(struct seq_file *sf, void *v)
1730 {
1731 struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
1732 struct cfq_group_data *cgd = blkcg_to_cfqgd(blkcg);
1733 unsigned int val = 0;
1734
1735 if (cgd)
1736 val = cgd->weight;
1737
1738 seq_printf(sf, "%u\n", val);
1739 return 0;
1740 }
1741
1742 static int cfq_print_leaf_weight(struct seq_file *sf, void *v)
1743 {
1744 struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
1745 struct cfq_group_data *cgd = blkcg_to_cfqgd(blkcg);
1746 unsigned int val = 0;
1747
1748 if (cgd)
1749 val = cgd->leaf_weight;
1750
1751 seq_printf(sf, "%u\n", val);
1752 return 0;
1753 }
1754
1755 static ssize_t __cfqg_set_weight_device(struct kernfs_open_file *of,
1756 char *buf, size_t nbytes, loff_t off,
1757 bool is_leaf_weight)
1758 {
1759 struct blkcg *blkcg = css_to_blkcg(of_css(of));
1760 struct blkg_conf_ctx ctx;
1761 struct cfq_group *cfqg;
1762 struct cfq_group_data *cfqgd;
1763 int ret;
1764
1765 ret = blkg_conf_prep(blkcg, &blkcg_policy_cfq, buf, &ctx);
1766 if (ret)
1767 return ret;
1768
1769 ret = -EINVAL;
1770 cfqg = blkg_to_cfqg(ctx.blkg);
1771 cfqgd = blkcg_to_cfqgd(blkcg);
1772 if (!cfqg || !cfqgd)
1773 goto err;
1774
1775 if (!ctx.v || (ctx.v >= CFQ_WEIGHT_MIN && ctx.v <= CFQ_WEIGHT_MAX)) {
1776 if (!is_leaf_weight) {
1777 cfqg->dev_weight = ctx.v;
1778 cfqg->new_weight = ctx.v ?: cfqgd->weight;
1779 } else {
1780 cfqg->dev_leaf_weight = ctx.v;
1781 cfqg->new_leaf_weight = ctx.v ?: cfqgd->leaf_weight;
1782 }
1783 ret = 0;
1784 }
1785
1786 err:
1787 blkg_conf_finish(&ctx);
1788 return ret ?: nbytes;
1789 }
1790
1791 static ssize_t cfqg_set_weight_device(struct kernfs_open_file *of,
1792 char *buf, size_t nbytes, loff_t off)
1793 {
1794 return __cfqg_set_weight_device(of, buf, nbytes, off, false);
1795 }
1796
1797 static ssize_t cfqg_set_leaf_weight_device(struct kernfs_open_file *of,
1798 char *buf, size_t nbytes, loff_t off)
1799 {
1800 return __cfqg_set_weight_device(of, buf, nbytes, off, true);
1801 }
1802
1803 static int __cfq_set_weight(struct cgroup_subsys_state *css, struct cftype *cft,
1804 u64 val, bool is_leaf_weight)
1805 {
1806 struct blkcg *blkcg = css_to_blkcg(css);
1807 struct blkcg_gq *blkg;
1808 struct cfq_group_data *cfqgd;
1809 int ret = 0;
1810
1811 if (val < CFQ_WEIGHT_MIN || val > CFQ_WEIGHT_MAX)
1812 return -EINVAL;
1813
1814 spin_lock_irq(&blkcg->lock);
1815 cfqgd = blkcg_to_cfqgd(blkcg);
1816 if (!cfqgd) {
1817 ret = -EINVAL;
1818 goto out;
1819 }
1820
1821 if (!is_leaf_weight)
1822 cfqgd->weight = val;
1823 else
1824 cfqgd->leaf_weight = val;
1825
1826 hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
1827 struct cfq_group *cfqg = blkg_to_cfqg(blkg);
1828
1829 if (!cfqg)
1830 continue;
1831
1832 if (!is_leaf_weight) {
1833 if (!cfqg->dev_weight)
1834 cfqg->new_weight = cfqgd->weight;
1835 } else {
1836 if (!cfqg->dev_leaf_weight)
1837 cfqg->new_leaf_weight = cfqgd->leaf_weight;
1838 }
1839 }
1840
1841 out:
1842 spin_unlock_irq(&blkcg->lock);
1843 return ret;
1844 }
1845
1846 static int cfq_set_weight(struct cgroup_subsys_state *css, struct cftype *cft,
1847 u64 val)
1848 {
1849 return __cfq_set_weight(css, cft, val, false);
1850 }
1851
1852 static int cfq_set_leaf_weight(struct cgroup_subsys_state *css,
1853 struct cftype *cft, u64 val)
1854 {
1855 return __cfq_set_weight(css, cft, val, true);
1856 }
1857
1858 static int cfqg_print_stat(struct seq_file *sf, void *v)
1859 {
1860 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_stat,
1861 &blkcg_policy_cfq, seq_cft(sf)->private, false);
1862 return 0;
1863 }
1864
1865 static int cfqg_print_rwstat(struct seq_file *sf, void *v)
1866 {
1867 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_rwstat,
1868 &blkcg_policy_cfq, seq_cft(sf)->private, true);
1869 return 0;
1870 }
1871
1872 static u64 cfqg_prfill_stat_recursive(struct seq_file *sf,
1873 struct blkg_policy_data *pd, int off)
1874 {
1875 u64 sum = blkg_stat_recursive_sum(pd_to_blkg(pd),
1876 &blkcg_policy_cfq, off);
1877 return __blkg_prfill_u64(sf, pd, sum);
1878 }
1879
1880 static u64 cfqg_prfill_rwstat_recursive(struct seq_file *sf,
1881 struct blkg_policy_data *pd, int off)
1882 {
1883 struct blkg_rwstat sum = blkg_rwstat_recursive_sum(pd_to_blkg(pd),
1884 &blkcg_policy_cfq, off);
1885 return __blkg_prfill_rwstat(sf, pd, &sum);
1886 }
1887
1888 static int cfqg_print_stat_recursive(struct seq_file *sf, void *v)
1889 {
1890 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1891 cfqg_prfill_stat_recursive, &blkcg_policy_cfq,
1892 seq_cft(sf)->private, false);
1893 return 0;
1894 }
1895
1896 static int cfqg_print_rwstat_recursive(struct seq_file *sf, void *v)
1897 {
1898 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1899 cfqg_prfill_rwstat_recursive, &blkcg_policy_cfq,
1900 seq_cft(sf)->private, true);
1901 return 0;
1902 }
1903
1904 #ifdef CONFIG_DEBUG_BLK_CGROUP
1905 static u64 cfqg_prfill_avg_queue_size(struct seq_file *sf,
1906 struct blkg_policy_data *pd, int off)
1907 {
1908 struct cfq_group *cfqg = pd_to_cfqg(pd);
1909 u64 samples = blkg_stat_read(&cfqg->stats.avg_queue_size_samples);
1910 u64 v = 0;
1911
1912 if (samples) {
1913 v = blkg_stat_read(&cfqg->stats.avg_queue_size_sum);
1914 v = div64_u64(v, samples);
1915 }
1916 __blkg_prfill_u64(sf, pd, v);
1917 return 0;
1918 }
1919
1920 /* print avg_queue_size */
1921 static int cfqg_print_avg_queue_size(struct seq_file *sf, void *v)
1922 {
1923 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1924 cfqg_prfill_avg_queue_size, &blkcg_policy_cfq,
1925 0, false);
1926 return 0;
1927 }
1928 #endif /* CONFIG_DEBUG_BLK_CGROUP */
1929
1930 static struct cftype cfq_blkcg_files[] = {
1931 /* on root, weight is mapped to leaf_weight */
1932 {
1933 .name = "weight_device",
1934 .flags = CFTYPE_ONLY_ON_ROOT,
1935 .seq_show = cfqg_print_leaf_weight_device,
1936 .write = cfqg_set_leaf_weight_device,
1937 },
1938 {
1939 .name = "weight",
1940 .flags = CFTYPE_ONLY_ON_ROOT,
1941 .seq_show = cfq_print_leaf_weight,
1942 .write_u64 = cfq_set_leaf_weight,
1943 },
1944
1945 /* no such mapping necessary for !roots */
1946 {
1947 .name = "weight_device",
1948 .flags = CFTYPE_NOT_ON_ROOT,
1949 .seq_show = cfqg_print_weight_device,
1950 .write = cfqg_set_weight_device,
1951 },
1952 {
1953 .name = "weight",
1954 .flags = CFTYPE_NOT_ON_ROOT,
1955 .seq_show = cfq_print_weight,
1956 .write_u64 = cfq_set_weight,
1957 },
1958
1959 {
1960 .name = "leaf_weight_device",
1961 .seq_show = cfqg_print_leaf_weight_device,
1962 .write = cfqg_set_leaf_weight_device,
1963 },
1964 {
1965 .name = "leaf_weight",
1966 .seq_show = cfq_print_leaf_weight,
1967 .write_u64 = cfq_set_leaf_weight,
1968 },
1969
1970 /* statistics, covers only the tasks in the cfqg */
1971 {
1972 .name = "time",
1973 .private = offsetof(struct cfq_group, stats.time),
1974 .seq_show = cfqg_print_stat,
1975 },
1976 {
1977 .name = "sectors",
1978 .private = offsetof(struct cfq_group, stats.sectors),
1979 .seq_show = cfqg_print_stat,
1980 },
1981 {
1982 .name = "io_service_bytes",
1983 .private = (unsigned long)&blkcg_policy_cfq,
1984 .seq_show = blkg_print_stat_bytes,
1985 },
1986 {
1987 .name = "io_serviced",
1988 .private = (unsigned long)&blkcg_policy_cfq,
1989 .seq_show = blkg_print_stat_ios,
1990 },
1991 {
1992 .name = "io_service_time",
1993 .private = offsetof(struct cfq_group, stats.service_time),
1994 .seq_show = cfqg_print_rwstat,
1995 },
1996 {
1997 .name = "io_wait_time",
1998 .private = offsetof(struct cfq_group, stats.wait_time),
1999 .seq_show = cfqg_print_rwstat,
2000 },
2001 {
2002 .name = "io_merged",
2003 .private = offsetof(struct cfq_group, stats.merged),
2004 .seq_show = cfqg_print_rwstat,
2005 },
2006 {
2007 .name = "io_queued",
2008 .private = offsetof(struct cfq_group, stats.queued),
2009 .seq_show = cfqg_print_rwstat,
2010 },
2011
2012 /* the same statictics which cover the cfqg and its descendants */
2013 {
2014 .name = "time_recursive",
2015 .private = offsetof(struct cfq_group, stats.time),
2016 .seq_show = cfqg_print_stat_recursive,
2017 },
2018 {
2019 .name = "sectors_recursive",
2020 .private = offsetof(struct cfq_group, stats.sectors),
2021 .seq_show = cfqg_print_stat_recursive,
2022 },
2023 {
2024 .name = "io_service_bytes_recursive",
2025 .private = (unsigned long)&blkcg_policy_cfq,
2026 .seq_show = blkg_print_stat_bytes_recursive,
2027 },
2028 {
2029 .name = "io_serviced_recursive",
2030 .private = (unsigned long)&blkcg_policy_cfq,
2031 .seq_show = blkg_print_stat_ios_recursive,
2032 },
2033 {
2034 .name = "io_service_time_recursive",
2035 .private = offsetof(struct cfq_group, stats.service_time),
2036 .seq_show = cfqg_print_rwstat_recursive,
2037 },
2038 {
2039 .name = "io_wait_time_recursive",
2040 .private = offsetof(struct cfq_group, stats.wait_time),
2041 .seq_show = cfqg_print_rwstat_recursive,
2042 },
2043 {
2044 .name = "io_merged_recursive",
2045 .private = offsetof(struct cfq_group, stats.merged),
2046 .seq_show = cfqg_print_rwstat_recursive,
2047 },
2048 {
2049 .name = "io_queued_recursive",
2050 .private = offsetof(struct cfq_group, stats.queued),
2051 .seq_show = cfqg_print_rwstat_recursive,
2052 },
2053 #ifdef CONFIG_DEBUG_BLK_CGROUP
2054 {
2055 .name = "avg_queue_size",
2056 .seq_show = cfqg_print_avg_queue_size,
2057 },
2058 {
2059 .name = "group_wait_time",
2060 .private = offsetof(struct cfq_group, stats.group_wait_time),
2061 .seq_show = cfqg_print_stat,
2062 },
2063 {
2064 .name = "idle_time",
2065 .private = offsetof(struct cfq_group, stats.idle_time),
2066 .seq_show = cfqg_print_stat,
2067 },
2068 {
2069 .name = "empty_time",
2070 .private = offsetof(struct cfq_group, stats.empty_time),
2071 .seq_show = cfqg_print_stat,
2072 },
2073 {
2074 .name = "dequeue",
2075 .private = offsetof(struct cfq_group, stats.dequeue),
2076 .seq_show = cfqg_print_stat,
2077 },
2078 {
2079 .name = "unaccounted_time",
2080 .private = offsetof(struct cfq_group, stats.unaccounted_time),
2081 .seq_show = cfqg_print_stat,
2082 },
2083 #endif /* CONFIG_DEBUG_BLK_CGROUP */
2084 { } /* terminate */
2085 };
2086 #else /* GROUP_IOSCHED */
2087 static struct cfq_group *cfq_lookup_cfqg(struct cfq_data *cfqd,
2088 struct blkcg *blkcg)
2089 {
2090 return cfqd->root_group;
2091 }
2092
2093 static inline void
2094 cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg) {
2095 cfqq->cfqg = cfqg;
2096 }
2097
2098 #endif /* GROUP_IOSCHED */
2099
2100 /*
2101 * The cfqd->service_trees holds all pending cfq_queue's that have
2102 * requests waiting to be processed. It is sorted in the order that
2103 * we will service the queues.
2104 */
2105 static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
2106 bool add_front)
2107 {
2108 struct rb_node **p, *parent;
2109 struct cfq_queue *__cfqq;
2110 unsigned long rb_key;
2111 struct cfq_rb_root *st;
2112 int left;
2113 int new_cfqq = 1;
2114
2115 st = st_for(cfqq->cfqg, cfqq_class(cfqq), cfqq_type(cfqq));
2116 if (cfq_class_idle(cfqq)) {
2117 rb_key = CFQ_IDLE_DELAY;
2118 parent = rb_last(&st->rb);
2119 if (parent && parent != &cfqq->rb_node) {
2120 __cfqq = rb_entry(parent, struct cfq_queue, rb_node);
2121 rb_key += __cfqq->rb_key;
2122 } else
2123 rb_key += jiffies;
2124 } else if (!add_front) {
2125 /*
2126 * Get our rb key offset. Subtract any residual slice
2127 * value carried from last service. A negative resid
2128 * count indicates slice overrun, and this should position
2129 * the next service time further away in the tree.
2130 */
2131 rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies;
2132 rb_key -= cfqq->slice_resid;
2133 cfqq->slice_resid = 0;
2134 } else {
2135 rb_key = -HZ;
2136 __cfqq = cfq_rb_first(st);
2137 rb_key += __cfqq ? __cfqq->rb_key : jiffies;
2138 }
2139
2140 if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
2141 new_cfqq = 0;
2142 /*
2143 * same position, nothing more to do
2144 */
2145 if (rb_key == cfqq->rb_key && cfqq->service_tree == st)
2146 return;
2147
2148 cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
2149 cfqq->service_tree = NULL;
2150 }
2151
2152 left = 1;
2153 parent = NULL;
2154 cfqq->service_tree = st;
2155 p = &st->rb.rb_node;
2156 while (*p) {
2157 parent = *p;
2158 __cfqq = rb_entry(parent, struct cfq_queue, rb_node);
2159
2160 /*
2161 * sort by key, that represents service time.
2162 */
2163 if (time_before(rb_key, __cfqq->rb_key))
2164 p = &parent->rb_left;
2165 else {
2166 p = &parent->rb_right;
2167 left = 0;
2168 }
2169 }
2170
2171 if (left)
2172 st->left = &cfqq->rb_node;
2173
2174 cfqq->rb_key = rb_key;
2175 rb_link_node(&cfqq->rb_node, parent, p);
2176 rb_insert_color(&cfqq->rb_node, &st->rb);
2177 st->count++;
2178 if (add_front || !new_cfqq)
2179 return;
2180 cfq_group_notify_queue_add(cfqd, cfqq->cfqg);
2181 }
2182
2183 static struct cfq_queue *
2184 cfq_prio_tree_lookup(struct cfq_data *cfqd, struct rb_root *root,
2185 sector_t sector, struct rb_node **ret_parent,
2186 struct rb_node ***rb_link)
2187 {
2188 struct rb_node **p, *parent;
2189 struct cfq_queue *cfqq = NULL;
2190
2191 parent = NULL;
2192 p = &root->rb_node;
2193 while (*p) {
2194 struct rb_node **n;
2195
2196 parent = *p;
2197 cfqq = rb_entry(parent, struct cfq_queue, p_node);
2198
2199 /*
2200 * Sort strictly based on sector. Smallest to the left,
2201 * largest to the right.
2202 */
2203 if (sector > blk_rq_pos(cfqq->next_rq))
2204 n = &(*p)->rb_right;
2205 else if (sector < blk_rq_pos(cfqq->next_rq))
2206 n = &(*p)->rb_left;
2207 else
2208 break;
2209 p = n;
2210 cfqq = NULL;
2211 }
2212
2213 *ret_parent = parent;
2214 if (rb_link)
2215 *rb_link = p;
2216 return cfqq;
2217 }
2218
2219 static void cfq_prio_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2220 {
2221 struct rb_node **p, *parent;
2222 struct cfq_queue *__cfqq;
2223
2224 if (cfqq->p_root) {
2225 rb_erase(&cfqq->p_node, cfqq->p_root);
2226 cfqq->p_root = NULL;
2227 }
2228
2229 if (cfq_class_idle(cfqq))
2230 return;
2231 if (!cfqq->next_rq)
2232 return;
2233
2234 cfqq->p_root = &cfqd->prio_trees[cfqq->org_ioprio];
2235 __cfqq = cfq_prio_tree_lookup(cfqd, cfqq->p_root,
2236 blk_rq_pos(cfqq->next_rq), &parent, &p);
2237 if (!__cfqq) {
2238 rb_link_node(&cfqq->p_node, parent, p);
2239 rb_insert_color(&cfqq->p_node, cfqq->p_root);
2240 } else
2241 cfqq->p_root = NULL;
2242 }
2243
2244 /*
2245 * Update cfqq's position in the service tree.
2246 */
2247 static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2248 {
2249 /*
2250 * Resorting requires the cfqq to be on the RR list already.
2251 */
2252 if (cfq_cfqq_on_rr(cfqq)) {
2253 cfq_service_tree_add(cfqd, cfqq, 0);
2254 cfq_prio_tree_add(cfqd, cfqq);
2255 }
2256 }
2257
2258 /*
2259 * add to busy list of queues for service, trying to be fair in ordering
2260 * the pending list according to last request service
2261 */
2262 static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2263 {
2264 cfq_log_cfqq(cfqd, cfqq, "add_to_rr");
2265 BUG_ON(cfq_cfqq_on_rr(cfqq));
2266 cfq_mark_cfqq_on_rr(cfqq);
2267 cfqd->busy_queues++;
2268 if (cfq_cfqq_sync(cfqq))
2269 cfqd->busy_sync_queues++;
2270
2271 cfq_resort_rr_list(cfqd, cfqq);
2272 }
2273
2274 /*
2275 * Called when the cfqq no longer has requests pending, remove it from
2276 * the service tree.
2277 */
2278 static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2279 {
2280 cfq_log_cfqq(cfqd, cfqq, "del_from_rr");
2281 BUG_ON(!cfq_cfqq_on_rr(cfqq));
2282 cfq_clear_cfqq_on_rr(cfqq);
2283
2284 if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
2285 cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
2286 cfqq->service_tree = NULL;
2287 }
2288 if (cfqq->p_root) {
2289 rb_erase(&cfqq->p_node, cfqq->p_root);
2290 cfqq->p_root = NULL;
2291 }
2292
2293 cfq_group_notify_queue_del(cfqd, cfqq->cfqg);
2294 BUG_ON(!cfqd->busy_queues);
2295 cfqd->busy_queues--;
2296 if (cfq_cfqq_sync(cfqq))
2297 cfqd->busy_sync_queues--;
2298 }
2299
2300 /*
2301 * rb tree support functions
2302 */
2303 static void cfq_del_rq_rb(struct request *rq)
2304 {
2305 struct cfq_queue *cfqq = RQ_CFQQ(rq);
2306 const int sync = rq_is_sync(rq);
2307
2308 BUG_ON(!cfqq->queued[sync]);
2309 cfqq->queued[sync]--;
2310
2311 elv_rb_del(&cfqq->sort_list, rq);
2312
2313 if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list)) {
2314 /*
2315 * Queue will be deleted from service tree when we actually
2316 * expire it later. Right now just remove it from prio tree
2317 * as it is empty.
2318 */
2319 if (cfqq->p_root) {
2320 rb_erase(&cfqq->p_node, cfqq->p_root);
2321 cfqq->p_root = NULL;
2322 }
2323 }
2324 }
2325
2326 static void cfq_add_rq_rb(struct request *rq)
2327 {
2328 struct cfq_queue *cfqq = RQ_CFQQ(rq);
2329 struct cfq_data *cfqd = cfqq->cfqd;
2330 struct request *prev;
2331
2332 cfqq->queued[rq_is_sync(rq)]++;
2333
2334 elv_rb_add(&cfqq->sort_list, rq);
2335
2336 if (!cfq_cfqq_on_rr(cfqq))
2337 cfq_add_cfqq_rr(cfqd, cfqq);
2338
2339 /*
2340 * check if this request is a better next-serve candidate
2341 */
2342 prev = cfqq->next_rq;
2343 cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq, cfqd->last_position);
2344
2345 /*
2346 * adjust priority tree position, if ->next_rq changes
2347 */
2348 if (prev != cfqq->next_rq)
2349 cfq_prio_tree_add(cfqd, cfqq);
2350
2351 BUG_ON(!cfqq->next_rq);
2352 }
2353
2354 static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
2355 {
2356 elv_rb_del(&cfqq->sort_list, rq);
2357 cfqq->queued[rq_is_sync(rq)]--;
2358 cfqg_stats_update_io_remove(RQ_CFQG(rq), rq->cmd_flags);
2359 cfq_add_rq_rb(rq);
2360 cfqg_stats_update_io_add(RQ_CFQG(rq), cfqq->cfqd->serving_group,
2361 rq->cmd_flags);
2362 }
2363
2364 static struct request *
2365 cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
2366 {
2367 struct task_struct *tsk = current;
2368 struct cfq_io_cq *cic;
2369 struct cfq_queue *cfqq;
2370
2371 cic = cfq_cic_lookup(cfqd, tsk->io_context);
2372 if (!cic)
2373 return NULL;
2374
2375 cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
2376 if (cfqq)
2377 return elv_rb_find(&cfqq->sort_list, bio_end_sector(bio));
2378
2379 return NULL;
2380 }
2381
2382 static void cfq_activate_request(struct request_queue *q, struct request *rq)
2383 {
2384 struct cfq_data *cfqd = q->elevator->elevator_data;
2385
2386 cfqd->rq_in_driver++;
2387 cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d",
2388 cfqd->rq_in_driver);
2389
2390 cfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
2391 }
2392
2393 static void cfq_deactivate_request(struct request_queue *q, struct request *rq)
2394 {
2395 struct cfq_data *cfqd = q->elevator->elevator_data;
2396
2397 WARN_ON(!cfqd->rq_in_driver);
2398 cfqd->rq_in_driver--;
2399 cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "deactivate rq, drv=%d",
2400 cfqd->rq_in_driver);
2401 }
2402
2403 static void cfq_remove_request(struct request *rq)
2404 {
2405 struct cfq_queue *cfqq = RQ_CFQQ(rq);
2406
2407 if (cfqq->next_rq == rq)
2408 cfqq->next_rq = cfq_find_next_rq(cfqq->cfqd, cfqq, rq);
2409
2410 list_del_init(&rq->queuelist);
2411 cfq_del_rq_rb(rq);
2412
2413 cfqq->cfqd->rq_queued--;
2414 cfqg_stats_update_io_remove(RQ_CFQG(rq), rq->cmd_flags);
2415 if (rq->cmd_flags & REQ_PRIO) {
2416 WARN_ON(!cfqq->prio_pending);
2417 cfqq->prio_pending--;
2418 }
2419 }
2420
2421 static int cfq_merge(struct request_queue *q, struct request **req,
2422 struct bio *bio)
2423 {
2424 struct cfq_data *cfqd = q->elevator->elevator_data;
2425 struct request *__rq;
2426
2427 __rq = cfq_find_rq_fmerge(cfqd, bio);
2428 if (__rq && elv_rq_merge_ok(__rq, bio)) {
2429 *req = __rq;
2430 return ELEVATOR_FRONT_MERGE;
2431 }
2432
2433 return ELEVATOR_NO_MERGE;
2434 }
2435
2436 static void cfq_merged_request(struct request_queue *q, struct request *req,
2437 int type)
2438 {
2439 if (type == ELEVATOR_FRONT_MERGE) {
2440 struct cfq_queue *cfqq = RQ_CFQQ(req);
2441
2442 cfq_reposition_rq_rb(cfqq, req);
2443 }
2444 }
2445
2446 static void cfq_bio_merged(struct request_queue *q, struct request *req,
2447 struct bio *bio)
2448 {
2449 cfqg_stats_update_io_merged(RQ_CFQG(req), bio->bi_rw);
2450 }
2451
2452 static void
2453 cfq_merged_requests(struct request_queue *q, struct request *rq,
2454 struct request *next)
2455 {
2456 struct cfq_queue *cfqq = RQ_CFQQ(rq);
2457 struct cfq_data *cfqd = q->elevator->elevator_data;
2458
2459 /*
2460 * reposition in fifo if next is older than rq
2461 */
2462 if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
2463 time_before(next->fifo_time, rq->fifo_time) &&
2464 cfqq == RQ_CFQQ(next)) {
2465 list_move(&rq->queuelist, &next->queuelist);
2466 rq->fifo_time = next->fifo_time;
2467 }
2468
2469 if (cfqq->next_rq == next)
2470 cfqq->next_rq = rq;
2471 cfq_remove_request(next);
2472 cfqg_stats_update_io_merged(RQ_CFQG(rq), next->cmd_flags);
2473
2474 cfqq = RQ_CFQQ(next);
2475 /*
2476 * all requests of this queue are merged to other queues, delete it
2477 * from the service tree. If it's the active_queue,
2478 * cfq_dispatch_requests() will choose to expire it or do idle
2479 */
2480 if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list) &&
2481 cfqq != cfqd->active_queue)
2482 cfq_del_cfqq_rr(cfqd, cfqq);
2483 }
2484
2485 static int cfq_allow_merge(struct request_queue *q, struct request *rq,
2486 struct bio *bio)
2487 {
2488 struct cfq_data *cfqd = q->elevator->elevator_data;
2489 struct cfq_io_cq *cic;
2490 struct cfq_queue *cfqq;
2491
2492 /*
2493 * Disallow merge of a sync bio into an async request.
2494 */
2495 if (cfq_bio_sync(bio) && !rq_is_sync(rq))
2496 return false;
2497
2498 /*
2499 * Lookup the cfqq that this bio will be queued with and allow
2500 * merge only if rq is queued there.
2501 */
2502 cic = cfq_cic_lookup(cfqd, current->io_context);
2503 if (!cic)
2504 return false;
2505
2506 cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
2507 return cfqq == RQ_CFQQ(rq);
2508 }
2509
2510 static inline void cfq_del_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2511 {
2512 del_timer(&cfqd->idle_slice_timer);
2513 cfqg_stats_update_idle_time(cfqq->cfqg);
2514 }
2515
2516 static void __cfq_set_active_queue(struct cfq_data *cfqd,
2517 struct cfq_queue *cfqq)
2518 {
2519 if (cfqq) {
2520 cfq_log_cfqq(cfqd, cfqq, "set_active wl_class:%d wl_type:%d",
2521 cfqd->serving_wl_class, cfqd->serving_wl_type);
2522 cfqg_stats_update_avg_queue_size(cfqq->cfqg);
2523 cfqq->slice_start = 0;
2524 cfqq->dispatch_start = jiffies;
2525 cfqq->allocated_slice = 0;
2526 cfqq->slice_end = 0;
2527 cfqq->slice_dispatch = 0;
2528 cfqq->nr_sectors = 0;
2529
2530 cfq_clear_cfqq_wait_request(cfqq);
2531 cfq_clear_cfqq_must_dispatch(cfqq);
2532 cfq_clear_cfqq_must_alloc_slice(cfqq);
2533 cfq_clear_cfqq_fifo_expire(cfqq);
2534 cfq_mark_cfqq_slice_new(cfqq);
2535
2536 cfq_del_timer(cfqd, cfqq);
2537 }
2538
2539 cfqd->active_queue = cfqq;
2540 }
2541
2542 /*
2543 * current cfqq expired its slice (or was too idle), select new one
2544 */
2545 static void
2546 __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
2547 bool timed_out)
2548 {
2549 cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out);
2550
2551 if (cfq_cfqq_wait_request(cfqq))
2552 cfq_del_timer(cfqd, cfqq);
2553
2554 cfq_clear_cfqq_wait_request(cfqq);
2555 cfq_clear_cfqq_wait_busy(cfqq);
2556
2557 /*
2558 * If this cfqq is shared between multiple processes, check to
2559 * make sure that those processes are still issuing I/Os within
2560 * the mean seek distance. If not, it may be time to break the
2561 * queues apart again.
2562 */
2563 if (cfq_cfqq_coop(cfqq) && CFQQ_SEEKY(cfqq))
2564 cfq_mark_cfqq_split_coop(cfqq);
2565
2566 /*
2567 * store what was left of this slice, if the queue idled/timed out
2568 */
2569 if (timed_out) {
2570 if (cfq_cfqq_slice_new(cfqq))
2571 cfqq->slice_resid = cfq_scaled_cfqq_slice(cfqd, cfqq);
2572 else
2573 cfqq->slice_resid = cfqq->slice_end - jiffies;
2574 cfq_log_cfqq(cfqd, cfqq, "resid=%ld", cfqq->slice_resid);
2575 }
2576
2577 cfq_group_served(cfqd, cfqq->cfqg, cfqq);
2578
2579 if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list))
2580 cfq_del_cfqq_rr(cfqd, cfqq);
2581
2582 cfq_resort_rr_list(cfqd, cfqq);
2583
2584 if (cfqq == cfqd->active_queue)
2585 cfqd->active_queue = NULL;
2586
2587 if (cfqd->active_cic) {
2588 put_io_context(cfqd->active_cic->icq.ioc);
2589 cfqd->active_cic = NULL;
2590 }
2591 }
2592
2593 static inline void cfq_slice_expired(struct cfq_data *cfqd, bool timed_out)
2594 {
2595 struct cfq_queue *cfqq = cfqd->active_queue;
2596
2597 if (cfqq)
2598 __cfq_slice_expired(cfqd, cfqq, timed_out);
2599 }
2600
2601 /*
2602 * Get next queue for service. Unless we have a queue preemption,
2603 * we'll simply select the first cfqq in the service tree.
2604 */
2605 static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
2606 {
2607 struct cfq_rb_root *st = st_for(cfqd->serving_group,
2608 cfqd->serving_wl_class, cfqd->serving_wl_type);
2609
2610 if (!cfqd->rq_queued)
2611 return NULL;
2612
2613 /* There is nothing to dispatch */
2614 if (!st)
2615 return NULL;
2616 if (RB_EMPTY_ROOT(&st->rb))
2617 return NULL;
2618 return cfq_rb_first(st);
2619 }
2620
2621 static struct cfq_queue *cfq_get_next_queue_forced(struct cfq_data *cfqd)
2622 {
2623 struct cfq_group *cfqg;
2624 struct cfq_queue *cfqq;
2625 int i, j;
2626 struct cfq_rb_root *st;
2627
2628 if (!cfqd->rq_queued)
2629 return NULL;
2630
2631 cfqg = cfq_get_next_cfqg(cfqd);
2632 if (!cfqg)
2633 return NULL;
2634
2635 for_each_cfqg_st(cfqg, i, j, st)
2636 if ((cfqq = cfq_rb_first(st)) != NULL)
2637 return cfqq;
2638 return NULL;
2639 }
2640
2641 /*
2642 * Get and set a new active queue for service.
2643 */
2644 static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd,
2645 struct cfq_queue *cfqq)
2646 {
2647 if (!cfqq)
2648 cfqq = cfq_get_next_queue(cfqd);
2649
2650 __cfq_set_active_queue(cfqd, cfqq);
2651 return cfqq;
2652 }
2653
2654 static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
2655 struct request *rq)
2656 {
2657 if (blk_rq_pos(rq) >= cfqd->last_position)
2658 return blk_rq_pos(rq) - cfqd->last_position;
2659 else
2660 return cfqd->last_position - blk_rq_pos(rq);
2661 }
2662
2663 static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq,
2664 struct request *rq)
2665 {
2666 return cfq_dist_from_last(cfqd, rq) <= CFQQ_CLOSE_THR;
2667 }
2668
2669 static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
2670 struct cfq_queue *cur_cfqq)
2671 {
2672 struct rb_root *root = &cfqd->prio_trees[cur_cfqq->org_ioprio];
2673 struct rb_node *parent, *node;
2674 struct cfq_queue *__cfqq;
2675 sector_t sector = cfqd->last_position;
2676
2677 if (RB_EMPTY_ROOT(root))
2678 return NULL;
2679
2680 /*
2681 * First, if we find a request starting at the end of the last
2682 * request, choose it.
2683 */
2684 __cfqq = cfq_prio_tree_lookup(cfqd, root, sector, &parent, NULL);
2685 if (__cfqq)
2686 return __cfqq;
2687
2688 /*
2689 * If the exact sector wasn't found, the parent of the NULL leaf
2690 * will contain the closest sector.
2691 */
2692 __cfqq = rb_entry(parent, struct cfq_queue, p_node);
2693 if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
2694 return __cfqq;
2695
2696 if (blk_rq_pos(__cfqq->next_rq) < sector)
2697 node = rb_next(&__cfqq->p_node);
2698 else
2699 node = rb_prev(&__cfqq->p_node);
2700 if (!node)
2701 return NULL;
2702
2703 __cfqq = rb_entry(node, struct cfq_queue, p_node);
2704 if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
2705 return __cfqq;
2706
2707 return NULL;
2708 }
2709
2710 /*
2711 * cfqd - obvious
2712 * cur_cfqq - passed in so that we don't decide that the current queue is
2713 * closely cooperating with itself.
2714 *
2715 * So, basically we're assuming that that cur_cfqq has dispatched at least
2716 * one request, and that cfqd->last_position reflects a position on the disk
2717 * associated with the I/O issued by cur_cfqq. I'm not sure this is a valid
2718 * assumption.
2719 */
2720 static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
2721 struct cfq_queue *cur_cfqq)
2722 {
2723 struct cfq_queue *cfqq;
2724
2725 if (cfq_class_idle(cur_cfqq))
2726 return NULL;
2727 if (!cfq_cfqq_sync(cur_cfqq))
2728 return NULL;
2729 if (CFQQ_SEEKY(cur_cfqq))
2730 return NULL;
2731
2732 /*
2733 * Don't search priority tree if it's the only queue in the group.
2734 */
2735 if (cur_cfqq->cfqg->nr_cfqq == 1)
2736 return NULL;
2737
2738 /*
2739 * We should notice if some of the queues are cooperating, eg
2740 * working closely on the same area of the disk. In that case,
2741 * we can group them together and don't waste time idling.
2742 */
2743 cfqq = cfqq_close(cfqd, cur_cfqq);
2744 if (!cfqq)
2745 return NULL;
2746
2747 /* If new queue belongs to different cfq_group, don't choose it */
2748 if (cur_cfqq->cfqg != cfqq->cfqg)
2749 return NULL;
2750
2751 /*
2752 * It only makes sense to merge sync queues.
2753 */
2754 if (!cfq_cfqq_sync(cfqq))
2755 return NULL;
2756 if (CFQQ_SEEKY(cfqq))
2757 return NULL;
2758
2759 /*
2760 * Do not merge queues of different priority classes
2761 */
2762 if (cfq_class_rt(cfqq) != cfq_class_rt(cur_cfqq))
2763 return NULL;
2764
2765 return cfqq;
2766 }
2767
2768 /*
2769 * Determine whether we should enforce idle window for this queue.
2770 */
2771
2772 static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2773 {
2774 enum wl_class_t wl_class = cfqq_class(cfqq);
2775 struct cfq_rb_root *st = cfqq->service_tree;
2776
2777 BUG_ON(!st);
2778 BUG_ON(!st->count);
2779
2780 if (!cfqd->cfq_slice_idle)
2781 return false;
2782
2783 /* We never do for idle class queues. */
2784 if (wl_class == IDLE_WORKLOAD)
2785 return false;
2786
2787 /* We do for queues that were marked with idle window flag. */
2788 if (cfq_cfqq_idle_window(cfqq) &&
2789 !(blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag))
2790 return true;
2791
2792 /*
2793 * Otherwise, we do only if they are the last ones
2794 * in their service tree.
2795 */
2796 if (st->count == 1 && cfq_cfqq_sync(cfqq) &&
2797 !cfq_io_thinktime_big(cfqd, &st->ttime, false))
2798 return true;
2799 cfq_log_cfqq(cfqd, cfqq, "Not idling. st->count:%d", st->count);
2800 return false;
2801 }
2802
2803 static void cfq_arm_slice_timer(struct cfq_data *cfqd)
2804 {
2805 struct cfq_queue *cfqq = cfqd->active_queue;
2806 struct cfq_io_cq *cic;
2807 unsigned long sl, group_idle = 0;
2808
2809 /*
2810 * SSD device without seek penalty, disable idling. But only do so
2811 * for devices that support queuing, otherwise we still have a problem
2812 * with sync vs async workloads.
2813 */
2814 if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag)
2815 return;
2816
2817 WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
2818 WARN_ON(cfq_cfqq_slice_new(cfqq));
2819
2820 /*
2821 * idle is disabled, either manually or by past process history
2822 */
2823 if (!cfq_should_idle(cfqd, cfqq)) {
2824 /* no queue idling. Check for group idling */
2825 if (cfqd->cfq_group_idle)
2826 group_idle = cfqd->cfq_group_idle;
2827 else
2828 return;
2829 }
2830
2831 /*
2832 * still active requests from this queue, don't idle
2833 */
2834 if (cfqq->dispatched)
2835 return;
2836
2837 /*
2838 * task has exited, don't wait
2839 */
2840 cic = cfqd->active_cic;
2841 if (!cic || !atomic_read(&cic->icq.ioc->active_ref))
2842 return;
2843
2844 /*
2845 * If our average think time is larger than the remaining time
2846 * slice, then don't idle. This avoids overrunning the allotted
2847 * time slice.
2848 */
2849 if (sample_valid(cic->ttime.ttime_samples) &&
2850 (cfqq->slice_end - jiffies < cic->ttime.ttime_mean)) {
2851 cfq_log_cfqq(cfqd, cfqq, "Not idling. think_time:%lu",
2852 cic->ttime.ttime_mean);
2853 return;
2854 }
2855
2856 /* There are other queues in the group, don't do group idle */
2857 if (group_idle && cfqq->cfqg->nr_cfqq > 1)
2858 return;
2859
2860 cfq_mark_cfqq_wait_request(cfqq);
2861
2862 if (group_idle)
2863 sl = cfqd->cfq_group_idle;
2864 else
2865 sl = cfqd->cfq_slice_idle;
2866
2867 mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
2868 cfqg_stats_set_start_idle_time(cfqq->cfqg);
2869 cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu group_idle: %d", sl,
2870 group_idle ? 1 : 0);
2871 }
2872
2873 /*
2874 * Move request from internal lists to the request queue dispatch list.
2875 */
2876 static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
2877 {
2878 struct cfq_data *cfqd = q->elevator->elevator_data;
2879 struct cfq_queue *cfqq = RQ_CFQQ(rq);
2880
2881 cfq_log_cfqq(cfqd, cfqq, "dispatch_insert");
2882
2883 cfqq->next_rq = cfq_find_next_rq(cfqd, cfqq, rq);
2884 cfq_remove_request(rq);
2885 cfqq->dispatched++;
2886 (RQ_CFQG(rq))->dispatched++;
2887 elv_dispatch_sort(q, rq);
2888
2889 cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++;
2890 cfqq->nr_sectors += blk_rq_sectors(rq);
2891 cfqg_stats_update_dispatch(cfqq->cfqg, blk_rq_bytes(rq), rq->cmd_flags);
2892 }
2893
2894 /*
2895 * return expired entry, or NULL to just start from scratch in rbtree
2896 */
2897 static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
2898 {
2899 struct request *rq = NULL;
2900
2901 if (cfq_cfqq_fifo_expire(cfqq))
2902 return NULL;
2903
2904 cfq_mark_cfqq_fifo_expire(cfqq);
2905
2906 if (list_empty(&cfqq->fifo))
2907 return NULL;
2908
2909 rq = rq_entry_fifo(cfqq->fifo.next);
2910 if (time_before(jiffies, rq->fifo_time))
2911 rq = NULL;
2912
2913 cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq);
2914 return rq;
2915 }
2916
2917 static inline int
2918 cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2919 {
2920 const int base_rq = cfqd->cfq_slice_async_rq;
2921
2922 WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);
2923
2924 return 2 * base_rq * (IOPRIO_BE_NR - cfqq->ioprio);
2925 }
2926
2927 /*
2928 * Must be called with the queue_lock held.
2929 */
2930 static int cfqq_process_refs(struct cfq_queue *cfqq)
2931 {
2932 int process_refs, io_refs;
2933
2934 io_refs = cfqq->allocated[READ] + cfqq->allocated[WRITE];
2935 process_refs = cfqq->ref - io_refs;
2936 BUG_ON(process_refs < 0);
2937 return process_refs;
2938 }
2939
2940 static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq)
2941 {
2942 int process_refs, new_process_refs;
2943 struct cfq_queue *__cfqq;
2944
2945 /*
2946 * If there are no process references on the new_cfqq, then it is
2947 * unsafe to follow the ->new_cfqq chain as other cfqq's in the
2948 * chain may have dropped their last reference (not just their
2949 * last process reference).
2950 */
2951 if (!cfqq_process_refs(new_cfqq))
2952 return;
2953
2954 /* Avoid a circular list and skip interim queue merges */
2955 while ((__cfqq = new_cfqq->new_cfqq)) {
2956 if (__cfqq == cfqq)
2957 return;
2958 new_cfqq = __cfqq;
2959 }
2960
2961 process_refs = cfqq_process_refs(cfqq);
2962 new_process_refs = cfqq_process_refs(new_cfqq);
2963 /*
2964 * If the process for the cfqq has gone away, there is no
2965 * sense in merging the queues.
2966 */
2967 if (process_refs == 0 || new_process_refs == 0)
2968 return;
2969
2970 /*
2971 * Merge in the direction of the lesser amount of work.
2972 */
2973 if (new_process_refs >= process_refs) {
2974 cfqq->new_cfqq = new_cfqq;
2975 new_cfqq->ref += process_refs;
2976 } else {
2977 new_cfqq->new_cfqq = cfqq;
2978 cfqq->ref += new_process_refs;
2979 }
2980 }
2981
2982 static enum wl_type_t cfq_choose_wl_type(struct cfq_data *cfqd,
2983 struct cfq_group *cfqg, enum wl_class_t wl_class)
2984 {
2985 struct cfq_queue *queue;
2986 int i;
2987 bool key_valid = false;
2988 unsigned long lowest_key = 0;
2989 enum wl_type_t cur_best = SYNC_NOIDLE_WORKLOAD;
2990
2991 for (i = 0; i <= SYNC_WORKLOAD; ++i) {
2992 /* select the one with lowest rb_key */
2993 queue = cfq_rb_first(st_for(cfqg, wl_class, i));
2994 if (queue &&
2995 (!key_valid || time_before(queue->rb_key, lowest_key))) {
2996 lowest_key = queue->rb_key;
2997 cur_best = i;
2998 key_valid = true;
2999 }
3000 }
3001
3002 return cur_best;
3003 }
3004
3005 static void
3006 choose_wl_class_and_type(struct cfq_data *cfqd, struct cfq_group *cfqg)
3007 {
3008 unsigned slice;
3009 unsigned count;
3010 struct cfq_rb_root *st;
3011 unsigned group_slice;
3012 enum wl_class_t original_class = cfqd->serving_wl_class;
3013
3014 /* Choose next priority. RT > BE > IDLE */
3015 if (cfq_group_busy_queues_wl(RT_WORKLOAD, cfqd, cfqg))
3016 cfqd->serving_wl_class = RT_WORKLOAD;
3017 else if (cfq_group_busy_queues_wl(BE_WORKLOAD, cfqd, cfqg))
3018 cfqd->serving_wl_class = BE_WORKLOAD;
3019 else {
3020 cfqd->serving_wl_class = IDLE_WORKLOAD;
3021 cfqd->workload_expires = jiffies + 1;
3022 return;
3023 }
3024
3025 if (original_class != cfqd->serving_wl_class)
3026 goto new_workload;
3027
3028 /*
3029 * For RT and BE, we have to choose also the type
3030 * (SYNC, SYNC_NOIDLE, ASYNC), and to compute a workload
3031 * expiration time
3032 */
3033 st = st_for(cfqg, cfqd->serving_wl_class, cfqd->serving_wl_type);
3034 count = st->count;
3035
3036 /*
3037 * check workload expiration, and that we still have other queues ready
3038 */
3039 if (count && !time_after(jiffies, cfqd->workload_expires))
3040 return;
3041
3042 new_workload:
3043 /* otherwise select new workload type */
3044 cfqd->serving_wl_type = cfq_choose_wl_type(cfqd, cfqg,
3045 cfqd->serving_wl_class);
3046 st = st_for(cfqg, cfqd->serving_wl_class, cfqd->serving_wl_type);
3047 count = st->count;
3048
3049 /*
3050 * the workload slice is computed as a fraction of target latency
3051 * proportional to the number of queues in that workload, over
3052 * all the queues in the same priority class
3053 */
3054 group_slice = cfq_group_slice(cfqd, cfqg);
3055
3056 slice = group_slice * count /
3057 max_t(unsigned, cfqg->busy_queues_avg[cfqd->serving_wl_class],
3058 cfq_group_busy_queues_wl(cfqd->serving_wl_class, cfqd,
3059 cfqg));
3060
3061 if (cfqd->serving_wl_type == ASYNC_WORKLOAD) {
3062 unsigned int tmp;
3063
3064 /*
3065 * Async queues are currently system wide. Just taking
3066 * proportion of queues with-in same group will lead to higher
3067 * async ratio system wide as generally root group is going
3068 * to have higher weight. A more accurate thing would be to
3069 * calculate system wide asnc/sync ratio.
3070 */
3071 tmp = cfqd->cfq_target_latency *
3072 cfqg_busy_async_queues(cfqd, cfqg);
3073 tmp = tmp/cfqd->busy_queues;
3074 slice = min_t(unsigned, slice, tmp);
3075
3076 /* async workload slice is scaled down according to
3077 * the sync/async slice ratio. */
3078 slice = slice * cfqd->cfq_slice[0] / cfqd->cfq_slice[1];
3079 } else
3080 /* sync workload slice is at least 2 * cfq_slice_idle */
3081 slice = max(slice, 2 * cfqd->cfq_slice_idle);
3082
3083 slice = max_t(unsigned, slice, CFQ_MIN_TT);
3084 cfq_log(cfqd, "workload slice:%d", slice);
3085 cfqd->workload_expires = jiffies + slice;
3086 }
3087
3088 static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd)
3089 {
3090 struct cfq_rb_root *st = &cfqd->grp_service_tree;
3091 struct cfq_group *cfqg;
3092
3093 if (RB_EMPTY_ROOT(&st->rb))
3094 return NULL;
3095 cfqg = cfq_rb_first_group(st);
3096 update_min_vdisktime(st);
3097 return cfqg;
3098 }
3099
3100 static void cfq_choose_cfqg(struct cfq_data *cfqd)
3101 {
3102 struct cfq_group *cfqg = cfq_get_next_cfqg(cfqd);
3103
3104 cfqd->serving_group = cfqg;
3105
3106 /* Restore the workload type data */
3107 if (cfqg->saved_wl_slice) {
3108 cfqd->workload_expires = jiffies + cfqg->saved_wl_slice;
3109 cfqd->serving_wl_type = cfqg->saved_wl_type;
3110 cfqd->serving_wl_class = cfqg->saved_wl_class;
3111 } else
3112 cfqd->workload_expires = jiffies - 1;
3113
3114 choose_wl_class_and_type(cfqd, cfqg);
3115 }
3116
3117 /*
3118 * Select a queue for service. If we have a current active queue,
3119 * check whether to continue servicing it, or retrieve and set a new one.
3120 */
3121 static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
3122 {
3123 struct cfq_queue *cfqq, *new_cfqq = NULL;
3124
3125 cfqq = cfqd->active_queue;
3126 if (!cfqq)
3127 goto new_queue;
3128
3129 if (!cfqd->rq_queued)
3130 return NULL;
3131
3132 /*
3133 * We were waiting for group to get backlogged. Expire the queue
3134 */
3135 if (cfq_cfqq_wait_busy(cfqq) && !RB_EMPTY_ROOT(&cfqq->sort_list))
3136 goto expire;
3137
3138 /*
3139 * The active queue has run out of time, expire it and select new.
3140 */
3141 if (cfq_slice_used(cfqq) && !cfq_cfqq_must_dispatch(cfqq)) {
3142 /*
3143 * If slice had not expired at the completion of last request
3144 * we might not have turned on wait_busy flag. Don't expire
3145 * the queue yet. Allow the group to get backlogged.
3146 *
3147 * The very fact that we have used the slice, that means we
3148 * have been idling all along on this queue and it should be
3149 * ok to wait for this request to complete.
3150 */
3151 if (cfqq->cfqg->nr_cfqq == 1 && RB_EMPTY_ROOT(&cfqq->sort_list)
3152 && cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
3153 cfqq = NULL;
3154 goto keep_queue;
3155 } else
3156 goto check_group_idle;
3157 }
3158
3159 /*
3160 * The active queue has requests and isn't expired, allow it to
3161 * dispatch.
3162 */
3163 if (!RB_EMPTY_ROOT(&cfqq->sort_list))
3164 goto keep_queue;
3165
3166 /*
3167 * If another queue has a request waiting within our mean seek
3168 * distance, let it run. The expire code will check for close
3169 * cooperators and put the close queue at the front of the service
3170 * tree. If possible, merge the expiring queue with the new cfqq.
3171 */
3172 new_cfqq = cfq_close_cooperator(cfqd, cfqq);
3173 if (new_cfqq) {
3174 if (!cfqq->new_cfqq)
3175 cfq_setup_merge(cfqq, new_cfqq);
3176 goto expire;
3177 }
3178
3179 /*
3180 * No requests pending. If the active queue still has requests in
3181 * flight or is idling for a new request, allow either of these
3182 * conditions to happen (or time out) before selecting a new queue.
3183 */
3184 if (timer_pending(&cfqd->idle_slice_timer)) {
3185 cfqq = NULL;
3186 goto keep_queue;
3187 }
3188
3189 /*
3190 * This is a deep seek queue, but the device is much faster than
3191 * the queue can deliver, don't idle
3192 **/
3193 if (CFQQ_SEEKY(cfqq) && cfq_cfqq_idle_window(cfqq) &&
3194 (cfq_cfqq_slice_new(cfqq) ||
3195 (cfqq->slice_end - jiffies > jiffies - cfqq->slice_start))) {
3196 cfq_clear_cfqq_deep(cfqq);
3197 cfq_clear_cfqq_idle_window(cfqq);
3198 }
3199
3200 if (cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
3201 cfqq = NULL;
3202 goto keep_queue;
3203 }
3204
3205 /*
3206 * If group idle is enabled and there are requests dispatched from
3207 * this group, wait for requests to complete.
3208 */
3209 check_group_idle:
3210 if (cfqd->cfq_group_idle && cfqq->cfqg->nr_cfqq == 1 &&
3211 cfqq->cfqg->dispatched &&
3212 !cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true)) {
3213 cfqq = NULL;
3214 goto keep_queue;
3215 }
3216
3217 expire:
3218 cfq_slice_expired(cfqd, 0);
3219 new_queue:
3220 /*
3221 * Current queue expired. Check if we have to switch to a new
3222 * service tree
3223 */
3224 if (!new_cfqq)
3225 cfq_choose_cfqg(cfqd);
3226
3227 cfqq = cfq_set_active_queue(cfqd, new_cfqq);
3228 keep_queue:
3229 return cfqq;
3230 }
3231
3232 static int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq)
3233 {
3234 int dispatched = 0;
3235
3236 while (cfqq->next_rq) {
3237 cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq);
3238 dispatched++;
3239 }
3240
3241 BUG_ON(!list_empty(&cfqq->fifo));
3242
3243 /* By default cfqq is not expired if it is empty. Do it explicitly */
3244 __cfq_slice_expired(cfqq->cfqd, cfqq, 0);
3245 return dispatched;
3246 }
3247
3248 /*
3249 * Drain our current requests. Used for barriers and when switching
3250 * io schedulers on-the-fly.
3251 */
3252 static int cfq_forced_dispatch(struct cfq_data *cfqd)
3253 {
3254 struct cfq_queue *cfqq;
3255 int dispatched = 0;
3256
3257 /* Expire the timeslice of the current active queue first */
3258 cfq_slice_expired(cfqd, 0);
3259 while ((cfqq = cfq_get_next_queue_forced(cfqd)) != NULL) {
3260 __cfq_set_active_queue(cfqd, cfqq);
3261 dispatched += __cfq_forced_dispatch_cfqq(cfqq);
3262 }
3263
3264 BUG_ON(cfqd->busy_queues);
3265
3266 cfq_log(cfqd, "forced_dispatch=%d", dispatched);
3267 return dispatched;
3268 }
3269
3270 static inline bool cfq_slice_used_soon(struct cfq_data *cfqd,
3271 struct cfq_queue *cfqq)
3272 {
3273 /* the queue hasn't finished any request, can't estimate */
3274 if (cfq_cfqq_slice_new(cfqq))
3275 return true;
3276 if (time_after(jiffies + cfqd->cfq_slice_idle * cfqq->dispatched,
3277 cfqq->slice_end))
3278 return true;
3279
3280 return false;
3281 }
3282
3283 static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3284 {
3285 unsigned int max_dispatch;
3286
3287 /*
3288 * Drain async requests before we start sync IO
3289 */
3290 if (cfq_should_idle(cfqd, cfqq) && cfqd->rq_in_flight[BLK_RW_ASYNC])
3291 return false;
3292
3293 /*
3294 * If this is an async queue and we have sync IO in flight, let it wait
3295 */
3296 if (cfqd->rq_in_flight[BLK_RW_SYNC] && !cfq_cfqq_sync(cfqq))
3297 return false;
3298
3299 max_dispatch = max_t(unsigned int, cfqd->cfq_quantum / 2, 1);
3300 if (cfq_class_idle(cfqq))
3301 max_dispatch = 1;
3302
3303 /*
3304 * Does this cfqq already have too much IO in flight?
3305 */
3306 if (cfqq->dispatched >= max_dispatch) {
3307 bool promote_sync = false;
3308 /*
3309 * idle queue must always only have a single IO in flight
3310 */
3311 if (cfq_class_idle(cfqq))
3312 return false;
3313
3314 /*
3315 * If there is only one sync queue
3316 * we can ignore async queue here and give the sync
3317 * queue no dispatch limit. The reason is a sync queue can
3318 * preempt async queue, limiting the sync queue doesn't make
3319 * sense. This is useful for aiostress test.
3320 */
3321 if (cfq_cfqq_sync(cfqq) && cfqd->busy_sync_queues == 1)
3322 promote_sync = true;
3323
3324 /*
3325 * We have other queues, don't allow more IO from this one
3326 */
3327 if (cfqd->busy_queues > 1 && cfq_slice_used_soon(cfqd, cfqq) &&
3328 !promote_sync)
3329 return false;
3330
3331 /*
3332 * Sole queue user, no limit
3333 */
3334 if (cfqd->busy_queues == 1 || promote_sync)
3335 max_dispatch = -1;
3336 else
3337 /*
3338 * Normally we start throttling cfqq when cfq_quantum/2
3339 * requests have been dispatched. But we can drive
3340 * deeper queue depths at the beginning of slice
3341 * subjected to upper limit of cfq_quantum.
3342 * */
3343 max_dispatch = cfqd->cfq_quantum;
3344 }
3345
3346 /*
3347 * Async queues must wait a bit before being allowed dispatch.
3348 * We also ramp up the dispatch depth gradually for async IO,
3349 * based on the last sync IO we serviced
3350 */
3351 if (!cfq_cfqq_sync(cfqq) && cfqd->cfq_latency) {
3352 unsigned long last_sync = jiffies - cfqd->last_delayed_sync;
3353 unsigned int depth;
3354
3355 depth = last_sync / cfqd->cfq_slice[1];
3356 if (!depth && !cfqq->dispatched)
3357 depth = 1;
3358 if (depth < max_dispatch)
3359 max_dispatch = depth;
3360 }
3361
3362 /*
3363 * If we're below the current max, allow a dispatch
3364 */
3365 return cfqq->dispatched < max_dispatch;
3366 }
3367
3368 /*
3369 * Dispatch a request from cfqq, moving them to the request queue
3370 * dispatch list.
3371 */
3372 static bool cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3373 {
3374 struct request *rq;
3375
3376 BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
3377
3378 if (!cfq_may_dispatch(cfqd, cfqq))
3379 return false;
3380
3381 /*
3382 * follow expired path, else get first next available
3383 */
3384 rq = cfq_check_fifo(cfqq);
3385 if (!rq)
3386 rq = cfqq->next_rq;
3387
3388 /*
3389 * insert request into driver dispatch list
3390 */
3391 cfq_dispatch_insert(cfqd->queue, rq);
3392
3393 if (!cfqd->active_cic) {
3394 struct cfq_io_cq *cic = RQ_CIC(rq);
3395
3396 atomic_long_inc(&cic->icq.ioc->refcount);
3397 cfqd->active_cic = cic;
3398 }
3399
3400 return true;
3401 }
3402
3403 /*
3404 * Find the cfqq that we need to service and move a request from that to the
3405 * dispatch list
3406 */
3407 static int cfq_dispatch_requests(struct request_queue *q, int force)
3408 {
3409 struct cfq_data *cfqd = q->elevator->elevator_data;
3410 struct cfq_queue *cfqq;
3411
3412 if (!cfqd->busy_queues)
3413 return 0;
3414
3415 if (unlikely(force))
3416 return cfq_forced_dispatch(cfqd);
3417
3418 cfqq = cfq_select_queue(cfqd);
3419 if (!cfqq)
3420 return 0;
3421
3422 /*
3423 * Dispatch a request from this cfqq, if it is allowed
3424 */
3425 if (!cfq_dispatch_request(cfqd, cfqq))
3426 return 0;
3427
3428 cfqq->slice_dispatch++;
3429 cfq_clear_cfqq_must_dispatch(cfqq);
3430
3431 /*
3432 * expire an async queue immediately if it has used up its slice. idle
3433 * queue always expire after 1 dispatch round.
3434 */
3435 if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
3436 cfqq->slice_dispatch >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
3437 cfq_class_idle(cfqq))) {
3438 cfqq->slice_end = jiffies + 1;
3439 cfq_slice_expired(cfqd, 0);
3440 }
3441
3442 cfq_log_cfqq(cfqd, cfqq, "dispatched a request");
3443 return 1;
3444 }
3445
3446 /*
3447 * task holds one reference to the queue, dropped when task exits. each rq
3448 * in-flight on this queue also holds a reference, dropped when rq is freed.
3449 *
3450 * Each cfq queue took a reference on the parent group. Drop it now.
3451 * queue lock must be held here.
3452 */
3453 static void cfq_put_queue(struct cfq_queue *cfqq)
3454 {
3455 struct cfq_data *cfqd = cfqq->cfqd;
3456 struct cfq_group *cfqg;
3457
3458 BUG_ON(cfqq->ref <= 0);
3459
3460 cfqq->ref--;
3461 if (cfqq->ref)
3462 return;
3463
3464 cfq_log_cfqq(cfqd, cfqq, "put_queue");
3465 BUG_ON(rb_first(&cfqq->sort_list));
3466 BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
3467 cfqg = cfqq->cfqg;
3468
3469 if (unlikely(cfqd->active_queue == cfqq)) {
3470 __cfq_slice_expired(cfqd, cfqq, 0);
3471 cfq_schedule_dispatch(cfqd);
3472 }
3473
3474 BUG_ON(cfq_cfqq_on_rr(cfqq));
3475 kmem_cache_free(cfq_pool, cfqq);
3476 cfqg_put(cfqg);
3477 }
3478
3479 static void cfq_put_cooperator(struct cfq_queue *cfqq)
3480 {
3481 struct cfq_queue *__cfqq, *next;
3482
3483 /*
3484 * If this queue was scheduled to merge with another queue, be
3485 * sure to drop the reference taken on that queue (and others in
3486 * the merge chain). See cfq_setup_merge and cfq_merge_cfqqs.
3487 */
3488 __cfqq = cfqq->new_cfqq;
3489 while (__cfqq) {
3490 if (__cfqq == cfqq) {
3491 WARN(1, "cfqq->new_cfqq loop detected\n");
3492 break;
3493 }
3494 next = __cfqq->new_cfqq;
3495 cfq_put_queue(__cfqq);
3496 __cfqq = next;
3497 }
3498 }
3499
3500 static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3501 {
3502 if (unlikely(cfqq == cfqd->active_queue)) {
3503 __cfq_slice_expired(cfqd, cfqq, 0);
3504 cfq_schedule_dispatch(cfqd);
3505 }
3506
3507 cfq_put_cooperator(cfqq);
3508
3509 cfq_put_queue(cfqq);
3510 }
3511
3512 static void cfq_init_icq(struct io_cq *icq)
3513 {
3514 struct cfq_io_cq *cic = icq_to_cic(icq);
3515
3516 cic->ttime.last_end_request = jiffies;
3517 }
3518
3519 static void cfq_exit_icq(struct io_cq *icq)
3520 {
3521 struct cfq_io_cq *cic = icq_to_cic(icq);
3522 struct cfq_data *cfqd = cic_to_cfqd(cic);
3523
3524 if (cic_to_cfqq(cic, false)) {
3525 cfq_exit_cfqq(cfqd, cic_to_cfqq(cic, false));
3526 cic_set_cfqq(cic, NULL, false);
3527 }
3528
3529 if (cic_to_cfqq(cic, true)) {
3530 cfq_exit_cfqq(cfqd, cic_to_cfqq(cic, true));
3531 cic_set_cfqq(cic, NULL, true);
3532 }
3533 }
3534
3535 static void cfq_init_prio_data(struct cfq_queue *cfqq, struct cfq_io_cq *cic)
3536 {
3537 struct task_struct *tsk = current;
3538 int ioprio_class;
3539
3540 if (!cfq_cfqq_prio_changed(cfqq))
3541 return;
3542
3543 ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio);
3544 switch (ioprio_class) {
3545 default:
3546 printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
3547 case IOPRIO_CLASS_NONE:
3548 /*
3549 * no prio set, inherit CPU scheduling settings
3550 */
3551 cfqq->ioprio = task_nice_ioprio(tsk);
3552 cfqq->ioprio_class = task_nice_ioclass(tsk);
3553 break;
3554 case IOPRIO_CLASS_RT:
3555 cfqq->ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
3556 cfqq->ioprio_class = IOPRIO_CLASS_RT;
3557 break;
3558 case IOPRIO_CLASS_BE:
3559 cfqq->ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
3560 cfqq->ioprio_class = IOPRIO_CLASS_BE;
3561 break;
3562 case IOPRIO_CLASS_IDLE:
3563 cfqq->ioprio_class = IOPRIO_CLASS_IDLE;
3564 cfqq->ioprio = 7;
3565 cfq_clear_cfqq_idle_window(cfqq);
3566 break;
3567 }
3568
3569 /*
3570 * keep track of original prio settings in case we have to temporarily
3571 * elevate the priority of this queue
3572 */
3573 cfqq->org_ioprio = cfqq->ioprio;
3574 cfq_clear_cfqq_prio_changed(cfqq);
3575 }
3576
3577 static void check_ioprio_changed(struct cfq_io_cq *cic, struct bio *bio)
3578 {
3579 int ioprio = cic->icq.ioc->ioprio;
3580 struct cfq_data *cfqd = cic_to_cfqd(cic);
3581 struct cfq_queue *cfqq;
3582
3583 /*
3584 * Check whether ioprio has changed. The condition may trigger
3585 * spuriously on a newly created cic but there's no harm.
3586 */
3587 if (unlikely(!cfqd) || likely(cic->ioprio == ioprio))
3588 return;
3589
3590 cfqq = cic_to_cfqq(cic, false);
3591 if (cfqq) {
3592 cfq_put_queue(cfqq);
3593 cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic, bio);
3594 cic_set_cfqq(cic, cfqq, false);
3595 }
3596
3597 cfqq = cic_to_cfqq(cic, true);
3598 if (cfqq)
3599 cfq_mark_cfqq_prio_changed(cfqq);
3600
3601 cic->ioprio = ioprio;
3602 }
3603
3604 static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3605 pid_t pid, bool is_sync)
3606 {
3607 RB_CLEAR_NODE(&cfqq->rb_node);
3608 RB_CLEAR_NODE(&cfqq->p_node);
3609 INIT_LIST_HEAD(&cfqq->fifo);
3610
3611 cfqq->ref = 0;
3612 cfqq->cfqd = cfqd;
3613
3614 cfq_mark_cfqq_prio_changed(cfqq);
3615
3616 if (is_sync) {
3617 if (!cfq_class_idle(cfqq))
3618 cfq_mark_cfqq_idle_window(cfqq);
3619 cfq_mark_cfqq_sync(cfqq);
3620 }
3621 cfqq->pid = pid;
3622 }
3623
3624 #ifdef CONFIG_CFQ_GROUP_IOSCHED
3625 static void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio)
3626 {
3627 struct cfq_data *cfqd = cic_to_cfqd(cic);
3628 struct cfq_queue *cfqq;
3629 uint64_t serial_nr;
3630
3631 rcu_read_lock();
3632 serial_nr = bio_blkcg(bio)->css.serial_nr;
3633 rcu_read_unlock();
3634
3635 /*
3636 * Check whether blkcg has changed. The condition may trigger
3637 * spuriously on a newly created cic but there's no harm.
3638 */
3639 if (unlikely(!cfqd) || likely(cic->blkcg_serial_nr == serial_nr))
3640 return;
3641
3642 /*
3643 * Drop reference to queues. New queues will be assigned in new
3644 * group upon arrival of fresh requests.
3645 */
3646 cfqq = cic_to_cfqq(cic, false);
3647 if (cfqq) {
3648 cfq_log_cfqq(cfqd, cfqq, "changed cgroup");
3649 cic_set_cfqq(cic, NULL, false);
3650 cfq_put_queue(cfqq);
3651 }
3652
3653 cfqq = cic_to_cfqq(cic, true);
3654 if (cfqq) {
3655 cfq_log_cfqq(cfqd, cfqq, "changed cgroup");
3656 cic_set_cfqq(cic, NULL, true);
3657 cfq_put_queue(cfqq);
3658 }
3659
3660 cic->blkcg_serial_nr = serial_nr;
3661 }
3662 #else
3663 static inline void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio) { }
3664 #endif /* CONFIG_CFQ_GROUP_IOSCHED */
3665
3666 static struct cfq_queue **
3667 cfq_async_queue_prio(struct cfq_group *cfqg, int ioprio_class, int ioprio)
3668 {
3669 switch (ioprio_class) {
3670 case IOPRIO_CLASS_RT:
3671 return &cfqg->async_cfqq[0][ioprio];
3672 case IOPRIO_CLASS_NONE:
3673 ioprio = IOPRIO_NORM;
3674 /* fall through */
3675 case IOPRIO_CLASS_BE:
3676 return &cfqg->async_cfqq[1][ioprio];
3677 case IOPRIO_CLASS_IDLE:
3678 return &cfqg->async_idle_cfqq;
3679 default:
3680 BUG();
3681 }
3682 }
3683
3684 static struct cfq_queue *
3685 cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
3686 struct bio *bio)
3687 {
3688 int ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio);
3689 int ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
3690 struct cfq_queue **async_cfqq = NULL;
3691 struct cfq_queue *cfqq;
3692 struct cfq_group *cfqg;
3693
3694 rcu_read_lock();
3695 cfqg = cfq_lookup_cfqg(cfqd, bio_blkcg(bio));
3696 if (!cfqg) {
3697 cfqq = &cfqd->oom_cfqq;
3698 goto out;
3699 }
3700
3701 if (!is_sync) {
3702 if (!ioprio_valid(cic->ioprio)) {
3703 struct task_struct *tsk = current;
3704 ioprio = task_nice_ioprio(tsk);
3705 ioprio_class = task_nice_ioclass(tsk);
3706 }
3707 async_cfqq = cfq_async_queue_prio(cfqg, ioprio_class, ioprio);
3708 cfqq = *async_cfqq;
3709 if (cfqq)
3710 goto out;
3711 }
3712
3713 cfqq = kmem_cache_alloc_node(cfq_pool, GFP_NOWAIT | __GFP_ZERO,
3714 cfqd->queue->node);
3715 if (!cfqq) {
3716 cfqq = &cfqd->oom_cfqq;
3717 goto out;
3718 }
3719
3720 cfq_init_cfqq(cfqd, cfqq, current->pid, is_sync);
3721 cfq_init_prio_data(cfqq, cic);
3722 cfq_link_cfqq_cfqg(cfqq, cfqg);
3723 cfq_log_cfqq(cfqd, cfqq, "alloced");
3724
3725 if (async_cfqq) {
3726 /* a new async queue is created, pin and remember */
3727 cfqq->ref++;
3728 *async_cfqq = cfqq;
3729 }
3730 out:
3731 cfqq->ref++;
3732 rcu_read_unlock();
3733 return cfqq;
3734 }
3735
3736 static void
3737 __cfq_update_io_thinktime(struct cfq_ttime *ttime, unsigned long slice_idle)
3738 {
3739 unsigned long elapsed = jiffies - ttime->last_end_request;
3740 elapsed = min(elapsed, 2UL * slice_idle);
3741
3742 ttime->ttime_samples = (7*ttime->ttime_samples + 256) / 8;
3743 ttime->ttime_total = (7*ttime->ttime_total + 256*elapsed) / 8;
3744 ttime->ttime_mean = (ttime->ttime_total + 128) / ttime->ttime_samples;
3745 }
3746
3747 static void
3748 cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3749 struct cfq_io_cq *cic)
3750 {
3751 if (cfq_cfqq_sync(cfqq)) {
3752 __cfq_update_io_thinktime(&cic->ttime, cfqd->cfq_slice_idle);
3753 __cfq_update_io_thinktime(&cfqq->service_tree->ttime,
3754 cfqd->cfq_slice_idle);
3755 }
3756 #ifdef CONFIG_CFQ_GROUP_IOSCHED
3757 __cfq_update_io_thinktime(&cfqq->cfqg->ttime, cfqd->cfq_group_idle);
3758 #endif
3759 }
3760
3761 static void
3762 cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3763 struct request *rq)
3764 {
3765 sector_t sdist = 0;
3766 sector_t n_sec = blk_rq_sectors(rq);
3767 if (cfqq->last_request_pos) {
3768 if (cfqq->last_request_pos < blk_rq_pos(rq))
3769 sdist = blk_rq_pos(rq) - cfqq->last_request_pos;
3770 else
3771 sdist = cfqq->last_request_pos - blk_rq_pos(rq);
3772 }
3773
3774 cfqq->seek_history <<= 1;
3775 if (blk_queue_nonrot(cfqd->queue))
3776 cfqq->seek_history |= (n_sec < CFQQ_SECT_THR_NONROT);
3777 else
3778 cfqq->seek_history |= (sdist > CFQQ_SEEK_THR);
3779 }
3780
3781 /*
3782 * Disable idle window if the process thinks too long or seeks so much that
3783 * it doesn't matter
3784 */
3785 static void
3786 cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3787 struct cfq_io_cq *cic)
3788 {
3789 int old_idle, enable_idle;
3790
3791 /*
3792 * Don't idle for async or idle io prio class
3793 */
3794 if (!cfq_cfqq_sync(cfqq) || cfq_class_idle(cfqq))
3795 return;
3796
3797 enable_idle = old_idle = cfq_cfqq_idle_window(cfqq);
3798
3799 if (cfqq->queued[0] + cfqq->queued[1] >= 4)
3800 cfq_mark_cfqq_deep(cfqq);
3801
3802 if (cfqq->next_rq && (cfqq->next_rq->cmd_flags & REQ_NOIDLE))
3803 enable_idle = 0;
3804 else if (!atomic_read(&cic->icq.ioc->active_ref) ||
3805 !cfqd->cfq_slice_idle ||
3806 (!cfq_cfqq_deep(cfqq) && CFQQ_SEEKY(cfqq)))
3807 enable_idle = 0;
3808 else if (sample_valid(cic->ttime.ttime_samples)) {
3809 if (cic->ttime.ttime_mean > cfqd->cfq_slice_idle)
3810 enable_idle = 0;
3811 else
3812 enable_idle = 1;
3813 }
3814
3815 if (old_idle != enable_idle) {
3816 cfq_log_cfqq(cfqd, cfqq, "idle=%d", enable_idle);
3817 if (enable_idle)
3818 cfq_mark_cfqq_idle_window(cfqq);
3819 else
3820 cfq_clear_cfqq_idle_window(cfqq);
3821 }
3822 }
3823
3824 /*
3825 * Check if new_cfqq should preempt the currently active queue. Return 0 for
3826 * no or if we aren't sure, a 1 will cause a preempt.
3827 */
3828 static bool
3829 cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
3830 struct request *rq)
3831 {
3832 struct cfq_queue *cfqq;
3833
3834 cfqq = cfqd->active_queue;
3835 if (!cfqq)
3836 return false;
3837
3838 if (cfq_class_idle(new_cfqq))
3839 return false;
3840
3841 if (cfq_class_idle(cfqq))
3842 return true;
3843
3844 /*
3845 * Don't allow a non-RT request to preempt an ongoing RT cfqq timeslice.
3846 */
3847 if (cfq_class_rt(cfqq) && !cfq_class_rt(new_cfqq))
3848 return false;
3849
3850 /*
3851 * if the new request is sync, but the currently running queue is
3852 * not, let the sync request have priority.
3853 */
3854 if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
3855 return true;
3856
3857 if (new_cfqq->cfqg != cfqq->cfqg)
3858 return false;
3859
3860 if (cfq_slice_used(cfqq))
3861 return true;
3862
3863 /* Allow preemption only if we are idling on sync-noidle tree */
3864 if (cfqd->serving_wl_type == SYNC_NOIDLE_WORKLOAD &&
3865 cfqq_type(new_cfqq) == SYNC_NOIDLE_WORKLOAD &&
3866 new_cfqq->service_tree->count == 2 &&
3867 RB_EMPTY_ROOT(&cfqq->sort_list))
3868 return true;
3869
3870 /*
3871 * So both queues are sync. Let the new request get disk time if
3872 * it's a metadata request and the current queue is doing regular IO.
3873 */
3874 if ((rq->cmd_flags & REQ_PRIO) && !cfqq->prio_pending)
3875 return true;
3876
3877 /*
3878 * Allow an RT request to pre-empt an ongoing non-RT cfqq timeslice.
3879 */
3880 if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq))
3881 return true;
3882
3883 /* An idle queue should not be idle now for some reason */
3884 if (RB_EMPTY_ROOT(&cfqq->sort_list) && !cfq_should_idle(cfqd, cfqq))
3885 return true;
3886
3887 if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
3888 return false;
3889
3890 /*
3891 * if this request is as-good as one we would expect from the
3892 * current cfqq, let it preempt
3893 */
3894 if (cfq_rq_close(cfqd, cfqq, rq))
3895 return true;
3896
3897 return false;
3898 }
3899
3900 /*
3901 * cfqq preempts the active queue. if we allowed preempt with no slice left,
3902 * let it have half of its nominal slice.
3903 */
3904 static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3905 {
3906 enum wl_type_t old_type = cfqq_type(cfqd->active_queue);
3907
3908 cfq_log_cfqq(cfqd, cfqq, "preempt");
3909 cfq_slice_expired(cfqd, 1);
3910
3911 /*
3912 * workload type is changed, don't save slice, otherwise preempt
3913 * doesn't happen
3914 */
3915 if (old_type != cfqq_type(cfqq))
3916 cfqq->cfqg->saved_wl_slice = 0;
3917
3918 /*
3919 * Put the new queue at the front of the of the current list,
3920 * so we know that it will be selected next.
3921 */
3922 BUG_ON(!cfq_cfqq_on_rr(cfqq));
3923
3924 cfq_service_tree_add(cfqd, cfqq, 1);
3925
3926 cfqq->slice_end = 0;
3927 cfq_mark_cfqq_slice_new(cfqq);
3928 }
3929
3930 /*
3931 * Called when a new fs request (rq) is added (to cfqq). Check if there's
3932 * something we should do about it
3933 */
3934 static void
3935 cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3936 struct request *rq)
3937 {
3938 struct cfq_io_cq *cic = RQ_CIC(rq);
3939
3940 cfqd->rq_queued++;
3941 if (rq->cmd_flags & REQ_PRIO)
3942 cfqq->prio_pending++;
3943
3944 cfq_update_io_thinktime(cfqd, cfqq, cic);
3945 cfq_update_io_seektime(cfqd, cfqq, rq);
3946 cfq_update_idle_window(cfqd, cfqq, cic);
3947
3948 cfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
3949
3950 if (cfqq == cfqd->active_queue) {
3951 /*
3952 * Remember that we saw a request from this process, but
3953 * don't start queuing just yet. Otherwise we risk seeing lots
3954 * of tiny requests, because we disrupt the normal plugging
3955 * and merging. If the request is already larger than a single
3956 * page, let it rip immediately. For that case we assume that
3957 * merging is already done. Ditto for a busy system that
3958 * has other work pending, don't risk delaying until the
3959 * idle timer unplug to continue working.
3960 */
3961 if (cfq_cfqq_wait_request(cfqq)) {
3962 if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE ||
3963 cfqd->busy_queues > 1) {
3964 cfq_del_timer(cfqd, cfqq);
3965 cfq_clear_cfqq_wait_request(cfqq);
3966 __blk_run_queue(cfqd->queue);
3967 } else {
3968 cfqg_stats_update_idle_time(cfqq->cfqg);
3969 cfq_mark_cfqq_must_dispatch(cfqq);
3970 }
3971 }
3972 } else if (cfq_should_preempt(cfqd, cfqq, rq)) {
3973 /*
3974 * not the active queue - expire current slice if it is
3975 * idle and has expired it's mean thinktime or this new queue
3976 * has some old slice time left and is of higher priority or
3977 * this new queue is RT and the current one is BE
3978 */
3979 cfq_preempt_queue(cfqd, cfqq);
3980 __blk_run_queue(cfqd->queue);
3981 }
3982 }
3983
3984 static void cfq_insert_request(struct request_queue *q, struct request *rq)
3985 {
3986 struct cfq_data *cfqd = q->elevator->elevator_data;
3987 struct cfq_queue *cfqq = RQ_CFQQ(rq);
3988
3989 cfq_log_cfqq(cfqd, cfqq, "insert_request");
3990 cfq_init_prio_data(cfqq, RQ_CIC(rq));
3991
3992 rq->fifo_time = jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)];
3993 list_add_tail(&rq->queuelist, &cfqq->fifo);
3994 cfq_add_rq_rb(rq);
3995 cfqg_stats_update_io_add(RQ_CFQG(rq), cfqd->serving_group,
3996 rq->cmd_flags);
3997 cfq_rq_enqueued(cfqd, cfqq, rq);
3998 }
3999
4000 /*
4001 * Update hw_tag based on peak queue depth over 50 samples under
4002 * sufficient load.
4003 */
4004 static void cfq_update_hw_tag(struct cfq_data *cfqd)
4005 {
4006 struct cfq_queue *cfqq = cfqd->active_queue;
4007
4008 if (cfqd->rq_in_driver > cfqd->hw_tag_est_depth)
4009 cfqd->hw_tag_est_depth = cfqd->rq_in_driver;
4010
4011 if (cfqd->hw_tag == 1)
4012 return;
4013
4014 if (cfqd->rq_queued <= CFQ_HW_QUEUE_MIN &&
4015 cfqd->rq_in_driver <= CFQ_HW_QUEUE_MIN)
4016 return;
4017
4018 /*
4019 * If active queue hasn't enough requests and can idle, cfq might not
4020 * dispatch sufficient requests to hardware. Don't zero hw_tag in this
4021 * case
4022 */
4023 if (cfqq && cfq_cfqq_idle_window(cfqq) &&
4024 cfqq->dispatched + cfqq->queued[0] + cfqq->queued[1] <
4025 CFQ_HW_QUEUE_MIN && cfqd->rq_in_driver < CFQ_HW_QUEUE_MIN)
4026 return;
4027
4028 if (cfqd->hw_tag_samples++ < 50)
4029 return;
4030
4031 if (cfqd->hw_tag_est_depth >= CFQ_HW_QUEUE_MIN)
4032 cfqd->hw_tag = 1;
4033 else
4034 cfqd->hw_tag = 0;
4035 }
4036
4037 static bool cfq_should_wait_busy(struct cfq_data *cfqd, struct cfq_queue *cfqq)
4038 {
4039 struct cfq_io_cq *cic = cfqd->active_cic;
4040
4041 /* If the queue already has requests, don't wait */
4042 if (!RB_EMPTY_ROOT(&cfqq->sort_list))
4043 return false;
4044
4045 /* If there are other queues in the group, don't wait */
4046 if (cfqq->cfqg->nr_cfqq > 1)
4047 return false;
4048
4049 /* the only queue in the group, but think time is big */
4050 if (cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true))
4051 return false;
4052
4053 if (cfq_slice_used(cfqq))
4054 return true;
4055
4056 /* if slice left is less than think time, wait busy */
4057 if (cic && sample_valid(cic->ttime.ttime_samples)
4058 && (cfqq->slice_end - jiffies < cic->ttime.ttime_mean))
4059 return true;
4060
4061 /*
4062 * If think times is less than a jiffy than ttime_mean=0 and above
4063 * will not be true. It might happen that slice has not expired yet
4064 * but will expire soon (4-5 ns) during select_queue(). To cover the
4065 * case where think time is less than a jiffy, mark the queue wait
4066 * busy if only 1 jiffy is left in the slice.
4067 */
4068 if (cfqq->slice_end - jiffies == 1)
4069 return true;
4070
4071 return false;
4072 }
4073
4074 static void cfq_completed_request(struct request_queue *q, struct request *rq)
4075 {
4076 struct cfq_queue *cfqq = RQ_CFQQ(rq);
4077 struct cfq_data *cfqd = cfqq->cfqd;
4078 const int sync = rq_is_sync(rq);
4079 unsigned long now;
4080
4081 now = jiffies;
4082 cfq_log_cfqq(cfqd, cfqq, "complete rqnoidle %d",
4083 !!(rq->cmd_flags & REQ_NOIDLE));
4084
4085 cfq_update_hw_tag(cfqd);
4086
4087 WARN_ON(!cfqd->rq_in_driver);
4088 WARN_ON(!cfqq->dispatched);
4089 cfqd->rq_in_driver--;
4090 cfqq->dispatched--;
4091 (RQ_CFQG(rq))->dispatched--;
4092 cfqg_stats_update_completion(cfqq->cfqg, rq_start_time_ns(rq),
4093 rq_io_start_time_ns(rq), rq->cmd_flags);
4094
4095 cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]--;
4096
4097 if (sync) {
4098 struct cfq_rb_root *st;
4099
4100 RQ_CIC(rq)->ttime.last_end_request = now;
4101
4102 if (cfq_cfqq_on_rr(cfqq))
4103 st = cfqq->service_tree;
4104 else
4105 st = st_for(cfqq->cfqg, cfqq_class(cfqq),
4106 cfqq_type(cfqq));
4107
4108 st->ttime.last_end_request = now;
4109 if (!time_after(rq->start_time + cfqd->cfq_fifo_expire[1], now))
4110 cfqd->last_delayed_sync = now;
4111 }
4112
4113 #ifdef CONFIG_CFQ_GROUP_IOSCHED
4114 cfqq->cfqg->ttime.last_end_request = now;
4115 #endif
4116
4117 /*
4118 * If this is the active queue, check if it needs to be expired,
4119 * or if we want to idle in case it has no pending requests.
4120 */
4121 if (cfqd->active_queue == cfqq) {
4122 const bool cfqq_empty = RB_EMPTY_ROOT(&cfqq->sort_list);
4123
4124 if (cfq_cfqq_slice_new(cfqq)) {
4125 cfq_set_prio_slice(cfqd, cfqq);
4126 cfq_clear_cfqq_slice_new(cfqq);
4127 }
4128
4129 /*
4130 * Should we wait for next request to come in before we expire
4131 * the queue.
4132 */
4133 if (cfq_should_wait_busy(cfqd, cfqq)) {
4134 unsigned long extend_sl = cfqd->cfq_slice_idle;
4135 if (!cfqd->cfq_slice_idle)
4136 extend_sl = cfqd->cfq_group_idle;
4137 cfqq->slice_end = jiffies + extend_sl;
4138 cfq_mark_cfqq_wait_busy(cfqq);
4139 cfq_log_cfqq(cfqd, cfqq, "will busy wait");
4140 }
4141
4142 /*
4143 * Idling is not enabled on:
4144 * - expired queues
4145 * - idle-priority queues
4146 * - async queues
4147 * - queues with still some requests queued
4148 * - when there is a close cooperator
4149 */
4150 if (cfq_slice_used(cfqq) || cfq_class_idle(cfqq))
4151 cfq_slice_expired(cfqd, 1);
4152 else if (sync && cfqq_empty &&
4153 !cfq_close_cooperator(cfqd, cfqq)) {
4154 cfq_arm_slice_timer(cfqd);
4155 }
4156 }
4157
4158 if (!cfqd->rq_in_driver)
4159 cfq_schedule_dispatch(cfqd);
4160 }
4161
4162 static inline int __cfq_may_queue(struct cfq_queue *cfqq)
4163 {
4164 if (cfq_cfqq_wait_request(cfqq) && !cfq_cfqq_must_alloc_slice(cfqq)) {
4165 cfq_mark_cfqq_must_alloc_slice(cfqq);
4166 return ELV_MQUEUE_MUST;
4167 }
4168
4169 return ELV_MQUEUE_MAY;
4170 }
4171
4172 static int cfq_may_queue(struct request_queue *q, int rw)
4173 {
4174 struct cfq_data *cfqd = q->elevator->elevator_data;
4175 struct task_struct *tsk = current;
4176 struct cfq_io_cq *cic;
4177 struct cfq_queue *cfqq;
4178
4179 /*
4180 * don't force setup of a queue from here, as a call to may_queue
4181 * does not necessarily imply that a request actually will be queued.
4182 * so just lookup a possibly existing queue, or return 'may queue'
4183 * if that fails
4184 */
4185 cic = cfq_cic_lookup(cfqd, tsk->io_context);
4186 if (!cic)
4187 return ELV_MQUEUE_MAY;
4188
4189 cfqq = cic_to_cfqq(cic, rw_is_sync(rw));
4190 if (cfqq) {
4191 cfq_init_prio_data(cfqq, cic);
4192
4193 return __cfq_may_queue(cfqq);
4194 }
4195
4196 return ELV_MQUEUE_MAY;
4197 }
4198
4199 /*
4200 * queue lock held here
4201 */
4202 static void cfq_put_request(struct request *rq)
4203 {
4204 struct cfq_queue *cfqq = RQ_CFQQ(rq);
4205
4206 if (cfqq) {
4207 const int rw = rq_data_dir(rq);
4208
4209 BUG_ON(!cfqq->allocated[rw]);
4210 cfqq->allocated[rw]--;
4211
4212 /* Put down rq reference on cfqg */
4213 cfqg_put(RQ_CFQG(rq));
4214 rq->elv.priv[0] = NULL;
4215 rq->elv.priv[1] = NULL;
4216
4217 cfq_put_queue(cfqq);
4218 }
4219 }
4220
4221 static struct cfq_queue *
4222 cfq_merge_cfqqs(struct cfq_data *cfqd, struct cfq_io_cq *cic,
4223 struct cfq_queue *cfqq)
4224 {
4225 cfq_log_cfqq(cfqd, cfqq, "merging with queue %p", cfqq->new_cfqq);
4226 cic_set_cfqq(cic, cfqq->new_cfqq, 1);
4227 cfq_mark_cfqq_coop(cfqq->new_cfqq);
4228 cfq_put_queue(cfqq);
4229 return cic_to_cfqq(cic, 1);
4230 }
4231
4232 /*
4233 * Returns NULL if a new cfqq should be allocated, or the old cfqq if this
4234 * was the last process referring to said cfqq.
4235 */
4236 static struct cfq_queue *
4237 split_cfqq(struct cfq_io_cq *cic, struct cfq_queue *cfqq)
4238 {
4239 if (cfqq_process_refs(cfqq) == 1) {
4240 cfqq->pid = current->pid;
4241 cfq_clear_cfqq_coop(cfqq);
4242 cfq_clear_cfqq_split_coop(cfqq);
4243 return cfqq;
4244 }
4245
4246 cic_set_cfqq(cic, NULL, 1);
4247
4248 cfq_put_cooperator(cfqq);
4249
4250 cfq_put_queue(cfqq);
4251 return NULL;
4252 }
4253 /*
4254 * Allocate cfq data structures associated with this request.
4255 */
4256 static int
4257 cfq_set_request(struct request_queue *q, struct request *rq, struct bio *bio,
4258 gfp_t gfp_mask)
4259 {
4260 struct cfq_data *cfqd = q->elevator->elevator_data;
4261 struct cfq_io_cq *cic = icq_to_cic(rq->elv.icq);
4262 const int rw = rq_data_dir(rq);
4263 const bool is_sync = rq_is_sync(rq);
4264 struct cfq_queue *cfqq;
4265
4266 spin_lock_irq(q->queue_lock);
4267
4268 check_ioprio_changed(cic, bio);
4269 check_blkcg_changed(cic, bio);
4270 new_queue:
4271 cfqq = cic_to_cfqq(cic, is_sync);
4272 if (!cfqq || cfqq == &cfqd->oom_cfqq) {
4273 if (cfqq)
4274 cfq_put_queue(cfqq);
4275 cfqq = cfq_get_queue(cfqd, is_sync, cic, bio);
4276 cic_set_cfqq(cic, cfqq, is_sync);
4277 } else {
4278 /*
4279 * If the queue was seeky for too long, break it apart.
4280 */
4281 if (cfq_cfqq_coop(cfqq) && cfq_cfqq_split_coop(cfqq)) {
4282 cfq_log_cfqq(cfqd, cfqq, "breaking apart cfqq");
4283 cfqq = split_cfqq(cic, cfqq);
4284 if (!cfqq)
4285 goto new_queue;
4286 }
4287
4288 /*
4289 * Check to see if this queue is scheduled to merge with
4290 * another, closely cooperating queue. The merging of
4291 * queues happens here as it must be done in process context.
4292 * The reference on new_cfqq was taken in merge_cfqqs.
4293 */
4294 if (cfqq->new_cfqq)
4295 cfqq = cfq_merge_cfqqs(cfqd, cic, cfqq);
4296 }
4297
4298 cfqq->allocated[rw]++;
4299
4300 cfqq->ref++;
4301 cfqg_get(cfqq->cfqg);
4302 rq->elv.priv[0] = cfqq;
4303 rq->elv.priv[1] = cfqq->cfqg;
4304 spin_unlock_irq(q->queue_lock);
4305 return 0;
4306 }
4307
4308 static void cfq_kick_queue(struct work_struct *work)
4309 {
4310 struct cfq_data *cfqd =
4311 container_of(work, struct cfq_data, unplug_work);
4312 struct request_queue *q = cfqd->queue;
4313
4314 spin_lock_irq(q->queue_lock);
4315 __blk_run_queue(cfqd->queue);
4316 spin_unlock_irq(q->queue_lock);
4317 }
4318
4319 /*
4320 * Timer running if the active_queue is currently idling inside its time slice
4321 */
4322 static void cfq_idle_slice_timer(unsigned long data)
4323 {
4324 struct cfq_data *cfqd = (struct cfq_data *) data;
4325 struct cfq_queue *cfqq;
4326 unsigned long flags;
4327 int timed_out = 1;
4328
4329 cfq_log(cfqd, "idle timer fired");
4330
4331 spin_lock_irqsave(cfqd->queue->queue_lock, flags);
4332
4333 cfqq = cfqd->active_queue;
4334 if (cfqq) {
4335 timed_out = 0;
4336
4337 /*
4338 * We saw a request before the queue expired, let it through
4339 */
4340 if (cfq_cfqq_must_dispatch(cfqq))
4341 goto out_kick;
4342
4343 /*
4344 * expired
4345 */
4346 if (cfq_slice_used(cfqq))
4347 goto expire;
4348
4349 /*
4350 * only expire and reinvoke request handler, if there are
4351 * other queues with pending requests
4352 */
4353 if (!cfqd->busy_queues)
4354 goto out_cont;
4355
4356 /*
4357 * not expired and it has a request pending, let it dispatch
4358 */
4359 if (!RB_EMPTY_ROOT(&cfqq->sort_list))
4360 goto out_kick;
4361
4362 /*
4363 * Queue depth flag is reset only when the idle didn't succeed
4364 */
4365 cfq_clear_cfqq_deep(cfqq);
4366 }
4367 expire:
4368 cfq_slice_expired(cfqd, timed_out);
4369 out_kick:
4370 cfq_schedule_dispatch(cfqd);
4371 out_cont:
4372 spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
4373 }
4374
4375 static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
4376 {
4377 del_timer_sync(&cfqd->idle_slice_timer);
4378 cancel_work_sync(&cfqd->unplug_work);
4379 }
4380
4381 static void cfq_exit_queue(struct elevator_queue *e)
4382 {
4383 struct cfq_data *cfqd = e->elevator_data;
4384 struct request_queue *q = cfqd->queue;
4385
4386 cfq_shutdown_timer_wq(cfqd);
4387
4388 spin_lock_irq(q->queue_lock);
4389
4390 if (cfqd->active_queue)
4391 __cfq_slice_expired(cfqd, cfqd->active_queue, 0);
4392
4393 spin_unlock_irq(q->queue_lock);
4394
4395 cfq_shutdown_timer_wq(cfqd);
4396
4397 #ifdef CONFIG_CFQ_GROUP_IOSCHED
4398 blkcg_deactivate_policy(q, &blkcg_policy_cfq);
4399 #else
4400 kfree(cfqd->root_group);
4401 #endif
4402 kfree(cfqd);
4403 }
4404
4405 static int cfq_init_queue(struct request_queue *q, struct elevator_type *e)
4406 {
4407 struct cfq_data *cfqd;
4408 struct blkcg_gq *blkg __maybe_unused;
4409 int i, ret;
4410 struct elevator_queue *eq;
4411
4412 eq = elevator_alloc(q, e);
4413 if (!eq)
4414 return -ENOMEM;
4415
4416 cfqd = kzalloc_node(sizeof(*cfqd), GFP_KERNEL, q->node);
4417 if (!cfqd) {
4418 kobject_put(&eq->kobj);
4419 return -ENOMEM;
4420 }
4421 eq->elevator_data = cfqd;
4422
4423 cfqd->queue = q;
4424 spin_lock_irq(q->queue_lock);
4425 q->elevator = eq;
4426 spin_unlock_irq(q->queue_lock);
4427
4428 /* Init root service tree */
4429 cfqd->grp_service_tree = CFQ_RB_ROOT;
4430
4431 /* Init root group and prefer root group over other groups by default */
4432 #ifdef CONFIG_CFQ_GROUP_IOSCHED
4433 ret = blkcg_activate_policy(q, &blkcg_policy_cfq);
4434 if (ret)
4435 goto out_free;
4436
4437 cfqd->root_group = blkg_to_cfqg(q->root_blkg);
4438 #else
4439 ret = -ENOMEM;
4440 cfqd->root_group = kzalloc_node(sizeof(*cfqd->root_group),
4441 GFP_KERNEL, cfqd->queue->node);
4442 if (!cfqd->root_group)
4443 goto out_free;
4444
4445 cfq_init_cfqg_base(cfqd->root_group);
4446 #endif
4447 cfqd->root_group->weight = 2 * CFQ_WEIGHT_DEFAULT;
4448 cfqd->root_group->leaf_weight = 2 * CFQ_WEIGHT_DEFAULT;
4449
4450 /*
4451 * Not strictly needed (since RB_ROOT just clears the node and we
4452 * zeroed cfqd on alloc), but better be safe in case someone decides
4453 * to add magic to the rb code
4454 */
4455 for (i = 0; i < CFQ_PRIO_LISTS; i++)
4456 cfqd->prio_trees[i] = RB_ROOT;
4457
4458 /*
4459 * Our fallback cfqq if cfq_get_queue() runs into OOM issues.
4460 * Grab a permanent reference to it, so that the normal code flow
4461 * will not attempt to free it. oom_cfqq is linked to root_group
4462 * but shouldn't hold a reference as it'll never be unlinked. Lose
4463 * the reference from linking right away.
4464 */
4465 cfq_init_cfqq(cfqd, &cfqd->oom_cfqq, 1, 0);
4466 cfqd->oom_cfqq.ref++;
4467
4468 spin_lock_irq(q->queue_lock);
4469 cfq_link_cfqq_cfqg(&cfqd->oom_cfqq, cfqd->root_group);
4470 cfqg_put(cfqd->root_group);
4471 spin_unlock_irq(q->queue_lock);
4472
4473 init_timer(&cfqd->idle_slice_timer);
4474 cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
4475 cfqd->idle_slice_timer.data = (unsigned long) cfqd;
4476
4477 INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);
4478
4479 cfqd->cfq_quantum = cfq_quantum;
4480 cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
4481 cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];
4482 cfqd->cfq_back_max = cfq_back_max;
4483 cfqd->cfq_back_penalty = cfq_back_penalty;
4484 cfqd->cfq_slice[0] = cfq_slice_async;
4485 cfqd->cfq_slice[1] = cfq_slice_sync;
4486 cfqd->cfq_target_latency = cfq_target_latency;
4487 cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
4488 cfqd->cfq_slice_idle = cfq_slice_idle;
4489 cfqd->cfq_group_idle = cfq_group_idle;
4490 cfqd->cfq_latency = 1;
4491 cfqd->hw_tag = -1;
4492 /*
4493 * we optimistically start assuming sync ops weren't delayed in last
4494 * second, in order to have larger depth for async operations.
4495 */
4496 cfqd->last_delayed_sync = jiffies - HZ;
4497 return 0;
4498
4499 out_free:
4500 kfree(cfqd);
4501 kobject_put(&eq->kobj);
4502 return ret;
4503 }
4504
4505 static void cfq_registered_queue(struct request_queue *q)
4506 {
4507 struct elevator_queue *e = q->elevator;
4508 struct cfq_data *cfqd = e->elevator_data;
4509
4510 /*
4511 * Default to IOPS mode with no idling for SSDs
4512 */
4513 if (blk_queue_nonrot(q))
4514 cfqd->cfq_slice_idle = 0;
4515 }
4516
4517 /*
4518 * sysfs parts below -->
4519 */
4520 static ssize_t
4521 cfq_var_show(unsigned int var, char *page)
4522 {
4523 return sprintf(page, "%u\n", var);
4524 }
4525
4526 static ssize_t
4527 cfq_var_store(unsigned int *var, const char *page, size_t count)
4528 {
4529 char *p = (char *) page;
4530
4531 *var = simple_strtoul(p, &p, 10);
4532 return count;
4533 }
4534
4535 #define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \
4536 static ssize_t __FUNC(struct elevator_queue *e, char *page) \
4537 { \
4538 struct cfq_data *cfqd = e->elevator_data; \
4539 unsigned int __data = __VAR; \
4540 if (__CONV) \
4541 __data = jiffies_to_msecs(__data); \
4542 return cfq_var_show(__data, (page)); \
4543 }
4544 SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
4545 SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1);
4546 SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
4547 SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
4548 SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
4549 SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
4550 SHOW_FUNCTION(cfq_group_idle_show, cfqd->cfq_group_idle, 1);
4551 SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
4552 SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
4553 SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
4554 SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0);
4555 SHOW_FUNCTION(cfq_target_latency_show, cfqd->cfq_target_latency, 1);
4556 #undef SHOW_FUNCTION
4557
4558 #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \
4559 static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
4560 { \
4561 struct cfq_data *cfqd = e->elevator_data; \
4562 unsigned int __data; \
4563 int ret = cfq_var_store(&__data, (page), count); \
4564 if (__data < (MIN)) \
4565 __data = (MIN); \
4566 else if (__data > (MAX)) \
4567 __data = (MAX); \
4568 if (__CONV) \
4569 *(__PTR) = msecs_to_jiffies(__data); \
4570 else \
4571 *(__PTR) = __data; \
4572 return ret; \
4573 }
4574 STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
4575 STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1,
4576 UINT_MAX, 1);
4577 STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1,
4578 UINT_MAX, 1);
4579 STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
4580 STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1,
4581 UINT_MAX, 0);
4582 STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
4583 STORE_FUNCTION(cfq_group_idle_store, &cfqd->cfq_group_idle, 0, UINT_MAX, 1);
4584 STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
4585 STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
4586 STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
4587 UINT_MAX, 0);
4588 STORE_FUNCTION(cfq_low_latency_store, &cfqd->cfq_latency, 0, 1, 0);
4589 STORE_FUNCTION(cfq_target_latency_store, &cfqd->cfq_target_latency, 1, UINT_MAX, 1);
4590 #undef STORE_FUNCTION
4591
4592 #define CFQ_ATTR(name) \
4593 __ATTR(name, S_IRUGO|S_IWUSR, cfq_##name##_show, cfq_##name##_store)
4594
4595 static struct elv_fs_entry cfq_attrs[] = {
4596 CFQ_ATTR(quantum),
4597 CFQ_ATTR(fifo_expire_sync),
4598 CFQ_ATTR(fifo_expire_async),
4599 CFQ_ATTR(back_seek_max),
4600 CFQ_ATTR(back_seek_penalty),
4601 CFQ_ATTR(slice_sync),
4602 CFQ_ATTR(slice_async),
4603 CFQ_ATTR(slice_async_rq),
4604 CFQ_ATTR(slice_idle),
4605 CFQ_ATTR(group_idle),
4606 CFQ_ATTR(low_latency),
4607 CFQ_ATTR(target_latency),
4608 __ATTR_NULL
4609 };
4610
4611 static struct elevator_type iosched_cfq = {
4612 .ops = {
4613 .elevator_merge_fn = cfq_merge,
4614 .elevator_merged_fn = cfq_merged_request,
4615 .elevator_merge_req_fn = cfq_merged_requests,
4616 .elevator_allow_merge_fn = cfq_allow_merge,
4617 .elevator_bio_merged_fn = cfq_bio_merged,
4618 .elevator_dispatch_fn = cfq_dispatch_requests,
4619 .elevator_add_req_fn = cfq_insert_request,
4620 .elevator_activate_req_fn = cfq_activate_request,
4621 .elevator_deactivate_req_fn = cfq_deactivate_request,
4622 .elevator_completed_req_fn = cfq_completed_request,
4623 .elevator_former_req_fn = elv_rb_former_request,
4624 .elevator_latter_req_fn = elv_rb_latter_request,
4625 .elevator_init_icq_fn = cfq_init_icq,
4626 .elevator_exit_icq_fn = cfq_exit_icq,
4627 .elevator_set_req_fn = cfq_set_request,
4628 .elevator_put_req_fn = cfq_put_request,
4629 .elevator_may_queue_fn = cfq_may_queue,
4630 .elevator_init_fn = cfq_init_queue,
4631 .elevator_exit_fn = cfq_exit_queue,
4632 .elevator_registered_fn = cfq_registered_queue,
4633 },
4634 .icq_size = sizeof(struct cfq_io_cq),
4635 .icq_align = __alignof__(struct cfq_io_cq),
4636 .elevator_attrs = cfq_attrs,
4637 .elevator_name = "cfq",
4638 .elevator_owner = THIS_MODULE,
4639 };
4640
4641 #ifdef CONFIG_CFQ_GROUP_IOSCHED
4642 static struct blkcg_policy blkcg_policy_cfq = {
4643 .cftypes = cfq_blkcg_files,
4644
4645 .cpd_alloc_fn = cfq_cpd_alloc,
4646 .cpd_init_fn = cfq_cpd_init,
4647 .cpd_free_fn = cfq_cpd_free,
4648
4649 .pd_alloc_fn = cfq_pd_alloc,
4650 .pd_init_fn = cfq_pd_init,
4651 .pd_offline_fn = cfq_pd_offline,
4652 .pd_free_fn = cfq_pd_free,
4653 .pd_reset_stats_fn = cfq_pd_reset_stats,
4654 };
4655 #endif
4656
4657 static int __init cfq_init(void)
4658 {
4659 int ret;
4660
4661 /*
4662 * could be 0 on HZ < 1000 setups
4663 */
4664 if (!cfq_slice_async)
4665 cfq_slice_async = 1;
4666 if (!cfq_slice_idle)
4667 cfq_slice_idle = 1;
4668
4669 #ifdef CONFIG_CFQ_GROUP_IOSCHED
4670 if (!cfq_group_idle)
4671 cfq_group_idle = 1;
4672
4673 ret = blkcg_policy_register(&blkcg_policy_cfq);
4674 if (ret)
4675 return ret;
4676 #else
4677 cfq_group_idle = 0;
4678 #endif
4679
4680 ret = -ENOMEM;
4681 cfq_pool = KMEM_CACHE(cfq_queue, 0);
4682 if (!cfq_pool)
4683 goto err_pol_unreg;
4684
4685 ret = elv_register(&iosched_cfq);
4686 if (ret)
4687 goto err_free_pool;
4688
4689 return 0;
4690
4691 err_free_pool:
4692 kmem_cache_destroy(cfq_pool);
4693 err_pol_unreg:
4694 #ifdef CONFIG_CFQ_GROUP_IOSCHED
4695 blkcg_policy_unregister(&blkcg_policy_cfq);
4696 #endif
4697 return ret;
4698 }
4699
4700 static void __exit cfq_exit(void)
4701 {
4702 #ifdef CONFIG_CFQ_GROUP_IOSCHED
4703 blkcg_policy_unregister(&blkcg_policy_cfq);
4704 #endif
4705 elv_unregister(&iosched_cfq);
4706 kmem_cache_destroy(cfq_pool);
4707 }
4708
4709 module_init(cfq_init);
4710 module_exit(cfq_exit);
4711
4712 MODULE_AUTHOR("Jens Axboe");
4713 MODULE_LICENSE("GPL");
4714 MODULE_DESCRIPTION("Completely Fair Queueing IO scheduler");