/*
 * CFQ, or complete fairness queueing, disk scheduler.
 *
 * Based on ideas from a previously unfinished io
 * scheduler (round robin per-process disk scheduling) and Andrea Arcangeli.
 *
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sched/clock.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/ktime.h>
#include <linux/rbtree.h>
#include <linux/ioprio.h>
#include <linux/blktrace_api.h>
#include <linux/blk-cgroup.h>
#include "blk.h"
#include "blk-wbt.h"

/*
 * tunables
 */
/* max queue in one round of service */
static const int cfq_quantum = 8;
static const u64 cfq_fifo_expire[2] = { NSEC_PER_SEC / 4, NSEC_PER_SEC / 8 };
/* maximum backwards seek, in KiB */
static const int cfq_back_max = 16 * 1024;
/* penalty of a backwards seek */
static const int cfq_back_penalty = 2;
static const u64 cfq_slice_sync = NSEC_PER_SEC / 10;
static u64 cfq_slice_async = NSEC_PER_SEC / 25;
static const int cfq_slice_async_rq = 2;
static u64 cfq_slice_idle = NSEC_PER_SEC / 125;
static u64 cfq_group_idle = NSEC_PER_SEC / 125;
static const u64 cfq_target_latency = (u64)NSEC_PER_SEC * 3/10; /* 300 ms */
static const int cfq_hist_divisor = 4;
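
/*
 * All slice tunables above are in nanoseconds. With NSEC_PER_SEC = 10^9
 * the defaults work out to fifo_expire values of 250ms and 125ms,
 * slice_sync 100ms, slice_async 40ms, slice_idle and group_idle 8ms,
 * and a target_latency of 300ms.
 */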

/*
 * offset from end of queue service tree for idle class
 */
#define CFQ_IDLE_DELAY		(NSEC_PER_SEC / 5)
/* offset from end of group service tree under time slice mode */
#define CFQ_SLICE_MODE_GROUP_DELAY	(NSEC_PER_SEC / 5)
/* offset from end of group service under IOPS mode */
#define CFQ_IOPS_MODE_GROUP_DELAY	(HZ / 5)

/*
 * below this threshold, we consider thinktime immediate
 */
#define CFQ_MIN_TT		(2 * NSEC_PER_SEC / HZ)

#define CFQ_SLICE_SCALE		(5)
#define CFQ_HW_QUEUE_MIN	(5)
#define CFQ_SERVICE_SHIFT	12

#define CFQQ_SEEK_THR		(sector_t)(8 * 100)
#define CFQQ_CLOSE_THR		(sector_t)(8 * 1024)
#define CFQQ_SECT_THR_NONROT	(sector_t)(2 * 32)
#define CFQQ_SEEKY(cfqq)	(hweight32(cfqq->seek_history) > 32/8)

#define RQ_CIC(rq)		icq_to_cic((rq)->elv.icq)
#define RQ_CFQQ(rq)		(struct cfq_queue *) ((rq)->elv.priv[0])
#define RQ_CFQG(rq)		(struct cfq_group *) ((rq)->elv.priv[1])

static struct kmem_cache *cfq_pool;

#define CFQ_PRIO_LISTS		IOPRIO_BE_NR
#define cfq_class_idle(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
#define cfq_class_rt(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_RT)

#define sample_valid(samples)	((samples) > 80)
#define rb_entry_cfqg(node)	rb_entry((node), struct cfq_group, rb_node)

/* blkio-related constants */
#define CFQ_WEIGHT_LEGACY_MIN	10
#define CFQ_WEIGHT_LEGACY_DFL	500
#define CFQ_WEIGHT_LEGACY_MAX	1000

struct cfq_ttime {
	u64 last_end_request;

	u64 ttime_total;
	u64 ttime_mean;
	unsigned long ttime_samples;
};

/*
 * Most of our rbtree usage is for sorting with min extraction, so
 * if we cache the leftmost node we don't have to walk down the tree
 * to find it. Idea borrowed from Ingo Molnar's CFS scheduler. We should
 * move this into the elevator for the rq sorting as well.
 */
struct cfq_rb_root {
	struct rb_root rb;
	struct rb_node *left;
	unsigned count;
	u64 min_vdisktime;
	struct cfq_ttime ttime;
};
#define CFQ_RB_ROOT	(struct cfq_rb_root) { .rb = RB_ROOT, \
			.ttime = {.last_end_request = ktime_get_ns(),},}
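
/*
 * ->left caches the leftmost (smallest key) node: cfq_rb_first() and
 * cfq_rb_first_group() below refresh it lazily with rb_first() and
 * cfq_rb_erase() drops it when that node is removed, so min extraction
 * normally avoids walking down the tree.
 */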

/*
 * Per process-grouping structure
 */
struct cfq_queue {
	/* reference count */
	int ref;
	/* various state flags, see below */
	unsigned int flags;
	/* parent cfq_data */
	struct cfq_data *cfqd;
	/* service_tree member */
	struct rb_node rb_node;
	/* service_tree key */
	u64 rb_key;
	/* prio tree member */
	struct rb_node p_node;
	/* prio tree root we belong to, if any */
	struct rb_root *p_root;
	/* sorted list of pending requests */
	struct rb_root sort_list;
	/* if fifo isn't expired, next request to serve */
	struct request *next_rq;
	/* requests queued in sort_list */
	int queued[2];
	/* currently allocated requests */
	int allocated[2];
	/* fifo list of requests in sort_list */
	struct list_head fifo;

	/* time when queue got scheduled in to dispatch first request. */
	u64 dispatch_start;
	u64 allocated_slice;
	u64 slice_dispatch;
	/* time when first request from queue completed and slice started. */
	u64 slice_start;
	u64 slice_end;
	s64 slice_resid;

	/* pending priority requests */
	int prio_pending;
	/* number of requests that are on the dispatch list or inside driver */
	int dispatched;

	/* io prio of this group */
	unsigned short ioprio, org_ioprio;
	unsigned short ioprio_class, org_ioprio_class;

	pid_t pid;

	u32 seek_history;
	sector_t last_request_pos;

	struct cfq_rb_root *service_tree;
	struct cfq_queue *new_cfqq;
	struct cfq_group *cfqg;
	/* Number of sectors dispatched from queue in single dispatch round */
	unsigned long nr_sectors;
};

/*
 * First index in the service_trees.
 * IDLE is handled separately and gets its own single service tree.
 */
enum wl_class_t {
	BE_WORKLOAD = 0,
	RT_WORKLOAD = 1,
	IDLE_WORKLOAD = 2,
	CFQ_PRIO_NR,
};

/*
 * Second index in the service_trees.
 */
enum wl_type_t {
	ASYNC_WORKLOAD = 0,
	SYNC_NOIDLE_WORKLOAD = 1,
	SYNC_WORKLOAD = 2
};

struct cfqg_stats {
#ifdef CONFIG_CFQ_GROUP_IOSCHED
	/* number of ios merged */
	struct blkg_rwstat		merged;
	/* total time spent on device in ns, may not be accurate w/ queueing */
	struct blkg_rwstat		service_time;
	/* total time spent waiting in scheduler queue in ns */
	struct blkg_rwstat		wait_time;
	/* number of IOs queued up */
	struct blkg_rwstat		queued;
	/* total disk time and nr sectors dispatched by this group */
	struct blkg_stat		time;
#ifdef CONFIG_DEBUG_BLK_CGROUP
	/* time not charged to this cgroup */
	struct blkg_stat		unaccounted_time;
	/* sum of number of ios queued across all samples */
	struct blkg_stat		avg_queue_size_sum;
	/* count of samples taken for average */
	struct blkg_stat		avg_queue_size_samples;
	/* how many times this group has been removed from service tree */
	struct blkg_stat		dequeue;
	/* total time spent waiting for it to be assigned a timeslice. */
	struct blkg_stat		group_wait_time;
	/* time spent idling for this blkcg_gq */
	struct blkg_stat		idle_time;
	/* total time with empty current active q with other requests queued */
	struct blkg_stat		empty_time;
	/* fields after this shouldn't be cleared on stat reset */
	uint64_t			start_group_wait_time;
	uint64_t			start_idle_time;
	uint64_t			start_empty_time;
	uint16_t			flags;
#endif	/* CONFIG_DEBUG_BLK_CGROUP */
#endif	/* CONFIG_CFQ_GROUP_IOSCHED */
};

/* Per-cgroup data */
struct cfq_group_data {
	/* must be the first member */
	struct blkcg_policy_data cpd;

	unsigned int weight;
	unsigned int leaf_weight;
};

/* This is per cgroup per device grouping structure */
struct cfq_group {
	/* must be the first member */
	struct blkg_policy_data pd;

	/* group service_tree member */
	struct rb_node rb_node;

	/* group service_tree key */
	u64 vdisktime;

	/*
	 * The number of active cfqgs and sum of their weights under this
	 * cfqg. This covers this cfqg's leaf_weight and all children's
	 * weights, but does not cover weights of further descendants.
	 *
	 * If a cfqg is on the service tree, it's active. An active cfqg
	 * also activates its parent and contributes to the children_weight
	 * of the parent.
	 */
	int nr_active;
	unsigned int children_weight;

	/*
	 * vfraction is the fraction of vdisktime that the tasks in this
	 * cfqg are entitled to. This is determined by compounding the
	 * ratios walking up from this cfqg to the root.
	 *
	 * It is in fixed point w/ CFQ_SERVICE_SHIFT and the sum of all
	 * vfractions on a service tree is approximately 1. The sum may
	 * deviate a bit due to rounding errors and fluctuations caused by
	 * cfqgs entering and leaving the service tree.
	 */
	unsigned int vfraction;

	/*
	 * There are two weights - (internal) weight is the weight of this
	 * cfqg against the sibling cfqgs. leaf_weight is the weight of
	 * this cfqg against the child cfqgs. For the root cfqg, both
	 * weights are kept in sync for backward compatibility.
	 */
	unsigned int weight;
	unsigned int new_weight;
	unsigned int dev_weight;

	unsigned int leaf_weight;
	unsigned int new_leaf_weight;
	unsigned int dev_leaf_weight;

	/* number of cfqq currently on this group */
	int nr_cfqq;

	/*
	 * Per group busy queues average. Useful for workload slice calc. We
	 * create the array for each prio class but at run time it is used
	 * only for RT and BE class and slot for IDLE class remains unused.
	 * This is primarily done to avoid confusion and a gcc warning.
	 */
	unsigned int busy_queues_avg[CFQ_PRIO_NR];
	/*
	 * rr lists of queues with requests. We maintain service trees for
	 * RT and BE classes. These trees are subdivided in subclasses
	 * of SYNC, SYNC_NOIDLE and ASYNC based on workload type. For IDLE
	 * class there is no subclassification and all the cfq queues go on
	 * a single tree service_tree_idle.
	 * Counts are embedded in the cfq_rb_root
	 */
	struct cfq_rb_root service_trees[2][3];
	struct cfq_rb_root service_tree_idle;

	u64 saved_wl_slice;
	enum wl_type_t saved_wl_type;
	enum wl_class_t saved_wl_class;

	/* number of requests that are on the dispatch list or inside driver */
	int dispatched;
	struct cfq_ttime ttime;
	struct cfqg_stats stats;	/* stats for this cfqg */

	/* async queue for each priority case */
	struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR];
	struct cfq_queue *async_idle_cfqq;

};

struct cfq_io_cq {
	struct io_cq		icq;		/* must be the first member */
	struct cfq_queue	*cfqq[2];
	struct cfq_ttime	ttime;
	int			ioprio;		/* the current ioprio */
#ifdef CONFIG_CFQ_GROUP_IOSCHED
	uint64_t		blkcg_serial_nr; /* the current blkcg serial */
#endif
};

/*
 * Per block device queue structure
 */
struct cfq_data {
	struct request_queue *queue;
	/* Root service tree for cfq_groups */
	struct cfq_rb_root grp_service_tree;
	struct cfq_group *root_group;

	/*
	 * The priority currently being served
	 */
	enum wl_class_t serving_wl_class;
	enum wl_type_t serving_wl_type;
	u64 workload_expires;
	struct cfq_group *serving_group;

	/*
	 * Each priority tree is sorted by next_request position. These
	 * trees are used when determining if two or more queues are
	 * interleaving requests (see cfq_close_cooperator).
	 */
	struct rb_root prio_trees[CFQ_PRIO_LISTS];

	unsigned int busy_queues;
	unsigned int busy_sync_queues;

	int rq_in_driver;
	int rq_in_flight[2];

	/*
	 * queue-depth detection
	 */
	int rq_queued;
	int hw_tag;
	/*
	 * hw_tag can be
	 * -1 => indeterminate, (cfq will behave as if NCQ is present, to allow better detection)
	 *  1 => NCQ is present (hw_tag_est_depth is the estimated max depth)
	 *  0 => no NCQ
	 */
	int hw_tag_est_depth;
	unsigned int hw_tag_samples;

	/*
	 * idle window management
	 */
	struct hrtimer idle_slice_timer;
	struct work_struct unplug_work;

	struct cfq_queue *active_queue;
	struct cfq_io_cq *active_cic;

	sector_t last_position;

	/*
	 * tunables, see top of file
	 */
	unsigned int cfq_quantum;
	unsigned int cfq_back_penalty;
	unsigned int cfq_back_max;
	unsigned int cfq_slice_async_rq;
	unsigned int cfq_latency;
	u64 cfq_fifo_expire[2];
	u64 cfq_slice[2];
	u64 cfq_slice_idle;
	u64 cfq_group_idle;
	u64 cfq_target_latency;

	/*
	 * Fallback dummy cfqq for extreme OOM conditions
	 */
	struct cfq_queue oom_cfqq;

	u64 last_delayed_sync;
};

static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd);
static void cfq_put_queue(struct cfq_queue *cfqq);

static struct cfq_rb_root *st_for(struct cfq_group *cfqg,
				  enum wl_class_t class,
				  enum wl_type_t type)
{
	if (!cfqg)
		return NULL;

	if (class == IDLE_WORKLOAD)
		return &cfqg->service_tree_idle;

	return &cfqg->service_trees[class][type];
}

enum cfqq_state_flags {
	CFQ_CFQQ_FLAG_on_rr = 0,	/* on round-robin busy list */
	CFQ_CFQQ_FLAG_wait_request,	/* waiting for a request */
	CFQ_CFQQ_FLAG_must_dispatch,	/* must be allowed a dispatch */
	CFQ_CFQQ_FLAG_must_alloc_slice,	/* per-slice must_alloc flag */
	CFQ_CFQQ_FLAG_fifo_expire,	/* FIFO checked in this slice */
	CFQ_CFQQ_FLAG_idle_window,	/* slice idling enabled */
	CFQ_CFQQ_FLAG_prio_changed,	/* task priority has changed */
	CFQ_CFQQ_FLAG_slice_new,	/* no requests dispatched in slice */
	CFQ_CFQQ_FLAG_sync,		/* synchronous queue */
	CFQ_CFQQ_FLAG_coop,		/* cfqq is shared */
	CFQ_CFQQ_FLAG_split_coop,	/* shared cfqq will be split */
	CFQ_CFQQ_FLAG_deep,		/* sync cfqq experienced large depth */
	CFQ_CFQQ_FLAG_wait_busy,	/* Waiting for next request */
};

#define CFQ_CFQQ_FNS(name)						\
static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq)		\
{									\
	(cfqq)->flags |= (1 << CFQ_CFQQ_FLAG_##name);			\
}									\
static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq)	\
{									\
	(cfqq)->flags &= ~(1 << CFQ_CFQQ_FLAG_##name);			\
}									\
static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq)		\
{									\
	return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0;	\
}

CFQ_CFQQ_FNS(on_rr);
CFQ_CFQQ_FNS(wait_request);
CFQ_CFQQ_FNS(must_dispatch);
CFQ_CFQQ_FNS(must_alloc_slice);
CFQ_CFQQ_FNS(fifo_expire);
CFQ_CFQQ_FNS(idle_window);
CFQ_CFQQ_FNS(prio_changed);
CFQ_CFQQ_FNS(slice_new);
CFQ_CFQQ_FNS(sync);
CFQ_CFQQ_FNS(coop);
CFQ_CFQQ_FNS(split_coop);
CFQ_CFQQ_FNS(deep);
CFQ_CFQQ_FNS(wait_busy);
#undef CFQ_CFQQ_FNS
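
/*
 * Each CFQ_CFQQ_FNS(name) use above expands to three helpers, e.g. for
 * "on_rr": cfq_mark_cfqq_on_rr(), cfq_clear_cfqq_on_rr() and
 * cfq_cfqq_on_rr(), which set, clear and test the corresponding bit in
 * cfqq->flags.
 */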

#if defined(CONFIG_CFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)

/* cfqg stats flags */
enum cfqg_stats_flags {
	CFQG_stats_waiting = 0,
	CFQG_stats_idling,
	CFQG_stats_empty,
};

#define CFQG_FLAG_FNS(name)						\
static inline void cfqg_stats_mark_##name(struct cfqg_stats *stats)	\
{									\
	stats->flags |= (1 << CFQG_stats_##name);			\
}									\
static inline void cfqg_stats_clear_##name(struct cfqg_stats *stats)	\
{									\
	stats->flags &= ~(1 << CFQG_stats_##name);			\
}									\
static inline int cfqg_stats_##name(struct cfqg_stats *stats)		\
{									\
	return (stats->flags & (1 << CFQG_stats_##name)) != 0;		\
}									\

CFQG_FLAG_FNS(waiting)
CFQG_FLAG_FNS(idling)
CFQG_FLAG_FNS(empty)
#undef CFQG_FLAG_FNS

/* This should be called with the queue_lock held. */
static void cfqg_stats_update_group_wait_time(struct cfqg_stats *stats)
{
	unsigned long long now;

	if (!cfqg_stats_waiting(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_group_wait_time))
		blkg_stat_add(&stats->group_wait_time,
			      now - stats->start_group_wait_time);
	cfqg_stats_clear_waiting(stats);
}

/* This should be called with the queue_lock held. */
static void cfqg_stats_set_start_group_wait_time(struct cfq_group *cfqg,
						 struct cfq_group *curr_cfqg)
{
	struct cfqg_stats *stats = &cfqg->stats;

	if (cfqg_stats_waiting(stats))
		return;
	if (cfqg == curr_cfqg)
		return;
	stats->start_group_wait_time = sched_clock();
	cfqg_stats_mark_waiting(stats);
}

/* This should be called with the queue_lock held. */
static void cfqg_stats_end_empty_time(struct cfqg_stats *stats)
{
	unsigned long long now;

	if (!cfqg_stats_empty(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_empty_time))
		blkg_stat_add(&stats->empty_time,
			      now - stats->start_empty_time);
	cfqg_stats_clear_empty(stats);
}

static void cfqg_stats_update_dequeue(struct cfq_group *cfqg)
{
	blkg_stat_add(&cfqg->stats.dequeue, 1);
}

static void cfqg_stats_set_start_empty_time(struct cfq_group *cfqg)
{
	struct cfqg_stats *stats = &cfqg->stats;

	if (blkg_rwstat_total(&stats->queued))
		return;

	/*
	 * group is already marked empty. This can happen if cfqq got new
	 * request in parent group and moved to this group while being added
	 * to service tree. Just ignore the event and move on.
	 */
	if (cfqg_stats_empty(stats))
		return;

	stats->start_empty_time = sched_clock();
	cfqg_stats_mark_empty(stats);
}

static void cfqg_stats_update_idle_time(struct cfq_group *cfqg)
{
	struct cfqg_stats *stats = &cfqg->stats;

	if (cfqg_stats_idling(stats)) {
		unsigned long long now = sched_clock();

		if (time_after64(now, stats->start_idle_time))
			blkg_stat_add(&stats->idle_time,
				      now - stats->start_idle_time);
		cfqg_stats_clear_idling(stats);
	}
}

static void cfqg_stats_set_start_idle_time(struct cfq_group *cfqg)
{
	struct cfqg_stats *stats = &cfqg->stats;

	BUG_ON(cfqg_stats_idling(stats));

	stats->start_idle_time = sched_clock();
	cfqg_stats_mark_idling(stats);
}

static void cfqg_stats_update_avg_queue_size(struct cfq_group *cfqg)
{
	struct cfqg_stats *stats = &cfqg->stats;

	blkg_stat_add(&stats->avg_queue_size_sum,
		      blkg_rwstat_total(&stats->queued));
	blkg_stat_add(&stats->avg_queue_size_samples, 1);
	cfqg_stats_update_group_wait_time(stats);
}

#else	/* CONFIG_CFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */

static inline void cfqg_stats_set_start_group_wait_time(struct cfq_group *cfqg, struct cfq_group *curr_cfqg) { }
static inline void cfqg_stats_end_empty_time(struct cfqg_stats *stats) { }
static inline void cfqg_stats_update_dequeue(struct cfq_group *cfqg) { }
static inline void cfqg_stats_set_start_empty_time(struct cfq_group *cfqg) { }
static inline void cfqg_stats_update_idle_time(struct cfq_group *cfqg) { }
static inline void cfqg_stats_set_start_idle_time(struct cfq_group *cfqg) { }
static inline void cfqg_stats_update_avg_queue_size(struct cfq_group *cfqg) { }

#endif	/* CONFIG_CFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */

#ifdef CONFIG_CFQ_GROUP_IOSCHED

static inline struct cfq_group *pd_to_cfqg(struct blkg_policy_data *pd)
{
	return pd ? container_of(pd, struct cfq_group, pd) : NULL;
}

static struct cfq_group_data
*cpd_to_cfqgd(struct blkcg_policy_data *cpd)
{
	return cpd ? container_of(cpd, struct cfq_group_data, cpd) : NULL;
}

static inline struct blkcg_gq *cfqg_to_blkg(struct cfq_group *cfqg)
{
	return pd_to_blkg(&cfqg->pd);
}

static struct blkcg_policy blkcg_policy_cfq;

static inline struct cfq_group *blkg_to_cfqg(struct blkcg_gq *blkg)
{
	return pd_to_cfqg(blkg_to_pd(blkg, &blkcg_policy_cfq));
}

static struct cfq_group_data *blkcg_to_cfqgd(struct blkcg *blkcg)
{
	return cpd_to_cfqgd(blkcg_to_cpd(blkcg, &blkcg_policy_cfq));
}

static inline struct cfq_group *cfqg_parent(struct cfq_group *cfqg)
{
	struct blkcg_gq *pblkg = cfqg_to_blkg(cfqg)->parent;

	return pblkg ? blkg_to_cfqg(pblkg) : NULL;
}

static inline bool cfqg_is_descendant(struct cfq_group *cfqg,
				      struct cfq_group *ancestor)
{
	return cgroup_is_descendant(cfqg_to_blkg(cfqg)->blkcg->css.cgroup,
				    cfqg_to_blkg(ancestor)->blkcg->css.cgroup);
}

static inline void cfqg_get(struct cfq_group *cfqg)
{
	return blkg_get(cfqg_to_blkg(cfqg));
}

static inline void cfqg_put(struct cfq_group *cfqg)
{
	return blkg_put(cfqg_to_blkg(cfqg));
}

#define cfq_log_cfqq(cfqd, cfqq, fmt, args...)	do {			\
	char __pbuf[128];						\
									\
	blkg_path(cfqg_to_blkg((cfqq)->cfqg), __pbuf, sizeof(__pbuf));	\
	blk_add_trace_msg((cfqd)->queue, "cfq%d%c%c %s " fmt, (cfqq)->pid, \
			  cfq_cfqq_sync((cfqq)) ? 'S' : 'A',		\
			  cfqq_type((cfqq)) == SYNC_NOIDLE_WORKLOAD ? 'N' : ' ',\
			  __pbuf, ##args);				\
} while (0)

#define cfq_log_cfqg(cfqd, cfqg, fmt, args...)	do {			\
	char __pbuf[128];						\
									\
	blkg_path(cfqg_to_blkg(cfqg), __pbuf, sizeof(__pbuf));		\
	blk_add_trace_msg((cfqd)->queue, "%s " fmt, __pbuf, ##args);	\
} while (0)

static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg,
					    struct cfq_group *curr_cfqg,
					    unsigned int op)
{
	blkg_rwstat_add(&cfqg->stats.queued, op, 1);
	cfqg_stats_end_empty_time(&cfqg->stats);
	cfqg_stats_set_start_group_wait_time(cfqg, curr_cfqg);
}

static inline void cfqg_stats_update_timeslice_used(struct cfq_group *cfqg,
			uint64_t time, unsigned long unaccounted_time)
{
	blkg_stat_add(&cfqg->stats.time, time);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	blkg_stat_add(&cfqg->stats.unaccounted_time, unaccounted_time);
#endif
}

static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg,
					       unsigned int op)
{
	blkg_rwstat_add(&cfqg->stats.queued, op, -1);
}

static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg,
					       unsigned int op)
{
	blkg_rwstat_add(&cfqg->stats.merged, op, 1);
}

static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
			uint64_t start_time, uint64_t io_start_time,
			unsigned int op)
{
	struct cfqg_stats *stats = &cfqg->stats;
	unsigned long long now = sched_clock();

	if (time_after64(now, io_start_time))
		blkg_rwstat_add(&stats->service_time, op, now - io_start_time);
	if (time_after64(io_start_time, start_time))
		blkg_rwstat_add(&stats->wait_time, op,
				io_start_time - start_time);
}

/* @stats = 0 */
static void cfqg_stats_reset(struct cfqg_stats *stats)
{
	/* queued stats shouldn't be cleared */
	blkg_rwstat_reset(&stats->merged);
	blkg_rwstat_reset(&stats->service_time);
	blkg_rwstat_reset(&stats->wait_time);
	blkg_stat_reset(&stats->time);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	blkg_stat_reset(&stats->unaccounted_time);
	blkg_stat_reset(&stats->avg_queue_size_sum);
	blkg_stat_reset(&stats->avg_queue_size_samples);
	blkg_stat_reset(&stats->dequeue);
	blkg_stat_reset(&stats->group_wait_time);
	blkg_stat_reset(&stats->idle_time);
	blkg_stat_reset(&stats->empty_time);
#endif
}

/* @to += @from */
static void cfqg_stats_add_aux(struct cfqg_stats *to, struct cfqg_stats *from)
{
	/* queued stats shouldn't be cleared */
	blkg_rwstat_add_aux(&to->merged, &from->merged);
	blkg_rwstat_add_aux(&to->service_time, &from->service_time);
	blkg_rwstat_add_aux(&to->wait_time, &from->wait_time);
	blkg_stat_add_aux(&to->time, &from->time);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	blkg_stat_add_aux(&to->unaccounted_time, &from->unaccounted_time);
	blkg_stat_add_aux(&to->avg_queue_size_sum, &from->avg_queue_size_sum);
	blkg_stat_add_aux(&to->avg_queue_size_samples, &from->avg_queue_size_samples);
	blkg_stat_add_aux(&to->dequeue, &from->dequeue);
	blkg_stat_add_aux(&to->group_wait_time, &from->group_wait_time);
	blkg_stat_add_aux(&to->idle_time, &from->idle_time);
	blkg_stat_add_aux(&to->empty_time, &from->empty_time);
#endif
}

/*
 * Transfer @cfqg's stats to its parent's aux counts so that the ancestors'
 * recursive stats can still account for the amount used by this cfqg after
 * it's gone.
 */
static void cfqg_stats_xfer_dead(struct cfq_group *cfqg)
{
	struct cfq_group *parent = cfqg_parent(cfqg);

	lockdep_assert_held(cfqg_to_blkg(cfqg)->q->queue_lock);

	if (unlikely(!parent))
		return;

	cfqg_stats_add_aux(&parent->stats, &cfqg->stats);
	cfqg_stats_reset(&cfqg->stats);
}

#else	/* CONFIG_CFQ_GROUP_IOSCHED */

static inline struct cfq_group *cfqg_parent(struct cfq_group *cfqg) { return NULL; }
static inline bool cfqg_is_descendant(struct cfq_group *cfqg,
				      struct cfq_group *ancestor)
{
	return true;
}
static inline void cfqg_get(struct cfq_group *cfqg) { }
static inline void cfqg_put(struct cfq_group *cfqg) { }

#define cfq_log_cfqq(cfqd, cfqq, fmt, args...)	\
	blk_add_trace_msg((cfqd)->queue, "cfq%d%c%c " fmt, (cfqq)->pid,	\
			cfq_cfqq_sync((cfqq)) ? 'S' : 'A',		\
			cfqq_type((cfqq)) == SYNC_NOIDLE_WORKLOAD ? 'N' : ' ',\
				##args)
#define cfq_log_cfqg(cfqd, cfqg, fmt, args...)		do {} while (0)

static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg,
			struct cfq_group *curr_cfqg, unsigned int op) { }
static inline void cfqg_stats_update_timeslice_used(struct cfq_group *cfqg,
			uint64_t time, unsigned long unaccounted_time) { }
static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg,
			unsigned int op) { }
static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg,
			unsigned int op) { }
static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
			uint64_t start_time, uint64_t io_start_time,
			unsigned int op) { }

#endif	/* CONFIG_CFQ_GROUP_IOSCHED */

#define cfq_log(cfqd, fmt, args...)	\
	blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)

/* Traverses through cfq group service trees */
#define for_each_cfqg_st(cfqg, i, j, st) \
	for (i = 0; i <= IDLE_WORKLOAD; i++) \
		for (j = 0, st = i < IDLE_WORKLOAD ? &cfqg->service_trees[i][j]\
			: &cfqg->service_tree_idle; \
			(i < IDLE_WORKLOAD && j <= SYNC_WORKLOAD) || \
			(i == IDLE_WORKLOAD && j == 0); \
			j++, st = i < IDLE_WORKLOAD ? \
			&cfqg->service_trees[i][j]: NULL) \

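/*
 * The loop above visits the BE and RT trees in ASYNC, SYNC_NOIDLE, SYNC
 * order and finally the single service_tree_idle, i.e. all seven
 * cfq_rb_roots embedded in a cfq_group.
 */
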
static inline bool cfq_io_thinktime_big(struct cfq_data *cfqd,
	struct cfq_ttime *ttime, bool group_idle)
{
	u64 slice;
	if (!sample_valid(ttime->ttime_samples))
		return false;
	if (group_idle)
		slice = cfqd->cfq_group_idle;
	else
		slice = cfqd->cfq_slice_idle;
	return ttime->ttime_mean > slice;
}

static inline bool iops_mode(struct cfq_data *cfqd)
{
	/*
	 * If we are not idling on queues and it is a NCQ drive, parallel
	 * execution of requests is on and measuring time is not possible
	 * in most of the cases until and unless we drive shallower queue
	 * depths and that becomes a performance bottleneck. In such cases
	 * switch to start providing fairness in terms of number of IOs.
	 */
	if (!cfqd->cfq_slice_idle && cfqd->hw_tag)
		return true;
	else
		return false;
}
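
/*
 * When iops_mode() is true, cfq_group_served() below charges the group by
 * the number of requests dispatched (cfqq->slice_dispatch) instead of by
 * elapsed time.
 */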

static inline enum wl_class_t cfqq_class(struct cfq_queue *cfqq)
{
	if (cfq_class_idle(cfqq))
		return IDLE_WORKLOAD;
	if (cfq_class_rt(cfqq))
		return RT_WORKLOAD;
	return BE_WORKLOAD;
}


static enum wl_type_t cfqq_type(struct cfq_queue *cfqq)
{
	if (!cfq_cfqq_sync(cfqq))
		return ASYNC_WORKLOAD;
	if (!cfq_cfqq_idle_window(cfqq))
		return SYNC_NOIDLE_WORKLOAD;
	return SYNC_WORKLOAD;
}

static inline int cfq_group_busy_queues_wl(enum wl_class_t wl_class,
					struct cfq_data *cfqd,
					struct cfq_group *cfqg)
{
	if (wl_class == IDLE_WORKLOAD)
		return cfqg->service_tree_idle.count;

	return cfqg->service_trees[wl_class][ASYNC_WORKLOAD].count +
		cfqg->service_trees[wl_class][SYNC_NOIDLE_WORKLOAD].count +
		cfqg->service_trees[wl_class][SYNC_WORKLOAD].count;
}

static inline int cfqg_busy_async_queues(struct cfq_data *cfqd,
					struct cfq_group *cfqg)
{
	return cfqg->service_trees[RT_WORKLOAD][ASYNC_WORKLOAD].count +
		cfqg->service_trees[BE_WORKLOAD][ASYNC_WORKLOAD].count;
}

static void cfq_dispatch_insert(struct request_queue *, struct request *);
static struct cfq_queue *cfq_get_queue(struct cfq_data *cfqd, bool is_sync,
				       struct cfq_io_cq *cic, struct bio *bio);

static inline struct cfq_io_cq *icq_to_cic(struct io_cq *icq)
{
	/* cic->icq is the first member, %NULL will convert to %NULL */
	return container_of(icq, struct cfq_io_cq, icq);
}

static inline struct cfq_io_cq *cfq_cic_lookup(struct cfq_data *cfqd,
					       struct io_context *ioc)
{
	if (ioc)
		return icq_to_cic(ioc_lookup_icq(ioc, cfqd->queue));
	return NULL;
}

static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_cq *cic, bool is_sync)
{
	return cic->cfqq[is_sync];
}

static inline void cic_set_cfqq(struct cfq_io_cq *cic, struct cfq_queue *cfqq,
				bool is_sync)
{
	cic->cfqq[is_sync] = cfqq;
}

static inline struct cfq_data *cic_to_cfqd(struct cfq_io_cq *cic)
{
	return cic->icq.q->elevator->elevator_data;
}

/*
 * Schedule a run of the queue if there are requests pending and no one in
 * the driver that will restart queueing.
 */
static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
{
	if (cfqd->busy_queues) {
		cfq_log(cfqd, "schedule dispatch");
		kblockd_schedule_work(&cfqd->unplug_work);
	}
}

/*
 * Scale schedule slice based on io priority. Use the sync time slice only
 * if a queue is marked sync and has sync io queued. A sync queue with async
 * io only should not get the full sync slice length.
 */
static inline u64 cfq_prio_slice(struct cfq_data *cfqd, bool sync,
				 unsigned short prio)
{
	u64 base_slice = cfqd->cfq_slice[sync];
	u64 slice = div_u64(base_slice, CFQ_SLICE_SCALE);

	WARN_ON(prio >= IOPRIO_BE_NR);

	return base_slice + (slice * (4 - prio));
}

static inline u64
cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio);
}
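
/*
 * With the default 100ms sync slice and CFQ_SLICE_SCALE of 5 this gives a
 * prio 0 queue 100 + 20 * 4 = 180ms, the default prio 4 queue 100ms and a
 * prio 7 queue 100 - 20 * 3 = 40ms.
 */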

/**
 * cfqg_scale_charge - scale disk time charge according to cfqg weight
 * @charge: disk time being charged
 * @vfraction: vfraction of the cfqg, fixed point w/ CFQ_SERVICE_SHIFT
 *
 * Scale @charge according to @vfraction, which is in range (0, 1]. The
 * scaling is inversely proportional.
 *
 *	scaled = charge / vfraction
 *
 * The result is also in fixed point w/ CFQ_SERVICE_SHIFT.
 */
static inline u64 cfqg_scale_charge(u64 charge,
				    unsigned int vfraction)
{
	u64 c = charge << CFQ_SERVICE_SHIFT;	/* make it fixed point */

	/* charge / vfraction */
	c <<= CFQ_SERVICE_SHIFT;
	return div_u64(c, vfraction);
}
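
/*
 * Example: with CFQ_SERVICE_SHIFT = 12 a vfraction of 4096 is a full
 * share, so an 8ms charge simply becomes 8ms in fixed point; a group
 * holding a quarter share (vfraction 1024) has the same 8ms scaled to 4x
 * as much vdisktime, pushing it further right on the service tree.
 */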

static inline u64 max_vdisktime(u64 min_vdisktime, u64 vdisktime)
{
	s64 delta = (s64)(vdisktime - min_vdisktime);
	if (delta > 0)
		min_vdisktime = vdisktime;

	return min_vdisktime;
}

static inline u64 min_vdisktime(u64 min_vdisktime, u64 vdisktime)
{
	s64 delta = (s64)(vdisktime - min_vdisktime);
	if (delta < 0)
		min_vdisktime = vdisktime;

	return min_vdisktime;
}
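
/*
 * Both helpers above compare through a signed delta, so the ordering
 * stays correct even after the u64 vdisktime values wrap around.
 */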

static void update_min_vdisktime(struct cfq_rb_root *st)
{
	struct cfq_group *cfqg;

	if (st->left) {
		cfqg = rb_entry_cfqg(st->left);
		st->min_vdisktime = max_vdisktime(st->min_vdisktime,
						  cfqg->vdisktime);
	}
}

/*
 * get averaged number of queues of RT/BE priority.
 * average is updated, with a formula that gives more weight to higher numbers,
 * to quickly follow sudden increases and decrease slowly
 */

static inline unsigned cfq_group_get_avg_queues(struct cfq_data *cfqd,
					struct cfq_group *cfqg, bool rt)
{
	unsigned min_q, max_q;
	unsigned mult = cfq_hist_divisor - 1;
	unsigned round = cfq_hist_divisor / 2;
	unsigned busy = cfq_group_busy_queues_wl(rt, cfqd, cfqg);

	min_q = min(cfqg->busy_queues_avg[rt], busy);
	max_q = max(cfqg->busy_queues_avg[rt], busy);
	cfqg->busy_queues_avg[rt] = (mult * max_q + min_q + round) /
		cfq_hist_divisor;
	return cfqg->busy_queues_avg[rt];
}
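
/*
 * With the default cfq_hist_divisor of 4 this computes
 * (3 * max + min + 2) / 4, i.e. the larger of the stored average and the
 * current busy count gets three quarters of the weight, so the average
 * rises quickly when queues arrive and decays slowly when they go away.
 */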

static inline u64
cfq_group_slice(struct cfq_data *cfqd, struct cfq_group *cfqg)
{
	return cfqd->cfq_target_latency * cfqg->vfraction >> CFQ_SERVICE_SHIFT;
}

static inline u64
cfq_scaled_cfqq_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	u64 slice = cfq_prio_to_slice(cfqd, cfqq);
	if (cfqd->cfq_latency) {
		/*
		 * interested queues (we consider only the ones with the same
		 * priority class in the cfq group)
		 */
		unsigned iq = cfq_group_get_avg_queues(cfqd, cfqq->cfqg,
						cfq_class_rt(cfqq));
		u64 sync_slice = cfqd->cfq_slice[1];
		u64 expect_latency = sync_slice * iq;
		u64 group_slice = cfq_group_slice(cfqd, cfqq->cfqg);

		if (expect_latency > group_slice) {
			u64 base_low_slice = 2 * cfqd->cfq_slice_idle;
			u64 low_slice;

			/* scale low_slice according to IO priority
			 * and sync vs async */
			low_slice = div64_u64(base_low_slice*slice, sync_slice);
			low_slice = min(slice, low_slice);
			/* the adapted slice value is scaled to fit all iqs
			 * into the target latency */
			slice = div64_u64(slice*group_slice, expect_latency);
			slice = max(slice, low_slice);
		}
	}
	return slice;
}
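
/*
 * Example with the defaults (target_latency 300ms, slice_sync 100ms,
 * slice_idle 8ms): a group owning the whole device has a group_slice of
 * 300ms, so with six busy queues of the same class expect_latency is
 * 600ms and a prio 4 queue's 100ms slice is scaled down to
 * 100 * 300 / 600 = 50ms, bounded from below by low_slice (here 16ms).
 */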

static inline void
cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	u64 slice = cfq_scaled_cfqq_slice(cfqd, cfqq);
	u64 now = ktime_get_ns();

	cfqq->slice_start = now;
	cfqq->slice_end = now + slice;
	cfqq->allocated_slice = slice;
	cfq_log_cfqq(cfqd, cfqq, "set_slice=%llu", cfqq->slice_end - now);
}

/*
 * We need to wrap this check in cfq_cfqq_slice_new(), since ->slice_end
 * isn't valid until the first request from the dispatch is activated
 * and the slice time set.
 */
static inline bool cfq_slice_used(struct cfq_queue *cfqq)
{
	if (cfq_cfqq_slice_new(cfqq))
		return false;
	if (ktime_get_ns() < cfqq->slice_end)
		return false;

	return true;
}

/*
 * Lifted from AS - choose which of rq1 and rq2 that is best served now.
 * We choose the request that is closest to the head right now. Distance
 * behind the head is penalized and only allowed to a certain extent.
 */
static struct request *
cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2, sector_t last)
{
	sector_t s1, s2, d1 = 0, d2 = 0;
	unsigned long back_max;
#define CFQ_RQ1_WRAP	0x01 /* request 1 wraps */
#define CFQ_RQ2_WRAP	0x02 /* request 2 wraps */
	unsigned wrap = 0; /* bit mask: requests behind the disk head? */

	if (rq1 == NULL || rq1 == rq2)
		return rq2;
	if (rq2 == NULL)
		return rq1;

	if (rq_is_sync(rq1) != rq_is_sync(rq2))
		return rq_is_sync(rq1) ? rq1 : rq2;

	if ((rq1->cmd_flags ^ rq2->cmd_flags) & REQ_PRIO)
		return rq1->cmd_flags & REQ_PRIO ? rq1 : rq2;

	s1 = blk_rq_pos(rq1);
	s2 = blk_rq_pos(rq2);

	/*
	 * by definition, 1KiB is 2 sectors
	 */
	back_max = cfqd->cfq_back_max * 2;

	/*
	 * Strict one way elevator _except_ in the case where we allow
	 * short backward seeks which are biased as twice the cost of a
	 * similar forward seek.
	 */
	if (s1 >= last)
		d1 = s1 - last;
	else if (s1 + back_max >= last)
		d1 = (last - s1) * cfqd->cfq_back_penalty;
	else
		wrap |= CFQ_RQ1_WRAP;

	if (s2 >= last)
		d2 = s2 - last;
	else if (s2 + back_max >= last)
		d2 = (last - s2) * cfqd->cfq_back_penalty;
	else
		wrap |= CFQ_RQ2_WRAP;

	/* Found required data */

	/*
	 * By doing switch() on the bit mask "wrap" we avoid having to
	 * check two variables for all permutations: --> faster!
	 */
	switch (wrap) {
	case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
		if (d1 < d2)
			return rq1;
		else if (d2 < d1)
			return rq2;
		else {
			if (s1 >= s2)
				return rq1;
			else
				return rq2;
		}

	case CFQ_RQ2_WRAP:
		return rq1;
	case CFQ_RQ1_WRAP:
		return rq2;
	case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP): /* both rqs wrapped */
	default:
		/*
		 * Since both rqs are wrapped,
		 * start with the one that's further behind head
		 * (--> only *one* back seek required),
		 * since back seek takes more time than forward.
		 */
		if (s1 <= s2)
			return rq1;
		else
			return rq2;
	}
}
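
/*
 * Example with the default cfq_back_max of 16384 KiB (32768 sectors) and
 * cfq_back_penalty 2: with the head at sector 1000, a request at sector
 * 1200 gets d1 = 200 while one at sector 900 gets d2 = (1000 - 900) * 2
 * = 200; the tie then goes to the higher sector, i.e. the forward request.
 */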

/*
 * The below is leftmost cache rbtree addon
 */
static struct cfq_queue *cfq_rb_first(struct cfq_rb_root *root)
{
	/* Service tree is empty */
	if (!root->count)
		return NULL;

	if (!root->left)
		root->left = rb_first(&root->rb);

	if (root->left)
		return rb_entry(root->left, struct cfq_queue, rb_node);

	return NULL;
}

static struct cfq_group *cfq_rb_first_group(struct cfq_rb_root *root)
{
	if (!root->left)
		root->left = rb_first(&root->rb);

	if (root->left)
		return rb_entry_cfqg(root->left);

	return NULL;
}

static void rb_erase_init(struct rb_node *n, struct rb_root *root)
{
	rb_erase(n, root);
	RB_CLEAR_NODE(n);
}

static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root)
{
	if (root->left == n)
		root->left = NULL;
	rb_erase_init(n, &root->rb);
	--root->count;
}

/*
 * would be nice to take fifo expire time into account as well
 */
static struct request *
cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		  struct request *last)
{
	struct rb_node *rbnext = rb_next(&last->rb_node);
	struct rb_node *rbprev = rb_prev(&last->rb_node);
	struct request *next = NULL, *prev = NULL;

	BUG_ON(RB_EMPTY_NODE(&last->rb_node));

	if (rbprev)
		prev = rb_entry_rq(rbprev);

	if (rbnext)
		next = rb_entry_rq(rbnext);
	else {
		rbnext = rb_first(&cfqq->sort_list);
		if (rbnext && rbnext != &last->rb_node)
			next = rb_entry_rq(rbnext);
	}

	return cfq_choose_req(cfqd, next, prev, blk_rq_pos(last));
}

static u64 cfq_slice_offset(struct cfq_data *cfqd,
			    struct cfq_queue *cfqq)
{
	/*
	 * just an approximation, should be ok.
	 */
	return (cfqq->cfqg->nr_cfqq - 1) * (cfq_prio_slice(cfqd, 1, 0) -
		       cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio));
}

static inline s64
cfqg_key(struct cfq_rb_root *st, struct cfq_group *cfqg)
{
	return cfqg->vdisktime - st->min_vdisktime;
}

static void
__cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
{
	struct rb_node **node = &st->rb.rb_node;
	struct rb_node *parent = NULL;
	struct cfq_group *__cfqg;
	s64 key = cfqg_key(st, cfqg);
	int left = 1;

	while (*node != NULL) {
		parent = *node;
		__cfqg = rb_entry_cfqg(parent);

		if (key < cfqg_key(st, __cfqg))
			node = &parent->rb_left;
		else {
			node = &parent->rb_right;
			left = 0;
		}
	}

	if (left)
		st->left = &cfqg->rb_node;

	rb_link_node(&cfqg->rb_node, parent, node);
	rb_insert_color(&cfqg->rb_node, &st->rb);
}
1294
7b5af5cf
TM
1295/*
1296 * This has to be called only on activation of cfqg
1297 */
1fa8f6d6 1298static void
8184f93e
JT
1299cfq_update_group_weight(struct cfq_group *cfqg)
1300{
3381cb8d 1301 if (cfqg->new_weight) {
8184f93e 1302 cfqg->weight = cfqg->new_weight;
3381cb8d 1303 cfqg->new_weight = 0;
8184f93e 1304 }
e15693ef
TM
1305}
1306
1307static void
1308cfq_update_group_leaf_weight(struct cfq_group *cfqg)
1309{
1310 BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
e71357e1
TH
1311
1312 if (cfqg->new_leaf_weight) {
1313 cfqg->leaf_weight = cfqg->new_leaf_weight;
1314 cfqg->new_leaf_weight = 0;
1315 }
8184f93e
JT
1316}
1317
1318static void
1319cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
1320{
1d3650f7 1321 unsigned int vfr = 1 << CFQ_SERVICE_SHIFT; /* start with 1 */
7918ffb5 1322 struct cfq_group *pos = cfqg;
1d3650f7 1323 struct cfq_group *parent;
7918ffb5
TH
1324 bool propagate;
1325
1326 /* add to the service tree */
8184f93e
JT
1327 BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
1328
7b5af5cf
TM
1329 /*
1330 * Update leaf_weight. We cannot update weight at this point
1331 * because cfqg might already have been activated and is
1332 * contributing its current weight to the parent's child_weight.
1333 */
e15693ef 1334 cfq_update_group_leaf_weight(cfqg);
8184f93e 1335 __cfq_group_service_tree_add(st, cfqg);
7918ffb5
TH
1336
1337 /*
1d3650f7
TH
1338 * Activate @cfqg and calculate the portion of vfraction @cfqg is
1339 * entitled to. vfraction is calculated by walking the tree
1340 * towards the root calculating the fraction it has at each level.
1341 * The compounded ratio is how much vfraction @cfqg owns.
1342 *
1343 * Start with the proportion tasks in this cfqg has against active
1344 * children cfqgs - its leaf_weight against children_weight.
7918ffb5
TH
1345 */
1346 propagate = !pos->nr_active++;
1347 pos->children_weight += pos->leaf_weight;
1d3650f7 1348 vfr = vfr * pos->leaf_weight / pos->children_weight;
7918ffb5 1349
1d3650f7
TH
1350 /*
1351 * Compound ->weight walking up the tree. Both activation and
1352 * vfraction calculation are done in the same loop. Propagation
1353 * stops once an already activated node is met. vfraction
1354 * calculation should always continue to the root.
1355 */
d02f7aa8 1356 while ((parent = cfqg_parent(pos))) {
1d3650f7 1357 if (propagate) {
e15693ef 1358 cfq_update_group_weight(pos);
1d3650f7
TH
1359 propagate = !parent->nr_active++;
1360 parent->children_weight += pos->weight;
1361 }
1362 vfr = vfr * pos->weight / parent->children_weight;
7918ffb5
TH
1363 pos = parent;
1364 }
1d3650f7
TH
1365
1366 cfqg->vfraction = max_t(unsigned, vfr, 1);
8184f93e
JT
1367}
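
/*
 * Example: if this cfqg's leaf_weight is half of its own children_weight
 * and its weight is half of its parent's children_weight (with the parent
 * being the root), the loop above yields
 * vfraction = 4096 * 1/2 * 1/2 = 1024, i.e. a quarter of the service.
 */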

static inline u64 cfq_get_cfqg_vdisktime_delay(struct cfq_data *cfqd)
{
	if (!iops_mode(cfqd))
		return CFQ_SLICE_MODE_GROUP_DELAY;
	else
		return CFQ_IOPS_MODE_GROUP_DELAY;
}

static void
cfq_group_notify_queue_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
{
	struct cfq_rb_root *st = &cfqd->grp_service_tree;
	struct cfq_group *__cfqg;
	struct rb_node *n;

	cfqg->nr_cfqq++;
	if (!RB_EMPTY_NODE(&cfqg->rb_node))
		return;

	/*
	 * Currently put the group at the end. Later implement something
	 * so that groups get lesser vtime based on their weights, so that
	 * a group does not lose everything if it was not continuously
	 * backlogged.
	 */
	n = rb_last(&st->rb);
	if (n) {
		__cfqg = rb_entry_cfqg(n);
		cfqg->vdisktime = __cfqg->vdisktime +
			cfq_get_cfqg_vdisktime_delay(cfqd);
	} else
		cfqg->vdisktime = st->min_vdisktime;
	cfq_group_service_tree_add(st, cfqg);
}

static void
cfq_group_service_tree_del(struct cfq_rb_root *st, struct cfq_group *cfqg)
{
	struct cfq_group *pos = cfqg;
	bool propagate;

	/*
	 * Undo activation from cfq_group_service_tree_add(). Deactivate
	 * @cfqg and propagate deactivation upwards.
	 */
	propagate = !--pos->nr_active;
	pos->children_weight -= pos->leaf_weight;

	while (propagate) {
		struct cfq_group *parent = cfqg_parent(pos);

		/* @pos has 0 nr_active at this point */
		WARN_ON_ONCE(pos->children_weight);
		pos->vfraction = 0;

		if (!parent)
			break;

		propagate = !--parent->nr_active;
		parent->children_weight -= pos->weight;
		pos = parent;
	}

	/* remove from the service tree */
	if (!RB_EMPTY_NODE(&cfqg->rb_node))
		cfq_rb_erase(&cfqg->rb_node, st);
}

static void
cfq_group_notify_queue_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
{
	struct cfq_rb_root *st = &cfqd->grp_service_tree;

	BUG_ON(cfqg->nr_cfqq < 1);
	cfqg->nr_cfqq--;

	/* If there are other cfq queues under this group, don't delete it */
	if (cfqg->nr_cfqq)
		return;

	cfq_log_cfqg(cfqd, cfqg, "del_from_rr group");
	cfq_group_service_tree_del(st, cfqg);
	cfqg->saved_wl_slice = 0;
	cfqg_stats_update_dequeue(cfqg);
}

static inline u64 cfq_cfqq_slice_usage(struct cfq_queue *cfqq,
				       u64 *unaccounted_time)
{
	u64 slice_used;
	u64 now = ktime_get_ns();

	/*
	 * Queue got expired before even a single request completed or
	 * got expired immediately after first request completion.
	 */
	if (!cfqq->slice_start || cfqq->slice_start == now) {
		/*
		 * Also charge the seek time incurred to the group, otherwise
		 * if there are multiple queues in the group, each can dispatch
		 * a single request on seeky media and cause lots of seek time
		 * and group will never know it.
		 */
		slice_used = max_t(u64, (now - cfqq->dispatch_start),
					jiffies_to_nsecs(1));
	} else {
		slice_used = now - cfqq->slice_start;
		if (slice_used > cfqq->allocated_slice) {
			*unaccounted_time = slice_used - cfqq->allocated_slice;
			slice_used = cfqq->allocated_slice;
		}
		if (cfqq->slice_start > cfqq->dispatch_start)
			*unaccounted_time += cfqq->slice_start -
					cfqq->dispatch_start;
	}

	return slice_used;
}

static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
				struct cfq_queue *cfqq)
{
	struct cfq_rb_root *st = &cfqd->grp_service_tree;
	u64 used_sl, charge, unaccounted_sl = 0;
	int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg)
			- cfqg->service_tree_idle.count;
	unsigned int vfr;
	u64 now = ktime_get_ns();

	BUG_ON(nr_sync < 0);
	used_sl = charge = cfq_cfqq_slice_usage(cfqq, &unaccounted_sl);

	if (iops_mode(cfqd))
		charge = cfqq->slice_dispatch;
	else if (!cfq_cfqq_sync(cfqq) && !nr_sync)
		charge = cfqq->allocated_slice;

	/*
	 * Can't update vdisktime while on service tree and cfqg->vfraction
	 * is valid only while on it. Cache vfr, leave the service tree,
	 * update vdisktime and go back on. The re-addition to the tree
	 * will also update the weights as necessary.
	 */
	vfr = cfqg->vfraction;
	cfq_group_service_tree_del(st, cfqg);
	cfqg->vdisktime += cfqg_scale_charge(charge, vfr);
	cfq_group_service_tree_add(st, cfqg);

	/* This group is being expired. Save the context */
	if (cfqd->workload_expires > now) {
		cfqg->saved_wl_slice = cfqd->workload_expires - now;
		cfqg->saved_wl_type = cfqd->serving_wl_type;
		cfqg->saved_wl_class = cfqd->serving_wl_class;
	} else
		cfqg->saved_wl_slice = 0;

	cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime,
			st->min_vdisktime);
	cfq_log_cfqq(cfqq->cfqd, cfqq,
		     "sl_used=%llu disp=%llu charge=%llu iops=%u sect=%lu",
		     used_sl, cfqq->slice_dispatch, charge,
		     iops_mode(cfqd), cfqq->nr_sectors);
	cfqg_stats_update_timeslice_used(cfqg, used_sl, unaccounted_sl);
	cfqg_stats_set_start_empty_time(cfqg);
}

/**
 * cfq_init_cfqg_base - initialize base part of a cfq_group
 * @cfqg: cfq_group to initialize
 *
 * Initialize the base part which is used whether %CONFIG_CFQ_GROUP_IOSCHED
 * is enabled or not.
 */
static void cfq_init_cfqg_base(struct cfq_group *cfqg)
{
	struct cfq_rb_root *st;
	int i, j;

	for_each_cfqg_st(cfqg, i, j, st)
		*st = CFQ_RB_ROOT;
	RB_CLEAR_NODE(&cfqg->rb_node);

	cfqg->ttime.last_end_request = ktime_get_ns();
}

#ifdef CONFIG_CFQ_GROUP_IOSCHED
static int __cfq_set_weight(struct cgroup_subsys_state *css, u64 val,
			    bool on_dfl, bool reset_dev, bool is_leaf_weight);

static void cfqg_stats_exit(struct cfqg_stats *stats)
{
	blkg_rwstat_exit(&stats->merged);
	blkg_rwstat_exit(&stats->service_time);
	blkg_rwstat_exit(&stats->wait_time);
	blkg_rwstat_exit(&stats->queued);
	blkg_stat_exit(&stats->time);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	blkg_stat_exit(&stats->unaccounted_time);
	blkg_stat_exit(&stats->avg_queue_size_sum);
	blkg_stat_exit(&stats->avg_queue_size_samples);
	blkg_stat_exit(&stats->dequeue);
	blkg_stat_exit(&stats->group_wait_time);
	blkg_stat_exit(&stats->idle_time);
	blkg_stat_exit(&stats->empty_time);
#endif
}

static int cfqg_stats_init(struct cfqg_stats *stats, gfp_t gfp)
{
	if (blkg_rwstat_init(&stats->merged, gfp) ||
	    blkg_rwstat_init(&stats->service_time, gfp) ||
	    blkg_rwstat_init(&stats->wait_time, gfp) ||
	    blkg_rwstat_init(&stats->queued, gfp) ||
	    blkg_stat_init(&stats->time, gfp))
		goto err;

#ifdef CONFIG_DEBUG_BLK_CGROUP
	if (blkg_stat_init(&stats->unaccounted_time, gfp) ||
	    blkg_stat_init(&stats->avg_queue_size_sum, gfp) ||
	    blkg_stat_init(&stats->avg_queue_size_samples, gfp) ||
	    blkg_stat_init(&stats->dequeue, gfp) ||
	    blkg_stat_init(&stats->group_wait_time, gfp) ||
	    blkg_stat_init(&stats->idle_time, gfp) ||
	    blkg_stat_init(&stats->empty_time, gfp))
		goto err;
#endif
	return 0;
err:
	cfqg_stats_exit(stats);
	return -ENOMEM;
}

static struct blkcg_policy_data *cfq_cpd_alloc(gfp_t gfp)
{
	struct cfq_group_data *cgd;

	cgd = kzalloc(sizeof(*cgd), gfp);
	if (!cgd)
		return NULL;
	return &cgd->cpd;
}
1609
81437648 1610static void cfq_cpd_init(struct blkcg_policy_data *cpd)
e48453c3 1611{
81437648 1612 struct cfq_group_data *cgd = cpd_to_cfqgd(cpd);
9e10a130 1613 unsigned int weight = cgroup_subsys_on_dfl(io_cgrp_subsys) ?
69d7fde5 1614 CGROUP_WEIGHT_DFL : CFQ_WEIGHT_LEGACY_DFL;
e48453c3 1615
69d7fde5
TH
1616 if (cpd_to_blkcg(cpd) == &blkcg_root)
1617 weight *= 2;
1618
1619 cgd->weight = weight;
1620 cgd->leaf_weight = weight;
e48453c3
AA
1621}
1622
e4a9bde9
TH
1623static void cfq_cpd_free(struct blkcg_policy_data *cpd)
1624{
1625 kfree(cpd_to_cfqgd(cpd));
1626}
1627
69d7fde5
TH
1628static void cfq_cpd_bind(struct blkcg_policy_data *cpd)
1629{
1630 struct blkcg *blkcg = cpd_to_blkcg(cpd);
9e10a130 1631 bool on_dfl = cgroup_subsys_on_dfl(io_cgrp_subsys);
69d7fde5
TH
1632 unsigned int weight = on_dfl ? CGROUP_WEIGHT_DFL : CFQ_WEIGHT_LEGACY_DFL;
1633
1634 if (blkcg == &blkcg_root)
1635 weight *= 2;
1636
1637 WARN_ON_ONCE(__cfq_set_weight(&blkcg->css, weight, on_dfl, true, false));
1638 WARN_ON_ONCE(__cfq_set_weight(&blkcg->css, weight, on_dfl, true, true));
1639}
1640
001bea73
TH
1641static struct blkg_policy_data *cfq_pd_alloc(gfp_t gfp, int node)
1642{
b2ce2643
TH
1643 struct cfq_group *cfqg;
1644
1645 cfqg = kzalloc_node(sizeof(*cfqg), gfp, node);
1646 if (!cfqg)
1647 return NULL;
1648
1649 cfq_init_cfqg_base(cfqg);
24bdb8ef
TH
1650 if (cfqg_stats_init(&cfqg->stats, gfp)) {
1651 kfree(cfqg);
1652 return NULL;
1653 }
b2ce2643
TH
1654
1655 return &cfqg->pd;
001bea73
TH
1656}
1657
a9520cd6 1658static void cfq_pd_init(struct blkg_policy_data *pd)
f469a7b4 1659{
a9520cd6
TH
1660 struct cfq_group *cfqg = pd_to_cfqg(pd);
1661 struct cfq_group_data *cgd = blkcg_to_cfqgd(pd->blkg->blkcg);
25fb5169 1662
e48453c3
AA
1663 cfqg->weight = cgd->weight;
1664 cfqg->leaf_weight = cgd->leaf_weight;
25fb5169
VG
1665}
1666
a9520cd6 1667static void cfq_pd_offline(struct blkg_policy_data *pd)
0b39920b 1668{
a9520cd6 1669 struct cfq_group *cfqg = pd_to_cfqg(pd);
60a83707
TH
1670 int i;
1671
1672 for (i = 0; i < IOPRIO_BE_NR; i++) {
1673 if (cfqg->async_cfqq[0][i])
1674 cfq_put_queue(cfqg->async_cfqq[0][i]);
1675 if (cfqg->async_cfqq[1][i])
1676 cfq_put_queue(cfqg->async_cfqq[1][i]);
1677 }
1678
1679 if (cfqg->async_idle_cfqq)
1680 cfq_put_queue(cfqg->async_idle_cfqq);
1681
0b39920b
TH
1682 /*
1683 * @blkg is going offline and will be ignored by
1684 * blkg_[rw]stat_recursive_sum(). Transfer stats to the parent so
1685 * that they don't get lost. If IOs complete after this point, the
1686 * stats for them will be lost. Oh well...
1687 */
60a83707 1688 cfqg_stats_xfer_dead(cfqg);
0b39920b
TH
1689}
1690
001bea73
TH
1691static void cfq_pd_free(struct blkg_policy_data *pd)
1692{
24bdb8ef
TH
1693 struct cfq_group *cfqg = pd_to_cfqg(pd);
1694
1695 cfqg_stats_exit(&cfqg->stats);
1696 return kfree(cfqg);
001bea73
TH
1697}
1698
a9520cd6 1699static void cfq_pd_reset_stats(struct blkg_policy_data *pd)
689665af 1700{
a9520cd6 1701 struct cfq_group *cfqg = pd_to_cfqg(pd);
689665af
TH
1702
1703 cfqg_stats_reset(&cfqg->stats);
25fb5169
VG
1704}
1705
ae118896
TH
1706static struct cfq_group *cfq_lookup_cfqg(struct cfq_data *cfqd,
1707 struct blkcg *blkcg)
25fb5169 1708{
ae118896 1709 struct blkcg_gq *blkg;
f469a7b4 1710
ae118896
TH
1711 blkg = blkg_lookup(blkcg, cfqd->queue);
1712 if (likely(blkg))
1713 return blkg_to_cfqg(blkg);
1714 return NULL;
25fb5169
VG
1715}
1716
1717static void cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg)
1718{
25fb5169 1719 cfqq->cfqg = cfqg;
b1c35769 1720 /* cfqq reference on cfqg */
eb7d8c07 1721 cfqg_get(cfqg);
b1c35769
VG
1722}
1723
f95a04af
TH
1724static u64 cfqg_prfill_weight_device(struct seq_file *sf,
1725 struct blkg_policy_data *pd, int off)
60c2bc2d 1726{
f95a04af 1727 struct cfq_group *cfqg = pd_to_cfqg(pd);
3381cb8d
TH
1728
1729 if (!cfqg->dev_weight)
60c2bc2d 1730 return 0;
f95a04af 1731 return __blkg_prfill_u64(sf, pd, cfqg->dev_weight);
60c2bc2d
TH
1732}
1733
2da8ca82 1734static int cfqg_print_weight_device(struct seq_file *sf, void *v)
60c2bc2d 1735{
2da8ca82
TH
1736 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1737 cfqg_prfill_weight_device, &blkcg_policy_cfq,
1738 0, false);
60c2bc2d
TH
1739 return 0;
1740}
1741
e71357e1
TH
1742static u64 cfqg_prfill_leaf_weight_device(struct seq_file *sf,
1743 struct blkg_policy_data *pd, int off)
1744{
1745 struct cfq_group *cfqg = pd_to_cfqg(pd);
1746
1747 if (!cfqg->dev_leaf_weight)
1748 return 0;
1749 return __blkg_prfill_u64(sf, pd, cfqg->dev_leaf_weight);
1750}
1751
2da8ca82 1752static int cfqg_print_leaf_weight_device(struct seq_file *sf, void *v)
e71357e1 1753{
2da8ca82
TH
1754 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1755 cfqg_prfill_leaf_weight_device, &blkcg_policy_cfq,
1756 0, false);
e71357e1
TH
1757 return 0;
1758}
1759
2da8ca82 1760static int cfq_print_weight(struct seq_file *sf, void *v)
60c2bc2d 1761{
e48453c3 1762 struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
9470e4a6
JA
1763 struct cfq_group_data *cgd = blkcg_to_cfqgd(blkcg);
1764 unsigned int val = 0;
e48453c3 1765
9470e4a6
JA
1766 if (cgd)
1767 val = cgd->weight;
1768
1769 seq_printf(sf, "%u\n", val);
60c2bc2d
TH
1770 return 0;
1771}
1772
2da8ca82 1773static int cfq_print_leaf_weight(struct seq_file *sf, void *v)
e71357e1 1774{
e48453c3 1775 struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
9470e4a6
JA
1776 struct cfq_group_data *cgd = blkcg_to_cfqgd(blkcg);
1777 unsigned int val = 0;
1778
1779 if (cgd)
1780 val = cgd->leaf_weight;
e48453c3 1781
9470e4a6 1782 seq_printf(sf, "%u\n", val);
e71357e1
TH
1783 return 0;
1784}
1785
451af504
TH
1786static ssize_t __cfqg_set_weight_device(struct kernfs_open_file *of,
1787 char *buf, size_t nbytes, loff_t off,
2ee867dc 1788 bool on_dfl, bool is_leaf_weight)
60c2bc2d 1789{
69d7fde5
TH
1790 unsigned int min = on_dfl ? CGROUP_WEIGHT_MIN : CFQ_WEIGHT_LEGACY_MIN;
1791 unsigned int max = on_dfl ? CGROUP_WEIGHT_MAX : CFQ_WEIGHT_LEGACY_MAX;
451af504 1792 struct blkcg *blkcg = css_to_blkcg(of_css(of));
60c2bc2d 1793 struct blkg_conf_ctx ctx;
3381cb8d 1794 struct cfq_group *cfqg;
e48453c3 1795 struct cfq_group_data *cfqgd;
60c2bc2d 1796 int ret;
36aa9e5f 1797 u64 v;
60c2bc2d 1798
3c798398 1799 ret = blkg_conf_prep(blkcg, &blkcg_policy_cfq, buf, &ctx);
60c2bc2d
TH
1800 if (ret)
1801 return ret;
1802
2ee867dc
TH
1803 if (sscanf(ctx.body, "%llu", &v) == 1) {
1804 /* require "default" on dfl */
1805 ret = -ERANGE;
1806 if (!v && on_dfl)
1807 goto out_finish;
1808 } else if (!strcmp(strim(ctx.body), "default")) {
1809 v = 0;
1810 } else {
1811 ret = -EINVAL;
36aa9e5f 1812 goto out_finish;
2ee867dc 1813 }
36aa9e5f 1814
3381cb8d 1815 cfqg = blkg_to_cfqg(ctx.blkg);
e48453c3 1816 cfqgd = blkcg_to_cfqgd(blkcg);
ae994ea9 1817
20386ce0 1818 ret = -ERANGE;
69d7fde5 1819 if (!v || (v >= min && v <= max)) {
e71357e1 1820 if (!is_leaf_weight) {
36aa9e5f
TH
1821 cfqg->dev_weight = v;
1822 cfqg->new_weight = v ?: cfqgd->weight;
e71357e1 1823 } else {
36aa9e5f
TH
1824 cfqg->dev_leaf_weight = v;
1825 cfqg->new_leaf_weight = v ?: cfqgd->leaf_weight;
e71357e1 1826 }
60c2bc2d
TH
1827 ret = 0;
1828 }
36aa9e5f 1829out_finish:
60c2bc2d 1830 blkg_conf_finish(&ctx);
451af504 1831 return ret ?: nbytes;
60c2bc2d
TH
1832}
1833
451af504
TH
1834static ssize_t cfqg_set_weight_device(struct kernfs_open_file *of,
1835 char *buf, size_t nbytes, loff_t off)
e71357e1 1836{
2ee867dc 1837 return __cfqg_set_weight_device(of, buf, nbytes, off, false, false);
e71357e1
TH
1838}
1839
451af504
TH
1840static ssize_t cfqg_set_leaf_weight_device(struct kernfs_open_file *of,
1841 char *buf, size_t nbytes, loff_t off)
e71357e1 1842{
2ee867dc 1843 return __cfqg_set_weight_device(of, buf, nbytes, off, false, true);
e71357e1
TH
1844}
1845
dd165eb3 1846static int __cfq_set_weight(struct cgroup_subsys_state *css, u64 val,
69d7fde5 1847 bool on_dfl, bool reset_dev, bool is_leaf_weight)
60c2bc2d 1848{
69d7fde5
TH
1849 unsigned int min = on_dfl ? CGROUP_WEIGHT_MIN : CFQ_WEIGHT_LEGACY_MIN;
1850 unsigned int max = on_dfl ? CGROUP_WEIGHT_MAX : CFQ_WEIGHT_LEGACY_MAX;
182446d0 1851 struct blkcg *blkcg = css_to_blkcg(css);
3c798398 1852 struct blkcg_gq *blkg;
e48453c3 1853 struct cfq_group_data *cfqgd;
ae994ea9 1854 int ret = 0;
60c2bc2d 1855
69d7fde5
TH
1856 if (val < min || val > max)
1857 return -ERANGE;
60c2bc2d
TH
1858
1859 spin_lock_irq(&blkcg->lock);
e48453c3 1860 cfqgd = blkcg_to_cfqgd(blkcg);
ae994ea9
JA
1861 if (!cfqgd) {
1862 ret = -EINVAL;
1863 goto out;
1864 }
e71357e1
TH
1865
1866 if (!is_leaf_weight)
e48453c3 1867 cfqgd->weight = val;
e71357e1 1868 else
e48453c3 1869 cfqgd->leaf_weight = val;
60c2bc2d 1870
b67bfe0d 1871 hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
3381cb8d 1872 struct cfq_group *cfqg = blkg_to_cfqg(blkg);
60c2bc2d 1873
e71357e1
TH
1874 if (!cfqg)
1875 continue;
1876
1877 if (!is_leaf_weight) {
69d7fde5
TH
1878 if (reset_dev)
1879 cfqg->dev_weight = 0;
e71357e1 1880 if (!cfqg->dev_weight)
e48453c3 1881 cfqg->new_weight = cfqgd->weight;
e71357e1 1882 } else {
69d7fde5
TH
1883 if (reset_dev)
1884 cfqg->dev_leaf_weight = 0;
e71357e1 1885 if (!cfqg->dev_leaf_weight)
e48453c3 1886 cfqg->new_leaf_weight = cfqgd->leaf_weight;
e71357e1 1887 }
60c2bc2d
TH
1888 }
1889
ae994ea9 1890out:
60c2bc2d 1891 spin_unlock_irq(&blkcg->lock);
ae994ea9 1892 return ret;
60c2bc2d
TH
1893}
1894
182446d0
TH
1895static int cfq_set_weight(struct cgroup_subsys_state *css, struct cftype *cft,
1896 u64 val)
e71357e1 1897{
69d7fde5 1898 return __cfq_set_weight(css, val, false, false, false);
e71357e1
TH
1899}
1900
182446d0
TH
1901static int cfq_set_leaf_weight(struct cgroup_subsys_state *css,
1902 struct cftype *cft, u64 val)
e71357e1 1903{
69d7fde5 1904 return __cfq_set_weight(css, val, false, false, true);
e71357e1
TH
1905}
1906
2da8ca82 1907static int cfqg_print_stat(struct seq_file *sf, void *v)
5bc4afb1 1908{
2da8ca82
TH
1909 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_stat,
1910 &blkcg_policy_cfq, seq_cft(sf)->private, false);
5bc4afb1
TH
1911 return 0;
1912}
1913
2da8ca82 1914static int cfqg_print_rwstat(struct seq_file *sf, void *v)
5bc4afb1 1915{
2da8ca82
TH
1916 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_rwstat,
1917 &blkcg_policy_cfq, seq_cft(sf)->private, true);
5bc4afb1
TH
1918 return 0;
1919}
1920
43114018
TH
1921static u64 cfqg_prfill_stat_recursive(struct seq_file *sf,
1922 struct blkg_policy_data *pd, int off)
1923{
f12c74ca
TH
1924 u64 sum = blkg_stat_recursive_sum(pd_to_blkg(pd),
1925 &blkcg_policy_cfq, off);
43114018
TH
1926 return __blkg_prfill_u64(sf, pd, sum);
1927}
1928
1929static u64 cfqg_prfill_rwstat_recursive(struct seq_file *sf,
1930 struct blkg_policy_data *pd, int off)
1931{
f12c74ca
TH
1932 struct blkg_rwstat sum = blkg_rwstat_recursive_sum(pd_to_blkg(pd),
1933 &blkcg_policy_cfq, off);
43114018
TH
1934 return __blkg_prfill_rwstat(sf, pd, &sum);
1935}
1936
2da8ca82 1937static int cfqg_print_stat_recursive(struct seq_file *sf, void *v)
43114018 1938{
2da8ca82
TH
1939 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1940 cfqg_prfill_stat_recursive, &blkcg_policy_cfq,
1941 seq_cft(sf)->private, false);
43114018
TH
1942 return 0;
1943}
1944
2da8ca82 1945static int cfqg_print_rwstat_recursive(struct seq_file *sf, void *v)
43114018 1946{
2da8ca82
TH
1947 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1948 cfqg_prfill_rwstat_recursive, &blkcg_policy_cfq,
1949 seq_cft(sf)->private, true);
43114018
TH
1950 return 0;
1951}
1952
702747ca
TH
1953static u64 cfqg_prfill_sectors(struct seq_file *sf, struct blkg_policy_data *pd,
1954 int off)
1955{
1956 u64 sum = blkg_rwstat_total(&pd->blkg->stat_bytes);
1957
1958 return __blkg_prfill_u64(sf, pd, sum >> 9);
1959}
1960
1961static int cfqg_print_stat_sectors(struct seq_file *sf, void *v)
1962{
1963 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1964 cfqg_prfill_sectors, &blkcg_policy_cfq, 0, false);
1965 return 0;
1966}
1967
1968static u64 cfqg_prfill_sectors_recursive(struct seq_file *sf,
1969 struct blkg_policy_data *pd, int off)
1970{
1971 struct blkg_rwstat tmp = blkg_rwstat_recursive_sum(pd->blkg, NULL,
1972 offsetof(struct blkcg_gq, stat_bytes));
1973 u64 sum = atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_READ]) +
1974 atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_WRITE]);
1975
1976 return __blkg_prfill_u64(sf, pd, sum >> 9);
1977}
1978
1979static int cfqg_print_stat_sectors_recursive(struct seq_file *sf, void *v)
1980{
1981 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1982 cfqg_prfill_sectors_recursive, &blkcg_policy_cfq, 0,
1983 false);
1984 return 0;
1985}
1986
60c2bc2d 1987#ifdef CONFIG_DEBUG_BLK_CGROUP
f95a04af
TH
1988static u64 cfqg_prfill_avg_queue_size(struct seq_file *sf,
1989 struct blkg_policy_data *pd, int off)
60c2bc2d 1990{
f95a04af 1991 struct cfq_group *cfqg = pd_to_cfqg(pd);
155fead9 1992 u64 samples = blkg_stat_read(&cfqg->stats.avg_queue_size_samples);
60c2bc2d
TH
1993 u64 v = 0;
1994
1995 if (samples) {
155fead9 1996 v = blkg_stat_read(&cfqg->stats.avg_queue_size_sum);
f3cff25f 1997 v = div64_u64(v, samples);
60c2bc2d 1998 }
f95a04af 1999 __blkg_prfill_u64(sf, pd, v);
60c2bc2d
TH
2000 return 0;
2001}
2002
2003/* print avg_queue_size */
2da8ca82 2004static int cfqg_print_avg_queue_size(struct seq_file *sf, void *v)
60c2bc2d 2005{
2da8ca82
TH
2006 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
2007 cfqg_prfill_avg_queue_size, &blkcg_policy_cfq,
2008 0, false);
60c2bc2d
TH
2009 return 0;
2010}
2011#endif /* CONFIG_DEBUG_BLK_CGROUP */
2012
880f50e2 2013static struct cftype cfq_blkcg_legacy_files[] = {
1d3650f7 2014 /* on root, weight is mapped to leaf_weight */
60c2bc2d
TH
2015 {
2016 .name = "weight_device",
1d3650f7 2017 .flags = CFTYPE_ONLY_ON_ROOT,
2da8ca82 2018 .seq_show = cfqg_print_leaf_weight_device,
451af504 2019 .write = cfqg_set_leaf_weight_device,
60c2bc2d
TH
2020 },
2021 {
2022 .name = "weight",
1d3650f7 2023 .flags = CFTYPE_ONLY_ON_ROOT,
2da8ca82 2024 .seq_show = cfq_print_leaf_weight,
1d3650f7 2025 .write_u64 = cfq_set_leaf_weight,
60c2bc2d 2026 },
e71357e1 2027
1d3650f7 2028 /* no such mapping necessary for !roots */
60c2bc2d
TH
2029 {
2030 .name = "weight_device",
1d3650f7 2031 .flags = CFTYPE_NOT_ON_ROOT,
2da8ca82 2032 .seq_show = cfqg_print_weight_device,
451af504 2033 .write = cfqg_set_weight_device,
60c2bc2d
TH
2034 },
2035 {
2036 .name = "weight",
1d3650f7 2037 .flags = CFTYPE_NOT_ON_ROOT,
2da8ca82 2038 .seq_show = cfq_print_weight,
3381cb8d 2039 .write_u64 = cfq_set_weight,
60c2bc2d 2040 },
e71357e1 2041
e71357e1
TH
2042 {
2043 .name = "leaf_weight_device",
2da8ca82 2044 .seq_show = cfqg_print_leaf_weight_device,
451af504 2045 .write = cfqg_set_leaf_weight_device,
e71357e1
TH
2046 },
2047 {
2048 .name = "leaf_weight",
2da8ca82 2049 .seq_show = cfq_print_leaf_weight,
e71357e1
TH
2050 .write_u64 = cfq_set_leaf_weight,
2051 },
2052
43114018 2053 /* statistics covering only the tasks in the cfqg */
60c2bc2d
TH
2054 {
2055 .name = "time",
5bc4afb1 2056 .private = offsetof(struct cfq_group, stats.time),
2da8ca82 2057 .seq_show = cfqg_print_stat,
60c2bc2d
TH
2058 },
2059 {
2060 .name = "sectors",
702747ca 2061 .seq_show = cfqg_print_stat_sectors,
60c2bc2d
TH
2062 },
2063 {
2064 .name = "io_service_bytes",
77ea7338
TH
2065 .private = (unsigned long)&blkcg_policy_cfq,
2066 .seq_show = blkg_print_stat_bytes,
60c2bc2d
TH
2067 },
2068 {
2069 .name = "io_serviced",
77ea7338
TH
2070 .private = (unsigned long)&blkcg_policy_cfq,
2071 .seq_show = blkg_print_stat_ios,
60c2bc2d
TH
2072 },
2073 {
2074 .name = "io_service_time",
5bc4afb1 2075 .private = offsetof(struct cfq_group, stats.service_time),
2da8ca82 2076 .seq_show = cfqg_print_rwstat,
60c2bc2d
TH
2077 },
2078 {
2079 .name = "io_wait_time",
5bc4afb1 2080 .private = offsetof(struct cfq_group, stats.wait_time),
2da8ca82 2081 .seq_show = cfqg_print_rwstat,
60c2bc2d
TH
2082 },
2083 {
2084 .name = "io_merged",
5bc4afb1 2085 .private = offsetof(struct cfq_group, stats.merged),
2da8ca82 2086 .seq_show = cfqg_print_rwstat,
60c2bc2d
TH
2087 },
2088 {
2089 .name = "io_queued",
5bc4afb1 2090 .private = offsetof(struct cfq_group, stats.queued),
2da8ca82 2091 .seq_show = cfqg_print_rwstat,
60c2bc2d 2092 },
43114018
TH
2093
 2094 /* the same statistics, which cover the cfqg and its descendants */
2095 {
2096 .name = "time_recursive",
2097 .private = offsetof(struct cfq_group, stats.time),
2da8ca82 2098 .seq_show = cfqg_print_stat_recursive,
43114018
TH
2099 },
2100 {
2101 .name = "sectors_recursive",
702747ca 2102 .seq_show = cfqg_print_stat_sectors_recursive,
43114018
TH
2103 },
2104 {
2105 .name = "io_service_bytes_recursive",
77ea7338
TH
2106 .private = (unsigned long)&blkcg_policy_cfq,
2107 .seq_show = blkg_print_stat_bytes_recursive,
43114018
TH
2108 },
2109 {
2110 .name = "io_serviced_recursive",
77ea7338
TH
2111 .private = (unsigned long)&blkcg_policy_cfq,
2112 .seq_show = blkg_print_stat_ios_recursive,
43114018
TH
2113 },
2114 {
2115 .name = "io_service_time_recursive",
2116 .private = offsetof(struct cfq_group, stats.service_time),
2da8ca82 2117 .seq_show = cfqg_print_rwstat_recursive,
43114018
TH
2118 },
2119 {
2120 .name = "io_wait_time_recursive",
2121 .private = offsetof(struct cfq_group, stats.wait_time),
2da8ca82 2122 .seq_show = cfqg_print_rwstat_recursive,
43114018
TH
2123 },
2124 {
2125 .name = "io_merged_recursive",
2126 .private = offsetof(struct cfq_group, stats.merged),
2da8ca82 2127 .seq_show = cfqg_print_rwstat_recursive,
43114018
TH
2128 },
2129 {
2130 .name = "io_queued_recursive",
2131 .private = offsetof(struct cfq_group, stats.queued),
2da8ca82 2132 .seq_show = cfqg_print_rwstat_recursive,
43114018 2133 },
60c2bc2d
TH
2134#ifdef CONFIG_DEBUG_BLK_CGROUP
2135 {
2136 .name = "avg_queue_size",
2da8ca82 2137 .seq_show = cfqg_print_avg_queue_size,
60c2bc2d
TH
2138 },
2139 {
2140 .name = "group_wait_time",
5bc4afb1 2141 .private = offsetof(struct cfq_group, stats.group_wait_time),
2da8ca82 2142 .seq_show = cfqg_print_stat,
60c2bc2d
TH
2143 },
2144 {
2145 .name = "idle_time",
5bc4afb1 2146 .private = offsetof(struct cfq_group, stats.idle_time),
2da8ca82 2147 .seq_show = cfqg_print_stat,
60c2bc2d
TH
2148 },
2149 {
2150 .name = "empty_time",
5bc4afb1 2151 .private = offsetof(struct cfq_group, stats.empty_time),
2da8ca82 2152 .seq_show = cfqg_print_stat,
60c2bc2d
TH
2153 },
2154 {
2155 .name = "dequeue",
5bc4afb1 2156 .private = offsetof(struct cfq_group, stats.dequeue),
2da8ca82 2157 .seq_show = cfqg_print_stat,
60c2bc2d
TH
2158 },
2159 {
2160 .name = "unaccounted_time",
5bc4afb1 2161 .private = offsetof(struct cfq_group, stats.unaccounted_time),
2da8ca82 2162 .seq_show = cfqg_print_stat,
60c2bc2d
TH
2163 },
2164#endif /* CONFIG_DEBUG_BLK_CGROUP */
2165 { } /* terminate */
2166};
2ee867dc
TH
2167
2168static int cfq_print_weight_on_dfl(struct seq_file *sf, void *v)
2169{
2170 struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
2171 struct cfq_group_data *cgd = blkcg_to_cfqgd(blkcg);
2172
2173 seq_printf(sf, "default %u\n", cgd->weight);
2174 blkcg_print_blkgs(sf, blkcg, cfqg_prfill_weight_device,
2175 &blkcg_policy_cfq, 0, false);
2176 return 0;
2177}
2178
2179static ssize_t cfq_set_weight_on_dfl(struct kernfs_open_file *of,
2180 char *buf, size_t nbytes, loff_t off)
2181{
2182 char *endp;
2183 int ret;
2184 u64 v;
2185
2186 buf = strim(buf);
2187
2188 /* "WEIGHT" or "default WEIGHT" sets the default weight */
2189 v = simple_strtoull(buf, &endp, 0);
2190 if (*endp == '\0' || sscanf(buf, "default %llu", &v) == 1) {
69d7fde5 2191 ret = __cfq_set_weight(of_css(of), v, true, false, false);
2ee867dc
TH
2192 return ret ?: nbytes;
2193 }
2194
2195 /* "MAJ:MIN WEIGHT" */
2196 return __cfqg_set_weight_device(of, buf, nbytes, off, true, false);
2197}
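/*
 * Illustrative note, not part of the original source: on the default
 * hierarchy the "weight" file handled above accepts three input forms,
 * matching the parsing in cfq_set_weight_on_dfl() and
 * __cfqg_set_weight_device(). The device numbers below are only examples:
 *
 *   "200"           - set the default weight to 200
 *   "default 200"   - same as above
 *   "8:16 300"      - per-device weight of 300 for device 8:16
 *   "8:16 default"  - drop the per-device override for 8:16
 *
 * On this hierarchy a weight of 0 is rejected; "default" must be spelled
 * out instead.
 */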
2198
2199static struct cftype cfq_blkcg_files[] = {
2200 {
2201 .name = "weight",
2202 .flags = CFTYPE_NOT_ON_ROOT,
2203 .seq_show = cfq_print_weight_on_dfl,
2204 .write = cfq_set_weight_on_dfl,
2205 },
2206 { } /* terminate */
2207};
2208
25fb5169 2209#else /* GROUP_IOSCHED */
ae118896
TH
2210static struct cfq_group *cfq_lookup_cfqg(struct cfq_data *cfqd,
2211 struct blkcg *blkcg)
25fb5169 2212{
f51b802c 2213 return cfqd->root_group;
25fb5169 2214}
7f1dc8a2 2215
25fb5169
VG
2216static inline void
2217cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg) {
2218 cfqq->cfqg = cfqg;
2219}
2220
2221#endif /* GROUP_IOSCHED */
2222
498d3aa2 2223/*
c0324a02 2224 * The cfqd->service_trees holds all pending cfq_queues that have
498d3aa2
JA
2225 * requests waiting to be processed. It is sorted in the order that
2226 * we will service the queues.
2227 */
a36e71f9 2228static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
a6151c3a 2229 bool add_front)
d9e7620e 2230{
0871714e
JA
2231 struct rb_node **p, *parent;
2232 struct cfq_queue *__cfqq;
9a7f38c4 2233 u64 rb_key;
34b98d03 2234 struct cfq_rb_root *st;
498d3aa2 2235 int left;
dae739eb 2236 int new_cfqq = 1;
9a7f38c4 2237 u64 now = ktime_get_ns();
ae30c286 2238
34b98d03 2239 st = st_for(cfqq->cfqg, cfqq_class(cfqq), cfqq_type(cfqq));
0871714e
JA
2240 if (cfq_class_idle(cfqq)) {
2241 rb_key = CFQ_IDLE_DELAY;
34b98d03 2242 parent = rb_last(&st->rb);
0871714e
JA
2243 if (parent && parent != &cfqq->rb_node) {
2244 __cfqq = rb_entry(parent, struct cfq_queue, rb_node);
2245 rb_key += __cfqq->rb_key;
2246 } else
9a7f38c4 2247 rb_key += now;
0871714e 2248 } else if (!add_front) {
b9c8946b
JA
2249 /*
2250 * Get our rb key offset. Subtract any residual slice
 2251	 * value carried over from the last service. A negative resid
2252 * count indicates slice overrun, and this should position
2253 * the next service time further away in the tree.
2254 */
9a7f38c4 2255 rb_key = cfq_slice_offset(cfqd, cfqq) + now;
b9c8946b 2256 rb_key -= cfqq->slice_resid;
edd75ffd 2257 cfqq->slice_resid = 0;
48e025e6 2258 } else {
9a7f38c4 2259 rb_key = -NSEC_PER_SEC;
34b98d03 2260 __cfqq = cfq_rb_first(st);
9a7f38c4 2261 rb_key += __cfqq ? __cfqq->rb_key : now;
48e025e6 2262 }
1da177e4 2263
d9e7620e 2264 if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
dae739eb 2265 new_cfqq = 0;
99f9628a 2266 /*
d9e7620e 2267 * same position, nothing more to do
99f9628a 2268 */
34b98d03 2269 if (rb_key == cfqq->rb_key && cfqq->service_tree == st)
d9e7620e 2270 return;
1da177e4 2271
aa6f6a3d
CZ
2272 cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
2273 cfqq->service_tree = NULL;
1da177e4 2274 }
d9e7620e 2275
498d3aa2 2276 left = 1;
0871714e 2277 parent = NULL;
34b98d03
VG
2278 cfqq->service_tree = st;
2279 p = &st->rb.rb_node;
d9e7620e
JA
2280 while (*p) {
2281 parent = *p;
2282 __cfqq = rb_entry(parent, struct cfq_queue, rb_node);
2283
0c534e0a 2284 /*
c0324a02 2285 * sort by key, that represents service time.
0c534e0a 2286 */
9a7f38c4 2287 if (rb_key < __cfqq->rb_key)
1f23f121 2288 p = &parent->rb_left;
c0324a02 2289 else {
1f23f121 2290 p = &parent->rb_right;
cc09e299 2291 left = 0;
c0324a02 2292 }
d9e7620e
JA
2293 }
2294
cc09e299 2295 if (left)
34b98d03 2296 st->left = &cfqq->rb_node;
cc09e299 2297
d9e7620e
JA
2298 cfqq->rb_key = rb_key;
2299 rb_link_node(&cfqq->rb_node, parent, p);
34b98d03
VG
2300 rb_insert_color(&cfqq->rb_node, &st->rb);
2301 st->count++;
20359f27 2302 if (add_front || !new_cfqq)
dae739eb 2303 return;
8184f93e 2304 cfq_group_notify_queue_add(cfqd, cfqq->cfqg);
1da177e4
LT
2305}
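/*
 * Illustrative summary of the rb_key scheme above, not part of the
 * original source (the ordering follows directly from the code):
 *
 *   - add_front:   rb_key = (leftmost key, or now) - NSEC_PER_SEC, so the
 *                  queue sorts ahead of everything currently on the tree.
 *   - idle class:  rb_key = (rightmost key, or now) + CFQ_IDLE_DELAY, so
 *                  idle queues always sort behind everything else.
 *   - otherwise:   rb_key = now + slice_offset - slice_resid; a queue that
 *                  overran its slice carries a negative resid, which pushes
 *                  its key (and thus its next service) further out.
 */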
2306
a36e71f9 2307static struct cfq_queue *
f2d1f0ae
JA
2308cfq_prio_tree_lookup(struct cfq_data *cfqd, struct rb_root *root,
2309 sector_t sector, struct rb_node **ret_parent,
2310 struct rb_node ***rb_link)
a36e71f9 2311{
a36e71f9
JA
2312 struct rb_node **p, *parent;
2313 struct cfq_queue *cfqq = NULL;
2314
2315 parent = NULL;
2316 p = &root->rb_node;
2317 while (*p) {
2318 struct rb_node **n;
2319
2320 parent = *p;
2321 cfqq = rb_entry(parent, struct cfq_queue, p_node);
2322
2323 /*
2324 * Sort strictly based on sector. Smallest to the left,
2325 * largest to the right.
2326 */
2e46e8b2 2327 if (sector > blk_rq_pos(cfqq->next_rq))
a36e71f9 2328 n = &(*p)->rb_right;
2e46e8b2 2329 else if (sector < blk_rq_pos(cfqq->next_rq))
a36e71f9
JA
2330 n = &(*p)->rb_left;
2331 else
2332 break;
2333 p = n;
3ac6c9f8 2334 cfqq = NULL;
a36e71f9
JA
2335 }
2336
2337 *ret_parent = parent;
2338 if (rb_link)
2339 *rb_link = p;
3ac6c9f8 2340 return cfqq;
a36e71f9
JA
2341}
2342
2343static void cfq_prio_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2344{
a36e71f9
JA
2345 struct rb_node **p, *parent;
2346 struct cfq_queue *__cfqq;
2347
f2d1f0ae
JA
2348 if (cfqq->p_root) {
2349 rb_erase(&cfqq->p_node, cfqq->p_root);
2350 cfqq->p_root = NULL;
2351 }
a36e71f9
JA
2352
2353 if (cfq_class_idle(cfqq))
2354 return;
2355 if (!cfqq->next_rq)
2356 return;
2357
f2d1f0ae 2358 cfqq->p_root = &cfqd->prio_trees[cfqq->org_ioprio];
2e46e8b2
TH
2359 __cfqq = cfq_prio_tree_lookup(cfqd, cfqq->p_root,
2360 blk_rq_pos(cfqq->next_rq), &parent, &p);
3ac6c9f8
JA
2361 if (!__cfqq) {
2362 rb_link_node(&cfqq->p_node, parent, p);
f2d1f0ae
JA
2363 rb_insert_color(&cfqq->p_node, cfqq->p_root);
2364 } else
2365 cfqq->p_root = NULL;
a36e71f9
JA
2366}
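/*
 * Illustrative note, not part of the original source: cfqd->prio_trees[]
 * holds one rbtree per org_ioprio, keyed by blk_rq_pos(cfqq->next_rq).
 * cfq_prio_tree_lookup()/cfq_prio_tree_add() maintain it, and cfqq_close()
 * later walks it to find another queue whose next request starts at (or
 * near) cfqd->last_position - a candidate "close cooperator".
 */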
2367
498d3aa2
JA
2368/*
2369 * Update cfqq's position in the service tree.
2370 */
edd75ffd 2371static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq)
6d048f53 2372{
6d048f53
JA
2373 /*
2374 * Resorting requires the cfqq to be on the RR list already.
2375 */
a36e71f9 2376 if (cfq_cfqq_on_rr(cfqq)) {
edd75ffd 2377 cfq_service_tree_add(cfqd, cfqq, 0);
a36e71f9
JA
2378 cfq_prio_tree_add(cfqd, cfqq);
2379 }
6d048f53
JA
2380}
2381
1da177e4
LT
2382/*
2383 * add to busy list of queues for service, trying to be fair in ordering
22e2c507 2384 * the pending list according to last request service
1da177e4 2385 */
febffd61 2386static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1da177e4 2387{
7b679138 2388 cfq_log_cfqq(cfqd, cfqq, "add_to_rr");
3b18152c
JA
2389 BUG_ON(cfq_cfqq_on_rr(cfqq));
2390 cfq_mark_cfqq_on_rr(cfqq);
1da177e4 2391 cfqd->busy_queues++;
ef8a41df
SL
2392 if (cfq_cfqq_sync(cfqq))
2393 cfqd->busy_sync_queues++;
1da177e4 2394
edd75ffd 2395 cfq_resort_rr_list(cfqd, cfqq);
1da177e4
LT
2396}
2397
498d3aa2
JA
2398/*
2399 * Called when the cfqq no longer has requests pending, remove it from
2400 * the service tree.
2401 */
febffd61 2402static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1da177e4 2403{
7b679138 2404 cfq_log_cfqq(cfqd, cfqq, "del_from_rr");
3b18152c
JA
2405 BUG_ON(!cfq_cfqq_on_rr(cfqq));
2406 cfq_clear_cfqq_on_rr(cfqq);
1da177e4 2407
aa6f6a3d
CZ
2408 if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
2409 cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
2410 cfqq->service_tree = NULL;
2411 }
f2d1f0ae
JA
2412 if (cfqq->p_root) {
2413 rb_erase(&cfqq->p_node, cfqq->p_root);
2414 cfqq->p_root = NULL;
2415 }
d9e7620e 2416
8184f93e 2417 cfq_group_notify_queue_del(cfqd, cfqq->cfqg);
1da177e4
LT
2418 BUG_ON(!cfqd->busy_queues);
2419 cfqd->busy_queues--;
ef8a41df
SL
2420 if (cfq_cfqq_sync(cfqq))
2421 cfqd->busy_sync_queues--;
1da177e4
LT
2422}
2423
2424/*
2425 * rb tree support functions
2426 */
febffd61 2427static void cfq_del_rq_rb(struct request *rq)
1da177e4 2428{
5e705374 2429 struct cfq_queue *cfqq = RQ_CFQQ(rq);
5e705374 2430 const int sync = rq_is_sync(rq);
1da177e4 2431
b4878f24
JA
2432 BUG_ON(!cfqq->queued[sync]);
2433 cfqq->queued[sync]--;
1da177e4 2434
5e705374 2435 elv_rb_del(&cfqq->sort_list, rq);
1da177e4 2436
f04a6424
VG
2437 if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list)) {
2438 /*
2439 * Queue will be deleted from service tree when we actually
2440 * expire it later. Right now just remove it from prio tree
2441 * as it is empty.
2442 */
2443 if (cfqq->p_root) {
2444 rb_erase(&cfqq->p_node, cfqq->p_root);
2445 cfqq->p_root = NULL;
2446 }
2447 }
1da177e4
LT
2448}
2449
5e705374 2450static void cfq_add_rq_rb(struct request *rq)
1da177e4 2451{
5e705374 2452 struct cfq_queue *cfqq = RQ_CFQQ(rq);
1da177e4 2453 struct cfq_data *cfqd = cfqq->cfqd;
796d5116 2454 struct request *prev;
1da177e4 2455
5380a101 2456 cfqq->queued[rq_is_sync(rq)]++;
1da177e4 2457
796d5116 2458 elv_rb_add(&cfqq->sort_list, rq);
5fccbf61
JA
2459
2460 if (!cfq_cfqq_on_rr(cfqq))
2461 cfq_add_cfqq_rr(cfqd, cfqq);
5044eed4
JA
2462
2463 /*
2464 * check if this request is a better next-serve candidate
2465 */
a36e71f9 2466 prev = cfqq->next_rq;
cf7c25cf 2467 cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq, cfqd->last_position);
a36e71f9
JA
2468
2469 /*
2470 * adjust priority tree position, if ->next_rq changes
2471 */
2472 if (prev != cfqq->next_rq)
2473 cfq_prio_tree_add(cfqd, cfqq);
2474
5044eed4 2475 BUG_ON(!cfqq->next_rq);
1da177e4
LT
2476}
2477
febffd61 2478static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
1da177e4 2479{
5380a101
JA
2480 elv_rb_del(&cfqq->sort_list, rq);
2481 cfqq->queued[rq_is_sync(rq)]--;
ef295ecf 2482 cfqg_stats_update_io_remove(RQ_CFQG(rq), rq->cmd_flags);
5e705374 2483 cfq_add_rq_rb(rq);
155fead9 2484 cfqg_stats_update_io_add(RQ_CFQG(rq), cfqq->cfqd->serving_group,
ef295ecf 2485 rq->cmd_flags);
1da177e4
LT
2486}
2487
206dc69b
JA
2488static struct request *
2489cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
1da177e4 2490{
206dc69b 2491 struct task_struct *tsk = current;
c5869807 2492 struct cfq_io_cq *cic;
206dc69b 2493 struct cfq_queue *cfqq;
1da177e4 2494
4ac845a2 2495 cic = cfq_cic_lookup(cfqd, tsk->io_context);
91fac317
VT
2496 if (!cic)
2497 return NULL;
2498
aa39ebd4 2499 cfqq = cic_to_cfqq(cic, op_is_sync(bio->bi_opf));
f73a1c7d
KO
2500 if (cfqq)
2501 return elv_rb_find(&cfqq->sort_list, bio_end_sector(bio));
1da177e4 2502
1da177e4
LT
2503 return NULL;
2504}
2505
165125e1 2506static void cfq_activate_request(struct request_queue *q, struct request *rq)
1da177e4 2507{
22e2c507 2508 struct cfq_data *cfqd = q->elevator->elevator_data;
3b18152c 2509
53c583d2 2510 cfqd->rq_in_driver++;
7b679138 2511 cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d",
53c583d2 2512 cfqd->rq_in_driver);
25776e35 2513
5b93629b 2514 cfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
1da177e4
LT
2515}
2516
165125e1 2517static void cfq_deactivate_request(struct request_queue *q, struct request *rq)
1da177e4 2518{
b4878f24
JA
2519 struct cfq_data *cfqd = q->elevator->elevator_data;
2520
53c583d2
CZ
2521 WARN_ON(!cfqd->rq_in_driver);
2522 cfqd->rq_in_driver--;
7b679138 2523 cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "deactivate rq, drv=%d",
53c583d2 2524 cfqd->rq_in_driver);
1da177e4
LT
2525}
2526
b4878f24 2527static void cfq_remove_request(struct request *rq)
1da177e4 2528{
5e705374 2529 struct cfq_queue *cfqq = RQ_CFQQ(rq);
21183b07 2530
5e705374
JA
2531 if (cfqq->next_rq == rq)
2532 cfqq->next_rq = cfq_find_next_rq(cfqq->cfqd, cfqq, rq);
1da177e4 2533
b4878f24 2534 list_del_init(&rq->queuelist);
5e705374 2535 cfq_del_rq_rb(rq);
374f84ac 2536
45333d5a 2537 cfqq->cfqd->rq_queued--;
ef295ecf 2538 cfqg_stats_update_io_remove(RQ_CFQG(rq), rq->cmd_flags);
65299a3b
CH
2539 if (rq->cmd_flags & REQ_PRIO) {
2540 WARN_ON(!cfqq->prio_pending);
2541 cfqq->prio_pending--;
b53d1ed7 2542 }
1da177e4
LT
2543}
2544
34fe7c05 2545static enum elv_merge cfq_merge(struct request_queue *q, struct request **req,
165125e1 2546 struct bio *bio)
1da177e4
LT
2547{
2548 struct cfq_data *cfqd = q->elevator->elevator_data;
2549 struct request *__rq;
1da177e4 2550
206dc69b 2551 __rq = cfq_find_rq_fmerge(cfqd, bio);
72ef799b 2552 if (__rq && elv_bio_merge_ok(__rq, bio)) {
9817064b
JA
2553 *req = __rq;
2554 return ELEVATOR_FRONT_MERGE;
1da177e4
LT
2555 }
2556
2557 return ELEVATOR_NO_MERGE;
1da177e4
LT
2558}
2559
165125e1 2560static void cfq_merged_request(struct request_queue *q, struct request *req,
34fe7c05 2561 enum elv_merge type)
1da177e4 2562{
21183b07 2563 if (type == ELEVATOR_FRONT_MERGE) {
5e705374 2564 struct cfq_queue *cfqq = RQ_CFQQ(req);
1da177e4 2565
5e705374 2566 cfq_reposition_rq_rb(cfqq, req);
1da177e4 2567 }
1da177e4
LT
2568}
2569
812d4026
DS
2570static void cfq_bio_merged(struct request_queue *q, struct request *req,
2571 struct bio *bio)
2572{
ef295ecf 2573 cfqg_stats_update_io_merged(RQ_CFQG(req), bio->bi_opf);
812d4026
DS
2574}
2575
1da177e4 2576static void
165125e1 2577cfq_merged_requests(struct request_queue *q, struct request *rq,
1da177e4
LT
2578 struct request *next)
2579{
cf7c25cf 2580 struct cfq_queue *cfqq = RQ_CFQQ(rq);
4a0b75c7
SL
2581 struct cfq_data *cfqd = q->elevator->elevator_data;
2582
22e2c507
JA
2583 /*
2584 * reposition in fifo if next is older than rq
2585 */
2586 if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
9a7f38c4 2587 next->fifo_time < rq->fifo_time &&
3d106fba 2588 cfqq == RQ_CFQQ(next)) {
22e2c507 2589 list_move(&rq->queuelist, &next->queuelist);
8b4922d3 2590 rq->fifo_time = next->fifo_time;
30996f40 2591 }
22e2c507 2592
cf7c25cf
CZ
2593 if (cfqq->next_rq == next)
2594 cfqq->next_rq = rq;
b4878f24 2595 cfq_remove_request(next);
ef295ecf 2596 cfqg_stats_update_io_merged(RQ_CFQG(rq), next->cmd_flags);
4a0b75c7
SL
2597
2598 cfqq = RQ_CFQQ(next);
2599 /*
 2600	 * all requests of this queue have been merged into other queues; delete
 2601	 * it from the service tree. If it's the active_queue,
 2602	 * cfq_dispatch_requests() will choose to expire it or to idle
2603 */
2604 if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list) &&
2605 cfqq != cfqd->active_queue)
2606 cfq_del_cfqq_rr(cfqd, cfqq);
22e2c507
JA
2607}
2608
72ef799b
TE
2609static int cfq_allow_bio_merge(struct request_queue *q, struct request *rq,
2610 struct bio *bio)
da775265
JA
2611{
2612 struct cfq_data *cfqd = q->elevator->elevator_data;
aa39ebd4 2613 bool is_sync = op_is_sync(bio->bi_opf);
c5869807 2614 struct cfq_io_cq *cic;
da775265 2615 struct cfq_queue *cfqq;
da775265
JA
2616
2617 /*
ec8acb69 2618 * Disallow merge of a sync bio into an async request.
da775265 2619 */
aa39ebd4 2620 if (is_sync && !rq_is_sync(rq))
a6151c3a 2621 return false;
da775265
JA
2622
2623 /*
f1a4f4d3 2624 * Lookup the cfqq that this bio will be queued with and allow
07c2bd37 2625 * merge only if rq is queued there.
f1a4f4d3 2626 */
07c2bd37
TH
2627 cic = cfq_cic_lookup(cfqd, current->io_context);
2628 if (!cic)
2629 return false;
719d3402 2630
aa39ebd4 2631 cfqq = cic_to_cfqq(cic, is_sync);
a6151c3a 2632 return cfqq == RQ_CFQQ(rq);
da775265
JA
2633}
2634
72ef799b
TE
2635static int cfq_allow_rq_merge(struct request_queue *q, struct request *rq,
2636 struct request *next)
2637{
2638 return RQ_CFQQ(rq) == RQ_CFQQ(next);
2639}
2640
812df48d
DS
2641static inline void cfq_del_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2642{
91148325 2643 hrtimer_try_to_cancel(&cfqd->idle_slice_timer);
155fead9 2644 cfqg_stats_update_idle_time(cfqq->cfqg);
812df48d
DS
2645}
2646
febffd61
JA
2647static void __cfq_set_active_queue(struct cfq_data *cfqd,
2648 struct cfq_queue *cfqq)
22e2c507
JA
2649{
2650 if (cfqq) {
3bf10fea 2651 cfq_log_cfqq(cfqd, cfqq, "set_active wl_class:%d wl_type:%d",
4d2ceea4 2652 cfqd->serving_wl_class, cfqd->serving_wl_type);
155fead9 2653 cfqg_stats_update_avg_queue_size(cfqq->cfqg);
62a37f6b 2654 cfqq->slice_start = 0;
9a7f38c4 2655 cfqq->dispatch_start = ktime_get_ns();
62a37f6b
JT
2656 cfqq->allocated_slice = 0;
2657 cfqq->slice_end = 0;
2658 cfqq->slice_dispatch = 0;
2659 cfqq->nr_sectors = 0;
2660
2661 cfq_clear_cfqq_wait_request(cfqq);
2662 cfq_clear_cfqq_must_dispatch(cfqq);
2663 cfq_clear_cfqq_must_alloc_slice(cfqq);
2664 cfq_clear_cfqq_fifo_expire(cfqq);
2665 cfq_mark_cfqq_slice_new(cfqq);
2666
2667 cfq_del_timer(cfqd, cfqq);
22e2c507
JA
2668 }
2669
2670 cfqd->active_queue = cfqq;
2671}
2672
7b14e3b5
JA
2673/*
2674 * current cfqq expired its slice (or was too idle), select new one
2675 */
2676static void
2677__cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
e5ff082e 2678 bool timed_out)
7b14e3b5 2679{
7b679138
JA
2680 cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out);
2681
7b14e3b5 2682 if (cfq_cfqq_wait_request(cfqq))
812df48d 2683 cfq_del_timer(cfqd, cfqq);
7b14e3b5 2684
7b14e3b5 2685 cfq_clear_cfqq_wait_request(cfqq);
f75edf2d 2686 cfq_clear_cfqq_wait_busy(cfqq);
7b14e3b5 2687
ae54abed
SL
2688 /*
2689 * If this cfqq is shared between multiple processes, check to
2690 * make sure that those processes are still issuing I/Os within
2691 * the mean seek distance. If not, it may be time to break the
2692 * queues apart again.
2693 */
2694 if (cfq_cfqq_coop(cfqq) && CFQQ_SEEKY(cfqq))
2695 cfq_mark_cfqq_split_coop(cfqq);
2696
7b14e3b5 2697 /*
6084cdda 2698 * store what was left of this slice, if the queue idled/timed out
7b14e3b5 2699 */
c553f8e3
SL
2700 if (timed_out) {
2701 if (cfq_cfqq_slice_new(cfqq))
ba5bd520 2702 cfqq->slice_resid = cfq_scaled_cfqq_slice(cfqd, cfqq);
c553f8e3 2703 else
9a7f38c4 2704 cfqq->slice_resid = cfqq->slice_end - ktime_get_ns();
93fdf147 2705 cfq_log_cfqq(cfqd, cfqq, "resid=%lld", cfqq->slice_resid);
7b679138 2706 }
7b14e3b5 2707
e5ff082e 2708 cfq_group_served(cfqd, cfqq->cfqg, cfqq);
dae739eb 2709
f04a6424
VG
2710 if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list))
2711 cfq_del_cfqq_rr(cfqd, cfqq);
2712
edd75ffd 2713 cfq_resort_rr_list(cfqd, cfqq);
7b14e3b5
JA
2714
2715 if (cfqq == cfqd->active_queue)
2716 cfqd->active_queue = NULL;
2717
2718 if (cfqd->active_cic) {
11a3122f 2719 put_io_context(cfqd->active_cic->icq.ioc);
7b14e3b5
JA
2720 cfqd->active_cic = NULL;
2721 }
7b14e3b5
JA
2722}
2723
e5ff082e 2724static inline void cfq_slice_expired(struct cfq_data *cfqd, bool timed_out)
7b14e3b5
JA
2725{
2726 struct cfq_queue *cfqq = cfqd->active_queue;
2727
2728 if (cfqq)
e5ff082e 2729 __cfq_slice_expired(cfqd, cfqq, timed_out);
7b14e3b5
JA
2730}
2731
498d3aa2
JA
2732/*
2733 * Get next queue for service. Unless we have a queue preemption,
2734 * we'll simply select the first cfqq in the service tree.
2735 */
6d048f53 2736static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
22e2c507 2737{
34b98d03
VG
2738 struct cfq_rb_root *st = st_for(cfqd->serving_group,
2739 cfqd->serving_wl_class, cfqd->serving_wl_type);
d9e7620e 2740
f04a6424
VG
2741 if (!cfqd->rq_queued)
2742 return NULL;
2743
1fa8f6d6 2744 /* There is nothing to dispatch */
34b98d03 2745 if (!st)
1fa8f6d6 2746 return NULL;
34b98d03 2747 if (RB_EMPTY_ROOT(&st->rb))
c0324a02 2748 return NULL;
34b98d03 2749 return cfq_rb_first(st);
6d048f53
JA
2750}
2751
f04a6424
VG
2752static struct cfq_queue *cfq_get_next_queue_forced(struct cfq_data *cfqd)
2753{
25fb5169 2754 struct cfq_group *cfqg;
f04a6424
VG
2755 struct cfq_queue *cfqq;
2756 int i, j;
2757 struct cfq_rb_root *st;
2758
2759 if (!cfqd->rq_queued)
2760 return NULL;
2761
25fb5169
VG
2762 cfqg = cfq_get_next_cfqg(cfqd);
2763 if (!cfqg)
2764 return NULL;
2765
1cf41753
ME
2766 for_each_cfqg_st(cfqg, i, j, st) {
2767 cfqq = cfq_rb_first(st);
2768 if (cfqq)
f04a6424 2769 return cfqq;
1cf41753 2770 }
f04a6424
VG
2771 return NULL;
2772}
2773
498d3aa2
JA
2774/*
2775 * Get and set a new active queue for service.
2776 */
a36e71f9
JA
2777static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd,
2778 struct cfq_queue *cfqq)
6d048f53 2779{
e00ef799 2780 if (!cfqq)
a36e71f9 2781 cfqq = cfq_get_next_queue(cfqd);
6d048f53 2782
22e2c507 2783 __cfq_set_active_queue(cfqd, cfqq);
3b18152c 2784 return cfqq;
22e2c507
JA
2785}
2786
d9e7620e
JA
2787static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
2788 struct request *rq)
2789{
83096ebf
TH
2790 if (blk_rq_pos(rq) >= cfqd->last_position)
2791 return blk_rq_pos(rq) - cfqd->last_position;
d9e7620e 2792 else
83096ebf 2793 return cfqd->last_position - blk_rq_pos(rq);
d9e7620e
JA
2794}
2795
b2c18e1e 2796static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq,
e9ce335d 2797 struct request *rq)
6d048f53 2798{
e9ce335d 2799 return cfq_dist_from_last(cfqd, rq) <= CFQQ_CLOSE_THR;
6d048f53
JA
2800}
2801
a36e71f9
JA
2802static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
2803 struct cfq_queue *cur_cfqq)
2804{
f2d1f0ae 2805 struct rb_root *root = &cfqd->prio_trees[cur_cfqq->org_ioprio];
a36e71f9
JA
2806 struct rb_node *parent, *node;
2807 struct cfq_queue *__cfqq;
2808 sector_t sector = cfqd->last_position;
2809
2810 if (RB_EMPTY_ROOT(root))
2811 return NULL;
2812
2813 /*
2814 * First, if we find a request starting at the end of the last
2815 * request, choose it.
2816 */
f2d1f0ae 2817 __cfqq = cfq_prio_tree_lookup(cfqd, root, sector, &parent, NULL);
a36e71f9
JA
2818 if (__cfqq)
2819 return __cfqq;
2820
2821 /*
2822 * If the exact sector wasn't found, the parent of the NULL leaf
2823 * will contain the closest sector.
2824 */
2825 __cfqq = rb_entry(parent, struct cfq_queue, p_node);
e9ce335d 2826 if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
a36e71f9
JA
2827 return __cfqq;
2828
2e46e8b2 2829 if (blk_rq_pos(__cfqq->next_rq) < sector)
a36e71f9
JA
2830 node = rb_next(&__cfqq->p_node);
2831 else
2832 node = rb_prev(&__cfqq->p_node);
2833 if (!node)
2834 return NULL;
2835
2836 __cfqq = rb_entry(node, struct cfq_queue, p_node);
e9ce335d 2837 if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
a36e71f9
JA
2838 return __cfqq;
2839
2840 return NULL;
2841}
2842
2843/*
2844 * cfqd - obvious
2845 * cur_cfqq - passed in so that we don't decide that the current queue is
2846 * closely cooperating with itself.
2847 *
 2848	 * So, basically we're assuming that cur_cfqq has dispatched at least
2849 * one request, and that cfqd->last_position reflects a position on the disk
2850 * associated with the I/O issued by cur_cfqq. I'm not sure this is a valid
2851 * assumption.
2852 */
2853static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
b3b6d040 2854 struct cfq_queue *cur_cfqq)
6d048f53 2855{
a36e71f9
JA
2856 struct cfq_queue *cfqq;
2857
39c01b21
DS
2858 if (cfq_class_idle(cur_cfqq))
2859 return NULL;
e6c5bc73
JM
2860 if (!cfq_cfqq_sync(cur_cfqq))
2861 return NULL;
2862 if (CFQQ_SEEKY(cur_cfqq))
2863 return NULL;
2864
b9d8f4c7
GJ
2865 /*
2866 * Don't search priority tree if it's the only queue in the group.
2867 */
2868 if (cur_cfqq->cfqg->nr_cfqq == 1)
2869 return NULL;
2870
6d048f53 2871 /*
d9e7620e
JA
 2872	 * We should notice if some of the queues are cooperating, e.g.
 2873	 * working closely on the same area of the disk. In that case,
 2874	 * we can group them together and not waste time idling.
6d048f53 2875 */
a36e71f9
JA
2876 cfqq = cfqq_close(cfqd, cur_cfqq);
2877 if (!cfqq)
2878 return NULL;
2879
8682e1f1
VG
2880 /* If new queue belongs to different cfq_group, don't choose it */
2881 if (cur_cfqq->cfqg != cfqq->cfqg)
2882 return NULL;
2883
df5fe3e8
JM
2884 /*
2885 * It only makes sense to merge sync queues.
2886 */
2887 if (!cfq_cfqq_sync(cfqq))
2888 return NULL;
e6c5bc73
JM
2889 if (CFQQ_SEEKY(cfqq))
2890 return NULL;
df5fe3e8 2891
c0324a02
CZ
2892 /*
2893 * Do not merge queues of different priority classes
2894 */
2895 if (cfq_class_rt(cfqq) != cfq_class_rt(cur_cfqq))
2896 return NULL;
2897
a36e71f9 2898 return cfqq;
6d048f53
JA
2899}
2900
a6d44e98
CZ
2901/*
2902 * Determine whether we should enforce idle window for this queue.
2903 */
2904
2905static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2906{
3bf10fea 2907 enum wl_class_t wl_class = cfqq_class(cfqq);
34b98d03 2908 struct cfq_rb_root *st = cfqq->service_tree;
a6d44e98 2909
34b98d03
VG
2910 BUG_ON(!st);
2911 BUG_ON(!st->count);
f04a6424 2912
b6508c16
VG
2913 if (!cfqd->cfq_slice_idle)
2914 return false;
2915
a6d44e98 2916 /* We never do for idle class queues. */
3bf10fea 2917 if (wl_class == IDLE_WORKLOAD)
a6d44e98
CZ
2918 return false;
2919
2920 /* We do for queues that were marked with idle window flag. */
3c764b7a
SL
2921 if (cfq_cfqq_idle_window(cfqq) &&
2922 !(blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag))
a6d44e98
CZ
2923 return true;
2924
2925 /*
2926 * Otherwise, we do only if they are the last ones
2927 * in their service tree.
2928 */
34b98d03
VG
2929 if (st->count == 1 && cfq_cfqq_sync(cfqq) &&
2930 !cfq_io_thinktime_big(cfqd, &st->ttime, false))
c1e44756 2931 return true;
34b98d03 2932 cfq_log_cfqq(cfqd, cfqq, "Not idling. st->count:%d", st->count);
c1e44756 2933 return false;
a6d44e98
CZ
2934}
2935
6d048f53 2936static void cfq_arm_slice_timer(struct cfq_data *cfqd)
22e2c507 2937{
1792669c 2938 struct cfq_queue *cfqq = cfqd->active_queue;
e795421e 2939 struct cfq_rb_root *st = cfqq->service_tree;
c5869807 2940 struct cfq_io_cq *cic;
9a7f38c4
JM
2941 u64 sl, group_idle = 0;
2942 u64 now = ktime_get_ns();
7b14e3b5 2943
a68bbddb 2944 /*
f7d7b7a7
JA
 2945	 * For an SSD without seek penalty, disable idling. But only do so
 2946	 * for devices that support queuing; otherwise we still have a problem
2947 * with sync vs async workloads.
a68bbddb 2948 */
f7d7b7a7 2949 if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag)
a68bbddb
JA
2950 return;
2951
dd67d051 2952 WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
6d048f53 2953 WARN_ON(cfq_cfqq_slice_new(cfqq));
22e2c507
JA
2954
2955 /*
2956 * idle is disabled, either manually or by past process history
2957 */
80bdf0c7
VG
2958 if (!cfq_should_idle(cfqd, cfqq)) {
2959 /* no queue idling. Check for group idling */
2960 if (cfqd->cfq_group_idle)
2961 group_idle = cfqd->cfq_group_idle;
2962 else
2963 return;
2964 }
6d048f53 2965
7b679138 2966 /*
8e550632 2967 * still active requests from this queue, don't idle
7b679138 2968 */
8e550632 2969 if (cfqq->dispatched)
7b679138
JA
2970 return;
2971
22e2c507
JA
2972 /*
2973 * task has exited, don't wait
2974 */
206dc69b 2975 cic = cfqd->active_cic;
f6e8d01b 2976 if (!cic || !atomic_read(&cic->icq.ioc->active_ref))
6d048f53
JA
2977 return;
2978
355b659c
CZ
2979 /*
2980 * If our average think time is larger than the remaining time
2981 * slice, then don't idle. This avoids overrunning the allotted
2982 * time slice.
2983 */
383cd721 2984 if (sample_valid(cic->ttime.ttime_samples) &&
9a7f38c4
JM
2985 (cfqq->slice_end - now < cic->ttime.ttime_mean)) {
2986 cfq_log_cfqq(cfqd, cfqq, "Not idling. think_time:%llu",
383cd721 2987 cic->ttime.ttime_mean);
355b659c 2988 return;
b1ffe737 2989 }
355b659c 2990
e795421e
JK
2991 /*
 2992	 * If there are other queues in the group, or this is the only group
 2993	 * and its thinktime is too big, don't do group idle.
2994 */
2995 if (group_idle &&
2996 (cfqq->cfqg->nr_cfqq > 1 ||
2997 cfq_io_thinktime_big(cfqd, &st->ttime, true)))
80bdf0c7
VG
2998 return;
2999
3b18152c 3000 cfq_mark_cfqq_wait_request(cfqq);
22e2c507 3001
80bdf0c7
VG
3002 if (group_idle)
3003 sl = cfqd->cfq_group_idle;
3004 else
3005 sl = cfqd->cfq_slice_idle;
206dc69b 3006
91148325
JK
3007 hrtimer_start(&cfqd->idle_slice_timer, ns_to_ktime(sl),
3008 HRTIMER_MODE_REL);
155fead9 3009 cfqg_stats_set_start_idle_time(cfqq->cfqg);
9a7f38c4 3010 cfq_log_cfqq(cfqd, cfqq, "arm_idle: %llu group_idle: %d", sl,
80bdf0c7 3011 group_idle ? 1 : 0);
1da177e4
LT
3012}
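/*
 * Illustrative example, assuming default tunables (not part of the
 * original source): cfq_slice_idle and cfq_group_idle both default to
 * NSEC_PER_SEC / 125 = 8 ms, so the hrtimer above is normally armed
 * ~8 ms ahead - either as per-queue idling or, when queue idling is
 * disabled, as group idling. On non-rotational devices that support
 * queuing the function returns early and never idles.
 */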
3013
498d3aa2
JA
3014/*
3015 * Move request from internal lists to the request queue dispatch list.
3016 */
165125e1 3017static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
1da177e4 3018{
3ed9a296 3019 struct cfq_data *cfqd = q->elevator->elevator_data;
5e705374 3020 struct cfq_queue *cfqq = RQ_CFQQ(rq);
22e2c507 3021
7b679138
JA
3022 cfq_log_cfqq(cfqd, cfqq, "dispatch_insert");
3023
06d21886 3024 cfqq->next_rq = cfq_find_next_rq(cfqd, cfqq, rq);
5380a101 3025 cfq_remove_request(rq);
6d048f53 3026 cfqq->dispatched++;
80bdf0c7 3027 (RQ_CFQG(rq))->dispatched++;
5380a101 3028 elv_dispatch_sort(q, rq);
3ed9a296 3029
53c583d2 3030 cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++;
c4e7893e 3031 cfqq->nr_sectors += blk_rq_sectors(rq);
1da177e4
LT
3032}
3033
3034/*
3035 * return expired entry, or NULL to just start from scratch in rbtree
3036 */
febffd61 3037static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
1da177e4 3038{
30996f40 3039 struct request *rq = NULL;
1da177e4 3040
3b18152c 3041 if (cfq_cfqq_fifo_expire(cfqq))
1da177e4 3042 return NULL;
cb887411
JA
3043
3044 cfq_mark_cfqq_fifo_expire(cfqq);
3045
89850f7e
JA
3046 if (list_empty(&cfqq->fifo))
3047 return NULL;
1da177e4 3048
89850f7e 3049 rq = rq_entry_fifo(cfqq->fifo.next);
9a7f38c4 3050 if (ktime_get_ns() < rq->fifo_time)
7b679138 3051 rq = NULL;
1da177e4 3052
6d048f53 3053 return rq;
1da177e4
LT
3054}
3055
22e2c507
JA
3056static inline int
3057cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3058{
3059 const int base_rq = cfqd->cfq_slice_async_rq;
1da177e4 3060
22e2c507 3061 WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);
1da177e4 3062
b9f8ce05 3063 return 2 * base_rq * (IOPRIO_BE_NR - cfqq->ioprio);
1da177e4
LT
3064}
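/*
 * Illustrative arithmetic, assuming the default cfq_slice_async_rq of 2
 * (not part of the original source). With IOPRIO_BE_NR = 8:
 *
 *   ioprio 0 (highest BE): 2 * 2 * (8 - 0) = 32 requests per slice
 *   ioprio 4 (default):    2 * 2 * (8 - 4) = 16 requests per slice
 *   ioprio 7 (lowest BE):  2 * 2 * (8 - 7) =  4 requests per slice
 */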
3065
df5fe3e8
JM
3066/*
3067 * Must be called with the queue_lock held.
3068 */
3069static int cfqq_process_refs(struct cfq_queue *cfqq)
3070{
3071 int process_refs, io_refs;
3072
3073 io_refs = cfqq->allocated[READ] + cfqq->allocated[WRITE];
30d7b944 3074 process_refs = cfqq->ref - io_refs;
df5fe3e8
JM
3075 BUG_ON(process_refs < 0);
3076 return process_refs;
3077}
3078
3079static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq)
3080{
e6c5bc73 3081 int process_refs, new_process_refs;
df5fe3e8
JM
3082 struct cfq_queue *__cfqq;
3083
c10b61f0
JM
3084 /*
3085 * If there are no process references on the new_cfqq, then it is
3086 * unsafe to follow the ->new_cfqq chain as other cfqq's in the
3087 * chain may have dropped their last reference (not just their
3088 * last process reference).
3089 */
3090 if (!cfqq_process_refs(new_cfqq))
3091 return;
3092
df5fe3e8
JM
3093 /* Avoid a circular list and skip interim queue merges */
3094 while ((__cfqq = new_cfqq->new_cfqq)) {
3095 if (__cfqq == cfqq)
3096 return;
3097 new_cfqq = __cfqq;
3098 }
3099
3100 process_refs = cfqq_process_refs(cfqq);
c10b61f0 3101 new_process_refs = cfqq_process_refs(new_cfqq);
df5fe3e8
JM
3102 /*
3103 * If the process for the cfqq has gone away, there is no
3104 * sense in merging the queues.
3105 */
c10b61f0 3106 if (process_refs == 0 || new_process_refs == 0)
df5fe3e8
JM
3107 return;
3108
e6c5bc73
JM
3109 /*
3110 * Merge in the direction of the lesser amount of work.
3111 */
e6c5bc73
JM
3112 if (new_process_refs >= process_refs) {
3113 cfqq->new_cfqq = new_cfqq;
30d7b944 3114 new_cfqq->ref += process_refs;
e6c5bc73
JM
3115 } else {
3116 new_cfqq->new_cfqq = cfqq;
30d7b944 3117 cfqq->ref += new_process_refs;
e6c5bc73 3118 }
df5fe3e8
JM
3119}
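/*
 * Illustrative example with invented numbers (not part of the original
 * source): if cfqq->ref == 5 and allocated[READ] + allocated[WRITE] == 3,
 * then cfqq_process_refs(cfqq) == 2. If cfqq_process_refs(new_cfqq) == 4,
 * new_process_refs >= process_refs, so cfqq->new_cfqq is set to new_cfqq
 * and new_cfqq->ref grows by 2 - the queue with fewer process references
 * is the one redirected onto the other.
 */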
3120
6d816ec7 3121static enum wl_type_t cfq_choose_wl_type(struct cfq_data *cfqd,
3bf10fea 3122 struct cfq_group *cfqg, enum wl_class_t wl_class)
718eee05
CZ
3123{
3124 struct cfq_queue *queue;
3125 int i;
3126 bool key_valid = false;
9a7f38c4 3127 u64 lowest_key = 0;
718eee05
CZ
3128 enum wl_type_t cur_best = SYNC_NOIDLE_WORKLOAD;
3129
65b32a57
VG
3130 for (i = 0; i <= SYNC_WORKLOAD; ++i) {
3131 /* select the one with lowest rb_key */
34b98d03 3132 queue = cfq_rb_first(st_for(cfqg, wl_class, i));
718eee05 3133 if (queue &&
9a7f38c4 3134 (!key_valid || queue->rb_key < lowest_key)) {
718eee05
CZ
3135 lowest_key = queue->rb_key;
3136 cur_best = i;
3137 key_valid = true;
3138 }
3139 }
3140
3141 return cur_best;
3142}
3143
6d816ec7
VG
3144static void
3145choose_wl_class_and_type(struct cfq_data *cfqd, struct cfq_group *cfqg)
718eee05 3146{
9a7f38c4 3147 u64 slice;
718eee05 3148 unsigned count;
cdb16e8f 3149 struct cfq_rb_root *st;
9a7f38c4 3150 u64 group_slice;
4d2ceea4 3151 enum wl_class_t original_class = cfqd->serving_wl_class;
9a7f38c4 3152 u64 now = ktime_get_ns();
1fa8f6d6 3153
718eee05 3154 /* Choose next priority. RT > BE > IDLE */
58ff82f3 3155 if (cfq_group_busy_queues_wl(RT_WORKLOAD, cfqd, cfqg))
4d2ceea4 3156 cfqd->serving_wl_class = RT_WORKLOAD;
58ff82f3 3157 else if (cfq_group_busy_queues_wl(BE_WORKLOAD, cfqd, cfqg))
4d2ceea4 3158 cfqd->serving_wl_class = BE_WORKLOAD;
718eee05 3159 else {
4d2ceea4 3160 cfqd->serving_wl_class = IDLE_WORKLOAD;
9a7f38c4 3161 cfqd->workload_expires = now + jiffies_to_nsecs(1);
718eee05
CZ
3162 return;
3163 }
3164
4d2ceea4 3165 if (original_class != cfqd->serving_wl_class)
e4ea0c16
SL
3166 goto new_workload;
3167
718eee05
CZ
3168 /*
 3169	 * For RT and BE, we also have to choose the type
3170 * (SYNC, SYNC_NOIDLE, ASYNC), and to compute a workload
3171 * expiration time
3172 */
34b98d03 3173 st = st_for(cfqg, cfqd->serving_wl_class, cfqd->serving_wl_type);
cdb16e8f 3174 count = st->count;
718eee05
CZ
3175
3176 /*
65b32a57 3177 * check workload expiration, and that we still have other queues ready
718eee05 3178 */
9a7f38c4 3179 if (count && !(now > cfqd->workload_expires))
718eee05
CZ
3180 return;
3181
e4ea0c16 3182new_workload:
718eee05 3183 /* otherwise select new workload type */
6d816ec7 3184 cfqd->serving_wl_type = cfq_choose_wl_type(cfqd, cfqg,
4d2ceea4 3185 cfqd->serving_wl_class);
34b98d03 3186 st = st_for(cfqg, cfqd->serving_wl_class, cfqd->serving_wl_type);
cdb16e8f 3187 count = st->count;
718eee05
CZ
3188
3189 /*
3190 * the workload slice is computed as a fraction of target latency
3191 * proportional to the number of queues in that workload, over
3192 * all the queues in the same priority class
3193 */
58ff82f3
VG
3194 group_slice = cfq_group_slice(cfqd, cfqg);
3195
9a7f38c4 3196 slice = div_u64(group_slice * count,
4d2ceea4
VG
3197 max_t(unsigned, cfqg->busy_queues_avg[cfqd->serving_wl_class],
3198 cfq_group_busy_queues_wl(cfqd->serving_wl_class, cfqd,
9a7f38c4 3199 cfqg)));
718eee05 3200
4d2ceea4 3201 if (cfqd->serving_wl_type == ASYNC_WORKLOAD) {
9a7f38c4 3202 u64 tmp;
f26bd1f0
VG
3203
3204 /*
3205 * Async queues are currently system wide. Just taking
3206 * proportion of queues with-in same group will lead to higher
3207 * async ratio system wide as generally root group is going
3208 * to have higher weight. A more accurate thing would be to
3209 * calculate system wide asnc/sync ratio.
3210 */
5bf14c07
TM
3211 tmp = cfqd->cfq_target_latency *
3212 cfqg_busy_async_queues(cfqd, cfqg);
9a7f38c4
JM
3213 tmp = div_u64(tmp, cfqd->busy_queues);
3214 slice = min_t(u64, slice, tmp);
f26bd1f0 3215
718eee05
CZ
3216 /* async workload slice is scaled down according to
3217 * the sync/async slice ratio. */
9a7f38c4 3218 slice = div64_u64(slice*cfqd->cfq_slice[0], cfqd->cfq_slice[1]);
f26bd1f0 3219 } else
718eee05
CZ
3220 /* sync workload slice is at least 2 * cfq_slice_idle */
3221 slice = max(slice, 2 * cfqd->cfq_slice_idle);
3222
9a7f38c4
JM
3223 slice = max_t(u64, slice, CFQ_MIN_TT);
3224 cfq_log(cfqd, "workload slice:%llu", slice);
3225 cfqd->workload_expires = now + slice;
718eee05
CZ
3226}
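/*
 * Illustrative arithmetic with invented counts and default tunables
 * (not part of the original source): suppose cfq_group_slice() returns
 * 300 ms (a lone group at the default 300 ms target latency) and the
 * serving workload has count = 2 queues out of 4 busy queues of that
 * class in the group:
 *
 *   slice = 300 ms * 2 / 4 = 150 ms
 *
 * A sync workload is then clamped to at least 2 * cfq_slice_idle (16 ms
 * with defaults) and at least CFQ_MIN_TT; an async workload is instead
 * capped by the busy-async proportion and scaled down by the async/sync
 * slice ratio, exactly as in the branches above.
 */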
3227
1fa8f6d6
VG
3228static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd)
3229{
3230 struct cfq_rb_root *st = &cfqd->grp_service_tree;
25bc6b07 3231 struct cfq_group *cfqg;
1fa8f6d6
VG
3232
3233 if (RB_EMPTY_ROOT(&st->rb))
3234 return NULL;
25bc6b07 3235 cfqg = cfq_rb_first_group(st);
25bc6b07
VG
3236 update_min_vdisktime(st);
3237 return cfqg;
1fa8f6d6
VG
3238}
3239
cdb16e8f
VG
3240static void cfq_choose_cfqg(struct cfq_data *cfqd)
3241{
1fa8f6d6 3242 struct cfq_group *cfqg = cfq_get_next_cfqg(cfqd);
9a7f38c4 3243 u64 now = ktime_get_ns();
1fa8f6d6
VG
3244
3245 cfqd->serving_group = cfqg;
dae739eb
VG
3246
3247 /* Restore the workload type data */
4d2ceea4 3248 if (cfqg->saved_wl_slice) {
9a7f38c4 3249 cfqd->workload_expires = now + cfqg->saved_wl_slice;
4d2ceea4
VG
3250 cfqd->serving_wl_type = cfqg->saved_wl_type;
3251 cfqd->serving_wl_class = cfqg->saved_wl_class;
66ae2919 3252 } else
9a7f38c4 3253 cfqd->workload_expires = now - 1;
66ae2919 3254
6d816ec7 3255 choose_wl_class_and_type(cfqd, cfqg);
cdb16e8f
VG
3256}
3257
22e2c507 3258/*
498d3aa2
JA
3259 * Select a queue for service. If we have a current active queue,
3260 * check whether to continue servicing it, or retrieve and set a new one.
22e2c507 3261 */
1b5ed5e1 3262static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
1da177e4 3263{
a36e71f9 3264 struct cfq_queue *cfqq, *new_cfqq = NULL;
9a7f38c4 3265 u64 now = ktime_get_ns();
1da177e4 3266
22e2c507
JA
3267 cfqq = cfqd->active_queue;
3268 if (!cfqq)
3269 goto new_queue;
1da177e4 3270
f04a6424
VG
3271 if (!cfqd->rq_queued)
3272 return NULL;
c244bb50
VG
3273
3274 /*
3275 * We were waiting for group to get backlogged. Expire the queue
3276 */
3277 if (cfq_cfqq_wait_busy(cfqq) && !RB_EMPTY_ROOT(&cfqq->sort_list))
3278 goto expire;
3279
22e2c507 3280 /*
6d048f53 3281 * The active queue has run out of time, expire it and select new.
22e2c507 3282 */
7667aa06
VG
3283 if (cfq_slice_used(cfqq) && !cfq_cfqq_must_dispatch(cfqq)) {
3284 /*
3285 * If slice had not expired at the completion of last request
3286 * we might not have turned on wait_busy flag. Don't expire
3287 * the queue yet. Allow the group to get backlogged.
3288 *
 3289	 * The very fact that we have used the slice means we
3290 * have been idling all along on this queue and it should be
3291 * ok to wait for this request to complete.
3292 */
82bbbf28
VG
3293 if (cfqq->cfqg->nr_cfqq == 1 && RB_EMPTY_ROOT(&cfqq->sort_list)
3294 && cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
3295 cfqq = NULL;
7667aa06 3296 goto keep_queue;
82bbbf28 3297 } else
80bdf0c7 3298 goto check_group_idle;
7667aa06 3299 }
1da177e4 3300
22e2c507 3301 /*
6d048f53
JA
3302 * The active queue has requests and isn't expired, allow it to
3303 * dispatch.
22e2c507 3304 */
dd67d051 3305 if (!RB_EMPTY_ROOT(&cfqq->sort_list))
22e2c507 3306 goto keep_queue;
6d048f53 3307
a36e71f9
JA
3308 /*
3309 * If another queue has a request waiting within our mean seek
3310 * distance, let it run. The expire code will check for close
3311 * cooperators and put the close queue at the front of the service
df5fe3e8 3312 * tree. If possible, merge the expiring queue with the new cfqq.
a36e71f9 3313 */
b3b6d040 3314 new_cfqq = cfq_close_cooperator(cfqd, cfqq);
df5fe3e8
JM
3315 if (new_cfqq) {
3316 if (!cfqq->new_cfqq)
3317 cfq_setup_merge(cfqq, new_cfqq);
a36e71f9 3318 goto expire;
df5fe3e8 3319 }
a36e71f9 3320
6d048f53
JA
3321 /*
3322 * No requests pending. If the active queue still has requests in
3323 * flight or is idling for a new request, allow either of these
3324 * conditions to happen (or time out) before selecting a new queue.
3325 */
91148325 3326 if (hrtimer_active(&cfqd->idle_slice_timer)) {
80bdf0c7
VG
3327 cfqq = NULL;
3328 goto keep_queue;
3329 }
3330
8e1ac665
SL
3331 /*
3332 * This is a deep seek queue, but the device is much faster than
3333 * the queue can deliver; don't idle.
3334 */
3335 if (CFQQ_SEEKY(cfqq) && cfq_cfqq_idle_window(cfqq) &&
3336 (cfq_cfqq_slice_new(cfqq) ||
9a7f38c4 3337 (cfqq->slice_end - now > now - cfqq->slice_start))) {
8e1ac665
SL
3338 cfq_clear_cfqq_deep(cfqq);
3339 cfq_clear_cfqq_idle_window(cfqq);
3340 }
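 /*
  * Note: the test above can only trigger while the slice is still new or
  * while more of the slice remains than has already elapsed, i.e. during
  * the first half of the slice.
  */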
3341
80bdf0c7
VG
3342 if (cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
3343 cfqq = NULL;
3344 goto keep_queue;
3345 }
3346
3347 /*
3348 * If group idle is enabled and there are requests dispatched from
3349 * this group, wait for requests to complete.
3350 */
3351check_group_idle:
7700fc4f
SL
3352 if (cfqd->cfq_group_idle && cfqq->cfqg->nr_cfqq == 1 &&
3353 cfqq->cfqg->dispatched &&
3354 !cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true)) {
caaa5f9f
JA
3355 cfqq = NULL;
3356 goto keep_queue;
22e2c507
JA
3357 }
3358
3b18152c 3359expire:
e5ff082e 3360 cfq_slice_expired(cfqd, 0);
3b18152c 3361new_queue:
718eee05
CZ
3362 /*
3363 * Current queue expired. Check if we have to switch to a new
3364 * service tree
3365 */
3366 if (!new_cfqq)
cdb16e8f 3367 cfq_choose_cfqg(cfqd);
718eee05 3368
a36e71f9 3369 cfqq = cfq_set_active_queue(cfqd, new_cfqq);
22e2c507 3370keep_queue:
3b18152c 3371 return cfqq;
22e2c507
JA
3372}
3373
febffd61 3374static int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq)
d9e7620e
JA
3375{
3376 int dispatched = 0;
3377
3378 while (cfqq->next_rq) {
3379 cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq);
3380 dispatched++;
3381 }
3382
3383 BUG_ON(!list_empty(&cfqq->fifo));
f04a6424
VG
3384
3385 /* By default cfqq is not expired if it is empty. Do it explicitly */
e5ff082e 3386 __cfq_slice_expired(cfqq->cfqd, cfqq, 0);
d9e7620e
JA
3387 return dispatched;
3388}
3389
498d3aa2
JA
3390/*
3391 * Drain our current requests. Used for barriers and when switching
3392 * io schedulers on-the-fly.
3393 */
d9e7620e 3394static int cfq_forced_dispatch(struct cfq_data *cfqd)
1b5ed5e1 3395{
0871714e 3396 struct cfq_queue *cfqq;
d9e7620e 3397 int dispatched = 0;
cdb16e8f 3398
3440c49f 3399 /* Expire the timeslice of the current active queue first */
e5ff082e 3400 cfq_slice_expired(cfqd, 0);
3440c49f
DS
3401 while ((cfqq = cfq_get_next_queue_forced(cfqd)) != NULL) {
3402 __cfq_set_active_queue(cfqd, cfqq);
f04a6424 3403 dispatched += __cfq_forced_dispatch_cfqq(cfqq);
3440c49f 3404 }
1b5ed5e1 3405
1b5ed5e1
TH
3406 BUG_ON(cfqd->busy_queues);
3407
6923715a 3408 cfq_log(cfqd, "forced_dispatch=%d", dispatched);
1b5ed5e1
TH
3409 return dispatched;
3410}
3411
abc3c744
SL
3412static inline bool cfq_slice_used_soon(struct cfq_data *cfqd,
3413 struct cfq_queue *cfqq)
3414{
9a7f38c4
JM
3415 u64 now = ktime_get_ns();
3416
abc3c744
SL
3417 /* the queue hasn't finished any request, can't estimate */
3418 if (cfq_cfqq_slice_new(cfqq))
c1e44756 3419 return true;
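 /*
  * Rough estimate used below: the slice is treated as about to run out
  * when less than cfq_slice_idle (8ms with the default tunables) of
  * slice time remains per request already dispatched.
  */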
9a7f38c4 3420 if (now + cfqd->cfq_slice_idle * cfqq->dispatched > cfqq->slice_end)
c1e44756 3421 return true;
abc3c744 3422
c1e44756 3423 return false;
abc3c744
SL
3424}
3425
0b182d61 3426static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2f5cb738 3427{
2f5cb738 3428 unsigned int max_dispatch;
22e2c507 3429
3932a86b
GC
3430 if (cfq_cfqq_must_dispatch(cfqq))
3431 return true;
3432
5ad531db
JA
3433 /*
3434 * Drain async requests before we start sync IO
3435 */
53c583d2 3436 if (cfq_should_idle(cfqd, cfqq) && cfqd->rq_in_flight[BLK_RW_ASYNC])
0b182d61 3437 return false;
5ad531db 3438
2f5cb738
JA
3439 /*
3440 * If this is an async queue and we have sync IO in flight, let it wait
3441 */
53c583d2 3442 if (cfqd->rq_in_flight[BLK_RW_SYNC] && !cfq_cfqq_sync(cfqq))
0b182d61 3443 return false;
2f5cb738 3444
abc3c744 3445 max_dispatch = max_t(unsigned int, cfqd->cfq_quantum / 2, 1);
2f5cb738
JA
3446 if (cfq_class_idle(cfqq))
3447 max_dispatch = 1;
b4878f24 3448
2f5cb738
JA
3449 /*
3450 * Does this cfqq already have too much IO in flight?
3451 */
3452 if (cfqq->dispatched >= max_dispatch) {
ef8a41df 3453 bool promote_sync = false;
2f5cb738
JA
3454 /*
3455 * idle queue must always only have a single IO in flight
3456 */
3ed9a296 3457 if (cfq_class_idle(cfqq))
0b182d61 3458 return false;
3ed9a296 3459
ef8a41df 3460 /*
c4ade94f
LS
3461 * If there is only one sync queue
3462 * we can ignore async queue here and give the sync
ef8a41df
SL
3463 * queue no dispatch limit. The reason is that a sync queue can
3464 * preempt an async queue, so limiting the sync queue doesn't make
3465 * sense. This is useful for the aiostress test.
3466 */
c4ade94f
LS
3467 if (cfq_cfqq_sync(cfqq) && cfqd->busy_sync_queues == 1)
3468 promote_sync = true;
ef8a41df 3469
2f5cb738
JA
3470 /*
3471 * We have other queues, don't allow more IO from this one
3472 */
ef8a41df
SL
3473 if (cfqd->busy_queues > 1 && cfq_slice_used_soon(cfqd, cfqq) &&
3474 !promote_sync)
0b182d61 3475 return false;
9ede209e 3476
365722bb 3477 /*
474b18cc 3478 * Sole queue user, no limit
365722bb 3479 */
ef8a41df 3480 if (cfqd->busy_queues == 1 || promote_sync)
abc3c744
SL
3481 max_dispatch = -1;
3482 else
3483 /*
3484 * Normally we start throttling cfqq when cfq_quantum/2
3485 * requests have been dispatched. But we can drive
3486 * deeper queue depths at the beginning of the slice,
3487 * subject to the upper limit of cfq_quantum.
3488 */
3489 max_dispatch = cfqd->cfq_quantum;
8e296755
JA
3490 }
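 /*
  * Worked example of the limits above, with the default cfq_quantum of 8:
  * a queue is normally throttled once 4 requests are in flight, may drive
  * up to the full 8 while its slice is not about to run out, and is not
  * limited at all if it is the only busy queue.
  */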
3491
3492 /*
3493 * Async queues must wait a bit before being allowed dispatch.
3494 * We also ramp up the dispatch depth gradually for async IO,
3495 * based on the last sync IO we serviced
3496 */
963b72fc 3497 if (!cfq_cfqq_sync(cfqq) && cfqd->cfq_latency) {
9a7f38c4 3498 u64 last_sync = ktime_get_ns() - cfqd->last_delayed_sync;
8e296755 3499 unsigned int depth;
365722bb 3500
9a7f38c4 3501 depth = div64_u64(last_sync, cfqd->cfq_slice[1]);
e00c54c3
JA
3502 if (!depth && !cfqq->dispatched)
3503 depth = 1;
8e296755
JA
3504 if (depth < max_dispatch)
3505 max_dispatch = depth;
2f5cb738 3506 }
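 /*
  * Example of the ramp above, assuming the default 100ms sync slice: if
  * the last delayed sync completion was 300ms ago, async dispatch depth
  * is capped at 300/100 = 3; a computed depth of 0 is bumped to 1 when
  * the queue has nothing in flight.
  */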
3ed9a296 3507
0b182d61
JA
3508 /*
3509 * If we're below the current max, allow a dispatch
3510 */
3511 return cfqq->dispatched < max_dispatch;
3512}
3513
3514/*
3515 * Dispatch a request from cfqq, moving it to the request queue
3516 * dispatch list.
3517 */
3518static bool cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3519{
3520 struct request *rq;
3521
3522 BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
3523
3932a86b
GC
3524 rq = cfq_check_fifo(cfqq);
3525 if (rq)
3526 cfq_mark_cfqq_must_dispatch(cfqq);
3527
0b182d61
JA
3528 if (!cfq_may_dispatch(cfqd, cfqq))
3529 return false;
3530
3531 /*
3532 * follow expired path, else get first next available
3533 */
0b182d61
JA
3534 if (!rq)
3535 rq = cfqq->next_rq;
3932a86b
GC
3536 else
3537 cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq);
0b182d61
JA
3538
3539 /*
3540 * insert request into driver dispatch list
3541 */
3542 cfq_dispatch_insert(cfqd->queue, rq);
3543
3544 if (!cfqd->active_cic) {
c5869807 3545 struct cfq_io_cq *cic = RQ_CIC(rq);
0b182d61 3546
c5869807 3547 atomic_long_inc(&cic->icq.ioc->refcount);
0b182d61
JA
3548 cfqd->active_cic = cic;
3549 }
3550
3551 return true;
3552}
3553
3554/*
3555 * Find the cfqq that we need to service and move a request from that to the
3556 * dispatch list
3557 */
3558static int cfq_dispatch_requests(struct request_queue *q, int force)
3559{
3560 struct cfq_data *cfqd = q->elevator->elevator_data;
3561 struct cfq_queue *cfqq;
3562
3563 if (!cfqd->busy_queues)
3564 return 0;
3565
3566 if (unlikely(force))
3567 return cfq_forced_dispatch(cfqd);
3568
3569 cfqq = cfq_select_queue(cfqd);
3570 if (!cfqq)
8e296755
JA
3571 return 0;
3572
2f5cb738 3573 /*
0b182d61 3574 * Dispatch a request from this cfqq, if it is allowed
2f5cb738 3575 */
0b182d61
JA
3576 if (!cfq_dispatch_request(cfqd, cfqq))
3577 return 0;
3578
2f5cb738 3579 cfqq->slice_dispatch++;
b029195d 3580 cfq_clear_cfqq_must_dispatch(cfqq);
22e2c507 3581
2f5cb738
JA
3582 /*
3583 * expire an async queue immediately if it has used up its slice. idle
3584 * queues always expire after 1 dispatch round.
3585 */
3586 if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
3587 cfqq->slice_dispatch >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
3588 cfq_class_idle(cfqq))) {
9a7f38c4 3589 cfqq->slice_end = ktime_get_ns() + 1;
e5ff082e 3590 cfq_slice_expired(cfqd, 0);
1da177e4
LT
3591 }
3592
b217a903 3593 cfq_log_cfqq(cfqd, cfqq, "dispatched a request");
2f5cb738 3594 return 1;
1da177e4
LT
3595}
3596
1da177e4 3597/*
5e705374
JA
3598 * task holds one reference to the queue, dropped when task exits. each rq
3599 * in-flight on this queue also holds a reference, dropped when rq is freed.
1da177e4 3600 *
b1c35769 3601 * Each cfq queue took a reference on the parent group. Drop it now.
1da177e4
LT
3602 * queue lock must be held here.
3603 */
3604static void cfq_put_queue(struct cfq_queue *cfqq)
3605{
22e2c507 3606 struct cfq_data *cfqd = cfqq->cfqd;
0bbfeb83 3607 struct cfq_group *cfqg;
22e2c507 3608
30d7b944 3609 BUG_ON(cfqq->ref <= 0);
1da177e4 3610
30d7b944
SL
3611 cfqq->ref--;
3612 if (cfqq->ref)
1da177e4
LT
3613 return;
3614
7b679138 3615 cfq_log_cfqq(cfqd, cfqq, "put_queue");
1da177e4 3616 BUG_ON(rb_first(&cfqq->sort_list));
22e2c507 3617 BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
b1c35769 3618 cfqg = cfqq->cfqg;
1da177e4 3619
28f95cbc 3620 if (unlikely(cfqd->active_queue == cfqq)) {
e5ff082e 3621 __cfq_slice_expired(cfqd, cfqq, 0);
23e018a1 3622 cfq_schedule_dispatch(cfqd);
28f95cbc 3623 }
22e2c507 3624
f04a6424 3625 BUG_ON(cfq_cfqq_on_rr(cfqq));
1da177e4 3626 kmem_cache_free(cfq_pool, cfqq);
eb7d8c07 3627 cfqg_put(cfqg);
1da177e4
LT
3628}
3629
d02a2c07 3630static void cfq_put_cooperator(struct cfq_queue *cfqq)
1da177e4 3631{
df5fe3e8
JM
3632 struct cfq_queue *__cfqq, *next;
3633
df5fe3e8
JM
3634 /*
3635 * If this queue was scheduled to merge with another queue, be
3636 * sure to drop the reference taken on that queue (and others in
3637 * the merge chain). See cfq_setup_merge and cfq_merge_cfqqs.
3638 */
3639 __cfqq = cfqq->new_cfqq;
3640 while (__cfqq) {
3641 if (__cfqq == cfqq) {
3642 WARN(1, "cfqq->new_cfqq loop detected\n");
3643 break;
3644 }
3645 next = __cfqq->new_cfqq;
3646 cfq_put_queue(__cfqq);
3647 __cfqq = next;
3648 }
d02a2c07
SL
3649}
3650
3651static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3652{
3653 if (unlikely(cfqq == cfqd->active_queue)) {
3654 __cfq_slice_expired(cfqd, cfqq, 0);
3655 cfq_schedule_dispatch(cfqd);
3656 }
3657
3658 cfq_put_cooperator(cfqq);
df5fe3e8 3659
89850f7e
JA
3660 cfq_put_queue(cfqq);
3661}
22e2c507 3662
9b84cacd
TH
3663static void cfq_init_icq(struct io_cq *icq)
3664{
3665 struct cfq_io_cq *cic = icq_to_cic(icq);
3666
9a7f38c4 3667 cic->ttime.last_end_request = ktime_get_ns();
9b84cacd
TH
3668}
3669
c5869807 3670static void cfq_exit_icq(struct io_cq *icq)
89850f7e 3671{
c5869807 3672 struct cfq_io_cq *cic = icq_to_cic(icq);
283287a5 3673 struct cfq_data *cfqd = cic_to_cfqd(cic);
4faa3c81 3674
563180a4
TH
3675 if (cic_to_cfqq(cic, false)) {
3676 cfq_exit_cfqq(cfqd, cic_to_cfqq(cic, false));
3677 cic_set_cfqq(cic, NULL, false);
12a05732
AV
3678 }
3679
563180a4
TH
3680 if (cic_to_cfqq(cic, true)) {
3681 cfq_exit_cfqq(cfqd, cic_to_cfqq(cic, true));
3682 cic_set_cfqq(cic, NULL, true);
12a05732 3683 }
89850f7e
JA
3684}
3685
abede6da 3686static void cfq_init_prio_data(struct cfq_queue *cfqq, struct cfq_io_cq *cic)
22e2c507
JA
3687{
3688 struct task_struct *tsk = current;
3689 int ioprio_class;
3690
3b18152c 3691 if (!cfq_cfqq_prio_changed(cfqq))
22e2c507
JA
3692 return;
3693
598971bf 3694 ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio);
22e2c507 3695 switch (ioprio_class) {
fe094d98
JA
3696 default:
3697 printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
3698 case IOPRIO_CLASS_NONE:
3699 /*
6d63c275 3700 * no prio set, inherit CPU scheduling settings
fe094d98
JA
3701 */
3702 cfqq->ioprio = task_nice_ioprio(tsk);
6d63c275 3703 cfqq->ioprio_class = task_nice_ioclass(tsk);
fe094d98
JA
3704 break;
3705 case IOPRIO_CLASS_RT:
598971bf 3706 cfqq->ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
fe094d98
JA
3707 cfqq->ioprio_class = IOPRIO_CLASS_RT;
3708 break;
3709 case IOPRIO_CLASS_BE:
598971bf 3710 cfqq->ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
fe094d98
JA
3711 cfqq->ioprio_class = IOPRIO_CLASS_BE;
3712 break;
3713 case IOPRIO_CLASS_IDLE:
3714 cfqq->ioprio_class = IOPRIO_CLASS_IDLE;
3715 cfqq->ioprio = 7;
3716 cfq_clear_cfqq_idle_window(cfqq);
3717 break;
22e2c507
JA
3718 }
3719
3720 /*
3721 * keep track of original prio settings in case we have to temporarily
3722 * elevate the priority of this queue
3723 */
3724 cfqq->org_ioprio = cfqq->ioprio;
b8269db4 3725 cfqq->org_ioprio_class = cfqq->ioprio_class;
3b18152c 3726 cfq_clear_cfqq_prio_changed(cfqq);
22e2c507
JA
3727}
3728
598971bf 3729static void check_ioprio_changed(struct cfq_io_cq *cic, struct bio *bio)
22e2c507 3730{
598971bf 3731 int ioprio = cic->icq.ioc->ioprio;
bca4b914 3732 struct cfq_data *cfqd = cic_to_cfqd(cic);
478a82b0 3733 struct cfq_queue *cfqq;
35e6077c 3734
598971bf
TH
3735 /*
3736 * Check whether ioprio has changed. The condition may trigger
3737 * spuriously on a newly created cic but there's no harm.
3738 */
3739 if (unlikely(!cfqd) || likely(cic->ioprio == ioprio))
caaa5f9f
JA
3740 return;
3741
563180a4 3742 cfqq = cic_to_cfqq(cic, false);
caaa5f9f 3743 if (cfqq) {
563180a4 3744 cfq_put_queue(cfqq);
2da8de0b 3745 cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic, bio);
563180a4 3746 cic_set_cfqq(cic, cfqq, false);
22e2c507 3747 }
caaa5f9f 3748
563180a4 3749 cfqq = cic_to_cfqq(cic, true);
caaa5f9f
JA
3750 if (cfqq)
3751 cfq_mark_cfqq_prio_changed(cfqq);
598971bf
TH
3752
3753 cic->ioprio = ioprio;
22e2c507
JA
3754}
3755
d5036d77 3756static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
a6151c3a 3757 pid_t pid, bool is_sync)
d5036d77
JA
3758{
3759 RB_CLEAR_NODE(&cfqq->rb_node);
3760 RB_CLEAR_NODE(&cfqq->p_node);
3761 INIT_LIST_HEAD(&cfqq->fifo);
3762
30d7b944 3763 cfqq->ref = 0;
d5036d77
JA
3764 cfqq->cfqd = cfqd;
3765
3766 cfq_mark_cfqq_prio_changed(cfqq);
3767
3768 if (is_sync) {
3769 if (!cfq_class_idle(cfqq))
3770 cfq_mark_cfqq_idle_window(cfqq);
3771 cfq_mark_cfqq_sync(cfqq);
3772 }
3773 cfqq->pid = pid;
3774}
3775
24610333 3776#ifdef CONFIG_CFQ_GROUP_IOSCHED
142bbdfc 3777static void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio)
24610333 3778{
bca4b914 3779 struct cfq_data *cfqd = cic_to_cfqd(cic);
60a83707 3780 struct cfq_queue *cfqq;
f4da8072 3781 uint64_t serial_nr;
24610333 3782
598971bf 3783 rcu_read_lock();
f4da8072 3784 serial_nr = bio_blkcg(bio)->css.serial_nr;
598971bf 3785 rcu_read_unlock();
24610333 3786
598971bf
TH
3787 /*
3788 * Check whether blkcg has changed. The condition may trigger
3789 * spuriously on a newly created cic but there's no harm.
3790 */
f4da8072 3791 if (unlikely(!cfqd) || likely(cic->blkcg_serial_nr == serial_nr))
142bbdfc 3792 return;
87760e5e 3793
60a83707
TH
3794 /*
3795 * Drop reference to queues. New queues will be assigned in new
3796 * group upon arrival of fresh requests.
3797 */
3798 cfqq = cic_to_cfqq(cic, false);
3799 if (cfqq) {
3800 cfq_log_cfqq(cfqd, cfqq, "changed cgroup");
3801 cic_set_cfqq(cic, NULL, false);
3802 cfq_put_queue(cfqq);
3803 }
3804
3805 cfqq = cic_to_cfqq(cic, true);
3806 if (cfqq) {
3807 cfq_log_cfqq(cfqd, cfqq, "changed cgroup");
3808 cic_set_cfqq(cic, NULL, true);
3809 cfq_put_queue(cfqq);
24610333 3810 }
598971bf 3811
f4da8072 3812 cic->blkcg_serial_nr = serial_nr;
24610333 3813}
598971bf 3814#else
142bbdfc 3815static inline void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio)
5d7f5ce1 3816{
5d7f5ce1 3817}
24610333
VG
3818#endif /* CONFIG_CFQ_GROUP_IOSCHED */
3819
c2dea2d1 3820static struct cfq_queue **
60a83707 3821cfq_async_queue_prio(struct cfq_group *cfqg, int ioprio_class, int ioprio)
c2dea2d1 3822{
fe094d98 3823 switch (ioprio_class) {
c2dea2d1 3824 case IOPRIO_CLASS_RT:
60a83707 3825 return &cfqg->async_cfqq[0][ioprio];
598971bf
TH
3826 case IOPRIO_CLASS_NONE:
3827 ioprio = IOPRIO_NORM;
3828 /* fall through */
c2dea2d1 3829 case IOPRIO_CLASS_BE:
60a83707 3830 return &cfqg->async_cfqq[1][ioprio];
c2dea2d1 3831 case IOPRIO_CLASS_IDLE:
60a83707 3832 return &cfqg->async_idle_cfqq;
c2dea2d1
VT
3833 default:
3834 BUG();
3835 }
3836}
3837
15c31be4 3838static struct cfq_queue *
abede6da 3839cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
2da8de0b 3840 struct bio *bio)
15c31be4 3841{
c6ce1943
JM
3842 int ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio);
3843 int ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
d4aad7ff 3844 struct cfq_queue **async_cfqq = NULL;
4ebc1c61 3845 struct cfq_queue *cfqq;
322731ed
TH
3846 struct cfq_group *cfqg;
3847
3848 rcu_read_lock();
ae118896 3849 cfqg = cfq_lookup_cfqg(cfqd, bio_blkcg(bio));
322731ed
TH
3850 if (!cfqg) {
3851 cfqq = &cfqd->oom_cfqq;
3852 goto out;
3853 }
15c31be4 3854
c2dea2d1 3855 if (!is_sync) {
c6ce1943
JM
3856 if (!ioprio_valid(cic->ioprio)) {
3857 struct task_struct *tsk = current;
3858 ioprio = task_nice_ioprio(tsk);
3859 ioprio_class = task_nice_ioclass(tsk);
3860 }
60a83707 3861 async_cfqq = cfq_async_queue_prio(cfqg, ioprio_class, ioprio);
c2dea2d1 3862 cfqq = *async_cfqq;
4ebc1c61
TH
3863 if (cfqq)
3864 goto out;
c2dea2d1
VT
3865 }
3866
e00f4f4d
TH
3867 cfqq = kmem_cache_alloc_node(cfq_pool,
3868 GFP_NOWAIT | __GFP_ZERO | __GFP_NOWARN,
d4aad7ff
TH
3869 cfqd->queue->node);
3870 if (!cfqq) {
3871 cfqq = &cfqd->oom_cfqq;
3872 goto out;
3873 }
3874
4d608baa
AP
3875 /* cfq_init_cfqq() assumes cfqq->ioprio_class is initialized. */
3876 cfqq->ioprio_class = IOPRIO_CLASS_NONE;
d4aad7ff
TH
3877 cfq_init_cfqq(cfqd, cfqq, current->pid, is_sync);
3878 cfq_init_prio_data(cfqq, cic);
3879 cfq_link_cfqq_cfqg(cfqq, cfqg);
3880 cfq_log_cfqq(cfqd, cfqq, "alloced");
15c31be4 3881
d4aad7ff
TH
3882 if (async_cfqq) {
3883 /* a new async queue is created, pin and remember */
30d7b944 3884 cfqq->ref++;
c2dea2d1 3885 *async_cfqq = cfqq;
15c31be4 3886 }
4ebc1c61 3887out:
30d7b944 3888 cfqq->ref++;
322731ed 3889 rcu_read_unlock();
15c31be4
JA
3890 return cfqq;
3891}
3892
22e2c507 3893static void
9a7f38c4 3894__cfq_update_io_thinktime(struct cfq_ttime *ttime, u64 slice_idle)
1da177e4 3895{
9a7f38c4 3896 u64 elapsed = ktime_get_ns() - ttime->last_end_request;
383cd721 3897 elapsed = min(elapsed, 2UL * slice_idle);
db3b5848 3898
383cd721 3899 ttime->ttime_samples = (7*ttime->ttime_samples + 256) / 8;
9a7f38c4
JM
3900 ttime->ttime_total = div_u64(7*ttime->ttime_total + 256*elapsed, 8);
3901 ttime->ttime_mean = div64_ul(ttime->ttime_total + 128,
3902 ttime->ttime_samples);
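 /*
  * The updates above form a fixed-point exponentially weighted moving
  * average: each step keeps 7/8 of the history and 256 acts as the
  * sample weight/scale, so ttime_mean tracks the recent average gap
  * between a completion and the next request from this context.
  */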
383cd721
SL
3903}
3904
3905static void
3906cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
c5869807 3907 struct cfq_io_cq *cic)
383cd721 3908{
f5f2b6ce 3909 if (cfq_cfqq_sync(cfqq)) {
383cd721 3910 __cfq_update_io_thinktime(&cic->ttime, cfqd->cfq_slice_idle);
f5f2b6ce
SL
3911 __cfq_update_io_thinktime(&cfqq->service_tree->ttime,
3912 cfqd->cfq_slice_idle);
3913 }
7700fc4f
SL
3914#ifdef CONFIG_CFQ_GROUP_IOSCHED
3915 __cfq_update_io_thinktime(&cfqq->cfqg->ttime, cfqd->cfq_group_idle);
3916#endif
22e2c507 3917}
1da177e4 3918
206dc69b 3919static void
b2c18e1e 3920cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
6d048f53 3921 struct request *rq)
206dc69b 3922{
3dde36dd 3923 sector_t sdist = 0;
41647e7a 3924 sector_t n_sec = blk_rq_sectors(rq);
3dde36dd
CZ
3925 if (cfqq->last_request_pos) {
3926 if (cfqq->last_request_pos < blk_rq_pos(rq))
3927 sdist = blk_rq_pos(rq) - cfqq->last_request_pos;
3928 else
3929 sdist = cfqq->last_request_pos - blk_rq_pos(rq);
3930 }
206dc69b 3931
3dde36dd 3932 cfqq->seek_history <<= 1;
41647e7a
CZ
3933 if (blk_queue_nonrot(cfqd->queue))
3934 cfqq->seek_history |= (n_sec < CFQQ_SECT_THR_NONROT);
3935 else
3936 cfqq->seek_history |= (sdist > CFQQ_SEEK_THR);
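 /*
  * seek_history is a 32-bit sliding window with one bit per recent
  * request, set when the request looked seeky (a large seek distance,
  * or a small request on non-rotational storage); CFQQ_SEEKY() later
  * keys off how many of those bits are set.
  */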
206dc69b 3937}
1da177e4 3938
a2b80967
CH
3939static inline bool req_noidle(struct request *req)
3940{
3941 return req_op(req) == REQ_OP_WRITE &&
3942 (req->cmd_flags & (REQ_SYNC | REQ_IDLE)) == REQ_SYNC;
3943}
3944
22e2c507
JA
3945/*
3946 * Disable idle window if the process thinks too long or seeks so much that
3947 * it doesn't matter
3948 */
3949static void
3950cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
c5869807 3951 struct cfq_io_cq *cic)
22e2c507 3952{
7b679138 3953 int old_idle, enable_idle;
1be92f2f 3954
0871714e
JA
3955 /*
3956 * Don't idle for async or idle io prio class
3957 */
3958 if (!cfq_cfqq_sync(cfqq) || cfq_class_idle(cfqq))
1be92f2f
JA
3959 return;
3960
c265a7f4 3961 enable_idle = old_idle = cfq_cfqq_idle_window(cfqq);
1da177e4 3962
76280aff
CZ
3963 if (cfqq->queued[0] + cfqq->queued[1] >= 4)
3964 cfq_mark_cfqq_deep(cfqq);
3965
a2b80967 3966 if (cfqq->next_rq && req_noidle(cfqq->next_rq))
749ef9f8 3967 enable_idle = 0;
f6e8d01b 3968 else if (!atomic_read(&cic->icq.ioc->active_ref) ||
c5869807
TH
3969 !cfqd->cfq_slice_idle ||
3970 (!cfq_cfqq_deep(cfqq) && CFQQ_SEEKY(cfqq)))
22e2c507 3971 enable_idle = 0;
383cd721
SL
3972 else if (sample_valid(cic->ttime.ttime_samples)) {
3973 if (cic->ttime.ttime_mean > cfqd->cfq_slice_idle)
22e2c507
JA
3974 enable_idle = 0;
3975 else
3976 enable_idle = 1;
1da177e4
LT
3977 }
3978
7b679138
JA
3979 if (old_idle != enable_idle) {
3980 cfq_log_cfqq(cfqd, cfqq, "idle=%d", enable_idle);
3981 if (enable_idle)
3982 cfq_mark_cfqq_idle_window(cfqq);
3983 else
3984 cfq_clear_cfqq_idle_window(cfqq);
3985 }
22e2c507 3986}
1da177e4 3987
22e2c507
JA
3988/*
3989 * Check if new_cfqq should preempt the currently active queue. Return false
3990 * for no or if we aren't sure; true will cause a preempt.
3991 */
a6151c3a 3992static bool
22e2c507 3993cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
5e705374 3994 struct request *rq)
22e2c507 3995{
6d048f53 3996 struct cfq_queue *cfqq;
22e2c507 3997
6d048f53
JA
3998 cfqq = cfqd->active_queue;
3999 if (!cfqq)
a6151c3a 4000 return false;
22e2c507 4001
6d048f53 4002 if (cfq_class_idle(new_cfqq))
a6151c3a 4003 return false;
22e2c507
JA
4004
4005 if (cfq_class_idle(cfqq))
a6151c3a 4006 return true;
1e3335de 4007
875feb63
DS
4008 /*
4009 * Don't allow a non-RT request to preempt an ongoing RT cfqq timeslice.
4010 */
4011 if (cfq_class_rt(cfqq) && !cfq_class_rt(new_cfqq))
4012 return false;
4013
374f84ac
JA
4014 /*
4015 * if the new request is sync, but the currently running queue is
4016 * not, let the sync request have priority.
4017 */
3932a86b 4018 if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq) && !cfq_cfqq_must_dispatch(cfqq))
a6151c3a 4019 return true;
1e3335de 4020
3984aa55
JK
4021 /*
4022 * Treat ancestors of current cgroup the same way as current cgroup.
4023 * For anybody else we disallow preemption to guarantee service
4024 * fairness among cgroups.
4025 */
4026 if (!cfqg_is_descendant(cfqq->cfqg, new_cfqq->cfqg))
8682e1f1
VG
4027 return false;
4028
4029 if (cfq_slice_used(cfqq))
4030 return true;
4031
6c80731c
JK
4032 /*
4033 * Allow an RT request to pre-empt an ongoing non-RT cfqq timeslice.
4034 */
4035 if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq))
4036 return true;
4037
4038 WARN_ON_ONCE(cfqq->ioprio_class != new_cfqq->ioprio_class);
8682e1f1 4039 /* Allow preemption only if we are idling on sync-noidle tree */
4d2ceea4 4040 if (cfqd->serving_wl_type == SYNC_NOIDLE_WORKLOAD &&
8682e1f1 4041 cfqq_type(new_cfqq) == SYNC_NOIDLE_WORKLOAD &&
8682e1f1
VG
4042 RB_EMPTY_ROOT(&cfqq->sort_list))
4043 return true;
4044
b53d1ed7
JA
4045 /*
4046 * So both queues are sync. Let the new request get disk time if
4047 * it's a metadata request and the current queue is doing regular IO.
4048 */
65299a3b 4049 if ((rq->cmd_flags & REQ_PRIO) && !cfqq->prio_pending)
b53d1ed7
JA
4050 return true;
4051
d2d59e18
SL
4052 /* An idle queue should not be idle now for some reason */
4053 if (RB_EMPTY_ROOT(&cfqq->sort_list) && !cfq_should_idle(cfqd, cfqq))
4054 return true;
4055
1e3335de 4056 if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
a6151c3a 4057 return false;
1e3335de
JA
4058
4059 /*
4060 * if this request is as-good as one we would expect from the
4061 * current cfqq, let it preempt
4062 */
e9ce335d 4063 if (cfq_rq_close(cfqd, cfqq, rq))
a6151c3a 4064 return true;
1e3335de 4065
a6151c3a 4066 return false;
22e2c507
JA
4067}
4068
4069/*
4070 * cfqq preempts the active queue. if we allowed preempt with no slice left,
4071 * let it have half of its nominal slice.
4072 */
4073static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
4074{
df0793ab
SL
4075 enum wl_type_t old_type = cfqq_type(cfqd->active_queue);
4076
7b679138 4077 cfq_log_cfqq(cfqd, cfqq, "preempt");
df0793ab 4078 cfq_slice_expired(cfqd, 1);
22e2c507 4079
f8ae6e3e
SL
4080 /*
4081 * workload type is changed, don't save slice, otherwise preempt
4082 * doesn't happen
4083 */
df0793ab 4084 if (old_type != cfqq_type(cfqq))
4d2ceea4 4085 cfqq->cfqg->saved_wl_slice = 0;
f8ae6e3e 4086
bf572256
JA
4087 /*
4088 * Put the new queue at the front of the current list,
4089 * so we know that it will be selected next.
4090 */
4091 BUG_ON(!cfq_cfqq_on_rr(cfqq));
edd75ffd
JA
4092
4093 cfq_service_tree_add(cfqd, cfqq, 1);
eda5e0c9 4094
62a37f6b
JT
4095 cfqq->slice_end = 0;
4096 cfq_mark_cfqq_slice_new(cfqq);
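 /*
  * Resetting slice_end and marking the slice as new means the preempting
  * queue is given a fresh slice when it actually starts running rather
  * than inheriting the remainder of the old one.
  */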
22e2c507
JA
4097}
4098
22e2c507 4099/*
5e705374 4100 * Called when a new fs request (rq) is added (to cfqq). Check if there's
22e2c507
JA
4101 * something we should do about it
4102 */
4103static void
5e705374
JA
4104cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
4105 struct request *rq)
22e2c507 4106{
c5869807 4107 struct cfq_io_cq *cic = RQ_CIC(rq);
12e9fddd 4108
45333d5a 4109 cfqd->rq_queued++;
65299a3b
CH
4110 if (rq->cmd_flags & REQ_PRIO)
4111 cfqq->prio_pending++;
374f84ac 4112
383cd721 4113 cfq_update_io_thinktime(cfqd, cfqq, cic);
b2c18e1e 4114 cfq_update_io_seektime(cfqd, cfqq, rq);
9c2c38a1
JA
4115 cfq_update_idle_window(cfqd, cfqq, cic);
4116
b2c18e1e 4117 cfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
22e2c507
JA
4118
4119 if (cfqq == cfqd->active_queue) {
4120 /*
b029195d
JA
4121 * Remember that we saw a request from this process, but
4122 * don't start queuing just yet. Otherwise we risk seeing lots
4123 * of tiny requests, because we disrupt the normal plugging
d6ceb25e
JA
4124 * and merging. If the request is already larger than a single
4125 * page, let it rip immediately. For that case we assume that
2d870722
JA
4126 * merging is already done. Ditto for a busy system that
4127 * has other work pending, don't risk delaying until the
4128 * idle timer unplug to continue working.
22e2c507 4129 */
d6ceb25e 4130 if (cfq_cfqq_wait_request(cfqq)) {
09cbfeaf 4131 if (blk_rq_bytes(rq) > PAGE_SIZE ||
2d870722 4132 cfqd->busy_queues > 1) {
812df48d 4133 cfq_del_timer(cfqd, cfqq);
554554f6 4134 cfq_clear_cfqq_wait_request(cfqq);
24ecfbe2 4135 __blk_run_queue(cfqd->queue);
a11cdaa7 4136 } else {
155fead9 4137 cfqg_stats_update_idle_time(cfqq->cfqg);
bf791937 4138 cfq_mark_cfqq_must_dispatch(cfqq);
a11cdaa7 4139 }
d6ceb25e 4140 }
5e705374 4141 } else if (cfq_should_preempt(cfqd, cfqq, rq)) {
22e2c507
JA
4142 /*
4143 * not the active queue - expire current slice if it is
4144 * idle and has expired its mean thinktime, or this new queue
3a9a3f6c
DS
4145 * has some old slice time left and is of higher priority or
4146 * this new queue is RT and the current one is BE
22e2c507
JA
4147 */
4148 cfq_preempt_queue(cfqd, cfqq);
24ecfbe2 4149 __blk_run_queue(cfqd->queue);
22e2c507 4150 }
1da177e4
LT
4151}
4152
165125e1 4153static void cfq_insert_request(struct request_queue *q, struct request *rq)
1da177e4 4154{
b4878f24 4155 struct cfq_data *cfqd = q->elevator->elevator_data;
5e705374 4156 struct cfq_queue *cfqq = RQ_CFQQ(rq);
22e2c507 4157
7b679138 4158 cfq_log_cfqq(cfqd, cfqq, "insert_request");
abede6da 4159 cfq_init_prio_data(cfqq, RQ_CIC(rq));
1da177e4 4160
9a7f38c4 4161 rq->fifo_time = ktime_get_ns() + cfqd->cfq_fifo_expire[rq_is_sync(rq)];
22e2c507 4162 list_add_tail(&rq->queuelist, &cfqq->fifo);
aa6f6a3d 4163 cfq_add_rq_rb(rq);
ef295ecf 4164 cfqg_stats_update_io_add(RQ_CFQG(rq), cfqd->serving_group,
155fead9 4165 rq->cmd_flags);
5e705374 4166 cfq_rq_enqueued(cfqd, cfqq, rq);
1da177e4
LT
4167}
4168
45333d5a
AC
4169/*
4170 * Update hw_tag based on peak queue depth over 50 samples under
4171 * sufficient load.
4172 */
4173static void cfq_update_hw_tag(struct cfq_data *cfqd)
4174{
1a1238a7
SL
4175 struct cfq_queue *cfqq = cfqd->active_queue;
4176
53c583d2
CZ
4177 if (cfqd->rq_in_driver > cfqd->hw_tag_est_depth)
4178 cfqd->hw_tag_est_depth = cfqd->rq_in_driver;
e459dd08
CZ
4179
4180 if (cfqd->hw_tag == 1)
4181 return;
45333d5a
AC
4182
4183 if (cfqd->rq_queued <= CFQ_HW_QUEUE_MIN &&
53c583d2 4184 cfqd->rq_in_driver <= CFQ_HW_QUEUE_MIN)
45333d5a
AC
4185 return;
4186
1a1238a7
SL
4187 /*
4188 * If the active queue doesn't have enough requests and can idle, cfq
4189 * might not dispatch sufficient requests to hardware. Don't zero hw_tag
4190 * in this case.
4191 */
4192 if (cfqq && cfq_cfqq_idle_window(cfqq) &&
4193 cfqq->dispatched + cfqq->queued[0] + cfqq->queued[1] <
53c583d2 4194 CFQ_HW_QUEUE_MIN && cfqd->rq_in_driver < CFQ_HW_QUEUE_MIN)
1a1238a7
SL
4195 return;
4196
45333d5a
AC
4197 if (cfqd->hw_tag_samples++ < 50)
4198 return;
4199
e459dd08 4200 if (cfqd->hw_tag_est_depth >= CFQ_HW_QUEUE_MIN)
45333d5a
AC
4201 cfqd->hw_tag = 1;
4202 else
4203 cfqd->hw_tag = 0;
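 /*
  * In other words: once 50 completions have been sampled, a peak driver
  * depth of at least CFQ_HW_QUEUE_MIN is taken as evidence that the
  * device does command queuing; otherwise hw_tag stays off.
  */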
45333d5a
AC
4204}
4205
7667aa06
VG
4206static bool cfq_should_wait_busy(struct cfq_data *cfqd, struct cfq_queue *cfqq)
4207{
c5869807 4208 struct cfq_io_cq *cic = cfqd->active_cic;
9a7f38c4 4209 u64 now = ktime_get_ns();
7667aa06 4210
02a8f01b
JT
4211 /* If the queue already has requests, don't wait */
4212 if (!RB_EMPTY_ROOT(&cfqq->sort_list))
4213 return false;
4214
7667aa06
VG
4215 /* If there are other queues in the group, don't wait */
4216 if (cfqq->cfqg->nr_cfqq > 1)
4217 return false;
4218
7700fc4f
SL
4219 /* the only queue in the group, but think time is big */
4220 if (cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true))
4221 return false;
4222
7667aa06
VG
4223 if (cfq_slice_used(cfqq))
4224 return true;
4225
4226 /* if slice left is less than think time, wait busy */
383cd721 4227 if (cic && sample_valid(cic->ttime.ttime_samples)
9a7f38c4 4228 && (cfqq->slice_end - now < cic->ttime.ttime_mean))
7667aa06
VG
4229 return true;
4230
4231 /*
4232 * If the think time is less than a jiffy then ttime_mean=0 and the
4233 * check above will not be true. It might happen that the slice has not expired yet
4234 * but will expire soon (4-5 ns) during select_queue(). To cover the
4235 * case where think time is less than a jiffy, mark the queue wait
4236 * busy if only 1 jiffy is left in the slice.
4237 */
9a7f38c4 4238 if (cfqq->slice_end - now <= jiffies_to_nsecs(1))
7667aa06
VG
4239 return true;
4240
4241 return false;
4242}
4243
165125e1 4244static void cfq_completed_request(struct request_queue *q, struct request *rq)
1da177e4 4245{
5e705374 4246 struct cfq_queue *cfqq = RQ_CFQQ(rq);
b4878f24 4247 struct cfq_data *cfqd = cfqq->cfqd;
5380a101 4248 const int sync = rq_is_sync(rq);
9a7f38c4 4249 u64 now = ktime_get_ns();
1da177e4 4250
a2b80967 4251 cfq_log_cfqq(cfqd, cfqq, "complete rqnoidle %d", req_noidle(rq));
1da177e4 4252
45333d5a
AC
4253 cfq_update_hw_tag(cfqd);
4254
53c583d2 4255 WARN_ON(!cfqd->rq_in_driver);
6d048f53 4256 WARN_ON(!cfqq->dispatched);
53c583d2 4257 cfqd->rq_in_driver--;
6d048f53 4258 cfqq->dispatched--;
80bdf0c7 4259 (RQ_CFQG(rq))->dispatched--;
155fead9 4260 cfqg_stats_update_completion(cfqq->cfqg, rq_start_time_ns(rq),
ef295ecf 4261 rq_io_start_time_ns(rq), rq->cmd_flags);
1da177e4 4262
53c583d2 4263 cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]--;
3ed9a296 4264
365722bb 4265 if (sync) {
34b98d03 4266 struct cfq_rb_root *st;
f5f2b6ce 4267
383cd721 4268 RQ_CIC(rq)->ttime.last_end_request = now;
f5f2b6ce
SL
4269
4270 if (cfq_cfqq_on_rr(cfqq))
34b98d03 4271 st = cfqq->service_tree;
f5f2b6ce 4272 else
34b98d03
VG
4273 st = st_for(cfqq->cfqg, cfqq_class(cfqq),
4274 cfqq_type(cfqq));
4275
4276 st->ttime.last_end_request = now;
149321a6
JK
4277 /*
4278 * We have to do this check in jiffies since start_time is in
4279 * jiffies and it is not trivial to convert to ns. If
4280 * cfq_fifo_expire[1] ever comes close to 1 jiffy, this test
4281 * will become problematic but so far we are fine (the default
4282 * is 128 ms).
4283 */
4284 if (!time_after(rq->start_time +
4285 nsecs_to_jiffies(cfqd->cfq_fifo_expire[1]),
4286 jiffies))
573412b2 4287 cfqd->last_delayed_sync = now;
365722bb 4288 }
caaa5f9f 4289
7700fc4f
SL
4290#ifdef CONFIG_CFQ_GROUP_IOSCHED
4291 cfqq->cfqg->ttime.last_end_request = now;
4292#endif
4293
caaa5f9f
JA
4294 /*
4295 * If this is the active queue, check if it needs to be expired,
4296 * or if we want to idle in case it has no pending requests.
4297 */
4298 if (cfqd->active_queue == cfqq) {
a36e71f9
JA
4299 const bool cfqq_empty = RB_EMPTY_ROOT(&cfqq->sort_list);
4300
44f7c160
JA
4301 if (cfq_cfqq_slice_new(cfqq)) {
4302 cfq_set_prio_slice(cfqd, cfqq);
4303 cfq_clear_cfqq_slice_new(cfqq);
4304 }
f75edf2d
VG
4305
4306 /*
7667aa06
VG
4307 * Should we wait for next request to come in before we expire
4308 * the queue.
f75edf2d 4309 */
7667aa06 4310 if (cfq_should_wait_busy(cfqd, cfqq)) {
9a7f38c4 4311 u64 extend_sl = cfqd->cfq_slice_idle;
80bdf0c7
VG
4312 if (!cfqd->cfq_slice_idle)
4313 extend_sl = cfqd->cfq_group_idle;
9a7f38c4 4314 cfqq->slice_end = now + extend_sl;
f75edf2d 4315 cfq_mark_cfqq_wait_busy(cfqq);
b1ffe737 4316 cfq_log_cfqq(cfqd, cfqq, "will busy wait");
f75edf2d
VG
4317 }
4318
a36e71f9 4319 /*
8e550632
CZ
4320 * Idling is not enabled on:
4321 * - expired queues
4322 * - idle-priority queues
4323 * - async queues
4324 * - queues with still some requests queued
4325 * - when there is a close cooperator
a36e71f9 4326 */
0871714e 4327 if (cfq_slice_used(cfqq) || cfq_class_idle(cfqq))
e5ff082e 4328 cfq_slice_expired(cfqd, 1);
8e550632
CZ
4329 else if (sync && cfqq_empty &&
4330 !cfq_close_cooperator(cfqd, cfqq)) {
749ef9f8 4331 cfq_arm_slice_timer(cfqd);
8e550632 4332 }
caaa5f9f 4333 }
6d048f53 4334
53c583d2 4335 if (!cfqd->rq_in_driver)
23e018a1 4336 cfq_schedule_dispatch(cfqd);
1da177e4
LT
4337}
4338
ef295ecf 4339static void cfqq_boost_on_prio(struct cfq_queue *cfqq, unsigned int op)
b8269db4
JA
4340{
4341 /*
4342 * If REQ_PRIO is set, boost class and prio level, if it's below
4343 * BE/NORM. If prio is not set, restore the potentially boosted
4344 * class/prio level.
4345 */
ef295ecf 4346 if (!(op & REQ_PRIO)) {
b8269db4
JA
4347 cfqq->ioprio_class = cfqq->org_ioprio_class;
4348 cfqq->ioprio = cfqq->org_ioprio;
4349 } else {
4350 if (cfq_class_idle(cfqq))
4351 cfqq->ioprio_class = IOPRIO_CLASS_BE;
4352 if (cfqq->ioprio > IOPRIO_NORM)
4353 cfqq->ioprio = IOPRIO_NORM;
4354 }
4355}
4356
89850f7e 4357static inline int __cfq_may_queue(struct cfq_queue *cfqq)
22e2c507 4358{
1b379d8d 4359 if (cfq_cfqq_wait_request(cfqq) && !cfq_cfqq_must_alloc_slice(cfqq)) {
3b18152c 4360 cfq_mark_cfqq_must_alloc_slice(cfqq);
22e2c507 4361 return ELV_MQUEUE_MUST;
3b18152c 4362 }
1da177e4 4363
22e2c507 4364 return ELV_MQUEUE_MAY;
22e2c507
JA
4365}
4366
ef295ecf 4367static int cfq_may_queue(struct request_queue *q, unsigned int op)
22e2c507
JA
4368{
4369 struct cfq_data *cfqd = q->elevator->elevator_data;
4370 struct task_struct *tsk = current;
c5869807 4371 struct cfq_io_cq *cic;
22e2c507
JA
4372 struct cfq_queue *cfqq;
4373
4374 /*
4375 * don't force setup of a queue from here, as a call to may_queue
4376 * does not necessarily imply that a request actually will be queued.
4377 * so just lookup a possibly existing queue, or return 'may queue'
4378 * if that fails
4379 */
4ac845a2 4380 cic = cfq_cic_lookup(cfqd, tsk->io_context);
91fac317
VT
4381 if (!cic)
4382 return ELV_MQUEUE_MAY;
4383
ef295ecf 4384 cfqq = cic_to_cfqq(cic, op_is_sync(op));
22e2c507 4385 if (cfqq) {
abede6da 4386 cfq_init_prio_data(cfqq, cic);
ef295ecf 4387 cfqq_boost_on_prio(cfqq, op);
22e2c507 4388
89850f7e 4389 return __cfq_may_queue(cfqq);
22e2c507
JA
4390 }
4391
4392 return ELV_MQUEUE_MAY;
1da177e4
LT
4393}
4394
1da177e4
LT
4395/*
4396 * queue lock held here
4397 */
bb37b94c 4398static void cfq_put_request(struct request *rq)
1da177e4 4399{
5e705374 4400 struct cfq_queue *cfqq = RQ_CFQQ(rq);
1da177e4 4401
5e705374 4402 if (cfqq) {
22e2c507 4403 const int rw = rq_data_dir(rq);
1da177e4 4404
22e2c507
JA
4405 BUG_ON(!cfqq->allocated[rw]);
4406 cfqq->allocated[rw]--;
1da177e4 4407
7f1dc8a2 4408 /* Put down rq reference on cfqg */
eb7d8c07 4409 cfqg_put(RQ_CFQG(rq));
a612fddf
TH
4410 rq->elv.priv[0] = NULL;
4411 rq->elv.priv[1] = NULL;
7f1dc8a2 4412
1da177e4
LT
4413 cfq_put_queue(cfqq);
4414 }
4415}
4416
df5fe3e8 4417static struct cfq_queue *
c5869807 4418cfq_merge_cfqqs(struct cfq_data *cfqd, struct cfq_io_cq *cic,
df5fe3e8
JM
4419 struct cfq_queue *cfqq)
4420{
4421 cfq_log_cfqq(cfqd, cfqq, "merging with queue %p", cfqq->new_cfqq);
4422 cic_set_cfqq(cic, cfqq->new_cfqq, 1);
b3b6d040 4423 cfq_mark_cfqq_coop(cfqq->new_cfqq);
df5fe3e8
JM
4424 cfq_put_queue(cfqq);
4425 return cic_to_cfqq(cic, 1);
4426}
4427
e6c5bc73
JM
4428/*
4429 * Returns NULL if a new cfqq should be allocated, or the old cfqq if this
4430 * was the last process referring to said cfqq.
4431 */
4432static struct cfq_queue *
c5869807 4433split_cfqq(struct cfq_io_cq *cic, struct cfq_queue *cfqq)
e6c5bc73
JM
4434{
4435 if (cfqq_process_refs(cfqq) == 1) {
e6c5bc73
JM
4436 cfqq->pid = current->pid;
4437 cfq_clear_cfqq_coop(cfqq);
ae54abed 4438 cfq_clear_cfqq_split_coop(cfqq);
e6c5bc73
JM
4439 return cfqq;
4440 }
4441
4442 cic_set_cfqq(cic, NULL, 1);
d02a2c07
SL
4443
4444 cfq_put_cooperator(cfqq);
4445
e6c5bc73
JM
4446 cfq_put_queue(cfqq);
4447 return NULL;
4448}
1da177e4 4449/*
22e2c507 4450 * Allocate cfq data structures associated with this request.
1da177e4 4451 */
22e2c507 4452static int
852c788f
TH
4453cfq_set_request(struct request_queue *q, struct request *rq, struct bio *bio,
4454 gfp_t gfp_mask)
1da177e4
LT
4455{
4456 struct cfq_data *cfqd = q->elevator->elevator_data;
f1f8cc94 4457 struct cfq_io_cq *cic = icq_to_cic(rq->elv.icq);
1da177e4 4458 const int rw = rq_data_dir(rq);
a6151c3a 4459 const bool is_sync = rq_is_sync(rq);
22e2c507 4460 struct cfq_queue *cfqq;
1da177e4 4461
216284c3 4462 spin_lock_irq(q->queue_lock);
f1f8cc94 4463
598971bf 4464 check_ioprio_changed(cic, bio);
142bbdfc 4465 check_blkcg_changed(cic, bio);
e6c5bc73 4466new_queue:
91fac317 4467 cfqq = cic_to_cfqq(cic, is_sync);
32f2e807 4468 if (!cfqq || cfqq == &cfqd->oom_cfqq) {
bce6133b
TH
4469 if (cfqq)
4470 cfq_put_queue(cfqq);
2da8de0b 4471 cfqq = cfq_get_queue(cfqd, is_sync, cic, bio);
91fac317 4472 cic_set_cfqq(cic, cfqq, is_sync);
df5fe3e8 4473 } else {
e6c5bc73
JM
4474 /*
4475 * If the queue was seeky for too long, break it apart.
4476 */
ae54abed 4477 if (cfq_cfqq_coop(cfqq) && cfq_cfqq_split_coop(cfqq)) {
e6c5bc73
JM
4478 cfq_log_cfqq(cfqd, cfqq, "breaking apart cfqq");
4479 cfqq = split_cfqq(cic, cfqq);
4480 if (!cfqq)
4481 goto new_queue;
4482 }
4483
df5fe3e8
JM
4484 /*
4485 * Check to see if this queue is scheduled to merge with
4486 * another, closely cooperating queue. The merging of
4487 * queues happens here as it must be done in process context.
4488 * The reference on new_cfqq was taken in merge_cfqqs.
4489 */
4490 if (cfqq->new_cfqq)
4491 cfqq = cfq_merge_cfqqs(cfqd, cic, cfqq);
91fac317 4492 }
1da177e4
LT
4493
4494 cfqq->allocated[rw]++;
1da177e4 4495
6fae9c25 4496 cfqq->ref++;
eb7d8c07 4497 cfqg_get(cfqq->cfqg);
a612fddf 4498 rq->elv.priv[0] = cfqq;
1adaf3dd 4499 rq->elv.priv[1] = cfqq->cfqg;
216284c3 4500 spin_unlock_irq(q->queue_lock);
5d7f5ce1 4501
5e705374 4502 return 0;
1da177e4
LT
4503}
4504
65f27f38 4505static void cfq_kick_queue(struct work_struct *work)
22e2c507 4506{
65f27f38 4507 struct cfq_data *cfqd =
23e018a1 4508 container_of(work, struct cfq_data, unplug_work);
165125e1 4509 struct request_queue *q = cfqd->queue;
22e2c507 4510
40bb54d1 4511 spin_lock_irq(q->queue_lock);
24ecfbe2 4512 __blk_run_queue(cfqd->queue);
40bb54d1 4513 spin_unlock_irq(q->queue_lock);
22e2c507
JA
4514}
4515
4516/*
4517 * Timer running if the active_queue is currently idling inside its time slice
4518 */
91148325 4519static enum hrtimer_restart cfq_idle_slice_timer(struct hrtimer *timer)
22e2c507 4520{
91148325
JK
4521 struct cfq_data *cfqd = container_of(timer, struct cfq_data,
4522 idle_slice_timer);
22e2c507
JA
4523 struct cfq_queue *cfqq;
4524 unsigned long flags;
3c6bd2f8 4525 int timed_out = 1;
22e2c507 4526
7b679138
JA
4527 cfq_log(cfqd, "idle timer fired");
4528
22e2c507
JA
4529 spin_lock_irqsave(cfqd->queue->queue_lock, flags);
4530
fe094d98
JA
4531 cfqq = cfqd->active_queue;
4532 if (cfqq) {
3c6bd2f8
JA
4533 timed_out = 0;
4534
b029195d
JA
4535 /*
4536 * We saw a request before the queue expired, let it through
4537 */
4538 if (cfq_cfqq_must_dispatch(cfqq))
4539 goto out_kick;
4540
22e2c507
JA
4541 /*
4542 * expired
4543 */
44f7c160 4544 if (cfq_slice_used(cfqq))
22e2c507
JA
4545 goto expire;
4546
4547 /*
4548 * only expire and reinvoke request handler, if there are
4549 * other queues with pending requests
4550 */
caaa5f9f 4551 if (!cfqd->busy_queues)
22e2c507 4552 goto out_cont;
22e2c507
JA
4553
4554 /*
4555 * not expired and it has a request pending, let it dispatch
4556 */
75e50984 4557 if (!RB_EMPTY_ROOT(&cfqq->sort_list))
22e2c507 4558 goto out_kick;
76280aff
CZ
4559
4560 /*
4561 * Queue depth flag is reset only when the idle didn't succeed
4562 */
4563 cfq_clear_cfqq_deep(cfqq);
22e2c507
JA
4564 }
4565expire:
e5ff082e 4566 cfq_slice_expired(cfqd, timed_out);
22e2c507 4567out_kick:
23e018a1 4568 cfq_schedule_dispatch(cfqd);
22e2c507
JA
4569out_cont:
4570 spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
91148325 4571 return HRTIMER_NORESTART;
22e2c507
JA
4572}
4573
3b18152c
JA
4574static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
4575{
91148325 4576 hrtimer_cancel(&cfqd->idle_slice_timer);
23e018a1 4577 cancel_work_sync(&cfqd->unplug_work);
3b18152c 4578}
22e2c507 4579
b374d18a 4580static void cfq_exit_queue(struct elevator_queue *e)
1da177e4 4581{
22e2c507 4582 struct cfq_data *cfqd = e->elevator_data;
165125e1 4583 struct request_queue *q = cfqd->queue;
22e2c507 4584
3b18152c 4585 cfq_shutdown_timer_wq(cfqd);
e2d74ac0 4586
d9ff4187 4587 spin_lock_irq(q->queue_lock);
e2d74ac0 4588
d9ff4187 4589 if (cfqd->active_queue)
e5ff082e 4590 __cfq_slice_expired(cfqd, cfqd->active_queue, 0);
e2d74ac0 4591
03aa264a
TH
4592 spin_unlock_irq(q->queue_lock);
4593
a90d742e
AV
4594 cfq_shutdown_timer_wq(cfqd);
4595
ffea73fc
TH
4596#ifdef CONFIG_CFQ_GROUP_IOSCHED
4597 blkcg_deactivate_policy(q, &blkcg_policy_cfq);
4598#else
f51b802c 4599 kfree(cfqd->root_group);
2abae55f 4600#endif
56edf7d7 4601 kfree(cfqd);
1da177e4
LT
4602}
4603
d50235b7 4604static int cfq_init_queue(struct request_queue *q, struct elevator_type *e)
1da177e4
LT
4605{
4606 struct cfq_data *cfqd;
3c798398 4607 struct blkcg_gq *blkg __maybe_unused;
a2b1693b 4608 int i, ret;
d50235b7
JM
4609 struct elevator_queue *eq;
4610
4611 eq = elevator_alloc(q, e);
4612 if (!eq)
4613 return -ENOMEM;
1da177e4 4614
c1b511eb 4615 cfqd = kzalloc_node(sizeof(*cfqd), GFP_KERNEL, q->node);
d50235b7
JM
4616 if (!cfqd) {
4617 kobject_put(&eq->kobj);
b2fab5ac 4618 return -ENOMEM;
d50235b7
JM
4619 }
4620 eq->elevator_data = cfqd;
80b15c73 4621
f51b802c 4622 cfqd->queue = q;
d50235b7
JM
4623 spin_lock_irq(q->queue_lock);
4624 q->elevator = eq;
4625 spin_unlock_irq(q->queue_lock);
f51b802c 4626
1fa8f6d6
VG
4627 /* Init root service tree */
4628 cfqd->grp_service_tree = CFQ_RB_ROOT;
4629
f51b802c 4630 /* Init root group and prefer root group over other groups by default */
25fb5169 4631#ifdef CONFIG_CFQ_GROUP_IOSCHED
3c798398 4632 ret = blkcg_activate_policy(q, &blkcg_policy_cfq);
a2b1693b
TH
4633 if (ret)
4634 goto out_free;
f51b802c 4635
a2b1693b 4636 cfqd->root_group = blkg_to_cfqg(q->root_blkg);
f51b802c 4637#else
a2b1693b 4638 ret = -ENOMEM;
f51b802c
TH
4639 cfqd->root_group = kzalloc_node(sizeof(*cfqd->root_group),
4640 GFP_KERNEL, cfqd->queue->node);
a2b1693b
TH
4641 if (!cfqd->root_group)
4642 goto out_free;
5624a4e4 4643
a2b1693b 4644 cfq_init_cfqg_base(cfqd->root_group);
3ecca629
TH
4645 cfqd->root_group->weight = 2 * CFQ_WEIGHT_LEGACY_DFL;
4646 cfqd->root_group->leaf_weight = 2 * CFQ_WEIGHT_LEGACY_DFL;
69d7fde5 4647#endif
5624a4e4 4648
26a2ac00
JA
4649 /*
4650 * Not strictly needed (since RB_ROOT just clears the node and we
4651 * zeroed cfqd on alloc), but better be safe in case someone decides
4652 * to add magic to the rb code
4653 */
4654 for (i = 0; i < CFQ_PRIO_LISTS; i++)
4655 cfqd->prio_trees[i] = RB_ROOT;
4656
6118b70b 4657 /*
d4aad7ff 4658 * Our fallback cfqq if cfq_get_queue() runs into OOM issues.
6118b70b 4659 * Grab a permanent reference to it, so that the normal code flow
f51b802c
TH
4660 * will not attempt to free it. oom_cfqq is linked to root_group
4661 * but shouldn't hold a reference as it'll never be unlinked. Lose
4662 * the reference from linking right away.
6118b70b
JA
4663 */
4664 cfq_init_cfqq(cfqd, &cfqd->oom_cfqq, 1, 0);
30d7b944 4665 cfqd->oom_cfqq.ref++;
1adaf3dd
TH
4666
4667 spin_lock_irq(q->queue_lock);
f51b802c 4668 cfq_link_cfqq_cfqg(&cfqd->oom_cfqq, cfqd->root_group);
eb7d8c07 4669 cfqg_put(cfqd->root_group);
1adaf3dd 4670 spin_unlock_irq(q->queue_lock);
1da177e4 4671
91148325
JK
4672 hrtimer_init(&cfqd->idle_slice_timer, CLOCK_MONOTONIC,
4673 HRTIMER_MODE_REL);
22e2c507 4674 cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
22e2c507 4675
23e018a1 4676 INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);
22e2c507 4677
1da177e4 4678 cfqd->cfq_quantum = cfq_quantum;
22e2c507
JA
4679 cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
4680 cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];
1da177e4
LT
4681 cfqd->cfq_back_max = cfq_back_max;
4682 cfqd->cfq_back_penalty = cfq_back_penalty;
22e2c507
JA
4683 cfqd->cfq_slice[0] = cfq_slice_async;
4684 cfqd->cfq_slice[1] = cfq_slice_sync;
5bf14c07 4685 cfqd->cfq_target_latency = cfq_target_latency;
22e2c507 4686 cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
0bb97947 4687 cfqd->cfq_slice_idle = cfq_slice_idle;
80bdf0c7 4688 cfqd->cfq_group_idle = cfq_group_idle;
963b72fc 4689 cfqd->cfq_latency = 1;
e459dd08 4690 cfqd->hw_tag = -1;
edc71131
CZ
4691 /*
4692 * we optimistically start assuming sync ops weren't delayed in the last
4693 * second, in order to have larger depth for async operations.
4694 */
9a7f38c4 4695 cfqd->last_delayed_sync = ktime_get_ns() - NSEC_PER_SEC;
b2fab5ac 4696 return 0;
a2b1693b
TH
4697
4698out_free:
4699 kfree(cfqd);
d50235b7 4700 kobject_put(&eq->kobj);
a2b1693b 4701 return ret;
1da177e4
LT
4702}
4703
0bb97947
JA
4704static void cfq_registered_queue(struct request_queue *q)
4705{
4706 struct elevator_queue *e = q->elevator;
4707 struct cfq_data *cfqd = e->elevator_data;
4708
4709 /*
4710 * Default to IOPS mode with no idling for SSDs
4711 */
4712 if (blk_queue_nonrot(q))
4713 cfqd->cfq_slice_idle = 0;
142bbdfc 4714 wbt_disable_default(q);
0bb97947
JA
4715}
4716
1da177e4
LT
4717/*
4718 * sysfs parts below -->
4719 */
1da177e4
LT
4720static ssize_t
4721cfq_var_show(unsigned int var, char *page)
4722{
176167ad 4723 return sprintf(page, "%u\n", var);
1da177e4
LT
4724}
4725
4726static ssize_t
4727cfq_var_store(unsigned int *var, const char *page, size_t count)
4728{
4729 char *p = (char *) page;
4730
4731 *var = simple_strtoul(p, &p, 10);
4732 return count;
4733}
4734
1da177e4 4735#define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \
b374d18a 4736static ssize_t __FUNC(struct elevator_queue *e, char *page) \
1da177e4 4737{ \
3d1ab40f 4738 struct cfq_data *cfqd = e->elevator_data; \
9a7f38c4 4739 u64 __data = __VAR; \
1da177e4 4740 if (__CONV) \
9a7f38c4 4741 __data = div_u64(__data, NSEC_PER_MSEC); \
1da177e4
LT
4742 return cfq_var_show(__data, (page)); \
4743}
4744SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
22e2c507
JA
4745SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1);
4746SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
e572ec7e
AV
4747SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
4748SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
22e2c507 4749SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
80bdf0c7 4750SHOW_FUNCTION(cfq_group_idle_show, cfqd->cfq_group_idle, 1);
22e2c507
JA
4751SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
4752SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
4753SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
963b72fc 4754SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0);
5bf14c07 4755SHOW_FUNCTION(cfq_target_latency_show, cfqd->cfq_target_latency, 1);
1da177e4
LT
4756#undef SHOW_FUNCTION
4757
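/*
 * The *_us attributes defined below expose the same nanosecond-based
 * tunables with microsecond granularity, while the plain variants above
 * round to milliseconds.
 */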
d2d481d0
JM
4758#define USEC_SHOW_FUNCTION(__FUNC, __VAR) \
4759static ssize_t __FUNC(struct elevator_queue *e, char *page) \
4760{ \
4761 struct cfq_data *cfqd = e->elevator_data; \
4762 u64 __data = __VAR; \
4763 __data = div_u64(__data, NSEC_PER_USEC); \
4764 return cfq_var_show(__data, (page)); \
4765}
4766USEC_SHOW_FUNCTION(cfq_slice_idle_us_show, cfqd->cfq_slice_idle);
4767USEC_SHOW_FUNCTION(cfq_group_idle_us_show, cfqd->cfq_group_idle);
4768USEC_SHOW_FUNCTION(cfq_slice_sync_us_show, cfqd->cfq_slice[1]);
4769USEC_SHOW_FUNCTION(cfq_slice_async_us_show, cfqd->cfq_slice[0]);
4770USEC_SHOW_FUNCTION(cfq_target_latency_us_show, cfqd->cfq_target_latency);
4771#undef USEC_SHOW_FUNCTION
4772
1da177e4 4773#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \
b374d18a 4774static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
1da177e4 4775{ \
3d1ab40f 4776 struct cfq_data *cfqd = e->elevator_data; \
1da177e4
LT
4777 unsigned int __data; \
4778 int ret = cfq_var_store(&__data, (page), count); \
4779 if (__data < (MIN)) \
4780 __data = (MIN); \
4781 else if (__data > (MAX)) \
4782 __data = (MAX); \
4783 if (__CONV) \
9a7f38c4 4784 *(__PTR) = (u64)__data * NSEC_PER_MSEC; \
1da177e4
LT
4785 else \
4786 *(__PTR) = __data; \
4787 return ret; \
4788}
4789STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
fe094d98
JA
4790STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1,
4791 UINT_MAX, 1);
4792STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1,
4793 UINT_MAX, 1);
e572ec7e 4794STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
fe094d98
JA
4795STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1,
4796 UINT_MAX, 0);
22e2c507 4797STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
80bdf0c7 4798STORE_FUNCTION(cfq_group_idle_store, &cfqd->cfq_group_idle, 0, UINT_MAX, 1);
22e2c507
JA
4799STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
4800STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
fe094d98
JA
4801STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
4802 UINT_MAX, 0);
963b72fc 4803STORE_FUNCTION(cfq_low_latency_store, &cfqd->cfq_latency, 0, 1, 0);
5bf14c07 4804STORE_FUNCTION(cfq_target_latency_store, &cfqd->cfq_target_latency, 1, UINT_MAX, 1);
1da177e4
LT
4805#undef STORE_FUNCTION
4806
d2d481d0
JM
4807#define USEC_STORE_FUNCTION(__FUNC, __PTR, MIN, MAX) \
4808static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
4809{ \
4810 struct cfq_data *cfqd = e->elevator_data; \
4811 unsigned int __data; \
4812 int ret = cfq_var_store(&__data, (page), count); \
4813 if (__data < (MIN)) \
4814 __data = (MIN); \
4815 else if (__data > (MAX)) \
4816 __data = (MAX); \
4817 *(__PTR) = (u64)__data * NSEC_PER_USEC; \
4818 return ret; \
4819}
4820USEC_STORE_FUNCTION(cfq_slice_idle_us_store, &cfqd->cfq_slice_idle, 0, UINT_MAX);
4821USEC_STORE_FUNCTION(cfq_group_idle_us_store, &cfqd->cfq_group_idle, 0, UINT_MAX);
4822USEC_STORE_FUNCTION(cfq_slice_sync_us_store, &cfqd->cfq_slice[1], 1, UINT_MAX);
4823USEC_STORE_FUNCTION(cfq_slice_async_us_store, &cfqd->cfq_slice[0], 1, UINT_MAX);
4824USEC_STORE_FUNCTION(cfq_target_latency_us_store, &cfqd->cfq_target_latency, 1, UINT_MAX);
4825#undef USEC_STORE_FUNCTION
4826
e572ec7e
AV
4827#define CFQ_ATTR(name) \
4828 __ATTR(name, S_IRUGO|S_IWUSR, cfq_##name##_show, cfq_##name##_store)
4829
4830static struct elv_fs_entry cfq_attrs[] = {
4831 CFQ_ATTR(quantum),
e572ec7e
AV
4832 CFQ_ATTR(fifo_expire_sync),
4833 CFQ_ATTR(fifo_expire_async),
4834 CFQ_ATTR(back_seek_max),
4835 CFQ_ATTR(back_seek_penalty),
4836 CFQ_ATTR(slice_sync),
d2d481d0 4837 CFQ_ATTR(slice_sync_us),
e572ec7e 4838 CFQ_ATTR(slice_async),
d2d481d0 4839 CFQ_ATTR(slice_async_us),
e572ec7e
AV
4840 CFQ_ATTR(slice_async_rq),
4841 CFQ_ATTR(slice_idle),
d2d481d0 4842 CFQ_ATTR(slice_idle_us),
80bdf0c7 4843 CFQ_ATTR(group_idle),
d2d481d0 4844 CFQ_ATTR(group_idle_us),
963b72fc 4845 CFQ_ATTR(low_latency),
5bf14c07 4846 CFQ_ATTR(target_latency),
d2d481d0 4847 CFQ_ATTR(target_latency_us),
e572ec7e 4848 __ATTR_NULL
1da177e4
LT
4849};
4850
1da177e4 4851static struct elevator_type iosched_cfq = {
c51ca6cf 4852 .ops.sq = {
1da177e4
LT
4853 .elevator_merge_fn = cfq_merge,
4854 .elevator_merged_fn = cfq_merged_request,
4855 .elevator_merge_req_fn = cfq_merged_requests,
72ef799b
TE
4856 .elevator_allow_bio_merge_fn = cfq_allow_bio_merge,
4857 .elevator_allow_rq_merge_fn = cfq_allow_rq_merge,
812d4026 4858 .elevator_bio_merged_fn = cfq_bio_merged,
b4878f24 4859 .elevator_dispatch_fn = cfq_dispatch_requests,
1da177e4 4860 .elevator_add_req_fn = cfq_insert_request,
b4878f24 4861 .elevator_activate_req_fn = cfq_activate_request,
1da177e4 4862 .elevator_deactivate_req_fn = cfq_deactivate_request,
1da177e4 4863 .elevator_completed_req_fn = cfq_completed_request,
21183b07
JA
4864 .elevator_former_req_fn = elv_rb_former_request,
4865 .elevator_latter_req_fn = elv_rb_latter_request,
9b84cacd 4866 .elevator_init_icq_fn = cfq_init_icq,
7e5a8794 4867 .elevator_exit_icq_fn = cfq_exit_icq,
1da177e4
LT
4868 .elevator_set_req_fn = cfq_set_request,
4869 .elevator_put_req_fn = cfq_put_request,
4870 .elevator_may_queue_fn = cfq_may_queue,
4871 .elevator_init_fn = cfq_init_queue,
4872 .elevator_exit_fn = cfq_exit_queue,
0bb97947 4873 .elevator_registered_fn = cfq_registered_queue,
1da177e4 4874 },
3d3c2379
TH
4875 .icq_size = sizeof(struct cfq_io_cq),
4876 .icq_align = __alignof__(struct cfq_io_cq),
3d1ab40f 4877 .elevator_attrs = cfq_attrs,
3d3c2379 4878 .elevator_name = "cfq",
1da177e4
LT
4879 .elevator_owner = THIS_MODULE,
4880};
4881
3e252066 4882#ifdef CONFIG_CFQ_GROUP_IOSCHED
3c798398 4883static struct blkcg_policy blkcg_policy_cfq = {
2ee867dc 4884 .dfl_cftypes = cfq_blkcg_files,
880f50e2 4885 .legacy_cftypes = cfq_blkcg_legacy_files,
f9fcc2d3 4886
e4a9bde9 4887 .cpd_alloc_fn = cfq_cpd_alloc,
e48453c3 4888 .cpd_init_fn = cfq_cpd_init,
e4a9bde9 4889 .cpd_free_fn = cfq_cpd_free,
69d7fde5 4890 .cpd_bind_fn = cfq_cpd_bind,
e4a9bde9 4891
001bea73 4892 .pd_alloc_fn = cfq_pd_alloc,
f9fcc2d3 4893 .pd_init_fn = cfq_pd_init,
0b39920b 4894 .pd_offline_fn = cfq_pd_offline,
001bea73 4895 .pd_free_fn = cfq_pd_free,
f9fcc2d3 4896 .pd_reset_stats_fn = cfq_pd_reset_stats,
3e252066 4897};
3e252066
VG
4898#endif
4899
1da177e4
LT
4900static int __init cfq_init(void)
4901{
3d3c2379
TH
4902 int ret;
4903
80bdf0c7 4904#ifdef CONFIG_CFQ_GROUP_IOSCHED
3c798398 4905 ret = blkcg_policy_register(&blkcg_policy_cfq);
8bd435b3
TH
4906 if (ret)
4907 return ret;
ffea73fc
TH
4908#else
4909 cfq_group_idle = 0;
4910#endif
8bd435b3 4911
fd794956 4912 ret = -ENOMEM;
3d3c2379
TH
4913 cfq_pool = KMEM_CACHE(cfq_queue, 0);
4914 if (!cfq_pool)
8bd435b3 4915 goto err_pol_unreg;
1da177e4 4916
3d3c2379 4917 ret = elv_register(&iosched_cfq);
8bd435b3
TH
4918 if (ret)
4919 goto err_free_pool;
3d3c2379 4920
2fdd82bd 4921 return 0;
8bd435b3
TH
4922
4923err_free_pool:
4924 kmem_cache_destroy(cfq_pool);
4925err_pol_unreg:
ffea73fc 4926#ifdef CONFIG_CFQ_GROUP_IOSCHED
3c798398 4927 blkcg_policy_unregister(&blkcg_policy_cfq);
ffea73fc 4928#endif
8bd435b3 4929 return ret;
1da177e4
LT
4930}
4931
4932static void __exit cfq_exit(void)
4933{
ffea73fc 4934#ifdef CONFIG_CFQ_GROUP_IOSCHED
3c798398 4935 blkcg_policy_unregister(&blkcg_policy_cfq);
ffea73fc 4936#endif
1da177e4 4937 elv_unregister(&iosched_cfq);
3d3c2379 4938 kmem_cache_destroy(cfq_pool);
1da177e4
LT
4939}
4940
4941module_init(cfq_init);
4942module_exit(cfq_exit);
4943
4944MODULE_AUTHOR("Jens Axboe");
4945MODULE_LICENSE("GPL");
4946MODULE_DESCRIPTION("Completely Fair Queueing IO scheduler");