/*
 * CFQ, or complete fairness queueing, disk scheduler.
 *
 * Based on ideas from a previously unfinished io
 * scheduler (round robin per-process disk scheduling) and Andrea Arcangeli.
 *
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/jiffies.h>
#include <linux/rbtree.h>
#include <linux/ioprio.h>
#include <linux/blktrace_api.h>
#include "cfq.h"

/*
 * tunables
 */
/* max queue in one round of service */
static const int cfq_quantum = 8;
static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
/* maximum backwards seek, in KiB */
static const int cfq_back_max = 16 * 1024;
/* penalty of a backwards seek */
static const int cfq_back_penalty = 2;
static const int cfq_slice_sync = HZ / 10;
static int cfq_slice_async = HZ / 25;
static const int cfq_slice_async_rq = 2;
static int cfq_slice_idle = HZ / 125;
static int cfq_group_idle = HZ / 125;
static const int cfq_target_latency = HZ * 3/10; /* 300 ms */
static const int cfq_hist_divisor = 4;

/*
 * offset from end of service tree
 */
#define CFQ_IDLE_DELAY		(HZ / 5)

/*
 * below this threshold, we consider thinktime immediate
 */
#define CFQ_MIN_TT		(2)

#define CFQ_SLICE_SCALE		(5)
#define CFQ_HW_QUEUE_MIN	(5)
#define CFQ_SERVICE_SHIFT	12

#define CFQQ_SEEK_THR		(sector_t)(8 * 100)
#define CFQQ_CLOSE_THR		(sector_t)(8 * 1024)
#define CFQQ_SECT_THR_NONROT	(sector_t)(2 * 32)
#define CFQQ_SEEKY(cfqq)	(hweight32(cfqq->seek_history) > 32/8)
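
/*
 * A note on CFQQ_SEEKY(): seek_history is treated as a 32-bit sliding
 * window, one bit per recent request (maintained where request positions
 * are examined, later in this file), with a set bit recording a seek
 * larger than CFQQ_SEEK_THR. hweight32() counts the set bits, so a queue
 * is classed seeky once more than 32/8 = 4 of its last 32 requests,
 * i.e. over 12.5%, involved a long seek.
 */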

#define RQ_CIC(rq)		\
	((struct cfq_io_context *) (rq)->elevator_private[0])
#define RQ_CFQQ(rq)		(struct cfq_queue *) ((rq)->elevator_private[1])
#define RQ_CFQG(rq)		(struct cfq_group *) ((rq)->elevator_private[2])

static struct kmem_cache *cfq_pool;
static struct kmem_cache *cfq_ioc_pool;

static DEFINE_PER_CPU(unsigned long, cfq_ioc_count);
static struct completion *ioc_gone;
static DEFINE_SPINLOCK(ioc_gone_lock);

static DEFINE_SPINLOCK(cic_index_lock);
static DEFINE_IDA(cic_index_ida);

#define CFQ_PRIO_LISTS		IOPRIO_BE_NR
#define cfq_class_idle(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
#define cfq_class_rt(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_RT)

#define sample_valid(samples)	((samples) > 80)
#define rb_entry_cfqg(node)	rb_entry((node), struct cfq_group, rb_node)

/*
 * Most of our rbtree usage is for sorting with min extraction, so
 * if we cache the leftmost node we don't have to walk down the tree
 * to find it. Idea borrowed from Ingo Molnar's CFS scheduler. We should
 * move this into the elevator for the rq sorting as well.
 */
struct cfq_rb_root {
	struct rb_root rb;
	struct rb_node *left;
	unsigned count;
	unsigned total_weight;
	u64 min_vdisktime;
};
#define CFQ_RB_ROOT	(struct cfq_rb_root) { .rb = RB_ROOT, .left = NULL, \
			.count = 0, .min_vdisktime = 0, }

/*
 * Per process-grouping structure
 */
struct cfq_queue {
	/* reference count */
	int ref;
	/* various state flags, see below */
	unsigned int flags;
	/* parent cfq_data */
	struct cfq_data *cfqd;
	/* service_tree member */
	struct rb_node rb_node;
	/* service_tree key */
	unsigned long rb_key;
	/* prio tree member */
	struct rb_node p_node;
	/* prio tree root we belong to, if any */
	struct rb_root *p_root;
	/* sorted list of pending requests */
	struct rb_root sort_list;
	/* if fifo isn't expired, next request to serve */
	struct request *next_rq;
	/* requests queued in sort_list */
	int queued[2];
	/* currently allocated requests */
	int allocated[2];
	/* fifo list of requests in sort_list */
	struct list_head fifo;

	/* time when queue got scheduled in to dispatch first request. */
	unsigned long dispatch_start;
	unsigned int allocated_slice;
	unsigned int slice_dispatch;
	/* time when first request from queue completed and slice started. */
	unsigned long slice_start;
	unsigned long slice_end;
	long slice_resid;

	/* pending metadata requests */
	int meta_pending;
	/* number of requests that are on the dispatch list or inside driver */
	int dispatched;

	/* io prio of this queue */
	unsigned short ioprio, org_ioprio;
	unsigned short ioprio_class, org_ioprio_class;

	pid_t pid;

	u32 seek_history;
	sector_t last_request_pos;

	struct cfq_rb_root *service_tree;
	struct cfq_queue *new_cfqq;
	struct cfq_group *cfqg;
	/* Number of sectors dispatched from queue in single dispatch round */
	unsigned long nr_sectors;
};

/*
 * First index in the service_trees.
 * IDLE is handled separately and keeps its own service tree.
 */
enum wl_prio_t {
	BE_WORKLOAD = 0,
	RT_WORKLOAD = 1,
	IDLE_WORKLOAD = 2,
	CFQ_PRIO_NR,
};

/*
 * Second index in the service_trees.
 */
enum wl_type_t {
	ASYNC_WORKLOAD = 0,
	SYNC_NOIDLE_WORKLOAD = 1,
	SYNC_WORKLOAD = 2
};

/* This is per cgroup per device grouping structure */
struct cfq_group {
	/* group service_tree member */
	struct rb_node rb_node;

	/* group service_tree key */
	u64 vdisktime;
	unsigned int weight;
	unsigned int new_weight;
	bool needs_update;

	/* number of cfqq currently on this group */
	int nr_cfqq;

	/*
	 * Per group busy queues average. Useful for workload slice calc. We
	 * create the array for each prio class but at run time it is used
	 * only for RT and BE class and slot for IDLE class remains unused.
	 * This is primarily done to avoid confusion and a gcc warning.
	 */
	unsigned int busy_queues_avg[CFQ_PRIO_NR];
	/*
	 * rr lists of queues with requests. We maintain service trees for
	 * RT and BE classes. These trees are subdivided in subclasses
	 * of SYNC, SYNC_NOIDLE and ASYNC based on workload type. For IDLE
	 * class there is no subclassification and all the cfq queues go on
	 * a single tree service_tree_idle.
	 * Counts are embedded in the cfq_rb_root
	 */
	struct cfq_rb_root service_trees[2][3];
	struct cfq_rb_root service_tree_idle;

	unsigned long saved_workload_slice;
	enum wl_type_t saved_workload;
	enum wl_prio_t saved_serving_prio;
	struct blkio_group blkg;
#ifdef CONFIG_CFQ_GROUP_IOSCHED
	struct hlist_node cfqd_node;
	int ref;
#endif
	/* number of requests that are on the dispatch list or inside driver */
	int dispatched;
};

/*
 * Per block device queue structure
 */
struct cfq_data {
	struct request_queue *queue;
	/* Root service tree for cfq_groups */
	struct cfq_rb_root grp_service_tree;
	struct cfq_group root_group;

	/*
	 * The priority currently being served
	 */
	enum wl_prio_t serving_prio;
	enum wl_type_t serving_type;
	unsigned long workload_expires;
	struct cfq_group *serving_group;

	/*
	 * Each priority tree is sorted by next_request position. These
	 * trees are used when determining if two or more queues are
	 * interleaving requests (see cfq_close_cooperator).
	 */
	struct rb_root prio_trees[CFQ_PRIO_LISTS];

	unsigned int busy_queues;
	unsigned int busy_sync_queues;

	int rq_in_driver;
	int rq_in_flight[2];

	/*
	 * queue-depth detection
	 */
	int rq_queued;
	int hw_tag;
	/*
	 * hw_tag can be
	 * -1 => indeterminate, (cfq will behave as if NCQ is present, to allow better detection)
	 *  1 => NCQ is present (hw_tag_est_depth is the estimated max depth)
	 *  0 => no NCQ
	 */
	int hw_tag_est_depth;
	unsigned int hw_tag_samples;

	/*
	 * idle window management
	 */
	struct timer_list idle_slice_timer;
	struct work_struct unplug_work;

	struct cfq_queue *active_queue;
	struct cfq_io_context *active_cic;

	/*
	 * async queue for each priority case
	 */
	struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR];
	struct cfq_queue *async_idle_cfqq;

	sector_t last_position;

	/*
	 * tunables, see top of file
	 */
	unsigned int cfq_quantum;
	unsigned int cfq_fifo_expire[2];
	unsigned int cfq_back_penalty;
	unsigned int cfq_back_max;
	unsigned int cfq_slice[2];
	unsigned int cfq_slice_async_rq;
	unsigned int cfq_slice_idle;
	unsigned int cfq_group_idle;
	unsigned int cfq_latency;

	unsigned int cic_index;
	struct list_head cic_list;

	/*
	 * Fallback dummy cfqq for extreme OOM conditions
	 */
	struct cfq_queue oom_cfqq;

	unsigned long last_delayed_sync;

	/* List of cfq groups being managed on this device */
	struct hlist_head cfqg_list;

	/* Number of groups which are on blkcg->blkg_list */
	unsigned int nr_blkcg_linked_grps;
};

static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd);

static struct cfq_rb_root *service_tree_for(struct cfq_group *cfqg,
					    enum wl_prio_t prio,
					    enum wl_type_t type)
{
	if (!cfqg)
		return NULL;

	if (prio == IDLE_WORKLOAD)
		return &cfqg->service_tree_idle;

	return &cfqg->service_trees[prio][type];
}

enum cfqq_state_flags {
	CFQ_CFQQ_FLAG_on_rr = 0,	/* on round-robin busy list */
	CFQ_CFQQ_FLAG_wait_request,	/* waiting for a request */
	CFQ_CFQQ_FLAG_must_dispatch,	/* must be allowed a dispatch */
	CFQ_CFQQ_FLAG_must_alloc_slice,	/* per-slice must_alloc flag */
	CFQ_CFQQ_FLAG_fifo_expire,	/* FIFO checked in this slice */
	CFQ_CFQQ_FLAG_idle_window,	/* slice idling enabled */
	CFQ_CFQQ_FLAG_prio_changed,	/* task priority has changed */
	CFQ_CFQQ_FLAG_slice_new,	/* no requests dispatched in slice */
	CFQ_CFQQ_FLAG_sync,		/* synchronous queue */
	CFQ_CFQQ_FLAG_coop,		/* cfqq is shared */
	CFQ_CFQQ_FLAG_split_coop,	/* shared cfqq will be split */
	CFQ_CFQQ_FLAG_deep,		/* sync cfqq experienced large depth */
	CFQ_CFQQ_FLAG_wait_busy,	/* Waiting for next request */
};

#define CFQ_CFQQ_FNS(name)						\
static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq)		\
{									\
	(cfqq)->flags |= (1 << CFQ_CFQQ_FLAG_##name);			\
}									\
static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq)	\
{									\
	(cfqq)->flags &= ~(1 << CFQ_CFQQ_FLAG_##name);			\
}									\
static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq)		\
{									\
	return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0;	\
}
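
/*
 * For illustration, CFQ_CFQQ_FNS(on_rr) below expands to three trivial
 * helpers (a sketch, modulo whitespace):
 *
 *	static inline void cfq_mark_cfqq_on_rr(struct cfq_queue *cfqq)
 *	{
 *		(cfqq)->flags |= (1 << CFQ_CFQQ_FLAG_on_rr);
 *	}
 *	static inline void cfq_clear_cfqq_on_rr(struct cfq_queue *cfqq)
 *	{
 *		(cfqq)->flags &= ~(1 << CFQ_CFQQ_FLAG_on_rr);
 *	}
 *	static inline int cfq_cfqq_on_rr(const struct cfq_queue *cfqq)
 *	{
 *		return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_on_rr)) != 0;
 *	}
 */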

CFQ_CFQQ_FNS(on_rr);
CFQ_CFQQ_FNS(wait_request);
CFQ_CFQQ_FNS(must_dispatch);
CFQ_CFQQ_FNS(must_alloc_slice);
CFQ_CFQQ_FNS(fifo_expire);
CFQ_CFQQ_FNS(idle_window);
CFQ_CFQQ_FNS(prio_changed);
CFQ_CFQQ_FNS(slice_new);
CFQ_CFQQ_FNS(sync);
CFQ_CFQQ_FNS(coop);
CFQ_CFQQ_FNS(split_coop);
CFQ_CFQQ_FNS(deep);
CFQ_CFQQ_FNS(wait_busy);
#undef CFQ_CFQQ_FNS

#ifdef CONFIG_CFQ_GROUP_IOSCHED
#define cfq_log_cfqq(cfqd, cfqq, fmt, args...)	\
	blk_add_trace_msg((cfqd)->queue, "cfq%d%c %s " fmt, (cfqq)->pid, \
			cfq_cfqq_sync((cfqq)) ? 'S' : 'A', \
			blkg_path(&(cfqq)->cfqg->blkg), ##args);

#define cfq_log_cfqg(cfqd, cfqg, fmt, args...)				\
	blk_add_trace_msg((cfqd)->queue, "%s " fmt,			\
				blkg_path(&(cfqg)->blkg), ##args);      \

#else
#define cfq_log_cfqq(cfqd, cfqq, fmt, args...)	\
	blk_add_trace_msg((cfqd)->queue, "cfq%d " fmt, (cfqq)->pid, ##args)
#define cfq_log_cfqg(cfqd, cfqg, fmt, args...)		do {} while (0);
#endif
#define cfq_log(cfqd, fmt, args...)	\
	blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)

/* Traverses through cfq group service trees */
#define for_each_cfqg_st(cfqg, i, j, st) \
	for (i = 0; i <= IDLE_WORKLOAD; i++) \
		for (j = 0, st = i < IDLE_WORKLOAD ? &cfqg->service_trees[i][j]\
			: &cfqg->service_tree_idle; \
			(i < IDLE_WORKLOAD && j <= SYNC_WORKLOAD) || \
			(i == IDLE_WORKLOAD && j == 0); \
			j++, st = i < IDLE_WORKLOAD ? \
			&cfqg->service_trees[i][j]: NULL) \

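/*
 * To make the nested loop above concrete: with BE_WORKLOAD = 0,
 * RT_WORKLOAD = 1 and IDLE_WORKLOAD = 2, the iteration visits the six
 * BE/RT trees service_trees[0][0..2] and service_trees[1][0..2], then
 * service_tree_idle exactly once, i.e. seven trees per group.
 */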

static inline bool iops_mode(struct cfq_data *cfqd)
{
	/*
	 * If we are not idling on queues and it is an NCQ drive, parallel
	 * execution of requests is on and measuring time is not possible
	 * in most of the cases until and unless we drive shallower queue
	 * depths and that becomes a performance bottleneck. In such cases
	 * switch to start providing fairness in terms of number of IOs.
	 */
	if (!cfqd->cfq_slice_idle && cfqd->hw_tag)
		return true;
	else
		return false;
}

static inline enum wl_prio_t cfqq_prio(struct cfq_queue *cfqq)
{
	if (cfq_class_idle(cfqq))
		return IDLE_WORKLOAD;
	if (cfq_class_rt(cfqq))
		return RT_WORKLOAD;
	return BE_WORKLOAD;
}


static enum wl_type_t cfqq_type(struct cfq_queue *cfqq)
{
	if (!cfq_cfqq_sync(cfqq))
		return ASYNC_WORKLOAD;
	if (!cfq_cfqq_idle_window(cfqq))
		return SYNC_NOIDLE_WORKLOAD;
	return SYNC_WORKLOAD;
}

static inline int cfq_group_busy_queues_wl(enum wl_prio_t wl,
					   struct cfq_data *cfqd,
					   struct cfq_group *cfqg)
{
	if (wl == IDLE_WORKLOAD)
		return cfqg->service_tree_idle.count;

	return cfqg->service_trees[wl][ASYNC_WORKLOAD].count
		+ cfqg->service_trees[wl][SYNC_NOIDLE_WORKLOAD].count
		+ cfqg->service_trees[wl][SYNC_WORKLOAD].count;
}

static inline int cfqg_busy_async_queues(struct cfq_data *cfqd,
					struct cfq_group *cfqg)
{
	return cfqg->service_trees[RT_WORKLOAD][ASYNC_WORKLOAD].count
		+ cfqg->service_trees[BE_WORKLOAD][ASYNC_WORKLOAD].count;
}

static void cfq_dispatch_insert(struct request_queue *, struct request *);
static struct cfq_queue *cfq_get_queue(struct cfq_data *, bool,
				       struct io_context *, gfp_t);
static struct cfq_io_context *cfq_cic_lookup(struct cfq_data *,
						struct io_context *);

static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_context *cic,
					    bool is_sync)
{
	return cic->cfqq[is_sync];
}

static inline void cic_set_cfqq(struct cfq_io_context *cic,
				struct cfq_queue *cfqq, bool is_sync)
{
	cic->cfqq[is_sync] = cfqq;
}

#define CIC_DEAD_KEY	1ul
#define CIC_DEAD_INDEX_SHIFT	1

static inline void *cfqd_dead_key(struct cfq_data *cfqd)
{
	return (void *)(cfqd->cic_index << CIC_DEAD_INDEX_SHIFT | CIC_DEAD_KEY);
}

static inline struct cfq_data *cic_to_cfqd(struct cfq_io_context *cic)
{
	struct cfq_data *cfqd = cic->key;

	if (unlikely((unsigned long) cfqd & CIC_DEAD_KEY))
		return NULL;

	return cfqd;
}
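
/*
 * The dead-key scheme above is a tagged pointer: a live cic->key holds a
 * struct cfq_data pointer, whose low bit is clear because of allocation
 * alignment. On elevator teardown the key is replaced with
 * (cic_index << 1) | 1, so readers detect death by testing bit 0, as
 * cic_to_cfqd() does, while the index stays recoverable for cleanup.
 * For example, cic_index 7 encodes to the "pointer" value 0xf.
 */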

/*
 * We regard a request as SYNC, if it's either a read or has the SYNC bit
 * set (in which case it could also be a direct WRITE).
 */
static inline bool cfq_bio_sync(struct bio *bio)
{
	return bio_data_dir(bio) == READ || (bio->bi_rw & REQ_SYNC);
}

/*
 * scheduler run of queue, if there are requests pending and no one in the
 * driver that will restart queueing
 */
static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
{
	if (cfqd->busy_queues) {
		cfq_log(cfqd, "schedule dispatch");
		kblockd_schedule_work(cfqd->queue, &cfqd->unplug_work);
	}
}

/*
 * Scale schedule slice based on io priority. Use the sync time slice only
 * if a queue is marked sync and has sync io queued. A sync queue with async
 * io only, should not get full sync slice length.
 */
static inline int cfq_prio_slice(struct cfq_data *cfqd, bool sync,
				 unsigned short prio)
{
	const int base_slice = cfqd->cfq_slice[sync];

	WARN_ON(prio >= IOPRIO_BE_NR);

	return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - prio));
}
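
/*
 * A worked example of the scaling above, assuming HZ=1000 so that
 * cfq_slice[1] = cfq_slice_sync = HZ/10 = 100 jiffies: base_slice/5 = 20,
 * so a prio 0 sync queue gets 100 + 20*4 = 180 jiffies, the default
 * prio 4 gets exactly 100, and prio 7 gets 100 + 20*(4-7) = 40 jiffies.
 */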

static inline int
cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio);
}

static inline u64 cfq_scale_slice(unsigned long delta, struct cfq_group *cfqg)
{
	u64 d = delta << CFQ_SERVICE_SHIFT;

	d = d * BLKIO_WEIGHT_DEFAULT;
	do_div(d, cfqg->weight);
	return d;
}
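
/*
 * cfq_scale_slice() turns a service charge into vdisktime, inversely
 * proportional to group weight. Assuming BLKIO_WEIGHT_DEFAULT is 500, as
 * in the blkio controller of this era: a group at the default weight
 * accrues (delta << 12) vdisktime, a weight-1000 group accrues half of
 * that for the same service, and a weight-100 group five times as much,
 * which is what pushes low-weight groups further right in the tree.
 */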

static inline u64 max_vdisktime(u64 min_vdisktime, u64 vdisktime)
{
	s64 delta = (s64)(vdisktime - min_vdisktime);
	if (delta > 0)
		min_vdisktime = vdisktime;

	return min_vdisktime;
}

static inline u64 min_vdisktime(u64 min_vdisktime, u64 vdisktime)
{
	s64 delta = (s64)(vdisktime - min_vdisktime);
	if (delta < 0)
		min_vdisktime = vdisktime;

	return min_vdisktime;
}
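
/*
 * Note that the two helpers above compare the signed difference rather
 * than the raw u64 values, which keeps the ordering correct across
 * vdisktime wraparound, the same trick CFS uses for vruntime. E.g. with
 * min_vdisktime = ULLONG_MAX - 10 and a wrapped vdisktime = 5, the
 * difference casts to +16, so 5 is correctly treated as the later time.
 */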

static void update_min_vdisktime(struct cfq_rb_root *st)
{
	struct cfq_group *cfqg;

	if (st->left) {
		cfqg = rb_entry_cfqg(st->left);
		st->min_vdisktime = max_vdisktime(st->min_vdisktime,
						  cfqg->vdisktime);
	}
}

/*
 * get averaged number of queues of RT/BE priority.
 * average is updated, with a formula that gives more weight to higher
 * numbers, to quickly follow sudden increases and decrease slowly.
 */

static inline unsigned cfq_group_get_avg_queues(struct cfq_data *cfqd,
					struct cfq_group *cfqg, bool rt)
{
	unsigned min_q, max_q;
	unsigned mult = cfq_hist_divisor - 1;
	unsigned round = cfq_hist_divisor / 2;
	unsigned busy = cfq_group_busy_queues_wl(rt, cfqd, cfqg);

	min_q = min(cfqg->busy_queues_avg[rt], busy);
	max_q = max(cfqg->busy_queues_avg[rt], busy);
	cfqg->busy_queues_avg[rt] = (mult * max_q + min_q + round) /
		cfq_hist_divisor;
	return cfqg->busy_queues_avg[rt];
}
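
/*
 * With cfq_hist_divisor = 4 the update above is avg = (3*max + min + 2)/4.
 * For example, a stable average of 2 that sees a burst of 10 busy queues
 * jumps straight to (30 + 2 + 2)/4 = 8, while an average of 8 whose busy
 * count falls back to 2 only decays to (24 + 2 + 2)/4 = 7: increases bite
 * immediately, decreases bleed off one step per update.
 */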

static inline unsigned
cfq_group_slice(struct cfq_data *cfqd, struct cfq_group *cfqg)
{
	struct cfq_rb_root *st = &cfqd->grp_service_tree;

	return cfq_target_latency * cfqg->weight / st->total_weight;
}

static inline unsigned
cfq_scaled_cfqq_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	unsigned slice = cfq_prio_to_slice(cfqd, cfqq);
	if (cfqd->cfq_latency) {
		/*
		 * interested queues (we consider only the ones with the same
		 * priority class in the cfq group)
		 */
		unsigned iq = cfq_group_get_avg_queues(cfqd, cfqq->cfqg,
						cfq_class_rt(cfqq));
		unsigned sync_slice = cfqd->cfq_slice[1];
		unsigned expect_latency = sync_slice * iq;
		unsigned group_slice = cfq_group_slice(cfqd, cfqq->cfqg);

		if (expect_latency > group_slice) {
			unsigned base_low_slice = 2 * cfqd->cfq_slice_idle;
			/* scale low_slice according to IO priority
			 * and sync vs async */
			unsigned low_slice =
				min(slice, base_low_slice * slice / sync_slice);
			/* the adapted slice value is scaled to fit all iqs
			 * into the target latency */
			slice = max(slice * group_slice / expect_latency,
				    low_slice);
		}
	}
	return slice;
}
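
/*
 * Rough numbers for the low_latency path above, assuming HZ=1000 and the
 * defaults at the top of this file: a group's share of the 300 ms target
 * latency is proportional to its weight. If a group is entitled to 150 ms
 * but averages 4 busy sync queues (expect_latency = 4 * 100 = 400 ms),
 * each 100 ms prio 4 slice shrinks to 100 * 150/400 = 37 jiffies, with
 * low_slice as the floor so slices never collapse to nothing.
 */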

static inline void
cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	unsigned slice = cfq_scaled_cfqq_slice(cfqd, cfqq);

	cfqq->slice_start = jiffies;
	cfqq->slice_end = jiffies + slice;
	cfqq->allocated_slice = slice;
	cfq_log_cfqq(cfqd, cfqq, "set_slice=%lu", cfqq->slice_end - jiffies);
}

/*
 * We need to wrap this check in cfq_cfqq_slice_new(), since ->slice_end
 * isn't valid until the first request from the dispatch is activated
 * and the slice time set.
 */
static inline bool cfq_slice_used(struct cfq_queue *cfqq)
{
	if (cfq_cfqq_slice_new(cfqq))
		return false;
	if (time_before(jiffies, cfqq->slice_end))
		return false;

	return true;
}

/*
 * Lifted from AS - choose which of rq1 and rq2 is best served now.
 * We choose the request that is closest to the head right now. Distance
 * behind the head is penalized and only allowed to a certain extent.
 */
static struct request *
cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2, sector_t last)
{
	sector_t s1, s2, d1 = 0, d2 = 0;
	unsigned long back_max;
#define CFQ_RQ1_WRAP	0x01 /* request 1 wraps */
#define CFQ_RQ2_WRAP	0x02 /* request 2 wraps */
	unsigned wrap = 0; /* bit mask: requests behind the disk head? */

	if (rq1 == NULL || rq1 == rq2)
		return rq2;
	if (rq2 == NULL)
		return rq1;

	if (rq_is_sync(rq1) && !rq_is_sync(rq2))
		return rq1;
	else if (rq_is_sync(rq2) && !rq_is_sync(rq1))
		return rq2;
	if ((rq1->cmd_flags & REQ_META) && !(rq2->cmd_flags & REQ_META))
		return rq1;
	else if ((rq2->cmd_flags & REQ_META) &&
		 !(rq1->cmd_flags & REQ_META))
		return rq2;

	s1 = blk_rq_pos(rq1);
	s2 = blk_rq_pos(rq2);

	/*
	 * by definition, 1KiB is 2 sectors
	 */
	back_max = cfqd->cfq_back_max * 2;

	/*
	 * Strict one way elevator _except_ in the case where we allow
	 * short backward seeks which are biased as twice the cost of a
	 * similar forward seek.
	 */
	if (s1 >= last)
		d1 = s1 - last;
	else if (s1 + back_max >= last)
		d1 = (last - s1) * cfqd->cfq_back_penalty;
	else
		wrap |= CFQ_RQ1_WRAP;

	if (s2 >= last)
		d2 = s2 - last;
	else if (s2 + back_max >= last)
		d2 = (last - s2) * cfqd->cfq_back_penalty;
	else
		wrap |= CFQ_RQ2_WRAP;

	/* Found required data */

	/*
	 * By doing switch() on the bit mask "wrap" we avoid having to
	 * check two variables for all permutations: --> faster!
	 */
	switch (wrap) {
	case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
		if (d1 < d2)
			return rq1;
		else if (d2 < d1)
			return rq2;
		else {
			if (s1 >= s2)
				return rq1;
			else
				return rq2;
		}

	case CFQ_RQ2_WRAP:
		return rq1;
	case CFQ_RQ1_WRAP:
		return rq2;
	case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP): /* both rqs wrapped */
	default:
		/*
		 * Since both rqs are wrapped,
		 * start with the one that's further behind head
		 * (--> only *one* back seek required),
		 * since back seek takes more time than forward.
		 */
		if (s1 <= s2)
			return rq1;
		else
			return rq2;
	}
}
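
/*
 * Distance example for the above: with the head at sector last = 100000
 * and the default back_max of 16 MiB (32768 sectors), a request at sector
 * 100200 has forward distance 200; one at sector 99900 has penalized
 * backward distance 100 * cfq_back_penalty = 200 (the tie then goes to
 * the higher sector); and one at sector 2 lies beyond back_max, so it is
 * flagged as wrapped and loses to any request that did not wrap.
 */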

/*
 * The below is leftmost cache rbtree addon
 */
static struct cfq_queue *cfq_rb_first(struct cfq_rb_root *root)
{
	/* Service tree is empty */
	if (!root->count)
		return NULL;

	if (!root->left)
		root->left = rb_first(&root->rb);

	if (root->left)
		return rb_entry(root->left, struct cfq_queue, rb_node);

	return NULL;
}

static struct cfq_group *cfq_rb_first_group(struct cfq_rb_root *root)
{
	if (!root->left)
		root->left = rb_first(&root->rb);

	if (root->left)
		return rb_entry_cfqg(root->left);

	return NULL;
}

static void rb_erase_init(struct rb_node *n, struct rb_root *root)
{
	rb_erase(n, root);
	RB_CLEAR_NODE(n);
}

static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root)
{
	if (root->left == n)
		root->left = NULL;
	rb_erase_init(n, &root->rb);
	--root->count;
}

/*
 * would be nice to take fifo expire time into account as well
 */
static struct request *
cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		  struct request *last)
{
	struct rb_node *rbnext = rb_next(&last->rb_node);
	struct rb_node *rbprev = rb_prev(&last->rb_node);
	struct request *next = NULL, *prev = NULL;

	BUG_ON(RB_EMPTY_NODE(&last->rb_node));

	if (rbprev)
		prev = rb_entry_rq(rbprev);

	if (rbnext)
		next = rb_entry_rq(rbnext);
	else {
		rbnext = rb_first(&cfqq->sort_list);
		if (rbnext && rbnext != &last->rb_node)
			next = rb_entry_rq(rbnext);
	}

	return cfq_choose_req(cfqd, next, prev, blk_rq_pos(last));
}

static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
				      struct cfq_queue *cfqq)
{
	/*
	 * just an approximation, should be ok.
	 */
	return (cfqq->cfqg->nr_cfqq - 1) * (cfq_prio_slice(cfqd, 1, 0) -
		       cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio));
}

static inline s64
cfqg_key(struct cfq_rb_root *st, struct cfq_group *cfqg)
{
	return cfqg->vdisktime - st->min_vdisktime;
}

static void
__cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
{
	struct rb_node **node = &st->rb.rb_node;
	struct rb_node *parent = NULL;
	struct cfq_group *__cfqg;
	s64 key = cfqg_key(st, cfqg);
	int left = 1;

	while (*node != NULL) {
		parent = *node;
		__cfqg = rb_entry_cfqg(parent);

		if (key < cfqg_key(st, __cfqg))
			node = &parent->rb_left;
		else {
			node = &parent->rb_right;
			left = 0;
		}
	}

	if (left)
		st->left = &cfqg->rb_node;

	rb_link_node(&cfqg->rb_node, parent, node);
	rb_insert_color(&cfqg->rb_node, &st->rb);
}

static void
cfq_update_group_weight(struct cfq_group *cfqg)
{
	BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
	if (cfqg->needs_update) {
		cfqg->weight = cfqg->new_weight;
		cfqg->needs_update = false;
	}
}

static void
cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
{
	BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));

	cfq_update_group_weight(cfqg);
	__cfq_group_service_tree_add(st, cfqg);
	st->total_weight += cfqg->weight;
}

static void
cfq_group_notify_queue_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
{
	struct cfq_rb_root *st = &cfqd->grp_service_tree;
	struct cfq_group *__cfqg;
	struct rb_node *n;

	cfqg->nr_cfqq++;
	if (!RB_EMPTY_NODE(&cfqg->rb_node))
		return;

	/*
	 * Currently put the group at the end. Later implement something
	 * so that groups get lesser vtime based on their weights, so that
	 * a group does not lose all of its share if it was not continuously
	 * backlogged.
	 */
	n = rb_last(&st->rb);
	if (n) {
		__cfqg = rb_entry_cfqg(n);
		cfqg->vdisktime = __cfqg->vdisktime + CFQ_IDLE_DELAY;
	} else
		cfqg->vdisktime = st->min_vdisktime;

	cfq_group_service_tree_add(st, cfqg);
}

static void
cfq_group_service_tree_del(struct cfq_rb_root *st, struct cfq_group *cfqg)
{
	st->total_weight -= cfqg->weight;
	if (!RB_EMPTY_NODE(&cfqg->rb_node))
		cfq_rb_erase(&cfqg->rb_node, st);
}

static void
cfq_group_notify_queue_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
{
	struct cfq_rb_root *st = &cfqd->grp_service_tree;

	BUG_ON(cfqg->nr_cfqq < 1);
	cfqg->nr_cfqq--;

	/* If there are other cfq queues under this group, don't delete it */
	if (cfqg->nr_cfqq)
		return;

	cfq_log_cfqg(cfqd, cfqg, "del_from_rr group");
	cfq_group_service_tree_del(st, cfqg);
	cfqg->saved_workload_slice = 0;
	cfq_blkiocg_update_dequeue_stats(&cfqg->blkg, 1);
}

static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq,
						unsigned int *unaccounted_time)
{
	unsigned int slice_used;

	/*
	 * Queue got expired before even a single request completed or
	 * got expired immediately after first request completion.
	 */
	if (!cfqq->slice_start || cfqq->slice_start == jiffies) {
		/*
		 * Also charge the seek time incurred to the group, otherwise
		 * if there are multiple queues in the group, each can dispatch
		 * a single request on seeky media and cause lots of seek time
		 * and group will never know it.
		 */
		slice_used = max_t(unsigned, (jiffies - cfqq->dispatch_start),
					1);
	} else {
		slice_used = jiffies - cfqq->slice_start;
		if (slice_used > cfqq->allocated_slice) {
			*unaccounted_time = slice_used - cfqq->allocated_slice;
			slice_used = cfqq->allocated_slice;
		}
		if (time_after(cfqq->slice_start, cfqq->dispatch_start))
			*unaccounted_time += cfqq->slice_start -
					cfqq->dispatch_start;
	}

	return slice_used;
}

static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
				struct cfq_queue *cfqq)
{
	struct cfq_rb_root *st = &cfqd->grp_service_tree;
	unsigned int used_sl, charge, unaccounted_sl = 0;
	int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg)
			- cfqg->service_tree_idle.count;

	BUG_ON(nr_sync < 0);
	used_sl = charge = cfq_cfqq_slice_usage(cfqq, &unaccounted_sl);

	if (iops_mode(cfqd))
		charge = cfqq->slice_dispatch;
	else if (!cfq_cfqq_sync(cfqq) && !nr_sync)
		charge = cfqq->allocated_slice;

	/* Can't update vdisktime while group is on service tree */
	cfq_group_service_tree_del(st, cfqg);
	cfqg->vdisktime += cfq_scale_slice(charge, cfqg);
	/* If a new weight was requested, update now, off tree */
	cfq_group_service_tree_add(st, cfqg);

	/* This group is being expired. Save the context */
	if (time_after(cfqd->workload_expires, jiffies)) {
		cfqg->saved_workload_slice = cfqd->workload_expires
						- jiffies;
		cfqg->saved_workload = cfqd->serving_type;
		cfqg->saved_serving_prio = cfqd->serving_prio;
	} else
		cfqg->saved_workload_slice = 0;

	cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime,
					st->min_vdisktime);
	cfq_log_cfqq(cfqq->cfqd, cfqq, "sl_used=%u disp=%u charge=%u iops=%u"
			" sect=%u", used_sl, cfqq->slice_dispatch, charge,
			iops_mode(cfqd), cfqq->nr_sectors);
	cfq_blkiocg_update_timeslice_used(&cfqg->blkg, used_sl,
					  unaccounted_sl);
	cfq_blkiocg_set_start_empty_time(&cfqg->blkg);
}
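
/*
 * To summarize the charging policy above: in time mode a queue is charged
 * the wall-clock slice it actually used; on NCQ devices with idling off
 * (iops_mode) it is charged the number of requests dispatched instead;
 * and an async queue in a group with no busy sync queues is charged its
 * full allocated slice, since nothing else in the group could have run.
 * The charge is then weight-scaled into vdisktime before re-insertion.
 */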

#ifdef CONFIG_CFQ_GROUP_IOSCHED
static inline struct cfq_group *cfqg_of_blkg(struct blkio_group *blkg)
{
	if (blkg)
		return container_of(blkg, struct cfq_group, blkg);
	return NULL;
}

void cfq_update_blkio_group_weight(void *key, struct blkio_group *blkg,
				   unsigned int weight)
{
	struct cfq_group *cfqg = cfqg_of_blkg(blkg);
	cfqg->new_weight = weight;
	cfqg->needs_update = true;
}

static void cfq_init_add_cfqg_lists(struct cfq_data *cfqd,
			struct cfq_group *cfqg, struct blkio_cgroup *blkcg)
{
	struct backing_dev_info *bdi = &cfqd->queue->backing_dev_info;
	unsigned int major, minor;

	/*
	 * Add group onto cgroup list. It might happen that bdi->dev is
	 * not initialized yet. Initialize this new group without major
	 * and minor info and this info will be filled in once a new thread
	 * comes for IO.
	 */
	if (bdi->dev) {
		sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
		cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg,
					(void *)cfqd, MKDEV(major, minor));
	} else
		cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg,
					(void *)cfqd, 0);

	cfqd->nr_blkcg_linked_grps++;
	cfqg->weight = blkcg_get_weight(blkcg, cfqg->blkg.dev);

	/* Add group on cfqd list */
	hlist_add_head(&cfqg->cfqd_node, &cfqd->cfqg_list);
}

/*
 * Should be called from sleepable context. No request queue lock as per
 * cpu stats are allocated dynamically and alloc_percpu needs to be called
 * from sleepable context.
 */
static struct cfq_group * cfq_alloc_cfqg(struct cfq_data *cfqd)
{
	struct cfq_group *cfqg = NULL;
	int i, j;
	struct cfq_rb_root *st;

	cfqg = kzalloc_node(sizeof(*cfqg), GFP_ATOMIC, cfqd->queue->node);
	if (!cfqg)
		return NULL;

	for_each_cfqg_st(cfqg, i, j, st)
		*st = CFQ_RB_ROOT;
	RB_CLEAR_NODE(&cfqg->rb_node);

	/*
	 * Take the initial reference that will be released on destroy.
	 * This can be thought of as a joint reference by cgroup and
	 * elevator which will be dropped by either elevator exit
	 * or cgroup deletion path depending on who is exiting first.
	 */
	cfqg->ref = 1;
	return cfqg;
}

static struct cfq_group *
cfq_find_cfqg(struct cfq_data *cfqd, struct blkio_cgroup *blkcg)
{
	struct cfq_group *cfqg = NULL;
	void *key = cfqd;
	struct backing_dev_info *bdi = &cfqd->queue->backing_dev_info;
	unsigned int major, minor;

	/*
	 * This is the common case when there are no blkio cgroups.
	 * Avoid lookup in this case
	 */
	if (blkcg == &blkio_root_cgroup)
		cfqg = &cfqd->root_group;
	else
		cfqg = cfqg_of_blkg(blkiocg_lookup_group(blkcg, key));

	if (cfqg && !cfqg->blkg.dev && bdi->dev && dev_name(bdi->dev)) {
		sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
		cfqg->blkg.dev = MKDEV(major, minor);
	}

	return cfqg;
}

/*
 * Search for the cfq group current task belongs to. request_queue lock must
 * be held.
 */
static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd)
{
	struct blkio_cgroup *blkcg;
	struct cfq_group *cfqg = NULL, *__cfqg = NULL;
	struct request_queue *q = cfqd->queue;

	rcu_read_lock();
	blkcg = task_blkio_cgroup(current);
	cfqg = cfq_find_cfqg(cfqd, blkcg);
	if (cfqg) {
		rcu_read_unlock();
		return cfqg;
	}

	/*
	 * Need to allocate a group. Allocation of group also needs allocation
	 * of per cpu stats which in-turn takes a mutex() and can block. Hence
	 * we need to drop rcu lock and queue_lock before we call alloc.
	 *
	 * Not taking any queue reference here and assuming that queue is
	 * around by the time we return. CFQ queue allocation code does
	 * the same. It might be racy though.
	 */

	rcu_read_unlock();
	spin_unlock_irq(q->queue_lock);

	cfqg = cfq_alloc_cfqg(cfqd);

	spin_lock_irq(q->queue_lock);

	rcu_read_lock();
	blkcg = task_blkio_cgroup(current);

	/*
	 * If some other thread already allocated the group while we were
	 * not holding queue lock, free up the group
	 */
	__cfqg = cfq_find_cfqg(cfqd, blkcg);

	if (__cfqg) {
		kfree(cfqg);
		rcu_read_unlock();
		return __cfqg;
	}

	if (!cfqg)
		cfqg = &cfqd->root_group;

	cfq_init_add_cfqg_lists(cfqd, cfqg, blkcg);
	rcu_read_unlock();
	return cfqg;
}
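
/*
 * The drop-alloc-recheck dance above is the usual pattern for doing a
 * blocking allocation under a spinlock: release the lock, allocate,
 * re-acquire, then repeat the lookup because another thread may have
 * installed the group in the meantime, in which case our copy is simply
 * freed. The cgroup is also re-fetched after re-locking, since the task
 * may have migrated while the locks were dropped.
 */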

static inline struct cfq_group *cfq_ref_get_cfqg(struct cfq_group *cfqg)
{
	cfqg->ref++;
	return cfqg;
}

static void cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg)
{
	/* Currently, all async queues are mapped to root group */
	if (!cfq_cfqq_sync(cfqq))
		cfqg = &cfqq->cfqd->root_group;

	cfqq->cfqg = cfqg;
	/* cfqq reference on cfqg */
	cfqq->cfqg->ref++;
}

static void cfq_put_cfqg(struct cfq_group *cfqg)
{
	struct cfq_rb_root *st;
	int i, j;

	BUG_ON(cfqg->ref <= 0);
	cfqg->ref--;
	if (cfqg->ref)
		return;
	for_each_cfqg_st(cfqg, i, j, st)
		BUG_ON(!RB_EMPTY_ROOT(&st->rb));
	kfree(cfqg);
}

static void cfq_destroy_cfqg(struct cfq_data *cfqd, struct cfq_group *cfqg)
{
	/* Something wrong if we are trying to remove same group twice */
	BUG_ON(hlist_unhashed(&cfqg->cfqd_node));

	hlist_del_init(&cfqg->cfqd_node);

	/*
	 * Put the reference taken at the time of creation so that when all
	 * queues are gone, group can be destroyed.
	 */
	cfq_put_cfqg(cfqg);
}

static void cfq_release_cfq_groups(struct cfq_data *cfqd)
{
	struct hlist_node *pos, *n;
	struct cfq_group *cfqg;

	hlist_for_each_entry_safe(cfqg, pos, n, &cfqd->cfqg_list, cfqd_node) {
		/*
		 * If cgroup removal path got to blk_group first and removed
		 * it from cgroup list, then it will take care of destroying
		 * cfqg also.
		 */
		if (!cfq_blkiocg_del_blkio_group(&cfqg->blkg))
			cfq_destroy_cfqg(cfqd, cfqg);
	}
}

/*
 * Blk cgroup controller notification saying that blkio_group object is being
 * delinked as associated cgroup object is going away. That also means that
 * no new IO will come in this group. So get rid of this group as soon as
 * any pending IO in the group is finished.
 *
 * This function is called under rcu_read_lock(). key is the rcu protected
 * pointer. That means "key" is a valid cfq_data pointer as long as we hold
 * the rcu read lock.
 *
 * "key" was fetched from blkio_group under blkio_cgroup->lock. That means
 * it should not be NULL as even if elevator was exiting, cgroup deletion
 * path got to it first.
 */
void cfq_unlink_blkio_group(void *key, struct blkio_group *blkg)
{
	unsigned long flags;
	struct cfq_data *cfqd = key;

	spin_lock_irqsave(cfqd->queue->queue_lock, flags);
	cfq_destroy_cfqg(cfqd, cfqg_of_blkg(blkg));
	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
}

#else /* GROUP_IOSCHED */
static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd)
{
	return &cfqd->root_group;
}

static inline struct cfq_group *cfq_ref_get_cfqg(struct cfq_group *cfqg)
{
	return cfqg;
}

static inline void
cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg) {
	cfqq->cfqg = cfqg;
}

static void cfq_release_cfq_groups(struct cfq_data *cfqd) {}
static inline void cfq_put_cfqg(struct cfq_group *cfqg) {}

#endif /* GROUP_IOSCHED */

/*
 * The cfqd->service_trees holds all pending cfq_queue's that have
 * requests waiting to be processed. It is sorted in the order that
 * we will service the queues.
 */
static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
				 bool add_front)
{
	struct rb_node **p, *parent;
	struct cfq_queue *__cfqq;
	unsigned long rb_key;
	struct cfq_rb_root *service_tree;
	int left;
	int new_cfqq = 1;
	int group_changed = 0;

	service_tree = service_tree_for(cfqq->cfqg, cfqq_prio(cfqq),
						cfqq_type(cfqq));
	if (cfq_class_idle(cfqq)) {
		rb_key = CFQ_IDLE_DELAY;
		parent = rb_last(&service_tree->rb);
		if (parent && parent != &cfqq->rb_node) {
			__cfqq = rb_entry(parent, struct cfq_queue, rb_node);
			rb_key += __cfqq->rb_key;
		} else
			rb_key += jiffies;
	} else if (!add_front) {
		/*
		 * Get our rb key offset. Subtract any residual slice
		 * value carried from last service. A negative resid
		 * count indicates slice overrun, and this should position
		 * the next service time further away in the tree.
		 */
		rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies;
		rb_key -= cfqq->slice_resid;
		cfqq->slice_resid = 0;
	} else {
		rb_key = -HZ;
		__cfqq = cfq_rb_first(service_tree);
		rb_key += __cfqq ? __cfqq->rb_key : jiffies;
	}

	if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
		new_cfqq = 0;
		/*
		 * same position, nothing more to do
		 */
		if (rb_key == cfqq->rb_key &&
		    cfqq->service_tree == service_tree)
			return;

		cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
		cfqq->service_tree = NULL;
	}

	left = 1;
	parent = NULL;
	cfqq->service_tree = service_tree;
	p = &service_tree->rb.rb_node;
	while (*p) {
		struct rb_node **n;

		parent = *p;
		__cfqq = rb_entry(parent, struct cfq_queue, rb_node);

		/*
		 * sort by key, that represents service time.
		 */
		if (time_before(rb_key, __cfqq->rb_key))
			n = &(*p)->rb_left;
		else {
			n = &(*p)->rb_right;
			left = 0;
		}

		p = n;
	}

	if (left)
		service_tree->left = &cfqq->rb_node;

	cfqq->rb_key = rb_key;
	rb_link_node(&cfqq->rb_node, parent, p);
	rb_insert_color(&cfqq->rb_node, &service_tree->rb);
	service_tree->count++;
	if ((add_front || !new_cfqq) && !group_changed)
		return;
	cfq_group_notify_queue_add(cfqd, cfqq->cfqg);
}

static struct cfq_queue *
cfq_prio_tree_lookup(struct cfq_data *cfqd, struct rb_root *root,
		     sector_t sector, struct rb_node **ret_parent,
		     struct rb_node ***rb_link)
{
	struct rb_node **p, *parent;
	struct cfq_queue *cfqq = NULL;

	parent = NULL;
	p = &root->rb_node;
	while (*p) {
		struct rb_node **n;

		parent = *p;
		cfqq = rb_entry(parent, struct cfq_queue, p_node);

		/*
		 * Sort strictly based on sector. Smallest to the left,
		 * largest to the right.
		 */
		if (sector > blk_rq_pos(cfqq->next_rq))
			n = &(*p)->rb_right;
		else if (sector < blk_rq_pos(cfqq->next_rq))
			n = &(*p)->rb_left;
		else
			break;
		p = n;
		cfqq = NULL;
	}

	*ret_parent = parent;
	if (rb_link)
		*rb_link = p;
	return cfqq;
}

static void cfq_prio_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	struct rb_node **p, *parent;
	struct cfq_queue *__cfqq;

	if (cfqq->p_root) {
		rb_erase(&cfqq->p_node, cfqq->p_root);
		cfqq->p_root = NULL;
	}

	if (cfq_class_idle(cfqq))
		return;
	if (!cfqq->next_rq)
		return;

	cfqq->p_root = &cfqd->prio_trees[cfqq->org_ioprio];
	__cfqq = cfq_prio_tree_lookup(cfqd, cfqq->p_root,
				      blk_rq_pos(cfqq->next_rq), &parent, &p);
	if (!__cfqq) {
		rb_link_node(&cfqq->p_node, parent, p);
		rb_insert_color(&cfqq->p_node, cfqq->p_root);
	} else
		cfqq->p_root = NULL;
}

/*
 * Update cfqq's position in the service tree.
 */
static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	/*
	 * Resorting requires the cfqq to be on the RR list already.
	 */
	if (cfq_cfqq_on_rr(cfqq)) {
		cfq_service_tree_add(cfqd, cfqq, 0);
		cfq_prio_tree_add(cfqd, cfqq);
	}
}

/*
 * add to busy list of queues for service, trying to be fair in ordering
 * the pending list according to last request service
 */
static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	cfq_log_cfqq(cfqd, cfqq, "add_to_rr");
	BUG_ON(cfq_cfqq_on_rr(cfqq));
	cfq_mark_cfqq_on_rr(cfqq);
	cfqd->busy_queues++;
	if (cfq_cfqq_sync(cfqq))
		cfqd->busy_sync_queues++;

	cfq_resort_rr_list(cfqd, cfqq);
}

/*
 * Called when the cfqq no longer has requests pending, remove it from
 * the service tree.
 */
static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	cfq_log_cfqq(cfqd, cfqq, "del_from_rr");
	BUG_ON(!cfq_cfqq_on_rr(cfqq));
	cfq_clear_cfqq_on_rr(cfqq);

	if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
		cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
		cfqq->service_tree = NULL;
	}
	if (cfqq->p_root) {
		rb_erase(&cfqq->p_node, cfqq->p_root);
		cfqq->p_root = NULL;
	}

	cfq_group_notify_queue_del(cfqd, cfqq->cfqg);
	BUG_ON(!cfqd->busy_queues);
	cfqd->busy_queues--;
	if (cfq_cfqq_sync(cfqq))
		cfqd->busy_sync_queues--;
}

/*
 * rb tree support functions
 */
static void cfq_del_rq_rb(struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);
	const int sync = rq_is_sync(rq);

	BUG_ON(!cfqq->queued[sync]);
	cfqq->queued[sync]--;

	elv_rb_del(&cfqq->sort_list, rq);

	if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list)) {
		/*
		 * Queue will be deleted from service tree when we actually
		 * expire it later. Right now just remove it from prio tree
		 * as it is empty.
		 */
		if (cfqq->p_root) {
			rb_erase(&cfqq->p_node, cfqq->p_root);
			cfqq->p_root = NULL;
		}
	}
}

static void cfq_add_rq_rb(struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);
	struct cfq_data *cfqd = cfqq->cfqd;
	struct request *__alias, *prev;

	cfqq->queued[rq_is_sync(rq)]++;

	/*
	 * looks a little odd, but the first insert might return an alias.
	 * if that happens, put the alias on the dispatch list
	 */
	while ((__alias = elv_rb_add(&cfqq->sort_list, rq)) != NULL)
		cfq_dispatch_insert(cfqd->queue, __alias);

	if (!cfq_cfqq_on_rr(cfqq))
		cfq_add_cfqq_rr(cfqd, cfqq);

	/*
	 * check if this request is a better next-serve candidate
	 */
	prev = cfqq->next_rq;
	cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq, cfqd->last_position);

	/*
	 * adjust priority tree position, if ->next_rq changes
	 */
	if (prev != cfqq->next_rq)
		cfq_prio_tree_add(cfqd, cfqq);

	BUG_ON(!cfqq->next_rq);
}

static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
{
	elv_rb_del(&cfqq->sort_list, rq);
	cfqq->queued[rq_is_sync(rq)]--;
	cfq_blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg,
					rq_data_dir(rq), rq_is_sync(rq));
	cfq_add_rq_rb(rq);
	cfq_blkiocg_update_io_add_stats(&(RQ_CFQG(rq))->blkg,
			&cfqq->cfqd->serving_group->blkg, rq_data_dir(rq),
			rq_is_sync(rq));
}

static struct request *
cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
{
	struct task_struct *tsk = current;
	struct cfq_io_context *cic;
	struct cfq_queue *cfqq;

	cic = cfq_cic_lookup(cfqd, tsk->io_context);
	if (!cic)
		return NULL;

	cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
	if (cfqq) {
		sector_t sector = bio->bi_sector + bio_sectors(bio);

		return elv_rb_find(&cfqq->sort_list, sector);
	}

	return NULL;
}

static void cfq_activate_request(struct request_queue *q, struct request *rq)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;

	cfqd->rq_in_driver++;
	cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d",
						cfqd->rq_in_driver);

	cfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
}

static void cfq_deactivate_request(struct request_queue *q, struct request *rq)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;

	WARN_ON(!cfqd->rq_in_driver);
	cfqd->rq_in_driver--;
	cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "deactivate rq, drv=%d",
						cfqd->rq_in_driver);
}

static void cfq_remove_request(struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);

	if (cfqq->next_rq == rq)
		cfqq->next_rq = cfq_find_next_rq(cfqq->cfqd, cfqq, rq);

	list_del_init(&rq->queuelist);
	cfq_del_rq_rb(rq);

	cfqq->cfqd->rq_queued--;
	cfq_blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg,
					rq_data_dir(rq), rq_is_sync(rq));
	if (rq->cmd_flags & REQ_META) {
		WARN_ON(!cfqq->meta_pending);
		cfqq->meta_pending--;
	}
}

static int cfq_merge(struct request_queue *q, struct request **req,
		     struct bio *bio)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct request *__rq;

	__rq = cfq_find_rq_fmerge(cfqd, bio);
	if (__rq && elv_rq_merge_ok(__rq, bio)) {
		*req = __rq;
		return ELEVATOR_FRONT_MERGE;
	}

	return ELEVATOR_NO_MERGE;
}

static void cfq_merged_request(struct request_queue *q, struct request *req,
			       int type)
{
	if (type == ELEVATOR_FRONT_MERGE) {
		struct cfq_queue *cfqq = RQ_CFQQ(req);

		cfq_reposition_rq_rb(cfqq, req);
	}
}

static void cfq_bio_merged(struct request_queue *q, struct request *req,
			   struct bio *bio)
{
	cfq_blkiocg_update_io_merged_stats(&(RQ_CFQG(req))->blkg,
					bio_data_dir(bio), cfq_bio_sync(bio));
}

static void
cfq_merged_requests(struct request_queue *q, struct request *rq,
		    struct request *next)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);
	/*
	 * reposition in fifo if next is older than rq
	 */
	if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
	    time_before(rq_fifo_time(next), rq_fifo_time(rq))) {
		list_move(&rq->queuelist, &next->queuelist);
		rq_set_fifo_time(rq, rq_fifo_time(next));
	}

	if (cfqq->next_rq == next)
		cfqq->next_rq = rq;
	cfq_remove_request(next);
	cfq_blkiocg_update_io_merged_stats(&(RQ_CFQG(rq))->blkg,
					rq_data_dir(next), rq_is_sync(next));
}

static int cfq_allow_merge(struct request_queue *q, struct request *rq,
			   struct bio *bio)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_io_context *cic;
	struct cfq_queue *cfqq;

	/*
	 * Disallow merge of a sync bio into an async request.
	 */
	if (cfq_bio_sync(bio) && !rq_is_sync(rq))
		return false;

	/*
	 * Lookup the cfqq that this bio will be queued with. Allow
	 * merge only if rq is queued there.
	 */
	cic = cfq_cic_lookup(cfqd, current->io_context);
	if (!cic)
		return false;

	cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
	return cfqq == RQ_CFQQ(rq);
}

static inline void cfq_del_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	del_timer(&cfqd->idle_slice_timer);
	cfq_blkiocg_update_idle_time_stats(&cfqq->cfqg->blkg);
}

static void __cfq_set_active_queue(struct cfq_data *cfqd,
				   struct cfq_queue *cfqq)
{
	if (cfqq) {
		cfq_log_cfqq(cfqd, cfqq, "set_active wl_prio:%d wl_type:%d",
				cfqd->serving_prio, cfqd->serving_type);
		cfq_blkiocg_update_avg_queue_size_stats(&cfqq->cfqg->blkg);
		cfqq->slice_start = 0;
		cfqq->dispatch_start = jiffies;
		cfqq->allocated_slice = 0;
		cfqq->slice_end = 0;
		cfqq->slice_dispatch = 0;
		cfqq->nr_sectors = 0;

		cfq_clear_cfqq_wait_request(cfqq);
		cfq_clear_cfqq_must_dispatch(cfqq);
		cfq_clear_cfqq_must_alloc_slice(cfqq);
		cfq_clear_cfqq_fifo_expire(cfqq);
		cfq_mark_cfqq_slice_new(cfqq);

		cfq_del_timer(cfqd, cfqq);
	}

	cfqd->active_queue = cfqq;
}
1712
7b14e3b5
JA
1713/*
1714 * current cfqq expired its slice (or was too idle), select new one
1715 */
1716static void
1717__cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
e5ff082e 1718 bool timed_out)
7b14e3b5 1719{
7b679138
JA
1720 cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out);
1721
7b14e3b5 1722 if (cfq_cfqq_wait_request(cfqq))
812df48d 1723 cfq_del_timer(cfqd, cfqq);
7b14e3b5 1724
7b14e3b5 1725 cfq_clear_cfqq_wait_request(cfqq);
f75edf2d 1726 cfq_clear_cfqq_wait_busy(cfqq);
7b14e3b5 1727
ae54abed
SL
1728 /*
1729 * If this cfqq is shared between multiple processes, check to
1730 * make sure that those processes are still issuing I/Os within
1731 * the mean seek distance. If not, it may be time to break the
1732 * queues apart again.
1733 */
1734 if (cfq_cfqq_coop(cfqq) && CFQQ_SEEKY(cfqq))
1735 cfq_mark_cfqq_split_coop(cfqq);
1736
7b14e3b5 1737 /*
6084cdda 1738 * store what was left of this slice, if the queue idled/timed out
7b14e3b5 1739 */
c553f8e3
SL
1740 if (timed_out) {
1741 if (cfq_cfqq_slice_new(cfqq))
ba5bd520 1742 cfqq->slice_resid = cfq_scaled_cfqq_slice(cfqd, cfqq);
c553f8e3
SL
1743 else
1744 cfqq->slice_resid = cfqq->slice_end - jiffies;
7b679138
JA
1745 cfq_log_cfqq(cfqd, cfqq, "resid=%ld", cfqq->slice_resid);
1746 }
7b14e3b5 1747
e5ff082e 1748 cfq_group_served(cfqd, cfqq->cfqg, cfqq);
dae739eb 1749
f04a6424
VG
1750 if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list))
1751 cfq_del_cfqq_rr(cfqd, cfqq);
1752
edd75ffd 1753 cfq_resort_rr_list(cfqd, cfqq);
7b14e3b5
JA
1754
1755 if (cfqq == cfqd->active_queue)
1756 cfqd->active_queue = NULL;
1757
1758 if (cfqd->active_cic) {
1759 put_io_context(cfqd->active_cic->ioc);
1760 cfqd->active_cic = NULL;
1761 }
7b14e3b5
JA
1762}
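
/*
 * Illustrative example of the residual bookkeeping above (assuming
 * HZ=1000 and the default cfq_slice_sync of HZ / 10): a sync queue
 * that times out 70 ms into its 100 ms slice keeps slice_resid =
 * slice_end - jiffies = 30 jiffies. That credit is applied the next
 * time the queue is added to the service tree, so an interrupted
 * queue is not penalised twice.
 */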

static inline void cfq_slice_expired(struct cfq_data *cfqd, bool timed_out)
{
	struct cfq_queue *cfqq = cfqd->active_queue;

	if (cfqq)
		__cfq_slice_expired(cfqd, cfqq, timed_out);
}

/*
 * Get next queue for service. Unless we have a queue preemption,
 * we'll simply select the first cfqq in the service tree.
 */
static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
{
	struct cfq_rb_root *service_tree =
		service_tree_for(cfqd->serving_group, cfqd->serving_prio,
					cfqd->serving_type);

	if (!cfqd->rq_queued)
		return NULL;

	/* There is nothing to dispatch */
	if (!service_tree)
		return NULL;
	if (RB_EMPTY_ROOT(&service_tree->rb))
		return NULL;
	return cfq_rb_first(service_tree);
}

static struct cfq_queue *cfq_get_next_queue_forced(struct cfq_data *cfqd)
{
	struct cfq_group *cfqg;
	struct cfq_queue *cfqq;
	int i, j;
	struct cfq_rb_root *st;

	if (!cfqd->rq_queued)
		return NULL;

	cfqg = cfq_get_next_cfqg(cfqd);
	if (!cfqg)
		return NULL;

	for_each_cfqg_st(cfqg, i, j, st)
		if ((cfqq = cfq_rb_first(st)) != NULL)
			return cfqq;
	return NULL;
}

/*
 * Get and set a new active queue for service.
 */
static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd,
					      struct cfq_queue *cfqq)
{
	if (!cfqq)
		cfqq = cfq_get_next_queue(cfqd);

	__cfq_set_active_queue(cfqd, cfqq);
	return cfqq;
}

static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
					  struct request *rq)
{
	if (blk_rq_pos(rq) >= cfqd->last_position)
		return blk_rq_pos(rq) - cfqd->last_position;
	else
		return cfqd->last_position - blk_rq_pos(rq);
}

static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq,
			       struct request *rq)
{
	return cfq_dist_from_last(cfqd, rq) <= CFQQ_CLOSE_THR;
}

static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
				    struct cfq_queue *cur_cfqq)
{
	struct rb_root *root = &cfqd->prio_trees[cur_cfqq->org_ioprio];
	struct rb_node *parent, *node;
	struct cfq_queue *__cfqq;
	sector_t sector = cfqd->last_position;

	if (RB_EMPTY_ROOT(root))
		return NULL;

	/*
	 * First, if we find a request starting at the end of the last
	 * request, choose it.
	 */
	__cfqq = cfq_prio_tree_lookup(cfqd, root, sector, &parent, NULL);
	if (__cfqq)
		return __cfqq;

	/*
	 * If the exact sector wasn't found, the parent of the NULL leaf
	 * will contain the closest sector.
	 */
	__cfqq = rb_entry(parent, struct cfq_queue, p_node);
	if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
		return __cfqq;

	if (blk_rq_pos(__cfqq->next_rq) < sector)
		node = rb_next(&__cfqq->p_node);
	else
		node = rb_prev(&__cfqq->p_node);
	if (!node)
		return NULL;

	__cfqq = rb_entry(node, struct cfq_queue, p_node);
	if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
		return __cfqq;

	return NULL;
}

/*
 * cfqd - obvious
 * cur_cfqq - passed in so that we don't decide that the current queue is
 *            closely cooperating with itself.
 *
 * So, basically we're assuming that cur_cfqq has dispatched at least
 * one request, and that cfqd->last_position reflects a position on the disk
 * associated with the I/O issued by cur_cfqq. I'm not sure this is a valid
 * assumption.
 */
static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
					      struct cfq_queue *cur_cfqq)
{
	struct cfq_queue *cfqq;

	if (cfq_class_idle(cur_cfqq))
		return NULL;
	if (!cfq_cfqq_sync(cur_cfqq))
		return NULL;
	if (CFQQ_SEEKY(cur_cfqq))
		return NULL;

	/*
	 * Don't search the priority tree if it's the only queue in the group.
	 */
	if (cur_cfqq->cfqg->nr_cfqq == 1)
		return NULL;

	/*
	 * We should notice if some of the queues are cooperating, eg
	 * working closely on the same area of the disk. In that case,
	 * we can group them together and don't waste time idling.
	 */
	cfqq = cfqq_close(cfqd, cur_cfqq);
	if (!cfqq)
		return NULL;

	/* If the new queue belongs to a different cfq_group, don't choose it */
	if (cur_cfqq->cfqg != cfqq->cfqg)
		return NULL;

	/*
	 * It only makes sense to merge sync queues.
	 */
	if (!cfq_cfqq_sync(cfqq))
		return NULL;
	if (CFQQ_SEEKY(cfqq))
		return NULL;

	/*
	 * Do not merge queues of different priority classes
	 */
	if (cfq_class_rt(cfqq) != cfq_class_rt(cur_cfqq))
		return NULL;

	return cfqq;
}
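
/*
 * Taken together, the checks above mean a candidate cooperator must be
 * a sync, non-seeky queue in the same cfq_group and scheduling class as
 * cur_cfqq, with pending I/O within CFQQ_CLOSE_THR (8 * 1024 sectors,
 * roughly 4 MiB with 512-byte sectors) of cfqd->last_position.
 */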

/*
 * Determine whether we should enforce idle window for this queue.
 */
static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	enum wl_prio_t prio = cfqq_prio(cfqq);
	struct cfq_rb_root *service_tree = cfqq->service_tree;

	BUG_ON(!service_tree);
	BUG_ON(!service_tree->count);

	if (!cfqd->cfq_slice_idle)
		return false;

	/* We never do for idle class queues. */
	if (prio == IDLE_WORKLOAD)
		return false;

	/* We do for queues that were marked with idle window flag. */
	if (cfq_cfqq_idle_window(cfqq) &&
	    !(blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag))
		return true;

	/*
	 * Otherwise, we do only if they are the last ones
	 * in their service tree.
	 */
	if (service_tree->count == 1 && cfq_cfqq_sync(cfqq))
		return true;
	cfq_log_cfqq(cfqd, cfqq, "Not idling. st->count:%d",
			service_tree->count);
	return false;
}
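
/*
 * Example (assuming HZ=1000): on a rotational disk, a sync queue with
 * the idle-window flag gets the full cfq_slice_idle (HZ / 125, i.e.
 * 8 ms) of idling between requests, while on a command-queuing SSD
 * (blk_queue_nonrot() && hw_tag) the same queue only idles when it is
 * the last sync queue left on its service tree.
 */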

static void cfq_arm_slice_timer(struct cfq_data *cfqd)
{
	struct cfq_queue *cfqq = cfqd->active_queue;
	struct cfq_io_context *cic;
	unsigned long sl, group_idle = 0;

	/*
	 * SSD device without seek penalty, disable idling. But only do so
	 * for devices that support queuing, otherwise we still have a problem
	 * with sync vs async workloads.
	 */
	if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag)
		return;

	WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
	WARN_ON(cfq_cfqq_slice_new(cfqq));

	/*
	 * idle is disabled, either manually or by past process history
	 */
	if (!cfq_should_idle(cfqd, cfqq)) {
		/* no queue idling. Check for group idling */
		if (cfqd->cfq_group_idle)
			group_idle = cfqd->cfq_group_idle;
		else
			return;
	}

	/*
	 * still active requests from this queue, don't idle
	 */
	if (cfqq->dispatched)
		return;

	/*
	 * task has exited, don't wait
	 */
	cic = cfqd->active_cic;
	if (!cic || !atomic_read(&cic->ioc->nr_tasks))
		return;

	/*
	 * If our average think time is larger than the remaining time
	 * slice, then don't idle. This avoids overrunning the allotted
	 * time slice.
	 */
	if (sample_valid(cic->ttime_samples) &&
	    (cfqq->slice_end - jiffies < cic->ttime_mean)) {
		cfq_log_cfqq(cfqd, cfqq, "Not idling. think_time:%d",
				cic->ttime_mean);
		return;
	}

	/* There are other queues in the group, don't do group idle */
	if (group_idle && cfqq->cfqg->nr_cfqq > 1)
		return;

	cfq_mark_cfqq_wait_request(cfqq);

	if (group_idle)
		sl = cfqd->cfq_group_idle;
	else
		sl = cfqd->cfq_slice_idle;

	mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
	cfq_blkiocg_update_set_idle_time_stats(&cfqq->cfqg->blkg);
	cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu group_idle: %d", sl,
			group_idle ? 1 : 0);
}
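
/*
 * Worked example of the think-time check above: if 12 ms of the slice
 * remain but the task's mean think time is 20 ms, idling would overrun
 * the slice by ~8 ms on average, so the timer is not armed. Otherwise
 * the idle timer fires after cfq_slice_idle, or after cfq_group_idle
 * when only group idling applies.
 */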

/*
 * Move request from internal lists to the request queue dispatch list.
 */
static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_queue *cfqq = RQ_CFQQ(rq);

	cfq_log_cfqq(cfqd, cfqq, "dispatch_insert");

	cfqq->next_rq = cfq_find_next_rq(cfqd, cfqq, rq);
	cfq_remove_request(rq);
	cfqq->dispatched++;
	(RQ_CFQG(rq))->dispatched++;
	elv_dispatch_sort(q, rq);

	cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++;
	cfqq->nr_sectors += blk_rq_sectors(rq);
	cfq_blkiocg_update_dispatch_stats(&cfqq->cfqg->blkg, blk_rq_bytes(rq),
					rq_data_dir(rq), rq_is_sync(rq));
}

/*
 * return expired entry, or NULL to just start from scratch in rbtree
 */
static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
{
	struct request *rq = NULL;

	if (cfq_cfqq_fifo_expire(cfqq))
		return NULL;

	cfq_mark_cfqq_fifo_expire(cfqq);

	if (list_empty(&cfqq->fifo))
		return NULL;

	rq = rq_entry_fifo(cfqq->fifo.next);
	if (time_before(jiffies, rq_fifo_time(rq)))
		rq = NULL;

	cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq);
	return rq;
}
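
/*
 * Illustration (assuming HZ=1000): with the default cfq_fifo_expire of
 * { HZ / 4, HZ / 8 }, a sync request's fifo deadline lands ~125 ms
 * after it is queued and an async request's ~250 ms after. The
 * fifo_expire flag above limits this path to one fifo request per
 * slice.
 */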

static inline int
cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	const int base_rq = cfqd->cfq_slice_async_rq;

	WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);

	return 2 * (base_rq + base_rq * (CFQ_PRIO_LISTS - 1 - cfqq->ioprio));
}
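
/*
 * Worked example with the default cfq_slice_async_rq of 2 and
 * CFQ_PRIO_LISTS of 8: an ioprio-0 queue may dispatch up to
 * 2 * (2 + 2 * 7) = 32 requests in one slice round, while an ioprio-7
 * queue is capped at 2 * (2 + 2 * 0) = 4, so higher-priority queues
 * drain proportionally more per round.
 */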

/*
 * Must be called with the queue_lock held.
 */
static int cfqq_process_refs(struct cfq_queue *cfqq)
{
	int process_refs, io_refs;

	io_refs = cfqq->allocated[READ] + cfqq->allocated[WRITE];
	process_refs = cfqq->ref - io_refs;
	BUG_ON(process_refs < 0);
	return process_refs;
}

static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq)
{
	int process_refs, new_process_refs;
	struct cfq_queue *__cfqq;

	/*
	 * If there are no process references on the new_cfqq, then it is
	 * unsafe to follow the ->new_cfqq chain as other cfqq's in the
	 * chain may have dropped their last reference (not just their
	 * last process reference).
	 */
	if (!cfqq_process_refs(new_cfqq))
		return;

	/* Avoid a circular list and skip interim queue merges */
	while ((__cfqq = new_cfqq->new_cfqq)) {
		if (__cfqq == cfqq)
			return;
		new_cfqq = __cfqq;
	}

	process_refs = cfqq_process_refs(cfqq);
	new_process_refs = cfqq_process_refs(new_cfqq);
	/*
	 * If the process for the cfqq has gone away, there is no
	 * sense in merging the queues.
	 */
	if (process_refs == 0 || new_process_refs == 0)
		return;

	/*
	 * Merge in the direction of the lesser amount of work.
	 */
	if (new_process_refs >= process_refs) {
		cfqq->new_cfqq = new_cfqq;
		new_cfqq->ref += process_refs;
	} else {
		new_cfqq->new_cfqq = cfqq;
		cfqq->ref += new_process_refs;
	}
}
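
/*
 * Example of the direction rule above: if cfqq holds 3 process
 * references and new_cfqq holds 5, then cfqq->new_cfqq is pointed at
 * new_cfqq and new_cfqq->ref grows by 3, i.e. the queue doing less
 * work is folded into the busier one rather than the other way around.
 */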

static enum wl_type_t cfq_choose_wl(struct cfq_data *cfqd,
				struct cfq_group *cfqg, enum wl_prio_t prio)
{
	struct cfq_queue *queue;
	int i;
	bool key_valid = false;
	unsigned long lowest_key = 0;
	enum wl_type_t cur_best = SYNC_NOIDLE_WORKLOAD;

	for (i = 0; i <= SYNC_WORKLOAD; ++i) {
		/* select the one with lowest rb_key */
		queue = cfq_rb_first(service_tree_for(cfqg, prio, i));
		if (queue &&
		    (!key_valid || time_before(queue->rb_key, lowest_key))) {
			lowest_key = queue->rb_key;
			cur_best = i;
			key_valid = true;
		}
	}

	return cur_best;
}

static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg)
{
	unsigned slice;
	unsigned count;
	struct cfq_rb_root *st;
	unsigned group_slice;
	enum wl_prio_t original_prio = cfqd->serving_prio;

	/* Choose next priority. RT > BE > IDLE */
	if (cfq_group_busy_queues_wl(RT_WORKLOAD, cfqd, cfqg))
		cfqd->serving_prio = RT_WORKLOAD;
	else if (cfq_group_busy_queues_wl(BE_WORKLOAD, cfqd, cfqg))
		cfqd->serving_prio = BE_WORKLOAD;
	else {
		cfqd->serving_prio = IDLE_WORKLOAD;
		cfqd->workload_expires = jiffies + 1;
		return;
	}

	if (original_prio != cfqd->serving_prio)
		goto new_workload;

	/*
	 * For RT and BE, we have to choose also the type
	 * (SYNC, SYNC_NOIDLE, ASYNC), and to compute a workload
	 * expiration time
	 */
	st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type);
	count = st->count;

	/*
	 * check workload expiration, and that we still have other queues ready
	 */
	if (count && !time_after(jiffies, cfqd->workload_expires))
		return;

new_workload:
	/* otherwise select new workload type */
	cfqd->serving_type = cfq_choose_wl(cfqd, cfqg, cfqd->serving_prio);
	st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type);
	count = st->count;

	/*
	 * the workload slice is computed as a fraction of target latency
	 * proportional to the number of queues in that workload, over
	 * all the queues in the same priority class
	 */
	group_slice = cfq_group_slice(cfqd, cfqg);

	slice = group_slice * count /
		max_t(unsigned, cfqg->busy_queues_avg[cfqd->serving_prio],
		      cfq_group_busy_queues_wl(cfqd->serving_prio, cfqd, cfqg));

	if (cfqd->serving_type == ASYNC_WORKLOAD) {
		unsigned int tmp;

		/*
		 * Async queues are currently system wide. Just taking the
		 * proportion of queues within the same group would lead to a
		 * higher async ratio system wide, as the root group generally
		 * has a higher weight. A more accurate approach would be to
		 * calculate the system-wide async/sync ratio.
		 */
		tmp = cfq_target_latency * cfqg_busy_async_queues(cfqd, cfqg);
		tmp = tmp/cfqd->busy_queues;
		slice = min_t(unsigned, slice, tmp);

		/* async workload slice is scaled down according to
		 * the sync/async slice ratio. */
		slice = slice * cfqd->cfq_slice[0] / cfqd->cfq_slice[1];
	} else
		/* sync workload slice is at least 2 * cfq_slice_idle */
		slice = max(slice, 2 * cfqd->cfq_slice_idle);

	slice = max_t(unsigned, slice, CFQ_MIN_TT);
	cfq_log(cfqd, "workload slice:%d", slice);
	cfqd->workload_expires = jiffies + slice;
}
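
/*
 * Worked example of the slice computation above (illustrative
 * numbers): with a 300 ms group slice, 2 queues on the chosen service
 * tree and 6 busy queues of that priority class in the group, the
 * workload gets 300 * 2 / 6 = 100 ms. An async workload is further
 * capped by the system-wide async share and scaled by the
 * cfq_slice[0] / cfq_slice[1] ratio; a sync workload is floored at
 * 2 * cfq_slice_idle.
 */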

static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd)
{
	struct cfq_rb_root *st = &cfqd->grp_service_tree;
	struct cfq_group *cfqg;

	if (RB_EMPTY_ROOT(&st->rb))
		return NULL;
	cfqg = cfq_rb_first_group(st);
	update_min_vdisktime(st);
	return cfqg;
}

static void cfq_choose_cfqg(struct cfq_data *cfqd)
{
	struct cfq_group *cfqg = cfq_get_next_cfqg(cfqd);

	cfqd->serving_group = cfqg;

	/* Restore the workload type data */
	if (cfqg->saved_workload_slice) {
		cfqd->workload_expires = jiffies + cfqg->saved_workload_slice;
		cfqd->serving_type = cfqg->saved_workload;
		cfqd->serving_prio = cfqg->saved_serving_prio;
	} else
		cfqd->workload_expires = jiffies - 1;

	choose_service_tree(cfqd, cfqg);
}

/*
 * Select a queue for service. If we have a current active queue,
 * check whether to continue servicing it, or retrieve and set a new one.
 */
static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
{
	struct cfq_queue *cfqq, *new_cfqq = NULL;

	cfqq = cfqd->active_queue;
	if (!cfqq)
		goto new_queue;

	if (!cfqd->rq_queued)
		return NULL;

	/*
	 * We were waiting for the group to get backlogged. Expire the queue.
	 */
	if (cfq_cfqq_wait_busy(cfqq) && !RB_EMPTY_ROOT(&cfqq->sort_list))
		goto expire;

	/*
	 * The active queue has run out of time, expire it and select new.
	 */
	if (cfq_slice_used(cfqq) && !cfq_cfqq_must_dispatch(cfqq)) {
		/*
		 * If the slice had not expired at the completion of the last
		 * request, we might not have turned on the wait_busy flag.
		 * Don't expire the queue yet. Allow the group to get
		 * backlogged.
		 *
		 * The very fact that we have used the slice means we have
		 * been idling all along on this queue and it should be ok
		 * to wait for this request to complete.
		 */
		if (cfqq->cfqg->nr_cfqq == 1 && RB_EMPTY_ROOT(&cfqq->sort_list)
		    && cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
			cfqq = NULL;
			goto keep_queue;
		} else
			goto check_group_idle;
	}

	/*
	 * The active queue has requests and isn't expired, allow it to
	 * dispatch.
	 */
	if (!RB_EMPTY_ROOT(&cfqq->sort_list))
		goto keep_queue;

	/*
	 * If another queue has a request waiting within our mean seek
	 * distance, let it run. The expire code will check for close
	 * cooperators and put the close queue at the front of the service
	 * tree. If possible, merge the expiring queue with the new cfqq.
	 */
	new_cfqq = cfq_close_cooperator(cfqd, cfqq);
	if (new_cfqq) {
		if (!cfqq->new_cfqq)
			cfq_setup_merge(cfqq, new_cfqq);
		goto expire;
	}

	/*
	 * No requests pending. If the active queue still has requests in
	 * flight or is idling for a new request, allow either of these
	 * conditions to happen (or time out) before selecting a new queue.
	 */
	if (timer_pending(&cfqd->idle_slice_timer)) {
		cfqq = NULL;
		goto keep_queue;
	}

	/*
	 * This is a deep seek queue, but the device is much faster than
	 * the queue can deliver, don't idle
	 */
	if (CFQQ_SEEKY(cfqq) && cfq_cfqq_idle_window(cfqq) &&
	    (cfq_cfqq_slice_new(cfqq) ||
	    (cfqq->slice_end - jiffies > jiffies - cfqq->slice_start))) {
		cfq_clear_cfqq_deep(cfqq);
		cfq_clear_cfqq_idle_window(cfqq);
	}

	if (cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
		cfqq = NULL;
		goto keep_queue;
	}

	/*
	 * If group idle is enabled and there are requests dispatched from
	 * this group, wait for requests to complete.
	 */
check_group_idle:
	if (cfqd->cfq_group_idle && cfqq->cfqg->nr_cfqq == 1
	    && cfqq->cfqg->dispatched) {
		cfqq = NULL;
		goto keep_queue;
	}

expire:
	cfq_slice_expired(cfqd, 0);
new_queue:
	/*
	 * Current queue expired. Check if we have to switch to a new
	 * service tree
	 */
	if (!new_cfqq)
		cfq_choose_cfqg(cfqd);

	cfqq = cfq_set_active_queue(cfqd, new_cfqq);
keep_queue:
	return cfqq;
}
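
/*
 * Rough precedence of the checks above: wait_busy expiry first, then
 * slice timeout (with the group-idle escape), then "queue still has
 * requests", then close-cooperator merging, then the idle-timer and
 * group-idle waits, and only on a full expiry a workload/group
 * re-selection via cfq_choose_cfqg().
 */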

static int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq)
{
	int dispatched = 0;

	while (cfqq->next_rq) {
		cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq);
		dispatched++;
	}

	BUG_ON(!list_empty(&cfqq->fifo));

	/* By default cfqq is not expired if it is empty. Do it explicitly */
	__cfq_slice_expired(cfqq->cfqd, cfqq, 0);
	return dispatched;
}

/*
 * Drain our current requests. Used for barriers and when switching
 * io schedulers on-the-fly.
 */
static int cfq_forced_dispatch(struct cfq_data *cfqd)
{
	struct cfq_queue *cfqq;
	int dispatched = 0;

	/* Expire the timeslice of the current active queue first */
	cfq_slice_expired(cfqd, 0);
	while ((cfqq = cfq_get_next_queue_forced(cfqd)) != NULL) {
		__cfq_set_active_queue(cfqd, cfqq);
		dispatched += __cfq_forced_dispatch_cfqq(cfqq);
	}

	BUG_ON(cfqd->busy_queues);

	cfq_log(cfqd, "forced_dispatch=%d", dispatched);
	return dispatched;
}

static inline bool cfq_slice_used_soon(struct cfq_data *cfqd,
				       struct cfq_queue *cfqq)
{
	/* the queue hasn't finished any request, can't estimate */
	if (cfq_cfqq_slice_new(cfqq))
		return true;
	if (time_after(jiffies + cfqd->cfq_slice_idle * cfqq->dispatched,
		       cfqq->slice_end))
		return true;

	return false;
}

static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	unsigned int max_dispatch;

	/*
	 * Drain async requests before we start sync IO
	 */
	if (cfq_should_idle(cfqd, cfqq) && cfqd->rq_in_flight[BLK_RW_ASYNC])
		return false;

	/*
	 * If this is an async queue and we have sync IO in flight, let it wait
	 */
	if (cfqd->rq_in_flight[BLK_RW_SYNC] && !cfq_cfqq_sync(cfqq))
		return false;

	max_dispatch = max_t(unsigned int, cfqd->cfq_quantum / 2, 1);
	if (cfq_class_idle(cfqq))
		max_dispatch = 1;

	/*
	 * Does this cfqq already have too much IO in flight?
	 */
	if (cfqq->dispatched >= max_dispatch) {
		bool promote_sync = false;
		/*
		 * idle queue must always only have a single IO in flight
		 */
		if (cfq_class_idle(cfqq))
			return false;

		/*
		 * If there is only one sync queue
		 * we can ignore async queue here and give the sync
		 * queue no dispatch limit. The reason is a sync queue can
		 * preempt async queue, limiting the sync queue doesn't make
		 * sense. This is useful for aiostress test.
		 */
		if (cfq_cfqq_sync(cfqq) && cfqd->busy_sync_queues == 1)
			promote_sync = true;

		/*
		 * We have other queues, don't allow more IO from this one
		 */
		if (cfqd->busy_queues > 1 && cfq_slice_used_soon(cfqd, cfqq) &&
		    !promote_sync)
			return false;

		/*
		 * Sole queue user, no limit
		 */
		if (cfqd->busy_queues == 1 || promote_sync)
			max_dispatch = -1;
		else
			/*
			 * Normally we start throttling cfqq when cfq_quantum/2
			 * requests have been dispatched. But we can drive
			 * deeper queue depths at the beginning of slice
			 * subjected to upper limit of cfq_quantum.
			 */
			max_dispatch = cfqd->cfq_quantum;
	}

	/*
	 * Async queues must wait a bit before being allowed dispatch.
	 * We also ramp up the dispatch depth gradually for async IO,
	 * based on the last sync IO we serviced
	 */
	if (!cfq_cfqq_sync(cfqq) && cfqd->cfq_latency) {
		unsigned long last_sync = jiffies - cfqd->last_delayed_sync;
		unsigned int depth;

		depth = last_sync / cfqd->cfq_slice[1];
		if (!depth && !cfqq->dispatched)
			depth = 1;
		if (depth < max_dispatch)
			max_dispatch = depth;
	}

	/*
	 * If we're below the current max, allow a dispatch
	 */
	return cfqq->dispatched < max_dispatch;
}
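
/*
 * Worked example with the default cfq_quantum of 8: throttling of a
 * queue normally begins once max(8 / 2, 1) = 4 of its requests are in
 * flight; the sole busy queue (or a lone sync queue) is effectively
 * uncapped, and anything else may ramp back up to at most cfq_quantum.
 * Async queues are additionally depth-limited right after recent sync
 * I/O via last_delayed_sync.
 */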

/*
 * Dispatch a request from cfqq, moving them to the request queue
 * dispatch list.
 */
static bool cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	struct request *rq;

	BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));

	if (!cfq_may_dispatch(cfqd, cfqq))
		return false;

	/*
	 * follow expired path, else get first next available
	 */
	rq = cfq_check_fifo(cfqq);
	if (!rq)
		rq = cfqq->next_rq;

	/*
	 * insert request into driver dispatch list
	 */
	cfq_dispatch_insert(cfqd->queue, rq);

	if (!cfqd->active_cic) {
		struct cfq_io_context *cic = RQ_CIC(rq);

		atomic_long_inc(&cic->ioc->refcount);
		cfqd->active_cic = cic;
	}

	return true;
}

/*
 * Find the cfqq that we need to service and move a request from that to the
 * dispatch list
 */
static int cfq_dispatch_requests(struct request_queue *q, int force)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_queue *cfqq;

	if (!cfqd->busy_queues)
		return 0;

	if (unlikely(force))
		return cfq_forced_dispatch(cfqd);

	cfqq = cfq_select_queue(cfqd);
	if (!cfqq)
		return 0;

	/*
	 * Dispatch a request from this cfqq, if it is allowed
	 */
	if (!cfq_dispatch_request(cfqd, cfqq))
		return 0;

	cfqq->slice_dispatch++;
	cfq_clear_cfqq_must_dispatch(cfqq);

	/*
	 * expire an async queue immediately if it has used up its slice. idle
	 * queue always expire after 1 dispatch round.
	 */
	if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
	    cfqq->slice_dispatch >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
	    cfq_class_idle(cfqq))) {
		cfqq->slice_end = jiffies + 1;
		cfq_slice_expired(cfqd, 0);
	}

	cfq_log_cfqq(cfqd, cfqq, "dispatched a request");
	return 1;
}

/*
 * task holds one reference to the queue, dropped when task exits. each rq
 * in-flight on this queue also holds a reference, dropped when rq is freed.
 *
 * Each cfq queue took a reference on the parent group. Drop it now.
 * queue lock must be held here.
 */
static void cfq_put_queue(struct cfq_queue *cfqq)
{
	struct cfq_data *cfqd = cfqq->cfqd;
	struct cfq_group *cfqg;

	BUG_ON(cfqq->ref <= 0);

	cfqq->ref--;
	if (cfqq->ref)
		return;

	cfq_log_cfqq(cfqd, cfqq, "put_queue");
	BUG_ON(rb_first(&cfqq->sort_list));
	BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
	cfqg = cfqq->cfqg;

	if (unlikely(cfqd->active_queue == cfqq)) {
		__cfq_slice_expired(cfqd, cfqq, 0);
		cfq_schedule_dispatch(cfqd);
	}

	BUG_ON(cfq_cfqq_on_rr(cfqq));
	kmem_cache_free(cfq_pool, cfqq);
	cfq_put_cfqg(cfqg);
}

/*
 * Call func for each cic attached to this ioc.
 */
static void
call_for_each_cic(struct io_context *ioc,
		  void (*func)(struct io_context *, struct cfq_io_context *))
{
	struct cfq_io_context *cic;
	struct hlist_node *n;

	rcu_read_lock();

	hlist_for_each_entry_rcu(cic, n, &ioc->cic_list, cic_list)
		func(ioc, cic);

	rcu_read_unlock();
}

static void cfq_cic_free_rcu(struct rcu_head *head)
{
	struct cfq_io_context *cic;

	cic = container_of(head, struct cfq_io_context, rcu_head);

	kmem_cache_free(cfq_ioc_pool, cic);
	elv_ioc_count_dec(cfq_ioc_count);

	if (ioc_gone) {
		/*
		 * CFQ scheduler is exiting, grab exit lock and check
		 * the pending io context count. If it hits zero,
		 * complete ioc_gone and set it back to NULL
		 */
		spin_lock(&ioc_gone_lock);
		if (ioc_gone && !elv_ioc_count_read(cfq_ioc_count)) {
			complete(ioc_gone);
			ioc_gone = NULL;
		}
		spin_unlock(&ioc_gone_lock);
	}
}

static void cfq_cic_free(struct cfq_io_context *cic)
{
	call_rcu(&cic->rcu_head, cfq_cic_free_rcu);
}

static void cic_free_func(struct io_context *ioc, struct cfq_io_context *cic)
{
	unsigned long flags;
	unsigned long dead_key = (unsigned long) cic->key;

	BUG_ON(!(dead_key & CIC_DEAD_KEY));

	spin_lock_irqsave(&ioc->lock, flags);
	radix_tree_delete(&ioc->radix_root, dead_key >> CIC_DEAD_INDEX_SHIFT);
	hlist_del_rcu(&cic->cic_list);
	spin_unlock_irqrestore(&ioc->lock, flags);

	cfq_cic_free(cic);
}

/*
 * Must be called with rcu_read_lock() held or preemption otherwise disabled.
 * Only two callers of this - ->dtor() which is called with the rcu_read_lock(),
 * and ->trim() which is called with the task lock held
 */
static void cfq_free_io_context(struct io_context *ioc)
{
	/*
	 * ioc->refcount is zero here, or we are called from elv_unregister(),
	 * so no more cic's are allowed to be linked into this ioc. So it
	 * should be ok to iterate over the known list, we will see all cic's
	 * since no new ones are added.
	 */
	call_for_each_cic(ioc, cic_free_func);
}

static void cfq_put_cooperator(struct cfq_queue *cfqq)
{
	struct cfq_queue *__cfqq, *next;

	/*
	 * If this queue was scheduled to merge with another queue, be
	 * sure to drop the reference taken on that queue (and others in
	 * the merge chain). See cfq_setup_merge and cfq_merge_cfqqs.
	 */
	__cfqq = cfqq->new_cfqq;
	while (__cfqq) {
		if (__cfqq == cfqq) {
			WARN(1, "cfqq->new_cfqq loop detected\n");
			break;
		}
		next = __cfqq->new_cfqq;
		cfq_put_queue(__cfqq);
		__cfqq = next;
	}
}

static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	if (unlikely(cfqq == cfqd->active_queue)) {
		__cfq_slice_expired(cfqd, cfqq, 0);
		cfq_schedule_dispatch(cfqd);
	}

	cfq_put_cooperator(cfqq);

	cfq_put_queue(cfqq);
}

static void __cfq_exit_single_io_context(struct cfq_data *cfqd,
					 struct cfq_io_context *cic)
{
	struct io_context *ioc = cic->ioc;

	list_del_init(&cic->queue_list);

	/*
	 * Make sure dead mark is seen for dead queues
	 */
	smp_wmb();
	cic->key = cfqd_dead_key(cfqd);

	if (ioc->ioc_data == cic)
		rcu_assign_pointer(ioc->ioc_data, NULL);

	if (cic->cfqq[BLK_RW_ASYNC]) {
		cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_ASYNC]);
		cic->cfqq[BLK_RW_ASYNC] = NULL;
	}

	if (cic->cfqq[BLK_RW_SYNC]) {
		cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_SYNC]);
		cic->cfqq[BLK_RW_SYNC] = NULL;
	}
}

static void cfq_exit_single_io_context(struct io_context *ioc,
				       struct cfq_io_context *cic)
{
	struct cfq_data *cfqd = cic_to_cfqd(cic);

	if (cfqd) {
		struct request_queue *q = cfqd->queue;
		unsigned long flags;

		spin_lock_irqsave(q->queue_lock, flags);

		/*
		 * Ensure we get a fresh copy of the ->key to prevent
		 * race between exiting task and queue
		 */
		smp_read_barrier_depends();
		if (cic->key == cfqd)
			__cfq_exit_single_io_context(cfqd, cic);

		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}

/*
 * The process that ioc belongs to has exited, we need to clean up
 * and put the internal structures we have that belong to that process.
 */
static void cfq_exit_io_context(struct io_context *ioc)
{
	call_for_each_cic(ioc, cfq_exit_single_io_context);
}

static struct cfq_io_context *
cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
{
	struct cfq_io_context *cic;

	cic = kmem_cache_alloc_node(cfq_ioc_pool, gfp_mask | __GFP_ZERO,
				    cfqd->queue->node);
	if (cic) {
		cic->last_end_request = jiffies;
		INIT_LIST_HEAD(&cic->queue_list);
		INIT_HLIST_NODE(&cic->cic_list);
		cic->dtor = cfq_free_io_context;
		cic->exit = cfq_exit_io_context;
		elv_ioc_count_inc(cfq_ioc_count);
	}

	return cic;
}

static void cfq_init_prio_data(struct cfq_queue *cfqq, struct io_context *ioc)
{
	struct task_struct *tsk = current;
	int ioprio_class;

	if (!cfq_cfqq_prio_changed(cfqq))
		return;

	ioprio_class = IOPRIO_PRIO_CLASS(ioc->ioprio);
	switch (ioprio_class) {
	default:
		printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
	case IOPRIO_CLASS_NONE:
		/*
		 * no prio set, inherit CPU scheduling settings
		 */
		cfqq->ioprio = task_nice_ioprio(tsk);
		cfqq->ioprio_class = task_nice_ioclass(tsk);
		break;
	case IOPRIO_CLASS_RT:
		cfqq->ioprio = task_ioprio(ioc);
		cfqq->ioprio_class = IOPRIO_CLASS_RT;
		break;
	case IOPRIO_CLASS_BE:
		cfqq->ioprio = task_ioprio(ioc);
		cfqq->ioprio_class = IOPRIO_CLASS_BE;
		break;
	case IOPRIO_CLASS_IDLE:
		cfqq->ioprio_class = IOPRIO_CLASS_IDLE;
		cfqq->ioprio = 7;
		cfq_clear_cfqq_idle_window(cfqq);
		break;
	}

	/*
	 * keep track of original prio settings in case we have to temporarily
	 * elevate the priority of this queue
	 */
	cfqq->org_ioprio = cfqq->ioprio;
	cfqq->org_ioprio_class = cfqq->ioprio_class;
	cfq_clear_cfqq_prio_changed(cfqq);
}

static void changed_ioprio(struct io_context *ioc, struct cfq_io_context *cic)
{
	struct cfq_data *cfqd = cic_to_cfqd(cic);
	struct cfq_queue *cfqq;
	unsigned long flags;

	if (unlikely(!cfqd))
		return;

	spin_lock_irqsave(cfqd->queue->queue_lock, flags);

	cfqq = cic->cfqq[BLK_RW_ASYNC];
	if (cfqq) {
		struct cfq_queue *new_cfqq;
		new_cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic->ioc,
					 GFP_ATOMIC);
		if (new_cfqq) {
			cic->cfqq[BLK_RW_ASYNC] = new_cfqq;
			cfq_put_queue(cfqq);
		}
	}

	cfqq = cic->cfqq[BLK_RW_SYNC];
	if (cfqq)
		cfq_mark_cfqq_prio_changed(cfqq);

	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
}

static void cfq_ioc_set_ioprio(struct io_context *ioc)
{
	call_for_each_cic(ioc, changed_ioprio);
	ioc->ioprio_changed = 0;
}

static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
			  pid_t pid, bool is_sync)
{
	RB_CLEAR_NODE(&cfqq->rb_node);
	RB_CLEAR_NODE(&cfqq->p_node);
	INIT_LIST_HEAD(&cfqq->fifo);

	cfqq->ref = 0;
	cfqq->cfqd = cfqd;

	cfq_mark_cfqq_prio_changed(cfqq);

	if (is_sync) {
		if (!cfq_class_idle(cfqq))
			cfq_mark_cfqq_idle_window(cfqq);
		cfq_mark_cfqq_sync(cfqq);
	}
	cfqq->pid = pid;
}

#ifdef CONFIG_CFQ_GROUP_IOSCHED
static void changed_cgroup(struct io_context *ioc, struct cfq_io_context *cic)
{
	struct cfq_queue *sync_cfqq = cic_to_cfqq(cic, 1);
	struct cfq_data *cfqd = cic_to_cfqd(cic);
	unsigned long flags;
	struct request_queue *q;

	if (unlikely(!cfqd))
		return;

	q = cfqd->queue;

	spin_lock_irqsave(q->queue_lock, flags);

	if (sync_cfqq) {
		/*
		 * Drop reference to sync queue. A new sync queue will be
		 * assigned in new group upon arrival of a fresh request.
		 */
		cfq_log_cfqq(cfqd, sync_cfqq, "changed cgroup");
		cic_set_cfqq(cic, NULL, 1);
		cfq_put_queue(sync_cfqq);
	}

	spin_unlock_irqrestore(q->queue_lock, flags);
}

static void cfq_ioc_set_cgroup(struct io_context *ioc)
{
	call_for_each_cic(ioc, changed_cgroup);
	ioc->cgroup_changed = 0;
}
#endif  /* CONFIG_CFQ_GROUP_IOSCHED */

static struct cfq_queue *
cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync,
		     struct io_context *ioc, gfp_t gfp_mask)
{
	struct cfq_queue *cfqq, *new_cfqq = NULL;
	struct cfq_io_context *cic;
	struct cfq_group *cfqg;

retry:
	cfqg = cfq_get_cfqg(cfqd);
	cic = cfq_cic_lookup(cfqd, ioc);
	/* cic always exists here */
	cfqq = cic_to_cfqq(cic, is_sync);

	/*
	 * Always try a new alloc if we fell back to the OOM cfqq
	 * originally, since it should just be a temporary situation.
	 */
	if (!cfqq || cfqq == &cfqd->oom_cfqq) {
		cfqq = NULL;
		if (new_cfqq) {
			cfqq = new_cfqq;
			new_cfqq = NULL;
		} else if (gfp_mask & __GFP_WAIT) {
			spin_unlock_irq(cfqd->queue->queue_lock);
			new_cfqq = kmem_cache_alloc_node(cfq_pool,
						gfp_mask | __GFP_ZERO,
						cfqd->queue->node);
			spin_lock_irq(cfqd->queue->queue_lock);
			if (new_cfqq)
				goto retry;
		} else {
			cfqq = kmem_cache_alloc_node(cfq_pool,
						gfp_mask | __GFP_ZERO,
						cfqd->queue->node);
		}

		if (cfqq) {
			cfq_init_cfqq(cfqd, cfqq, current->pid, is_sync);
			cfq_init_prio_data(cfqq, ioc);
			cfq_link_cfqq_cfqg(cfqq, cfqg);
			cfq_log_cfqq(cfqd, cfqq, "alloced");
		} else
			cfqq = &cfqd->oom_cfqq;
	}

	if (new_cfqq)
		kmem_cache_free(cfq_pool, new_cfqq);

	return cfqq;
}

static struct cfq_queue **
cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
{
	switch (ioprio_class) {
	case IOPRIO_CLASS_RT:
		return &cfqd->async_cfqq[0][ioprio];
	case IOPRIO_CLASS_BE:
		return &cfqd->async_cfqq[1][ioprio];
	case IOPRIO_CLASS_IDLE:
		return &cfqd->async_idle_cfqq;
	default:
		BUG();
	}
}

static struct cfq_queue *
cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct io_context *ioc,
	      gfp_t gfp_mask)
{
	const int ioprio = task_ioprio(ioc);
	const int ioprio_class = task_ioprio_class(ioc);
	struct cfq_queue **async_cfqq = NULL;
	struct cfq_queue *cfqq = NULL;

	if (!is_sync) {
		async_cfqq = cfq_async_queue_prio(cfqd, ioprio_class, ioprio);
		cfqq = *async_cfqq;
	}

	if (!cfqq)
		cfqq = cfq_find_alloc_queue(cfqd, is_sync, ioc, gfp_mask);

	/*
	 * pin the queue now that it's allocated, scheduler exit will prune it
	 */
	if (!is_sync && !(*async_cfqq)) {
		cfqq->ref++;
		*async_cfqq = cfqq;
	}

	cfqq->ref++;
	return cfqq;
}

/*
 * We drop cfq io contexts lazily, so we may find a dead one.
 */
static void
cfq_drop_dead_cic(struct cfq_data *cfqd, struct io_context *ioc,
		  struct cfq_io_context *cic)
{
	unsigned long flags;

	WARN_ON(!list_empty(&cic->queue_list));
	BUG_ON(cic->key != cfqd_dead_key(cfqd));

	spin_lock_irqsave(&ioc->lock, flags);

	BUG_ON(ioc->ioc_data == cic);

	radix_tree_delete(&ioc->radix_root, cfqd->cic_index);
	hlist_del_rcu(&cic->cic_list);
	spin_unlock_irqrestore(&ioc->lock, flags);

	cfq_cic_free(cic);
}

static struct cfq_io_context *
cfq_cic_lookup(struct cfq_data *cfqd, struct io_context *ioc)
{
	struct cfq_io_context *cic;
	unsigned long flags;

	if (unlikely(!ioc))
		return NULL;

	rcu_read_lock();

	/*
	 * we maintain a last-hit cache, to avoid browsing over the tree
	 */
	cic = rcu_dereference(ioc->ioc_data);
	if (cic && cic->key == cfqd) {
		rcu_read_unlock();
		return cic;
	}

	do {
		cic = radix_tree_lookup(&ioc->radix_root, cfqd->cic_index);
		rcu_read_unlock();
		if (!cic)
			break;
		if (unlikely(cic->key != cfqd)) {
			cfq_drop_dead_cic(cfqd, ioc, cic);
			rcu_read_lock();
			continue;
		}

		spin_lock_irqsave(&ioc->lock, flags);
		rcu_assign_pointer(ioc->ioc_data, cic);
		spin_unlock_irqrestore(&ioc->lock, flags);
		break;
	} while (1);

	return cic;
}

/*
 * Add cic into ioc, using cfqd as the search key. This enables us to lookup
 * the process specific cfq io context when entered from the block layer.
 * Also adds the cic to a per-cfqd list, used when this queue is removed.
 */
static int cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc,
			struct cfq_io_context *cic, gfp_t gfp_mask)
{
	unsigned long flags;
	int ret;

	ret = radix_tree_preload(gfp_mask);
	if (!ret) {
		cic->ioc = ioc;
		cic->key = cfqd;

		spin_lock_irqsave(&ioc->lock, flags);
		ret = radix_tree_insert(&ioc->radix_root,
					cfqd->cic_index, cic);
		if (!ret)
			hlist_add_head_rcu(&cic->cic_list, &ioc->cic_list);
		spin_unlock_irqrestore(&ioc->lock, flags);

		radix_tree_preload_end();

		if (!ret) {
			spin_lock_irqsave(cfqd->queue->queue_lock, flags);
			list_add(&cic->queue_list, &cfqd->cic_list);
			spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
		}
	}

	if (ret)
		printk(KERN_ERR "cfq: cic link failed!\n");

	return ret;
}

/*
 * Setup general io context and cfq io context. There can be several cfq
 * io contexts per general io context, if this process is doing io to more
 * than one device managed by cfq.
 */
static struct cfq_io_context *
cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
{
	struct io_context *ioc = NULL;
	struct cfq_io_context *cic;

	might_sleep_if(gfp_mask & __GFP_WAIT);

	ioc = get_io_context(gfp_mask, cfqd->queue->node);
	if (!ioc)
		return NULL;

	cic = cfq_cic_lookup(cfqd, ioc);
	if (cic)
		goto out;

	cic = cfq_alloc_io_context(cfqd, gfp_mask);
	if (cic == NULL)
		goto err;

	if (cfq_cic_link(cfqd, ioc, cic, gfp_mask))
		goto err_free;

out:
	smp_read_barrier_depends();
	if (unlikely(ioc->ioprio_changed))
		cfq_ioc_set_ioprio(ioc);

#ifdef CONFIG_CFQ_GROUP_IOSCHED
	if (unlikely(ioc->cgroup_changed))
		cfq_ioc_set_cgroup(ioc);
#endif
	return cic;
err_free:
	cfq_cic_free(cic);
err:
	put_io_context(ioc);
	return NULL;
}

static void
cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_io_context *cic)
{
	unsigned long elapsed = jiffies - cic->last_end_request;
	unsigned long ttime = min(elapsed, 2UL * cfqd->cfq_slice_idle);

	cic->ttime_samples = (7*cic->ttime_samples + 256) / 8;
	cic->ttime_total = (7*cic->ttime_total + 256*ttime) / 8;
	cic->ttime_mean = (cic->ttime_total + 128) / cic->ttime_samples;
}
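
/*
 * The three updates above form a fixed-point exponential moving
 * average with weight 1/8, scaled by 256 for fractional precision.
 * Worked example from zero: one sample gives ttime_samples = 256 / 8 =
 * 32; a single 10-jiffy think time gives ttime_total = 2560 / 8 = 320,
 * so ttime_mean = (320 + 128) / 32 = 14 (the +128 rounds to nearest).
 * With more samples the mean converges toward the true average.
 */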

static void
cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		       struct request *rq)
{
	sector_t sdist = 0;
	sector_t n_sec = blk_rq_sectors(rq);

	if (cfqq->last_request_pos) {
		if (cfqq->last_request_pos < blk_rq_pos(rq))
			sdist = blk_rq_pos(rq) - cfqq->last_request_pos;
		else
			sdist = cfqq->last_request_pos - blk_rq_pos(rq);
	}

	cfqq->seek_history <<= 1;
	if (blk_queue_nonrot(cfqd->queue))
		cfqq->seek_history |= (n_sec < CFQQ_SECT_THR_NONROT);
	else
		cfqq->seek_history |= (sdist > CFQQ_SEEK_THR);
}
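
/*
 * seek_history is a 32-bit shift register of per-request verdicts: a
 * set bit means that request looked seeky (distance above
 * CFQQ_SEEK_THR, or, on non-rotational media, a small request below
 * CFQQ_SECT_THR_NONROT). CFQQ_SEEKY() then triggers once more than
 * 32 / 8 = 4 of the last 32 requests were seeky.
 */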

/*
 * Disable idle window if the process thinks too long or seeks so much that
 * it doesn't matter
 */
static void
cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		       struct cfq_io_context *cic)
{
	int old_idle, enable_idle;

	/*
	 * Don't idle for async or idle io prio class
	 */
	if (!cfq_cfqq_sync(cfqq) || cfq_class_idle(cfqq))
		return;

	enable_idle = old_idle = cfq_cfqq_idle_window(cfqq);

	if (cfqq->queued[0] + cfqq->queued[1] >= 4)
		cfq_mark_cfqq_deep(cfqq);

	if (cfqq->next_rq && (cfqq->next_rq->cmd_flags & REQ_NOIDLE))
		enable_idle = 0;
	else if (!atomic_read(&cic->ioc->nr_tasks) || !cfqd->cfq_slice_idle ||
		 (!cfq_cfqq_deep(cfqq) && CFQQ_SEEKY(cfqq)))
		enable_idle = 0;
	else if (sample_valid(cic->ttime_samples)) {
		if (cic->ttime_mean > cfqd->cfq_slice_idle)
			enable_idle = 0;
		else
			enable_idle = 1;
	}

	if (old_idle != enable_idle) {
		cfq_log_cfqq(cfqd, cfqq, "idle=%d", enable_idle);
		if (enable_idle)
			cfq_mark_cfqq_idle_window(cfqq);
		else
			cfq_clear_cfqq_idle_window(cfqq);
	}
}

/*
 * Check if new_cfqq should preempt the currently active queue. Returns false
 * for no (or if we aren't sure); returning true will cause a preempt.
 */
static bool
cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
		   struct request *rq)
{
	struct cfq_queue *cfqq;

	cfqq = cfqd->active_queue;
	if (!cfqq)
		return false;

	if (cfq_class_idle(new_cfqq))
		return false;

	if (cfq_class_idle(cfqq))
		return true;

	/*
	 * Don't allow a non-RT request to preempt an ongoing RT cfqq timeslice.
	 */
	if (cfq_class_rt(cfqq) && !cfq_class_rt(new_cfqq))
		return false;

	/*
	 * if the new request is sync, but the currently running queue is
	 * not, let the sync request have priority.
	 */
	if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
		return true;

	if (new_cfqq->cfqg != cfqq->cfqg)
		return false;

	if (cfq_slice_used(cfqq))
		return true;

	/* Allow preemption only if we are idling on the sync-noidle tree */
	if (cfqd->serving_type == SYNC_NOIDLE_WORKLOAD &&
	    cfqq_type(new_cfqq) == SYNC_NOIDLE_WORKLOAD &&
	    new_cfqq->service_tree->count == 2 &&
	    RB_EMPTY_ROOT(&cfqq->sort_list))
		return true;

	/*
	 * So both queues are sync. Let the new request get disk time if
	 * it's a metadata request and the current queue is doing regular IO.
	 */
	if ((rq->cmd_flags & REQ_META) && !cfqq->meta_pending)
		return true;

	/*
	 * Allow an RT request to preempt an ongoing non-RT cfqq timeslice.
	 */
	if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq))
		return true;

	/* An idle queue should not be idle now for some reason */
	if (RB_EMPTY_ROOT(&cfqq->sort_list) && !cfq_should_idle(cfqd, cfqq))
		return true;

	if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
		return false;

	/*
	 * if this request is as-good as one we would expect from the
	 * current cfqq, let it preempt
	 */
	if (cfq_rq_close(cfqd, cfqq, rq))
		return true;

	return false;
}

/*
 * cfqq preempts the active queue. if we allowed preempt with no slice left,
 * let it have half of its nominal slice.
 */
static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	struct cfq_queue *old_cfqq = cfqd->active_queue;

	cfq_log_cfqq(cfqd, cfqq, "preempt");
	cfq_slice_expired(cfqd, 1);

	/*
	 * workload type is changed, don't save slice, otherwise preempt
	 * doesn't happen
	 */
	if (cfqq_type(old_cfqq) != cfqq_type(cfqq))
		cfqq->cfqg->saved_workload_slice = 0;

	/*
	 * Put the new queue at the front of the current list,
	 * so we know that it will be selected next.
	 */
	BUG_ON(!cfq_cfqq_on_rr(cfqq));

	cfq_service_tree_add(cfqd, cfqq, 1);

	cfqq->slice_end = 0;
	cfq_mark_cfqq_slice_new(cfqq);
}
3390
22e2c507 3391/*
5e705374 3392 * Called when a new fs request (rq) is added (to cfqq). Check if there's
22e2c507
JA
3393 * something we should do about it
3394 */
3395static void
5e705374
JA
3396cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3397 struct request *rq)
22e2c507 3398{
5e705374 3399 struct cfq_io_context *cic = RQ_CIC(rq);
12e9fddd 3400
45333d5a 3401 cfqd->rq_queued++;
7b6d91da 3402 if (rq->cmd_flags & REQ_META)
374f84ac
JA
3403 cfqq->meta_pending++;
3404
9c2c38a1 3405 cfq_update_io_thinktime(cfqd, cic);
b2c18e1e 3406 cfq_update_io_seektime(cfqd, cfqq, rq);
9c2c38a1
JA
3407 cfq_update_idle_window(cfqd, cfqq, cic);
3408
b2c18e1e 3409 cfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
22e2c507
JA
3410
3411 if (cfqq == cfqd->active_queue) {
3412 /*
b029195d
JA
3413 * Remember that we saw a request from this process, but
3414 * don't start queuing just yet. Otherwise we risk seeing lots
3415 * of tiny requests, because we disrupt the normal plugging
d6ceb25e
JA
3416 * and merging. If the request is already larger than a single
3417 * page, let it rip immediately. For that case we assume that
2d870722
JA
3418 * merging is already done. Ditto for a busy system that
3419 * has other work pending: don't risk delaying until the
3420 * idle timer unplugs to continue working.
22e2c507 3421 */
d6ceb25e 3422 if (cfq_cfqq_wait_request(cfqq)) {
2d870722
JA
3423 if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE ||
3424 cfqd->busy_queues > 1) {
812df48d 3425 cfq_del_timer(cfqd, cfqq);
554554f6 3426 cfq_clear_cfqq_wait_request(cfqq);
24ecfbe2 3427 __blk_run_queue(cfqd->queue);
a11cdaa7 3428 } else {
e98ef89b 3429 cfq_blkiocg_update_idle_time_stats(
a11cdaa7 3430 &cfqq->cfqg->blkg);
bf791937 3431 cfq_mark_cfqq_must_dispatch(cfqq);
a11cdaa7 3432 }
d6ceb25e 3433 }
5e705374 3434 } else if (cfq_should_preempt(cfqd, cfqq, rq)) {
22e2c507
JA
3435 /*
3436 * not the active queue - expire current slice if it is
3437 * idle and has expired its mean thinktime, or this new queue
3a9a3f6c
DS
3438 * has some old slice time left and is of higher priority or
3439 * this new queue is RT and the current one is BE
22e2c507
JA
3440 */
3441 cfq_preempt_queue(cfqd, cfqq);
24ecfbe2 3442 __blk_run_queue(cfqd->queue);
22e2c507 3443 }
1da177e4
LT
3444}
3445
165125e1 3446static void cfq_insert_request(struct request_queue *q, struct request *rq)
1da177e4 3447{
b4878f24 3448 struct cfq_data *cfqd = q->elevator->elevator_data;
5e705374 3449 struct cfq_queue *cfqq = RQ_CFQQ(rq);
22e2c507 3450
7b679138 3451 cfq_log_cfqq(cfqd, cfqq, "insert_request");
fd0928df 3452 cfq_init_prio_data(cfqq, RQ_CIC(rq)->ioc);
1da177e4 3453
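	/*
	 * Stamp the request with its FIFO deadline; sync and async requests
	 * use different fifo_expire tunables.
	 */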
30996f40 3454 rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]);
22e2c507 3455 list_add_tail(&rq->queuelist, &cfqq->fifo);
aa6f6a3d 3456 cfq_add_rq_rb(rq);
e98ef89b 3457 cfq_blkiocg_update_io_add_stats(&(RQ_CFQG(rq))->blkg,
cdc1184c
DS
3458 &cfqd->serving_group->blkg, rq_data_dir(rq),
3459 rq_is_sync(rq));
5e705374 3460 cfq_rq_enqueued(cfqd, cfqq, rq);
1da177e4
LT
3461}
3462
45333d5a
AC
3463/*
3464 * Update hw_tag based on peak queue depth over 50 samples under
3465 * sufficient load.
3466 */
3467static void cfq_update_hw_tag(struct cfq_data *cfqd)
3468{
1a1238a7
SL
3469 struct cfq_queue *cfqq = cfqd->active_queue;
3470
53c583d2
CZ
3471 if (cfqd->rq_in_driver > cfqd->hw_tag_est_depth)
3472 cfqd->hw_tag_est_depth = cfqd->rq_in_driver;
e459dd08
CZ
3473
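	/* hw_tag == 1 is sticky: once the device has shown real queueing, keep it */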
3474 if (cfqd->hw_tag == 1)
3475 return;
45333d5a
AC
3476
3477 if (cfqd->rq_queued <= CFQ_HW_QUEUE_MIN &&
53c583d2 3478 cfqd->rq_in_driver <= CFQ_HW_QUEUE_MIN)
45333d5a
AC
3479 return;
3480
1a1238a7
SL
3481 /*
3482 * If the active queue doesn't have enough requests and can idle, cfq might not
3483 * dispatch sufficient requests to hardware. Don't zero hw_tag in this
3484 * case
3485 */
3486 if (cfqq && cfq_cfqq_idle_window(cfqq) &&
3487 cfqq->dispatched + cfqq->queued[0] + cfqq->queued[1] <
53c583d2 3488 CFQ_HW_QUEUE_MIN && cfqd->rq_in_driver < CFQ_HW_QUEUE_MIN)
1a1238a7
SL
3489 return;
3490
45333d5a
AC
3491 if (cfqd->hw_tag_samples++ < 50)
3492 return;
3493
e459dd08 3494 if (cfqd->hw_tag_est_depth >= CFQ_HW_QUEUE_MIN)
45333d5a
AC
3495 cfqd->hw_tag = 1;
3496 else
3497 cfqd->hw_tag = 0;
45333d5a
AC
3498}
3499
7667aa06
VG
3500static bool cfq_should_wait_busy(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3501{
3502 struct cfq_io_context *cic = cfqd->active_cic;
3503
02a8f01b
JT
3504 /* If the queue already has requests, don't wait */
3505 if (!RB_EMPTY_ROOT(&cfqq->sort_list))
3506 return false;
3507
7667aa06
VG
3508 /* If there are other queues in the group, don't wait */
3509 if (cfqq->cfqg->nr_cfqq > 1)
3510 return false;
3511
3512 if (cfq_slice_used(cfqq))
3513 return true;
3514
3515 /* if the remaining slice is shorter than the think time, wait busy */
3516 if (cic && sample_valid(cic->ttime_samples)
3517 && (cfqq->slice_end - jiffies < cic->ttime_mean))
3518 return true;
3519
3520 /*
3521 * If the think time is less than a jiffy then ttime_mean = 0 and the
3522 * above will not be true. It might happen that the slice has not expired yet
3523 * but will expire soon (4-5 ns) during select_queue(). To cover the
3524 * case where think time is less than a jiffy, mark the queue wait
3525 * busy if only 1 jiffy is left in the slice.
3526 */
3527 if (cfqq->slice_end - jiffies == 1)
3528 return true;
3529
3530 return false;
3531}
3532
165125e1 3533static void cfq_completed_request(struct request_queue *q, struct request *rq)
1da177e4 3534{
5e705374 3535 struct cfq_queue *cfqq = RQ_CFQQ(rq);
b4878f24 3536 struct cfq_data *cfqd = cfqq->cfqd;
5380a101 3537 const int sync = rq_is_sync(rq);
b4878f24 3538 unsigned long now;
1da177e4 3539
b4878f24 3540 now = jiffies;
33659ebb
CH
3541 cfq_log_cfqq(cfqd, cfqq, "complete rqnoidle %d",
3542 !!(rq->cmd_flags & REQ_NOIDLE));
1da177e4 3543
45333d5a
AC
3544 cfq_update_hw_tag(cfqd);
3545
53c583d2 3546 WARN_ON(!cfqd->rq_in_driver);
6d048f53 3547 WARN_ON(!cfqq->dispatched);
53c583d2 3548 cfqd->rq_in_driver--;
6d048f53 3549 cfqq->dispatched--;
80bdf0c7 3550 (RQ_CFQG(rq))->dispatched--;
e98ef89b
VG
3551 cfq_blkiocg_update_completion_stats(&cfqq->cfqg->blkg,
3552 rq_start_time_ns(rq), rq_io_start_time_ns(rq),
3553 rq_data_dir(rq), rq_is_sync(rq));
1da177e4 3554
53c583d2 3555 cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]--;
3ed9a296 3556
365722bb 3557 if (sync) {
5e705374 3558 RQ_CIC(rq)->last_end_request = now;
573412b2
CZ
3559 if (!time_after(rq->start_time + cfqd->cfq_fifo_expire[1], now))
3560 cfqd->last_delayed_sync = now;
365722bb 3561 }
caaa5f9f
JA
3562
3563 /*
3564 * If this is the active queue, check if it needs to be expired,
3565 * or if we want to idle in case it has no pending requests.
3566 */
3567 if (cfqd->active_queue == cfqq) {
a36e71f9
JA
3568 const bool cfqq_empty = RB_EMPTY_ROOT(&cfqq->sort_list);
3569
44f7c160
JA
3570 if (cfq_cfqq_slice_new(cfqq)) {
3571 cfq_set_prio_slice(cfqd, cfqq);
3572 cfq_clear_cfqq_slice_new(cfqq);
3573 }
f75edf2d
VG
3574
3575 /*
7667aa06
VG
3576 * Should we wait for the next request to come in before we expire
3577 * the queue?
f75edf2d 3578 */
7667aa06 3579 if (cfq_should_wait_busy(cfqd, cfqq)) {
80bdf0c7
VG
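			/*
			 * Extend the slice by one idle window so the queue has
			 * a chance to receive its next request before expiry;
			 * fall back to group_idle when slice_idle is disabled.
			 */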
3580 unsigned long extend_sl = cfqd->cfq_slice_idle;
3581 if (!cfqd->cfq_slice_idle)
3582 extend_sl = cfqd->cfq_group_idle;
3583 cfqq->slice_end = jiffies + extend_sl;
f75edf2d 3584 cfq_mark_cfqq_wait_busy(cfqq);
b1ffe737 3585 cfq_log_cfqq(cfqd, cfqq, "will busy wait");
f75edf2d
VG
3586 }
3587
a36e71f9 3588 /*
8e550632
CZ
3589 * Idling is not enabled on:
3590 * - expired queues
3591 * - idle-priority queues
3592 * - async queues
3593 * - queues with still some requests queued
3594 * - when there is a close cooperator
a36e71f9 3595 */
0871714e 3596 if (cfq_slice_used(cfqq) || cfq_class_idle(cfqq))
e5ff082e 3597 cfq_slice_expired(cfqd, 1);
8e550632
CZ
3598 else if (sync && cfqq_empty &&
3599 !cfq_close_cooperator(cfqd, cfqq)) {
749ef9f8 3600 cfq_arm_slice_timer(cfqd);
8e550632 3601 }
caaa5f9f 3602 }
6d048f53 3603
53c583d2 3604 if (!cfqd->rq_in_driver)
23e018a1 3605 cfq_schedule_dispatch(cfqd);
1da177e4
LT
3606}
3607
22e2c507
JA
3608/*
3609 * we temporarily boost lower priority queues if they are holding fs exclusive
3610 * resources. they are boosted to normal prio (CLASS_BE/4)
3611 */
3612static void cfq_prio_boost(struct cfq_queue *cfqq)
1da177e4 3613{
22e2c507
JA
3614 if (has_fs_excl()) {
3615 /*
3616 * boost idle prio on transactions that would lock out other
3617 * users of the filesystem
3618 */
3619 if (cfq_class_idle(cfqq))
3620 cfqq->ioprio_class = IOPRIO_CLASS_BE;
3621 if (cfqq->ioprio > IOPRIO_NORM)
3622 cfqq->ioprio = IOPRIO_NORM;
3623 } else {
3624 /*
dddb7451 3625 * unboost the queue (if needed)
22e2c507 3626 */
dddb7451
CZ
3627 cfqq->ioprio_class = cfqq->org_ioprio_class;
3628 cfqq->ioprio = cfqq->org_ioprio;
22e2c507 3629 }
22e2c507 3630}
1da177e4 3631
89850f7e 3632static inline int __cfq_may_queue(struct cfq_queue *cfqq)
22e2c507 3633{
1b379d8d 3634 if (cfq_cfqq_wait_request(cfqq) && !cfq_cfqq_must_alloc_slice(cfqq)) {
3b18152c 3635 cfq_mark_cfqq_must_alloc_slice(cfqq);
22e2c507 3636 return ELV_MQUEUE_MUST;
3b18152c 3637 }
1da177e4 3638
22e2c507 3639 return ELV_MQUEUE_MAY;
22e2c507
JA
3640}
3641
165125e1 3642static int cfq_may_queue(struct request_queue *q, int rw)
22e2c507
JA
3643{
3644 struct cfq_data *cfqd = q->elevator->elevator_data;
3645 struct task_struct *tsk = current;
91fac317 3646 struct cfq_io_context *cic;
22e2c507
JA
3647 struct cfq_queue *cfqq;
3648
3649 /*
3650 * don't force setup of a queue from here, as a call to may_queue
3651 * does not necessarily imply that a request actually will be queued.
3652 * so just look up a possibly existing queue, or return 'may queue'
3653 * if that fails
3654 */
4ac845a2 3655 cic = cfq_cic_lookup(cfqd, tsk->io_context);
91fac317
VT
3656 if (!cic)
3657 return ELV_MQUEUE_MAY;
3658
b0b78f81 3659 cfqq = cic_to_cfqq(cic, rw_is_sync(rw));
22e2c507 3660 if (cfqq) {
fd0928df 3661 cfq_init_prio_data(cfqq, cic->ioc);
22e2c507
JA
3662 cfq_prio_boost(cfqq);
3663
89850f7e 3664 return __cfq_may_queue(cfqq);
22e2c507
JA
3665 }
3666
3667 return ELV_MQUEUE_MAY;
1da177e4
LT
3668}
3669
1da177e4
LT
3670/*
3671 * queue lock held here
3672 */
bb37b94c 3673static void cfq_put_request(struct request *rq)
1da177e4 3674{
5e705374 3675 struct cfq_queue *cfqq = RQ_CFQQ(rq);
1da177e4 3676
5e705374 3677 if (cfqq) {
22e2c507 3678 const int rw = rq_data_dir(rq);
1da177e4 3679
22e2c507
JA
3680 BUG_ON(!cfqq->allocated[rw]);
3681 cfqq->allocated[rw]--;
1da177e4 3682
5e705374 3683 put_io_context(RQ_CIC(rq)->ioc);
1da177e4 3684
c186794d
MS
3685 rq->elevator_private[0] = NULL;
3686 rq->elevator_private[1] = NULL;
1da177e4 3687
7f1dc8a2
VG
3688 /* Put down rq reference on cfqg */
3689 cfq_put_cfqg(RQ_CFQG(rq));
c186794d 3690 rq->elevator_private[2] = NULL;
7f1dc8a2 3691
1da177e4
LT
3692 cfq_put_queue(cfqq);
3693 }
3694}
3695
df5fe3e8
JM
3696static struct cfq_queue *
3697cfq_merge_cfqqs(struct cfq_data *cfqd, struct cfq_io_context *cic,
3698 struct cfq_queue *cfqq)
3699{
3700 cfq_log_cfqq(cfqd, cfqq, "merging with queue %p", cfqq->new_cfqq);
3701 cic_set_cfqq(cic, cfqq->new_cfqq, 1);
b3b6d040 3702 cfq_mark_cfqq_coop(cfqq->new_cfqq);
df5fe3e8
JM
3703 cfq_put_queue(cfqq);
3704 return cic_to_cfqq(cic, 1);
3705}
3706
e6c5bc73
JM
3707/*
3708 * Returns NULL if a new cfqq should be allocated, or the old cfqq if this
3709 * was the last process referring to said cfqq.
3710 */
3711static struct cfq_queue *
3712split_cfqq(struct cfq_io_context *cic, struct cfq_queue *cfqq)
3713{
3714 if (cfqq_process_refs(cfqq) == 1) {
e6c5bc73
JM
3715 cfqq->pid = current->pid;
3716 cfq_clear_cfqq_coop(cfqq);
ae54abed 3717 cfq_clear_cfqq_split_coop(cfqq);
e6c5bc73
JM
3718 return cfqq;
3719 }
3720
3721 cic_set_cfqq(cic, NULL, 1);
d02a2c07
SL
3722
3723 cfq_put_cooperator(cfqq);
3724
e6c5bc73
JM
3725 cfq_put_queue(cfqq);
3726 return NULL;
3727}
1da177e4 3728/*
22e2c507 3729 * Allocate cfq data structures associated with this request.
1da177e4 3730 */
22e2c507 3731static int
165125e1 3732cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
1da177e4
LT
3733{
3734 struct cfq_data *cfqd = q->elevator->elevator_data;
3735 struct cfq_io_context *cic;
3736 const int rw = rq_data_dir(rq);
a6151c3a 3737 const bool is_sync = rq_is_sync(rq);
22e2c507 3738 struct cfq_queue *cfqq;
1da177e4
LT
3739 unsigned long flags;
3740
3741 might_sleep_if(gfp_mask & __GFP_WAIT);
3742
e2d74ac0 3743 cic = cfq_get_io_context(cfqd, gfp_mask);
22e2c507 3744
1da177e4
LT
3745 spin_lock_irqsave(q->queue_lock, flags);
3746
22e2c507
JA
3747 if (!cic)
3748 goto queue_fail;
3749
e6c5bc73 3750new_queue:
91fac317 3751 cfqq = cic_to_cfqq(cic, is_sync);
32f2e807 3752 if (!cfqq || cfqq == &cfqd->oom_cfqq) {
fd0928df 3753 cfqq = cfq_get_queue(cfqd, is_sync, cic->ioc, gfp_mask);
91fac317 3754 cic_set_cfqq(cic, cfqq, is_sync);
df5fe3e8 3755 } else {
e6c5bc73
JM
3756 /*
3757 * If the queue was seeky for too long, break it apart.
3758 */
ae54abed 3759 if (cfq_cfqq_coop(cfqq) && cfq_cfqq_split_coop(cfqq)) {
e6c5bc73
JM
3760 cfq_log_cfqq(cfqd, cfqq, "breaking apart cfqq");
3761 cfqq = split_cfqq(cic, cfqq);
3762 if (!cfqq)
3763 goto new_queue;
3764 }
3765
df5fe3e8
JM
3766 /*
3767 * Check to see if this queue is scheduled to merge with
3768 * another, closely cooperating queue. The merging of
3769 * queues happens here as it must be done in process context.
3770 * The reference on new_cfqq was taken in merge_cfqqs.
3771 */
3772 if (cfqq->new_cfqq)
3773 cfqq = cfq_merge_cfqqs(cfqd, cic, cfqq);
91fac317 3774 }
1da177e4
LT
3775
3776 cfqq->allocated[rw]++;
1da177e4 3777
6fae9c25 3778 cfqq->ref++;
c186794d
MS
3779 rq->elevator_private[0] = cic;
3780 rq->elevator_private[1] = cfqq;
3781 rq->elevator_private[2] = cfq_ref_get_cfqg(cfqq->cfqg);
93803e01 3782 spin_unlock_irqrestore(q->queue_lock, flags);
5e705374 3783 return 0;
1da177e4 3784
22e2c507
JA
3785queue_fail:
3786 if (cic)
3787 put_io_context(cic->ioc);
89850f7e 3788
23e018a1 3789 cfq_schedule_dispatch(cfqd);
1da177e4 3790 spin_unlock_irqrestore(q->queue_lock, flags);
7b679138 3791 cfq_log(cfqd, "set_request fail");
1da177e4
LT
3792 return 1;
3793}
3794
65f27f38 3795static void cfq_kick_queue(struct work_struct *work)
22e2c507 3796{
65f27f38 3797 struct cfq_data *cfqd =
23e018a1 3798 container_of(work, struct cfq_data, unplug_work);
165125e1 3799 struct request_queue *q = cfqd->queue;
22e2c507 3800
40bb54d1 3801 spin_lock_irq(q->queue_lock);
24ecfbe2 3802 __blk_run_queue(cfqd->queue);
40bb54d1 3803 spin_unlock_irq(q->queue_lock);
22e2c507
JA
3804}
3805
3806/*
3807 * Timer running if the active_queue is currently idling inside its time slice
3808 */
3809static void cfq_idle_slice_timer(unsigned long data)
3810{
3811 struct cfq_data *cfqd = (struct cfq_data *) data;
3812 struct cfq_queue *cfqq;
3813 unsigned long flags;
3c6bd2f8 3814 int timed_out = 1;
22e2c507 3815
7b679138
JA
3816 cfq_log(cfqd, "idle timer fired");
3817
22e2c507
JA
3818 spin_lock_irqsave(cfqd->queue->queue_lock, flags);
3819
fe094d98
JA
3820 cfqq = cfqd->active_queue;
3821 if (cfqq) {
3c6bd2f8
JA
3822 timed_out = 0;
3823
b029195d
JA
3824 /*
3825 * We saw a request before the queue expired, let it through
3826 */
3827 if (cfq_cfqq_must_dispatch(cfqq))
3828 goto out_kick;
3829
22e2c507
JA
3830 /*
3831 * expired
3832 */
44f7c160 3833 if (cfq_slice_used(cfqq))
22e2c507
JA
3834 goto expire;
3835
3836 /*
3837 * only expire and reinvoke request handler, if there are
3838 * other queues with pending requests
3839 */
caaa5f9f 3840 if (!cfqd->busy_queues)
22e2c507 3841 goto out_cont;
22e2c507
JA
3842
3843 /*
3844 * not expired and it has a request pending, let it dispatch
3845 */
75e50984 3846 if (!RB_EMPTY_ROOT(&cfqq->sort_list))
22e2c507 3847 goto out_kick;
76280aff
CZ
3848
3849 /*
3850 * The queue depth ("deep") flag is reset only when idling didn't succeed
3851 */
3852 cfq_clear_cfqq_deep(cfqq);
22e2c507
JA
3853 }
3854expire:
e5ff082e 3855 cfq_slice_expired(cfqd, timed_out);
22e2c507 3856out_kick:
23e018a1 3857 cfq_schedule_dispatch(cfqd);
22e2c507
JA
3858out_cont:
3859 spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
3860}
3861
3b18152c
JA
3862static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
3863{
3864 del_timer_sync(&cfqd->idle_slice_timer);
23e018a1 3865 cancel_work_sync(&cfqd->unplug_work);
3b18152c 3866}
22e2c507 3867
c2dea2d1
VT
3868static void cfq_put_async_queues(struct cfq_data *cfqd)
3869{
3870 int i;
3871
3872 for (i = 0; i < IOPRIO_BE_NR; i++) {
3873 if (cfqd->async_cfqq[0][i])
3874 cfq_put_queue(cfqd->async_cfqq[0][i]);
3875 if (cfqd->async_cfqq[1][i])
3876 cfq_put_queue(cfqd->async_cfqq[1][i]);
c2dea2d1 3877 }
2389d1ef
ON
3878
3879 if (cfqd->async_idle_cfqq)
3880 cfq_put_queue(cfqd->async_idle_cfqq);
c2dea2d1
VT
3881}
3882
b374d18a 3883static void cfq_exit_queue(struct elevator_queue *e)
1da177e4 3884{
22e2c507 3885 struct cfq_data *cfqd = e->elevator_data;
165125e1 3886 struct request_queue *q = cfqd->queue;
56edf7d7 3887 bool wait = false;
22e2c507 3888
3b18152c 3889 cfq_shutdown_timer_wq(cfqd);
e2d74ac0 3890
d9ff4187 3891 spin_lock_irq(q->queue_lock);
e2d74ac0 3892
d9ff4187 3893 if (cfqd->active_queue)
e5ff082e 3894 __cfq_slice_expired(cfqd, cfqd->active_queue, 0);
e2d74ac0
JA
3895
3896 while (!list_empty(&cfqd->cic_list)) {
d9ff4187
AV
3897 struct cfq_io_context *cic = list_entry(cfqd->cic_list.next,
3898 struct cfq_io_context,
3899 queue_list);
89850f7e
JA
3900
3901 __cfq_exit_single_io_context(cfqd, cic);
d9ff4187 3902 }
e2d74ac0 3903
c2dea2d1 3904 cfq_put_async_queues(cfqd);
b1c35769 3905 cfq_release_cfq_groups(cfqd);
56edf7d7
VG
3906
3907 /*
3908 * If there are groups which we could not unlink from blkcg list,
3909 * wait for an RCU grace period for them to be freed.
3910 */
3911 if (cfqd->nr_blkcg_linked_grps)
3912 wait = true;
15c31be4 3913
d9ff4187 3914 spin_unlock_irq(q->queue_lock);
a90d742e
AV
3915
3916 cfq_shutdown_timer_wq(cfqd);
3917
80b15c73
KK
3918 spin_lock(&cic_index_lock);
3919 ida_remove(&cic_index_ida, cfqd->cic_index);
3920 spin_unlock(&cic_index_lock);
3921
56edf7d7
VG
3922 /*
3923 * Wait for cfqg->blkg->key accessors to exit their grace periods.
3924 * Do this wait only if there are other unlinked groups out
3925 * there. This can happen if the cgroup deletion path claimed the
3926 * responsibility of cleaning up a group before the queue cleanup code
3927 * got to the group.
3928 *
3929 * Do not call synchronize_rcu() unconditionally as there are drivers
3931 * which create/delete request queues hundreds of times during scan/boot
3931 * and synchronize_rcu() can take significant time and slow down boot.
3932 */
3933 if (wait)
3934 synchronize_rcu();
3935 kfree(cfqd);
1da177e4
LT
3936}
3937
80b15c73
KK
3938static int cfq_alloc_cic_index(void)
3939{
3940 int index, error;
3941
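	/*
	 * Old-style ida idiom: preallocate a layer with ida_pre_get(), then
	 * retry ida_get_new() for as long as it returns -EAGAIN (the
	 * preallocated memory was consumed by a concurrent allocator).
	 */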
3942 do {
3943 if (!ida_pre_get(&cic_index_ida, GFP_KERNEL))
3944 return -ENOMEM;
3945
3946 spin_lock(&cic_index_lock);
3947 error = ida_get_new(&cic_index_ida, &index);
3948 spin_unlock(&cic_index_lock);
3949 if (error && error != -EAGAIN)
3950 return error;
3951 } while (error);
3952
3953 return index;
3954}
3955
165125e1 3956static void *cfq_init_queue(struct request_queue *q)
1da177e4
LT
3957{
3958 struct cfq_data *cfqd;
718eee05 3959 int i, j;
cdb16e8f 3960 struct cfq_group *cfqg;
615f0259 3961 struct cfq_rb_root *st;
1da177e4 3962
80b15c73
KK
3963 i = cfq_alloc_cic_index();
3964 if (i < 0)
3965 return NULL;
3966
94f6030c 3967 cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
1da177e4 3968 if (!cfqd)
bc1c1169 3969 return NULL;
1da177e4 3970
30d7b944
SL
3971 /*
3972 * No need to take queue_lock in this routine, since we are
3973 * initializing the ioscheduler, and nobody is using cfqd
3974 */
80b15c73
KK
3975 cfqd->cic_index = i;
3976
1fa8f6d6
VG
3977 /* Init root service tree */
3978 cfqd->grp_service_tree = CFQ_RB_ROOT;
3979
cdb16e8f
VG
3980 /* Init root group */
3981 cfqg = &cfqd->root_group;
615f0259
VG
3982 for_each_cfqg_st(cfqg, i, j, st)
3983 *st = CFQ_RB_ROOT;
1fa8f6d6 3984 RB_CLEAR_NODE(&cfqg->rb_node);
26a2ac00 3985
25bc6b07
VG
3986 /* Give preference to root group over other groups */
3987 cfqg->weight = 2*BLKIO_WEIGHT_DEFAULT;
3988
25fb5169 3989#ifdef CONFIG_CFQ_GROUP_IOSCHED
b1c35769 3990 /*
56edf7d7
VG
3991 * Set root group reference to 2. One reference will be dropped when
3992 * all groups on cfqd->cfqg_list are being deleted during queue exit.
3993 * The other reference will remain there as we don't want to delete this
3994 * group: it is statically allocated and gets destroyed when
3995 * cfq_data goes away.
b1c35769 3996 */
56edf7d7 3997 cfqg->ref = 2;
dcf097b2 3998 rcu_read_lock();
e98ef89b
VG
3999 cfq_blkiocg_add_blkio_group(&blkio_root_cgroup, &cfqg->blkg,
4000 (void *)cfqd, 0);
dcf097b2 4001 rcu_read_unlock();
56edf7d7
VG
4002 cfqd->nr_blkcg_linked_grps++;
4003
4004 /* Add group on cfqd->cfqg_list */
4005 hlist_add_head(&cfqg->cfqd_node, &cfqd->cfqg_list);
25fb5169 4006#endif
26a2ac00
JA
4007 /*
4008 * Not strictly needed (since RB_ROOT just clears the node and we
4009 * zeroed cfqd on alloc), but better be safe in case someone decides
4010 * to add magic to the rb code
4011 */
4012 for (i = 0; i < CFQ_PRIO_LISTS; i++)
4013 cfqd->prio_trees[i] = RB_ROOT;
4014
6118b70b
JA
4015 /*
4016 * Our fallback cfqq if cfq_find_alloc_queue() runs into OOM issues.
4017 * Grab a permanent reference to it, so that the normal code flow
4018 * will not attempt to free it.
4019 */
4020 cfq_init_cfqq(cfqd, &cfqd->oom_cfqq, 1, 0);
30d7b944 4021 cfqd->oom_cfqq.ref++;
cdb16e8f 4022 cfq_link_cfqq_cfqg(&cfqd->oom_cfqq, &cfqd->root_group);
6118b70b 4023
d9ff4187 4024 INIT_LIST_HEAD(&cfqd->cic_list);
1da177e4 4025
1da177e4 4026 cfqd->queue = q;
1da177e4 4027
22e2c507
JA
4028 init_timer(&cfqd->idle_slice_timer);
4029 cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
4030 cfqd->idle_slice_timer.data = (unsigned long) cfqd;
4031
23e018a1 4032 INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);
22e2c507 4033
1da177e4 4034 cfqd->cfq_quantum = cfq_quantum;
22e2c507
JA
4035 cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
4036 cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];
1da177e4
LT
4037 cfqd->cfq_back_max = cfq_back_max;
4038 cfqd->cfq_back_penalty = cfq_back_penalty;
22e2c507
JA
4039 cfqd->cfq_slice[0] = cfq_slice_async;
4040 cfqd->cfq_slice[1] = cfq_slice_sync;
4041 cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
4042 cfqd->cfq_slice_idle = cfq_slice_idle;
80bdf0c7 4043 cfqd->cfq_group_idle = cfq_group_idle;
963b72fc 4044 cfqd->cfq_latency = 1;
e459dd08 4045 cfqd->hw_tag = -1;
edc71131
CZ
4046 /*
4047 * we optimistically start assuming sync ops weren't delayed in the last
4048 * second, in order to have larger depth for async operations.
4049 */
573412b2 4050 cfqd->last_delayed_sync = jiffies - HZ;
bc1c1169 4051 return cfqd;
1da177e4
LT
4052}
4053
4054static void cfq_slab_kill(void)
4055{
d6de8be7
JA
4056 /*
4057 * Caller already ensured that pending RCU callbacks are completed,
4058 * so we should have no busy allocations at this point.
4059 */
1da177e4
LT
4060 if (cfq_pool)
4061 kmem_cache_destroy(cfq_pool);
4062 if (cfq_ioc_pool)
4063 kmem_cache_destroy(cfq_ioc_pool);
4064}
4065
4066static int __init cfq_slab_setup(void)
4067{
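	/*
	 * KMEM_CACHE() derives both the cache name and the object size from
	 * the struct name, so this creates a "cfq_queue" slab cache of
	 * sizeof(struct cfq_queue) objects.
	 */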
0a31bd5f 4068 cfq_pool = KMEM_CACHE(cfq_queue, 0);
1da177e4
LT
4069 if (!cfq_pool)
4070 goto fail;
4071
34e6bbf2 4072 cfq_ioc_pool = KMEM_CACHE(cfq_io_context, 0);
1da177e4
LT
4073 if (!cfq_ioc_pool)
4074 goto fail;
4075
4076 return 0;
4077fail:
4078 cfq_slab_kill();
4079 return -ENOMEM;
4080}
4081
1da177e4
LT
4082/*
4083 * sysfs parts below -->
4084 */
1da177e4
LT
4085static ssize_t
4086cfq_var_show(unsigned int var, char *page)
4087{
4088 return sprintf(page, "%d\n", var);
4089}
4090
4091static ssize_t
4092cfq_var_store(unsigned int *var, const char *page, size_t count)
4093{
4094 char *p = (char *) page;
4095
4096 *var = simple_strtoul(p, &p, 10);
4097 return count;
4098}
4099
1da177e4 4100#define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \
b374d18a 4101static ssize_t __FUNC(struct elevator_queue *e, char *page) \
1da177e4 4102{ \
3d1ab40f 4103 struct cfq_data *cfqd = e->elevator_data; \
1da177e4
LT
4104 unsigned int __data = __VAR; \
4105 if (__CONV) \
4106 __data = jiffies_to_msecs(__data); \
4107 return cfq_var_show(__data, (page)); \
4108}
4109SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
22e2c507
JA
4110SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1);
4111SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
e572ec7e
AV
4112SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
4113SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
22e2c507 4114SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
80bdf0c7 4115SHOW_FUNCTION(cfq_group_idle_show, cfqd->cfq_group_idle, 1);
22e2c507
JA
4116SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
4117SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
4118SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
963b72fc 4119SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0);
1da177e4
LT
4120#undef SHOW_FUNCTION
4121
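/*
 * For reference, SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0) above
 * expands to roughly:
 *
 *	static ssize_t cfq_quantum_show(struct elevator_queue *e, char *page)
 *	{
 *		struct cfq_data *cfqd = e->elevator_data;
 *		unsigned int __data = cfqd->cfq_quantum;
 *		if (0)
 *			__data = jiffies_to_msecs(__data);
 *		return cfq_var_show(__data, (page));
 *	}
 *
 * i.e. __CONV selects whether the stored jiffies value is reported to
 * userspace in milliseconds.
 */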
4122#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \
b374d18a 4123static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
1da177e4 4124{ \
3d1ab40f 4125 struct cfq_data *cfqd = e->elevator_data; \
1da177e4
LT
4126 unsigned int __data; \
4127 int ret = cfq_var_store(&__data, (page), count); \
4128 if (__data < (MIN)) \
4129 __data = (MIN); \
4130 else if (__data > (MAX)) \
4131 __data = (MAX); \
4132 if (__CONV) \
4133 *(__PTR) = msecs_to_jiffies(__data); \
4134 else \
4135 *(__PTR) = __data; \
4136 return ret; \
4137}
4138STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
fe094d98
JA
4139STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1,
4140 UINT_MAX, 1);
4141STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1,
4142 UINT_MAX, 1);
e572ec7e 4143STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
fe094d98
JA
4144STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1,
4145 UINT_MAX, 0);
22e2c507 4146STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
80bdf0c7 4147STORE_FUNCTION(cfq_group_idle_store, &cfqd->cfq_group_idle, 0, UINT_MAX, 1);
22e2c507
JA
4148STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
4149STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
fe094d98
JA
4150STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
4151 UINT_MAX, 0);
963b72fc 4152STORE_FUNCTION(cfq_low_latency_store, &cfqd->cfq_latency, 0, 1, 0);
1da177e4
LT
4153#undef STORE_FUNCTION
4154
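/*
 * The STORE_FUNCTION expansions mirror the SHOW side: input is clamped to
 * [MIN, MAX], and when __CONV is set the value written via sysfs is taken
 * to be in milliseconds and converted with msecs_to_jiffies() before being
 * stored, e.g. cfq_slice_idle_store() stores msecs_to_jiffies(__data) into
 * cfqd->cfq_slice_idle.
 */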
e572ec7e
AV
4155#define CFQ_ATTR(name) \
4156 __ATTR(name, S_IRUGO|S_IWUSR, cfq_##name##_show, cfq_##name##_store)
4157
4158static struct elv_fs_entry cfq_attrs[] = {
4159 CFQ_ATTR(quantum),
e572ec7e
AV
4160 CFQ_ATTR(fifo_expire_sync),
4161 CFQ_ATTR(fifo_expire_async),
4162 CFQ_ATTR(back_seek_max),
4163 CFQ_ATTR(back_seek_penalty),
4164 CFQ_ATTR(slice_sync),
4165 CFQ_ATTR(slice_async),
4166 CFQ_ATTR(slice_async_rq),
4167 CFQ_ATTR(slice_idle),
80bdf0c7 4168 CFQ_ATTR(group_idle),
963b72fc 4169 CFQ_ATTR(low_latency),
e572ec7e 4170 __ATTR_NULL
1da177e4
LT
4171};
4172
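/*
 * These attributes appear under /sys/block/<dev>/queue/iosched/ once cfq is
 * the active scheduler. For example (sda here is just an example device):
 *
 *	# cat /sys/block/sda/queue/iosched/slice_idle
 *	# echo 0 > /sys/block/sda/queue/iosched/slice_idle
 *
 * Time-based tunables are shown and stored in milliseconds, per the
 * SHOW/STORE __CONV handling above.
 */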
1da177e4
LT
4173static struct elevator_type iosched_cfq = {
4174 .ops = {
4175 .elevator_merge_fn = cfq_merge,
4176 .elevator_merged_fn = cfq_merged_request,
4177 .elevator_merge_req_fn = cfq_merged_requests,
da775265 4178 .elevator_allow_merge_fn = cfq_allow_merge,
812d4026 4179 .elevator_bio_merged_fn = cfq_bio_merged,
b4878f24 4180 .elevator_dispatch_fn = cfq_dispatch_requests,
1da177e4 4181 .elevator_add_req_fn = cfq_insert_request,
b4878f24 4182 .elevator_activate_req_fn = cfq_activate_request,
1da177e4 4183 .elevator_deactivate_req_fn = cfq_deactivate_request,
1da177e4 4184 .elevator_completed_req_fn = cfq_completed_request,
21183b07
JA
4185 .elevator_former_req_fn = elv_rb_former_request,
4186 .elevator_latter_req_fn = elv_rb_latter_request,
1da177e4
LT
4187 .elevator_set_req_fn = cfq_set_request,
4188 .elevator_put_req_fn = cfq_put_request,
4189 .elevator_may_queue_fn = cfq_may_queue,
4190 .elevator_init_fn = cfq_init_queue,
4191 .elevator_exit_fn = cfq_exit_queue,
fc46379d 4192 .trim = cfq_free_io_context,
1da177e4 4193 },
3d1ab40f 4194 .elevator_attrs = cfq_attrs,
1da177e4
LT
4195 .elevator_name = "cfq",
4196 .elevator_owner = THIS_MODULE,
4197};
4198
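/*
 * With the elevator registered, cfq can be selected per device at runtime
 * (e.g. "echo cfq > /sys/block/sda/queue/scheduler", sda being an example
 * device) or system-wide at boot via the "elevator=cfq" kernel parameter.
 */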
3e252066
VG
4199#ifdef CONFIG_CFQ_GROUP_IOSCHED
4200static struct blkio_policy_type blkio_policy_cfq = {
4201 .ops = {
4202 .blkio_unlink_group_fn = cfq_unlink_blkio_group,
4203 .blkio_update_group_weight_fn = cfq_update_blkio_group_weight,
4204 },
062a644d 4205 .plid = BLKIO_POLICY_PROP,
3e252066
VG
4206};
4207#else
4208static struct blkio_policy_type blkio_policy_cfq;
4209#endif
4210
1da177e4
LT
4211static int __init cfq_init(void)
4212{
22e2c507
JA
4213 /*
4214 * could be 0 on HZ < 1000 setups
4215 */
4216 if (!cfq_slice_async)
4217 cfq_slice_async = 1;
4218 if (!cfq_slice_idle)
4219 cfq_slice_idle = 1;
4220
80bdf0c7
VG
4221#ifdef CONFIG_CFQ_GROUP_IOSCHED
4222 if (!cfq_group_idle)
4223 cfq_group_idle = 1;
4224#else
4225 cfq_group_idle = 0;
4226#endif
1da177e4
LT
4227 if (cfq_slab_setup())
4228 return -ENOMEM;
4229
2fdd82bd 4230 elv_register(&iosched_cfq);
3e252066 4231 blkio_policy_register(&blkio_policy_cfq);
1da177e4 4232
2fdd82bd 4233 return 0;
1da177e4
LT
4234}
4235
4236static void __exit cfq_exit(void)
4237{
6e9a4738 4238 DECLARE_COMPLETION_ONSTACK(all_gone);
3e252066 4239 blkio_policy_unregister(&blkio_policy_cfq);
1da177e4 4240 elv_unregister(&iosched_cfq);
334e94de 4241 ioc_gone = &all_gone;
fba82272
OH
4242 /* ioc_gone's update must be visible before reading ioc_count */
4243 smp_wmb();
d6de8be7
JA
4244
4245 /*
4246 * this also protects us from entering cfq_slab_kill() with
4247 * pending RCU callbacks
4248 */
245b2e70 4249 if (elv_ioc_count_read(cfq_ioc_count))
9a11b4ed 4250 wait_for_completion(&all_gone);
80b15c73 4251 ida_destroy(&cic_index_ida);
83521d3e 4252 cfq_slab_kill();
1da177e4
LT
4253}
4254
4255module_init(cfq_init);
4256module_exit(cfq_exit);
4257
4258MODULE_AUTHOR("Jens Axboe");
4259MODULE_LICENSE("GPL");
4260MODULE_DESCRIPTION("Completely Fair Queueing IO scheduler");