block/blk-wbt.c
/*
 * buffered writeback throttling. loosely based on CoDel. We can't drop
 * packets for IO scheduling, so the logic is something like this:
 *
 * - Monitor latencies in a defined window of time.
 * - If the minimum latency in the above window exceeds some target, increment
 *   scaling step and scale down queue depth by a factor of 2x. The monitoring
 *   window is then shrunk to 100 / sqrt(scaling step + 1).
 * - For any window where we don't have solid data on what the latencies
 *   look like, retain status quo.
 * - If latencies look good, decrement scaling step.
 * - If we're only doing writes, allow the scaling step to go negative. This
 *   will temporarily boost write performance, snapping back to a stable
 *   scaling step of 0 if reads show up or the heavy writers finish. Unlike
 *   positive scaling steps where we shrink the monitoring window, a negative
 *   scaling step retains the default step==0 window size.
 *
 * Copyright (C) 2016 Jens Axboe
 *
 */
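
/*
 * Illustrative example of the rules above (not a guarantee, assuming a
 * device queue depth of at least 16 so the starting depth is RWB_DEF_DEPTH):
 *
 *      step 0: depth 16, window 100ms
 *      step 1: depth  8, window ~71ms  (100 / sqrt(2))
 *      step 2: depth  4, window ~58ms  (100 / sqrt(3))
 *      step 3: depth  2, window  50ms  (100 / sqrt(4))
 *
 * The exact numbers depend on the device queue depth; see calc_wb_limits()
 * and rwb_arm_timer() below for the actual calculations.
 */
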
#include <linux/kernel.h>
#include <linux/blk_types.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/swap.h>

#include "blk-wbt.h"

#define CREATE_TRACE_POINTS
#include <trace/events/wbt.h>

enum {
        /*
         * Default setting, we'll scale up (to 75% of QD max) or down (min 1)
         * from here depending on device stats
         */
        RWB_DEF_DEPTH = 16,

        /*
         * 100msec window
         */
        RWB_WINDOW_NSEC = 100 * 1000 * 1000ULL,

        /*
         * Disregard stats, if we don't meet this minimum
         */
        RWB_MIN_WRITE_SAMPLES = 3,

        /*
         * If we have this number of consecutive windows with not enough
         * information to scale up or down, scale up.
         */
        RWB_UNKNOWN_BUMP = 5,
};

static inline bool rwb_enabled(struct rq_wb *rwb)
{
        return rwb && rwb->wb_normal != 0;
}

/*
 * Increment 'v', if 'v' is below 'below'. Returns true if we succeeded,
 * false if 'v' + 1 would be bigger than 'below'.
 */
static bool atomic_inc_below(atomic_t *v, int below)
{
        int cur = atomic_read(v);

        for (;;) {
                int old;

                if (cur >= below)
                        return false;
                old = atomic_cmpxchg(v, cur, cur + 1);
                if (old == cur)
                        break;
                cur = old;
        }

        return true;
}
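
/*
 * atomic_inc_below() is the lockless "claim an inflight slot" helper used by
 * __wbt_wait() below: the caller passes the current limit from get_limit(),
 * the cmpxchg loop retries if another task raced us to the same slot, and we
 * give up once the counter has reached the limit.
 */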

static void wb_timestamp(struct rq_wb *rwb, unsigned long *var)
{
        if (rwb_enabled(rwb)) {
                const unsigned long cur = jiffies;

                if (cur != *var)
                        *var = cur;
        }
}

/*
 * If a task was rate throttled in balance_dirty_pages() within the last
 * second or so, use that to indicate a higher cleaning rate.
 */
static bool wb_recent_wait(struct rq_wb *rwb)
{
        struct bdi_writeback *wb = &rwb->queue->backing_dev_info->wb;

        return time_before(jiffies, wb->dirty_sleep + HZ);
}

static inline struct rq_wait *get_rq_wait(struct rq_wb *rwb,
                                          enum wbt_flags wb_acct)
{
        if (wb_acct & WBT_KSWAPD)
                return &rwb->rq_wait[WBT_RWQ_KSWAPD];

        return &rwb->rq_wait[WBT_RWQ_BG];
}

static void rwb_wake_all(struct rq_wb *rwb)
{
        int i;

        for (i = 0; i < WBT_NUM_RWQ; i++) {
                struct rq_wait *rqw = &rwb->rq_wait[i];

                if (wq_has_sleeper(&rqw->wait))
                        wake_up_all(&rqw->wait);
        }
}

void __wbt_done(struct rq_wb *rwb, enum wbt_flags wb_acct)
{
        struct rq_wait *rqw;
        int inflight, limit;

        if (!(wb_acct & WBT_TRACKED))
                return;

        rqw = get_rq_wait(rwb, wb_acct);
        inflight = atomic_dec_return(&rqw->inflight);

        /*
         * wbt got disabled with IO in flight. Wake up any potential
         * waiters, we don't have to do more than that.
         */
        if (unlikely(!rwb_enabled(rwb))) {
                rwb_wake_all(rwb);
                return;
        }

        /*
         * If the device does write back caching, drop further down
         * before we wake people up.
         */
        if (rwb->wc && !wb_recent_wait(rwb))
                limit = 0;
        else
                limit = rwb->wb_normal;

        /*
         * Don't wake anyone up if we are above the normal limit.
         */
        if (inflight && inflight >= limit)
                return;

        if (wq_has_sleeper(&rqw->wait)) {
                int diff = limit - inflight;

                if (!inflight || diff >= rwb->wb_background / 2)
                        wake_up(&rqw->wait);
        }
}

/*
 * Called on completion of a request. Note that it's also called when
 * a request is merged, at the point where the request gets freed.
 */
void wbt_done(struct rq_wb *rwb, struct blk_issue_stat *stat)
{
        if (!rwb)
                return;

        if (!wbt_is_tracked(stat)) {
                if (rwb->sync_cookie == stat) {
                        rwb->sync_issue = 0;
                        rwb->sync_cookie = NULL;
                }

                if (wbt_is_read(stat))
                        wb_timestamp(rwb, &rwb->last_comp);
        } else {
                WARN_ON_ONCE(stat == rwb->sync_cookie);
                __wbt_done(rwb, wbt_stat_to_mask(stat));
        }
        wbt_clear_state(stat);
}

/*
 * Return true, if we can't increase the depth further by scaling
 */
static bool calc_wb_limits(struct rq_wb *rwb)
{
        unsigned int depth;
        bool ret = false;

        if (!rwb->min_lat_nsec) {
                rwb->wb_max = rwb->wb_normal = rwb->wb_background = 0;
                return false;
        }

        /*
         * For QD=1 devices, this is a special case. It's important for those
         * to have one request ready when one completes, so force a depth of
         * 2 for those devices. On the backend, it'll be a depth of 1 anyway,
         * since the device can't have more than that in flight. If we're
         * scaling down, then keep a setting of 1/1/1.
         */
        if (rwb->queue_depth == 1) {
                if (rwb->scale_step > 0)
                        rwb->wb_max = rwb->wb_normal = 1;
                else {
                        rwb->wb_max = rwb->wb_normal = 2;
                        ret = true;
                }
                rwb->wb_background = 1;
        } else {
                /*
                 * scale_step == 0 is our default state. If we have suffered
                 * latency spikes, step will be > 0, and we shrink the
                 * allowed write depths. If step is < 0, we're only doing
                 * writes, and we allow a temporarily higher depth to
                 * increase performance.
                 */
                depth = min_t(unsigned int, RWB_DEF_DEPTH, rwb->queue_depth);
                if (rwb->scale_step > 0)
                        depth = 1 + ((depth - 1) >> min(31, rwb->scale_step));
                else if (rwb->scale_step < 0) {
                        unsigned int maxd = 3 * rwb->queue_depth / 4;

                        depth = 1 + ((depth - 1) << -rwb->scale_step);
                        if (depth > maxd) {
                                depth = maxd;
                                ret = true;
                        }
                }

                /*
                 * Set our max/normal/bg queue depths based on how far
                 * we have scaled down (->scale_step).
                 */
                rwb->wb_max = depth;
                rwb->wb_normal = (rwb->wb_max + 1) / 2;
                rwb->wb_background = (rwb->wb_max + 3) / 4;
        }

        return ret;
}
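
/*
 * Worked example of the depth scaling above (illustrative only, assuming a
 * hypothetical device with queue_depth == 32, so the starting depth is
 * min(RWB_DEF_DEPTH, 32) == 16):
 *
 *      scale_step  2:  depth = 1 + (15 >> 2) = 4   -> max/normal/bg = 4/2/1
 *      scale_step  1:  depth = 1 + (15 >> 1) = 8   -> max/normal/bg = 8/4/2
 *      scale_step  0:  depth = 16                  -> max/normal/bg = 16/8/4
 *      scale_step -1:  depth = 1 + (15 << 1) = 31,
 *                      capped at 3 * 32 / 4 = 24   -> max/normal/bg = 24/12/6
 *                      (and the return value is true, marking scaled_max)
 */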

static inline bool stat_sample_valid(struct blk_rq_stat *stat)
{
        /*
         * We need at least one read sample, and a minimum of
         * RWB_MIN_WRITE_SAMPLES. We require some write samples to know
         * that it's writes impacting us, and not just some sole read on
         * a device that is in a lower power state.
         */
        return (stat[READ].nr_samples >= 1 &&
                stat[WRITE].nr_samples >= RWB_MIN_WRITE_SAMPLES);
}

static u64 rwb_sync_issue_lat(struct rq_wb *rwb)
{
        u64 now, issue = READ_ONCE(rwb->sync_issue);

        if (!issue || !rwb->sync_cookie)
                return 0;

        now = ktime_to_ns(ktime_get());
        return now - issue;
}

enum {
        LAT_OK = 1,
        LAT_UNKNOWN,
        LAT_UNKNOWN_WRITES,
        LAT_EXCEEDED,
};

static int latency_exceeded(struct rq_wb *rwb, struct blk_rq_stat *stat)
{
        struct backing_dev_info *bdi = rwb->queue->backing_dev_info;
        u64 thislat;

        /*
         * If our stored sync issue exceeds the window size, or it
         * exceeds our min target AND we haven't logged any entries,
         * flag the latency as exceeded. wbt works off completion latencies,
         * but for a flooded device, a single sync IO can take a long time
         * to complete after being issued. If this time exceeds our
         * monitoring window AND we didn't see any other completions in that
         * window, then count that sync IO as a violation of the latency.
         */
        thislat = rwb_sync_issue_lat(rwb);
        if (thislat > rwb->cur_win_nsec ||
            (thislat > rwb->min_lat_nsec && !stat[READ].nr_samples)) {
                trace_wbt_lat(bdi, thislat);
                return LAT_EXCEEDED;
        }

        /*
         * No read/write mix, if stat isn't valid
         */
        if (!stat_sample_valid(stat)) {
                /*
                 * If we had writes in this stat window and the window is
                 * current, we're only doing writes. If a task recently
                 * waited or still has writes in flight, consider us doing
                 * just writes as well.
                 */
                if (stat[WRITE].nr_samples || wb_recent_wait(rwb) ||
                    wbt_inflight(rwb))
                        return LAT_UNKNOWN_WRITES;
                return LAT_UNKNOWN;
        }

        /*
         * If the 'min' latency exceeds our target, step down.
         */
        if (stat[READ].min > rwb->min_lat_nsec) {
                trace_wbt_lat(bdi, stat[READ].min);
                trace_wbt_stat(bdi, stat);
                return LAT_EXCEEDED;
        }

        if (rwb->scale_step)
                trace_wbt_stat(bdi, stat);

        return LAT_OK;
}

static void rwb_trace_step(struct rq_wb *rwb, const char *msg)
{
        struct backing_dev_info *bdi = rwb->queue->backing_dev_info;

        trace_wbt_step(bdi, msg, rwb->scale_step, rwb->cur_win_nsec,
                        rwb->wb_background, rwb->wb_normal, rwb->wb_max);
}

static void scale_up(struct rq_wb *rwb)
{
        /*
         * Hit max in previous round, stop here
         */
        if (rwb->scaled_max)
                return;

        rwb->scale_step--;
        rwb->unknown_cnt = 0;

        rwb->scaled_max = calc_wb_limits(rwb);

        rwb_wake_all(rwb);

        rwb_trace_step(rwb, "step up");
}

/*
 * Scale rwb down. If 'hard_throttle' is set, do it quicker, since we
 * had a latency violation.
 */
static void scale_down(struct rq_wb *rwb, bool hard_throttle)
{
        /*
         * Stop scaling down when we've hit the limit. This also prevents
         * ->scale_step from going to crazy values, if the device can't
         * keep up.
         */
        if (rwb->wb_max == 1)
                return;

        if (rwb->scale_step < 0 && hard_throttle)
                rwb->scale_step = 0;
        else
                rwb->scale_step++;

        rwb->scaled_max = false;
        rwb->unknown_cnt = 0;
        calc_wb_limits(rwb);
        rwb_trace_step(rwb, "step down");
}

static void rwb_arm_timer(struct rq_wb *rwb)
{
        if (rwb->scale_step > 0) {
                /*
                 * We should speed this up, using some variant of a fast
                 * integer inverse square root calculation. Since we only do
                 * this for every window expiration, it's not a huge deal,
                 * though.
                 */
                rwb->cur_win_nsec = div_u64(rwb->win_nsec << 4,
                                        int_sqrt((rwb->scale_step + 1) << 8));
        } else {
                /*
                 * For step < 0, we don't want to increase/decrease the
                 * window size.
                 */
                rwb->cur_win_nsec = rwb->win_nsec;
        }

        blk_stat_activate_nsecs(rwb->cb, rwb->cur_win_nsec);
}
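
/*
 * Worked form of the fixed-point math above (illustrative): since
 * int_sqrt((step + 1) << 8) == 16 * sqrt(step + 1), rounded down, the
 * expression reduces to
 *
 *      cur_win_nsec = (win_nsec * 16) / (16 * sqrt(step + 1))
 *                   = win_nsec / sqrt(step + 1)
 *
 * i.e. the "100 / sqrt(scaling step + 1)" window shrink described at the
 * top of the file, applied to the default 100msec window. For example, at
 * scale_step == 3 this gives 100msec / sqrt(4) = 50msec.
 */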

static void wb_timer_fn(struct blk_stat_callback *cb)
{
        struct rq_wb *rwb = cb->data;
        unsigned int inflight = wbt_inflight(rwb);
        int status;

        status = latency_exceeded(rwb, cb->stat);

        trace_wbt_timer(rwb->queue->backing_dev_info, status, rwb->scale_step,
                        inflight);

        /*
         * If we exceeded the latency target, step down. If we did not,
         * step one level up. If we don't know enough to say either exceeded
         * or ok, then don't do anything.
         */
        switch (status) {
        case LAT_EXCEEDED:
                scale_down(rwb, true);
                break;
        case LAT_OK:
                scale_up(rwb);
                break;
        case LAT_UNKNOWN_WRITES:
                /*
                 * We started at the center step, and don't have a valid
                 * read/write sample, but we do have writes going on.
                 * Allow step to go negative, to increase write perf.
                 */
                scale_up(rwb);
                break;
        case LAT_UNKNOWN:
                if (++rwb->unknown_cnt < RWB_UNKNOWN_BUMP)
                        break;
                /*
                 * We get here when we previously scaled the depth up or
                 * down, and we currently don't have a valid read/write
                 * sample. For that case, slowly return to center state
                 * (step == 0).
                 */
                if (rwb->scale_step > 0)
                        scale_up(rwb);
                else if (rwb->scale_step < 0)
                        scale_down(rwb, false);
                break;
        default:
                break;
        }

        /*
         * Re-arm timer, if we have IO in flight
         */
        if (rwb->scale_step || inflight)
                rwb_arm_timer(rwb);
}

void wbt_update_limits(struct rq_wb *rwb)
{
        rwb->scale_step = 0;
        rwb->scaled_max = false;
        calc_wb_limits(rwb);

        rwb_wake_all(rwb);
}

static bool close_io(struct rq_wb *rwb)
{
        const unsigned long now = jiffies;

        return time_before(now, rwb->last_issue + HZ / 10) ||
                time_before(now, rwb->last_comp + HZ / 10);
}

#define REQ_HIPRIO (REQ_SYNC | REQ_META | REQ_PRIO)

static inline unsigned int get_limit(struct rq_wb *rwb, unsigned long rw)
{
        unsigned int limit;

        /*
         * If we got disabled, just return UINT_MAX. This ensures that
         * we'll properly inc a new IO, and dec+wakeup at the end.
         */
        if (!rwb_enabled(rwb))
                return UINT_MAX;

        /*
         * At this point we know it's a buffered write. If this is
         * kswapd trying to free memory, or REQ_SYNC is set, then
         * it's WB_SYNC_ALL writeback, and we'll use the max limit for
         * that. If the write is marked as a background write, then use
         * the idle limit, or go to normal if we haven't had competing
         * IO for a bit.
         */
        if ((rw & REQ_HIPRIO) || wb_recent_wait(rwb) || current_is_kswapd())
                limit = rwb->wb_max;
        else if ((rw & REQ_BACKGROUND) || close_io(rwb)) {
                /*
                 * If less than 100ms since we completed unrelated IO,
                 * limit us to half the depth for background writeback.
                 */
                limit = rwb->wb_background;
        } else
                limit = rwb->wb_normal;

        return limit;
}
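
/*
 * Putting get_limit() together with the default limits from calc_wb_limits()
 * (illustrative, assuming scale_step == 0 and a queue depth of at least 16,
 * i.e. max/normal/background = 16/8/4):
 *
 *      - kswapd, REQ_SYNC/REQ_META/REQ_PRIO writes, or a recent
 *        balance_dirty_pages() sleep              -> limit 16 (wb_max)
 *      - REQ_BACKGROUND writeback, or other IO issued/completed
 *        within the last 100ms                    -> limit  4 (wb_background)
 *      - everything else                          -> limit  8 (wb_normal)
 */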

/*
 * Block if we will exceed our limit, or if we are currently waiting for
 * the timer to kick off queuing again.
 */
static void __wbt_wait(struct rq_wb *rwb, enum wbt_flags wb_acct,
                       unsigned long rw, spinlock_t *lock)
        __releases(lock)
        __acquires(lock)
{
        struct rq_wait *rqw = get_rq_wait(rwb, wb_acct);
        DECLARE_WAITQUEUE(wait, current);
        bool has_sleeper;

        has_sleeper = wq_has_sleeper(&rqw->wait);
        if (!has_sleeper && atomic_inc_below(&rqw->inflight, get_limit(rwb, rw)))
                return;

        add_wait_queue_exclusive(&rqw->wait, &wait);
        do {
                set_current_state(TASK_UNINTERRUPTIBLE);

                if (!has_sleeper && atomic_inc_below(&rqw->inflight, get_limit(rwb, rw)))
                        break;

                if (lock) {
                        spin_unlock_irq(lock);
                        io_schedule();
                        spin_lock_irq(lock);
                } else
                        io_schedule();
                has_sleeper = false;
        } while (1);

        __set_current_state(TASK_RUNNING);
        remove_wait_queue(&rqw->wait, &wait);
}
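
/*
 * A note on the has_sleeper handling above: if other tasks are already
 * sleeping on the waitqueue when we arrive, we do not try to grab an
 * inflight slot ahead of them, neither in the fast path nor on the first
 * pass through the loop. Only after we have gone to sleep once is
 * has_sleeper cleared, so that later wakeups let us claim a slot. The
 * intent is to keep throttled writers roughly FIFO instead of letting
 * newcomers jump the queue.
 */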

static inline bool wbt_should_throttle(struct rq_wb *rwb, struct bio *bio)
{
        const int op = bio_op(bio);

        /*
         * If not a WRITE, do nothing
         */
        if (op != REQ_OP_WRITE)
                return false;

        /*
         * Don't throttle WRITE_ODIRECT
         */
        if ((bio->bi_opf & (REQ_SYNC | REQ_IDLE)) == (REQ_SYNC | REQ_IDLE))
                return false;

        return true;
}

/*
 * Returns true if the IO request should be accounted, false if not.
 * May sleep, if we have exceeded the writeback limits. Caller can pass
 * in an irq held spinlock, if it holds one when calling this function.
 * If we do sleep, we'll release and re-grab it.
 */
enum wbt_flags wbt_wait(struct rq_wb *rwb, struct bio *bio, spinlock_t *lock)
{
        enum wbt_flags ret = 0;

        if (!rwb_enabled(rwb))
                return 0;

        if (bio_op(bio) == REQ_OP_READ)
                ret = WBT_READ;

        if (!wbt_should_throttle(rwb, bio)) {
                if (ret & WBT_READ)
                        wb_timestamp(rwb, &rwb->last_issue);
                return ret;
        }

        if (current_is_kswapd())
                ret |= WBT_KSWAPD;

        __wbt_wait(rwb, ret, bio->bi_opf, lock);

        if (!blk_stat_is_active(rwb->cb))
                rwb_arm_timer(rwb);

        return ret | WBT_TRACKED;
}

void wbt_issue(struct rq_wb *rwb, struct blk_issue_stat *stat)
{
        if (!rwb_enabled(rwb))
                return;

        /*
         * Track sync issue, in case it takes a long time to complete. Allows
         * us to react quicker, if a sync IO takes a long time to complete.
         * Note that this is just a hint. 'stat' can go away when the
         * request completes, so it's important we never dereference it. We
         * only use the address to compare with, which is why we store the
         * sync_issue time locally.
         */
        if (wbt_is_read(stat) && !rwb->sync_issue) {
                rwb->sync_cookie = stat;
                rwb->sync_issue = blk_stat_time(stat);
        }
}

void wbt_requeue(struct rq_wb *rwb, struct blk_issue_stat *stat)
{
        if (!rwb_enabled(rwb))
                return;
        if (stat == rwb->sync_cookie) {
                rwb->sync_issue = 0;
                rwb->sync_cookie = NULL;
        }
}

void wbt_set_queue_depth(struct rq_wb *rwb, unsigned int depth)
{
        if (rwb) {
                rwb->queue_depth = depth;
                wbt_update_limits(rwb);
        }
}

void wbt_set_write_cache(struct rq_wb *rwb, bool write_cache_on)
{
        if (rwb)
                rwb->wc = write_cache_on;
}

/*
 * Disable wbt, if enabled by default.
 */
void wbt_disable_default(struct request_queue *q)
{
        struct rq_wb *rwb = q->rq_wb;

        if (rwb && rwb->enable_state == WBT_STATE_ON_DEFAULT)
                wbt_exit(q);
}
EXPORT_SYMBOL_GPL(wbt_disable_default);

/*
 * Enable wbt if defaults are configured that way
 */
void wbt_enable_default(struct request_queue *q)
{
        /* Throttling already enabled? */
        if (q->rq_wb)
                return;

        /* Queue not registered? Maybe shutting down... */
        if (!test_bit(QUEUE_FLAG_REGISTERED, &q->queue_flags))
                return;

        if ((q->mq_ops && IS_ENABLED(CONFIG_BLK_WBT_MQ)) ||
            (q->request_fn && IS_ENABLED(CONFIG_BLK_WBT_SQ)))
                wbt_init(q);
}
EXPORT_SYMBOL_GPL(wbt_enable_default);

u64 wbt_default_latency_nsec(struct request_queue *q)
{
        /*
         * We default to 2msec for non-rotational storage, and 75msec
         * for rotational storage.
         */
        if (blk_queue_nonrot(q))
                return 2000000ULL;
        else
                return 75000000ULL;
}

static int wbt_data_dir(const struct request *rq)
{
        const int op = req_op(rq);

        if (op == REQ_OP_READ)
                return READ;
        else if (op == REQ_OP_WRITE || op == REQ_OP_FLUSH)
                return WRITE;

        /* don't account */
        return -1;
}

int wbt_init(struct request_queue *q)
{
        struct rq_wb *rwb;
        int i;

        BUILD_BUG_ON(WBT_NR_BITS > BLK_STAT_RES_BITS);

        rwb = kzalloc(sizeof(*rwb), GFP_KERNEL);
        if (!rwb)
                return -ENOMEM;

        rwb->cb = blk_stat_alloc_callback(wb_timer_fn, wbt_data_dir, 2, rwb);
        if (!rwb->cb) {
                kfree(rwb);
                return -ENOMEM;
        }

        for (i = 0; i < WBT_NUM_RWQ; i++) {
                atomic_set(&rwb->rq_wait[i].inflight, 0);
                init_waitqueue_head(&rwb->rq_wait[i].wait);
        }

        rwb->last_comp = rwb->last_issue = jiffies;
        rwb->queue = q;
        rwb->win_nsec = RWB_WINDOW_NSEC;
        rwb->enable_state = WBT_STATE_ON_DEFAULT;
        wbt_update_limits(rwb);

        /*
         * Assign rwb and add the stats callback.
         */
        q->rq_wb = rwb;
        blk_stat_add_callback(q, rwb->cb);

        rwb->min_lat_nsec = wbt_default_latency_nsec(q);

        wbt_set_queue_depth(rwb, blk_queue_depth(q));
        wbt_set_write_cache(rwb, test_bit(QUEUE_FLAG_WC, &q->queue_flags));

        return 0;
}

void wbt_exit(struct request_queue *q)
{
        struct rq_wb *rwb = q->rq_wb;

        if (rwb) {
                blk_stat_remove_callback(q, rwb->cb);
                blk_stat_free_callback(rwb->cb);
                q->rq_wb = NULL;
                kfree(rwb);
        }
}