// SPDX-License-Identifier: GPL-2.0
/*
 * background writeback - scan btree for dirty data and write it to the backing
 * device
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "writeback.h"

#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/sched/clock.h>
#include <trace/events/bcache.h>

/* Rate limiting */
static uint64_t __calc_target_rate(struct cached_dev *dc)
{
        struct cache_set *c = dc->disk.c;

        /*
         * This is the size of the cache, minus the amount used for
         * flash-only devices
         */
        uint64_t cache_sectors = c->nbuckets * c->sb.bucket_size -
                                 bcache_flash_devs_sectors_dirty(c);

        /*
         * Unfortunately there is no control of global dirty data.  If the
         * user states that they want 10% dirty data in the cache, and has,
         * e.g., 5 backing volumes of equal size, we try and ensure each
         * backing volume uses about 2% of the cache for dirty data.
         */
        uint32_t bdev_share =
                div64_u64(bdev_sectors(dc->bdev) << WRITEBACK_SHARE_SHIFT,
                          c->cached_dev_sectors);

        uint64_t cache_dirty_target =
                div_u64(cache_sectors * dc->writeback_percent, 100);

        /* Ensure each backing dev gets at least one dirty share */
        if (bdev_share < 1)
                bdev_share = 1;

        return (cache_dirty_target * bdev_share) >> WRITEBACK_SHARE_SHIFT;
}
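
/*
 * A worked example for __calc_target_rate() (illustrative numbers, not
 * taken from the code): with writeback_percent = 10 and a cache of
 * 1,000,000 usable sectors, cache_dirty_target is 100,000 sectors.
 * bdev_share is this device's fraction of all cached-device sectors,
 * kept in fixed point with WRITEBACK_SHARE_SHIFT fractional bits; for
 * two backing volumes of equal size each gets half the share, so each
 * volume is steered toward ~50,000 dirty sectors.
 */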

static void __update_writeback_rate(struct cached_dev *dc)
{
        /*
         * PI controller:
         * Figures out the amount that should be written per second.
         *
         * First, the error (number of sectors that are dirty beyond our
         * target) is calculated. The error is accumulated (numerically
         * integrated).
         *
         * Then, the proportional value and integral value are scaled
         * based on configured values. These are stored as inverses to
         * avoid fixed point math and to make configuration easy -- e.g.
         * the default value of 40 for writeback_rate_p_term_inverse
         * attempts to write at a rate that would retire all the dirty
         * blocks in 40 seconds.
         *
         * The writeback_rate_i_term_inverse value of 10000 means that
         * 1/10000th of the error is accumulated in the integral term
         * per second. This acts as a slow, long-term average that is
         * not subject to variations in usage like the p term.
         */
        int64_t target = __calc_target_rate(dc);
        int64_t dirty = bcache_dev_sectors_dirty(&dc->disk);
        int64_t error = dirty - target;
        int64_t proportional_scaled =
                div_s64(error, dc->writeback_rate_p_term_inverse);
        int64_t integral_scaled;
        uint32_t new_rate;

        if ((error < 0 && dc->writeback_rate_integral > 0) ||
            (error > 0 && time_before64(local_clock(),
                          dc->writeback_rate.next + NSEC_PER_MSEC))) {
                /*
                 * Only decrease the integral term if it's more than
                 * zero.  Only increase the integral term if the device
                 * is keeping up.  (Don't wind up the integral
                 * ineffectively in either case).
                 *
                 * It's necessary to scale this by
                 * writeback_rate_update_seconds to keep the integral
                 * term dimensioned properly.
                 */
                dc->writeback_rate_integral += error *
                        dc->writeback_rate_update_seconds;
        }

        integral_scaled = div_s64(dc->writeback_rate_integral,
                                  dc->writeback_rate_i_term_inverse);

        new_rate = clamp_t(int32_t, (proportional_scaled + integral_scaled),
                           dc->writeback_rate_minimum, NSEC_PER_SEC);

        dc->writeback_rate_proportional = proportional_scaled;
        dc->writeback_rate_integral_scaled = integral_scaled;
        dc->writeback_rate_change = new_rate - dc->writeback_rate.rate;
        dc->writeback_rate.rate = new_rate;
        dc->writeback_rate_target = target;
}
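
/*
 * Worked example (illustrative, using the defaults set in
 * bch_cached_dev_writeback_init() below): if we are 400,000 sectors
 * over target, the proportional term alone asks for 400,000 / 40 =
 * 10,000 sectors/sec, which would retire the excess in ~40 seconds.
 * Meanwhile each 5-second update adds error * 5 to the integral, and
 * 1/10000th of that accumulated value is added to the rate, nudging
 * the steady-state rate toward the true incoming dirty rate.
 */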

static void update_writeback_rate(struct work_struct *work)
{
        struct cached_dev *dc = container_of(to_delayed_work(work),
                                             struct cached_dev,
                                             writeback_rate_update);

        down_read(&dc->writeback_lock);

        if (atomic_read(&dc->has_dirty) &&
            dc->writeback_percent)
                __update_writeback_rate(dc);

        up_read(&dc->writeback_lock);

        schedule_delayed_work(&dc->writeback_rate_update,
                              dc->writeback_rate_update_seconds * HZ);
}

static unsigned writeback_delay(struct cached_dev *dc, unsigned sectors)
{
        if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
            !dc->writeback_percent)
                return 0;

        return bch_next_delay(&dc->writeback_rate, sectors);
}
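
/*
 * writeback_delay() returns how long to wait before issuing the next
 * batch, as computed by the ratelimit code from the current rate and
 * the number of sectors just issued; read_dirty() feeds the result to
 * schedule_timeout_interruptible(), so it is in jiffies. It is zero
 * (no throttling) when detaching or when writeback_percent is unset.
 */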

struct dirty_io {
        struct closure cl;
        struct cached_dev *dc;
        uint16_t sequence;
        struct bio bio;
};

static void dirty_init(struct keybuf_key *w)
{
        struct dirty_io *io = w->private;
        struct bio *bio = &io->bio;

        bio_init(bio, bio->bi_inline_vecs,
                 DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS));
        if (!io->dc->writeback_percent)
                bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));

        bio->bi_iter.bi_size = KEY_SIZE(&w->key) << 9;
        bio->bi_private = w;
        bch_bio_map(bio, NULL);
}

static void dirty_io_destructor(struct closure *cl)
{
        struct dirty_io *io = container_of(cl, struct dirty_io, cl);
        kfree(io);
}

static void write_dirty_finish(struct closure *cl)
{
        struct dirty_io *io = container_of(cl, struct dirty_io, cl);
        struct keybuf_key *w = io->bio.bi_private;
        struct cached_dev *dc = io->dc;

        bio_free_pages(&io->bio);

        /* This is kind of a dumb way of signalling errors. */
        if (KEY_DIRTY(&w->key)) {
                int ret;
                unsigned i;
                struct keylist keys;

                bch_keylist_init(&keys);

                bkey_copy(keys.top, &w->key);
                SET_KEY_DIRTY(keys.top, false);
                bch_keylist_push(&keys);

                for (i = 0; i < KEY_PTRS(&w->key); i++)
                        atomic_inc(&PTR_BUCKET(dc->disk.c, &w->key, i)->pin);

                ret = bch_btree_insert(dc->disk.c, &keys, NULL, &w->key);

                if (ret)
                        trace_bcache_writeback_collision(&w->key);

                atomic_long_inc(ret
                                ? &dc->disk.c->writeback_keys_failed
                                : &dc->disk.c->writeback_keys_done);
        }

        bch_keybuf_del(&dc->writeback_keys, w);
        up(&dc->in_flight);

        closure_return_with_destructor(cl, dirty_io_destructor);
}
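
/*
 * Note on write_dirty_finish() (a reading of the code, not from the
 * original comments): passing &w->key as the last argument to
 * bch_btree_insert() makes the insert conditional -- the dirty bit is
 * only cleared if the key in the btree still matches what we wrote
 * back. If a foreground write replaced it in the meantime, the insert
 * fails and is counted as a writeback collision, leaving the new dirty
 * data to be written back later. The bucket pins taken just before the
 * insert keep the cache buckets holding the old data from being reused
 * while the insert is in flight.
 */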

static void dirty_endio(struct bio *bio)
{
        struct keybuf_key *w = bio->bi_private;
        struct dirty_io *io = w->private;

        if (bio->bi_status)
                SET_KEY_DIRTY(&w->key, false);

        closure_put(&io->cl);
}

static void write_dirty(struct closure *cl)
{
        struct dirty_io *io = container_of(cl, struct dirty_io, cl);
        struct keybuf_key *w = io->bio.bi_private;
        struct cached_dev *dc = io->dc;

        uint16_t next_sequence;

        if (atomic_read(&dc->writeback_sequence_next) != io->sequence) {
                /* Not our turn to write; wait for a write to complete */
                closure_wait(&dc->writeback_ordering_wait, cl);

                if (atomic_read(&dc->writeback_sequence_next) == io->sequence) {
                        /*
                         * Edge case: our turn arrived while we were
                         * being added to the wait list, so the wakeup
                         * may have fired before we were waiting; wake
                         * the list again so nobody is left stranded.
                         */
                        closure_wake_up(&dc->writeback_ordering_wait);
                }

                continue_at(cl, write_dirty, io->dc->writeback_write_wq);
                return;
        }

        next_sequence = io->sequence + 1;

        /*
         * IO errors are signalled using the dirty bit on the key.
         * If we failed to read, we should not attempt to write to the
         * backing device. Instead, immediately go to write_dirty_finish
         * to clean up.
         */
        if (KEY_DIRTY(&w->key)) {
                dirty_init(w);
                bio_set_op_attrs(&io->bio, REQ_OP_WRITE, 0);
                io->bio.bi_iter.bi_sector = KEY_START(&w->key);
                bio_set_dev(&io->bio, io->dc->bdev);
                io->bio.bi_end_io = dirty_endio;

                closure_bio_submit(&io->bio, cl);
        }

        atomic_set(&dc->writeback_sequence_next, next_sequence);
        closure_wake_up(&dc->writeback_ordering_wait);

        continue_at(cl, write_dirty_finish, io->dc->writeback_write_wq);
}
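
/*
 * The sequence/wakeup protocol above keeps writes to the backing
 * device in the order the keys were collected in read_dirty(), so a
 * batch of contiguous dirty extents goes out as an in-order, mostly
 * sequential stream rather than in whatever order the cache reads
 * happen to complete.
 */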

static void read_dirty_endio(struct bio *bio)
{
        struct keybuf_key *w = bio->bi_private;
        struct dirty_io *io = w->private;

        /* is_read = 1 */
        bch_count_io_errors(PTR_CACHE(io->dc->disk.c, &w->key, 0),
                            bio->bi_status, 1,
                            "reading dirty data from cache");

        dirty_endio(bio);
}

static void read_dirty_submit(struct closure *cl)
{
        struct dirty_io *io = container_of(cl, struct dirty_io, cl);

        closure_bio_submit(&io->bio, cl);

        continue_at(cl, write_dirty, io->dc->writeback_write_wq);
}
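
/*
 * read_dirty() below walks the refilled keybuf and, per pass, gathers
 * up to MAX_WRITEBACKS_IN_PASS contiguous dirty keys (capped at
 * MAX_WRITESIZE_IN_PASS sectors) into one batch, reads them from the
 * cache, and hands each off to the read -> write -> finish closure
 * pipeline, throttled by writeback_delay() between batches.
 */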

static void read_dirty(struct cached_dev *dc)
{
        unsigned delay = 0;
        struct keybuf_key *next, *keys[MAX_WRITEBACKS_IN_PASS], *w;
        size_t size;
        int nk, i;
        struct dirty_io *io;
        struct closure cl;
        uint16_t sequence = 0;

        BUG_ON(!llist_empty(&dc->writeback_ordering_wait.list));
        atomic_set(&dc->writeback_sequence_next, sequence);
        closure_init_stack(&cl);

        /*
         * XXX: if we error, background writeback just spins. Should use some
         * mempools.
         */

        next = bch_keybuf_next(&dc->writeback_keys);

        while (!kthread_should_stop() && next) {
                size = 0;
                nk = 0;

                do {
                        BUG_ON(ptr_stale(dc->disk.c, &next->key, 0));

                        /*
                         * Don't combine too many operations, even if they
                         * are all small.
                         */
                        if (nk >= MAX_WRITEBACKS_IN_PASS)
                                break;

                        /*
                         * If the current operation is very large, don't
                         * combine any further operations with it.
                         */
                        if (size >= MAX_WRITESIZE_IN_PASS)
                                break;

                        /*
                         * Operations are only eligible to be combined
                         * if they are contiguous.
                         *
                         * TODO: add a heuristic willing to fire a
                         * certain amount of non-contiguous IO per pass,
                         * so that we can benefit from backing device
                         * command queueing.
                         */
                        if ((nk != 0) && bkey_cmp(&keys[nk-1]->key,
                                                  &START_KEY(&next->key)))
                                break;

                        size += KEY_SIZE(&next->key);
                        keys[nk++] = next;
                } while ((next = bch_keybuf_next(&dc->writeback_keys)));

                /* Now we have gathered a set of 1..5 keys to write back. */
                for (i = 0; i < nk; i++) {
                        w = keys[i];

                        io = kzalloc(sizeof(struct dirty_io) +
                                     sizeof(struct bio_vec) *
                                     DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS),
                                     GFP_KERNEL);
                        if (!io)
                                goto err;

                        w->private = io;
                        io->dc = dc;
                        io->sequence = sequence++;

                        dirty_init(w);
                        bio_set_op_attrs(&io->bio, REQ_OP_READ, 0);
                        io->bio.bi_iter.bi_sector = PTR_OFFSET(&w->key, 0);
                        bio_set_dev(&io->bio,
                                    PTR_CACHE(dc->disk.c, &w->key, 0)->bdev);
                        io->bio.bi_end_io = read_dirty_endio;

                        if (bch_bio_alloc_pages(&io->bio, GFP_KERNEL))
                                goto err_free;

                        trace_bcache_writeback(&w->key);

                        down(&dc->in_flight);

                        /*
                         * We've acquired a semaphore for the maximum
                         * simultaneous number of writebacks; from here
                         * everything happens asynchronously.
                         */
                        closure_call(&io->cl, read_dirty_submit, NULL, &cl);
                }

                delay = writeback_delay(dc, size);

                /*
                 * If the control system would wait for at least half a
                 * second, and there's been no reqs hitting the backing disk
                 * for a while: use an alternate mode where we have at most
                 * one contiguous set of writebacks in flight at a time. If
                 * someone wants to do IO it will be quick, as it will only
                 * have to contend with one operation in flight, and we'll
                 * be round-tripping data to the backing disk as quickly as
                 * it can accept it.
                 */
                if (delay >= HZ / 2) {
                        /*
                         * 3 means at least 1.5 seconds, up to 7.5 if we
                         * have slowed way down.
                         */
                        if (atomic_inc_return(&dc->backing_idle) >= 3) {
                                /* Wait for current I/Os to finish */
                                closure_sync(&cl);
                                /* And immediately launch a new set. */
                                delay = 0;
                        }
                }

                while (!kthread_should_stop() && delay) {
                        schedule_timeout_interruptible(delay);
                        delay = writeback_delay(dc, 0);
                }
        }

        if (0) {
err_free:
                kfree(w->private);
err:
                bch_keybuf_del(&dc->writeback_keys, w);
        }
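
        /*
         * The if (0) above is a C idiom for out-of-line error handling:
         * the labels are only reachable via the gotos in the allocation
         * loop, so on a failed allocation we free the partially set up
         * dirty_io and release the offending key before falling through
         * to the final sync below.
         */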

        /*
         * Wait for outstanding writeback IOs to finish (and keybuf slots to be
         * freed) before refilling again
         */
        closure_sync(&cl);
}

/* Scan for dirty data */

void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned inode,
                                  uint64_t offset, int nr_sectors)
{
        struct bcache_device *d = c->devices[inode];
        unsigned stripe_offset, stripe, sectors_dirty;

        if (!d)
                return;

        stripe = offset_to_stripe(d, offset);
        stripe_offset = offset & (d->stripe_size - 1);

        while (nr_sectors) {
                int s = min_t(unsigned, abs(nr_sectors),
                              d->stripe_size - stripe_offset);

                if (nr_sectors < 0)
                        s = -s;

                if (stripe >= d->nr_stripes)
                        return;

                sectors_dirty = atomic_add_return(s,
                                        d->stripe_sectors_dirty + stripe);
                if (sectors_dirty == d->stripe_size)
                        set_bit(stripe, d->full_dirty_stripes);
                else
                        clear_bit(stripe, d->full_dirty_stripes);

                nr_sectors -= s;
                stripe_offset = 0;
                stripe++;
        }
}
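
/*
 * Worked example for bcache_dev_sectors_dirty_add() (illustrative
 * numbers): with stripe_size = 1024 sectors (it must be a power of
 * two, since the code masks with stripe_size - 1), marking 200 sectors
 * dirty at offset 3000 lands in stripe 2 at stripe_offset 952. The
 * first iteration adds the 72 sectors remaining in stripe 2, the
 * second adds the other 128 to stripe 3. A negative nr_sectors walks
 * the same stripes subtracting instead, and full_dirty_stripes tracks
 * which stripes are entirely dirty.
 */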

static bool dirty_pred(struct keybuf *buf, struct bkey *k)
{
        struct cached_dev *dc = container_of(buf, struct cached_dev,
                                             writeback_keys);

        BUG_ON(KEY_INODE(k) != dc->disk.id);

        return KEY_DIRTY(k);
}

static void refill_full_stripes(struct cached_dev *dc)
{
        struct keybuf *buf = &dc->writeback_keys;
        unsigned start_stripe, stripe, next_stripe;
        bool wrapped = false;

        stripe = offset_to_stripe(&dc->disk, KEY_OFFSET(&buf->last_scanned));

        if (stripe >= dc->disk.nr_stripes)
                stripe = 0;

        start_stripe = stripe;

        while (1) {
                stripe = find_next_bit(dc->disk.full_dirty_stripes,
                                       dc->disk.nr_stripes, stripe);

                if (stripe == dc->disk.nr_stripes)
                        goto next;

                next_stripe = find_next_zero_bit(dc->disk.full_dirty_stripes,
                                                 dc->disk.nr_stripes, stripe);

                buf->last_scanned = KEY(dc->disk.id,
                                        stripe * dc->disk.stripe_size, 0);

                bch_refill_keybuf(dc->disk.c, buf,
                                  &KEY(dc->disk.id,
                                       next_stripe * dc->disk.stripe_size, 0),
                                  dirty_pred);

                if (array_freelist_empty(&buf->freelist))
                        return;

                stripe = next_stripe;
next:
                if (wrapped && stripe > start_stripe)
                        return;

                if (stripe == dc->disk.nr_stripes) {
                        stripe = 0;
                        wrapped = true;
                }
        }
}
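
/*
 * refill_full_stripes() runs when partial_stripes_expensive is set,
 * which (judging by the name) marks backing devices such as RAID5/6
 * arrays where a sub-stripe write costs a read-modify-write. Writing
 * back completely dirty stripes first lets the backing device absorb
 * them as full-stripe writes; only if that leaves room in the keybuf
 * does refill_dirty() fall through to scanning for arbitrary dirty
 * keys.
 */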

/*
 * Returns true if we scanned the entire disk
 */
static bool refill_dirty(struct cached_dev *dc)
{
        struct keybuf *buf = &dc->writeback_keys;
        struct bkey start = KEY(dc->disk.id, 0, 0);
        struct bkey end = KEY(dc->disk.id, MAX_KEY_OFFSET, 0);
        struct bkey start_pos;

        /*
         * make sure keybuf pos is inside the range for this disk - at bringup
         * we might not be attached yet so this disk's inode nr isn't
         * initialized then
         */
        if (bkey_cmp(&buf->last_scanned, &start) < 0 ||
            bkey_cmp(&buf->last_scanned, &end) > 0)
                buf->last_scanned = start;

        if (dc->partial_stripes_expensive) {
                refill_full_stripes(dc);
                if (array_freelist_empty(&buf->freelist))
                        return false;
        }

        start_pos = buf->last_scanned;
        bch_refill_keybuf(dc->disk.c, buf, &end, dirty_pred);

        if (bkey_cmp(&buf->last_scanned, &end) < 0)
                return false;

        /*
         * If we get to the end start scanning again from the beginning, and
         * only scan up to where we initially started scanning from:
         */
        buf->last_scanned = start;
        bch_refill_keybuf(dc->disk.c, buf, &start_pos, dirty_pred);

        return bkey_cmp(&buf->last_scanned, &start_pos) >= 0;
}

static int bch_writeback_thread(void *arg)
{
        struct cached_dev *dc = arg;
        bool searched_full_index;

        bch_ratelimit_reset(&dc->writeback_rate);

        while (!kthread_should_stop()) {
                down_write(&dc->writeback_lock);
                if (!atomic_read(&dc->has_dirty) ||
                    (!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) &&
                     !dc->writeback_running)) {
                        up_write(&dc->writeback_lock);
                        set_current_state(TASK_INTERRUPTIBLE);

                        if (kthread_should_stop())
                                return 0;

                        schedule();
                        continue;
                }

                searched_full_index = refill_dirty(dc);

                if (searched_full_index &&
                    RB_EMPTY_ROOT(&dc->writeback_keys.keys)) {
                        atomic_set(&dc->has_dirty, 0);
                        cached_dev_put(dc);
                        SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);
                        bch_write_bdev_super(dc, NULL);
                }

                up_write(&dc->writeback_lock);

                read_dirty(dc);

                if (searched_full_index) {
                        unsigned delay = dc->writeback_delay * HZ;

                        while (delay &&
                               !kthread_should_stop() &&
                               !test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
                                delay = schedule_timeout_interruptible(delay);

                        bch_ratelimit_reset(&dc->writeback_rate);
                }
        }

        return 0;
}
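
/*
 * Lifecycle summary (derived from the loop above): the thread sleeps
 * until there is dirty data and writeback is enabled (or a detach is
 * in progress), refills the keybuf, and writes it back via
 * read_dirty(). When a full index scan finds nothing left dirty, the
 * backing device is marked clean in its superblock, and after each
 * complete scan the thread idles for writeback_delay seconds before
 * scanning again.
 */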

/* Init */

struct sectors_dirty_init {
        struct btree_op op;
        unsigned inode;
};

static int sectors_dirty_init_fn(struct btree_op *_op, struct btree *b,
                                 struct bkey *k)
{
        struct sectors_dirty_init *op = container_of(_op,
                                        struct sectors_dirty_init, op);
        if (KEY_INODE(k) > op->inode)
                return MAP_DONE;

        if (KEY_DIRTY(k))
                bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k),
                                             KEY_START(k), KEY_SIZE(k));

        return MAP_CONTINUE;
}

void bch_sectors_dirty_init(struct bcache_device *d)
{
        struct sectors_dirty_init op;

        bch_btree_op_init(&op.op, -1);
        op.inode = d->id;

        bch_btree_map_keys(&op.op, d->c, &KEY(op.inode, 0, 0),
                           sectors_dirty_init_fn, 0);
}

void bch_cached_dev_writeback_init(struct cached_dev *dc)
{
        sema_init(&dc->in_flight, 64);
        init_rwsem(&dc->writeback_lock);
        bch_keybuf_init(&dc->writeback_keys);

        dc->writeback_metadata = true;
        dc->writeback_running = true;
        dc->writeback_percent = 10;
        dc->writeback_delay = 30;
        dc->writeback_rate.rate = 1024;
        dc->writeback_rate_minimum = 8;

        dc->writeback_rate_update_seconds = 5;
        dc->writeback_rate_p_term_inverse = 40;
        dc->writeback_rate_i_term_inverse = 10000;

        INIT_DELAYED_WORK(&dc->writeback_rate_update, update_writeback_rate);
}
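
/*
 * Unit note on the defaults above (assuming the usual 512-byte
 * sectors): the rate fields count sectors per second -- sectors are
 * what writeback_delay() feeds the ratelimit -- so the initial rate of
 * 1024 is 512 KiB/s and the floor of 8 is 4 KiB/s; the PI controller
 * then re-tunes the rate every 5 seconds. in_flight caps writeback at
 * 64 outstanding dirty_io operations.
 */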

int bch_cached_dev_writeback_start(struct cached_dev *dc)
{
        dc->writeback_write_wq = alloc_workqueue("bcache_writeback_wq",
                                                 WQ_MEM_RECLAIM, 0);
        if (!dc->writeback_write_wq)
                return -ENOMEM;

        dc->writeback_thread = kthread_create(bch_writeback_thread, dc,
                                              "bcache_writeback");
        if (IS_ERR(dc->writeback_thread))
                return PTR_ERR(dc->writeback_thread);

        schedule_delayed_work(&dc->writeback_rate_update,
                              dc->writeback_rate_update_seconds * HZ);

        bch_writeback_queue(dc);

        return 0;
}