// SPDX-License-Identifier: GPL-2.0
/*
 * background writeback - scan btree for dirty data and write it to the backing
 * device
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "writeback.h"

#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/sched/clock.h>
#include <trace/events/bcache.h>

/* Rate limiting */
static uint64_t __calc_target_rate(struct cached_dev *dc)
{
	struct cache_set *c = dc->disk.c;

	/*
	 * This is the size of the cache, minus the amount used for
	 * flash-only devices
	 */
	uint64_t cache_sectors = c->nbuckets * c->sb.bucket_size -
				bcache_flash_devs_sectors_dirty(c);

	/*
	 * Unfortunately there is no control of global dirty data.  If the
	 * user states that they want 10% dirty data in the cache, and has,
	 * e.g., 5 backing volumes of equal size, we try and ensure each
	 * backing volume uses about 2% of the cache for dirty data.
	 */
	uint32_t bdev_share =
		div64_u64(bdev_sectors(dc->bdev) << WRITEBACK_SHARE_SHIFT,
			  c->cached_dev_sectors);

	uint64_t cache_dirty_target =
		div_u64(cache_sectors * dc->writeback_percent, 100);

	/* Ensure each backing dev gets at least one dirty share */
	if (bdev_share < 1)
		bdev_share = 1;

	return (cache_dirty_target * bdev_share) >> WRITEBACK_SHARE_SHIFT;
}
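
/*
 * Worked example (illustrative numbers): with cache_sectors = 2,000,000
 * and writeback_percent = 10, cache_dirty_target = 200,000.  If five
 * equal-sized backing volumes share the cache, each volume's bdev_share
 * is roughly (1 << WRITEBACK_SHARE_SHIFT) / 5, so each volume gets a
 * target of about 40,000 sectors -- 2% of the cache, as the comment
 * above describes.
 */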

static void __update_writeback_rate(struct cached_dev *dc)
{
	/*
	 * PI controller:
	 * Figures out the amount that should be written per second.
	 *
	 * First, the error (number of sectors that are dirty beyond our
	 * target) is calculated.  The error is accumulated (numerically
	 * integrated).
	 *
	 * Then, the proportional value and integral value are scaled
	 * based on configured values.  These are stored as inverses to
	 * avoid fixed point math and to make configuration easy -- e.g.
	 * the default value of 40 for writeback_rate_p_term_inverse
	 * attempts to write at a rate that would retire all the dirty
	 * blocks in 40 seconds.
	 *
	 * The writeback_rate_i_term_inverse value of 10000 means that
	 * 1/10000th of the error is accumulated in the integral term per
	 * second.  This acts as a slow, long-term average that is not
	 * subject to variations in usage like the p term.
	 */
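	/*
	 * Worked example (illustrative numbers): if dirty exceeds target
	 * by 400,000 sectors, the p term is 400,000 / 40 = 10,000
	 * sectors/sec -- enough to retire the excess in 40 seconds --
	 * and each second of sustained error adds 400,000 / 10000 = 40
	 * sectors/sec to the scaled integral term.
	 */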
	int64_t target = __calc_target_rate(dc);
	int64_t dirty = bcache_dev_sectors_dirty(&dc->disk);
	int64_t error = dirty - target;
	int64_t proportional_scaled =
		div_s64(error, dc->writeback_rate_p_term_inverse);
	int64_t integral_scaled;
	uint32_t new_rate;

	if ((error < 0 && dc->writeback_rate_integral > 0) ||
	    (error > 0 && time_before64(local_clock(),
			 dc->writeback_rate.next + NSEC_PER_MSEC))) {
		/*
		 * Only decrease the integral term if it's more than
		 * zero.  Only increase the integral term if the device
		 * is keeping up.  (Don't wind up the integral
		 * ineffectively in either case).
		 *
		 * It's necessary to scale this by
		 * writeback_rate_update_seconds to keep the integral
		 * term dimensioned properly.
		 */
		dc->writeback_rate_integral += error *
			dc->writeback_rate_update_seconds;
	}

	integral_scaled = div_s64(dc->writeback_rate_integral,
			dc->writeback_rate_i_term_inverse);

	new_rate = clamp_t(int32_t, (proportional_scaled + integral_scaled),
			dc->writeback_rate_minimum, NSEC_PER_SEC);

	dc->writeback_rate_proportional = proportional_scaled;
	dc->writeback_rate_integral_scaled = integral_scaled;
	dc->writeback_rate_change = new_rate - dc->writeback_rate.rate;
	dc->writeback_rate.rate = new_rate;
	dc->writeback_rate_target = target;
}

static void update_writeback_rate(struct work_struct *work)
{
	struct cached_dev *dc = container_of(to_delayed_work(work),
					     struct cached_dev,
					     writeback_rate_update);
	struct cache_set *c = dc->disk.c;

	/*
	 * The cancel path should check BCACHE_DEV_RATE_DW_RUNNING before
	 * calling cancel_delayed_work_sync().
	 */
	set_bit(BCACHE_DEV_RATE_DW_RUNNING, &dc->disk.flags);
	/* paired with where BCACHE_DEV_RATE_DW_RUNNING is tested */
	smp_mb();

	/*
	 * CACHE_SET_IO_DISABLE might be set via sysfs interface,
	 * check it here too.
	 */
	if (!test_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags) ||
	    test_bit(CACHE_SET_IO_DISABLE, &c->flags)) {
		clear_bit(BCACHE_DEV_RATE_DW_RUNNING, &dc->disk.flags);
		/* paired with where BCACHE_DEV_RATE_DW_RUNNING is tested */
		smp_mb();
		return;
	}

	down_read(&dc->writeback_lock);

	if (atomic_read(&dc->has_dirty) &&
	    dc->writeback_percent)
		__update_writeback_rate(dc);

	up_read(&dc->writeback_lock);

	/*
	 * CACHE_SET_IO_DISABLE might be set via sysfs interface,
	 * check it here too.
	 */
	if (test_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags) &&
	    !test_bit(CACHE_SET_IO_DISABLE, &c->flags)) {
		schedule_delayed_work(&dc->writeback_rate_update,
			      dc->writeback_rate_update_seconds * HZ);
	}

	/*
	 * The cancel path should check BCACHE_DEV_RATE_DW_RUNNING before
	 * calling cancel_delayed_work_sync().
	 */
	clear_bit(BCACHE_DEV_RATE_DW_RUNNING, &dc->disk.flags);
	/* paired with where BCACHE_DEV_RATE_DW_RUNNING is tested */
	smp_mb();
}
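
/*
 * The test paired with the smp_mb() calls above lives on the teardown
 * side (in super.c in this era of the tree, a helper along the lines of
 * cancel_writeback_rate_update_dwork()), which waits for
 * BCACHE_DEV_RATE_DW_RUNNING to clear before it is safe to call
 * cancel_delayed_work_sync() on writeback_rate_update.
 */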

static unsigned writeback_delay(struct cached_dev *dc, unsigned sectors)
{
	if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
	    !dc->writeback_percent)
		return 0;

	return bch_next_delay(&dc->writeback_rate, sectors);
}
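
/*
 * bch_next_delay() (in util.c) implements the rate limit roughly as a
 * token bucket: each call advances the ratelimit's next-allowed
 * timestamp in proportion to sectors / rate and returns how long the
 * caller should sleep to stay at dc->writeback_rate (a simplified
 * description; see bch_next_delay() for the exact clamping).
 */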

struct dirty_io {
	struct closure cl;
	struct cached_dev *dc;
	struct bio bio;
};
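
/*
 * Each dirty_io carries one key's worth of writeback through a closure
 * pipeline: read_dirty_submit() reads the dirty extent from the cache
 * device, write_dirty() writes it out to the backing device, and
 * write_dirty_finish() clears the dirty bit in the btree and frees the
 * keybuf slot.
 */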

static void dirty_init(struct keybuf_key *w)
{
	struct dirty_io *io = w->private;
	struct bio *bio = &io->bio;

	bio_init(bio, bio->bi_inline_vecs,
		 DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS));
	if (!io->dc->writeback_percent)
		bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));

	bio->bi_iter.bi_size = KEY_SIZE(&w->key) << 9;
	bio->bi_private = w;
	bch_bio_map(bio, NULL);
}

static void dirty_io_destructor(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);
	kfree(io);
}

static void write_dirty_finish(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);
	struct keybuf_key *w = io->bio.bi_private;
	struct cached_dev *dc = io->dc;

	bio_free_pages(&io->bio);

	/* This is kind of a dumb way of signalling errors. */
	if (KEY_DIRTY(&w->key)) {
		int ret;
		unsigned i;
		struct keylist keys;

		bch_keylist_init(&keys);

		bkey_copy(keys.top, &w->key);
		SET_KEY_DIRTY(keys.top, false);
		bch_keylist_push(&keys);

		for (i = 0; i < KEY_PTRS(&w->key); i++)
			atomic_inc(&PTR_BUCKET(dc->disk.c, &w->key, i)->pin);

		ret = bch_btree_insert(dc->disk.c, &keys, NULL, &w->key);

		if (ret)
			trace_bcache_writeback_collision(&w->key);

		atomic_long_inc(ret
				? &dc->disk.c->writeback_keys_failed
				: &dc->disk.c->writeback_keys_done);
	}

	bch_keybuf_del(&dc->writeback_keys, w);
	up(&dc->in_flight);

	closure_return_with_destructor(cl, dirty_io_destructor);
}

static void dirty_endio(struct bio *bio)
{
	struct keybuf_key *w = bio->bi_private;
	struct dirty_io *io = w->private;

	if (bio->bi_status) {
		SET_KEY_DIRTY(&w->key, false);
		bch_count_backing_io_errors(io->dc, bio);
	}

	closure_put(&io->cl);
}

static void write_dirty(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);
	struct keybuf_key *w = io->bio.bi_private;

	/*
	 * IO errors are signalled using the dirty bit on the key.
	 * If we failed to read, we should not attempt to write to the
	 * backing device.  Instead, immediately go to write_dirty_finish
	 * to clean up.
	 */
	if (KEY_DIRTY(&w->key)) {
		dirty_init(w);
		bio_set_op_attrs(&io->bio, REQ_OP_WRITE, 0);
		io->bio.bi_iter.bi_sector = KEY_START(&w->key);
		bio_set_dev(&io->bio, io->dc->bdev);
		io->bio.bi_end_io = dirty_endio;

		/* I/O request sent to backing device */
		closure_bio_submit(io->dc->disk.c, &io->bio, cl);
	}

	continue_at(cl, write_dirty_finish, io->dc->writeback_write_wq);
}

static void read_dirty_endio(struct bio *bio)
{
	struct keybuf_key *w = bio->bi_private;
	struct dirty_io *io = w->private;

	bch_count_io_errors(PTR_CACHE(io->dc->disk.c, &w->key, 0),
			    bio->bi_status, "reading dirty data from cache");

	dirty_endio(bio);
}

static void read_dirty_submit(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);

	closure_bio_submit(io->dc->disk.c, &io->bio, cl);

	continue_at(cl, write_dirty, io->dc->writeback_write_wq);
}

static void read_dirty(struct cached_dev *dc)
{
	unsigned delay = 0;
	struct keybuf_key *w;
	struct dirty_io *io;
	struct closure cl;

	closure_init_stack(&cl);

	/*
	 * XXX: if we error, background writeback just spins. Should use some
	 * mempools.
	 */

	while (!kthread_should_stop() &&
	       !test_bit(CACHE_SET_IO_DISABLE, &dc->disk.c->flags)) {

		w = bch_keybuf_next(&dc->writeback_keys);
		if (!w)
			break;

		BUG_ON(ptr_stale(dc->disk.c, &w->key, 0));

		if (KEY_START(&w->key) != dc->last_read ||
		    jiffies_to_msecs(delay) > 50)
			while (!kthread_should_stop() && delay)
				delay = schedule_timeout_interruptible(delay);

		dc->last_read = KEY_OFFSET(&w->key);

		io = kzalloc(sizeof(struct dirty_io) + sizeof(struct bio_vec)
			     * DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS),
			     GFP_KERNEL);
		if (!io)
			goto err;

		w->private = io;
		io->dc = dc;

		dirty_init(w);
		bio_set_op_attrs(&io->bio, REQ_OP_READ, 0);
		io->bio.bi_iter.bi_sector = PTR_OFFSET(&w->key, 0);
		bio_set_dev(&io->bio, PTR_CACHE(dc->disk.c, &w->key, 0)->bdev);
		io->bio.bi_end_io = read_dirty_endio;

		if (bio_alloc_pages(&io->bio, GFP_KERNEL))
			goto err_free;

		trace_bcache_writeback(&w->key);

		down(&dc->in_flight);
		closure_call(&io->cl, read_dirty_submit, NULL, &cl);

		delay = writeback_delay(dc, KEY_SIZE(&w->key));
	}

	if (0) {
err_free:
		kfree(w->private);
err:
		bch_keybuf_del(&dc->writeback_keys, w);
	}

	/*
	 * Wait for outstanding writeback IOs to finish (and keybuf slots to be
	 * freed) before refilling again
	 */
	closure_sync(&cl);
}
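
/*
 * Note the backpressure in read_dirty(): down(&dc->in_flight) blocks
 * once 64 IOs are outstanding (sema_init(&dc->in_flight, 64) in
 * bch_cached_dev_writeback_init()), and each write_dirty_finish() does
 * the matching up(), so at most 64 keys are in flight at a time.
 */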

/* Scan for dirty data */

void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned inode,
				  uint64_t offset, int nr_sectors)
{
	struct bcache_device *d = c->devices[inode];
	unsigned stripe_offset, stripe, sectors_dirty;

	if (!d)
		return;

	stripe = offset_to_stripe(d, offset);
	stripe_offset = offset & (d->stripe_size - 1);

	while (nr_sectors) {
		int s = min_t(unsigned, abs(nr_sectors),
			      d->stripe_size - stripe_offset);

		if (nr_sectors < 0)
			s = -s;

		if (stripe >= d->nr_stripes)
			return;

		sectors_dirty = atomic_add_return(s,
					d->stripe_sectors_dirty + stripe);
		if (sectors_dirty == d->stripe_size)
			set_bit(stripe, d->full_dirty_stripes);
		else
			clear_bit(stripe, d->full_dirty_stripes);

		nr_sectors -= s;
		stripe_offset = 0;
		stripe++;
	}
}
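
/*
 * Worked example (illustrative numbers): with stripe_size = 1024
 * sectors, adding 8 sectors at offset 1020 credits 4 sectors to stripe
 * 0 and 4 to stripe 1; a negative nr_sectors walks the same stripes
 * subtracting instead.  A stripe whose counter reaches stripe_size is
 * flagged in full_dirty_stripes for refill_full_stripes() below.
 */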

static bool dirty_pred(struct keybuf *buf, struct bkey *k)
{
	struct cached_dev *dc = container_of(buf, struct cached_dev,
					     writeback_keys);

	BUG_ON(KEY_INODE(k) != dc->disk.id);

	return KEY_DIRTY(k);
}

static void refill_full_stripes(struct cached_dev *dc)
{
	struct keybuf *buf = &dc->writeback_keys;
	unsigned start_stripe, stripe, next_stripe;
	bool wrapped = false;

	stripe = offset_to_stripe(&dc->disk, KEY_OFFSET(&buf->last_scanned));

	if (stripe >= dc->disk.nr_stripes)
		stripe = 0;

	start_stripe = stripe;

	while (1) {
		stripe = find_next_bit(dc->disk.full_dirty_stripes,
				       dc->disk.nr_stripes, stripe);

		if (stripe == dc->disk.nr_stripes)
			goto next;

		next_stripe = find_next_zero_bit(dc->disk.full_dirty_stripes,
						 dc->disk.nr_stripes, stripe);

		buf->last_scanned = KEY(dc->disk.id,
					stripe * dc->disk.stripe_size, 0);

		bch_refill_keybuf(dc->disk.c, buf,
				  &KEY(dc->disk.id,
				       next_stripe * dc->disk.stripe_size, 0),
				  dirty_pred);

		if (array_freelist_empty(&buf->freelist))
			return;

		stripe = next_stripe;
next:
		if (wrapped && stripe > start_stripe)
			return;

		if (stripe == dc->disk.nr_stripes) {
			stripe = 0;
			wrapped = true;
		}
	}
}
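
/*
 * refill_full_stripes() is only used when partial_stripes_expensive is
 * set (see refill_dirty() below): it walks full_dirty_stripes with
 * find_next_bit()/find_next_zero_bit() and feeds only runs of
 * completely dirty stripes to bch_refill_keybuf(), so writeback can
 * issue full-stripe writes to the backing device.
 */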

/*
 * Returns true if we scanned the entire disk
 */
static bool refill_dirty(struct cached_dev *dc)
{
	struct keybuf *buf = &dc->writeback_keys;
	struct bkey start = KEY(dc->disk.id, 0, 0);
	struct bkey end = KEY(dc->disk.id, MAX_KEY_OFFSET, 0);
	struct bkey start_pos;

	/*
	 * make sure keybuf pos is inside the range for this disk - at bringup
	 * we might not be attached yet so this disk's inode nr isn't
	 * initialized then
	 */
	if (bkey_cmp(&buf->last_scanned, &start) < 0 ||
	    bkey_cmp(&buf->last_scanned, &end) > 0)
		buf->last_scanned = start;

	if (dc->partial_stripes_expensive) {
		refill_full_stripes(dc);
		if (array_freelist_empty(&buf->freelist))
			return false;
	}

	start_pos = buf->last_scanned;
	bch_refill_keybuf(dc->disk.c, buf, &end, dirty_pred);

	if (bkey_cmp(&buf->last_scanned, &end) < 0)
		return false;

	/*
	 * If we get to the end start scanning again from the beginning, and
	 * only scan up to where we initially started scanning from:
	 */
	buf->last_scanned = start;
	bch_refill_keybuf(dc->disk.c, buf, &start_pos, dirty_pred);

	return bkey_cmp(&buf->last_scanned, &start_pos) >= 0;
}
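
/*
 * Example of the wrap-around above: if last_scanned begins at offset X,
 * the first bch_refill_keybuf() scans [X, MAX_KEY_OFFSET]; only if that
 * reaches the end does the second pass scan [0, X), so a round that
 * returns true ("searched_full_index") has visited every key for this
 * device exactly once.
 */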

static int bch_writeback_thread(void *arg)
{
	struct cached_dev *dc = arg;
	struct cache_set *c = dc->disk.c;
	bool searched_full_index;

	bch_ratelimit_reset(&dc->writeback_rate);

	while (!kthread_should_stop() &&
	       !test_bit(CACHE_SET_IO_DISABLE, &c->flags)) {
		down_write(&dc->writeback_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		/*
		 * If the bcache device is detaching, skip here and continue
		 * to perform writeback.  Otherwise, if there is no dirty
		 * data on the cache, or there is dirty data but writeback
		 * is disabled, the writeback thread should sleep here and
		 * wait for someone else to wake it up.
		 */
		if (!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) &&
		    (!atomic_read(&dc->has_dirty) || !dc->writeback_running)) {
			up_write(&dc->writeback_lock);

			if (kthread_should_stop() ||
			    test_bit(CACHE_SET_IO_DISABLE, &c->flags)) {
				set_current_state(TASK_RUNNING);
				break;
			}

			schedule();
			continue;
		}
		set_current_state(TASK_RUNNING);

		searched_full_index = refill_dirty(dc);

		if (searched_full_index &&
		    RB_EMPTY_ROOT(&dc->writeback_keys.keys)) {
			atomic_set(&dc->has_dirty, 0);
			SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);
			bch_write_bdev_super(dc, NULL);
			/*
			 * If the bcache device is detaching via the sysfs
			 * interface, the writeback thread should stop once
			 * there is no dirty data left on the cache.  The
			 * BCACHE_DEV_DETACHING flag is set in
			 * bch_cached_dev_detach().
			 */
			if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags)) {
				up_write(&dc->writeback_lock);
				break;
			}
		}

		up_write(&dc->writeback_lock);

		read_dirty(dc);

		if (searched_full_index) {
			unsigned delay = dc->writeback_delay * HZ;

			while (delay &&
			       !kthread_should_stop() &&
			       !test_bit(CACHE_SET_IO_DISABLE, &c->flags) &&
			       !test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
				delay = schedule_timeout_interruptible(delay);

			bch_ratelimit_reset(&dc->writeback_rate);
		}
	}

	if (dc->writeback_write_wq) {
		flush_workqueue(dc->writeback_write_wq);
		destroy_workqueue(dc->writeback_write_wq);
	}
	cached_dev_put(dc);
	wait_for_kthread_stop();

	return 0;
}

/* Init */

struct sectors_dirty_init {
	struct btree_op op;
	unsigned inode;
};

static int sectors_dirty_init_fn(struct btree_op *_op, struct btree *b,
				 struct bkey *k)
{
	struct sectors_dirty_init *op = container_of(_op,
					struct sectors_dirty_init, op);
	if (KEY_INODE(k) > op->inode)
		return MAP_DONE;

	if (KEY_DIRTY(k))
		bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k),
					     KEY_START(k), KEY_SIZE(k));

	return MAP_CONTINUE;
}

void bch_sectors_dirty_init(struct bcache_device *d)
{
	struct sectors_dirty_init op;

	bch_btree_op_init(&op.op, -1);
	op.inode = d->id;

	bch_btree_map_keys(&op.op, d->c, &KEY(op.inode, 0, 0),
			   sectors_dirty_init_fn, 0);
}

void bch_cached_dev_writeback_init(struct cached_dev *dc)
{
	sema_init(&dc->in_flight, 64);
	init_rwsem(&dc->writeback_lock);
	bch_keybuf_init(&dc->writeback_keys);

	dc->writeback_metadata = true;
	dc->writeback_running = false;
	dc->writeback_percent = 10;
	dc->writeback_delay = 30;
	dc->writeback_rate.rate = 1024;
	dc->writeback_rate_minimum = 8;

	dc->writeback_rate_update_seconds = WRITEBACK_RATE_UPDATE_SECS_DEFAULT;
	dc->writeback_rate_p_term_inverse = 40;
	dc->writeback_rate_i_term_inverse = 10000;

	WARN_ON(test_and_clear_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags));
	INIT_DELAYED_WORK(&dc->writeback_rate_update, update_writeback_rate);
}
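
/*
 * The defaults above match the PI discussion in
 * __update_writeback_rate(): rates are in sectors per second, starting
 * at 1024 and never clamped below writeback_rate_minimum (8), with p
 * and i term inverses of 40 and 10000.
 */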

int bch_cached_dev_writeback_start(struct cached_dev *dc)
{
	dc->writeback_write_wq = alloc_workqueue("bcache_writeback_wq",
						 WQ_MEM_RECLAIM, 0);
	if (!dc->writeback_write_wq)
		return -ENOMEM;

	cached_dev_get(dc);
	dc->writeback_thread = kthread_create(bch_writeback_thread, dc,
					      "bcache_writeback");
	if (IS_ERR(dc->writeback_thread)) {
		cached_dev_put(dc);
		destroy_workqueue(dc->writeback_write_wq);
		return PTR_ERR(dc->writeback_thread);
	}
	dc->writeback_running = true;

	WARN_ON(test_and_set_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags));
	schedule_delayed_work(&dc->writeback_rate_update,
			      dc->writeback_rate_update_seconds * HZ);

	bch_writeback_queue(dc);

	return 0;
}