/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHE_WRITEBACK_H
#define _BCACHE_WRITEBACK_H

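/*
 * Cache utilization cutoffs, in percent of the cache set in use
 * (c->gc_stats.in_use): past CUTOFF_WRITEBACK only sync requests and
 * writes to already-dirty stripes take the writeback path; past
 * CUTOFF_WRITEBACK_SYNC, should_writeback() refuses everything.
 */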
#define CUTOFF_WRITEBACK	40
#define CUTOFF_WRITEBACK_SYNC	70

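/* Sum of the per-stripe dirty sector counts for one bcache device. */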
static inline uint64_t bcache_dev_sectors_dirty(struct bcache_device *d)
{
	uint64_t i, ret = 0;

	for (i = 0; i < d->nr_stripes; i++)
		ret += atomic_read(d->stripe_sectors_dirty + i);

	return ret;
}

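/*
 * Total dirty sectors across all flash-only volumes in the cache set;
 * holds bch_register_lock while walking c->devices[].
 */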
static inline uint64_t bcache_flash_devs_sectors_dirty(struct cache_set *c)
{
	uint64_t i, ret = 0;

	mutex_lock(&bch_register_lock);

	for (i = 0; i < c->nr_uuids; i++) {
		struct bcache_device *d = c->devices[i];

		if (!d || !UUID_FLASH_ONLY(&c->uuids[i]))
			continue;
		ret += bcache_dev_sectors_dirty(d);
	}

	mutex_unlock(&bch_register_lock);

	return ret;
}

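/* Map a sector offset on the device to the index of its stripe. */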
static inline unsigned offset_to_stripe(struct bcache_device *d,
					uint64_t offset)
{
	do_div(offset, d->stripe_size);
	return offset;
}

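/*
 * True if any stripe overlapping [offset, offset + nr_sectors) has
 * dirty sectors.
 */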
static inline bool bcache_dev_stripe_dirty(struct cached_dev *dc,
					   uint64_t offset,
					   unsigned nr_sectors)
{
	unsigned stripe = offset_to_stripe(&dc->disk, offset);

	while (1) {
		if (atomic_read(dc->disk.stripe_sectors_dirty + stripe))
			return true;

		if (nr_sectors <= dc->disk.stripe_size)
			return false;

		nr_sectors -= dc->disk.stripe_size;
		stripe++;
	}
}

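/*
 * Writeback policy check for an incoming write: refuse outright when
 * not in writeback mode, while detaching, or past
 * CUTOFF_WRITEBACK_SYNC; accept writes that touch an already-dirty
 * stripe when partial stripe writes are expensive; otherwise cache
 * sync requests, or anything while under CUTOFF_WRITEBACK.
 */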
static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
				    unsigned cache_mode, bool would_skip)
{
	unsigned in_use = dc->disk.c->gc_stats.in_use;

	if (cache_mode != CACHE_MODE_WRITEBACK ||
	    test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
	    in_use > CUTOFF_WRITEBACK_SYNC)
		return false;

	if (dc->partial_stripes_expensive &&
	    bcache_dev_stripe_dirty(dc, bio->bi_iter.bi_sector,
				    bio_sectors(bio)))
		return true;

	if (would_skip)
		return false;

	return op_is_sync(bio->bi_opf) || in_use <= CUTOFF_WRITEBACK;
}

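/* Wake the writeback thread, if one has been started. */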
static inline void bch_writeback_queue(struct cached_dev *dc)
{
	if (!IS_ERR_OR_NULL(dc->writeback_thread))
		wake_up_process(dc->writeback_thread);
}

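/*
 * Note that the device now holds dirty data: on the 0 -> 1 transition
 * of has_dirty, take a reference, persist BDEV_STATE_DIRTY in the
 * backing device superblock, and kick the writeback thread.
 */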
static inline void bch_writeback_add(struct cached_dev *dc)
{
	if (!atomic_read(&dc->has_dirty) &&
	    !atomic_xchg(&dc->has_dirty, 1)) {
		atomic_inc(&dc->count);

		if (BDEV_STATE(&dc->sb) != BDEV_STATE_DIRTY) {
			SET_BDEV_STATE(&dc->sb, BDEV_STATE_DIRTY);
			/* XXX: should do this synchronously */
			bch_write_bdev_super(dc, NULL);
		}

		bch_writeback_queue(dc);
	}
}

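/* Implemented in writeback.c: */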
void bcache_dev_sectors_dirty_add(struct cache_set *, unsigned, uint64_t, int);

void bch_sectors_dirty_init(struct bcache_device *);
void bch_cached_dev_writeback_init(struct cached_dev *);
int bch_cached_dev_writeback_start(struct cached_dev *);

#endif
111 | #endif |