/*
 * Main bcache entry point - handle a read or a write request and decide what to
 * do with it; the make_request functions are called by the block layer.
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "request.h"
#include "writeback.h"

#include <linux/cgroup.h>
#include <linux/module.h>
#include <linux/hash.h>
#include <linux/random.h>
#include "blk-cgroup.h"

#include <trace/events/bcache.h>

#define CUTOFF_CACHE_ADD	95
#define CUTOFF_CACHE_READA	90

struct kmem_cache *bch_search_cache;

static void bch_data_insert_start(struct closure *);

/* Cgroup interface */

#ifdef CONFIG_CGROUP_BCACHE
static struct bch_cgroup bcache_default_cgroup = { .cache_mode = -1 };

static struct bch_cgroup *cgroup_to_bcache(struct cgroup *cgroup)
{
	struct cgroup_subsys_state *css;
	return cgroup &&
		(css = cgroup_subsys_state(cgroup, bcache_subsys_id))
		? container_of(css, struct bch_cgroup, css)
		: &bcache_default_cgroup;
}

struct bch_cgroup *bch_bio_to_cgroup(struct bio *bio)
{
	struct cgroup_subsys_state *css = bio->bi_css
		? cgroup_subsys_state(bio->bi_css->cgroup, bcache_subsys_id)
		: task_subsys_state(current, bcache_subsys_id);

	return css
		? container_of(css, struct bch_cgroup, css)
		: &bcache_default_cgroup;
}

static ssize_t cache_mode_read(struct cgroup *cgrp, struct cftype *cft,
			       struct file *file,
			       char __user *buf, size_t nbytes, loff_t *ppos)
{
	char tmp[1024];
	int len = bch_snprint_string_list(tmp, PAGE_SIZE, bch_cache_modes,
					  cgroup_to_bcache(cgrp)->cache_mode + 1);

	if (len < 0)
		return len;

	return simple_read_from_buffer(buf, nbytes, ppos, tmp, len);
}

static int cache_mode_write(struct cgroup *cgrp, struct cftype *cft,
			    const char *buf)
{
	int v = bch_read_string_list(buf, bch_cache_modes);
	if (v < 0)
		return v;

	cgroup_to_bcache(cgrp)->cache_mode = v - 1;
	return 0;
}

static u64 bch_verify_read(struct cgroup *cgrp, struct cftype *cft)
{
	return cgroup_to_bcache(cgrp)->verify;
}

static int bch_verify_write(struct cgroup *cgrp, struct cftype *cft, u64 val)
{
	cgroup_to_bcache(cgrp)->verify = val;
	return 0;
}

static u64 bch_cache_hits_read(struct cgroup *cgrp, struct cftype *cft)
{
	struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp);
	return atomic_read(&bcachecg->stats.cache_hits);
}

static u64 bch_cache_misses_read(struct cgroup *cgrp, struct cftype *cft)
{
	struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp);
	return atomic_read(&bcachecg->stats.cache_misses);
}

static u64 bch_cache_bypass_hits_read(struct cgroup *cgrp,
				      struct cftype *cft)
{
	struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp);
	return atomic_read(&bcachecg->stats.cache_bypass_hits);
}

static u64 bch_cache_bypass_misses_read(struct cgroup *cgrp,
					struct cftype *cft)
{
	struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp);
	return atomic_read(&bcachecg->stats.cache_bypass_misses);
}

static struct cftype bch_files[] = {
	{
		.name		= "cache_mode",
		.read		= cache_mode_read,
		.write_string	= cache_mode_write,
	},
	{
		.name		= "verify",
		.read_u64	= bch_verify_read,
		.write_u64	= bch_verify_write,
	},
	{
		.name		= "cache_hits",
		.read_u64	= bch_cache_hits_read,
	},
	{
		.name		= "cache_misses",
		.read_u64	= bch_cache_misses_read,
	},
	{
		.name		= "cache_bypass_hits",
		.read_u64	= bch_cache_bypass_hits_read,
	},
	{
		.name		= "cache_bypass_misses",
		.read_u64	= bch_cache_bypass_misses_read,
	},
	{ }	/* terminate */
};

static void init_bch_cgroup(struct bch_cgroup *cg)
{
	cg->cache_mode = -1;
}

static struct cgroup_subsys_state *bcachecg_create(struct cgroup *cgroup)
{
	struct bch_cgroup *cg;

	cg = kzalloc(sizeof(*cg), GFP_KERNEL);
	if (!cg)
		return ERR_PTR(-ENOMEM);
	init_bch_cgroup(cg);
	return &cg->css;
}

static void bcachecg_destroy(struct cgroup *cgroup)
{
	struct bch_cgroup *cg = cgroup_to_bcache(cgroup);
	free_css_id(&bcache_subsys, &cg->css);
	kfree(cg);
}

struct cgroup_subsys bcache_subsys = {
	.create		= bcachecg_create,
	.destroy	= bcachecg_destroy,
	.subsys_id	= bcache_subsys_id,
	.name		= "bcache",
	.module		= THIS_MODULE,
};
EXPORT_SYMBOL_GPL(bcache_subsys);
#endif

static unsigned cache_mode(struct cached_dev *dc, struct bio *bio)
{
#ifdef CONFIG_CGROUP_BCACHE
	int r = bch_bio_to_cgroup(bio)->cache_mode;
	if (r >= 0)
		return r;
#endif
	return BDEV_CACHE_MODE(&dc->sb);
}

static bool verify(struct cached_dev *dc, struct bio *bio)
{
#ifdef CONFIG_CGROUP_BCACHE
	if (bch_bio_to_cgroup(bio)->verify)
		return true;
#endif
	return dc->verify;
}

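/*
 * Checksum the data @bio points to and stash the result in @k, in the
 * pointer slot just past the key's last valid pointer. The top bit of
 * the slot is masked off; it appears to be reserved.
 */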
static void bio_csum(struct bio *bio, struct bkey *k)
{
	struct bio_vec *bv;
	uint64_t csum = 0;
	int i;

	bio_for_each_segment(bv, bio, i) {
		void *d = kmap(bv->bv_page) + bv->bv_offset;
		csum = bch_crc64_update(csum, d, bv->bv_len);
		kunmap(bv->bv_page);
	}

	k->ptr[KEY_PTRS(k)] = csum & (~0ULL >> 1);
}

/* Insert data into cache */

static void bch_data_insert_keys(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
	atomic_t *journal_ref = NULL;
	struct bkey *replace_key = op->replace ? &op->replace_key : NULL;
	int ret;

	/*
	 * If we're looping, we might already be waiting on another journal
	 * write - we can't wait on more than one journal write at a time.
	 *
	 * XXX: this looks wrong
	 */
#if 0
	while (atomic_read(&s->cl.remaining) & CLOSURE_WAITING)
		closure_sync(&s->cl);
#endif

	if (!op->replace)
		journal_ref = bch_journal(op->c, &op->insert_keys,
					  op->flush_journal ? cl : NULL);

	ret = bch_btree_insert(op->c, &op->insert_keys,
			       journal_ref, replace_key);
	if (ret == -ESRCH) {
		op->replace_collision = true;
	} else if (ret) {
		op->error		= -ENOMEM;
		op->insert_data_done	= true;
	}

	if (journal_ref)
		atomic_dec_bug(journal_ref);

	if (!op->insert_data_done)
		continue_at(cl, bch_data_insert_start, bcache_wq);

	bch_keylist_free(&op->insert_keys);
	closure_return(cl);
}

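/*
 * Bypass path for bch_data_insert_start(): instead of caching the data,
 * append zero-pointer keys covering the bio's range so that any stale
 * data already in the cache for that range gets invalidated.
 */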
static void bch_data_invalidate(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
	struct bio *bio = op->bio;

	pr_debug("invalidating %i sectors from %llu",
		 bio_sectors(bio), (uint64_t) bio->bi_sector);

	while (bio_sectors(bio)) {
		unsigned sectors = min(bio_sectors(bio),
				       1U << (KEY_SIZE_BITS - 1));

		if (bch_keylist_realloc(&op->insert_keys, 0, op->c))
			goto out;

		bio->bi_sector	+= sectors;
		bio->bi_size	-= sectors << 9;

		bch_keylist_add(&op->insert_keys,
				&KEY(op->inode, bio->bi_sector, sectors));
	}

	op->insert_data_done = true;
	bio_put(bio);
out:
	continue_at(cl, bch_data_insert_keys, bcache_wq);
}

static void bch_data_insert_error(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);

	/*
	 * Our data write just errored, which means we've got a bunch of keys to
	 * insert that point to data that wasn't successfully written.
	 *
	 * We don't have to insert those keys but we still have to invalidate
	 * that region of the cache - so, if we just strip off all the pointers
	 * from the keys we'll accomplish just that.
	 */

	struct bkey *src = op->insert_keys.keys, *dst = op->insert_keys.keys;

	while (src != op->insert_keys.top) {
		struct bkey *n = bkey_next(src);

		SET_KEY_PTRS(src, 0);
		memmove(dst, src, bkey_bytes(src));

		dst = bkey_next(dst);
		src = n;
	}

	op->insert_keys.top = dst;

	bch_data_insert_keys(cl);
}

static void bch_data_insert_endio(struct bio *bio, int error)
{
	struct closure *cl = bio->bi_private;
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);

	if (error) {
		/* TODO: We could try to recover from this. */
		if (op->writeback)
			op->error = error;
		else if (!op->replace)
			set_closure_fn(cl, bch_data_insert_error, bcache_wq);
		else
			set_closure_fn(cl, NULL, NULL);
	}

	bch_bbio_endio(op->c, bio, error, "writing data to cache");
}

static void bch_data_insert_start(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
	struct bio *bio = op->bio, *n;

	if (op->bypass)
		return bch_data_invalidate(cl);

	if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0) {
		set_gc_sectors(op->c);
		wake_up_gc(op->c);
	}

	/*
	 * Journal writes are marked REQ_FLUSH; if the original write was a
	 * flush, it'll wait on the journal write.
	 */
	bio->bi_rw &= ~(REQ_FLUSH|REQ_FUA);

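	/*
	 * Write the data to the cache, possibly as several chunks: each pass
	 * below allocates as many sectors as it can get from a bucket, builds
	 * a key for them, splits that much off the front of the bio and
	 * submits it, until the whole bio has been issued.
	 */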
	do {
		unsigned i;
		struct bkey *k;
		struct bio_set *split = op->c->bio_split;

		/* 1 for the device pointer and 1 for the checksum */
		if (bch_keylist_realloc(&op->insert_keys,
					1 + (op->csum ? 1 : 0),
					op->c))
			continue_at(cl, bch_data_insert_keys, bcache_wq);

		k = op->insert_keys.top;
		bkey_init(k);
		SET_KEY_INODE(k, op->inode);
		SET_KEY_OFFSET(k, bio->bi_sector);

		if (!bch_alloc_sectors(op->c, k, bio_sectors(bio),
				       op->write_point, op->write_prio,
				       op->writeback))
			goto err;

		n = bch_bio_split(bio, KEY_SIZE(k), GFP_NOIO, split);

		n->bi_end_io	= bch_data_insert_endio;
		n->bi_private	= cl;

		if (op->writeback) {
			SET_KEY_DIRTY(k, true);

			for (i = 0; i < KEY_PTRS(k); i++)
				SET_GC_MARK(PTR_BUCKET(op->c, k, i),
					    GC_MARK_DIRTY);
		}

		SET_KEY_CSUM(k, op->csum);
		if (KEY_CSUM(k))
			bio_csum(n, k);

		trace_bcache_cache_insert(k);
		bch_keylist_push(&op->insert_keys);

		n->bi_rw |= REQ_WRITE;
		bch_submit_bbio(n, op->c, k, 0);
	} while (n != bio);

	op->insert_data_done = true;
	continue_at(cl, bch_data_insert_keys, bcache_wq);
err:
	/* bch_alloc_sectors() blocks if s->writeback = true */
	BUG_ON(op->writeback);

	/*
	 * But if it's not a writeback write we'd rather just bail out if
	 * there aren't any buckets ready to write to - it might take a while
	 * and we might be starving btree writes for gc or something.
	 */

	if (!op->replace) {
		/*
		 * Writethrough write: We can't complete the write until we've
		 * updated the index. But we don't want to delay the write while
		 * we wait for buckets to be freed up, so just invalidate the
		 * rest of the write.
		 */
		op->bypass = true;
		return bch_data_invalidate(cl);
	} else {
		/*
		 * From a cache miss, we can just insert the keys for the data
		 * we have written or bail out if we didn't do anything.
		 */
		op->insert_data_done = true;
		bio_put(bio);

		if (!bch_keylist_empty(&op->insert_keys))
			continue_at(cl, bch_data_insert_keys, bcache_wq);
		else
			closure_return(cl);
	}
}

/**
 * bch_data_insert - stick some data in the cache
 *
 * This is the starting point for any data to end up in a cache device; it could
 * be from a normal write, or a writeback write, or a write to a flash only
 * volume - it's also used by the moving garbage collector to compact data in
 * mostly empty buckets.
 *
 * It first writes the data to the cache, creating a list of keys to be inserted
 * (if the data had to be fragmented there will be multiple keys); after the
 * data is written it calls bch_journal, and after the keys have been added to
 * the next journal write they're inserted into the btree.
 *
 * It inserts the data in op->bio; bi_sector is used for the key offset,
 * and op->inode is used for the key inode.
 *
 * If op->bypass is true, instead of inserting the data it invalidates the
 * region of the cache represented by op->bio and op->inode.
 */
void bch_data_insert(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);

	trace_bcache_write(op->bio, op->writeback, op->bypass);

	bch_keylist_init(&op->insert_keys);
	bio_get(op->bio);
	bch_data_insert_start(cl);
}

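/*
 * Typical call pattern (as in cached_dev_write() and
 * flash_dev_make_request() below): fill in op->c, op->inode, op->bio
 * and the relevant flags, then
 *
 *	closure_call(&op->cl, bch_data_insert, NULL, parent_cl);
 *
 * and let the parent closure's continuation run once the insert
 * finishes.
 */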
/* Congested? */

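/*
 * Returns 0 if the cache isn't considered congested (or if no congestion
 * thresholds are configured); otherwise returns a nonzero cutoff that
 * check_should_bypass() compares against the size of recent IO - the more
 * congested the cache, the smaller the cutoff, so more IO bypasses it.
 * The congestion count decays with the time since the last congested IO,
 * the remainder is scaled up with fract_exp_two(), and a little randomness
 * is subtracted so the cutoff isn't a hard edge.
 */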
unsigned bch_get_congested(struct cache_set *c)
{
	int i;
	long rand;

	if (!c->congested_read_threshold_us &&
	    !c->congested_write_threshold_us)
		return 0;

	i = (local_clock_us() - c->congested_last_us) / 1024;
	if (i < 0)
		return 0;

	i += atomic_read(&c->congested);
	if (i >= 0)
		return 0;

	i += CONGESTED_MAX;

	if (i > 0)
		i = fract_exp_two(i, 6);

	rand = get_random_int();
	i -= bitmap_weight(&rand, BITS_PER_LONG);

	return i > 0 ? i : 1;
}

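/*
 * Fold the task's just-ended run of sequential IO into its running
 * average (ewma_add() keeps an exponentially weighted moving average,
 * here with weight 8), then reset the per-task counter.
 */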
static void add_sequential(struct task_struct *t)
{
	ewma_add(t->sequential_io_avg,
		 t->sequential_io, 8, 0);

	t->sequential_io = 0;
}

static struct hlist_head *iohash(struct cached_dev *dc, uint64_t k)
{
	return &dc->io_hash[hash_64(k, RECENT_IO_BITS)];
}

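/*
 * Decide whether this IO should skip the cache and go straight to the
 * backing device. Discards, unaligned IO, an over-full cache and cache
 * modes that exclude the IO always bypass. Otherwise we look the IO up
 * in a small LRU table of recent IO (dc->io_hash/dc->io_lru) to detect
 * sequential streams, and bypass when a stream passes
 * dc->sequential_cutoff or when the IO is large and the cache is
 * congested.
 */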
static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
{
	struct cache_set *c = dc->disk.c;
	unsigned mode = cache_mode(dc, bio);
	unsigned sectors, congested = bch_get_congested(c);
	struct task_struct *task = current;
	struct io *i;

	if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
	    c->gc_stats.in_use > CUTOFF_CACHE_ADD ||
	    (bio->bi_rw & REQ_DISCARD))
		goto skip;

	if (mode == CACHE_MODE_NONE ||
	    (mode == CACHE_MODE_WRITEAROUND &&
	     (bio->bi_rw & REQ_WRITE)))
		goto skip;

	if (bio->bi_sector & (c->sb.block_size - 1) ||
	    bio_sectors(bio) & (c->sb.block_size - 1)) {
		pr_debug("skipping unaligned io");
		goto skip;
	}

	if (!congested && !dc->sequential_cutoff)
		goto rescale;

	if (!congested &&
	    mode == CACHE_MODE_WRITEBACK &&
	    (bio->bi_rw & REQ_WRITE) &&
	    (bio->bi_rw & REQ_SYNC))
		goto rescale;

	spin_lock(&dc->io_lock);

	hlist_for_each_entry(i, iohash(dc, bio->bi_sector), hash)
		if (i->last == bio->bi_sector &&
		    time_before(jiffies, i->jiffies))
			goto found;

	i = list_first_entry(&dc->io_lru, struct io, lru);

	add_sequential(task);
	i->sequential = 0;
found:
	if (i->sequential + bio->bi_size > i->sequential)
		i->sequential	+= bio->bi_size;

	i->last			= bio_end_sector(bio);
	i->jiffies		= jiffies + msecs_to_jiffies(5000);
	task->sequential_io	= i->sequential;

	hlist_del(&i->hash);
	hlist_add_head(&i->hash, iohash(dc, i->last));
	list_move_tail(&i->lru, &dc->io_lru);

	spin_unlock(&dc->io_lock);

	sectors = max(task->sequential_io,
		      task->sequential_io_avg) >> 9;

	if (dc->sequential_cutoff &&
	    sectors >= dc->sequential_cutoff >> 9) {
		trace_bcache_bypass_sequential(bio);
		goto skip;
	}

	if (congested && sectors >= congested) {
		trace_bcache_bypass_congested(bio);
		goto skip;
	}

rescale:
	bch_rescale_priorities(c, bio_sectors(bio));
	return false;
skip:
	bch_mark_sectors_bypassed(c, dc, bio_sectors(bio));
	return true;
}

/* Cache lookup */

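/*
 * The per-request state allocated for every bio the make_request
 * functions take: the closure tracking the whole request, the clone of
 * the original bio that actually gets submitted, and the embedded btree
 * and data insert operations.
 */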
struct search {
	/* Stack frame for bio_complete */
	struct closure		cl;

	struct bcache_device	*d;

	struct bbio		bio;
	struct bio		*orig_bio;
	struct bio		*cache_miss;

	unsigned		insert_bio_sectors;

	unsigned		recoverable:1;
	unsigned		unaligned_bvec:1;
	unsigned		write:1;

	unsigned long		start_time;

	struct btree_op		op;
	struct data_insert_op	iop;
};

static void bch_cache_read_endio(struct bio *bio, int error)
{
	struct bbio *b = container_of(bio, struct bbio, bio);
	struct closure *cl = bio->bi_private;
	struct search *s = container_of(cl, struct search, cl);

	/*
	 * If the bucket was reused while our bio was in flight, we might have
	 * read the wrong data. Set s->iop.error but not error so it doesn't
	 * get counted against the cache device, but we'll still reread the
	 * data from the backing device.
	 */

	if (error)
		s->iop.error = error;
	else if (ptr_stale(s->iop.c, &b->key, 0)) {
		atomic_long_inc(&s->iop.c->cache_read_races);
		s->iop.error = -EINTR;
	}

	bch_bbio_endio(s->iop.c, bio, error, "reading from cache");
}

/*
 * Read from a single key, handling the initial cache miss if the key starts in
 * the middle of the bio
 */
static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
{
	struct search *s = container_of(op, struct search, op);
	struct bio *n, *bio = &s->bio.bio;
	struct bkey *bio_key;
	unsigned ptr;

	if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_sector, 0)) <= 0)
		return MAP_CONTINUE;

	if (KEY_INODE(k) != s->iop.inode ||
	    KEY_START(k) > bio->bi_sector) {
		unsigned bio_sectors = bio_sectors(bio);
		unsigned sectors = KEY_INODE(k) == s->iop.inode
			? min_t(uint64_t, INT_MAX,
				KEY_START(k) - bio->bi_sector)
			: INT_MAX;

		int ret = s->d->cache_miss(b, s, bio, sectors);
		if (ret != MAP_CONTINUE)
			return ret;

		/* if this was a complete miss we shouldn't get here */
		BUG_ON(bio_sectors <= sectors);
	}

	if (!KEY_SIZE(k))
		return MAP_CONTINUE;

	/* XXX: figure out best pointer - for multiple cache devices */
	ptr = 0;

	PTR_BUCKET(b->c, k, ptr)->prio = INITIAL_PRIO;

	n = bch_bio_split(bio, min_t(uint64_t, INT_MAX,
				     KEY_OFFSET(k) - bio->bi_sector),
			  GFP_NOIO, s->d->bio_split);

	bio_key = &container_of(n, struct bbio, bio)->key;
	bch_bkey_copy_single_ptr(bio_key, k, ptr);

	bch_cut_front(&KEY(s->iop.inode, n->bi_sector, 0), bio_key);
	bch_cut_back(&KEY(s->iop.inode, bio_end_sector(n), 0), bio_key);

	n->bi_end_io	= bch_cache_read_endio;
	n->bi_private	= &s->cl;

	/*
	 * The bucket we're reading from might be reused while our bio
	 * is in flight, and we could then end up reading the wrong
	 * data.
	 *
	 * We guard against this by checking (in bch_cache_read_endio())
	 * if the pointer is stale again; if so, we treat it as an error
	 * and reread from the backing device (but we don't pass that
	 * error up anywhere).
	 */

	__bch_submit_bbio(n, b->c);
	return n == bio ? MAP_DONE : MAP_CONTINUE;
}

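/*
 * Walk the btree over the range this request covers, calling
 * cache_lookup_fn for each key it intersects; on -EAGAIN the walk
 * couldn't complete and is retried from the bcache workqueue.
 */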
static void cache_lookup(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, iop.cl);
	struct bio *bio = &s->bio.bio;

	int ret = bch_btree_map_keys(&s->op, s->iop.c,
				     &KEY(s->iop.inode, bio->bi_sector, 0),
				     cache_lookup_fn, MAP_END_KEY);
	if (ret == -EAGAIN)
		continue_at(cl, cache_lookup, bcache_wq);

	closure_return(cl);
}

/* Common code for the make_request functions */

static void request_endio(struct bio *bio, int error)
{
	struct closure *cl = bio->bi_private;

	if (error) {
		struct search *s = container_of(cl, struct search, cl);
		s->iop.error = error;
		/* Only cache read errors are recoverable */
		s->recoverable = false;
	}

	bio_put(bio);
	closure_put(cl);
}

static void bio_complete(struct search *s)
{
	if (s->orig_bio) {
		int cpu, rw = bio_data_dir(s->orig_bio);
		unsigned long duration = jiffies - s->start_time;

		cpu = part_stat_lock();
		part_round_stats(cpu, &s->d->disk->part0);
		part_stat_add(cpu, &s->d->disk->part0, ticks[rw], duration);
		part_stat_unlock();

		trace_bcache_request_end(s->d, s->orig_bio);
		bio_endio(s->orig_bio, s->iop.error);
		s->orig_bio = NULL;
	}
}

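/*
 * Initialize the search's embedded bio as a copy of the original bio,
 * with our own endio and a pinned reference count, so it can be
 * submitted (and, on error, resubmitted) without touching the bio we
 * were handed.
 */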
static void do_bio_hook(struct search *s)
{
	struct bio *bio = &s->bio.bio;
	memcpy(bio, s->orig_bio, sizeof(struct bio));

	bio->bi_end_io	= request_endio;
	bio->bi_private	= &s->cl;
	atomic_set(&bio->bi_cnt, 3);
}

static void search_free(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	bio_complete(s);

	if (s->iop.bio)
		bio_put(s->iop.bio);

	if (s->unaligned_bvec)
		mempool_free(s->bio.bio.bi_io_vec, s->d->unaligned_bvec);

	closure_debug_destroy(cl);
	mempool_free(s, s->d->c->search);
}

static struct search *search_alloc(struct bio *bio, struct bcache_device *d)
{
	struct search *s;
	struct bio_vec *bv;

	s = mempool_alloc(d->c->search, GFP_NOIO);
	memset(s, 0, offsetof(struct search, iop.insert_keys));

	__closure_init(&s->cl, NULL);

	s->iop.inode		= d->id;
	s->iop.c		= d->c;
	s->d			= d;
	s->op.lock		= -1;
	s->iop.write_point	= hash_long((unsigned long) current, 16);
	s->orig_bio		= bio;
	s->write		= (bio->bi_rw & REQ_WRITE) != 0;
	s->iop.flush_journal	= (bio->bi_rw & (REQ_FLUSH|REQ_FUA)) != 0;
	s->recoverable		= 1;
	s->start_time		= jiffies;
	do_bio_hook(s);

	if (bio->bi_size != bio_segments(bio) * PAGE_SIZE) {
		bv = mempool_alloc(d->unaligned_bvec, GFP_NOIO);
		memcpy(bv, bio_iovec(bio),
		       sizeof(struct bio_vec) * bio_segments(bio));

		s->bio.bio.bi_io_vec	= bv;
		s->unaligned_bvec	= 1;
	}

	return s;
}

/* Cached devices */

static void cached_dev_bio_complete(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	search_free(cl);
	cached_dev_put(dc);
}

/* Process reads */

static void cached_dev_cache_miss_done(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);

	if (s->iop.replace_collision)
		bch_mark_cache_miss_collision(s->iop.c, s->d);

	if (s->iop.bio) {
		int i;
		struct bio_vec *bv;

		bio_for_each_segment_all(bv, s->iop.bio, i)
			__free_page(bv->bv_page);
	}

	cached_dev_bio_complete(cl);
}

static void cached_dev_read_error(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct bio *bio = &s->bio.bio;
	struct bio_vec *bv;
	int i;

	if (s->recoverable) {
		/* Retry from the backing device: */
		trace_bcache_read_retry(s->orig_bio);

		s->iop.error = 0;
		bv = s->bio.bio.bi_io_vec;
		do_bio_hook(s);
		s->bio.bio.bi_io_vec = bv;

		if (!s->unaligned_bvec)
			bio_for_each_segment(bv, s->orig_bio, i)
				bv->bv_offset = 0, bv->bv_len = PAGE_SIZE;
		else
			memcpy(s->bio.bio.bi_io_vec,
			       bio_iovec(s->orig_bio),
			       sizeof(struct bio_vec) *
			       bio_segments(s->orig_bio));

		/* XXX: invalidate cache */

		closure_bio_submit(bio, cl, s->d);
	}

	continue_at(cl, cached_dev_cache_miss_done, NULL);
}

static void cached_dev_read_done(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	/*
	 * We had a cache miss; cache_bio now contains data ready to be inserted
	 * into the cache.
	 *
	 * First, we copy the data we just read from cache_bio's bounce buffers
	 * to the buffers the original bio pointed to:
	 */

	if (s->iop.bio) {
		bio_reset(s->iop.bio);
		s->iop.bio->bi_sector	= s->cache_miss->bi_sector;
		s->iop.bio->bi_bdev	= s->cache_miss->bi_bdev;
		s->iop.bio->bi_size	= s->insert_bio_sectors << 9;
		bch_bio_map(s->iop.bio, NULL);

		bio_copy_data(s->cache_miss, s->iop.bio);

		bio_put(s->cache_miss);
		s->cache_miss = NULL;
	}

	if (verify(dc, &s->bio.bio) && s->recoverable && !s->unaligned_bvec)
		bch_data_verify(dc, s->orig_bio);

	bio_complete(s);

	if (s->iop.bio &&
	    !test_bit(CACHE_SET_STOPPING, &s->iop.c->flags)) {
		BUG_ON(!s->iop.replace);
		closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
	}

	continue_at(cl, cached_dev_cache_miss_done, NULL);
}

static void cached_dev_read_done_bh(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	bch_mark_cache_accounting(s->iop.c, s->d,
				  !s->cache_miss, s->iop.bypass);
	trace_bcache_read(s->orig_bio, !s->cache_miss, s->iop.bypass);

	if (s->iop.error)
		continue_at_nobarrier(cl, cached_dev_read_error, bcache_wq);
	else if (s->iop.bio || verify(dc, &s->bio.bio))
		continue_at_nobarrier(cl, cached_dev_read_done, bcache_wq);
	else
		continue_at_nobarrier(cl, cached_dev_bio_complete, NULL);
}

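/*
 * Read the part of the request the cache couldn't satisfy from the
 * backing device, padded with readahead when that looks safe, and -
 * unless we're bypassing - set up s->iop.bio so the data also gets
 * inserted into the cache afterwards. The replace key registered via
 * bch_btree_insert_check_key() is, roughly, our claim on the range: a
 * write racing with us shows up later as a replace collision instead
 * of being overwritten.
 */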
static int cached_dev_cache_miss(struct btree *b, struct search *s,
				 struct bio *bio, unsigned sectors)
{
	int ret = MAP_CONTINUE;
	unsigned reada = 0;
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
	struct bio *miss, *cache_bio;

	if (s->cache_miss || s->iop.bypass) {
		miss = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split);
		ret = miss == bio ? MAP_DONE : MAP_CONTINUE;
		goto out_submit;
	}

	if (!(bio->bi_rw & REQ_RAHEAD) &&
	    !(bio->bi_rw & REQ_META) &&
	    s->iop.c->gc_stats.in_use < CUTOFF_CACHE_READA)
		reada = min_t(sector_t, dc->readahead >> 9,
			      bdev_sectors(bio->bi_bdev) - bio_end_sector(bio));

	s->insert_bio_sectors = min(sectors, bio_sectors(bio) + reada);

	s->iop.replace_key = KEY(s->iop.inode,
				 bio->bi_sector + s->insert_bio_sectors,
				 s->insert_bio_sectors);

	ret = bch_btree_insert_check_key(b, &s->op, &s->iop.replace_key);
	if (ret)
		return ret;

	s->iop.replace = true;

	miss = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split);

	/* btree_search_recurse()'s btree iterator is no good anymore */
	ret = miss == bio ? MAP_DONE : -EINTR;

	cache_bio = bio_alloc_bioset(GFP_NOWAIT,
			DIV_ROUND_UP(s->insert_bio_sectors, PAGE_SECTORS),
			dc->disk.bio_split);
	if (!cache_bio)
		goto out_submit;

	cache_bio->bi_sector	= miss->bi_sector;
	cache_bio->bi_bdev	= miss->bi_bdev;
	cache_bio->bi_size	= s->insert_bio_sectors << 9;

	cache_bio->bi_end_io	= request_endio;
	cache_bio->bi_private	= &s->cl;

	bch_bio_map(cache_bio, NULL);
	if (bio_alloc_pages(cache_bio, __GFP_NOWARN|GFP_NOIO))
		goto out_put;

	if (reada)
		bch_mark_cache_readahead(s->iop.c, s->d);

	s->cache_miss	= miss;
	s->iop.bio	= cache_bio;
	bio_get(cache_bio);
	closure_bio_submit(cache_bio, &s->cl, s->d);

	return ret;
out_put:
	bio_put(cache_bio);
out_submit:
	miss->bi_end_io		= request_endio;
	miss->bi_private	= &s->cl;
	closure_bio_submit(miss, &s->cl, s->d);
	return ret;
}

static void cached_dev_read(struct cached_dev *dc, struct search *s)
{
	struct closure *cl = &s->cl;

	closure_call(&s->iop.cl, cache_lookup, NULL, cl);
	continue_at(cl, cached_dev_read_done_bh, NULL);
}

/* Process writes */

static void cached_dev_write_complete(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	up_read_non_owner(&dc->writeback_lock);
	cached_dev_bio_complete(cl);
}

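/*
 * Writes take one of three paths: bypass (the write goes only to the
 * backing device and the cached range is invalidated), writeback (the
 * write goes only to the cache, marked dirty, and is flushed to the
 * backing device later), or writethrough (the write goes to the backing
 * device while a clone is inserted into the cache). Overlap with dirty
 * data undergoing background writeback forces the writeback path.
 */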
static void cached_dev_write(struct cached_dev *dc, struct search *s)
{
	struct closure *cl = &s->cl;
	struct bio *bio = &s->bio.bio;
	struct bkey start = KEY(dc->disk.id, bio->bi_sector, 0);
	struct bkey end = KEY(dc->disk.id, bio_end_sector(bio), 0);

	bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys, &start, &end);

	down_read_non_owner(&dc->writeback_lock);
	if (bch_keybuf_check_overlapping(&dc->writeback_keys, &start, &end)) {
		/*
		 * We overlap with some dirty data undergoing background
		 * writeback, force this write to writeback
		 */
		s->iop.bypass = false;
		s->iop.writeback = true;
	}

	/*
	 * Discards aren't _required_ to do anything, so skipping if
	 * check_overlapping returned true is ok
	 *
	 * But check_overlapping drops dirty keys for which io hasn't started,
	 * so we still want to call it.
	 */
	if (bio->bi_rw & REQ_DISCARD)
		s->iop.bypass = true;

	if (should_writeback(dc, s->orig_bio,
			     cache_mode(dc, bio),
			     s->iop.bypass)) {
		s->iop.bypass = false;
		s->iop.writeback = true;
	}

	if (s->iop.bypass) {
		s->iop.bio = s->orig_bio;
		bio_get(s->iop.bio);

		if (!(bio->bi_rw & REQ_DISCARD) ||
		    blk_queue_discard(bdev_get_queue(dc->bdev)))
			closure_bio_submit(bio, cl, s->d);
	} else if (s->iop.writeback) {
		bch_writeback_add(dc);
		s->iop.bio = bio;

		if (bio->bi_rw & REQ_FLUSH) {
			/* Also need to send a flush to the backing device */
			struct bio *flush = bio_alloc_bioset(GFP_NOIO, 0,
							     dc->disk.bio_split);

			flush->bi_rw	= WRITE_FLUSH;
			flush->bi_bdev	= bio->bi_bdev;
			flush->bi_end_io = request_endio;
			flush->bi_private = cl;

			closure_bio_submit(flush, cl, s->d);
		}
	} else {
		s->iop.bio = bio_clone_bioset(bio, GFP_NOIO,
					      dc->disk.bio_split);

		closure_bio_submit(bio, cl, s->d);
	}

	closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
	continue_at(cl, cached_dev_write_complete, NULL);
}

static void cached_dev_nodata(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct bio *bio = &s->bio.bio;

	if (s->iop.flush_journal)
		bch_journal_meta(s->iop.c, cl);

	/* If it's a flush, we send the flush to the backing device too */
	closure_bio_submit(bio, cl, s->d);

	continue_at(cl, cached_dev_bio_complete, NULL);
}

/* Cached devices - read & write stuff */

static void cached_dev_make_request(struct request_queue *q, struct bio *bio)
{
	struct search *s;
	struct bcache_device *d = bio->bi_bdev->bd_disk->private_data;
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
	int cpu, rw = bio_data_dir(bio);

	cpu = part_stat_lock();
	part_stat_inc(cpu, &d->disk->part0, ios[rw]);
	part_stat_add(cpu, &d->disk->part0, sectors[rw], bio_sectors(bio));
	part_stat_unlock();

	bio->bi_bdev	= dc->bdev;
	bio->bi_sector	+= dc->sb.data_offset;

	if (cached_dev_get(dc)) {
		s = search_alloc(bio, d);
		trace_bcache_request_start(s->d, bio);

		if (!bio->bi_size) {
			/*
			 * can't call bch_journal_meta from under
			 * generic_make_request
			 */
			continue_at_nobarrier(&s->cl,
					      cached_dev_nodata,
					      bcache_wq);
		} else {
			s->iop.bypass = check_should_bypass(dc, bio);

			if (rw)
				cached_dev_write(dc, s);
			else
				cached_dev_read(dc, s);
		}
	} else {
		if ((bio->bi_rw & REQ_DISCARD) &&
		    !blk_queue_discard(bdev_get_queue(dc->bdev)))
			bio_endio(bio, 0);
		else
			bch_generic_make_request(bio, &d->bio_split_hook);
	}
}

static int cached_dev_ioctl(struct bcache_device *d, fmode_t mode,
			    unsigned int cmd, unsigned long arg)
{
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
	return __blkdev_driver_ioctl(dc->bdev, mode, cmd, arg);
}

static int cached_dev_congested(void *data, int bits)
{
	struct bcache_device *d = data;
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
	struct request_queue *q = bdev_get_queue(dc->bdev);
	int ret = 0;

	if (bdi_congested(&q->backing_dev_info, bits))
		return 1;

	if (cached_dev_get(dc)) {
		unsigned i;
		struct cache *ca;

		for_each_cache(ca, d->c, i) {
			q = bdev_get_queue(ca->bdev);
			ret |= bdi_congested(&q->backing_dev_info, bits);
		}

		cached_dev_put(dc);
	}

	return ret;
}

void bch_cached_dev_request_init(struct cached_dev *dc)
{
	struct gendisk *g = dc->disk.disk;

	g->queue->make_request_fn		= cached_dev_make_request;
	g->queue->backing_dev_info.congested_fn = cached_dev_congested;
	dc->disk.cache_miss			= cached_dev_cache_miss;
	dc->disk.ioctl				= cached_dev_ioctl;
}

/* Flash backed devices */

static int flash_dev_cache_miss(struct btree *b, struct search *s,
				struct bio *bio, unsigned sectors)
{
	struct bio_vec *bv;
	int i;

	/* Zero fill bio */

	bio_for_each_segment(bv, bio, i) {
		unsigned j = min(bv->bv_len >> 9, sectors);

		void *p = kmap(bv->bv_page);
		memset(p + bv->bv_offset, 0, j << 9);
		kunmap(bv->bv_page);

		sectors -= j;
	}

	bio_advance(bio, min(sectors << 9, bio->bi_size));

	if (!bio->bi_size)
		return MAP_DONE;

	return MAP_CONTINUE;
}

static void flash_dev_nodata(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);

	if (s->iop.flush_journal)
		bch_journal_meta(s->iop.c, cl);

	continue_at(cl, search_free, NULL);
}

static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
{
	struct search *s;
	struct closure *cl;
	struct bcache_device *d = bio->bi_bdev->bd_disk->private_data;
	int cpu, rw = bio_data_dir(bio);

	cpu = part_stat_lock();
	part_stat_inc(cpu, &d->disk->part0, ios[rw]);
	part_stat_add(cpu, &d->disk->part0, sectors[rw], bio_sectors(bio));
	part_stat_unlock();

	s = search_alloc(bio, d);
	cl = &s->cl;
	bio = &s->bio.bio;

	trace_bcache_request_start(s->d, bio);

	if (!bio->bi_size) {
		/*
		 * can't call bch_journal_meta from under
		 * generic_make_request
		 */
		continue_at_nobarrier(&s->cl,
				      flash_dev_nodata,
				      bcache_wq);
	} else if (rw) {
		bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys,
					&KEY(d->id, bio->bi_sector, 0),
					&KEY(d->id, bio_end_sector(bio), 0));

		s->iop.bypass		= (bio->bi_rw & REQ_DISCARD) != 0;
		s->iop.writeback	= true;
		s->iop.bio		= bio;

		closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
	} else {
		closure_call(&s->iop.cl, cache_lookup, NULL, cl);
	}

	continue_at(cl, search_free, NULL);
}

static int flash_dev_ioctl(struct bcache_device *d, fmode_t mode,
			   unsigned int cmd, unsigned long arg)
{
	return -ENOTTY;
}

static int flash_dev_congested(void *data, int bits)
{
	struct bcache_device *d = data;
	struct request_queue *q;
	struct cache *ca;
	unsigned i;
	int ret = 0;

	for_each_cache(ca, d->c, i) {
		q = bdev_get_queue(ca->bdev);
		ret |= bdi_congested(&q->backing_dev_info, bits);
	}

	return ret;
}

void bch_flash_dev_request_init(struct bcache_device *d)
{
	struct gendisk *g = d->disk;

	g->queue->make_request_fn		= flash_dev_make_request;
	g->queue->backing_dev_info.congested_fn = flash_dev_congested;
	d->cache_miss				= flash_dev_cache_miss;
	d->ioctl				= flash_dev_ioctl;
}

void bch_request_exit(void)
{
#ifdef CONFIG_CGROUP_BCACHE
	cgroup_unload_subsys(&bcache_subsys);
#endif
	if (bch_search_cache)
		kmem_cache_destroy(bch_search_cache);
}

int __init bch_request_init(void)
{
	bch_search_cache = KMEM_CACHE(search, 0);
	if (!bch_search_cache)
		return -ENOMEM;

#ifdef CONFIG_CGROUP_BCACHE
	cgroup_load_subsys(&bcache_subsys);
	init_bch_cgroup(&bcache_default_cgroup);

	cgroup_add_cftypes(&bcache_subsys, bch_files);
#endif
	return 0;
}