/*
 * Main bcache entry point - handle a read or a write request and decide what to
 * do with it; the make_request functions are called by the block layer.
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "request.h"
#include "writeback.h"

#include <linux/module.h>
#include <linux/hash.h>
#include <linux/random.h>

#include <trace/events/bcache.h>

#define CUTOFF_CACHE_ADD	95
#define CUTOFF_CACHE_READA	90

struct kmem_cache *bch_search_cache;

static void bch_data_insert_start(struct closure *);

static unsigned cache_mode(struct cached_dev *dc, struct bio *bio)
{
	return BDEV_CACHE_MODE(&dc->sb);
}

static bool verify(struct cached_dev *dc, struct bio *bio)
{
	return dc->verify;
}
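
/*
 * Checksum a bio's data: a 64-bit CRC is accumulated over each segment (the
 * pages are kmapped, as they may be highmem) and stashed in the u64 slot just
 * past the key's last pointer; the top bit is masked off, so only 63 bits of
 * the checksum are actually kept.
 */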
static void bio_csum(struct bio *bio, struct bkey *k)
{
	struct bio_vec bv;
	struct bvec_iter iter;
	uint64_t csum = 0;

	bio_for_each_segment(bv, bio, iter) {
		void *d = kmap(bv.bv_page) + bv.bv_offset;
		csum = bch_crc64_update(csum, d, bv.bv_len);
		kunmap(bv.bv_page);
	}

	k->ptr[KEY_PTRS(k)] = csum & (~0ULL >> 1);
}

/* Insert data into cache */
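
/*
 * The insert path is a little state machine built out of closures:
 * bch_data_insert() kicks things off, bch_data_insert_start() allocates
 * sectors, splits the bio and submits the data writes, each write completion
 * runs bch_data_insert_endio(), and once the data is on disk
 * bch_data_insert_keys() journals the new keys and inserts them into the
 * btree (possibly in batches, when the keylist can't be grown further).
 * Bypassed writes skip the data entirely and go through bch_data_invalidate()
 * instead.
 */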
static void bch_data_insert_keys(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
	atomic_t *journal_ref = NULL;
	struct bkey *replace_key = op->replace ? &op->replace_key : NULL;
	int ret;

	/*
	 * If we're looping, might already be waiting on
	 * another journal write - can't wait on more than one journal write at
	 * a time
	 *
	 * XXX: this looks wrong
	 */
#if 0
	while (atomic_read(&s->cl.remaining) & CLOSURE_WAITING)
		closure_sync(&s->cl);
#endif

	if (!op->replace)
		journal_ref = bch_journal(op->c, &op->insert_keys,
					  op->flush_journal ? cl : NULL);

	ret = bch_btree_insert(op->c, &op->insert_keys,
			       journal_ref, replace_key);
	if (ret == -ESRCH) {
		op->replace_collision = true;
	} else if (ret) {
		op->error		= -ENOMEM;
		op->insert_data_done	= true;
	}

	if (journal_ref)
		atomic_dec_bug(journal_ref);

	if (!op->insert_data_done)
		continue_at(cl, bch_data_insert_start, op->wq);

	bch_keylist_free(&op->insert_keys);
	closure_return(cl);
}

static int bch_keylist_realloc(struct keylist *l, unsigned u64s,
			       struct cache_set *c)
{
	size_t oldsize = bch_keylist_nkeys(l);
	size_t newsize = oldsize + u64s;

	/*
	 * The journalling code doesn't handle the case where the keys to
	 * insert are bigger than an empty write: if we just return -ENOMEM
	 * here, bio_insert() and bio_invalidate() will insert the keys created
	 * so far and finish the rest when the keylist is empty.
	 */
	if (newsize * sizeof(uint64_t) > block_bytes(c) - sizeof(struct jset))
		return -ENOMEM;

	return __bch_keylist_realloc(l, u64s);
}
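
/*
 * Bypass: rather than writing data to the cache, emit keys with zero pointers
 * covering the bio's range. Inserting those into the btree invalidates
 * whatever the cache previously held for that region, which is all that's
 * needed for consistency when the write goes straight to the backing device.
 */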
static void bch_data_invalidate(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
	struct bio *bio = op->bio;

	pr_debug("invalidating %i sectors from %llu",
		 bio_sectors(bio), (uint64_t) bio->bi_iter.bi_sector);

	while (bio_sectors(bio)) {
		unsigned sectors = min(bio_sectors(bio),
				       1U << (KEY_SIZE_BITS - 1));

		if (bch_keylist_realloc(&op->insert_keys, 2, op->c))
			goto out;

		bio->bi_iter.bi_sector	+= sectors;
		bio->bi_iter.bi_size	-= sectors << 9;

		bch_keylist_add(&op->insert_keys,
				&KEY(op->inode, bio->bi_iter.bi_sector, sectors));
	}

	op->insert_data_done = true;
	bio_put(bio);
out:
	continue_at(cl, bch_data_insert_keys, op->wq);
}

static void bch_data_insert_error(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);

	/*
	 * Our data write just errored, which means we've got a bunch of keys to
	 * insert that point to data that wasn't successfully written.
	 *
	 * We don't have to insert those keys but we still have to invalidate
	 * that region of the cache - so, if we just strip off all the pointers
	 * from the keys we'll accomplish just that.
	 */

	struct bkey *src = op->insert_keys.keys, *dst = op->insert_keys.keys;

	while (src != op->insert_keys.top) {
		struct bkey *n = bkey_next(src);

		SET_KEY_PTRS(src, 0);
		memmove(dst, src, bkey_bytes(src));

		dst = bkey_next(dst);
		src = n;
	}

	op->insert_keys.top = dst;

	bch_data_insert_keys(cl);
}
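
/*
 * Completion for the data writes. On error there are three cases: writeback
 * writes report the error to the caller via op->error, since the cache holds
 * the only copy; ordinary writethrough inserts divert to
 * bch_data_insert_error() so the now-bogus keys get their pointers stripped
 * before insertion; and replace operations (cache miss fills) are simply
 * abandoned.
 */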
static void bch_data_insert_endio(struct bio *bio, int error)
{
	struct closure *cl = bio->bi_private;
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);

	if (error) {
		/* TODO: We could try to recover from this. */
		if (op->writeback)
			op->error = error;
		else if (!op->replace)
			set_closure_fn(cl, bch_data_insert_error, op->wq);
		else
			set_closure_fn(cl, NULL, NULL);
	}

	bch_bbio_endio(op->c, bio, error, "writing data to cache");
}

static void bch_data_insert_start(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
	struct bio *bio = op->bio, *n;

	if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0) {
		set_gc_sectors(op->c);
		wake_up_gc(op->c);
	}

	if (op->bypass)
		return bch_data_invalidate(cl);

	/*
	 * Journal writes are marked REQ_FLUSH; if the original write was a
	 * flush, it'll wait on the journal write.
	 */
	bio->bi_rw &= ~(REQ_FLUSH|REQ_FUA);

	do {
		unsigned i;
		struct bkey *k;
		struct bio_set *split = op->c->bio_split;

		/* 1 for the device pointer and 1 for the chksum */
		if (bch_keylist_realloc(&op->insert_keys,
					3 + (op->csum ? 1 : 0),
					op->c))
			continue_at(cl, bch_data_insert_keys, op->wq);

		k = op->insert_keys.top;
		bkey_init(k);
		SET_KEY_INODE(k, op->inode);
		SET_KEY_OFFSET(k, bio->bi_iter.bi_sector);

		if (!bch_alloc_sectors(op->c, k, bio_sectors(bio),
				       op->write_point, op->write_prio,
				       op->writeback))
			goto err;

		n = bio_next_split(bio, KEY_SIZE(k), GFP_NOIO, split);

		n->bi_end_io	= bch_data_insert_endio;
		n->bi_private	= cl;

		if (op->writeback) {
			SET_KEY_DIRTY(k, true);

			for (i = 0; i < KEY_PTRS(k); i++)
				SET_GC_MARK(PTR_BUCKET(op->c, k, i),
					    GC_MARK_DIRTY);
		}

		SET_KEY_CSUM(k, op->csum);
		if (KEY_CSUM(k))
			bio_csum(n, k);

		trace_bcache_cache_insert(k);
		bch_keylist_push(&op->insert_keys);

		n->bi_rw |= REQ_WRITE;
		bch_submit_bbio(n, op->c, k, 0);
	} while (n != bio);

	op->insert_data_done = true;
	continue_at(cl, bch_data_insert_keys, op->wq);
err:
	/* bch_alloc_sectors() blocks if op->writeback = true */
	BUG_ON(op->writeback);

	/*
	 * But if it's not a writeback write we'd rather just bail out if
	 * there aren't any buckets ready to write to - it might take a while
	 * and we might be starving btree writes for gc or something.
	 */

	if (!op->replace) {
		/*
		 * Writethrough write: We can't complete the write until we've
		 * updated the index. But we don't want to delay the write while
		 * we wait for buckets to be freed up, so just invalidate the
		 * rest of the write.
		 */
		op->bypass = true;
		return bch_data_invalidate(cl);
	} else {
		/*
		 * From a cache miss, we can just insert the keys for the data
		 * we have written or bail out if we didn't do anything.
		 */
		op->insert_data_done = true;
		bio_put(bio);

		if (!bch_keylist_empty(&op->insert_keys))
			continue_at(cl, bch_data_insert_keys, op->wq);
		else
			closure_return(cl);
	}
}

/**
 * bch_data_insert - stick some data in the cache
 *
 * This is the starting point for any data to end up in a cache device; it
 * could be from a normal write, or a writeback write, or a write to a flash
 * only volume - it's also used by the moving garbage collector to compact
 * data in mostly empty buckets.
 *
 * It first writes the data to the cache, creating a list of keys to be
 * inserted (if the data had to be fragmented there will be multiple keys);
 * after the data is written it calls bch_journal, and after the keys have
 * been added to the next journal write they're inserted into the btree.
 *
 * It inserts the data in op->bio; bi_sector is used for the key offset,
 * and op->inode is used for the key inode.
 *
 * If op->bypass is true, instead of inserting the data it invalidates the
 * region of the cache represented by op->bio and op->inode.
 */
void bch_data_insert(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);

	trace_bcache_write(op->c, op->inode, op->bio,
			   op->writeback, op->bypass);

	bch_keylist_init(&op->insert_keys);
	bio_get(op->bio);
	bch_data_insert_start(cl);
}

/* Congested? */

unsigned bch_get_congested(struct cache_set *c)
{
	int i;
	long rand;

	if (!c->congested_read_threshold_us &&
	    !c->congested_write_threshold_us)
		return 0;

	i = (local_clock_us() - c->congested_last_us) / 1024;
	if (i < 0)
		return 0;

	i += atomic_read(&c->congested);
	if (i >= 0)
		return 0;

	i += CONGESTED_MAX;

	if (i > 0)
		i = fract_exp_two(i, 6);

	rand = get_random_int();
	i -= bitmap_weight(&rand, BITS_PER_LONG);

	return i > 0 ? i : 1;
}
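
/*
 * Roughly: bch_get_congested() returns 0 while the cache is uncongested, and
 * otherwise a cutoff (at most CONGESTED_MAX) that check_should_bypass()
 * compares against a request's sequential size. c->congested is driven
 * negative elsewhere as cache device IOs exceed the configured latency
 * thresholds, and the division by 1024 above decays it back toward zero at
 * about one point per millisecond; fract_exp_two() scales the remainder
 * exponentially, and subtracting the popcount of a random long dithers the
 * result so the bypass cutoff isn't a hard edge.
 */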
static void add_sequential(struct task_struct *t)
{
	ewma_add(t->sequential_io_avg,
		 t->sequential_io, 8, 0);

	t->sequential_io = 0;
}

static struct hlist_head *iohash(struct cached_dev *dc, uint64_t k)
{
	return &dc->io_hash[hash_64(k, RECENT_IO_BITS)];
}
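
/*
 * Decide whether a bio should skip the cache entirely. IOs are bypassed when
 * the device is detaching, the cache is nearly full (CUTOFF_CACHE_ADD), the
 * cache mode rules them out, or they're misaligned; otherwise the per-task
 * sequential IO tracking below kicks in. IOs that continue a recently seen
 * stream (tracked via dc->io_hash and recycled in LRU order off dc->io_lru)
 * accumulate into task->sequential_io, and once that exceeds
 * dc->sequential_cutoff, or the congestion cutoff from bch_get_congested(),
 * the bio is bypassed.
 */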
static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
{
	struct cache_set *c = dc->disk.c;
	unsigned mode = cache_mode(dc, bio);
	unsigned sectors, congested = bch_get_congested(c);
	struct task_struct *task = current;
	struct io *i;

	if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
	    c->gc_stats.in_use > CUTOFF_CACHE_ADD ||
	    (bio->bi_rw & REQ_DISCARD))
		goto skip;

	if (mode == CACHE_MODE_NONE ||
	    (mode == CACHE_MODE_WRITEAROUND &&
	     (bio->bi_rw & REQ_WRITE)))
		goto skip;

	if (bio->bi_iter.bi_sector & (c->sb.block_size - 1) ||
	    bio_sectors(bio) & (c->sb.block_size - 1)) {
		pr_debug("skipping unaligned io");
		goto skip;
	}

	if (bypass_torture_test(dc)) {
		if ((get_random_int() & 3) == 3)
			goto skip;
		else
			goto rescale;
	}

	if (!congested && !dc->sequential_cutoff)
		goto rescale;

	if (!congested &&
	    mode == CACHE_MODE_WRITEBACK &&
	    (bio->bi_rw & REQ_WRITE) &&
	    (bio->bi_rw & REQ_SYNC))
		goto rescale;

	spin_lock(&dc->io_lock);

	hlist_for_each_entry(i, iohash(dc, bio->bi_iter.bi_sector), hash)
		if (i->last == bio->bi_iter.bi_sector &&
		    time_before(jiffies, i->jiffies))
			goto found;

	i = list_first_entry(&dc->io_lru, struct io, lru);

	add_sequential(task);
	i->sequential = 0;
found:
	if (i->sequential + bio->bi_iter.bi_size > i->sequential)
		i->sequential	+= bio->bi_iter.bi_size;

	i->last			 = bio_end_sector(bio);
	i->jiffies		 = jiffies + msecs_to_jiffies(5000);
	task->sequential_io	 = i->sequential;

	hlist_del(&i->hash);
	hlist_add_head(&i->hash, iohash(dc, i->last));
	list_move_tail(&i->lru, &dc->io_lru);

	spin_unlock(&dc->io_lock);

	sectors = max(task->sequential_io,
		      task->sequential_io_avg) >> 9;

	if (dc->sequential_cutoff &&
	    sectors >= dc->sequential_cutoff >> 9) {
		trace_bcache_bypass_sequential(bio);
		goto skip;
	}

	if (congested && sectors >= congested) {
		trace_bcache_bypass_congested(bio);
		goto skip;
	}

rescale:
	bch_rescale_priorities(c, bio_sectors(bio));
	return false;
skip:
	bch_mark_sectors_bypassed(c, dc, bio_sectors(bio));
	return true;
}

/* Cache lookup */

struct search {
	/* Stack frame for bio_complete */
	struct closure		cl;

	struct bbio		bio;
	struct bio		*orig_bio;
	struct bio		*cache_miss;
	struct bcache_device	*d;

	unsigned		insert_bio_sectors;
	unsigned		recoverable:1;
	unsigned		write:1;
	unsigned		read_dirty_data:1;

	unsigned long		start_time;

	struct btree_op		op;
	struct data_insert_op	iop;
};
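
/*
 * A search contains two nested closures: s->cl is the outer "stack frame"
 * that stays live until bio_complete(), while s->iop.cl tracks the cache
 * lookup or data insert phase and hands control back to the outer closure
 * when that phase finishes.
 */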
static void bch_cache_read_endio(struct bio *bio, int error)
{
	struct bbio *b = container_of(bio, struct bbio, bio);
	struct closure *cl = bio->bi_private;
	struct search *s = container_of(cl, struct search, cl);

	/*
	 * If the bucket was reused while our bio was in flight, we might have
	 * read the wrong data. Set s->iop.error but not error so it doesn't
	 * get counted against the cache device, but we'll still reread the
	 * data from the backing device.
	 */

	if (error)
		s->iop.error = error;
	else if (!KEY_DIRTY(&b->key) &&
		 ptr_stale(s->iop.c, &b->key, 0)) {
		atomic_long_inc(&s->iop.c->cache_read_races);
		s->iop.error = -EINTR;
	}

	bch_bbio_endio(s->iop.c, bio, error, "reading from cache");
}

/*
 * Read from a single key, handling the initial cache miss if the key starts in
 * the middle of the bio
 */
static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
{
	struct search *s = container_of(op, struct search, op);
	struct bio *n, *bio = &s->bio.bio;
	struct bkey *bio_key;
	unsigned ptr;

	if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0)) <= 0)
		return MAP_CONTINUE;

	if (KEY_INODE(k) != s->iop.inode ||
	    KEY_START(k) > bio->bi_iter.bi_sector) {
		unsigned bio_sectors = bio_sectors(bio);
		unsigned sectors = KEY_INODE(k) == s->iop.inode
			? min_t(uint64_t, INT_MAX,
				KEY_START(k) - bio->bi_iter.bi_sector)
			: INT_MAX;

		int ret = s->d->cache_miss(b, s, bio, sectors);
		if (ret != MAP_CONTINUE)
			return ret;

		/* if this was a complete miss we shouldn't get here */
		BUG_ON(bio_sectors <= sectors);
	}

	if (!KEY_SIZE(k))
		return MAP_CONTINUE;

	/* XXX: figure out best pointer - for multiple cache devices */
	ptr = 0;

	PTR_BUCKET(b->c, k, ptr)->prio = INITIAL_PRIO;

	if (KEY_DIRTY(k))
		s->read_dirty_data = true;

	n = bio_next_split(bio, min_t(uint64_t, INT_MAX,
				      KEY_OFFSET(k) - bio->bi_iter.bi_sector),
			   GFP_NOIO, s->d->bio_split);

	bio_key = &container_of(n, struct bbio, bio)->key;
	bch_bkey_copy_single_ptr(bio_key, k, ptr);

	bch_cut_front(&KEY(s->iop.inode, n->bi_iter.bi_sector, 0), bio_key);
	bch_cut_back(&KEY(s->iop.inode, bio_end_sector(n), 0), bio_key);

	n->bi_end_io	= bch_cache_read_endio;
	n->bi_private	= &s->cl;

	/*
	 * The bucket we're reading from might be reused while our bio
	 * is in flight, and we could then end up reading the wrong
	 * data.
	 *
	 * We guard against this by checking (in bch_cache_read_endio()) if
	 * the pointer is stale again; if so, we treat it as an error
	 * and reread from the backing device (but we don't pass that
	 * error up anywhere).
	 */

	__bch_submit_bbio(n, b->c);
	return n == bio ? MAP_DONE : MAP_CONTINUE;
}
static void cache_lookup(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, iop.cl);
	struct bio *bio = &s->bio.bio;
	int ret;

	bch_btree_op_init(&s->op, -1);

	ret = bch_btree_map_keys(&s->op, s->iop.c,
				 &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0),
				 cache_lookup_fn, MAP_END_KEY);
	if (ret == -EAGAIN)
		continue_at(cl, cache_lookup, bcache_wq);

	closure_return(cl);
}

/* Common code for the make_request functions */

static void request_endio(struct bio *bio, int error)
{
	struct closure *cl = bio->bi_private;

	if (error) {
		struct search *s = container_of(cl, struct search, cl);
		s->iop.error = error;
		/* Only cache read errors are recoverable */
		s->recoverable = false;
	}

	bio_put(bio);
	closure_put(cl);
}

static void bio_complete(struct search *s)
{
	if (s->orig_bio) {
		int cpu, rw = bio_data_dir(s->orig_bio);
		unsigned long duration = jiffies - s->start_time;

		cpu = part_stat_lock();
		part_round_stats(cpu, &s->d->disk->part0);
		part_stat_add(cpu, &s->d->disk->part0, ticks[rw], duration);
		part_stat_unlock();

		trace_bcache_request_end(s->d, s->orig_bio);
		bio_endio(s->orig_bio, s->iop.error);
		s->orig_bio = NULL;
	}
}
static void do_bio_hook(struct search *s, struct bio *orig_bio)
{
	struct bio *bio = &s->bio.bio;

	bio_init(bio);
	__bio_clone_fast(bio, orig_bio);
	bio->bi_end_io		= request_endio;
	bio->bi_private		= &s->cl;

	atomic_set(&bio->bi_cnt, 3);
}
static void search_free(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	bio_complete(s);

	if (s->iop.bio)
		bio_put(s->iop.bio);

	closure_debug_destroy(cl);
	mempool_free(s, s->d->c->search);
}

static inline struct search *search_alloc(struct bio *bio,
					  struct bcache_device *d)
{
	struct search *s;

	s = mempool_alloc(d->c->search, GFP_NOIO);

	closure_init(&s->cl, NULL);
	do_bio_hook(s, bio);

	s->orig_bio		= bio;
	s->cache_miss		= NULL;
	s->d			= d;
	s->recoverable		= 1;
	s->write		= (bio->bi_rw & REQ_WRITE) != 0;
	s->read_dirty_data	= 0;
	s->start_time		= jiffies;

	s->iop.c		= d->c;
	s->iop.bio		= NULL;
	s->iop.inode		= d->id;
	s->iop.write_point	= hash_long((unsigned long) current, 16);
	s->iop.write_prio	= 0;
	s->iop.error		= 0;
	s->iop.flags		= 0;
	s->iop.flush_journal	= (bio->bi_rw & (REQ_FLUSH|REQ_FUA)) != 0;
	s->iop.wq		= bcache_wq;

	return s;
}

/* Cached devices */

static void cached_dev_bio_complete(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	search_free(cl);
	cached_dev_put(dc);
}

/* Process reads */
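
/*
 * Read path: cache_lookup() walks the btree over the bio's range, and missing
 * or partially missing ranges go through cached_dev_cache_miss(), which reads
 * from the backing device (plus readahead, when allowed) into a bounce bio so
 * the data can also be inserted into the cache. Completion lands in
 * cached_dev_read_done_bh(), which dispatches to the error, done or
 * plain-complete paths below.
 */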
static void cached_dev_cache_miss_done(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);

	if (s->iop.replace_collision)
		bch_mark_cache_miss_collision(s->iop.c, s->d);

	if (s->iop.bio) {
		int i;
		struct bio_vec *bv;

		bio_for_each_segment_all(bv, s->iop.bio, i)
			__free_page(bv->bv_page);
	}

	cached_dev_bio_complete(cl);
}
static void cached_dev_read_error(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct bio *bio = &s->bio.bio;

	if (s->recoverable) {
		/* Retry from the backing device: */
		trace_bcache_read_retry(s->orig_bio);

		s->iop.error = 0;
		do_bio_hook(s, s->orig_bio);

		/* XXX: invalidate cache */

		closure_bio_submit(bio, cl, s->d);
	}

	continue_at(cl, cached_dev_cache_miss_done, NULL);
}
static void cached_dev_read_done(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	/*
	 * We had a cache miss; cache_bio now contains data ready to be inserted
	 * into the cache.
	 *
	 * First, we copy the data we just read from cache_bio's bounce buffers
	 * to the buffers the original bio pointed to:
	 */

	if (s->iop.bio) {
		bio_reset(s->iop.bio);
		s->iop.bio->bi_iter.bi_sector = s->cache_miss->bi_iter.bi_sector;
		s->iop.bio->bi_bdev = s->cache_miss->bi_bdev;
		s->iop.bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
		bch_bio_map(s->iop.bio, NULL);

		bio_copy_data(s->cache_miss, s->iop.bio);

		bio_put(s->cache_miss);
		s->cache_miss = NULL;
	}

	if (verify(dc, &s->bio.bio) && s->recoverable && !s->read_dirty_data)
		bch_data_verify(dc, s->orig_bio);

	bio_complete(s);

	if (s->iop.bio &&
	    !test_bit(CACHE_SET_STOPPING, &s->iop.c->flags)) {
		BUG_ON(!s->iop.replace);
		closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
	}

	continue_at(cl, cached_dev_cache_miss_done, NULL);
}
static void cached_dev_read_done_bh(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	bch_mark_cache_accounting(s->iop.c, s->d,
				  !s->cache_miss, s->iop.bypass);
	trace_bcache_read(s->orig_bio, !s->cache_miss, s->iop.bypass);

	if (s->iop.error)
		continue_at_nobarrier(cl, cached_dev_read_error, bcache_wq);
	else if (s->iop.bio || verify(dc, &s->bio.bio))
		continue_at_nobarrier(cl, cached_dev_read_done, bcache_wq);
	else
		continue_at_nobarrier(cl, cached_dev_bio_complete, NULL);
}
static int cached_dev_cache_miss(struct btree *b, struct search *s,
				 struct bio *bio, unsigned sectors)
{
	int ret = MAP_CONTINUE;
	unsigned reada = 0;
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
	struct bio *miss, *cache_bio;

	if (s->cache_miss || s->iop.bypass) {
		miss = bio_next_split(bio, sectors, GFP_NOIO, s->d->bio_split);
		ret = miss == bio ? MAP_DONE : MAP_CONTINUE;
		goto out_submit;
	}

	if (!(bio->bi_rw & REQ_RAHEAD) &&
	    !(bio->bi_rw & REQ_META) &&
	    s->iop.c->gc_stats.in_use < CUTOFF_CACHE_READA)
		reada = min_t(sector_t, dc->readahead >> 9,
			      bdev_sectors(bio->bi_bdev) - bio_end_sector(bio));

	s->insert_bio_sectors = min(sectors, bio_sectors(bio) + reada);

	s->iop.replace_key = KEY(s->iop.inode,
				 bio->bi_iter.bi_sector + s->insert_bio_sectors,
				 s->insert_bio_sectors);

	ret = bch_btree_insert_check_key(b, &s->op, &s->iop.replace_key);
	if (ret)
		return ret;

	s->iop.replace = true;

	miss = bio_next_split(bio, sectors, GFP_NOIO, s->d->bio_split);

	/* btree_search_recurse()'s btree iterator is no good anymore */
	ret = miss == bio ? MAP_DONE : -EINTR;

	cache_bio = bio_alloc_bioset(GFP_NOWAIT,
			DIV_ROUND_UP(s->insert_bio_sectors, PAGE_SECTORS),
			dc->disk.bio_split);
	if (!cache_bio)
		goto out_submit;

	cache_bio->bi_iter.bi_sector	= miss->bi_iter.bi_sector;
	cache_bio->bi_bdev		= miss->bi_bdev;
	cache_bio->bi_iter.bi_size	= s->insert_bio_sectors << 9;

	cache_bio->bi_end_io	= request_endio;
	cache_bio->bi_private	= &s->cl;

	bch_bio_map(cache_bio, NULL);
	if (bio_alloc_pages(cache_bio, __GFP_NOWARN|GFP_NOIO))
		goto out_put;

	if (reada)
		bch_mark_cache_readahead(s->iop.c, s->d);

	s->cache_miss	= miss;
	s->iop.bio	= cache_bio;
	bio_get(cache_bio);
	closure_bio_submit(cache_bio, &s->cl, s->d);

	return ret;
out_put:
	bio_put(cache_bio);
out_submit:
	miss->bi_end_io		= request_endio;
	miss->bi_private	= &s->cl;
	closure_bio_submit(miss, &s->cl, s->d);
	return ret;
}
static void cached_dev_read(struct cached_dev *dc, struct search *s)
{
	struct closure *cl = &s->cl;

	closure_call(&s->iop.cl, cache_lookup, NULL, cl);
	continue_at(cl, cached_dev_read_done_bh, NULL);
}

/* Process writes */
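
/*
 * Write path: each write is classified in cached_dev_write(). Bypassed
 * writes go only to the backing device, with bch_data_insert() in bypass
 * mode invalidating the stale cached range; writeback writes go only to the
 * cache, with bch_writeback_add() arranging for the dirty data to be flushed
 * later; and writethrough writes are cloned so the data goes to both devices
 * in parallel.
 */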
static void cached_dev_write_complete(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	up_read_non_owner(&dc->writeback_lock);
	cached_dev_bio_complete(cl);
}

static void cached_dev_write(struct cached_dev *dc, struct search *s)
{
	struct closure *cl = &s->cl;
	struct bio *bio = &s->bio.bio;
	struct bkey start = KEY(dc->disk.id, bio->bi_iter.bi_sector, 0);
	struct bkey end = KEY(dc->disk.id, bio_end_sector(bio), 0);

	bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys, &start, &end);

	down_read_non_owner(&dc->writeback_lock);
	if (bch_keybuf_check_overlapping(&dc->writeback_keys, &start, &end)) {
		/*
		 * We overlap with some dirty data undergoing background
		 * writeback, force this write to writeback
		 */
		s->iop.bypass = false;
		s->iop.writeback = true;
	}

	/*
	 * Discards aren't _required_ to do anything, so skipping if
	 * check_overlapping returned true is ok
	 *
	 * But check_overlapping drops dirty keys for which io hasn't started,
	 * so we still want to call it.
	 */
	if (bio->bi_rw & REQ_DISCARD)
		s->iop.bypass = true;

	if (should_writeback(dc, s->orig_bio,
			     cache_mode(dc, bio),
			     s->iop.bypass)) {
		s->iop.bypass = false;
		s->iop.writeback = true;
	}

	if (s->iop.bypass) {
		s->iop.bio = s->orig_bio;
		bio_get(s->iop.bio);

		if (!(bio->bi_rw & REQ_DISCARD) ||
		    blk_queue_discard(bdev_get_queue(dc->bdev)))
			closure_bio_submit(bio, cl, s->d);
	} else if (s->iop.writeback) {
		bch_writeback_add(dc);
		s->iop.bio = bio;

		if (bio->bi_rw & REQ_FLUSH) {
			/* Also need to send a flush to the backing device */
			struct bio *flush = bio_alloc_bioset(GFP_NOIO, 0,
							     dc->disk.bio_split);

			flush->bi_rw	= WRITE_FLUSH;
			flush->bi_bdev	= bio->bi_bdev;
			flush->bi_end_io = request_endio;
			flush->bi_private = cl;

			closure_bio_submit(flush, cl, s->d);
		}
	} else {
		s->iop.bio = bio_clone_fast(bio, GFP_NOIO, dc->disk.bio_split);

		closure_bio_submit(bio, cl, s->d);
	}

	closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
	continue_at(cl, cached_dev_write_complete, NULL);
}
static void cached_dev_nodata(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct bio *bio = &s->bio.bio;

	if (s->iop.flush_journal)
		bch_journal_meta(s->iop.c, cl);

	/* If it's a flush, we send the flush to the backing device too */
	closure_bio_submit(bio, cl, s->d);

	continue_at(cl, cached_dev_bio_complete, NULL);
}

/* Cached devices - read & write stuff */

static void cached_dev_make_request(struct request_queue *q, struct bio *bio)
{
	struct search *s;
	struct bcache_device *d = bio->bi_bdev->bd_disk->private_data;
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
	int cpu, rw = bio_data_dir(bio);

	cpu = part_stat_lock();
	part_stat_inc(cpu, &d->disk->part0, ios[rw]);
	part_stat_add(cpu, &d->disk->part0, sectors[rw], bio_sectors(bio));
	part_stat_unlock();

	bio->bi_bdev = dc->bdev;
	bio->bi_iter.bi_sector += dc->sb.data_offset;

	if (cached_dev_get(dc)) {
		s = search_alloc(bio, d);
		trace_bcache_request_start(s->d, bio);

		if (!bio->bi_iter.bi_size) {
			/*
			 * can't call bch_journal_meta from under
			 * generic_make_request
			 */
			continue_at_nobarrier(&s->cl,
					      cached_dev_nodata,
					      bcache_wq);
		} else {
			s->iop.bypass = check_should_bypass(dc, bio);

			if (rw)
				cached_dev_write(dc, s);
			else
				cached_dev_read(dc, s);
		}
	} else {
		if ((bio->bi_rw & REQ_DISCARD) &&
		    !blk_queue_discard(bdev_get_queue(dc->bdev)))
			bio_endio(bio, 0);
		else
			bch_generic_make_request(bio, &d->bio_split_hook);
	}
}

static int cached_dev_ioctl(struct bcache_device *d, fmode_t mode,
			    unsigned int cmd, unsigned long arg)
{
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
	return __blkdev_driver_ioctl(dc->bdev, mode, cmd, arg);
}

static int cached_dev_congested(void *data, int bits)
{
	struct bcache_device *d = data;
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
	struct request_queue *q = bdev_get_queue(dc->bdev);
	int ret = 0;

	if (bdi_congested(&q->backing_dev_info, bits))
		return 1;

	if (cached_dev_get(dc)) {
		unsigned i;
		struct cache *ca;

		for_each_cache(ca, d->c, i) {
			q = bdev_get_queue(ca->bdev);
			ret |= bdi_congested(&q->backing_dev_info, bits);
		}

		cached_dev_put(dc);
	}

	return ret;
}

void bch_cached_dev_request_init(struct cached_dev *dc)
{
	struct gendisk *g = dc->disk.disk;

	g->queue->make_request_fn		= cached_dev_make_request;
	g->queue->backing_dev_info.congested_fn = cached_dev_congested;
	dc->disk.cache_miss			= cached_dev_cache_miss;
	dc->disk.ioctl				= cached_dev_ioctl;
}

/* Flash backed devices */
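
/*
 * Flash-only volumes live entirely in the cache set; there is no backing
 * device, so a cache miss is just a hole and the read below simply
 * zero-fills that part of the bio. Writes always take the writeback path,
 * since the cache holds the only copy of the data.
 */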
static int flash_dev_cache_miss(struct btree *b, struct search *s,
				struct bio *bio, unsigned sectors)
{
	unsigned bytes = min(sectors, bio_sectors(bio)) << 9;

	swap(bio->bi_iter.bi_size, bytes);
	zero_fill_bio(bio);
	swap(bio->bi_iter.bi_size, bytes);

	bio_advance(bio, bytes);

	if (!bio->bi_iter.bi_size)
		return MAP_DONE;

	return MAP_CONTINUE;
}

static void flash_dev_nodata(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);

	if (s->iop.flush_journal)
		bch_journal_meta(s->iop.c, cl);

	continue_at(cl, search_free, NULL);
}

static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
{
	struct search *s;
	struct closure *cl;
	struct bcache_device *d = bio->bi_bdev->bd_disk->private_data;
	int cpu, rw = bio_data_dir(bio);

	cpu = part_stat_lock();
	part_stat_inc(cpu, &d->disk->part0, ios[rw]);
	part_stat_add(cpu, &d->disk->part0, sectors[rw], bio_sectors(bio));
	part_stat_unlock();

	s = search_alloc(bio, d);
	cl = &s->cl;
	bio = &s->bio.bio;

	trace_bcache_request_start(s->d, bio);

	if (!bio->bi_iter.bi_size) {
		/*
		 * can't call bch_journal_meta from under
		 * generic_make_request
		 */
		continue_at_nobarrier(&s->cl,
				      flash_dev_nodata,
				      bcache_wq);
	} else if (rw) {
		bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys,
					&KEY(d->id, bio->bi_iter.bi_sector, 0),
					&KEY(d->id, bio_end_sector(bio), 0));

		s->iop.bypass		= (bio->bi_rw & REQ_DISCARD) != 0;
		s->iop.writeback	= true;
		s->iop.bio		= bio;

		closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
	} else {
		closure_call(&s->iop.cl, cache_lookup, NULL, cl);
	}

	continue_at(cl, search_free, NULL);
}

static int flash_dev_ioctl(struct bcache_device *d, fmode_t mode,
			   unsigned int cmd, unsigned long arg)
{
	return -ENOTTY;
}

static int flash_dev_congested(void *data, int bits)
{
	struct bcache_device *d = data;
	struct request_queue *q;
	struct cache *ca;
	unsigned i;
	int ret = 0;

	for_each_cache(ca, d->c, i) {
		q = bdev_get_queue(ca->bdev);
		ret |= bdi_congested(&q->backing_dev_info, bits);
	}

	return ret;
}

void bch_flash_dev_request_init(struct bcache_device *d)
{
	struct gendisk *g = d->disk;

	g->queue->make_request_fn		= flash_dev_make_request;
	g->queue->backing_dev_info.congested_fn	= flash_dev_congested;
	d->cache_miss				= flash_dev_cache_miss;
	d->ioctl				= flash_dev_ioctl;
}
void bch_request_exit(void)
{
	if (bch_search_cache)
		kmem_cache_destroy(bch_search_cache);
}

int __init bch_request_init(void)
{
	bch_search_cache = KMEM_CACHE(search, 0);
	if (!bch_search_cache)
		return -ENOMEM;

	return 0;
}