/*
 * Main bcache entry point - handle a read or a write request and decide what to
 * do with it; the make_request functions are called by the block layer.
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "request.h"
#include "writeback.h"

#include <linux/module.h>
#include <linux/hash.h>
#include <linux/random.h>
#include <linux/backing-dev.h>

#include <trace/events/bcache.h>

#define CUTOFF_CACHE_ADD	95
#define CUTOFF_CACHE_READA	90

struct kmem_cache *bch_search_cache;

static void bch_data_insert_start(struct closure *);

static unsigned cache_mode(struct cached_dev *dc, struct bio *bio)
{
	return BDEV_CACHE_MODE(&dc->sb);
}

static bool verify(struct cached_dev *dc, struct bio *bio)
{
	return dc->verify;
}

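/*
 * Checksum the bio's data with a 64-bit CRC, kmapping each segment in
 * turn. The result is stored in the key's spare pointer slot, one past
 * the last real pointer at KEY_PTRS(k); the high bit is masked off, so
 * only the low 63 bits of the CRC are kept.
 */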
static void bio_csum(struct bio *bio, struct bkey *k)
{
	struct bio_vec bv;
	struct bvec_iter iter;
	uint64_t csum = 0;

	bio_for_each_segment(bv, bio, iter) {
		void *d = kmap(bv.bv_page) + bv.bv_offset;
		csum = bch_crc64_update(csum, d, bv.bv_len);
		kunmap(bv.bv_page);
	}

	k->ptr[KEY_PTRS(k)] = csum & (~0ULL >> 1);
}

/* Insert data into cache */

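/*
 * The insert path is a small closure-driven state machine:
 * bch_data_insert() -> bch_data_insert_start() -> either
 * bch_data_invalidate() (for bypass writes) or the data writes
 * themselves, completing in bch_data_insert_endio() -> and finally
 * bch_data_insert_keys(), which journals the keys and inserts them into
 * the btree, looping back to bch_data_insert_start() if the keylist
 * filled up before all of the data was written.
 */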
static void bch_data_insert_keys(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
	atomic_t *journal_ref = NULL;
	struct bkey *replace_key = op->replace ? &op->replace_key : NULL;
	int ret;

	/*
	 * If we're looping, we might already be waiting on another journal
	 * write - we can't wait on more than one journal write at a time
	 *
	 * XXX: this looks wrong
	 */
#if 0
	while (atomic_read(&s->cl.remaining) & CLOSURE_WAITING)
		closure_sync(&s->cl);
#endif

	if (!op->replace)
		journal_ref = bch_journal(op->c, &op->insert_keys,
					  op->flush_journal ? cl : NULL);

	ret = bch_btree_insert(op->c, &op->insert_keys,
			       journal_ref, replace_key);
	if (ret == -ESRCH) {
		op->replace_collision = true;
	} else if (ret) {
		op->error = -ENOMEM;
		op->insert_data_done = true;
	}

	if (journal_ref)
		atomic_dec_bug(journal_ref);

	if (!op->insert_data_done) {
		continue_at(cl, bch_data_insert_start, op->wq);
		return;
	}

	bch_keylist_free(&op->insert_keys);
	closure_return(cl);
}

static int bch_keylist_realloc(struct keylist *l, unsigned u64s,
			       struct cache_set *c)
{
	size_t oldsize = bch_keylist_nkeys(l);
	size_t newsize = oldsize + u64s;

	/*
	 * The journalling code doesn't handle the case where the keys to
	 * insert are bigger than an empty write: if we just return -ENOMEM
	 * here, bch_data_insert_start() and bch_data_invalidate() will insert
	 * the keys created so far and finish the rest when the keylist is
	 * empty.
	 */
	if (newsize * sizeof(uint64_t) > block_bytes(c) - sizeof(struct jset))
		return -ENOMEM;

	return __bch_keylist_realloc(l, u64s);
}

static void bch_data_invalidate(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
	struct bio *bio = op->bio;

	pr_debug("invalidating %i sectors from %llu",
		 bio_sectors(bio), (uint64_t) bio->bi_iter.bi_sector);

	while (bio_sectors(bio)) {
		unsigned sectors = min(bio_sectors(bio),
				       1U << (KEY_SIZE_BITS - 1));

		if (bch_keylist_realloc(&op->insert_keys, 2, op->c))
			goto out;

		bio->bi_iter.bi_sector += sectors;
		bio->bi_iter.bi_size -= sectors << 9;

		bch_keylist_add(&op->insert_keys,
				&KEY(op->inode, bio->bi_iter.bi_sector, sectors));
	}

	op->insert_data_done = true;
	bio_put(bio);
out:
	continue_at(cl, bch_data_insert_keys, op->wq);
}

static void bch_data_insert_error(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);

	/*
	 * Our data write just errored, which means we've got a bunch of keys to
	 * insert that point to data that wasn't successfully written.
	 *
	 * We don't have to insert those keys but we still have to invalidate
	 * that region of the cache - so, if we just strip off all the pointers
	 * from the keys we'll accomplish just that.
	 */

	struct bkey *src = op->insert_keys.keys, *dst = op->insert_keys.keys;

	while (src != op->insert_keys.top) {
		struct bkey *n = bkey_next(src);

		SET_KEY_PTRS(src, 0);
		memmove(dst, src, bkey_bytes(src));

		dst = bkey_next(dst);
		src = n;
	}

	op->insert_keys.top = dst;

	bch_data_insert_keys(cl);
}

static void bch_data_insert_endio(struct bio *bio)
{
	struct closure *cl = bio->bi_private;
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);

	if (bio->bi_error) {
		/* TODO: We could try to recover from this. */
		if (op->writeback)
			op->error = bio->bi_error;
		else if (!op->replace)
			set_closure_fn(cl, bch_data_insert_error, op->wq);
		else
			set_closure_fn(cl, NULL, NULL);
	}

	bch_bbio_endio(op->c, bio, bio->bi_error, "writing data to cache");
}

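/*
 * Carve the write up into contiguous allocations: each pass through the
 * loop grabs some sectors with bch_alloc_sectors(), splits that much off
 * the front of the bio, and appends a matching key to op->insert_keys.
 * Whenever the keylist can't grow any further, bch_data_insert_keys()
 * flushes what we have so far and then calls back in here for the rest.
 */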
static void bch_data_insert_start(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
	struct bio *bio = op->bio, *n;

	if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0) {
		set_gc_sectors(op->c);
		wake_up_gc(op->c);
	}

	if (op->bypass)
		return bch_data_invalidate(cl);

	/*
	 * Journal writes are marked REQ_FLUSH; if the original write was a
	 * flush, it'll wait on the journal write.
	 */
	bio->bi_rw &= ~(REQ_FLUSH|REQ_FUA);

	do {
		unsigned i;
		struct bkey *k;
		struct bio_set *split = op->c->bio_split;

		/* 2 u64s for the key header, 1 for the pointer, 1 for the checksum */
		if (bch_keylist_realloc(&op->insert_keys,
					3 + (op->csum ? 1 : 0),
					op->c)) {
			continue_at(cl, bch_data_insert_keys, op->wq);
			return;
		}

		k = op->insert_keys.top;
		bkey_init(k);
		SET_KEY_INODE(k, op->inode);
		SET_KEY_OFFSET(k, bio->bi_iter.bi_sector);

		if (!bch_alloc_sectors(op->c, k, bio_sectors(bio),
				       op->write_point, op->write_prio,
				       op->writeback))
			goto err;

		n = bio_next_split(bio, KEY_SIZE(k), GFP_NOIO, split);

		n->bi_end_io = bch_data_insert_endio;
		n->bi_private = cl;

		if (op->writeback) {
			SET_KEY_DIRTY(k, true);

			for (i = 0; i < KEY_PTRS(k); i++)
				SET_GC_MARK(PTR_BUCKET(op->c, k, i),
					    GC_MARK_DIRTY);
		}

		SET_KEY_CSUM(k, op->csum);
		if (KEY_CSUM(k))
			bio_csum(n, k);

		trace_bcache_cache_insert(k);
		bch_keylist_push(&op->insert_keys);

		n->bi_rw |= REQ_WRITE;
		bch_submit_bbio(n, op->c, k, 0);
	} while (n != bio);

	op->insert_data_done = true;
	continue_at(cl, bch_data_insert_keys, op->wq);
	return;
err:
	/* bch_alloc_sectors() blocks if op->writeback is true */
	BUG_ON(op->writeback);

	/*
	 * But if it's not a writeback write we'd rather just bail out if
	 * there aren't any buckets ready to write to - it might take a while
	 * and we might be starving btree writes for gc or something.
	 */

	if (!op->replace) {
		/*
		 * Writethrough write: We can't complete the write until we've
		 * updated the index. But we don't want to delay the write while
		 * we wait for buckets to be freed up, so just invalidate the
		 * rest of the write.
		 */
		op->bypass = true;
		return bch_data_invalidate(cl);
	} else {
		/*
		 * From a cache miss, we can just insert the keys for the data
		 * we have written or bail out if we didn't do anything.
		 */
		op->insert_data_done = true;
		bio_put(bio);

		if (!bch_keylist_empty(&op->insert_keys))
			continue_at(cl, bch_data_insert_keys, op->wq);
		else
			closure_return(cl);
	}
}

/**
 * bch_data_insert - stick some data in the cache
 *
 * This is the starting point for any data to end up in a cache device; it could
 * be from a normal write, or a writeback write, or a write to a flash only
 * volume - it's also used by the moving garbage collector to compact data in
 * mostly empty buckets.
 *
 * It first writes the data to the cache, creating a list of keys to be inserted
 * (if the data had to be fragmented there will be multiple keys); after the
 * data is written it calls bch_journal, and after the keys have been added to
 * the next journal write they're inserted into the btree.
 *
 * It inserts the data in op->bio; bi_sector is used for the key offset,
 * and op->inode is used for the key inode.
 *
 * If op->bypass is true, instead of inserting the data it invalidates the
 * region of the cache represented by op->bio and op->inode.
 */
void bch_data_insert(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);

	trace_bcache_write(op->c, op->inode, op->bio,
			   op->writeback, op->bypass);

	bch_keylist_init(&op->insert_keys);
	bio_get(op->bio);
	bch_data_insert_start(cl);
}

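/*
 * A sketch of typical usage, modeled on flash_dev_make_request() below
 * (any caller-side names here are illustrative, not part of this API):
 *
 *	op->c		= d->c;
 *	op->inode	= d->id;
 *	op->bio		= bio;
 *	op->writeback	= true;
 *	closure_call(&op->cl, bch_data_insert, NULL, parent_cl);
 *
 * The insert runs asynchronously; op->error holds the result by the time
 * the op drops its ref on the parent closure.
 */
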
/* Congested? */

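/*
 * Returns a threshold in sectors that check_should_bypass() compares
 * against the current sequential I/O stream: 0 means congestion tracking
 * is off, and smaller values mean more congestion (so more I/O gets sent
 * straight to the backing device). c->congested is pushed negative
 * elsewhere when cache I/O exceeds the configured latency thresholds and
 * decays back as c->congested_last_us ages; the remaining headroom below
 * CONGESTED_MAX is scaled by fract_exp_two(), with a little random
 * jitter subtracted so bypass decisions don't synchronize across tasks.
 */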
unsigned bch_get_congested(struct cache_set *c)
{
	int i;
	long rand;

	if (!c->congested_read_threshold_us &&
	    !c->congested_write_threshold_us)
		return 0;

	i = (local_clock_us() - c->congested_last_us) / 1024;
	if (i < 0)
		return 0;

	i += atomic_read(&c->congested);
	if (i >= 0)
		return 0;

	i += CONGESTED_MAX;

	if (i > 0)
		i = fract_exp_two(i, 6);

	rand = get_random_int();
	i -= bitmap_weight(&rand, BITS_PER_LONG);

	return i > 0 ? i : 1;
}

static void add_sequential(struct task_struct *t)
{
	ewma_add(t->sequential_io_avg,
		 t->sequential_io, 8, 0);

	t->sequential_io = 0;
}

static struct hlist_head *iohash(struct cached_dev *dc, uint64_t k)
{
	return &dc->io_hash[hash_64(k, RECENT_IO_BITS)];
}

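/*
 * Decide whether this bio should skip the cache entirely. Bypass is
 * forced for a detaching device, a cache more than CUTOFF_CACHE_ADD
 * percent full, discards, writes in writearound mode (or any I/O with
 * caching off), and I/O that isn't aligned to the cache's block size.
 * Otherwise, sequential streams are detected via a small hash table of
 * recent I/Os (dc->io_hash, recycled in LRU order through dc->io_lru): a
 * bio starting where a recent one ended extends that stream, and streams
 * longer than dc->sequential_cutoff - or than the congestion threshold -
 * are bypassed.
 */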
static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
{
	struct cache_set *c = dc->disk.c;
	unsigned mode = cache_mode(dc, bio);
	unsigned sectors, congested = bch_get_congested(c);
	struct task_struct *task = current;
	struct io *i;

	if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
	    c->gc_stats.in_use > CUTOFF_CACHE_ADD ||
	    (bio->bi_rw & REQ_DISCARD))
		goto skip;

	if (mode == CACHE_MODE_NONE ||
	    (mode == CACHE_MODE_WRITEAROUND &&
	     (bio->bi_rw & REQ_WRITE)))
		goto skip;

	if (bio->bi_iter.bi_sector & (c->sb.block_size - 1) ||
	    bio_sectors(bio) & (c->sb.block_size - 1)) {
		pr_debug("skipping unaligned io");
		goto skip;
	}

	if (bypass_torture_test(dc)) {
		if ((get_random_int() & 3) == 3)
			goto skip;
		else
			goto rescale;
	}

	if (!congested && !dc->sequential_cutoff)
		goto rescale;

	if (!congested &&
	    mode == CACHE_MODE_WRITEBACK &&
	    (bio->bi_rw & REQ_WRITE) &&
	    (bio->bi_rw & REQ_SYNC))
		goto rescale;

	spin_lock(&dc->io_lock);

	hlist_for_each_entry(i, iohash(dc, bio->bi_iter.bi_sector), hash)
		if (i->last == bio->bi_iter.bi_sector &&
		    time_before(jiffies, i->jiffies))
			goto found;

	i = list_first_entry(&dc->io_lru, struct io, lru);

	add_sequential(task);
	i->sequential = 0;
found:
	if (i->sequential + bio->bi_iter.bi_size > i->sequential)
		i->sequential += bio->bi_iter.bi_size;

	i->last = bio_end_sector(bio);
	i->jiffies = jiffies + msecs_to_jiffies(5000);
	task->sequential_io = i->sequential;

	hlist_del(&i->hash);
	hlist_add_head(&i->hash, iohash(dc, i->last));
	list_move_tail(&i->lru, &dc->io_lru);

	spin_unlock(&dc->io_lock);

	sectors = max(task->sequential_io,
		      task->sequential_io_avg) >> 9;

	if (dc->sequential_cutoff &&
	    sectors >= dc->sequential_cutoff >> 9) {
		trace_bcache_bypass_sequential(bio);
		goto skip;
	}

	if (congested && sectors >= congested) {
		trace_bcache_bypass_congested(bio);
		goto skip;
	}

rescale:
	bch_rescale_priorities(c, bio_sectors(bio));
	return false;
skip:
	bch_mark_sectors_bypassed(c, dc, bio_sectors(bio));
	return true;
}

/* Cache lookup */

struct search {
	/* Stack frame for bio_complete */
	struct closure		cl;

	struct bbio		bio;
	struct bio		*orig_bio;
	struct bio		*cache_miss;
	struct bcache_device	*d;

	unsigned		insert_bio_sectors;
	unsigned		recoverable:1;
	unsigned		write:1;
	unsigned		read_dirty_data:1;

	unsigned long		start_time;

	struct btree_op		op;
	struct data_insert_op	iop;
};

static void bch_cache_read_endio(struct bio *bio)
{
	struct bbio *b = container_of(bio, struct bbio, bio);
	struct closure *cl = bio->bi_private;
	struct search *s = container_of(cl, struct search, cl);

	/*
	 * If the bucket was reused while our bio was in flight, we might have
	 * read the wrong data. Set s->iop.error but don't error the bio
	 * itself, so the failure doesn't get counted against the cache
	 * device; we'll still reread the data from the backing device.
	 */

	if (bio->bi_error)
		s->iop.error = bio->bi_error;
	else if (!KEY_DIRTY(&b->key) &&
		 ptr_stale(s->iop.c, &b->key, 0)) {
		atomic_long_inc(&s->iop.c->cache_read_races);
		s->iop.error = -EINTR;
	}

	bch_bbio_endio(s->iop.c, bio, bio->bi_error, "reading from cache");
}

/*
 * Read from a single key, handling the initial cache miss if the key starts in
 * the middle of the bio
 */
static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
{
	struct search *s = container_of(op, struct search, op);
	struct bio *n, *bio = &s->bio.bio;
	struct bkey *bio_key;
	unsigned ptr;

	if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0)) <= 0)
		return MAP_CONTINUE;

	if (KEY_INODE(k) != s->iop.inode ||
	    KEY_START(k) > bio->bi_iter.bi_sector) {
		unsigned bio_sectors = bio_sectors(bio);
		unsigned sectors = KEY_INODE(k) == s->iop.inode
			? min_t(uint64_t, INT_MAX,
				KEY_START(k) - bio->bi_iter.bi_sector)
			: INT_MAX;

		int ret = s->d->cache_miss(b, s, bio, sectors);
		if (ret != MAP_CONTINUE)
			return ret;

		/* if this was a complete miss we shouldn't get here */
		BUG_ON(bio_sectors <= sectors);
	}

	if (!KEY_SIZE(k))
		return MAP_CONTINUE;

	/* XXX: figure out best pointer - for multiple cache devices */
	ptr = 0;

	PTR_BUCKET(b->c, k, ptr)->prio = INITIAL_PRIO;

	if (KEY_DIRTY(k))
		s->read_dirty_data = true;

	n = bio_next_split(bio, min_t(uint64_t, INT_MAX,
				      KEY_OFFSET(k) - bio->bi_iter.bi_sector),
			   GFP_NOIO, s->d->bio_split);

	bio_key = &container_of(n, struct bbio, bio)->key;
	bch_bkey_copy_single_ptr(bio_key, k, ptr);

	bch_cut_front(&KEY(s->iop.inode, n->bi_iter.bi_sector, 0), bio_key);
	bch_cut_back(&KEY(s->iop.inode, bio_end_sector(n), 0), bio_key);

	n->bi_end_io = bch_cache_read_endio;
	n->bi_private = &s->cl;

	/*
	 * The bucket we're reading from might be reused while our bio
	 * is in flight, and we could then end up reading the wrong
	 * data.
	 *
	 * We guard against this by checking (in bch_cache_read_endio()) if
	 * the pointer is stale again; if so, we treat it as an error
	 * and reread from the backing device (but we don't pass that
	 * error up anywhere).
	 */

	__bch_submit_bbio(n, b->c);
	return n == bio ? MAP_DONE : MAP_CONTINUE;
}

static void cache_lookup(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, iop.cl);
	struct bio *bio = &s->bio.bio;
	int ret;

	bch_btree_op_init(&s->op, -1);

	ret = bch_btree_map_keys(&s->op, s->iop.c,
				 &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0),
				 cache_lookup_fn, MAP_END_KEY);
	if (ret == -EAGAIN) {
		continue_at(cl, cache_lookup, bcache_wq);
		return;
	}

	closure_return(cl);
}

/* Common code for the make_request functions */

static void request_endio(struct bio *bio)
{
	struct closure *cl = bio->bi_private;

	if (bio->bi_error) {
		struct search *s = container_of(cl, struct search, cl);
		s->iop.error = bio->bi_error;
		/* Only cache read errors are recoverable */
		s->recoverable = false;
	}

	bio_put(bio);
	closure_put(cl);
}

static void bio_complete(struct search *s)
{
	if (s->orig_bio) {
		generic_end_io_acct(bio_data_dir(s->orig_bio),
				    &s->d->disk->part0, s->start_time);

		trace_bcache_request_end(s->d, s->orig_bio);
		s->orig_bio->bi_error = s->iop.error;
		bio_endio(s->orig_bio);
		s->orig_bio = NULL;
	}
}

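/*
 * Clone the original bio into the bbio embedded in struct search. The
 * embedded bio is never freed - it lives and dies with the search - so
 * its refcount is inflated to 3 up front; each completion
 * (request_endio()) does a bio_put(), and 3 presumably covers the worst
 * case number of times the bio is submitted across the lookup and
 * backing-device-retry paths.
 */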
static void do_bio_hook(struct search *s, struct bio *orig_bio)
{
	struct bio *bio = &s->bio.bio;

	bio_init(bio);
	__bio_clone_fast(bio, orig_bio);
	bio->bi_end_io = request_endio;
	bio->bi_private = &s->cl;

	bio_cnt_set(bio, 3);
}

static void search_free(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	bio_complete(s);

	if (s->iop.bio)
		bio_put(s->iop.bio);

	closure_debug_destroy(cl);
	mempool_free(s, s->d->c->search);
}

static inline struct search *search_alloc(struct bio *bio,
					  struct bcache_device *d)
{
	struct search *s;

	s = mempool_alloc(d->c->search, GFP_NOIO);

	closure_init(&s->cl, NULL);
	do_bio_hook(s, bio);

	s->orig_bio = bio;
	s->cache_miss = NULL;
	s->d = d;
	s->recoverable = 1;
	s->write = (bio->bi_rw & REQ_WRITE) != 0;
	s->read_dirty_data = 0;
	s->start_time = jiffies;

	s->iop.c = d->c;
	s->iop.bio = NULL;
	s->iop.inode = d->id;
	s->iop.write_point = hash_long((unsigned long) current, 16);
	s->iop.write_prio = 0;
	s->iop.error = 0;
	s->iop.flags = 0;
	s->iop.flush_journal = (bio->bi_rw & (REQ_FLUSH|REQ_FUA)) != 0;
	s->iop.wq = bcache_wq;

	return s;
}

/* Cached devices */

static void cached_dev_bio_complete(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	search_free(cl);
	cached_dev_put(dc);
}

/* Process reads */

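/*
 * The read path as a chain of closures: cached_dev_read() kicks off
 * cache_lookup(); when that finishes, cached_dev_read_done_bh()
 * dispatches to cached_dev_read_error() (retry from the backing device),
 * cached_dev_read_done() (copy the bounce buffers out and insert the
 * missed data into the cache), or straight to cached_dev_bio_complete().
 */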
static void cached_dev_cache_miss_done(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);

	if (s->iop.replace_collision)
		bch_mark_cache_miss_collision(s->iop.c, s->d);

	if (s->iop.bio) {
		int i;
		struct bio_vec *bv;

		bio_for_each_segment_all(bv, s->iop.bio, i)
			__free_page(bv->bv_page);
	}

	cached_dev_bio_complete(cl);
}

static void cached_dev_read_error(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct bio *bio = &s->bio.bio;

	if (s->recoverable) {
		/* Retry from the backing device: */
		trace_bcache_read_retry(s->orig_bio);

		s->iop.error = 0;
		do_bio_hook(s, s->orig_bio);

		/* XXX: invalidate cache */

		closure_bio_submit(bio, cl, s->d);
	}

	continue_at(cl, cached_dev_cache_miss_done, NULL);
}

static void cached_dev_read_done(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	/*
	 * We had a cache miss; cache_bio now contains data ready to be inserted
	 * into the cache.
	 *
	 * First, we copy the data we just read from cache_bio's bounce buffers
	 * to the buffers the original bio pointed to:
	 */

	if (s->iop.bio) {
		bio_reset(s->iop.bio);
		s->iop.bio->bi_iter.bi_sector = s->cache_miss->bi_iter.bi_sector;
		s->iop.bio->bi_bdev = s->cache_miss->bi_bdev;
		s->iop.bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
		bch_bio_map(s->iop.bio, NULL);

		bio_copy_data(s->cache_miss, s->iop.bio);

		bio_put(s->cache_miss);
		s->cache_miss = NULL;
	}

	if (verify(dc, &s->bio.bio) && s->recoverable && !s->read_dirty_data)
		bch_data_verify(dc, s->orig_bio);

	bio_complete(s);

	if (s->iop.bio &&
	    !test_bit(CACHE_SET_STOPPING, &s->iop.c->flags)) {
		BUG_ON(!s->iop.replace);
		closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
	}

	continue_at(cl, cached_dev_cache_miss_done, NULL);
}

static void cached_dev_read_done_bh(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	bch_mark_cache_accounting(s->iop.c, s->d,
				  !s->cache_miss, s->iop.bypass);
	trace_bcache_read(s->orig_bio, !s->cache_miss, s->iop.bypass);

	if (s->iop.error)
		continue_at_nobarrier(cl, cached_dev_read_error, bcache_wq);
	else if (s->iop.bio || verify(dc, &s->bio.bio))
		continue_at_nobarrier(cl, cached_dev_read_done, bcache_wq);
	else
		continue_at_nobarrier(cl, cached_dev_bio_complete, NULL);
}

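/*
 * Handle the portion of a read the cache couldn't satisfy. Unless we're
 * bypassing, the miss may be extended by up to dc->readahead sectors
 * (skipped for REQ_RAHEAD and REQ_META I/O, and when the cache is nearly
 * full). bch_btree_insert_check_key() marks the range in the btree so a
 * racing write is detected as a replace collision and our later insert is
 * dropped; cache_bio is a bounce buffer that the backing device read
 * lands in before cached_dev_read_done() inserts it into the cache.
 */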
static int cached_dev_cache_miss(struct btree *b, struct search *s,
				 struct bio *bio, unsigned sectors)
{
	int ret = MAP_CONTINUE;
	unsigned reada = 0;
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
	struct bio *miss, *cache_bio;

	if (s->cache_miss || s->iop.bypass) {
		miss = bio_next_split(bio, sectors, GFP_NOIO, s->d->bio_split);
		ret = miss == bio ? MAP_DONE : MAP_CONTINUE;
		goto out_submit;
	}

	if (!(bio->bi_rw & REQ_RAHEAD) &&
	    !(bio->bi_rw & REQ_META) &&
	    s->iop.c->gc_stats.in_use < CUTOFF_CACHE_READA)
		reada = min_t(sector_t, dc->readahead >> 9,
			      bdev_sectors(bio->bi_bdev) - bio_end_sector(bio));

	s->insert_bio_sectors = min(sectors, bio_sectors(bio) + reada);

	s->iop.replace_key = KEY(s->iop.inode,
				 bio->bi_iter.bi_sector + s->insert_bio_sectors,
				 s->insert_bio_sectors);

	ret = bch_btree_insert_check_key(b, &s->op, &s->iop.replace_key);
	if (ret)
		return ret;

	s->iop.replace = true;

	miss = bio_next_split(bio, sectors, GFP_NOIO, s->d->bio_split);

	/* btree_search_recurse()'s btree iterator is no good anymore */
	ret = miss == bio ? MAP_DONE : -EINTR;

	cache_bio = bio_alloc_bioset(GFP_NOWAIT,
			DIV_ROUND_UP(s->insert_bio_sectors, PAGE_SECTORS),
			dc->disk.bio_split);
	if (!cache_bio)
		goto out_submit;

	cache_bio->bi_iter.bi_sector = miss->bi_iter.bi_sector;
	cache_bio->bi_bdev = miss->bi_bdev;
	cache_bio->bi_iter.bi_size = s->insert_bio_sectors << 9;

	cache_bio->bi_end_io = request_endio;
	cache_bio->bi_private = &s->cl;

	bch_bio_map(cache_bio, NULL);
	if (bio_alloc_pages(cache_bio, __GFP_NOWARN|GFP_NOIO))
		goto out_put;

	if (reada)
		bch_mark_cache_readahead(s->iop.c, s->d);

	s->cache_miss = miss;
	s->iop.bio = cache_bio;
	bio_get(cache_bio);
	closure_bio_submit(cache_bio, &s->cl, s->d);

	return ret;
out_put:
	bio_put(cache_bio);
out_submit:
	miss->bi_end_io = request_endio;
	miss->bi_private = &s->cl;
	closure_bio_submit(miss, &s->cl, s->d);
	return ret;
}

static void cached_dev_read(struct cached_dev *dc, struct search *s)
{
	struct closure *cl = &s->cl;

	closure_call(&s->iop.cl, cache_lookup, NULL, cl);
	continue_at(cl, cached_dev_read_done_bh, NULL);
}

/* Process writes */

static void cached_dev_write_complete(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	up_read_non_owner(&dc->writeback_lock);
	cached_dev_bio_complete(cl);
}

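/*
 * Writes go one of three ways. Bypass: the data goes only to the backing
 * device and the cached range is invalidated. Writeback: the data goes
 * only to the cache and is marked dirty (with a separate empty flush bio
 * sent to the backing device if the write was a flush). Writethrough: the
 * original bio goes to the backing device while a clone is inserted into
 * the cache. Overlap with in-flight background writeback, or
 * should_writeback() saying so, forces the writeback path.
 */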
static void cached_dev_write(struct cached_dev *dc, struct search *s)
{
	struct closure *cl = &s->cl;
	struct bio *bio = &s->bio.bio;
	struct bkey start = KEY(dc->disk.id, bio->bi_iter.bi_sector, 0);
	struct bkey end = KEY(dc->disk.id, bio_end_sector(bio), 0);

	bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys, &start, &end);

	down_read_non_owner(&dc->writeback_lock);
	if (bch_keybuf_check_overlapping(&dc->writeback_keys, &start, &end)) {
		/*
		 * We overlap with some dirty data undergoing background
		 * writeback, force this write to writeback
		 */
		s->iop.bypass = false;
		s->iop.writeback = true;
	}

	/*
	 * Discards aren't _required_ to do anything, so skipping if
	 * check_overlapping returned true is ok
	 *
	 * But check_overlapping drops dirty keys for which io hasn't started,
	 * so we still want to call it.
	 */
	if (bio->bi_rw & REQ_DISCARD)
		s->iop.bypass = true;

	if (should_writeback(dc, s->orig_bio,
			     cache_mode(dc, bio),
			     s->iop.bypass)) {
		s->iop.bypass = false;
		s->iop.writeback = true;
	}

	if (s->iop.bypass) {
		s->iop.bio = s->orig_bio;
		bio_get(s->iop.bio);

		if (!(bio->bi_rw & REQ_DISCARD) ||
		    blk_queue_discard(bdev_get_queue(dc->bdev)))
			closure_bio_submit(bio, cl, s->d);
	} else if (s->iop.writeback) {
		bch_writeback_add(dc);
		s->iop.bio = bio;

		if (bio->bi_rw & REQ_FLUSH) {
			/* Also need to send a flush to the backing device */
			struct bio *flush = bio_alloc_bioset(GFP_NOIO, 0,
							     dc->disk.bio_split);

			flush->bi_rw = WRITE_FLUSH;
			flush->bi_bdev = bio->bi_bdev;
			flush->bi_end_io = request_endio;
			flush->bi_private = cl;

			closure_bio_submit(flush, cl, s->d);
		}
	} else {
		s->iop.bio = bio_clone_fast(bio, GFP_NOIO, dc->disk.bio_split);

		closure_bio_submit(bio, cl, s->d);
	}

	closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
	continue_at(cl, cached_dev_write_complete, NULL);
}

static void cached_dev_nodata(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct bio *bio = &s->bio.bio;

	if (s->iop.flush_journal)
		bch_journal_meta(s->iop.c, cl);

	/* If it's a flush, we send the flush to the backing device too */
	closure_bio_submit(bio, cl, s->d);

	continue_at(cl, cached_dev_bio_complete, NULL);
}

/* Cached devices - read & write stuff */

static void cached_dev_make_request(struct request_queue *q, struct bio *bio)
{
	struct search *s;
	struct bcache_device *d = bio->bi_bdev->bd_disk->private_data;
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
	int rw = bio_data_dir(bio);

	generic_start_io_acct(rw, bio_sectors(bio), &d->disk->part0);

	bio->bi_bdev = dc->bdev;
	bio->bi_iter.bi_sector += dc->sb.data_offset;

	if (cached_dev_get(dc)) {
		s = search_alloc(bio, d);
		trace_bcache_request_start(s->d, bio);

		if (!bio->bi_iter.bi_size) {
			/*
			 * can't call bch_journal_meta from under
			 * generic_make_request
			 */
			continue_at_nobarrier(&s->cl,
					      cached_dev_nodata,
					      bcache_wq);
		} else {
			s->iop.bypass = check_should_bypass(dc, bio);

			if (rw)
				cached_dev_write(dc, s);
			else
				cached_dev_read(dc, s);
		}
	} else {
		if ((bio->bi_rw & REQ_DISCARD) &&
		    !blk_queue_discard(bdev_get_queue(dc->bdev)))
			bio_endio(bio);
		else
			bch_generic_make_request(bio, &d->bio_split_hook);
	}
}

static int cached_dev_ioctl(struct bcache_device *d, fmode_t mode,
			    unsigned int cmd, unsigned long arg)
{
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
	return __blkdev_driver_ioctl(dc->bdev, mode, cmd, arg);
}

static int cached_dev_congested(void *data, int bits)
{
	struct bcache_device *d = data;
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
	struct request_queue *q = bdev_get_queue(dc->bdev);
	int ret = 0;

	if (bdi_congested(&q->backing_dev_info, bits))
		return 1;

	if (cached_dev_get(dc)) {
		unsigned i;
		struct cache *ca;

		for_each_cache(ca, d->c, i) {
			q = bdev_get_queue(ca->bdev);
			ret |= bdi_congested(&q->backing_dev_info, bits);
		}

		cached_dev_put(dc);
	}

	return ret;
}

void bch_cached_dev_request_init(struct cached_dev *dc)
{
	struct gendisk *g = dc->disk.disk;

	g->queue->make_request_fn = cached_dev_make_request;
	g->queue->backing_dev_info.congested_fn = cached_dev_congested;
	dc->disk.cache_miss = cached_dev_cache_miss;
	dc->disk.ioctl = cached_dev_ioctl;
}

/* Flash backed devices */

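/*
 * Flash-only volumes have no backing device, so a cache miss is simply a
 * hole: the missed range just reads back as zeroes. bi_size is swapped
 * down before zero_fill_bio() so only the missed portion of the bio is
 * zeroed, then the bio is advanced past it.
 */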
static int flash_dev_cache_miss(struct btree *b, struct search *s,
				struct bio *bio, unsigned sectors)
{
	unsigned bytes = min(sectors, bio_sectors(bio)) << 9;

	swap(bio->bi_iter.bi_size, bytes);
	zero_fill_bio(bio);
	swap(bio->bi_iter.bi_size, bytes);

	bio_advance(bio, bytes);

	if (!bio->bi_iter.bi_size)
		return MAP_DONE;

	return MAP_CONTINUE;
}

static void flash_dev_nodata(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);

	if (s->iop.flush_journal)
		bch_journal_meta(s->iop.c, cl);

	continue_at(cl, search_free, NULL);
}

static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
{
	struct search *s;
	struct closure *cl;
	struct bcache_device *d = bio->bi_bdev->bd_disk->private_data;
	int rw = bio_data_dir(bio);

	generic_start_io_acct(rw, bio_sectors(bio), &d->disk->part0);

	s = search_alloc(bio, d);
	cl = &s->cl;
	bio = &s->bio.bio;

	trace_bcache_request_start(s->d, bio);

	if (!bio->bi_iter.bi_size) {
		/*
		 * can't call bch_journal_meta from under
		 * generic_make_request
		 */
		continue_at_nobarrier(&s->cl,
				      flash_dev_nodata,
				      bcache_wq);
		return;
	} else if (rw) {
		bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys,
					&KEY(d->id, bio->bi_iter.bi_sector, 0),
					&KEY(d->id, bio_end_sector(bio), 0));

		s->iop.bypass = (bio->bi_rw & REQ_DISCARD) != 0;
		s->iop.writeback = true;
		s->iop.bio = bio;

		closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
	} else {
		closure_call(&s->iop.cl, cache_lookup, NULL, cl);
	}

	continue_at(cl, search_free, NULL);
}

static int flash_dev_ioctl(struct bcache_device *d, fmode_t mode,
			   unsigned int cmd, unsigned long arg)
{
	return -ENOTTY;
}

static int flash_dev_congested(void *data, int bits)
{
	struct bcache_device *d = data;
	struct request_queue *q;
	struct cache *ca;
	unsigned i;
	int ret = 0;

	for_each_cache(ca, d->c, i) {
		q = bdev_get_queue(ca->bdev);
		ret |= bdi_congested(&q->backing_dev_info, bits);
	}

	return ret;
}

void bch_flash_dev_request_init(struct bcache_device *d)
{
	struct gendisk *g = d->disk;

	g->queue->make_request_fn = flash_dev_make_request;
	g->queue->backing_dev_info.congested_fn = flash_dev_congested;
	d->cache_miss = flash_dev_cache_miss;
	d->ioctl = flash_dev_ioctl;
}

void bch_request_exit(void)
{
	if (bch_search_cache)
		kmem_cache_destroy(bch_search_cache);
}

int __init bch_request_init(void)
{
	bch_search_cache = KMEM_CACHE(search, 0);
	if (!bch_search_cache)
		return -ENOMEM;

	return 0;
}