/*
 * Main bcache entry point - handle a read or a write request and decide what to
 * do with it; the make_request functions are called by the block layer.
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "request.h"
#include "writeback.h"

#include <linux/module.h>
#include <linux/hash.h>
#include <linux/random.h>

#include <trace/events/bcache.h>

#define CUTOFF_CACHE_ADD	95
#define CUTOFF_CACHE_READA	90

struct kmem_cache *bch_search_cache;

static void bch_data_insert_start(struct closure *);

static unsigned cache_mode(struct cached_dev *dc, struct bio *bio)
{
	return BDEV_CACHE_MODE(&dc->sb);
}

static bool verify(struct cached_dev *dc, struct bio *bio)
{
	return dc->verify;
}

static void bio_csum(struct bio *bio, struct bkey *k)
{
	struct bio_vec bv;
	struct bvec_iter iter;
	uint64_t csum = 0;

	bio_for_each_segment(bv, bio, iter) {
		void *d = kmap(bv.bv_page) + bv.bv_offset;
		csum = bch_crc64_update(csum, d, bv.bv_len);
		kunmap(bv.bv_page);
	}

	k->ptr[KEY_PTRS(k)] = csum & (~0ULL >> 1);
}

/* Insert data into cache */

static void bch_data_insert_keys(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
	atomic_t *journal_ref = NULL;
	struct bkey *replace_key = op->replace ? &op->replace_key : NULL;
	int ret;

	/*
	 * If we're looping, might already be waiting on
	 * another journal write - can't wait on more than one journal write at
	 * a time
	 *
	 * XXX: this looks wrong
	 */
#if 0
	while (atomic_read(&s->cl.remaining) & CLOSURE_WAITING)
		closure_sync(&s->cl);
#endif

	if (!op->replace)
		journal_ref = bch_journal(op->c, &op->insert_keys,
					  op->flush_journal ? cl : NULL);

	ret = bch_btree_insert(op->c, &op->insert_keys,
			       journal_ref, replace_key);
	if (ret == -ESRCH) {
		op->replace_collision = true;
	} else if (ret) {
		op->error = -ENOMEM;
		op->insert_data_done = true;
	}

	if (journal_ref)
		atomic_dec_bug(journal_ref);

	if (!op->insert_data_done)
		continue_at(cl, bch_data_insert_start, op->wq);

	bch_keylist_free(&op->insert_keys);
	closure_return(cl);
}

static int bch_keylist_realloc(struct keylist *l, unsigned u64s,
			       struct cache_set *c)
{
	size_t oldsize = bch_keylist_nkeys(l);
	size_t newsize = oldsize + u64s;

	/*
	 * The journalling code doesn't handle the case where the keys to
	 * insert are bigger than an empty write: If we just return -ENOMEM
	 * here, bch_data_insert_start() and bch_data_invalidate() will insert
	 * the keys created so far and finish the rest when the keylist is
	 * empty.
	 */
	if (newsize * sizeof(uint64_t) > block_bytes(c) - sizeof(struct jset))
		return -ENOMEM;

	return __bch_keylist_realloc(l, u64s);
}
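
/*
 * Worked example for the size check above (hypothetical numbers: the real
 * block size comes from the superblock and struct jset has its own layout):
 * with a 4KiB btree block and a jset header of, say, 40 bytes, a keylist
 * may grow to (4096 - 40) / sizeof(uint64_t) = 507 u64s before it could no
 * longer fit in a single empty journal write and -ENOMEM is returned.
 */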

static void bch_data_invalidate(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
	struct bio *bio = op->bio;

	pr_debug("invalidating %u sectors from %llu",
		 bio_sectors(bio), (uint64_t) bio->bi_iter.bi_sector);

	while (bio_sectors(bio)) {
		unsigned sectors = min(bio_sectors(bio),
				       1U << (KEY_SIZE_BITS - 1));

		if (bch_keylist_realloc(&op->insert_keys, 2, op->c))
			goto out;

		bio->bi_iter.bi_sector += sectors;
		bio->bi_iter.bi_size -= sectors << 9;

		bch_keylist_add(&op->insert_keys,
				&KEY(op->inode, bio->bi_iter.bi_sector, sectors));
	}

	op->insert_data_done = true;
	bio_put(bio);
out:
	continue_at(cl, bch_data_insert_keys, op->wq);
}

static void bch_data_insert_error(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);

	/*
	 * Our data write just errored, which means we've got a bunch of keys to
	 * insert that point to data that wasn't successfully written.
	 *
	 * We don't have to insert those keys but we still have to invalidate
	 * that region of the cache - so, if we just strip off all the pointers
	 * from the keys we'll accomplish just that.
	 */

	struct bkey *src = op->insert_keys.keys, *dst = op->insert_keys.keys;

	while (src != op->insert_keys.top) {
		struct bkey *n = bkey_next(src);

		SET_KEY_PTRS(src, 0);
		memmove(dst, src, bkey_bytes(src));

		dst = bkey_next(dst);
		src = n;
	}

	op->insert_keys.top = dst;

	bch_data_insert_keys(cl);
}
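
/*
 * Sketch of the transformation above (simplified, illustrative key): a key
 * such as KEY(inode, offset, size) carrying one bucket pointer becomes the
 * same key with KEY_PTRS == 0. A zero-pointer key still overwrites - i.e.
 * invalidates - the range it covers in the index, but no longer claims any
 * cached data, which is exactly what a failed data write needs.
 */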

static void bch_data_insert_endio(struct bio *bio, int error)
{
	struct closure *cl = bio->bi_private;
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);

	if (error) {
		/* TODO: We could try to recover from this. */
		if (op->writeback)
			op->error = error;
		else if (!op->replace)
			set_closure_fn(cl, bch_data_insert_error, op->wq);
		else
			set_closure_fn(cl, NULL, NULL);
	}

	bch_bbio_endio(op->c, bio, error, "writing data to cache");
}

static void bch_data_insert_start(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
	struct bio *bio = op->bio, *n;

	if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0) {
		set_gc_sectors(op->c);
		wake_up_gc(op->c);
	}

	if (op->bypass)
		return bch_data_invalidate(cl);

	/*
	 * Journal writes are marked REQ_FLUSH; if the original write was a
	 * flush, it'll wait on the journal write.
	 */
	bio->bi_rw &= ~(REQ_FLUSH|REQ_FUA);

	do {
		unsigned i;
		struct bkey *k;
		struct bio_set *split = op->c->bio_split;

		/* 1 for the device pointer and 1 for the chksum */
		if (bch_keylist_realloc(&op->insert_keys,
					3 + (op->csum ? 1 : 0),
					op->c))
			continue_at(cl, bch_data_insert_keys, op->wq);

		k = op->insert_keys.top;
		bkey_init(k);
		SET_KEY_INODE(k, op->inode);
		SET_KEY_OFFSET(k, bio->bi_iter.bi_sector);

		if (!bch_alloc_sectors(op->c, k, bio_sectors(bio),
				       op->write_point, op->write_prio,
				       op->writeback))
			goto err;

		n = bio_next_split(bio, KEY_SIZE(k), GFP_NOIO, split);

		n->bi_end_io = bch_data_insert_endio;
		n->bi_private = cl;

		if (op->writeback) {
			SET_KEY_DIRTY(k, true);

			for (i = 0; i < KEY_PTRS(k); i++)
				SET_GC_MARK(PTR_BUCKET(op->c, k, i),
					    GC_MARK_DIRTY);
		}

		SET_KEY_CSUM(k, op->csum);
		if (KEY_CSUM(k))
			bio_csum(n, k);

		trace_bcache_cache_insert(k);
		bch_keylist_push(&op->insert_keys);

		n->bi_rw |= REQ_WRITE;
		bch_submit_bbio(n, op->c, k, 0);
	} while (n != bio);

	op->insert_data_done = true;
	continue_at(cl, bch_data_insert_keys, op->wq);
err:
	/* bch_alloc_sectors() blocks if s->writeback = true */
	BUG_ON(op->writeback);

	/*
	 * But if it's not a writeback write we'd rather just bail out if
	 * there aren't any buckets ready to write to - it might take a while
	 * and we might be starving btree writes for gc or something.
	 */

	if (!op->replace) {
		/*
		 * Writethrough write: We can't complete the write until we've
		 * updated the index. But we don't want to delay the write while
		 * we wait for buckets to be freed up, so just invalidate the
		 * rest of the write.
		 */
		op->bypass = true;
		return bch_data_invalidate(cl);
	} else {
		/*
		 * From a cache miss, we can just insert the keys for the data
		 * we have written or bail out if we didn't do anything.
		 */
		op->insert_data_done = true;
		bio_put(bio);

		if (!bch_keylist_empty(&op->insert_keys))
			continue_at(cl, bch_data_insert_keys, op->wq);
		else
			closure_return(cl);
	}
}

/**
 * bch_data_insert - stick some data in the cache
 *
 * This is the starting point for any data to end up in a cache device; it could
 * be from a normal write, or a writeback write, or a write to a flash only
 * volume - it's also used by the moving garbage collector to compact data in
 * mostly empty buckets.
 *
 * It first writes the data to the cache, creating a list of keys to be inserted
 * (if the data had to be fragmented there will be multiple keys); after the
 * data is written it calls bch_journal, and after the keys have been added to
 * the next journal write they're inserted into the btree.
 *
 * It inserts the data in op->bio; bi_sector is used for the key offset,
 * and op->inode is used for the key inode.
 *
 * If op->bypass is true, instead of inserting the data it invalidates the
 * region of the cache represented by op->bio and op->inode.
 */
void bch_data_insert(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);

	trace_bcache_write(op->c, op->inode, op->bio,
			   op->writeback, op->bypass);

	bch_keylist_init(&op->insert_keys);
	bio_get(op->bio);
	bch_data_insert_start(cl);
}
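
/*
 * Typical invocation (this is how the cached_dev and flash-only paths below
 * drive it): the caller fills in a struct data_insert_op - at minimum op->c,
 * op->inode, op->bio and the bypass/writeback/csum flags - then runs the
 * insert as a closure:
 *
 *	closure_call(&s->iop.cl, bch_data_insert, NULL, &s->cl);
 *
 * Completion is signalled through the parent closure, and op->error holds
 * any insertion error afterwards.
 */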

/* Congested? */

unsigned bch_get_congested(struct cache_set *c)
{
	int i;
	long rand;

	if (!c->congested_read_threshold_us &&
	    !c->congested_write_threshold_us)
		return 0;

	i = (local_clock_us() - c->congested_last_us) / 1024;
	if (i < 0)
		return 0;

	i += atomic_read(&c->congested);
	if (i >= 0)
		return 0;

	i += CONGESTED_MAX;

	if (i > 0)
		i = fract_exp_two(i, 6);

	rand = get_random_int();
	i -= bitmap_weight(&rand, BITS_PER_LONG);

	return i > 0 ? i : 1;
}
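
/*
 * Worked example (hypothetical values): assuming CONGESTED_MAX is 1024 and
 * the decaying counter c->congested currently sits at -512 with no recent
 * congestion, i = -512 + 1024 = 512. fract_exp_two(i, 6) approximates
 * 2^(i/64), here 2^8 = 256, and subtracting the popcount of a random long
 * (about BITS_PER_LONG/2 on average) adds jitter so that not every request
 * sees exactly the same threshold.
 */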

static void add_sequential(struct task_struct *t)
{
	ewma_add(t->sequential_io_avg,
		 t->sequential_io, 8, 0);

	t->sequential_io = 0;
}
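
/*
 * ewma_add() above keeps an exponentially weighted moving average with
 * weight 8, i.e. roughly avg = (7 * avg + new) / 8 per call. Illustrative
 * numbers: an average of 1024 sectors folded with a finished run of 0
 * decays to about 896, so a single burst neither dominates the average nor
 * instantly erases the task's history.
 */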

static struct hlist_head *iohash(struct cached_dev *dc, uint64_t k)
{
	return &dc->io_hash[hash_64(k, RECENT_IO_BITS)];
}

static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
{
	struct cache_set *c = dc->disk.c;
	unsigned mode = cache_mode(dc, bio);
	unsigned sectors, congested = bch_get_congested(c);
	struct task_struct *task = current;
	struct io *i;

	if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
	    c->gc_stats.in_use > CUTOFF_CACHE_ADD ||
	    (bio->bi_rw & REQ_DISCARD))
		goto skip;

	if (mode == CACHE_MODE_NONE ||
	    (mode == CACHE_MODE_WRITEAROUND &&
	     (bio->bi_rw & REQ_WRITE)))
		goto skip;

	if (bio->bi_iter.bi_sector & (c->sb.block_size - 1) ||
	    bio_sectors(bio) & (c->sb.block_size - 1)) {
		pr_debug("skipping unaligned io");
		goto skip;
	}

	if (bypass_torture_test(dc)) {
		if ((get_random_int() & 3) == 3)
			goto skip;
		else
			goto rescale;
	}

	if (!congested && !dc->sequential_cutoff)
		goto rescale;

	if (!congested &&
	    mode == CACHE_MODE_WRITEBACK &&
	    (bio->bi_rw & REQ_WRITE) &&
	    (bio->bi_rw & REQ_SYNC))
		goto rescale;

	spin_lock(&dc->io_lock);

	hlist_for_each_entry(i, iohash(dc, bio->bi_iter.bi_sector), hash)
		if (i->last == bio->bi_iter.bi_sector &&
		    time_before(jiffies, i->jiffies))
			goto found;

	i = list_first_entry(&dc->io_lru, struct io, lru);

	add_sequential(task);
	i->sequential = 0;
found:
	if (i->sequential + bio->bi_iter.bi_size > i->sequential)
		i->sequential += bio->bi_iter.bi_size;

	i->last = bio_end_sector(bio);
	i->jiffies = jiffies + msecs_to_jiffies(5000);
	task->sequential_io = i->sequential;

	hlist_del(&i->hash);
	hlist_add_head(&i->hash, iohash(dc, i->last));
	list_move_tail(&i->lru, &dc->io_lru);

	spin_unlock(&dc->io_lock);

	sectors = max(task->sequential_io,
		      task->sequential_io_avg) >> 9;

	if (dc->sequential_cutoff &&
	    sectors >= dc->sequential_cutoff >> 9) {
		trace_bcache_bypass_sequential(bio);
		goto skip;
	}

	if (congested && sectors >= congested) {
		trace_bcache_bypass_congested(bio);
		goto skip;
	}

rescale:
	bch_rescale_priorities(c, bio_sectors(bio));
	return false;
skip:
	bch_mark_sectors_bypassed(c, dc, bio_sectors(bio));
	return true;
}
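
/*
 * Illustration of the sequential detector above (hypothetical workload): a
 * dd-style reader issuing back-to-back large reads keeps matching the
 * io_hash entry whose ->last equals its next bi_sector, so i->sequential
 * grows by bi_size on every bio. Once max(sequential_io, sequential_io_avg)
 * crosses dc->sequential_cutoff (a sysfs tunable, e.g. 4MiB), the stream is
 * sent straight to the backing device instead of polluting the cache.
 */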

/* Cache lookup */

struct search {
	/* Stack frame for bio_complete */
	struct closure cl;

	struct bbio bio;
	struct bio *orig_bio;
	struct bio *cache_miss;
	struct bcache_device *d;

	unsigned insert_bio_sectors;
	unsigned recoverable:1;
	unsigned write:1;
	unsigned read_dirty_data:1;

	unsigned long start_time;

	struct btree_op op;
	struct data_insert_op iop;
};

static void bch_cache_read_endio(struct bio *bio, int error)
{
	struct bbio *b = container_of(bio, struct bbio, bio);
	struct closure *cl = bio->bi_private;
	struct search *s = container_of(cl, struct search, cl);

	/*
	 * If the bucket was reused while our bio was in flight, we might have
	 * read the wrong data. Set s->iop.error but not error so it doesn't
	 * get counted against the cache device, but we'll still reread the
	 * data from the backing device.
	 */

	if (error)
		s->iop.error = error;
	else if (!KEY_DIRTY(&b->key) &&
		 ptr_stale(s->iop.c, &b->key, 0)) {
		atomic_long_inc(&s->iop.c->cache_read_races);
		s->iop.error = -EINTR;
	}

	bch_bbio_endio(s->iop.c, bio, error, "reading from cache");
}

/*
 * Read from a single key, handling the initial cache miss if the key starts in
 * the middle of the bio
 */
static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
{
	struct search *s = container_of(op, struct search, op);
	struct bio *n, *bio = &s->bio.bio;
	struct bkey *bio_key;
	unsigned ptr;

	if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0)) <= 0)
		return MAP_CONTINUE;

	if (KEY_INODE(k) != s->iop.inode ||
	    KEY_START(k) > bio->bi_iter.bi_sector) {
		unsigned bio_sectors = bio_sectors(bio);
		unsigned sectors = KEY_INODE(k) == s->iop.inode
			? min_t(uint64_t, INT_MAX,
				KEY_START(k) - bio->bi_iter.bi_sector)
			: INT_MAX;

		int ret = s->d->cache_miss(b, s, bio, sectors);
		if (ret != MAP_CONTINUE)
			return ret;

		/* if this was a complete miss we shouldn't get here */
		BUG_ON(bio_sectors <= sectors);
	}

	if (!KEY_SIZE(k))
		return MAP_CONTINUE;

	/* XXX: figure out best pointer - for multiple cache devices */
	ptr = 0;

	PTR_BUCKET(b->c, k, ptr)->prio = INITIAL_PRIO;

	if (KEY_DIRTY(k))
		s->read_dirty_data = true;

	n = bio_next_split(bio, min_t(uint64_t, INT_MAX,
				      KEY_OFFSET(k) - bio->bi_iter.bi_sector),
			   GFP_NOIO, s->d->bio_split);

	bio_key = &container_of(n, struct bbio, bio)->key;
	bch_bkey_copy_single_ptr(bio_key, k, ptr);

	bch_cut_front(&KEY(s->iop.inode, n->bi_iter.bi_sector, 0), bio_key);
	bch_cut_back(&KEY(s->iop.inode, bio_end_sector(n), 0), bio_key);

	n->bi_end_io = bch_cache_read_endio;
	n->bi_private = &s->cl;

	/*
	 * The bucket we're reading from might be reused while our bio
	 * is in flight, and we could then end up reading the wrong
	 * data.
	 *
	 * We guard against this by checking (in bch_cache_read_endio()) if
	 * the pointer is stale again; if so, we treat it as an error
	 * and reread from the backing device (but we don't pass that
	 * error up anywhere).
	 */

	__bch_submit_bbio(n, b->c);
	return n == bio ? MAP_DONE : MAP_CONTINUE;
}
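
/*
 * Example of the split logic above (sector offsets, illustrative only): a
 * read covering [100, 164) against a cached extent whose key spans
 * [132, 180) first calls cache_miss() for the 32 uncached leading sectors,
 * then clones a bio for [132, 164), trims the copied key with
 * bch_cut_front()/bch_cut_back() to exactly that range, and submits it to
 * the cache device; MAP_DONE is returned once the whole bio is consumed.
 */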

static void cache_lookup(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, iop.cl);
	struct bio *bio = &s->bio.bio;
	int ret;

	bch_btree_op_init(&s->op, -1);

	ret = bch_btree_map_keys(&s->op, s->iop.c,
				 &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0),
				 cache_lookup_fn, MAP_END_KEY);
	if (ret == -EAGAIN)
		continue_at(cl, cache_lookup, bcache_wq);

	closure_return(cl);
}

/* Common code for the make_request functions */

static void request_endio(struct bio *bio, int error)
{
	struct closure *cl = bio->bi_private;

	if (error) {
		struct search *s = container_of(cl, struct search, cl);
		s->iop.error = error;
		/* Only cache read errors are recoverable */
		s->recoverable = false;
	}

	bio_put(bio);
	closure_put(cl);
}

static void bio_complete(struct search *s)
{
	if (s->orig_bio) {
		generic_end_io_acct(bio_data_dir(s->orig_bio),
				    &s->d->disk->part0, s->start_time);

		trace_bcache_request_end(s->d, s->orig_bio);
		bio_endio(s->orig_bio, s->iop.error);
		s->orig_bio = NULL;
	}
}

static void do_bio_hook(struct search *s, struct bio *orig_bio)
{
	struct bio *bio = &s->bio.bio;

	bio_init(bio);
	__bio_clone_fast(bio, orig_bio);
	bio->bi_end_io = request_endio;
	bio->bi_private = &s->cl;

	bio_cnt_set(bio, 3);
}

static void search_free(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	bio_complete(s);

	if (s->iop.bio)
		bio_put(s->iop.bio);

	closure_debug_destroy(cl);
	mempool_free(s, s->d->c->search);
}

static inline struct search *search_alloc(struct bio *bio,
					  struct bcache_device *d)
{
	struct search *s;

	s = mempool_alloc(d->c->search, GFP_NOIO);

	closure_init(&s->cl, NULL);
	do_bio_hook(s, bio);

	s->orig_bio = bio;
	s->cache_miss = NULL;
	s->d = d;
	s->recoverable = 1;
	s->write = (bio->bi_rw & REQ_WRITE) != 0;
	s->read_dirty_data = 0;
	s->start_time = jiffies;

	s->iop.c = d->c;
	s->iop.bio = NULL;
	s->iop.inode = d->id;
	s->iop.write_point = hash_long((unsigned long) current, 16);
	s->iop.write_prio = 0;
	s->iop.error = 0;
	s->iop.flags = 0;
	s->iop.flush_journal = (bio->bi_rw & (REQ_FLUSH|REQ_FUA)) != 0;
	s->iop.wq = bcache_wq;

	return s;
}

/* Cached devices */

static void cached_dev_bio_complete(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	search_free(cl);
	cached_dev_put(dc);
}

/* Process reads */

static void cached_dev_cache_miss_done(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);

	if (s->iop.replace_collision)
		bch_mark_cache_miss_collision(s->iop.c, s->d);

	if (s->iop.bio) {
		int i;
		struct bio_vec *bv;

		bio_for_each_segment_all(bv, s->iop.bio, i)
			__free_page(bv->bv_page);
	}

	cached_dev_bio_complete(cl);
}

static void cached_dev_read_error(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct bio *bio = &s->bio.bio;

	if (s->recoverable) {
		/* Retry from the backing device: */
		trace_bcache_read_retry(s->orig_bio);

		s->iop.error = 0;
		do_bio_hook(s, s->orig_bio);

		/* XXX: invalidate cache */

		closure_bio_submit(bio, cl, s->d);
	}

	continue_at(cl, cached_dev_cache_miss_done, NULL);
}

static void cached_dev_read_done(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	/*
	 * We had a cache miss; cache_bio now contains data ready to be inserted
	 * into the cache.
	 *
	 * First, we copy the data we just read from cache_bio's bounce buffers
	 * to the buffers the original bio pointed to:
	 */

	if (s->iop.bio) {
		bio_reset(s->iop.bio);
		s->iop.bio->bi_iter.bi_sector = s->cache_miss->bi_iter.bi_sector;
		s->iop.bio->bi_bdev = s->cache_miss->bi_bdev;
		s->iop.bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
		bch_bio_map(s->iop.bio, NULL);

		bio_copy_data(s->cache_miss, s->iop.bio);

		bio_put(s->cache_miss);
		s->cache_miss = NULL;
	}

	if (verify(dc, &s->bio.bio) && s->recoverable && !s->read_dirty_data)
		bch_data_verify(dc, s->orig_bio);

	bio_complete(s);

	if (s->iop.bio &&
	    !test_bit(CACHE_SET_STOPPING, &s->iop.c->flags)) {
		BUG_ON(!s->iop.replace);
		closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
	}

	continue_at(cl, cached_dev_cache_miss_done, NULL);
}
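
/*
 * Data-flow sketch for the miss path above: the backing-device read lands
 * in cache_bio's bounce pages, bio_copy_data() copies it out to the pages
 * the original bio pointed at, and the same bounce pages are then handed
 * to bch_data_insert() so the freshly read extent also ends up cached.
 */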

static void cached_dev_read_done_bh(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	bch_mark_cache_accounting(s->iop.c, s->d,
				  !s->cache_miss, s->iop.bypass);
	trace_bcache_read(s->orig_bio, !s->cache_miss, s->iop.bypass);

	if (s->iop.error)
		continue_at_nobarrier(cl, cached_dev_read_error, bcache_wq);
	else if (s->iop.bio || verify(dc, &s->bio.bio))
		continue_at_nobarrier(cl, cached_dev_read_done, bcache_wq);
	else
		continue_at_nobarrier(cl, cached_dev_bio_complete, NULL);
}

static int cached_dev_cache_miss(struct btree *b, struct search *s,
				 struct bio *bio, unsigned sectors)
{
	int ret = MAP_CONTINUE;
	unsigned reada = 0;
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
	struct bio *miss, *cache_bio;

	if (s->cache_miss || s->iop.bypass) {
		miss = bio_next_split(bio, sectors, GFP_NOIO, s->d->bio_split);
		ret = miss == bio ? MAP_DONE : MAP_CONTINUE;
		goto out_submit;
	}

	if (!(bio->bi_rw & REQ_RAHEAD) &&
	    !(bio->bi_rw & REQ_META) &&
	    s->iop.c->gc_stats.in_use < CUTOFF_CACHE_READA)
		reada = min_t(sector_t, dc->readahead >> 9,
			      bdev_sectors(bio->bi_bdev) - bio_end_sector(bio));

	s->insert_bio_sectors = min(sectors, bio_sectors(bio) + reada);

	s->iop.replace_key = KEY(s->iop.inode,
				 bio->bi_iter.bi_sector + s->insert_bio_sectors,
				 s->insert_bio_sectors);

	ret = bch_btree_insert_check_key(b, &s->op, &s->iop.replace_key);
	if (ret)
		return ret;

	s->iop.replace = true;

	miss = bio_next_split(bio, sectors, GFP_NOIO, s->d->bio_split);

	/* btree_search_recurse()'s btree iterator is no good anymore */
	ret = miss == bio ? MAP_DONE : -EINTR;

	cache_bio = bio_alloc_bioset(GFP_NOWAIT,
			DIV_ROUND_UP(s->insert_bio_sectors, PAGE_SECTORS),
			dc->disk.bio_split);
	if (!cache_bio)
		goto out_submit;

	cache_bio->bi_iter.bi_sector = miss->bi_iter.bi_sector;
	cache_bio->bi_bdev = miss->bi_bdev;
	cache_bio->bi_iter.bi_size = s->insert_bio_sectors << 9;

	cache_bio->bi_end_io = request_endio;
	cache_bio->bi_private = &s->cl;

	bch_bio_map(cache_bio, NULL);
	if (bio_alloc_pages(cache_bio, __GFP_NOWARN|GFP_NOIO))
		goto out_put;

	if (reada)
		bch_mark_cache_readahead(s->iop.c, s->d);

	s->cache_miss = miss;
	s->iop.bio = cache_bio;
	bio_get(cache_bio);
	closure_bio_submit(cache_bio, &s->cl, s->d);

	return ret;
out_put:
	bio_put(cache_bio);
out_submit:
	miss->bi_end_io = request_endio;
	miss->bi_private = &s->cl;
	closure_bio_submit(miss, &s->cl, s->d);
	return ret;
}
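
/*
 * Readahead example (illustrative numbers): for a 64-sector miss with
 * dc->readahead set to 256 sectors and plenty of backing device left,
 * insert_bio_sectors = min(sectors, 64 + 256). The extra sectors are read
 * and cached but never copied into the original bio, and replace_key
 * covers the whole inserted range so a racing write makes the insert back
 * off (replace_collision) instead of clobbering newer data.
 */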

static void cached_dev_read(struct cached_dev *dc, struct search *s)
{
	struct closure *cl = &s->cl;

	closure_call(&s->iop.cl, cache_lookup, NULL, cl);
	continue_at(cl, cached_dev_read_done_bh, NULL);
}

/* Process writes */

static void cached_dev_write_complete(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	up_read_non_owner(&dc->writeback_lock);
	cached_dev_bio_complete(cl);
}

static void cached_dev_write(struct cached_dev *dc, struct search *s)
{
	struct closure *cl = &s->cl;
	struct bio *bio = &s->bio.bio;
	struct bkey start = KEY(dc->disk.id, bio->bi_iter.bi_sector, 0);
	struct bkey end = KEY(dc->disk.id, bio_end_sector(bio), 0);

	bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys, &start, &end);

	down_read_non_owner(&dc->writeback_lock);
	if (bch_keybuf_check_overlapping(&dc->writeback_keys, &start, &end)) {
		/*
		 * We overlap with some dirty data undergoing background
		 * writeback, force this write to writeback
		 */
		s->iop.bypass = false;
		s->iop.writeback = true;
	}

	/*
	 * Discards aren't _required_ to do anything, so skipping if
	 * check_overlapping returned true is ok
	 *
	 * But check_overlapping drops dirty keys for which io hasn't started,
	 * so we still want to call it.
	 */
	if (bio->bi_rw & REQ_DISCARD)
		s->iop.bypass = true;

	if (should_writeback(dc, s->orig_bio,
			     cache_mode(dc, bio),
			     s->iop.bypass)) {
		s->iop.bypass = false;
		s->iop.writeback = true;
	}

	if (s->iop.bypass) {
		s->iop.bio = s->orig_bio;
		bio_get(s->iop.bio);

		if (!(bio->bi_rw & REQ_DISCARD) ||
		    blk_queue_discard(bdev_get_queue(dc->bdev)))
			closure_bio_submit(bio, cl, s->d);
	} else if (s->iop.writeback) {
		bch_writeback_add(dc);
		s->iop.bio = bio;

		if (bio->bi_rw & REQ_FLUSH) {
			/* Also need to send a flush to the backing device */
			struct bio *flush = bio_alloc_bioset(GFP_NOIO, 0,
							     dc->disk.bio_split);

			flush->bi_rw = WRITE_FLUSH;
			flush->bi_bdev = bio->bi_bdev;
			flush->bi_end_io = request_endio;
			flush->bi_private = cl;

			closure_bio_submit(flush, cl, s->d);
		}
	} else {
		s->iop.bio = bio_clone_fast(bio, GFP_NOIO, dc->disk.bio_split);

		closure_bio_submit(bio, cl, s->d);
	}

	closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
	continue_at(cl, cached_dev_write_complete, NULL);
}
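
/*
 * Summary of the three write paths above:
 *   bypass     - the bio goes only to the backing device; the "insert"
 *                merely invalidates any stale cached copy of the range.
 *   writeback  - the bio goes only to the cache (marked dirty) and is
 *                flushed to the backing device later by writeback.
 *   otherwise  - writethrough: the bio is submitted to the backing device
 *                while a clone is inserted into the cache.
 */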

static void cached_dev_nodata(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct bio *bio = &s->bio.bio;

	if (s->iop.flush_journal)
		bch_journal_meta(s->iop.c, cl);

	/* If it's a flush, we send the flush to the backing device too */
	closure_bio_submit(bio, cl, s->d);

	continue_at(cl, cached_dev_bio_complete, NULL);
}

/* Cached devices - read & write stuff */

static void cached_dev_make_request(struct request_queue *q, struct bio *bio)
{
	struct search *s;
	struct bcache_device *d = bio->bi_bdev->bd_disk->private_data;
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
	int rw = bio_data_dir(bio);

	generic_start_io_acct(rw, bio_sectors(bio), &d->disk->part0);

	bio->bi_bdev = dc->bdev;
	bio->bi_iter.bi_sector += dc->sb.data_offset;

	if (cached_dev_get(dc)) {
		s = search_alloc(bio, d);
		trace_bcache_request_start(s->d, bio);

		if (!bio->bi_iter.bi_size) {
			/*
			 * can't call bch_journal_meta from under
			 * generic_make_request
			 */
			continue_at_nobarrier(&s->cl,
					      cached_dev_nodata,
					      bcache_wq);
		} else {
			s->iop.bypass = check_should_bypass(dc, bio);

			if (rw)
				cached_dev_write(dc, s);
			else
				cached_dev_read(dc, s);
		}
	} else {
		if ((bio->bi_rw & REQ_DISCARD) &&
		    !blk_queue_discard(bdev_get_queue(dc->bdev)))
			bio_endio(bio, 0);
		else
			bch_generic_make_request(bio, &d->bio_split_hook);
	}
}

static int cached_dev_ioctl(struct bcache_device *d, fmode_t mode,
			    unsigned int cmd, unsigned long arg)
{
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
	return __blkdev_driver_ioctl(dc->bdev, mode, cmd, arg);
}

static int cached_dev_congested(void *data, int bits)
{
	struct bcache_device *d = data;
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
	struct request_queue *q = bdev_get_queue(dc->bdev);
	int ret = 0;

	if (bdi_congested(&q->backing_dev_info, bits))
		return 1;

	if (cached_dev_get(dc)) {
		unsigned i;
		struct cache *ca;

		for_each_cache(ca, d->c, i) {
			q = bdev_get_queue(ca->bdev);
			ret |= bdi_congested(&q->backing_dev_info, bits);
		}

		cached_dev_put(dc);
	}

	return ret;
}

void bch_cached_dev_request_init(struct cached_dev *dc)
{
	struct gendisk *g = dc->disk.disk;

	g->queue->make_request_fn = cached_dev_make_request;
	g->queue->backing_dev_info.congested_fn = cached_dev_congested;
	dc->disk.cache_miss = cached_dev_cache_miss;
	dc->disk.ioctl = cached_dev_ioctl;
}

/* Flash backed devices */

static int flash_dev_cache_miss(struct btree *b, struct search *s,
				struct bio *bio, unsigned sectors)
{
	unsigned bytes = min(sectors, bio_sectors(bio)) << 9;

	swap(bio->bi_iter.bi_size, bytes);
	zero_fill_bio(bio);
	swap(bio->bi_iter.bi_size, bytes);

	bio_advance(bio, bytes);

	if (!bio->bi_iter.bi_size)
		return MAP_DONE;

	return MAP_CONTINUE;
}

static void flash_dev_nodata(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);

	if (s->iop.flush_journal)
		bch_journal_meta(s->iop.c, cl);

	continue_at(cl, search_free, NULL);
}

static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
{
	struct search *s;
	struct closure *cl;
	struct bcache_device *d = bio->bi_bdev->bd_disk->private_data;
	int rw = bio_data_dir(bio);

	generic_start_io_acct(rw, bio_sectors(bio), &d->disk->part0);

	s = search_alloc(bio, d);
	cl = &s->cl;
	bio = &s->bio.bio;

	trace_bcache_request_start(s->d, bio);

	if (!bio->bi_iter.bi_size) {
		/*
		 * can't call bch_journal_meta from under
		 * generic_make_request
		 */
		continue_at_nobarrier(&s->cl,
				      flash_dev_nodata,
				      bcache_wq);
	} else if (rw) {
		bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys,
					&KEY(d->id, bio->bi_iter.bi_sector, 0),
					&KEY(d->id, bio_end_sector(bio), 0));

		s->iop.bypass = (bio->bi_rw & REQ_DISCARD) != 0;
		s->iop.writeback = true;
		s->iop.bio = bio;

		closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
	} else {
		closure_call(&s->iop.cl, cache_lookup, NULL, cl);
	}

	continue_at(cl, search_free, NULL);
}

static int flash_dev_ioctl(struct bcache_device *d, fmode_t mode,
			   unsigned int cmd, unsigned long arg)
{
	return -ENOTTY;
}

static int flash_dev_congested(void *data, int bits)
{
	struct bcache_device *d = data;
	struct request_queue *q;
	struct cache *ca;
	unsigned i;
	int ret = 0;

	for_each_cache(ca, d->c, i) {
		q = bdev_get_queue(ca->bdev);
		ret |= bdi_congested(&q->backing_dev_info, bits);
	}

	return ret;
}

void bch_flash_dev_request_init(struct bcache_device *d)
{
	struct gendisk *g = d->disk;

	g->queue->make_request_fn = flash_dev_make_request;
	g->queue->backing_dev_info.congested_fn = flash_dev_congested;
	d->cache_miss = flash_dev_cache_miss;
	d->ioctl = flash_dev_ioctl;
}

void bch_request_exit(void)
{
	if (bch_search_cache)
		kmem_cache_destroy(bch_search_cache);
}

int __init bch_request_init(void)
{
	bch_search_cache = KMEM_CACHE(search, 0);
	if (!bch_search_cache)
		return -ENOMEM;

	return 0;
}