1/*
2 * Main bcache entry point - handle a read or a write request and decide what to
3 * do with it; the make_request functions are called by the block layer.
4 *
5 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
6 * Copyright 2012 Google, Inc.
7 */
8
9#include "bcache.h"
10#include "btree.h"
11#include "debug.h"
12#include "request.h"
13
14#include <linux/cgroup.h>
15#include <linux/module.h>
16#include <linux/hash.h>
17#include <linux/random.h>
18#include "blk-cgroup.h"
19
20#include <trace/events/bcache.h>
21
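/*
 * Cutoffs are percentages of cache space in use: above CUTOFF_CACHE_ADD we
 * stop adding new data to the cache, above CUTOFF_CACHE_READA cache miss
 * readahead is disabled, and the CUTOFF_WRITEBACK thresholds gate writeback
 * caching in should_writeback() below.
 */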
22#define CUTOFF_CACHE_ADD 95
23#define CUTOFF_CACHE_READA 90
24#define CUTOFF_WRITEBACK 50
25#define CUTOFF_WRITEBACK_SYNC 75
26
27struct kmem_cache *bch_search_cache;
28
29static void check_should_skip(struct cached_dev *, struct search *);
30
31/* Cgroup interface */
32
33#ifdef CONFIG_CGROUP_BCACHE
34static struct bch_cgroup bcache_default_cgroup = { .cache_mode = -1 };
35
36static struct bch_cgroup *cgroup_to_bcache(struct cgroup *cgroup)
37{
38 struct cgroup_subsys_state *css;
39 return cgroup &&
40 (css = cgroup_subsys_state(cgroup, bcache_subsys_id))
41 ? container_of(css, struct bch_cgroup, css)
42 : &bcache_default_cgroup;
43}
44
45struct bch_cgroup *bch_bio_to_cgroup(struct bio *bio)
46{
47 struct cgroup_subsys_state *css = bio->bi_css
48 ? cgroup_subsys_state(bio->bi_css->cgroup, bcache_subsys_id)
49 : task_subsys_state(current, bcache_subsys_id);
50
51 return css
52 ? container_of(css, struct bch_cgroup, css)
53 : &bcache_default_cgroup;
54}
55
56static ssize_t cache_mode_read(struct cgroup *cgrp, struct cftype *cft,
57 struct file *file,
58 char __user *buf, size_t nbytes, loff_t *ppos)
59{
60 char tmp[1024];
 61 int len = bch_snprint_string_list(tmp, sizeof(tmp), bch_cache_modes,
62 cgroup_to_bcache(cgrp)->cache_mode + 1);
63
64 if (len < 0)
65 return len;
66
67 return simple_read_from_buffer(buf, nbytes, ppos, tmp, len);
68}
69
70static int cache_mode_write(struct cgroup *cgrp, struct cftype *cft,
71 const char *buf)
72{
 73 int v = bch_read_string_list(buf, bch_cache_modes);
74 if (v < 0)
75 return v;
76
77 cgroup_to_bcache(cgrp)->cache_mode = v - 1;
78 return 0;
79}
80
81static u64 bch_verify_read(struct cgroup *cgrp, struct cftype *cft)
82{
83 return cgroup_to_bcache(cgrp)->verify;
84}
85
86static int bch_verify_write(struct cgroup *cgrp, struct cftype *cft, u64 val)
87{
88 cgroup_to_bcache(cgrp)->verify = val;
89 return 0;
90}
91
92static u64 bch_cache_hits_read(struct cgroup *cgrp, struct cftype *cft)
93{
94 struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp);
95 return atomic_read(&bcachecg->stats.cache_hits);
96}
97
98static u64 bch_cache_misses_read(struct cgroup *cgrp, struct cftype *cft)
99{
100 struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp);
101 return atomic_read(&bcachecg->stats.cache_misses);
102}
103
104static u64 bch_cache_bypass_hits_read(struct cgroup *cgrp,
105 struct cftype *cft)
106{
107 struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp);
108 return atomic_read(&bcachecg->stats.cache_bypass_hits);
109}
110
111static u64 bch_cache_bypass_misses_read(struct cgroup *cgrp,
112 struct cftype *cft)
113{
114 struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp);
115 return atomic_read(&bcachecg->stats.cache_bypass_misses);
116}
117
118static struct cftype bch_files[] = {
119 {
120 .name = "cache_mode",
121 .read = cache_mode_read,
122 .write_string = cache_mode_write,
123 },
124 {
125 .name = "verify",
126 .read_u64 = bch_verify_read,
127 .write_u64 = bch_verify_write,
128 },
129 {
130 .name = "cache_hits",
131 .read_u64 = bch_cache_hits_read,
132 },
133 {
134 .name = "cache_misses",
135 .read_u64 = bch_cache_misses_read,
136 },
137 {
138 .name = "cache_bypass_hits",
139 .read_u64 = bch_cache_bypass_hits_read,
140 },
141 {
142 .name = "cache_bypass_misses",
143 .read_u64 = bch_cache_bypass_misses_read,
144 },
145 { } /* terminate */
146};
147
148static void init_bch_cgroup(struct bch_cgroup *cg)
149{
150 cg->cache_mode = -1;
151}
152
153static struct cgroup_subsys_state *bcachecg_create(struct cgroup *cgroup)
154{
155 struct bch_cgroup *cg;
156
157 cg = kzalloc(sizeof(*cg), GFP_KERNEL);
158 if (!cg)
159 return ERR_PTR(-ENOMEM);
160 init_bch_cgroup(cg);
161 return &cg->css;
162}
163
164static void bcachecg_destroy(struct cgroup *cgroup)
165{
166 struct bch_cgroup *cg = cgroup_to_bcache(cgroup);
167 free_css_id(&bcache_subsys, &cg->css);
168 kfree(cg);
169}
170
171struct cgroup_subsys bcache_subsys = {
172 .create = bcachecg_create,
173 .destroy = bcachecg_destroy,
174 .subsys_id = bcache_subsys_id,
175 .name = "bcache",
176 .module = THIS_MODULE,
177};
178EXPORT_SYMBOL_GPL(bcache_subsys);
179#endif
180
181static unsigned cache_mode(struct cached_dev *dc, struct bio *bio)
182{
183#ifdef CONFIG_CGROUP_BCACHE
184 int r = bch_bio_to_cgroup(bio)->cache_mode;
185 if (r >= 0)
186 return r;
187#endif
188 return BDEV_CACHE_MODE(&dc->sb);
189}
190
191static bool verify(struct cached_dev *dc, struct bio *bio)
192{
193#ifdef CONFIG_CGROUP_BCACHE
194 if (bch_bio_to_cgroup(bio)->verify)
195 return true;
196#endif
197 return dc->verify;
198}
199
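/*
 * Checksum the bio's data with a 64 bit crc and stash the result in the key,
 * in the slot just past the last pointer (with the top bit cleared).
 */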
200static void bio_csum(struct bio *bio, struct bkey *k)
201{
202 struct bio_vec *bv;
203 uint64_t csum = 0;
204 int i;
205
206 bio_for_each_segment(bv, bio, i) {
207 void *d = kmap(bv->bv_page) + bv->bv_offset;
 208 csum = bch_crc64_update(csum, d, bv->bv_len);
209 kunmap(bv->bv_page);
210 }
211
212 k->ptr[KEY_PTRS(k)] = csum & (~0ULL >> 1);
213}
214
215/* Insert data into cache */
216
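/*
 * Emit keys with no pointers covering op->cache_bio's range; inserting them
 * invalidates whatever the cache currently holds for that region.
 */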
217static void bio_invalidate(struct closure *cl)
218{
219 struct btree_op *op = container_of(cl, struct btree_op, cl);
220 struct bio *bio = op->cache_bio;
221
222 pr_debug("invalidating %i sectors from %llu",
223 bio_sectors(bio), (uint64_t) bio->bi_sector);
224
225 while (bio_sectors(bio)) {
226 unsigned len = min(bio_sectors(bio), 1U << 14);
227
228 if (bch_keylist_realloc(&op->keys, 0, op->c))
229 goto out;
230
231 bio->bi_sector += len;
232 bio->bi_size -= len << 9;
233
234 bch_keylist_add(&op->keys,
235 &KEY(op->inode, bio->bi_sector, len));
236 }
237
238 op->insert_data_done = true;
239 bio_put(bio);
240out:
241 continue_at(cl, bch_journal, bcache_wq);
242}
243
244struct open_bucket {
245 struct list_head list;
246 struct task_struct *last;
247 unsigned sectors_free;
248 BKEY_PADDED(key);
249};
250
251void bch_open_buckets_free(struct cache_set *c)
252{
253 struct open_bucket *b;
254
255 while (!list_empty(&c->data_buckets)) {
256 b = list_first_entry(&c->data_buckets,
257 struct open_bucket, list);
258 list_del(&b->list);
259 kfree(b);
260 }
261}
262
263int bch_open_buckets_alloc(struct cache_set *c)
264{
265 int i;
266
267 spin_lock_init(&c->data_bucket_lock);
268
269 for (i = 0; i < 6; i++) {
270 struct open_bucket *b = kzalloc(sizeof(*b), GFP_KERNEL);
271 if (!b)
272 return -ENOMEM;
273
274 list_add(&b->list, &c->data_buckets);
275 }
276
277 return 0;
278}
279
280/*
281 * We keep multiple buckets open for writes, and try to segregate different
282 * write streams for better cache utilization: first we look for a bucket where
283 * the last write to it was sequential with the current write, and failing that
284 * we look for a bucket that was last used by the same task.
285 *
 286 * The idea is that if you've got multiple tasks pulling data into the cache at the
287 * same time, you'll get better cache utilization if you try to segregate their
288 * data and preserve locality.
289 *
 290 * For example, say you're starting Firefox at the same time you're copying a
291 * bunch of files. Firefox will likely end up being fairly hot and stay in the
292 * cache awhile, but the data you copied might not be; if you wrote all that
293 * data to the same buckets it'd get invalidated at the same time.
294 *
295 * Both of those tasks will be doing fairly random IO so we can't rely on
296 * detecting sequential IO to segregate their data, but going off of the task
297 * should be a sane heuristic.
298 */
299static struct open_bucket *pick_data_bucket(struct cache_set *c,
300 const struct bkey *search,
301 struct task_struct *task,
302 struct bkey *alloc)
303{
304 struct open_bucket *ret, *ret_task = NULL;
305
306 list_for_each_entry_reverse(ret, &c->data_buckets, list)
307 if (!bkey_cmp(&ret->key, search))
308 goto found;
309 else if (ret->last == task)
310 ret_task = ret;
311
312 ret = ret_task ?: list_first_entry(&c->data_buckets,
313 struct open_bucket, list);
314found:
315 if (!ret->sectors_free && KEY_PTRS(alloc)) {
316 ret->sectors_free = c->sb.bucket_size;
317 bkey_copy(&ret->key, alloc);
318 bkey_init(alloc);
319 }
320
321 if (!ret->sectors_free)
322 ret = NULL;
323
324 return ret;
325}
326
327/*
 328 * Allocates some space in the cache to write to, and sets k to point to the newly
329 * allocated space, and updates KEY_SIZE(k) and KEY_OFFSET(k) (to point to the
330 * end of the newly allocated space).
331 *
 332 * May allocate fewer sectors than @sectors; KEY_SIZE(k) indicates how many
333 * sectors were actually allocated.
334 *
335 * If s->writeback is true, will not fail.
336 */
337static bool bch_alloc_sectors(struct bkey *k, unsigned sectors,
338 struct search *s)
339{
340 struct cache_set *c = s->op.c;
341 struct open_bucket *b;
342 BKEY_PADDED(key) alloc;
343 struct closure cl, *w = NULL;
344 unsigned i;
345
346 if (s->writeback) {
347 closure_init_stack(&cl);
348 w = &cl;
349 }
350
351 /*
352 * We might have to allocate a new bucket, which we can't do with a
353 * spinlock held. So if we have to allocate, we drop the lock, allocate
354 * and then retry. KEY_PTRS() indicates whether alloc points to
355 * allocated bucket(s).
356 */
357
358 bkey_init(&alloc.key);
359 spin_lock(&c->data_bucket_lock);
360
361 while (!(b = pick_data_bucket(c, k, s->task, &alloc.key))) {
362 unsigned watermark = s->op.write_prio
363 ? WATERMARK_MOVINGGC
364 : WATERMARK_NONE;
365
366 spin_unlock(&c->data_bucket_lock);
367
368 if (bch_bucket_alloc_set(c, watermark, &alloc.key, 1, w))
369 return false;
370
371 spin_lock(&c->data_bucket_lock);
372 }
373
374 /*
375 * If we had to allocate, we might race and not need to allocate the
 376 * second time we call pick_data_bucket(). If we allocated a bucket but
377 * didn't use it, drop the refcount bch_bucket_alloc_set() took:
378 */
379 if (KEY_PTRS(&alloc.key))
380 __bkey_put(c, &alloc.key);
381
382 for (i = 0; i < KEY_PTRS(&b->key); i++)
383 EBUG_ON(ptr_stale(c, &b->key, i));
384
385 /* Set up the pointer to the space we're allocating: */
386
387 for (i = 0; i < KEY_PTRS(&b->key); i++)
388 k->ptr[i] = b->key.ptr[i];
389
390 sectors = min(sectors, b->sectors_free);
391
392 SET_KEY_OFFSET(k, KEY_OFFSET(k) + sectors);
393 SET_KEY_SIZE(k, sectors);
394 SET_KEY_PTRS(k, KEY_PTRS(&b->key));
395
396 /*
397 * Move b to the end of the lru, and keep track of what this bucket was
398 * last used for:
399 */
400 list_move_tail(&b->list, &c->data_buckets);
401 bkey_copy_key(&b->key, k);
402 b->last = s->task;
403
404 b->sectors_free -= sectors;
405
406 for (i = 0; i < KEY_PTRS(&b->key); i++) {
407 SET_PTR_OFFSET(&b->key, i, PTR_OFFSET(&b->key, i) + sectors);
408
409 atomic_long_add(sectors,
410 &PTR_CACHE(c, &b->key, i)->sectors_written);
411 }
412
413 if (b->sectors_free < c->sb.block_size)
414 b->sectors_free = 0;
415
416 /*
417 * k takes refcounts on the buckets it points to until it's inserted
418 * into the btree, but if we're done with this bucket we just transfer
 419 * the refcount the open bucket already holds.
420 */
421 if (b->sectors_free)
422 for (i = 0; i < KEY_PTRS(&b->key); i++)
423 atomic_inc(&PTR_BUCKET(c, &b->key, i)->pin);
424
425 spin_unlock(&c->data_bucket_lock);
426 return true;
427}
428
429static void bch_insert_data_error(struct closure *cl)
430{
431 struct btree_op *op = container_of(cl, struct btree_op, cl);
432
433 /*
434 * Our data write just errored, which means we've got a bunch of keys to
 435 * insert that point to data that wasn't successfully written.
436 *
437 * We don't have to insert those keys but we still have to invalidate
438 * that region of the cache - so, if we just strip off all the pointers
439 * from the keys we'll accomplish just that.
440 */
441
442 struct bkey *src = op->keys.bottom, *dst = op->keys.bottom;
443
444 while (src != op->keys.top) {
445 struct bkey *n = bkey_next(src);
446
447 SET_KEY_PTRS(src, 0);
448 bkey_copy(dst, src);
449
450 dst = bkey_next(dst);
451 src = n;
452 }
453
454 op->keys.top = dst;
455
456 bch_journal(cl);
457}
458
459static void bch_insert_data_endio(struct bio *bio, int error)
460{
461 struct closure *cl = bio->bi_private;
462 struct btree_op *op = container_of(cl, struct btree_op, cl);
463 struct search *s = container_of(op, struct search, op);
464
465 if (error) {
466 /* TODO: We could try to recover from this. */
467 if (s->writeback)
468 s->error = error;
469 else if (s->write)
470 set_closure_fn(cl, bch_insert_data_error, bcache_wq);
471 else
472 set_closure_fn(cl, NULL, NULL);
473 }
474
475 bch_bbio_endio(op->c, bio, error, "writing data to cache");
476}
477
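/*
 * Write op->cache_bio out to the cache, splitting it into as many fragments
 * as the allocator gives us space for, and build up the list of keys to be
 * inserted via the journal as we go.
 */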
478static void bch_insert_data_loop(struct closure *cl)
479{
480 struct btree_op *op = container_of(cl, struct btree_op, cl);
481 struct search *s = container_of(op, struct search, op);
482 struct bio *bio = op->cache_bio, *n;
483
484 if (op->skip)
485 return bio_invalidate(cl);
486
487 if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0) {
488 set_gc_sectors(op->c);
489 bch_queue_gc(op->c);
490 }
491
492 do {
493 unsigned i;
494 struct bkey *k;
495 struct bio_set *split = s->d
496 ? s->d->bio_split : op->c->bio_split;
497
 498 /* 1 for the device pointer and 1 for the checksum */
499 if (bch_keylist_realloc(&op->keys,
500 1 + (op->csum ? 1 : 0),
501 op->c))
502 continue_at(cl, bch_journal, bcache_wq);
503
504 k = op->keys.top;
505 bkey_init(k);
506 SET_KEY_INODE(k, op->inode);
507 SET_KEY_OFFSET(k, bio->bi_sector);
508
509 if (!bch_alloc_sectors(k, bio_sectors(bio), s))
510 goto err;
511
512 n = bch_bio_split(bio, KEY_SIZE(k), GFP_NOIO, split);
513 if (!n) {
514 __bkey_put(op->c, k);
515 continue_at(cl, bch_insert_data_loop, bcache_wq);
516 }
517
518 n->bi_end_io = bch_insert_data_endio;
519 n->bi_private = cl;
520
521 if (s->writeback) {
522 SET_KEY_DIRTY(k, true);
523
524 for (i = 0; i < KEY_PTRS(k); i++)
525 SET_GC_MARK(PTR_BUCKET(op->c, k, i),
526 GC_MARK_DIRTY);
527 }
528
529 SET_KEY_CSUM(k, op->csum);
530 if (KEY_CSUM(k))
531 bio_csum(n, k);
532
 533 trace_bcache_cache_insert(k);
534 bch_keylist_push(&op->keys);
535
536 n->bi_rw |= REQ_WRITE;
537 bch_submit_bbio(n, op->c, k, 0);
538 } while (n != bio);
539
540 op->insert_data_done = true;
541 continue_at(cl, bch_journal, bcache_wq);
542err:
543 /* bch_alloc_sectors() blocks if s->writeback = true */
544 BUG_ON(s->writeback);
545
546 /*
547 * But if it's not a writeback write we'd rather just bail out if
 548 * there aren't any buckets ready to write to - it might take a while and
549 * we might be starving btree writes for gc or something.
550 */
551
552 if (s->write) {
553 /*
554 * Writethrough write: We can't complete the write until we've
555 * updated the index. But we don't want to delay the write while
556 * we wait for buckets to be freed up, so just invalidate the
557 * rest of the write.
558 */
559 op->skip = true;
560 return bio_invalidate(cl);
561 } else {
562 /*
563 * From a cache miss, we can just insert the keys for the data
564 * we have written or bail out if we didn't do anything.
565 */
566 op->insert_data_done = true;
567 bio_put(bio);
568
569 if (!bch_keylist_empty(&op->keys))
570 continue_at(cl, bch_journal, bcache_wq);
571 else
572 closure_return(cl);
573 }
574}
575
576/**
577 * bch_insert_data - stick some data in the cache
578 *
579 * This is the starting point for any data to end up in a cache device; it could
580 * be from a normal write, or a writeback write, or a write to a flash only
581 * volume - it's also used by the moving garbage collector to compact data in
582 * mostly empty buckets.
583 *
584 * It first writes the data to the cache, creating a list of keys to be inserted
585 * (if the data had to be fragmented there will be multiple keys); after the
586 * data is written it calls bch_journal, and after the keys have been added to
587 * the next journal write they're inserted into the btree.
588 *
589 * It inserts the data in op->cache_bio; bi_sector is used for the key offset,
590 * and op->inode is used for the key inode.
591 *
592 * If op->skip is true, instead of inserting the data it invalidates the region
593 * of the cache represented by op->cache_bio and op->inode.
594 */
595void bch_insert_data(struct closure *cl)
596{
597 struct btree_op *op = container_of(cl, struct btree_op, cl);
598
599 bch_keylist_init(&op->keys);
600 bio_get(op->cache_bio);
601 bch_insert_data_loop(cl);
602}
603
604void bch_btree_insert_async(struct closure *cl)
605{
606 struct btree_op *op = container_of(cl, struct btree_op, cl);
607 struct search *s = container_of(op, struct search, op);
608
609 if (bch_btree_insert(op, op->c)) {
610 s->error = -ENOMEM;
611 op->insert_data_done = true;
612 }
613
614 if (op->insert_data_done) {
615 bch_keylist_free(&op->keys);
616 closure_return(cl);
617 } else
618 continue_at(cl, bch_insert_data_loop, bcache_wq);
619}
620
621/* Common code for the make_request functions */
622
623static void request_endio(struct bio *bio, int error)
624{
625 struct closure *cl = bio->bi_private;
626
627 if (error) {
628 struct search *s = container_of(cl, struct search, cl);
629 s->error = error;
630 /* Only cache read errors are recoverable */
631 s->recoverable = false;
632 }
633
634 bio_put(bio);
635 closure_put(cl);
636}
637
638void bch_cache_read_endio(struct bio *bio, int error)
639{
640 struct bbio *b = container_of(bio, struct bbio, bio);
641 struct closure *cl = bio->bi_private;
642 struct search *s = container_of(cl, struct search, cl);
643
644 /*
645 * If the bucket was reused while our bio was in flight, we might have
646 * read the wrong data. Set s->error but not error so it doesn't get
647 * counted against the cache device, but we'll still reread the data
648 * from the backing device.
649 */
650
651 if (error)
652 s->error = error;
653 else if (ptr_stale(s->op.c, &b->key, 0)) {
654 atomic_long_inc(&s->op.c->cache_read_races);
655 s->error = -EINTR;
656 }
657
658 bch_bbio_endio(s->op.c, bio, error, "reading from cache");
659}
660
661static void bio_complete(struct search *s)
662{
663 if (s->orig_bio) {
664 int cpu, rw = bio_data_dir(s->orig_bio);
665 unsigned long duration = jiffies - s->start_time;
666
667 cpu = part_stat_lock();
668 part_round_stats(cpu, &s->d->disk->part0);
669 part_stat_add(cpu, &s->d->disk->part0, ticks[rw], duration);
670 part_stat_unlock();
671
672 trace_bcache_request_end(s, s->orig_bio);
673 bio_endio(s->orig_bio, s->error);
674 s->orig_bio = NULL;
675 }
676}
677
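/* Set up the embedded bio as a copy of orig_bio, completing into our closure */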
678static void do_bio_hook(struct search *s)
679{
680 struct bio *bio = &s->bio.bio;
681 memcpy(bio, s->orig_bio, sizeof(struct bio));
682
683 bio->bi_end_io = request_endio;
684 bio->bi_private = &s->cl;
685 atomic_set(&bio->bi_cnt, 3);
686}
687
688static void search_free(struct closure *cl)
689{
690 struct search *s = container_of(cl, struct search, cl);
691 bio_complete(s);
692
693 if (s->op.cache_bio)
694 bio_put(s->op.cache_bio);
695
696 if (s->unaligned_bvec)
697 mempool_free(s->bio.bio.bi_io_vec, s->d->unaligned_bvec);
698
699 closure_debug_destroy(cl);
700 mempool_free(s, s->d->c->search);
701}
702
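/* Allocate a struct search for this bio and initialize it from the bio's flags */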
703static struct search *search_alloc(struct bio *bio, struct bcache_device *d)
704{
705 struct bio_vec *bv;
706 struct search *s = mempool_alloc(d->c->search, GFP_NOIO);
707 memset(s, 0, offsetof(struct search, op.keys));
708
709 __closure_init(&s->cl, NULL);
710
711 s->op.inode = d->id;
712 s->op.c = d->c;
713 s->d = d;
714 s->op.lock = -1;
715 s->task = current;
716 s->orig_bio = bio;
717 s->write = (bio->bi_rw & REQ_WRITE) != 0;
718 s->op.flush_journal = (bio->bi_rw & REQ_FLUSH) != 0;
719 s->op.skip = (bio->bi_rw & REQ_DISCARD) != 0;
720 s->recoverable = 1;
721 s->start_time = jiffies;
722 do_bio_hook(s);
723
724 if (bio->bi_size != bio_segments(bio) * PAGE_SIZE) {
725 bv = mempool_alloc(d->unaligned_bvec, GFP_NOIO);
726 memcpy(bv, bio_iovec(bio),
727 sizeof(struct bio_vec) * bio_segments(bio));
728
729 s->bio.bio.bi_io_vec = bv;
730 s->unaligned_bvec = 1;
731 }
732
733 return s;
734}
735
736static void btree_read_async(struct closure *cl)
737{
738 struct btree_op *op = container_of(cl, struct btree_op, cl);
739
740 int ret = btree_root(search_recurse, op->c, op);
741
742 if (ret == -EAGAIN)
743 continue_at(cl, btree_read_async, bcache_wq);
744
745 closure_return(cl);
746}
747
748/* Cached devices */
749
750static void cached_dev_bio_complete(struct closure *cl)
751{
752 struct search *s = container_of(cl, struct search, cl);
753 struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
754
755 search_free(cl);
756 cached_dev_put(dc);
757}
758
759/* Process reads */
760
761static void cached_dev_read_complete(struct closure *cl)
762{
763 struct search *s = container_of(cl, struct search, cl);
764
765 if (s->op.insert_collision)
766 bch_mark_cache_miss_collision(s);
767
768 if (s->op.cache_bio) {
769 int i;
770 struct bio_vec *bv;
771
772 __bio_for_each_segment(bv, s->op.cache_bio, i, 0)
773 __free_page(bv->bv_page);
774 }
775
776 cached_dev_bio_complete(cl);
777}
778
779static void request_read_error(struct closure *cl)
780{
781 struct search *s = container_of(cl, struct search, cl);
782 struct bio_vec *bv;
783 int i;
784
785 if (s->recoverable) {
786 /* Retry from the backing device: */
787 trace_bcache_read_retry(s->orig_bio);
788
789 s->error = 0;
790 bv = s->bio.bio.bi_io_vec;
791 do_bio_hook(s);
792 s->bio.bio.bi_io_vec = bv;
793
794 if (!s->unaligned_bvec)
795 bio_for_each_segment(bv, s->orig_bio, i)
796 bv->bv_offset = 0, bv->bv_len = PAGE_SIZE;
797 else
798 memcpy(s->bio.bio.bi_io_vec,
799 bio_iovec(s->orig_bio),
800 sizeof(struct bio_vec) *
801 bio_segments(s->orig_bio));
802
803 /* XXX: invalidate cache */
804
805 closure_bio_submit(&s->bio.bio, &s->cl, s->d);
806 }
807
808 continue_at(cl, cached_dev_read_complete, NULL);
809}
810
811static void request_read_done(struct closure *cl)
812{
813 struct search *s = container_of(cl, struct search, cl);
814 struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
815
816 /*
 817 * s->op.cache_bio != NULL implies that we had a cache miss; cache_bio now
818 * contains data ready to be inserted into the cache.
819 *
820 * First, we copy the data we just read from cache_bio's bounce buffers
821 * to the buffers the original bio pointed to:
822 */
823
824 if (s->op.cache_bio) {
825 struct bio_vec *src, *dst;
826 unsigned src_offset, dst_offset, bytes;
827 void *dst_ptr;
828
829 bio_reset(s->op.cache_bio);
830 s->op.cache_bio->bi_sector = s->cache_miss->bi_sector;
831 s->op.cache_bio->bi_bdev = s->cache_miss->bi_bdev;
832 s->op.cache_bio->bi_size = s->cache_bio_sectors << 9;
 833 bch_bio_map(s->op.cache_bio, NULL);
834
835 src = bio_iovec(s->op.cache_bio);
836 dst = bio_iovec(s->cache_miss);
837 src_offset = src->bv_offset;
838 dst_offset = dst->bv_offset;
839 dst_ptr = kmap(dst->bv_page);
840
841 while (1) {
842 if (dst_offset == dst->bv_offset + dst->bv_len) {
843 kunmap(dst->bv_page);
844 dst++;
845 if (dst == bio_iovec_idx(s->cache_miss,
846 s->cache_miss->bi_vcnt))
847 break;
848
849 dst_offset = dst->bv_offset;
850 dst_ptr = kmap(dst->bv_page);
851 }
852
853 if (src_offset == src->bv_offset + src->bv_len) {
854 src++;
855 if (src == bio_iovec_idx(s->op.cache_bio,
856 s->op.cache_bio->bi_vcnt))
857 BUG();
858
859 src_offset = src->bv_offset;
860 }
861
862 bytes = min(dst->bv_offset + dst->bv_len - dst_offset,
863 src->bv_offset + src->bv_len - src_offset);
864
865 memcpy(dst_ptr + dst_offset,
866 page_address(src->bv_page) + src_offset,
867 bytes);
868
869 src_offset += bytes;
870 dst_offset += bytes;
871 }
872
873 bio_put(s->cache_miss);
874 s->cache_miss = NULL;
875 }
876
877 if (verify(dc, &s->bio.bio) && s->recoverable)
878 bch_data_verify(s);
879
880 bio_complete(s);
881
882 if (s->op.cache_bio &&
883 !test_bit(CACHE_SET_STOPPING, &s->op.c->flags)) {
884 s->op.type = BTREE_REPLACE;
885 closure_call(&s->op.cl, bch_insert_data, NULL, cl);
886 }
887
888 continue_at(cl, cached_dev_read_complete, NULL);
889}
890
891static void request_read_done_bh(struct closure *cl)
892{
893 struct search *s = container_of(cl, struct search, cl);
894 struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
895
896 bch_mark_cache_accounting(s, !s->cache_miss, s->op.skip);
 897 trace_bcache_read(s->orig_bio, !s->cache_miss, s->op.skip);
898
899 if (s->error)
900 continue_at_nobarrier(cl, request_read_error, bcache_wq);
901 else if (s->op.cache_bio || verify(dc, &s->bio.bio))
902 continue_at_nobarrier(cl, request_read_done, bcache_wq);
903 else
904 continue_at_nobarrier(cl, cached_dev_read_complete, NULL);
905}
906
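/*
 * Handle a cache miss: split off the missing part of the bio, possibly extend
 * it with readahead, allocate a bounce bio that will later be inserted into
 * the cache, and read from the backing device.
 */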
907static int cached_dev_cache_miss(struct btree *b, struct search *s,
908 struct bio *bio, unsigned sectors)
909{
910 int ret = 0;
911 unsigned reada;
912 struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
913 struct bio *miss;
914
915 miss = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split);
916 if (!miss)
917 return -EAGAIN;
918
919 if (miss == bio)
920 s->op.lookup_done = true;
921
922 miss->bi_end_io = request_endio;
923 miss->bi_private = &s->cl;
924
925 if (s->cache_miss || s->op.skip)
926 goto out_submit;
927
928 if (miss != bio ||
929 (bio->bi_rw & REQ_RAHEAD) ||
930 (bio->bi_rw & REQ_META) ||
931 s->op.c->gc_stats.in_use >= CUTOFF_CACHE_READA)
932 reada = 0;
933 else {
934 reada = min(dc->readahead >> 9,
935 sectors - bio_sectors(miss));
936
937 if (bio_end(miss) + reada > bdev_sectors(miss->bi_bdev))
938 reada = bdev_sectors(miss->bi_bdev) - bio_end(miss);
939 }
940
941 s->cache_bio_sectors = bio_sectors(miss) + reada;
942 s->op.cache_bio = bio_alloc_bioset(GFP_NOWAIT,
943 DIV_ROUND_UP(s->cache_bio_sectors, PAGE_SECTORS),
944 dc->disk.bio_split);
945
946 if (!s->op.cache_bio)
947 goto out_submit;
948
949 s->op.cache_bio->bi_sector = miss->bi_sector;
950 s->op.cache_bio->bi_bdev = miss->bi_bdev;
951 s->op.cache_bio->bi_size = s->cache_bio_sectors << 9;
952
953 s->op.cache_bio->bi_end_io = request_endio;
954 s->op.cache_bio->bi_private = &s->cl;
955
956 /* btree_search_recurse()'s btree iterator is no good anymore */
957 ret = -EINTR;
958 if (!bch_btree_insert_check_key(b, &s->op, s->op.cache_bio))
959 goto out_put;
960
961 bch_bio_map(s->op.cache_bio, NULL);
962 if (bch_bio_alloc_pages(s->op.cache_bio, __GFP_NOWARN|GFP_NOIO))
963 goto out_put;
964
965 s->cache_miss = miss;
966 bio_get(s->op.cache_bio);
967
968 closure_bio_submit(s->op.cache_bio, &s->cl, s->d);
969
970 return ret;
971out_put:
972 bio_put(s->op.cache_bio);
973 s->op.cache_bio = NULL;
974out_submit:
975 closure_bio_submit(miss, &s->cl, s->d);
976 return ret;
977}
978
979static void request_read(struct cached_dev *dc, struct search *s)
980{
981 struct closure *cl = &s->cl;
982
983 check_should_skip(dc, s);
984 closure_call(&s->op.cl, btree_read_async, NULL, cl);
985
986 continue_at(cl, request_read_done_bh, NULL);
987}
988
989/* Process writes */
990
991static void cached_dev_write_complete(struct closure *cl)
992{
993 struct search *s = container_of(cl, struct search, cl);
994 struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
995
996 up_read_non_owner(&dc->writeback_lock);
997 cached_dev_bio_complete(cl);
998}
999
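/*
 * Writeback only if we're not detaching, the cache mode is writeback, and
 * cache utilization is below the (sync or async) cutoff.
 */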
1000static bool should_writeback(struct cached_dev *dc, struct bio *bio)
1001{
1002 unsigned threshold = (bio->bi_rw & REQ_SYNC)
1003 ? CUTOFF_WRITEBACK_SYNC
1004 : CUTOFF_WRITEBACK;
1005
1006 return !atomic_read(&dc->disk.detaching) &&
1007 cache_mode(dc, bio) == CACHE_MODE_WRITEBACK &&
1008 dc->disk.c->gc_stats.in_use < threshold;
1009}
1010
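/*
 * Handle a write: depending on cache mode, overlap with dirty writeback keys
 * and the bypass heuristics, either write to the cache only (writeback), to
 * both the cache and the backing device (writethrough), or bypass the cache
 * and just invalidate the range.
 */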
1011static void request_write(struct cached_dev *dc, struct search *s)
1012{
1013 struct closure *cl = &s->cl;
1014 struct bio *bio = &s->bio.bio;
1015 struct bkey start, end;
1016 start = KEY(dc->disk.id, bio->bi_sector, 0);
1017 end = KEY(dc->disk.id, bio_end(bio), 0);
1018
1019 bch_keybuf_check_overlapping(&s->op.c->moving_gc_keys, &start, &end);
1020
1021 check_should_skip(dc, s);
1022 down_read_non_owner(&dc->writeback_lock);
1023
1024 if (bch_keybuf_check_overlapping(&dc->writeback_keys, &start, &end)) {
1025 s->op.skip = false;
1026 s->writeback = true;
1027 }
1028
1029 if (bio->bi_rw & REQ_DISCARD)
1030 goto skip;
1031
1032 if (s->op.skip)
1033 goto skip;
1034
1035 if (should_writeback(dc, s->orig_bio))
1036 s->writeback = true;
1037
1038 trace_bcache_write(s->orig_bio, s->writeback, s->op.skip);
1039
1040 if (!s->writeback) {
1041 s->op.cache_bio = bio_clone_bioset(bio, GFP_NOIO,
1042 dc->disk.bio_split);
1043
1044 closure_bio_submit(bio, cl, s->d);
1045 } else {
1046 s->op.cache_bio = bio;
1047 bch_writeback_add(dc, bio_sectors(bio));
1048 }
1049out:
1050 closure_call(&s->op.cl, bch_insert_data, NULL, cl);
1051 continue_at(cl, cached_dev_write_complete, NULL);
1052skip:
1053 s->op.skip = true;
1054 s->op.cache_bio = s->orig_bio;
1055 bio_get(s->op.cache_bio);
1056
1057 if ((bio->bi_rw & REQ_DISCARD) &&
1058 !blk_queue_discard(bdev_get_queue(dc->bdev)))
1059 goto out;
1060
1061 closure_bio_submit(bio, cl, s->d);
1062 goto out;
1063}
1064
1065static void request_nodata(struct cached_dev *dc, struct search *s)
1066{
1067 struct closure *cl = &s->cl;
1068 struct bio *bio = &s->bio.bio;
1069
1070 if (bio->bi_rw & REQ_DISCARD) {
1071 request_write(dc, s);
1072 return;
1073 }
1074
1075 if (s->op.flush_journal)
1076 bch_journal_meta(s->op.c, cl);
1077
1078 closure_bio_submit(bio, cl, s->d);
1079
1080 continue_at(cl, cached_dev_bio_complete, NULL);
1081}
1082
1083/* Cached devices - read & write stuff */
1084
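/*
 * Returns 0 if the cache isn't congested; otherwise a threshold in sectors
 * that check_should_skip() compares against recent sequential IO to decide
 * whether to bypass the cache.
 */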
 1085unsigned bch_get_congested(struct cache_set *c)
1086{
1087 int i;
 1088 long rand;
1089
1090 if (!c->congested_read_threshold_us &&
1091 !c->congested_write_threshold_us)
1092 return 0;
1093
1094 i = (local_clock_us() - c->congested_last_us) / 1024;
1095 if (i < 0)
1096 return 0;
1097
1098 i += atomic_read(&c->congested);
1099 if (i >= 0)
1100 return 0;
1101
1102 i += CONGESTED_MAX;
1103
1104 if (i > 0)
1105 i = fract_exp_two(i, 6);
1106
1107 rand = get_random_int();
1108 i -= bitmap_weight(&rand, BITS_PER_LONG);
1109
1110 return i > 0 ? i : 1;
1111}
1112
1113static void add_sequential(struct task_struct *t)
1114{
1115 ewma_add(t->sequential_io_avg,
1116 t->sequential_io, 8, 0);
1117
1118 t->sequential_io = 0;
1119}
1120
 1121static struct hlist_head *iohash(struct cached_dev *dc, uint64_t k)
 1122{
1123 return &dc->io_hash[hash_64(k, RECENT_IO_BITS)];
1124}
 1125
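/*
 * Decide whether this IO should bypass the cache: skip unaligned IO, IO while
 * detaching or when the cache is nearly full, writes in writearound mode,
 * large sequential streams past the cutoff, and IO while the cache device is
 * congested.
 */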
1126static void check_should_skip(struct cached_dev *dc, struct search *s)
1127{
1128 struct cache_set *c = s->op.c;
1129 struct bio *bio = &s->bio.bio;
 1130 unsigned mode = cache_mode(dc, bio);
 1131 unsigned sectors, congested = bch_get_congested(c);
1132
1133 if (atomic_read(&dc->disk.detaching) ||
1134 c->gc_stats.in_use > CUTOFF_CACHE_ADD ||
1135 (bio->bi_rw & REQ_DISCARD))
1136 goto skip;
1137
1138 if (mode == CACHE_MODE_NONE ||
1139 (mode == CACHE_MODE_WRITEAROUND &&
1140 (bio->bi_rw & REQ_WRITE)))
1141 goto skip;
1142
1143 if (bio->bi_sector & (c->sb.block_size - 1) ||
1144 bio_sectors(bio) & (c->sb.block_size - 1)) {
1145 pr_debug("skipping unaligned io");
1146 goto skip;
1147 }
1148
1149 if (!congested && !dc->sequential_cutoff)
1150 goto rescale;
 1151
1152 if (!congested &&
1153 mode == CACHE_MODE_WRITEBACK &&
1154 (bio->bi_rw & REQ_WRITE) &&
1155 (bio->bi_rw & REQ_SYNC))
1156 goto rescale;
1157
1158 if (dc->sequential_merge) {
1159 struct io *i;
1160
1161 spin_lock(&dc->io_lock);
1162
 1163 hlist_for_each_entry(i, iohash(dc, bio->bi_sector), hash)
1164 if (i->last == bio->bi_sector &&
1165 time_before(jiffies, i->jiffies))
1166 goto found;
1167
1168 i = list_first_entry(&dc->io_lru, struct io, lru);
1169
1170 add_sequential(s->task);
1171 i->sequential = 0;
1172found:
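/* Only add to i->sequential if the sum doesn't overflow */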
1173 if (i->sequential + bio->bi_size > i->sequential)
1174 i->sequential += bio->bi_size;
1175
1176 i->last = bio_end(bio);
1177 i->jiffies = jiffies + msecs_to_jiffies(5000);
1178 s->task->sequential_io = i->sequential;
1179
1180 hlist_del(&i->hash);
 1181 hlist_add_head(&i->hash, iohash(dc, i->last));
1182 list_move_tail(&i->lru, &dc->io_lru);
1183
1184 spin_unlock(&dc->io_lock);
1185 } else {
1186 s->task->sequential_io = bio->bi_size;
1187
1188 add_sequential(s->task);
1189 }
1190
1191 sectors = max(s->task->sequential_io,
1192 s->task->sequential_io_avg) >> 9;
 1193
1194 if (dc->sequential_cutoff &&
1195 sectors >= dc->sequential_cutoff >> 9) {
1196 trace_bcache_bypass_sequential(s->orig_bio);
 1197 goto skip;
1198 }
1199
1200 if (congested && sectors >= congested) {
1201 trace_bcache_bypass_congested(s->orig_bio);
1202 goto skip;
1203 }
1204
1205rescale:
1206 bch_rescale_priorities(c, bio_sectors(bio));
1207 return;
1208skip:
1209 bch_mark_sectors_bypassed(s, bio_sectors(bio));
1210 s->op.skip = true;
1211}
1212
1213static void cached_dev_make_request(struct request_queue *q, struct bio *bio)
1214{
1215 struct search *s;
1216 struct bcache_device *d = bio->bi_bdev->bd_disk->private_data;
1217 struct cached_dev *dc = container_of(d, struct cached_dev, disk);
1218 int cpu, rw = bio_data_dir(bio);
1219
1220 cpu = part_stat_lock();
1221 part_stat_inc(cpu, &d->disk->part0, ios[rw]);
1222 part_stat_add(cpu, &d->disk->part0, sectors[rw], bio_sectors(bio));
1223 part_stat_unlock();
1224
1225 bio->bi_bdev = dc->bdev;
 1226 bio->bi_sector += dc->sb.data_offset;
1227
1228 if (cached_dev_get(dc)) {
1229 s = search_alloc(bio, d);
1230 trace_bcache_request_start(s, bio);
1231
1232 if (!bio_has_data(bio))
1233 request_nodata(dc, s);
1234 else if (rw)
1235 request_write(dc, s);
1236 else
1237 request_read(dc, s);
1238 } else {
1239 if ((bio->bi_rw & REQ_DISCARD) &&
1240 !blk_queue_discard(bdev_get_queue(dc->bdev)))
1241 bio_endio(bio, 0);
1242 else
1243 bch_generic_make_request(bio, &d->bio_split_hook);
1244 }
1245}
1246
1247static int cached_dev_ioctl(struct bcache_device *d, fmode_t mode,
1248 unsigned int cmd, unsigned long arg)
1249{
1250 struct cached_dev *dc = container_of(d, struct cached_dev, disk);
1251 return __blkdev_driver_ioctl(dc->bdev, mode, cmd, arg);
1252}
1253
1254static int cached_dev_congested(void *data, int bits)
1255{
1256 struct bcache_device *d = data;
1257 struct cached_dev *dc = container_of(d, struct cached_dev, disk);
1258 struct request_queue *q = bdev_get_queue(dc->bdev);
1259 int ret = 0;
1260
1261 if (bdi_congested(&q->backing_dev_info, bits))
1262 return 1;
1263
1264 if (cached_dev_get(dc)) {
1265 unsigned i;
1266 struct cache *ca;
1267
1268 for_each_cache(ca, d->c, i) {
1269 q = bdev_get_queue(ca->bdev);
1270 ret |= bdi_congested(&q->backing_dev_info, bits);
1271 }
1272
1273 cached_dev_put(dc);
1274 }
1275
1276 return ret;
1277}
1278
1279void bch_cached_dev_request_init(struct cached_dev *dc)
1280{
1281 struct gendisk *g = dc->disk.disk;
1282
1283 g->queue->make_request_fn = cached_dev_make_request;
1284 g->queue->backing_dev_info.congested_fn = cached_dev_congested;
1285 dc->disk.cache_miss = cached_dev_cache_miss;
1286 dc->disk.ioctl = cached_dev_ioctl;
1287}
1288
1289/* Flash backed devices */
1290
1291static int flash_dev_cache_miss(struct btree *b, struct search *s,
1292 struct bio *bio, unsigned sectors)
1293{
1294 /* Zero fill bio */
1295
1296 while (bio->bi_idx != bio->bi_vcnt) {
1297 struct bio_vec *bv = bio_iovec(bio);
1298 unsigned j = min(bv->bv_len >> 9, sectors);
1299
1300 void *p = kmap(bv->bv_page);
1301 memset(p + bv->bv_offset, 0, j << 9);
1302 kunmap(bv->bv_page);
1303
1304 bv->bv_len -= j << 9;
1305 bv->bv_offset += j << 9;
1306
1307 if (bv->bv_len)
1308 return 0;
1309
1310 bio->bi_sector += j;
1311 bio->bi_size -= j << 9;
1312
1313 bio->bi_idx++;
1314 sectors -= j;
1315 }
1316
1317 s->op.lookup_done = true;
1318
1319 return 0;
1320}
1321
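/*
 * Flash only volumes have no backing device: reads go straight through the
 * btree lookup, and writes are inserted into the cache as writeback writes.
 */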
1322static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
1323{
1324 struct search *s;
1325 struct closure *cl;
1326 struct bcache_device *d = bio->bi_bdev->bd_disk->private_data;
1327 int cpu, rw = bio_data_dir(bio);
1328
1329 cpu = part_stat_lock();
1330 part_stat_inc(cpu, &d->disk->part0, ios[rw]);
1331 part_stat_add(cpu, &d->disk->part0, sectors[rw], bio_sectors(bio));
1332 part_stat_unlock();
1333
1334 s = search_alloc(bio, d);
1335 cl = &s->cl;
1336 bio = &s->bio.bio;
1337
1338 trace_bcache_request_start(s, bio);
1339
1340 if (bio_has_data(bio) && !rw) {
1341 closure_call(&s->op.cl, btree_read_async, NULL, cl);
1342 } else if (bio_has_data(bio) || s->op.skip) {
1343 bch_keybuf_check_overlapping(&s->op.c->moving_gc_keys,
1344 &KEY(d->id, bio->bi_sector, 0),
1345 &KEY(d->id, bio_end(bio), 0));
1346
1347 s->writeback = true;
1348 s->op.cache_bio = bio;
1349
1350 closure_call(&s->op.cl, bch_insert_data, NULL, cl);
1351 } else {
1352 /* No data - probably a cache flush */
1353 if (s->op.flush_journal)
1354 bch_journal_meta(s->op.c, cl);
1355 }
1356
1357 continue_at(cl, search_free, NULL);
1358}
1359
1360static int flash_dev_ioctl(struct bcache_device *d, fmode_t mode,
1361 unsigned int cmd, unsigned long arg)
1362{
1363 return -ENOTTY;
1364}
1365
1366static int flash_dev_congested(void *data, int bits)
1367{
1368 struct bcache_device *d = data;
1369 struct request_queue *q;
1370 struct cache *ca;
1371 unsigned i;
1372 int ret = 0;
1373
1374 for_each_cache(ca, d->c, i) {
1375 q = bdev_get_queue(ca->bdev);
1376 ret |= bdi_congested(&q->backing_dev_info, bits);
1377 }
1378
1379 return ret;
1380}
1381
1382void bch_flash_dev_request_init(struct bcache_device *d)
1383{
1384 struct gendisk *g = d->disk;
1385
1386 g->queue->make_request_fn = flash_dev_make_request;
1387 g->queue->backing_dev_info.congested_fn = flash_dev_congested;
1388 d->cache_miss = flash_dev_cache_miss;
1389 d->ioctl = flash_dev_ioctl;
1390}
1391
1392void bch_request_exit(void)
1393{
1394#ifdef CONFIG_CGROUP_BCACHE
1395 cgroup_unload_subsys(&bcache_subsys);
1396#endif
1397 if (bch_search_cache)
1398 kmem_cache_destroy(bch_search_cache);
1399}
1400
1401int __init bch_request_init(void)
1402{
1403 bch_search_cache = KMEM_CACHE(search, 0);
1404 if (!bch_search_cache)
1405 return -ENOMEM;
1406
1407#ifdef CONFIG_CGROUP_BCACHE
1408 cgroup_load_subsys(&bcache_subsys);
1409 init_bch_cgroup(&bcache_default_cgroup);
1410
1411 cgroup_add_cftypes(&bcache_subsys, bch_files);
1412#endif
1413 return 0;
1414}