/*
 * Main bcache entry point - handle a read or a write request and decide what to
 * do with it; the make_request functions are called by the block layer.
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */
13 #include "writeback.h"
15 #include <linux/cgroup.h>
16 #include <linux/module.h>
17 #include <linux/hash.h>
18 #include <linux/random.h>
19 #include "blk-cgroup.h"
21 #include <trace/events/bcache.h>
23 #define CUTOFF_CACHE_ADD 95
24 #define CUTOFF_CACHE_READA 90
struct kmem_cache *bch_search_cache;

static void bch_data_insert_start(struct closure *);

/* Cgroup interface */
#ifdef CONFIG_CGROUP_BCACHE
static struct bch_cgroup bcache_default_cgroup = { .cache_mode = -1 };

static struct bch_cgroup *cgroup_to_bcache(struct cgroup *cgroup)
{
	struct cgroup_subsys_state *css;

	return cgroup &&
		(css = cgroup_subsys_state(cgroup, bcache_subsys_id))
		? container_of(css, struct bch_cgroup, css)
		: &bcache_default_cgroup;
}
struct bch_cgroup *bch_bio_to_cgroup(struct bio *bio)
{
	struct cgroup_subsys_state *css = bio->bi_css
		? cgroup_subsys_state(bio->bi_css->cgroup, bcache_subsys_id)
		: task_subsys_state(current, bcache_subsys_id);

	return css
		? container_of(css, struct bch_cgroup, css)
		: &bcache_default_cgroup;
}
static ssize_t cache_mode_read(struct cgroup *cgrp, struct cftype *cft,
			       struct file *file,
			       char __user *buf, size_t nbytes, loff_t *ppos)
{
	char tmp[1024];
	int len = bch_snprint_string_list(tmp, PAGE_SIZE, bch_cache_modes,
					  cgroup_to_bcache(cgrp)->cache_mode + 1);

	if (len < 0)
		return len;

	return simple_read_from_buffer(buf, nbytes, ppos, tmp, len);
}

static int cache_mode_write(struct cgroup *cgrp, struct cftype *cft,
			    const char *buf)
{
	int v = bch_read_string_list(buf, bch_cache_modes);

	if (v < 0)
		return v;

	cgroup_to_bcache(cgrp)->cache_mode = v - 1;
	return 0;
}
static u64 bch_verify_read(struct cgroup *cgrp, struct cftype *cft)
{
	return cgroup_to_bcache(cgrp)->verify;
}

static int bch_verify_write(struct cgroup *cgrp, struct cftype *cft, u64 val)
{
	cgroup_to_bcache(cgrp)->verify = val;
	return 0;
}

static u64 bch_cache_hits_read(struct cgroup *cgrp, struct cftype *cft)
{
	struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp);
	return atomic_read(&bcachecg->stats.cache_hits);
}

static u64 bch_cache_misses_read(struct cgroup *cgrp, struct cftype *cft)
{
	struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp);
	return atomic_read(&bcachecg->stats.cache_misses);
}

static u64 bch_cache_bypass_hits_read(struct cgroup *cgrp,
				      struct cftype *cft)
{
	struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp);
	return atomic_read(&bcachecg->stats.cache_bypass_hits);
}

static u64 bch_cache_bypass_misses_read(struct cgroup *cgrp,
					struct cftype *cft)
{
	struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp);
	return atomic_read(&bcachecg->stats.cache_bypass_misses);
}

static struct cftype bch_files[] = {
	{
		.name		= "cache_mode",
		.read		= cache_mode_read,
		.write_string	= cache_mode_write,
	},
	{
		.name		= "verify",
		.read_u64	= bch_verify_read,
		.write_u64	= bch_verify_write,
	},
	{
		.name		= "cache_hits",
		.read_u64	= bch_cache_hits_read,
	},
	{
		.name		= "cache_misses",
		.read_u64	= bch_cache_misses_read,
	},
	{
		.name		= "cache_bypass_hits",
		.read_u64	= bch_cache_bypass_hits_read,
	},
	{
		.name		= "cache_bypass_misses",
		.read_u64	= bch_cache_bypass_misses_read,
	},
	{ }	/* terminate */
};
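/*
 * Note (editorial): with CONFIG_CGROUP_BCACHE enabled, the entries above are
 * registered via cgroup_add_cftypes() in bch_request_init() below, so they
 * should appear as per-cgroup control files (cache_mode, verify, and the
 * hit/miss/bypass counters) for the bcache cgroup subsystem.
 */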
static void init_bch_cgroup(struct bch_cgroup *cg)
{
	cg->cache_mode = -1;
}

static struct cgroup_subsys_state *bcachecg_create(struct cgroup *cgroup)
{
	struct bch_cgroup *cg;

	cg = kzalloc(sizeof(*cg), GFP_KERNEL);
	if (!cg)
		return ERR_PTR(-ENOMEM);
	init_bch_cgroup(cg);
	return &cg->css;
}

static void bcachecg_destroy(struct cgroup *cgroup)
{
	struct bch_cgroup *cg = cgroup_to_bcache(cgroup);
	free_css_id(&bcache_subsys, &cg->css);
	kfree(cg);
}

struct cgroup_subsys bcache_subsys = {
	.create		= bcachecg_create,
	.destroy	= bcachecg_destroy,
	.subsys_id	= bcache_subsys_id,
	.name		= "bcache",
	.module		= THIS_MODULE,
};
EXPORT_SYMBOL_GPL(bcache_subsys);
#endif
static unsigned cache_mode(struct cached_dev *dc, struct bio *bio)
{
#ifdef CONFIG_CGROUP_BCACHE
	int r = bch_bio_to_cgroup(bio)->cache_mode;
	if (r >= 0)
		return r;
#endif
	return BDEV_CACHE_MODE(&dc->sb);
}

static bool verify(struct cached_dev *dc, struct bio *bio)
{
#ifdef CONFIG_CGROUP_BCACHE
	if (bch_bio_to_cgroup(bio)->verify)
		return true;
#endif
	return dc->verify;
}
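/*
 * bio_csum() below folds the bio's data into a 64-bit checksum and stashes it
 * in the key's pointer array, in the slot just past the last real pointer
 * (k->ptr[KEY_PTRS(k)]); the result is masked to 63 bits
 * (csum & (~0ULL >> 1)) before being stored.
 */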
static void bio_csum(struct bio *bio, struct bkey *k)
{
	struct bio_vec *bv;
	uint64_t csum = 0;
	int i;

	bio_for_each_segment(bv, bio, i) {
		void *d = kmap(bv->bv_page) + bv->bv_offset;
		csum = bch_crc64_update(csum, d, bv->bv_len);
		kunmap(bv->bv_page);
	}

	k->ptr[KEY_PTRS(k)] = csum & (~0ULL >> 1);
}
/* Insert data into cache */

static void bch_data_insert_keys(struct closure *cl)
{
	struct btree_op *op = container_of(cl, struct btree_op, cl);
	struct search *s = container_of(op, struct search, op);

	/*
	 * If we're looping, might already be waiting on
	 * another journal write - can't wait on more than one journal write at
	 * a time
	 *
	 * XXX: this looks wrong
	 */
	while (atomic_read(&s->cl.remaining) & CLOSURE_WAITING)
		closure_sync(&s->cl);

	if (s->write)
		op->journal = bch_journal(op->c, &s->insert_keys,
					  op->flush_journal ? &s->cl : NULL);

	if (bch_btree_insert(op, op->c, &s->insert_keys)) {
		s->error		= -ENOMEM;
		op->insert_data_done	= true;
	}

	if (op->journal)
		atomic_dec_bug(op->journal);
	op->journal = NULL;

	if (!op->insert_data_done)
		continue_at(cl, bch_data_insert_start, bcache_wq);

	bch_keylist_free(&s->insert_keys);
	closure_return(cl);
}
struct open_bucket {
	struct list_head	list;
	struct task_struct	*last;
	unsigned		sectors_free;
	BKEY_PADDED(key);
};

void bch_open_buckets_free(struct cache_set *c)
{
	struct open_bucket *b;

	while (!list_empty(&c->data_buckets)) {
		b = list_first_entry(&c->data_buckets,
				     struct open_bucket, list);
		list_del(&b->list);
		kfree(b);
	}
}

int bch_open_buckets_alloc(struct cache_set *c)
{
	int i;

	spin_lock_init(&c->data_bucket_lock);

	for (i = 0; i < 6; i++) {
		struct open_bucket *b = kzalloc(sizeof(*b), GFP_KERNEL);

		if (!b)
			return -ENOMEM;

		list_add(&b->list, &c->data_buckets);
	}

	return 0;
}
/*
 * We keep multiple buckets open for writes, and try to segregate different
 * write streams for better cache utilization: first we look for a bucket where
 * the last write to it was sequential with the current write, and failing that
 * we look for a bucket that was last used by the same task.
 *
 * The idea is that if you've got multiple tasks pulling data into the cache at
 * the same time, you'll get better cache utilization if you try to segregate
 * their data and preserve locality.
 *
 * For example, say you're starting Firefox at the same time you're copying a
 * bunch of files. Firefox will likely end up being fairly hot and stay in the
 * cache a while, but the data you copied might not be; if you wrote all that
 * data to the same buckets it'd get invalidated at the same time.
 *
 * Both of those tasks will be doing fairly random IO so we can't rely on
 * detecting sequential IO to segregate their data, but going off of the task
 * should be a sane heuristic.
 */
static struct open_bucket *pick_data_bucket(struct cache_set *c,
					    const struct bkey *search,
					    struct task_struct *task,
					    struct bkey *alloc)
{
	struct open_bucket *ret, *ret_task = NULL;

	list_for_each_entry_reverse(ret, &c->data_buckets, list)
		if (!bkey_cmp(&ret->key, search))
			goto found;
		else if (ret->last == task)
			ret_task = ret;

	ret = ret_task ?: list_first_entry(&c->data_buckets,
					   struct open_bucket, list);
found:
	if (!ret->sectors_free && KEY_PTRS(alloc)) {
		ret->sectors_free = c->sb.bucket_size;
		bkey_copy(&ret->key, alloc);
		bkey_init(alloc);
	}

	if (!ret->sectors_free)
		ret = NULL;

	return ret;
}
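/*
 * In short, pick_data_bucket() prefers (in order): an open bucket whose last
 * key ends exactly where this write begins (i.e. sequential with it), then a
 * bucket last written to by the same task, and finally the least recently
 * used open bucket; a bucket with no sectors left is only handed back once
 * the caller has passed in a freshly allocated key via @alloc.
 */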
/*
 * Allocates some space in the cache to write to, sets k to point to the newly
 * allocated space, and updates KEY_SIZE(k) and KEY_OFFSET(k) (to point to the
 * end of the newly allocated space).
 *
 * May allocate fewer sectors than @sectors; KEY_SIZE(k) indicates how many
 * sectors were actually allocated.
 *
 * If s->writeback is true, will not fail.
 */
static bool bch_alloc_sectors(struct bkey *k, unsigned sectors,
			      struct search *s)
{
	struct cache_set *c = s->op.c;
	struct open_bucket *b;
	BKEY_PADDED(key) alloc;
	unsigned i;

	/*
	 * We might have to allocate a new bucket, which we can't do with a
	 * spinlock held. So if we have to allocate, we drop the lock, allocate
	 * and then retry. KEY_PTRS() indicates whether alloc points to
	 * allocated bucket(s).
	 */

	bkey_init(&alloc.key);
	spin_lock(&c->data_bucket_lock);

	while (!(b = pick_data_bucket(c, k, s->task, &alloc.key))) {
		unsigned watermark = s->op.write_prio
			? WATERMARK_MOVINGGC
			: WATERMARK_NONE;

		spin_unlock(&c->data_bucket_lock);

		if (bch_bucket_alloc_set(c, watermark, &alloc.key,
					 1, s->writeback))
			return false;

		spin_lock(&c->data_bucket_lock);
	}

	/*
	 * If we had to allocate, we might race and not need to allocate the
	 * second time we call find_data_bucket(). If we allocated a bucket but
	 * didn't use it, drop the refcount bch_bucket_alloc_set() took:
	 */
	if (KEY_PTRS(&alloc.key))
		__bkey_put(c, &alloc.key);

	for (i = 0; i < KEY_PTRS(&b->key); i++)
		EBUG_ON(ptr_stale(c, &b->key, i));

	/* Set up the pointer to the space we're allocating: */

	for (i = 0; i < KEY_PTRS(&b->key); i++)
		k->ptr[i] = b->key.ptr[i];

	sectors = min(sectors, b->sectors_free);

	SET_KEY_OFFSET(k, KEY_OFFSET(k) + sectors);
	SET_KEY_SIZE(k, sectors);
	SET_KEY_PTRS(k, KEY_PTRS(&b->key));

	/*
	 * Move b to the end of the lru, and keep track of what this bucket was
	 * last used for:
	 */
	list_move_tail(&b->list, &c->data_buckets);
	bkey_copy_key(&b->key, k);
	b->last = s->task;

	b->sectors_free	-= sectors;

	for (i = 0; i < KEY_PTRS(&b->key); i++) {
		SET_PTR_OFFSET(&b->key, i, PTR_OFFSET(&b->key, i) + sectors);

		atomic_long_add(sectors,
				&PTR_CACHE(c, &b->key, i)->sectors_written);
	}

	if (b->sectors_free < c->sb.block_size)
		b->sectors_free = 0;

	/*
	 * k takes refcounts on the buckets it points to until it's inserted
	 * into the btree, but if we're done with this bucket we just transfer
	 * get_data_bucket()'s refcount.
	 */
	if (b->sectors_free)
		for (i = 0; i < KEY_PTRS(&b->key); i++)
			atomic_inc(&PTR_BUCKET(c, &b->key, i)->pin);

	spin_unlock(&c->data_bucket_lock);
	return true;
}
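/*
 * A typical caller loops on bch_alloc_sectors(), splitting its bio by however
 * much was actually allocated each time. Roughly (a sketch only, not a second
 * implementation - bch_data_insert_start() below is the real one):
 *
 *	do {
 *		k = s->insert_keys.top;
 *		bkey_init(k);
 *		SET_KEY_INODE(k, op->inode);
 *		SET_KEY_OFFSET(k, bio->bi_sector);
 *		if (!bch_alloc_sectors(k, bio_sectors(bio), s))
 *			break;
 *		n = bch_bio_split(bio, KEY_SIZE(k), GFP_NOIO, split);
 *		bch_submit_bbio(n, op->c, k, 0);
 *	} while (n != bio);
 */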
static void bch_data_invalidate(struct closure *cl)
{
	struct btree_op *op = container_of(cl, struct btree_op, cl);
	struct search *s = container_of(op, struct search, op);
	struct bio *bio = op->cache_bio;

	pr_debug("invalidating %i sectors from %llu",
		 bio_sectors(bio), (uint64_t) bio->bi_sector);

	while (bio_sectors(bio)) {
		unsigned len = min(bio_sectors(bio), 1U << 14);

		if (bch_keylist_realloc(&s->insert_keys, 0, op->c))
			goto out;

		bio->bi_sector	+= len;
		bio->bi_size	-= len << 9;

		bch_keylist_add(&s->insert_keys,
				&KEY(op->inode, bio->bi_sector, len));
	}

	op->insert_data_done = true;
	bio_put(bio);
out:
	continue_at(cl, bch_data_insert_keys, bcache_wq);
}
static void bch_data_insert_error(struct closure *cl)
{
	struct btree_op *op = container_of(cl, struct btree_op, cl);
	struct search *s = container_of(op, struct search, op);

	/*
	 * Our data write just errored, which means we've got a bunch of keys to
	 * insert that point to data that wasn't successfully written.
	 *
	 * We don't have to insert those keys but we still have to invalidate
	 * that region of the cache - so, if we just strip off all the pointers
	 * from the keys we'll accomplish just that.
	 */

	struct bkey *src = s->insert_keys.keys, *dst = s->insert_keys.keys;

	while (src != s->insert_keys.top) {
		struct bkey *n = bkey_next(src);

		SET_KEY_PTRS(src, 0);
		memmove(dst, src, bkey_bytes(src));

		dst = bkey_next(dst);
		src = n;
	}

	s->insert_keys.top = dst;

	bch_data_insert_keys(cl);
}
static void bch_data_insert_endio(struct bio *bio, int error)
{
	struct closure *cl = bio->bi_private;
	struct btree_op *op = container_of(cl, struct btree_op, cl);
	struct search *s = container_of(op, struct search, op);

	if (error) {
		/* TODO: We could try to recover from this. */
		if (s->writeback)
			s->error = error;
		else if (s->write)
			set_closure_fn(cl, bch_data_insert_error, bcache_wq);
		else
			set_closure_fn(cl, NULL, NULL);
	}

	bch_bbio_endio(op->c, bio, error, "writing data to cache");
}
static void bch_data_insert_start(struct closure *cl)
{
	struct btree_op *op = container_of(cl, struct btree_op, cl);
	struct search *s = container_of(op, struct search, op);
	struct bio *bio = op->cache_bio, *n;

	if (op->bypass)
		return bch_data_invalidate(cl);

	if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0) {
		set_gc_sectors(op->c);
		wake_up_gc(op->c);
	}

	/*
	 * Journal writes are marked REQ_FLUSH; if the original write was a
	 * flush, it'll wait on the journal write.
	 */
	bio->bi_rw &= ~(REQ_FLUSH|REQ_FUA);

	do {
		unsigned i;
		struct bkey *k;
		struct bio_set *split = s->d
			? s->d->bio_split : op->c->bio_split;

		/* 1 for the device pointer and 1 for the chksum */
		if (bch_keylist_realloc(&s->insert_keys,
					1 + (op->csum ? 1 : 0),
					op->c))
			continue_at(cl, bch_data_insert_keys, bcache_wq);

		k = s->insert_keys.top;
		bkey_init(k);
		SET_KEY_INODE(k, op->inode);
		SET_KEY_OFFSET(k, bio->bi_sector);

		if (!bch_alloc_sectors(k, bio_sectors(bio), s))
			goto err;

		n = bch_bio_split(bio, KEY_SIZE(k), GFP_NOIO, split);

		n->bi_end_io	= bch_data_insert_endio;
		n->bi_private	= cl;

		if (s->writeback) {
			SET_KEY_DIRTY(k, true);

			for (i = 0; i < KEY_PTRS(k); i++)
				SET_GC_MARK(PTR_BUCKET(op->c, k, i),
					    GC_MARK_DIRTY);
		}

		SET_KEY_CSUM(k, op->csum);
		if (KEY_CSUM(k))
			bio_csum(n, k);

		trace_bcache_cache_insert(k);
		bch_keylist_push(&s->insert_keys);

		n->bi_rw |= REQ_WRITE;
		bch_submit_bbio(n, op->c, k, 0);
	} while (n != bio);

	op->insert_data_done = true;
	continue_at(cl, bch_data_insert_keys, bcache_wq);
err:
	/* bch_alloc_sectors() blocks if s->writeback = true */
	BUG_ON(s->writeback);

	/*
	 * But if it's not a writeback write we'd rather just bail out if there
	 * aren't any buckets ready to write to - it might take a while and we
	 * might be starving btree writes for gc or something.
	 */

	if (s->write) {
		/*
		 * Writethrough write: We can't complete the write until we've
		 * updated the index. But we don't want to delay the write while
		 * we wait for buckets to be freed up, so just invalidate the
		 * rest of the write.
		 */
		op->bypass = true;
		return bch_data_invalidate(cl);
	} else {
		/*
		 * From a cache miss, we can just insert the keys for the data
		 * we have written or bail out if we didn't do anything.
		 */
		op->insert_data_done = true;
		bio_put(bio);

		if (!bch_keylist_empty(&s->insert_keys))
			continue_at(cl, bch_data_insert_keys, bcache_wq);
		else
			closure_return(cl);
	}
}
/**
 * bch_data_insert - stick some data in the cache
 *
 * This is the starting point for any data to end up in a cache device; it
 * could be from a normal write, or a writeback write, or a write to a flash
 * only volume - it's also used by the moving garbage collector to compact
 * data in mostly empty buckets.
 *
 * It first writes the data to the cache, creating a list of keys to be
 * inserted (if the data had to be fragmented there will be multiple keys);
 * after the data is written it calls bch_journal, and after the keys have
 * been added to the next journal write they're inserted into the btree.
 *
 * It inserts the data in op->cache_bio; bi_sector is used for the key offset,
 * and op->inode is used for the key inode.
 *
 * If op->bypass is true, instead of inserting the data it invalidates the
 * region of the cache represented by op->cache_bio and op->inode.
 */
void bch_data_insert(struct closure *cl)
{
	struct btree_op *op = container_of(cl, struct btree_op, cl);
	struct search *s = container_of(op, struct search, op);

	bch_keylist_init(&s->insert_keys);
	bio_get(op->cache_bio);
	bch_data_insert_start(cl);
}
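/*
 * Callers drive this through the closure machinery; the pattern used later in
 * this file (cached_dev_write(), flash_dev_make_request()) is roughly:
 *
 *	s->op.cache_bio = bio;		// data to insert (or invalidate)
 *	closure_call(&s->op.cl, bch_data_insert, NULL, &s->cl);
 *
 * with op->inode and friends already set up by search_alloc() and the caller.
 */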
static void bch_cache_read_endio(struct bio *bio, int error)
{
	struct bbio *b = container_of(bio, struct bbio, bio);
	struct closure *cl = bio->bi_private;
	struct search *s = container_of(cl, struct search, cl);

	/*
	 * If the bucket was reused while our bio was in flight, we might have
	 * read the wrong data. Set s->error but not error so it doesn't get
	 * counted against the cache device, but we'll still reread the data
	 * from the backing device.
	 */

	if (error)
		s->error = error;
	else if (ptr_stale(s->op.c, &b->key, 0)) {
		atomic_long_inc(&s->op.c->cache_read_races);
		s->error = -EINTR;
	}

	bch_bbio_endio(s->op.c, bio, error, "reading from cache");
}
/*
 * Read from a single key, handling the initial cache miss if the key starts in
 * the middle of the bio
 */
static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
{
	struct search *s = container_of(op, struct search, op);
	struct bio *n, *bio = &s->bio.bio;
	struct bkey *bio_key;
	unsigned ptr;

	if (bkey_cmp(k, &KEY(op->inode, bio->bi_sector, 0)) <= 0)
		return MAP_CONTINUE;

	if (KEY_INODE(k) != s->op.inode ||
	    KEY_START(k) > bio->bi_sector) {
		unsigned bio_sectors = bio_sectors(bio);
		unsigned sectors = KEY_INODE(k) == s->op.inode
			? min_t(uint64_t, INT_MAX,
				KEY_START(k) - bio->bi_sector)
			: INT_MAX;

		int ret = s->d->cache_miss(b, s, bio, sectors);
		if (ret != MAP_CONTINUE)
			return ret;

		/* if this was a complete miss we shouldn't get here */
		BUG_ON(bio_sectors <= sectors);
	}

	if (!KEY_SIZE(k))
		return MAP_CONTINUE;

	/* XXX: figure out best pointer - for multiple cache devices */
	ptr = 0;

	PTR_BUCKET(b->c, k, ptr)->prio = INITIAL_PRIO;

	n = bch_bio_split(bio, min_t(uint64_t, INT_MAX,
				     KEY_OFFSET(k) - bio->bi_sector),
			  GFP_NOIO, s->d->bio_split);

	bio_key = &container_of(n, struct bbio, bio)->key;
	bch_bkey_copy_single_ptr(bio_key, k, ptr);

	bch_cut_front(&KEY(s->op.inode, n->bi_sector, 0), bio_key);
	bch_cut_back(&KEY(s->op.inode, bio_end_sector(n), 0), bio_key);

	n->bi_end_io	= bch_cache_read_endio;
	n->bi_private	= &s->cl;

	/*
	 * The bucket we're reading from might be reused while our bio
	 * is in flight, and we could then end up reading the wrong
	 * data.
	 *
	 * We guard against this by checking (in cache_read_endio()) if
	 * the pointer is stale again; if so, we treat it as an error
	 * and reread from the backing device (but we don't pass that
	 * error up anywhere).
	 */

	__bch_submit_bbio(n, b->c);
	return n == bio ? MAP_DONE : MAP_CONTINUE;
}
static void cache_lookup(struct closure *cl)
{
	struct btree_op *op = container_of(cl, struct btree_op, cl);
	struct search *s = container_of(op, struct search, op);
	struct bio *bio = &s->bio.bio;

	int ret = bch_btree_map_keys(op, op->c,
				     &KEY(op->inode, bio->bi_sector, 0),
				     cache_lookup_fn, MAP_END_KEY);
	if (ret == -EAGAIN)
		continue_at(cl, cache_lookup, bcache_wq);

	closure_return(cl);
}
/* Common code for the make_request functions */

static void request_endio(struct bio *bio, int error)
{
	struct closure *cl = bio->bi_private;

	if (error) {
		struct search *s = container_of(cl, struct search, cl);
		s->error = error;
		/* Only cache read errors are recoverable */
		s->recoverable = false;
	}

	bio_put(bio);
	closure_put(cl);
}

static void bio_complete(struct search *s)
{
	if (s->orig_bio) {
		int cpu, rw = bio_data_dir(s->orig_bio);
		unsigned long duration = jiffies - s->start_time;

		cpu = part_stat_lock();
		part_round_stats(cpu, &s->d->disk->part0);
		part_stat_add(cpu, &s->d->disk->part0, ticks[rw], duration);
		part_stat_unlock();

		trace_bcache_request_end(s, s->orig_bio);
		bio_endio(s->orig_bio, s->error);
		s->orig_bio = NULL;
	}
}

static void do_bio_hook(struct search *s)
{
	struct bio *bio = &s->bio.bio;
	memcpy(bio, s->orig_bio, sizeof(struct bio));

	bio->bi_end_io	= request_endio;
	bio->bi_private	= &s->cl;
	atomic_set(&bio->bi_cnt, 3);
}

static void search_free(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	bio_complete(s);

	if (s->op.cache_bio)
		bio_put(s->op.cache_bio);

	if (s->unaligned_bvec)
		mempool_free(s->bio.bio.bi_io_vec, s->d->unaligned_bvec);

	closure_debug_destroy(cl);
	mempool_free(s, s->d->c->search);
}
static struct search *search_alloc(struct bio *bio, struct bcache_device *d)
{
	struct bio_vec *bv;
	struct search *s;

	s = mempool_alloc(d->c->search, GFP_NOIO);
	memset(s, 0, offsetof(struct search, insert_keys));

	__closure_init(&s->cl, NULL);

	s->op.inode		= d->id;
	s->op.c			= d->c;
	s->d			= d;
	s->orig_bio		= bio;
	s->write		= (bio->bi_rw & REQ_WRITE) != 0;
	s->op.flush_journal	= (bio->bi_rw & (REQ_FLUSH|REQ_FUA)) != 0;
	s->recoverable		= 1;
	s->start_time		= jiffies;
	do_bio_hook(s);

	if (bio->bi_size != bio_segments(bio) * PAGE_SIZE) {
		bv = mempool_alloc(d->unaligned_bvec, GFP_NOIO);
		memcpy(bv, bio_iovec(bio),
		       sizeof(struct bio_vec) * bio_segments(bio));

		s->bio.bio.bi_io_vec	= bv;
		s->unaligned_bvec	= 1;
	}

	return s;
}
static void cached_dev_bio_complete(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	search_free(cl);
	cached_dev_put(dc);
}
unsigned bch_get_congested(struct cache_set *c)
{
	int i;
	long rand;

	if (!c->congested_read_threshold_us &&
	    !c->congested_write_threshold_us)
		return 0;

	i = (local_clock_us() - c->congested_last_us) / 1024;
	if (i < 0)
		return 0;

	i += atomic_read(&c->congested);
	if (i >= 0)
		return 0;

	i += CONGESTED_MAX;

	if (i > 0)
		i = fract_exp_two(i, 6);

	rand = get_random_int();
	i -= bitmap_weight(&rand, BITS_PER_LONG);

	return i > 0 ? i : 1;
}
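/*
 * The value returned here is used as a threshold (in sectors) by
 * check_should_bypass(): a nonzero return means the cache device looks
 * congested, and IOs at or above that many sectors get bypassed to the
 * backing device. The bitmap_weight() of a random word adds a little jitter
 * so callers don't all flip between the two behaviours at the same point.
 */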
static void add_sequential(struct task_struct *t)
{
	ewma_add(t->sequential_io_avg,
		 t->sequential_io, 8, 0);

	t->sequential_io = 0;
}

static struct hlist_head *iohash(struct cached_dev *dc, uint64_t k)
{
	return &dc->io_hash[hash_64(k, RECENT_IO_BITS)];
}
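/*
 * check_should_bypass() decides whether to cache this IO at all. Sequential
 * streams are detected with a small per-device hash (dc->io_hash, keyed by
 * the sector a recent IO ended at) plus an LRU (dc->io_lru) of struct io
 * entries: an IO that starts where a recent one ended continues that stream's
 * byte count, and once a stream exceeds dc->sequential_cutoff (or the cache
 * looks congested) the IO is sent straight to the backing device.
 */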
static bool check_should_bypass(struct cached_dev *dc, struct search *s)
{
	struct cache_set *c = s->op.c;
	struct bio *bio = &s->bio.bio;
	unsigned mode = cache_mode(dc, bio);
	unsigned sectors, congested = bch_get_congested(c);

	if (atomic_read(&dc->disk.detaching) ||
	    c->gc_stats.in_use > CUTOFF_CACHE_ADD ||
	    (bio->bi_rw & REQ_DISCARD))
		goto skip;

	if (mode == CACHE_MODE_NONE ||
	    (mode == CACHE_MODE_WRITEAROUND &&
	     (bio->bi_rw & REQ_WRITE)))
		goto skip;

	if (bio->bi_sector & (c->sb.block_size - 1) ||
	    bio_sectors(bio) & (c->sb.block_size - 1)) {
		pr_debug("skipping unaligned io");
		goto skip;
	}

	if (!congested && !dc->sequential_cutoff)
		goto rescale;

	if (!congested &&
	    mode == CACHE_MODE_WRITEBACK &&
	    (bio->bi_rw & REQ_WRITE) &&
	    (bio->bi_rw & REQ_SYNC))
		goto rescale;

	if (dc->sequential_merge) {
		struct io *i;

		spin_lock(&dc->io_lock);

		hlist_for_each_entry(i, iohash(dc, bio->bi_sector), hash)
			if (i->last == bio->bi_sector &&
			    time_before(jiffies, i->jiffies))
				goto found;

		i = list_first_entry(&dc->io_lru, struct io, lru);

		add_sequential(s->task);
		i->sequential = 0;
found:
		if (i->sequential + bio->bi_size > i->sequential)
			i->sequential	+= bio->bi_size;

		i->last			 = bio_end_sector(bio);
		i->jiffies		 = jiffies + msecs_to_jiffies(5000);
		s->task->sequential_io	 = i->sequential;

		hlist_del(&i->hash);
		hlist_add_head(&i->hash, iohash(dc, i->last));
		list_move_tail(&i->lru, &dc->io_lru);

		spin_unlock(&dc->io_lock);
	} else {
		s->task->sequential_io = bio->bi_size;

		add_sequential(s->task);
	}

	sectors = max(s->task->sequential_io,
		      s->task->sequential_io_avg) >> 9;

	if (dc->sequential_cutoff &&
	    sectors >= dc->sequential_cutoff >> 9) {
		trace_bcache_bypass_sequential(s->orig_bio);
		goto skip;
	}

	if (congested && sectors >= congested) {
		trace_bcache_bypass_congested(s->orig_bio);
		goto skip;
	}

rescale:
	bch_rescale_priorities(c, bio_sectors(bio));
	return false;
skip:
	bch_mark_sectors_bypassed(s, bio_sectors(bio));
	return true;
}
static void cached_dev_cache_miss_done(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);

	if (s->op.insert_collision)
		bch_mark_cache_miss_collision(s);

	if (s->op.cache_bio) {
		int i;
		struct bio_vec *bv;

		__bio_for_each_segment(bv, s->op.cache_bio, i, 0)
			__free_page(bv->bv_page);
	}

	cached_dev_bio_complete(cl);
}
static void cached_dev_read_error(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct bio *bio = &s->bio.bio;
	struct bio_vec *bv;
	int i;

	if (s->recoverable) {
		/* Retry from the backing device: */
		trace_bcache_read_retry(s->orig_bio);

		s->error = 0;
		bv = s->bio.bio.bi_io_vec;
		do_bio_hook(s);
		s->bio.bio.bi_io_vec = bv;

		if (!s->unaligned_bvec)
			bio_for_each_segment(bv, s->orig_bio, i)
				bv->bv_offset = 0, bv->bv_len = PAGE_SIZE;
		else
			memcpy(s->bio.bio.bi_io_vec,
			       bio_iovec(s->orig_bio),
			       sizeof(struct bio_vec) *
			       bio_segments(s->orig_bio));

		/* XXX: invalidate cache */

		closure_bio_submit(bio, cl, s->d);
	}

	continue_at(cl, cached_dev_cache_miss_done, NULL);
}
*cl
)
1034 struct search
*s
= container_of(cl
, struct search
, cl
);
1035 struct cached_dev
*dc
= container_of(s
->d
, struct cached_dev
, disk
);
1038 * We had a cache miss; cache_bio now contains data ready to be inserted
1041 * First, we copy the data we just read from cache_bio's bounce buffers
1042 * to the buffers the original bio pointed to:
1045 if (s
->op
.cache_bio
) {
1046 bio_reset(s
->op
.cache_bio
);
1047 s
->op
.cache_bio
->bi_sector
= s
->cache_miss
->bi_sector
;
1048 s
->op
.cache_bio
->bi_bdev
= s
->cache_miss
->bi_bdev
;
1049 s
->op
.cache_bio
->bi_size
= s
->cache_bio_sectors
<< 9;
1050 bch_bio_map(s
->op
.cache_bio
, NULL
);
1052 bio_copy_data(s
->cache_miss
, s
->op
.cache_bio
);
1054 bio_put(s
->cache_miss
);
1055 s
->cache_miss
= NULL
;
1058 if (verify(dc
, &s
->bio
.bio
) && s
->recoverable
)
1063 if (s
->op
.cache_bio
&&
1064 !test_bit(CACHE_SET_STOPPING
, &s
->op
.c
->flags
)) {
1065 s
->op
.type
= BTREE_REPLACE
;
1066 closure_call(&s
->op
.cl
, bch_data_insert
, NULL
, cl
);
1069 continue_at(cl
, cached_dev_cache_miss_done
, NULL
);
static void cached_dev_read_done_bh(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	bch_mark_cache_accounting(s, !s->cache_miss, s->op.bypass);
	trace_bcache_read(s->orig_bio, !s->cache_miss, s->op.bypass);

	if (s->error)
		continue_at_nobarrier(cl, cached_dev_read_error, bcache_wq);
	else if (s->op.cache_bio || verify(dc, &s->bio.bio))
		continue_at_nobarrier(cl, cached_dev_read_done, bcache_wq);
	else
		continue_at_nobarrier(cl, cached_dev_bio_complete, NULL);
}
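/*
 * cached_dev_cache_miss() handles the part of a read the cache couldn't
 * satisfy: it splits off the missing range, optionally pads it with readahead
 * (only below CUTOFF_CACHE_READA, capped at the end of the backing device),
 * reserves the range in the btree with a check key so a racing write is
 * noticed, and reads into a bounce cache_bio that cached_dev_read_done()
 * later copies out and inserts with BTREE_REPLACE.
 */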
static int cached_dev_cache_miss(struct btree *b, struct search *s,
				 struct bio *bio, unsigned sectors)
{
	int ret = MAP_CONTINUE;
	unsigned reada = 0;
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
	struct bio *miss, *cache_bio;

	if (s->cache_miss || s->op.bypass) {
		miss = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split);
		ret = miss == bio ? MAP_DONE : MAP_CONTINUE;
		goto out_submit;
	}

	if (!(bio->bi_rw & REQ_RAHEAD) &&
	    !(bio->bi_rw & REQ_META) &&
	    s->op.c->gc_stats.in_use < CUTOFF_CACHE_READA)
		reada = min_t(sector_t, dc->readahead >> 9,
			      bdev_sectors(bio->bi_bdev) - bio_end_sector(bio));

	s->cache_bio_sectors = min(sectors, bio_sectors(bio) + reada);

	s->op.replace = KEY(s->op.inode, bio->bi_sector +
			    s->cache_bio_sectors, s->cache_bio_sectors);

	ret = bch_btree_insert_check_key(b, &s->op, &s->op.replace);
	if (ret)
		return ret;

	miss = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split);

	/* btree_search_recurse()'s btree iterator is no good anymore */
	ret = miss == bio ? MAP_DONE : -EINTR;

	cache_bio = bio_alloc_bioset(GFP_NOWAIT,
			DIV_ROUND_UP(s->cache_bio_sectors, PAGE_SECTORS),
			dc->disk.bio_split);
	if (!cache_bio)
		goto out_submit;

	cache_bio->bi_sector	= miss->bi_sector;
	cache_bio->bi_bdev	= miss->bi_bdev;
	cache_bio->bi_size	= s->cache_bio_sectors << 9;

	cache_bio->bi_end_io	= request_endio;
	cache_bio->bi_private	= &s->cl;

	bch_bio_map(cache_bio, NULL);
	if (bio_alloc_pages(cache_bio, __GFP_NOWARN|GFP_NOIO))
		goto out_put;

	s->cache_miss	= miss;
	s->op.cache_bio	= cache_bio;
	bio_get(cache_bio);
	closure_bio_submit(cache_bio, &s->cl, s->d);

	return ret;
out_put:
	bio_put(cache_bio);
out_submit:
	miss->bi_end_io		= request_endio;
	miss->bi_private	= &s->cl;
	closure_bio_submit(miss, &s->cl, s->d);
	return ret;
}
static void cached_dev_read(struct cached_dev *dc, struct search *s)
{
	struct closure *cl = &s->cl;

	closure_call(&s->op.cl, cache_lookup, NULL, cl);
	continue_at(cl, cached_dev_read_done_bh, NULL);
}
/* Process writes */

static void cached_dev_write_complete(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	up_read_non_owner(&dc->writeback_lock);
	cached_dev_bio_complete(cl);
}
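/*
 * cached_dev_write() sorts a write into one of three cases: bypass (the data
 * only goes to the backing device and the matching cache range is
 * invalidated), writeback (the data only goes to the cache, marked dirty,
 * with a flush forwarded to the backing device if requested), or writethrough
 * (the write goes to the backing device and a clone of the bio is handed to
 * bch_data_insert()).
 */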
static void cached_dev_write(struct cached_dev *dc, struct search *s)
{
	struct closure *cl = &s->cl;
	struct bio *bio = &s->bio.bio;
	struct bkey start = KEY(dc->disk.id, bio->bi_sector, 0);
	struct bkey end = KEY(dc->disk.id, bio_end_sector(bio), 0);

	bch_keybuf_check_overlapping(&s->op.c->moving_gc_keys, &start, &end);

	down_read_non_owner(&dc->writeback_lock);
	if (bch_keybuf_check_overlapping(&dc->writeback_keys, &start, &end)) {
		/*
		 * We overlap with some dirty data undergoing background
		 * writeback, force this write to writeback
		 */
		s->op.bypass	= false;
		s->writeback	= true;
	}

	/*
	 * Discards aren't _required_ to do anything, so skipping if
	 * check_overlapping returned true is ok
	 *
	 * But check_overlapping drops dirty keys for which io hasn't started,
	 * so we still want to call it.
	 */
	if (bio->bi_rw & REQ_DISCARD)
		s->op.bypass = true;

	if (should_writeback(dc, s->orig_bio,
			     cache_mode(dc, bio),
			     s->op.bypass)) {
		s->op.bypass = false;
		s->writeback = true;
	}

	trace_bcache_write(s->orig_bio, s->writeback, s->op.bypass);

	if (s->op.bypass) {
		s->op.cache_bio = s->orig_bio;
		bio_get(s->op.cache_bio);

		if (!(bio->bi_rw & REQ_DISCARD) ||
		    blk_queue_discard(bdev_get_queue(dc->bdev)))
			closure_bio_submit(bio, cl, s->d);
	} else if (s->writeback) {
		bch_writeback_add(dc);
		s->op.cache_bio = bio;

		if (bio->bi_rw & REQ_FLUSH) {
			/* Also need to send a flush to the backing device */
			struct bio *flush = bio_alloc_bioset(GFP_NOIO, 0,
							     dc->disk.bio_split);

			flush->bi_rw		= WRITE_FLUSH;
			flush->bi_bdev		= bio->bi_bdev;
			flush->bi_end_io	= request_endio;
			flush->bi_private	= cl;

			closure_bio_submit(flush, cl, s->d);
		}
	} else {
		s->op.cache_bio = bio_clone_bioset(bio, GFP_NOIO,
						   dc->disk.bio_split);

		closure_bio_submit(bio, cl, s->d);
	}

	closure_call(&s->op.cl, bch_data_insert, NULL, cl);
	continue_at(cl, cached_dev_write_complete, NULL);
}
static void cached_dev_nodata(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct bio *bio = &s->bio.bio;

	if (s->op.flush_journal)
		bch_journal_meta(s->op.c, cl);

	/* If it's a flush, we send the flush to the backing device too */
	closure_bio_submit(bio, cl, s->d);

	continue_at(cl, cached_dev_bio_complete, NULL);
}
/* Cached devices - read & write stuff */

static void cached_dev_make_request(struct request_queue *q, struct bio *bio)
{
	struct search *s;
	struct bcache_device *d = bio->bi_bdev->bd_disk->private_data;
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
	int cpu, rw = bio_data_dir(bio);

	cpu = part_stat_lock();
	part_stat_inc(cpu, &d->disk->part0, ios[rw]);
	part_stat_add(cpu, &d->disk->part0, sectors[rw], bio_sectors(bio));
	part_stat_unlock();

	bio->bi_bdev	 = dc->bdev;
	bio->bi_sector	+= dc->sb.data_offset;

	if (cached_dev_get(dc)) {
		s = search_alloc(bio, d);
		trace_bcache_request_start(s, bio);

		if (!bio->bi_size) {
			/*
			 * can't call bch_journal_meta from under
			 * generic_make_request
			 */
			continue_at_nobarrier(&s->cl,
					      cached_dev_nodata,
					      bcache_wq);
		} else {
			s->op.bypass = check_should_bypass(dc, s);

			if (rw)
				cached_dev_write(dc, s);
			else
				cached_dev_read(dc, s);
		}
	} else {
		if ((bio->bi_rw & REQ_DISCARD) &&
		    !blk_queue_discard(bdev_get_queue(dc->bdev)))
			bio_endio(bio, 0);
		else
			bch_generic_make_request(bio, &d->bio_split_hook);
	}
}
static int cached_dev_ioctl(struct bcache_device *d, fmode_t mode,
			    unsigned int cmd, unsigned long arg)
{
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
	return __blkdev_driver_ioctl(dc->bdev, mode, cmd, arg);
}
static int cached_dev_congested(void *data, int bits)
{
	struct bcache_device *d = data;
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
	struct request_queue *q = bdev_get_queue(dc->bdev);
	int ret = 0;

	if (bdi_congested(&q->backing_dev_info, bits))
		return 1;

	if (cached_dev_get(dc)) {
		unsigned i;
		struct cache *ca;

		for_each_cache(ca, d->c, i) {
			q = bdev_get_queue(ca->bdev);
			ret |= bdi_congested(&q->backing_dev_info, bits);
		}

		cached_dev_put(dc);
	}

	return ret;
}
void bch_cached_dev_request_init(struct cached_dev *dc)
{
	struct gendisk *g = dc->disk.disk;

	g->queue->make_request_fn		= cached_dev_make_request;
	g->queue->backing_dev_info.congested_fn	= cached_dev_congested;
	dc->disk.cache_miss			= cached_dev_cache_miss;
	dc->disk.ioctl				= cached_dev_ioctl;
}
/* Flash backed devices */

static int flash_dev_cache_miss(struct btree *b, struct search *s,
				struct bio *bio, unsigned sectors)
{
	struct bio_vec *bv;
	int i;

	/* Zero fill bio */

	bio_for_each_segment(bv, bio, i) {
		unsigned j = min(bv->bv_len >> 9, sectors);

		void *p = kmap(bv->bv_page);
		memset(p + bv->bv_offset, 0, j << 9);
		kunmap(bv->bv_page);

		sectors -= j;
	}

	bio_advance(bio, min(sectors << 9, bio->bi_size));

	if (!bio->bi_size)
		return MAP_DONE;

	return MAP_CONTINUE;
}
static void flash_dev_nodata(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);

	if (s->op.flush_journal)
		bch_journal_meta(s->op.c, cl);

	continue_at(cl, search_free, NULL);
}
static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
{
	struct search *s;
	struct closure *cl;
	struct bcache_device *d = bio->bi_bdev->bd_disk->private_data;
	int cpu, rw = bio_data_dir(bio);

	cpu = part_stat_lock();
	part_stat_inc(cpu, &d->disk->part0, ios[rw]);
	part_stat_add(cpu, &d->disk->part0, sectors[rw], bio_sectors(bio));
	part_stat_unlock();

	s = search_alloc(bio, d);
	cl = &s->cl;
	bio = &s->bio.bio;

	trace_bcache_request_start(s, bio);

	if (!bio->bi_size) {
		/*
		 * can't call bch_journal_meta from under
		 * generic_make_request
		 */
		continue_at_nobarrier(&s->cl,
				      flash_dev_nodata,
				      bcache_wq);
	} else if (rw) {
		bch_keybuf_check_overlapping(&s->op.c->moving_gc_keys,
					     &KEY(d->id, bio->bi_sector, 0),
					     &KEY(d->id, bio_end_sector(bio), 0));

		s->op.bypass	= (bio->bi_rw & REQ_DISCARD) != 0;
		s->writeback	= true;
		s->op.cache_bio	= bio;

		closure_call(&s->op.cl, bch_data_insert, NULL, cl);
	} else {
		closure_call(&s->op.cl, cache_lookup, NULL, cl);
	}

	continue_at(cl, search_free, NULL);
}
static int flash_dev_ioctl(struct bcache_device *d, fmode_t mode,
			   unsigned int cmd, unsigned long arg)
{
	return -ENOTTY;
}
static int flash_dev_congested(void *data, int bits)
{
	struct bcache_device *d = data;
	struct request_queue *q;
	struct cache *ca;
	unsigned i;
	int ret = 0;

	for_each_cache(ca, d->c, i) {
		q = bdev_get_queue(ca->bdev);
		ret |= bdi_congested(&q->backing_dev_info, bits);
	}

	return ret;
}
void bch_flash_dev_request_init(struct bcache_device *d)
{
	struct gendisk *g = d->disk;

	g->queue->make_request_fn		= flash_dev_make_request;
	g->queue->backing_dev_info.congested_fn	= flash_dev_congested;
	d->cache_miss				= flash_dev_cache_miss;
	d->ioctl				= flash_dev_ioctl;
}
void bch_request_exit(void)
{
#ifdef CONFIG_CGROUP_BCACHE
	cgroup_unload_subsys(&bcache_subsys);
#endif
	if (bch_search_cache)
		kmem_cache_destroy(bch_search_cache);
}
int __init bch_request_init(void)
{
	bch_search_cache = KMEM_CACHE(search, 0);
	if (!bch_search_cache)
		return -ENOMEM;

#ifdef CONFIG_CGROUP_BCACHE
	cgroup_load_subsys(&bcache_subsys);
	init_bch_cgroup(&bcache_default_cgroup);

	cgroup_add_cftypes(&bcache_subsys, bch_files);
#endif

	return 0;
}