drivers/md/bcache/journal.c
/*
 * bcache journalling code, for btree insertions
 *
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "request.h"

/*
 * Journal replay/recovery:
 *
 * This code is all driven from run_cache_set(); we first read the journal
 * entries, do some other stuff, then we mark all the keys in the journal
 * entries (same as garbage collection would), then we replay them - reinserting
 * them into the cache in precisely the same order as they appear in the
 * journal.
 *
 * We only journal keys that go in leaf nodes, which simplifies things quite a
 * bit.
 */
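/*
 * A minimal sketch of the replay sequence described above, assuming it is
 * driven from run_cache_set() with a hypothetical list head and btree_op
 * (the real caller does more work in between):
 *
 *	LIST_HEAD(journal);
 *	struct btree_op op;
 *
 *	bch_journal_read(c, &journal, &op);	read journal_replay entries
 *	bch_journal_mark(c, &journal);		mark keys, as gc would
 *	bch_journal_replay(c, &journal, &op);	reinsert keys in journal order
 */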
static void journal_read_endio(struct bio *bio, int error)
{
	struct closure *cl = bio->bi_private;
	closure_put(cl);
}
static int journal_read_bucket(struct cache *ca, struct list_head *list,
			       struct btree_op *op, unsigned bucket_index)
{
	struct journal_device *ja = &ca->journal;
	struct bio *bio = &ja->bio;

	struct journal_replay *i;
	struct jset *j, *data = ca->set->journal.w[0].data;
	unsigned len, left, offset = 0;
	int ret = 0;
	sector_t bucket = bucket_to_sector(ca->set, ca->sb.d[bucket_index]);

	pr_debug("reading %llu", (uint64_t) bucket);

	while (offset < ca->sb.bucket_size) {
reread:		left = ca->sb.bucket_size - offset;
		len = min_t(unsigned, left, PAGE_SECTORS * 8);

		bio_reset(bio);
		bio->bi_sector	= bucket + offset;
		bio->bi_bdev	= ca->bdev;
		bio->bi_rw	= READ;
		bio->bi_size	= len << 9;

		bio->bi_end_io	= journal_read_endio;
		bio->bi_private = &op->cl;
		bch_bio_map(bio, data);

		closure_bio_submit(bio, &op->cl, ca);
		closure_sync(&op->cl);

		/* This function could be simpler now since we no longer write
		 * journal entries that overlap bucket boundaries; this means
		 * the start of a bucket will always have a valid journal entry
		 * if it has any journal entries at all.
		 */

		j = data;
		while (len) {
			struct list_head *where;
			size_t blocks, bytes = set_bytes(j);

			if (j->magic != jset_magic(ca->set))
				return ret;

			if (bytes > left << 9)
				return ret;

			if (bytes > len << 9)
				goto reread;

			if (j->csum != csum_set(j))
				return ret;

			blocks = set_blocks(j, ca->set);

			while (!list_empty(list)) {
				i = list_first_entry(list,
					struct journal_replay, list);
				if (i->j.seq >= j->last_seq)
					break;
				list_del(&i->list);
				kfree(i);
			}

			list_for_each_entry_reverse(i, list, list) {
				if (j->seq == i->j.seq)
					goto next_set;

				if (j->seq < i->j.last_seq)
					goto next_set;

				if (j->seq > i->j.seq) {
					where = &i->list;
					goto add;
				}
			}

			where = list;
add:
			i = kmalloc(offsetof(struct journal_replay, j) +
				    bytes, GFP_KERNEL);
			if (!i)
				return -ENOMEM;
			memcpy(&i->j, j, bytes);
			list_add(&i->list, where);
			ret = 1;

			ja->seq[bucket_index] = j->seq;
next_set:
			offset	+= blocks * ca->sb.block_size;
			len	-= blocks * ca->sb.block_size;
			j = ((void *) j) + blocks * block_bytes(ca);
		}
	}

	return ret;
}
int bch_journal_read(struct cache_set *c, struct list_head *list,
		     struct btree_op *op)
{
#define read_bucket(b)							\
	({								\
		int ret = journal_read_bucket(ca, list, op, b);		\
		__set_bit(b, bitmap);					\
		if (ret < 0)						\
			return ret;					\
		ret;							\
	})

	struct cache *ca;
	unsigned iter;

	for_each_cache(ca, c, iter) {
		struct journal_device *ja = &ca->journal;
		unsigned long bitmap[SB_JOURNAL_BUCKETS / BITS_PER_LONG];
		unsigned i, l, r, m;
		uint64_t seq;

		bitmap_zero(bitmap, SB_JOURNAL_BUCKETS);
		pr_debug("%u journal buckets", ca->sb.njournal_buckets);

		/* Read journal buckets ordered by golden ratio hash to quickly
		 * find a sequence of buckets with valid journal entries
		 * (see the sketch after this function)
		 */
		for (i = 0; i < ca->sb.njournal_buckets; i++) {
			l = (i * 2654435769U) % ca->sb.njournal_buckets;

			if (test_bit(l, bitmap))
				break;

			if (read_bucket(l))
				goto bsearch;
		}

		/* If that fails, check all the buckets we haven't checked
		 * already
		 */
		pr_debug("falling back to linear search");

		for (l = 0; l < ca->sb.njournal_buckets; l++) {
			if (test_bit(l, bitmap))
				continue;

			if (read_bucket(l))
				goto bsearch;
		}
bsearch:
		/* Binary search */
		m = r = find_next_bit(bitmap, ca->sb.njournal_buckets, l + 1);
		pr_debug("starting binary search, l %u r %u", l, r);

		while (l + 1 < r) {
			seq = list_entry(list->prev, struct journal_replay,
					 list)->j.seq;

			m = (l + r) >> 1;
			read_bucket(m);

			if (seq != list_entry(list->prev, struct journal_replay,
					      list)->j.seq)
				l = m;
			else
				r = m;
		}

		/* Read buckets in reverse order until we stop finding more
		 * journal entries
		 */
		pr_debug("finishing up");
		l = m;

		while (1) {
			if (!l--)
				l = ca->sb.njournal_buckets - 1;

			if (l == m)
				break;

			if (test_bit(l, bitmap))
				continue;

			if (!read_bucket(l))
				break;
		}

		seq = 0;

		for (i = 0; i < ca->sb.njournal_buckets; i++)
			if (ja->seq[i] > seq) {
				seq = ja->seq[i];
				ja->cur_idx = ja->discard_idx =
					ja->last_idx = i;
			}
	}

	c->journal.seq = list_entry(list->prev,
				    struct journal_replay,
				    list)->j.seq;

	return 0;
#undef read_bucket
}
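/*
 * Illustrative user-space sketch (not part of the kernel build) of the
 * golden-ratio probe order used by bch_journal_read() above; the bucket
 * count of 10 is a hypothetical example value.
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned njournal_buckets = 10;	/* hypothetical */
	unsigned i;

	for (i = 0; i < njournal_buckets; i++)
		printf("probe %u -> bucket %u\n",
		       i, (i * 2654435769U) % njournal_buckets);
	return 0;
}
#endif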
void bch_journal_mark(struct cache_set *c, struct list_head *list)
{
	atomic_t p = { 0 };
	struct bkey *k;
	struct journal_replay *i;
	struct journal *j = &c->journal;
	uint64_t last = j->seq;

	/*
	 * journal.pin should never fill up - we never write a journal
	 * entry when it would fill up. But if for some reason it does, we
	 * iterate over the list in reverse order so that we can just skip that
	 * refcount instead of bugging.
	 */

	list_for_each_entry_reverse(i, list, list) {
		BUG_ON(last < i->j.seq);
		i->pin = NULL;

		while (last-- != i->j.seq)
			if (fifo_free(&j->pin) > 1) {
				fifo_push_front(&j->pin, p);
				atomic_set(&fifo_front(&j->pin), 0);
			}

		if (fifo_free(&j->pin) > 1) {
			fifo_push_front(&j->pin, p);
			i->pin = &fifo_front(&j->pin);
			atomic_set(i->pin, 1);
		}

		for (k = i->j.start;
		     k < end(&i->j);
		     k = bkey_next(k)) {
			unsigned j;

			for (j = 0; j < KEY_PTRS(k); j++) {
				struct bucket *g = PTR_BUCKET(c, k, j);
				atomic_inc(&g->pin);

				if (g->prio == BTREE_PRIO &&
				    !ptr_stale(c, k, j))
					g->prio = INITIAL_PRIO;
			}

			__bch_btree_mark_key(c, 0, k);
		}
	}
}
int bch_journal_replay(struct cache_set *s, struct list_head *list,
		       struct btree_op *op)
{
	int ret = 0, keys = 0, entries = 0;
	struct bkey *k;
	struct journal_replay *i =
		list_entry(list->prev, struct journal_replay, list);

	uint64_t start = i->j.last_seq, end = i->j.seq, n = start;

	list_for_each_entry(i, list, list) {
		BUG_ON(i->pin && atomic_read(i->pin) != 1);

		if (n != i->j.seq)
			pr_err(
		"journal entries %llu-%llu missing! (replaying %llu-%llu)\n",
		n, i->j.seq - 1, start, end);

		for (k = i->j.start;
		     k < end(&i->j);
		     k = bkey_next(k)) {
			pr_debug("%s", pkey(k));
			bkey_copy(op->keys.top, k);
			bch_keylist_push(&op->keys);

			op->journal = i->pin;
			atomic_inc(op->journal);

			ret = bch_btree_insert(op, s);
			if (ret)
				goto err;

			BUG_ON(!bch_keylist_empty(&op->keys));
			keys++;

			cond_resched();
		}

		if (i->pin)
			atomic_dec(i->pin);
		n = i->j.seq + 1;
		entries++;
	}

	pr_info("journal replay done, %i keys in %i entries, seq %llu",
		keys, entries, end);

	while (!list_empty(list)) {
		i = list_first_entry(list, struct journal_replay, list);
		list_del(&i->list);
		kfree(i);
	}
err:
	closure_sync(&op->cl);
	return ret;
}
static void btree_flush_write(struct cache_set *c)
{
	/*
	 * Try to find the btree node that references the oldest journal
	 * entry; best is our current candidate and is locked if non NULL:
	 */
	struct btree *b, *best = NULL;
	unsigned iter;

	for_each_cached_btree(b, c, iter) {
		if (!down_write_trylock(&b->lock))
			continue;

		if (!btree_node_dirty(b) ||
		    !btree_current_write(b)->journal) {
			rw_unlock(true, b);
			continue;
		}

		if (!best)
			best = b;
		else if (journal_pin_cmp(c,
					 btree_current_write(best),
					 btree_current_write(b))) {
			rw_unlock(true, best);
			best = b;
		} else
			rw_unlock(true, b);
	}

	if (best)
		goto out;

	/* We can't find the best btree node, just pick the first */
	list_for_each_entry(b, &c->btree_cache, list)
		if (!b->level && btree_node_dirty(b)) {
			best = b;
			rw_lock(true, best, best->level);
			goto found;
		}

out:
	if (!best)
		return;
found:
	if (btree_node_dirty(best))
		bch_btree_write(best, true, NULL);
	rw_unlock(true, best);
}
#define last_seq(j)	((j)->seq - fifo_used(&(j)->pin) + 1)
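/*
 * Worked example: if j->seq is 100 and three entries are still sitting in
 * j->pin, then last_seq(j) is 100 - 3 + 1 = 98, i.e. the sequence number
 * of the oldest journal entry that is still pinned.
 */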
static void journal_discard_endio(struct bio *bio, int error)
{
	struct journal_device *ja =
		container_of(bio, struct journal_device, discard_bio);
	struct cache *ca = container_of(ja, struct cache, journal);

	atomic_set(&ja->discard_in_flight, DISCARD_DONE);

	closure_wake_up(&ca->set->journal.wait);
	closure_put(&ca->set->cl);
}

static void journal_discard_work(struct work_struct *work)
{
	struct journal_device *ja =
		container_of(work, struct journal_device, discard_work);

	submit_bio(0, &ja->discard_bio);
}
static void do_journal_discard(struct cache *ca)
{
	struct journal_device *ja = &ca->journal;
	struct bio *bio = &ja->discard_bio;

	if (!ca->discard) {
		ja->discard_idx = ja->last_idx;
		return;
	}

	/* switch on the state itself, not a comparison against one state */
	switch (atomic_read(&ja->discard_in_flight)) {
	case DISCARD_IN_FLIGHT:
		return;

	case DISCARD_DONE:
		ja->discard_idx = (ja->discard_idx + 1) %
			ca->sb.njournal_buckets;

		atomic_set(&ja->discard_in_flight, DISCARD_READY);
		/* fallthrough */

	case DISCARD_READY:
		if (ja->discard_idx == ja->last_idx)
			return;

		atomic_set(&ja->discard_in_flight, DISCARD_IN_FLIGHT);

		bio_init(bio);
		bio->bi_sector		= bucket_to_sector(ca->set,
						   ca->sb.d[ja->discard_idx]);
		bio->bi_bdev		= ca->bdev;
		bio->bi_rw		= REQ_WRITE|REQ_DISCARD;
		bio->bi_max_vecs	= 1;
		bio->bi_io_vec		= bio->bi_inline_vecs;
		bio->bi_size		= bucket_bytes(ca);
		bio->bi_end_io		= journal_discard_endio;

		closure_get(&ca->set->cl);
		INIT_WORK(&ja->discard_work, journal_discard_work);
		schedule_work(&ja->discard_work);
	}
}
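/*
 * Illustrative trace of the discard state machine above, assuming
 * ca->discard is enabled and a journal bucket has just been freed:
 *
 *	DISCARD_READY		do_journal_discard() builds the discard bio,
 *				sets DISCARD_IN_FLIGHT and schedules discard_work
 *	DISCARD_IN_FLIGHT	further calls return immediately
 *	DISCARD_DONE		set by journal_discard_endio(); the next call
 *				advances discard_idx and drops back to
 *				DISCARD_READY
 */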
static void journal_reclaim(struct cache_set *c)
{
	struct bkey *k = &c->journal.key;
	struct cache *ca;
	uint64_t last_seq;
	unsigned iter, n = 0;
	atomic_t p;

	while (!atomic_read(&fifo_front(&c->journal.pin)))
		fifo_pop(&c->journal.pin, p);

	last_seq = last_seq(&c->journal);

	/* Update last_idx */

	for_each_cache(ca, c, iter) {
		struct journal_device *ja = &ca->journal;

		while (ja->last_idx != ja->cur_idx &&
		       ja->seq[ja->last_idx] < last_seq)
			ja->last_idx = (ja->last_idx + 1) %
				ca->sb.njournal_buckets;
	}

	for_each_cache(ca, c, iter)
		do_journal_discard(ca);

	if (c->journal.blocks_free)
		return;

	/*
	 * Allocate:
	 * XXX: Sort by free journal space
	 */

	for_each_cache(ca, c, iter) {
		struct journal_device *ja = &ca->journal;
		unsigned next = (ja->cur_idx + 1) % ca->sb.njournal_buckets;

		/* No space available on this device */
		if (next == ja->discard_idx)
			continue;

		ja->cur_idx = next;
		k->ptr[n++] = PTR(0,
				  bucket_to_sector(c, ca->sb.d[ja->cur_idx]),
				  ca->sb.nr_this_dev);
	}

	bkey_init(k);
	SET_KEY_PTRS(k, n);

	if (n)
		c->journal.blocks_free = c->sb.bucket_size >> c->block_bits;

	if (!journal_full(&c->journal))
		__closure_wake_up(&c->journal.wait);
}
void bch_journal_next(struct journal *j)
{
	atomic_t p = { 1 };

	j->cur = (j->cur == j->w)
		? &j->w[1]
		: &j->w[0];

	/*
	 * The fifo_push() needs to happen at the same time as j->seq is
	 * incremented for last_seq() to be calculated correctly
	 */
	BUG_ON(!fifo_push(&j->pin, p));
	atomic_set(&fifo_back(&j->pin), 1);

	j->cur->data->seq	= ++j->seq;
	j->cur->need_write	= false;
	j->cur->data->keys	= 0;

	if (fifo_full(&j->pin))
		pr_debug("journal_pin full (%zu)", fifo_used(&j->pin));
}
static void journal_write_endio(struct bio *bio, int error)
{
	struct journal_write *w = bio->bi_private;

	cache_set_err_on(error, w->c, "journal io error");
	closure_put(&w->c->journal.io.cl);
}

static void journal_write(struct closure *);

static void journal_write_done(struct closure *cl)
{
	struct journal *j = container_of(cl, struct journal, io.cl);
	struct cache_set *c = container_of(j, struct cache_set, journal);

	struct journal_write *w = (j->cur == j->w)
		? &j->w[1]
		: &j->w[0];

	__closure_wake_up(&w->wait);

	if (c->journal_delay_ms)
		closure_delay(&j->io, msecs_to_jiffies(c->journal_delay_ms));

	continue_at(cl, journal_write, system_wq);
}
static void journal_write_unlocked(struct closure *cl)
	__releases(c->journal.lock)
{
	struct cache_set *c = container_of(cl, struct cache_set, journal.io.cl);
	struct cache *ca;
	struct journal_write *w = c->journal.cur;
	struct bkey *k = &c->journal.key;
	unsigned i, sectors = set_blocks(w->data, c) * c->sb.block_size;

	struct bio *bio;
	struct bio_list list;
	bio_list_init(&list);

	if (!w->need_write) {
		/*
		 * XXX: have to unlock closure before we unlock journal lock,
		 * else we race with bch_journal(). But this way we race
		 * against cache set unregister. Doh.
		 */
		set_closure_fn(cl, NULL, NULL);
		closure_sub(cl, CLOSURE_RUNNING + 1);
		spin_unlock(&c->journal.lock);
		return;
	} else if (journal_full(&c->journal)) {
		journal_reclaim(c);
		spin_unlock(&c->journal.lock);

		btree_flush_write(c);
		continue_at(cl, journal_write, system_wq);
	}

	c->journal.blocks_free -= set_blocks(w->data, c);

	w->data->btree_level = c->root->level;

	bkey_copy(&w->data->btree_root, &c->root->key);
	bkey_copy(&w->data->uuid_bucket, &c->uuid_bucket);

	for_each_cache(ca, c, i)
		w->data->prio_bucket[ca->sb.nr_this_dev] = ca->prio_buckets[0];

	w->data->magic		= jset_magic(c);
	w->data->version	= BCACHE_JSET_VERSION;
	w->data->last_seq	= last_seq(&c->journal);
	w->data->csum		= csum_set(w->data);

	for (i = 0; i < KEY_PTRS(k); i++) {
		ca = PTR_CACHE(c, k, i);
		bio = &ca->journal.bio;

		atomic_long_add(sectors, &ca->meta_sectors_written);

		bio_reset(bio);
		bio->bi_sector	= PTR_OFFSET(k, i);
		bio->bi_bdev	= ca->bdev;
		bio->bi_rw	= REQ_WRITE|REQ_SYNC|REQ_META|REQ_FLUSH;
		bio->bi_size	= sectors << 9;

		bio->bi_end_io	= journal_write_endio;
		bio->bi_private = w;
		bch_bio_map(bio, w->data);

		trace_bcache_journal_write(bio);
		bio_list_add(&list, bio);

		SET_PTR_OFFSET(k, i, PTR_OFFSET(k, i) + sectors);

		ca->journal.seq[ca->journal.cur_idx] = w->data->seq;
	}

	atomic_dec_bug(&fifo_back(&c->journal.pin));
	bch_journal_next(&c->journal);
	journal_reclaim(c);

	spin_unlock(&c->journal.lock);

	while ((bio = bio_list_pop(&list)))
		closure_bio_submit(bio, cl, c->cache[0]);

	continue_at(cl, journal_write_done, NULL);
}
static void journal_write(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, journal.io.cl);

	spin_lock(&c->journal.lock);
	journal_write_unlocked(cl);
}

static void __journal_try_write(struct cache_set *c, bool noflush)
	__releases(c->journal.lock)
{
	struct closure *cl = &c->journal.io.cl;

	if (!closure_trylock(cl, &c->cl))
		spin_unlock(&c->journal.lock);
	else if (noflush && journal_full(&c->journal)) {
		spin_unlock(&c->journal.lock);
		continue_at(cl, journal_write, system_wq);
	} else
		journal_write_unlocked(cl);
}

#define journal_try_write(c)	__journal_try_write(c, false)
void bch_journal_meta(struct cache_set *c, struct closure *cl)
{
	struct journal_write *w;

	if (CACHE_SYNC(&c->sb)) {
		spin_lock(&c->journal.lock);

		w = c->journal.cur;
		w->need_write = true;

		if (cl)
			BUG_ON(!closure_wait(&w->wait, cl));

		__journal_try_write(c, true);
	}
}
/*
 * Entry point to the journalling code - bio_insert() and btree_invalidate()
 * pass bch_journal() a list of keys to be journalled, and then
 * bch_journal() hands those same keys off to btree_insert_async()
 */
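/*
 * Minimal sketch of a caller (schematic; not the exact code in
 * bio_insert()/btree_invalidate()): queue the keys on op->keys, then
 * continue at bch_journal(), which hands them on to the btree:
 *
 *	bkey_copy(op->keys.top, k);
 *	bch_keylist_push(&op->keys);
 *	continue_at(cl, bch_journal, bcache_wq);
 */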
void bch_journal(struct closure *cl)
{
	struct btree_op *op = container_of(cl, struct btree_op, cl);
	struct cache_set *c = op->c;
	struct journal_write *w;
	size_t b, n = ((uint64_t *) op->keys.top) - op->keys.list;

	if (op->type != BTREE_INSERT ||
	    !CACHE_SYNC(&c->sb))
		goto out;

	/*
	 * If we're looping because we errored, might already be waiting on
	 * another journal write:
	 */
	while (atomic_read(&cl->parent->remaining) & CLOSURE_WAITING)
		closure_sync(cl->parent);

	spin_lock(&c->journal.lock);

	if (journal_full(&c->journal)) {
		/* XXX: tracepoint */
		closure_wait(&c->journal.wait, cl);

		journal_reclaim(c);
		spin_unlock(&c->journal.lock);

		btree_flush_write(c);
		continue_at(cl, bch_journal, bcache_wq);
	}

	w = c->journal.cur;
	w->need_write = true;
	b = __set_blocks(w->data, w->data->keys + n, c);

	if (b * c->sb.block_size > PAGE_SECTORS << JSET_BITS ||
	    b > c->journal.blocks_free) {
		/* XXX: If we were inserting so many keys that they won't fit in
		 * an _empty_ journal write, we'll deadlock. For now, handle
		 * this in bch_keylist_realloc() - but something to think about.
		 */
		BUG_ON(!w->data->keys);

		/* XXX: tracepoint */
		BUG_ON(!closure_wait(&w->wait, cl));

		closure_flush(&c->journal.io);

		journal_try_write(c);
		continue_at(cl, bch_journal, bcache_wq);
	}

	memcpy(end(w->data), op->keys.list, n * sizeof(uint64_t));
	w->data->keys += n;

	op->journal = &fifo_back(&c->journal.pin);
	atomic_inc(op->journal);

	if (op->flush_journal) {
		closure_flush(&c->journal.io);
		closure_wait(&w->wait, cl->parent);
	}

	journal_try_write(c);
out:
	bch_btree_insert_async(cl);
}
void bch_journal_free(struct cache_set *c)
{
	free_pages((unsigned long) c->journal.w[1].data, JSET_BITS);
	free_pages((unsigned long) c->journal.w[0].data, JSET_BITS);
	free_fifo(&c->journal.pin);
}
int bch_journal_alloc(struct cache_set *c)
{
	struct journal *j = &c->journal;

	closure_init_unlocked(&j->io);
	spin_lock_init(&j->lock);

	c->journal_delay_ms = 100;

	j->w[0].c = c;
	j->w[1].c = c;

	if (!(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL)) ||
	    !(j->w[0].data = (void *) __get_free_pages(GFP_KERNEL, JSET_BITS)) ||
	    !(j->w[1].data = (void *) __get_free_pages(GFP_KERNEL, JSET_BITS)))
		return -ENOMEM;

	return 0;
}