// SPDX-License-Identifier: GPL-2.0
/*
 * bcache journalling code, for btree insertions
 *
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "extents.h"

#include <trace/events/bcache.h>

/*
 * Journal replay/recovery:
 *
 * This code is all driven from run_cache_set(); we first read the journal
 * entries, do some other stuff, then we mark all the keys in the journal
 * entries (same as garbage collection would), then we replay them - reinserting
 * them into the cache in precisely the same order as they appear in the
 * journal.
 *
 * We only journal keys that go in leaf nodes, which simplifies things quite a
 * bit.
 */

static void journal_read_endio(struct bio *bio)
{
	struct closure *cl = bio->bi_private;
	closure_put(cl);
}

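/*
 * Read the journal entries in a single journal bucket, decoding each jset
 * in place and splicing valid entries into @list in sequence order.
 * Returns 1 if any entries were added, 0 if the bucket held none, and
 * -ENOMEM on allocation failure.
 */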
static int journal_read_bucket(struct cache *ca, struct list_head *list,
			       unsigned bucket_index)
{
	struct journal_device *ja = &ca->journal;
	struct bio *bio = &ja->bio;

	struct journal_replay *i;
	struct jset *j, *data = ca->set->journal.w[0].data;
	struct closure cl;
	unsigned len, left, offset = 0;
	int ret = 0;
	sector_t bucket = bucket_to_sector(ca->set, ca->sb.d[bucket_index]);

	closure_init_stack(&cl);

	pr_debug("reading %u", bucket_index);

	while (offset < ca->sb.bucket_size) {
reread:		left = ca->sb.bucket_size - offset;
		len = min_t(unsigned, left, PAGE_SECTORS << JSET_BITS);

		bio_reset(bio);
		bio->bi_iter.bi_sector	= bucket + offset;
		bio_set_dev(bio, ca->bdev);
		bio->bi_iter.bi_size	= len << 9;

		bio->bi_end_io	= journal_read_endio;
		bio->bi_private = &cl;
		bio_set_op_attrs(bio, REQ_OP_READ, 0);
		bch_bio_map(bio, data);

		closure_bio_submit(bio, &cl);
		closure_sync(&cl);

		/* This function could be simpler now since we no longer write
		 * journal entries that overlap bucket boundaries; this means
		 * the start of a bucket will always have a valid journal entry
		 * if it has any journal entries at all.
		 */

		j = data;
		while (len) {
			struct list_head *where;
			size_t blocks, bytes = set_bytes(j);

			if (j->magic != jset_magic(&ca->sb)) {
				pr_debug("%u: bad magic", bucket_index);
				return ret;
			}

			if (bytes > left << 9 ||
			    bytes > PAGE_SIZE << JSET_BITS) {
				pr_info("%u: too big, %zu bytes, offset %u",
					bucket_index, bytes, offset);
				return ret;
			}

			if (bytes > len << 9)
				goto reread;

			if (j->csum != csum_set(j)) {
				pr_info("%u: bad csum, %zu bytes, offset %u",
					bucket_index, bytes, offset);
				return ret;
			}

			blocks = set_blocks(j, block_bytes(ca->set));

			while (!list_empty(list)) {
				i = list_first_entry(list,
					struct journal_replay, list);
				if (i->j.seq >= j->last_seq)
					break;
				list_del(&i->list);
				kfree(i);
			}

			list_for_each_entry_reverse(i, list, list) {
				if (j->seq == i->j.seq)
					goto next_set;

				if (j->seq < i->j.last_seq)
					goto next_set;

				if (j->seq > i->j.seq) {
					where = &i->list;
					goto add;
				}
			}

			where = list;
add:
			i = kmalloc(offsetof(struct journal_replay, j) +
				    bytes, GFP_KERNEL);
			if (!i)
				return -ENOMEM;
			memcpy(&i->j, j, bytes);
			list_add(&i->list, where);
			ret = 1;

			ja->seq[bucket_index] = j->seq;
next_set:
			offset	+= blocks * ca->sb.block_size;
			len	-= blocks * ca->sb.block_size;
			j = ((void *) j) + blocks * block_bytes(ca);
		}
	}

	return ret;
}

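/*
 * Find all the journal entries on this cache set: probe each cache's
 * journal buckets in golden ratio hash order until one with entries is
 * found, fall back to a linear scan, then binary search for the bucket
 * holding the newest sequence number and read backwards from there.
 * Entries end up on @list in sequence order.
 */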
int bch_journal_read(struct cache_set *c, struct list_head *list)
{
#define read_bucket(b)							\
	({								\
		int ret = journal_read_bucket(ca, list, b);		\
		__set_bit(b, bitmap);					\
		if (ret < 0)						\
			return ret;					\
		ret;							\
	})

	struct cache *ca;
	unsigned iter;

	for_each_cache(ca, c, iter) {
		struct journal_device *ja = &ca->journal;
		DECLARE_BITMAP(bitmap, SB_JOURNAL_BUCKETS);
		unsigned i, l, r, m;
		uint64_t seq;

		bitmap_zero(bitmap, SB_JOURNAL_BUCKETS);
		pr_debug("%u journal buckets", ca->sb.njournal_buckets);

		/*
		 * Read journal buckets ordered by golden ratio hash to quickly
		 * find a sequence of buckets with valid journal entries
		 */
		for (i = 0; i < ca->sb.njournal_buckets; i++) {
			/*
			 * We must try index l == 0 first, for
			 * correctness: the journal buckets form a
			 * circular buffer which may have wrapped around.
			 */
			l = (i * 2654435769U) % ca->sb.njournal_buckets;

			if (test_bit(l, bitmap))
				break;

			if (read_bucket(l))
				goto bsearch;
		}

		/*
		 * If that fails, check all the buckets we haven't checked
		 * already
		 */
		pr_debug("falling back to linear search");

		for (l = find_first_zero_bit(bitmap, ca->sb.njournal_buckets);
		     l < ca->sb.njournal_buckets;
		     l = find_next_zero_bit(bitmap, ca->sb.njournal_buckets, l + 1))
			if (read_bucket(l))
				goto bsearch;

		/* no journal entries on this device? */
		if (l == ca->sb.njournal_buckets)
			continue;
bsearch:
		BUG_ON(list_empty(list));

		/* Binary search */
		m = l;
		r = find_next_bit(bitmap, ca->sb.njournal_buckets, l + 1);
		pr_debug("starting binary search, l %u r %u", l, r);

		while (l + 1 < r) {
			seq = list_entry(list->prev, struct journal_replay,
					 list)->j.seq;

			m = (l + r) >> 1;
			read_bucket(m);

			if (seq != list_entry(list->prev, struct journal_replay,
					      list)->j.seq)
				l = m;
			else
				r = m;
		}

		/*
		 * Read buckets in reverse order until we stop finding more
		 * journal entries
		 */
		pr_debug("finishing up: m %u njournal_buckets %u",
			 m, ca->sb.njournal_buckets);
		l = m;

		while (1) {
			if (!l--)
				l = ca->sb.njournal_buckets - 1;

			if (l == m)
				break;

			if (test_bit(l, bitmap))
				continue;

			if (!read_bucket(l))
				break;
		}

		seq = 0;

		for (i = 0; i < ca->sb.njournal_buckets; i++)
			if (ja->seq[i] > seq) {
				seq = ja->seq[i];
				/*
				 * When journal_reclaim() goes to allocate for
				 * the first time, it'll use the bucket after
				 * ja->cur_idx
				 */
				ja->cur_idx = i;
				ja->last_idx = ja->discard_idx = (i + 1) %
					ca->sb.njournal_buckets;
			}
	}

	if (!list_empty(list))
		c->journal.seq = list_entry(list->prev,
					    struct journal_replay,
					    list)->j.seq;

	return 0;
#undef read_bucket
}

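/*
 * Mark the keys from every replayed journal entry so the allocator and
 * garbage collector treat their buckets as in use, and take a journal pin
 * for each entry so it isn't reclaimed before it has been replayed.
 */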
void bch_journal_mark(struct cache_set *c, struct list_head *list)
{
	atomic_t p = { 0 };
	struct bkey *k;
	struct journal_replay *i;
	struct journal *j = &c->journal;
	uint64_t last = j->seq;

	/*
	 * journal.pin should never fill up - we never write a journal
	 * entry when it would fill up. But if for some reason it does, we
	 * iterate over the list in reverse order so that we can just skip that
	 * refcount instead of bugging.
	 */

	list_for_each_entry_reverse(i, list, list) {
		BUG_ON(last < i->j.seq);
		i->pin = NULL;

		while (last-- != i->j.seq)
			if (fifo_free(&j->pin) > 1) {
				fifo_push_front(&j->pin, p);
				atomic_set(&fifo_front(&j->pin), 0);
			}

		if (fifo_free(&j->pin) > 1) {
			fifo_push_front(&j->pin, p);
			i->pin = &fifo_front(&j->pin);
			atomic_set(i->pin, 1);
		}

		for (k = i->j.start;
		     k < bset_bkey_last(&i->j);
		     k = bkey_next(k))
			if (!__bch_extent_invalid(c, k)) {
				unsigned j;

				for (j = 0; j < KEY_PTRS(k); j++)
					if (ptr_available(c, k, j))
						atomic_inc(&PTR_BUCKET(c, k, j)->pin);

				bch_initial_mark_key(c, 0, k);
			}
	}
}

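/*
 * Reinsert the journalled keys into the btree, in the exact order they
 * appear on @list, dropping each entry's journal pin as it completes.
 * Frees the journal_replay structures before returning.
 */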
int bch_journal_replay(struct cache_set *s, struct list_head *list)
{
	int ret = 0, keys = 0, entries = 0;
	struct bkey *k;
	struct journal_replay *i =
		list_entry(list->prev, struct journal_replay, list);

	uint64_t start = i->j.last_seq, end = i->j.seq, n = start;
	struct keylist keylist;

	list_for_each_entry(i, list, list) {
		BUG_ON(i->pin && atomic_read(i->pin) != 1);

		cache_set_err_on(n != i->j.seq, s,
				 "bcache: journal entries %llu-%llu missing! (replaying %llu-%llu)",
				 n, i->j.seq - 1, start, end);

		for (k = i->j.start;
		     k < bset_bkey_last(&i->j);
		     k = bkey_next(k)) {
			trace_bcache_journal_replay_key(k);

			bch_keylist_init_single(&keylist, k);

			ret = bch_btree_insert(s, &keylist, i->pin, NULL);
			if (ret)
				goto err;

			BUG_ON(!bch_keylist_empty(&keylist));
			keys++;

			cond_resched();
		}

		if (i->pin)
			atomic_dec(i->pin);
		n = i->j.seq + 1;
		entries++;
	}

	pr_info("journal replay done, %i keys in %i entries, seq %llu",
		keys, entries, end);
err:
	while (!list_empty(list)) {
		i = list_first_entry(list, struct journal_replay, list);
		list_del(&i->list);
		kfree(i);
	}

	return ret;
}

/* Journalling */

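/*
 * Write out the dirty btree node that pins the oldest journal entry, so
 * that journal_reclaim() can free up journal buckets.
 */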
static void btree_flush_write(struct cache_set *c)
{
	/*
	 * Try to find the btree node that references the oldest journal
	 * entry; best is our current candidate, locked (and rechecked)
	 * only once the scan is done:
	 */
	struct btree *b, *best;
	unsigned i;
retry:
	best = NULL;

	for_each_cached_btree(b, c, i)
		if (btree_current_write(b)->journal) {
			if (!best)
				best = b;
			else if (journal_pin_cmp(c,
					btree_current_write(best)->journal,
					btree_current_write(b)->journal)) {
				best = b;
			}
		}

	b = best;
	if (b) {
		mutex_lock(&b->write_lock);
		if (!btree_current_write(b)->journal) {
			mutex_unlock(&b->write_lock);
			/* We raced */
			goto retry;
		}

		__bch_btree_node_write(b, NULL);
		mutex_unlock(&b->write_lock);
	}
}

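/* Sequence number of the oldest journal entry still pinned in the fifo */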
#define last_seq(j)	((j)->seq - fifo_used(&(j)->pin) + 1)

static void journal_discard_endio(struct bio *bio)
{
	struct journal_device *ja =
		container_of(bio, struct journal_device, discard_bio);
	struct cache *ca = container_of(ja, struct cache, journal);

	atomic_set(&ja->discard_in_flight, DISCARD_DONE);

	closure_wake_up(&ca->set->journal.wait);
	closure_put(&ca->set->cl);
}

static void journal_discard_work(struct work_struct *work)
{
	struct journal_device *ja =
		container_of(work, struct journal_device, discard_work);

	submit_bio(&ja->discard_bio);
}

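/*
 * Discard reclaimed journal buckets between discard_idx and last_idx, one
 * at a time, driving the DISCARD_READY -> DISCARD_IN_FLIGHT -> DISCARD_DONE
 * state machine. If discards are disabled, just advance discard_idx.
 */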
static void do_journal_discard(struct cache *ca)
{
	struct journal_device *ja = &ca->journal;
	struct bio *bio = &ja->discard_bio;

	if (!ca->discard) {
		ja->discard_idx = ja->last_idx;
		return;
	}

	switch (atomic_read(&ja->discard_in_flight)) {
	case DISCARD_IN_FLIGHT:
		return;

	case DISCARD_DONE:
		ja->discard_idx = (ja->discard_idx + 1) %
			ca->sb.njournal_buckets;

		atomic_set(&ja->discard_in_flight, DISCARD_READY);
		/* fallthrough */

	case DISCARD_READY:
		if (ja->discard_idx == ja->last_idx)
			return;

		atomic_set(&ja->discard_in_flight, DISCARD_IN_FLIGHT);

		bio_init(bio, bio->bi_inline_vecs, 1);
		bio_set_op_attrs(bio, REQ_OP_DISCARD, 0);
		bio->bi_iter.bi_sector	= bucket_to_sector(ca->set,
						ca->sb.d[ja->discard_idx]);
		bio_set_dev(bio, ca->bdev);
		bio->bi_iter.bi_size	= bucket_bytes(ca);
		bio->bi_end_io		= journal_discard_endio;

		closure_get(&ca->set->cl);
		INIT_WORK(&ja->discard_work, journal_discard_work);
		schedule_work(&ja->discard_work);
	}
}

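/*
 * Free up journal buckets whose entries have all been flushed to the
 * btree: pop zeroed pins, advance each device's last_idx past buckets
 * older than last_seq, kick off discards, and if the current journal
 * entry is out of space, allocate the next bucket on each device.
 */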
static void journal_reclaim(struct cache_set *c)
{
	struct bkey *k = &c->journal.key;
	struct cache *ca;
	uint64_t last_seq;
	unsigned iter, n = 0;
	atomic_t p;

	while (!atomic_read(&fifo_front(&c->journal.pin)))
		fifo_pop(&c->journal.pin, p);

	last_seq = last_seq(&c->journal);

	/* Update last_idx */

	for_each_cache(ca, c, iter) {
		struct journal_device *ja = &ca->journal;

		while (ja->last_idx != ja->cur_idx &&
		       ja->seq[ja->last_idx] < last_seq)
			ja->last_idx = (ja->last_idx + 1) %
				ca->sb.njournal_buckets;
	}

	for_each_cache(ca, c, iter)
		do_journal_discard(ca);

	if (c->journal.blocks_free)
		goto out;

	/*
	 * Allocate:
	 * XXX: Sort by free journal space
	 */

	for_each_cache(ca, c, iter) {
		struct journal_device *ja = &ca->journal;
		unsigned next = (ja->cur_idx + 1) % ca->sb.njournal_buckets;

		/* No space available on this device */
		if (next == ja->discard_idx)
			continue;

		ja->cur_idx = next;
		k->ptr[n++] = MAKE_PTR(0,
				  bucket_to_sector(c, ca->sb.d[ja->cur_idx]),
				  ca->sb.nr_this_dev);
	}

	bkey_init(k);
	SET_KEY_PTRS(k, n);

	if (n)
		c->journal.blocks_free = c->sb.bucket_size >> c->block_bits;
out:
	if (!journal_full(&c->journal))
		__closure_wake_up(&c->journal.wait);
}

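/*
 * Swap to the other journal write buffer and open a new journal entry:
 * bump the sequence number, push a fresh pin, and reset the new entry's
 * state.
 */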
void bch_journal_next(struct journal *j)
{
	atomic_t p = { 1 };

	j->cur = (j->cur == j->w)
		? &j->w[1]
		: &j->w[0];

	/*
	 * The fifo_push() needs to happen at the same time as j->seq is
	 * incremented for last_seq() to be calculated correctly
	 */
	BUG_ON(!fifo_push(&j->pin, p));
	atomic_set(&fifo_back(&j->pin), 1);

	j->cur->data->seq	= ++j->seq;
	j->cur->dirty		= false;
	j->cur->need_write	= false;
	j->cur->data->keys	= 0;

	if (fifo_full(&j->pin))
		pr_debug("journal_pin full (%zu)", fifo_used(&j->pin));
}

static void journal_write_endio(struct bio *bio)
{
	struct journal_write *w = bio->bi_private;

	cache_set_err_on(bio->bi_status, w->c, "journal io error");
	closure_put(&w->c->journal.io);
}

static void journal_write(struct closure *);

static void journal_write_done(struct closure *cl)
{
	struct journal *j = container_of(cl, struct journal, io);
	struct journal_write *w = (j->cur == j->w)
		? &j->w[1]
		: &j->w[0];

	__closure_wake_up(&w->wait);
	continue_at_nobarrier(cl, journal_write, system_wq);
}

static void journal_write_unlock(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, journal.io);

	c->journal.io_in_flight = 0;
	spin_unlock(&c->journal.lock);
}

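/*
 * Write out the current journal entry. Called with journal.lock held,
 * which it drops; if the journal is full it reclaims and retries, and
 * otherwise submits one FUA/preflush write per journal bucket pointer.
 */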
static void journal_write_unlocked(struct closure *cl)
	__releases(c->journal.lock)
{
	struct cache_set *c = container_of(cl, struct cache_set, journal.io);
	struct cache *ca;
	struct journal_write *w = c->journal.cur;
	struct bkey *k = &c->journal.key;
	unsigned i, sectors = set_blocks(w->data, block_bytes(c)) *
		c->sb.block_size;

	struct bio *bio;
	struct bio_list list;
	bio_list_init(&list);

	if (!w->need_write) {
		closure_return_with_destructor(cl, journal_write_unlock);
		return;
	} else if (journal_full(&c->journal)) {
		journal_reclaim(c);
		spin_unlock(&c->journal.lock);

		btree_flush_write(c);
		continue_at(cl, journal_write, system_wq);
		return;
	}

	c->journal.blocks_free -= set_blocks(w->data, block_bytes(c));

	w->data->btree_level = c->root->level;

	bkey_copy(&w->data->btree_root, &c->root->key);
	bkey_copy(&w->data->uuid_bucket, &c->uuid_bucket);

	for_each_cache(ca, c, i)
		w->data->prio_bucket[ca->sb.nr_this_dev] = ca->prio_buckets[0];

	w->data->magic		= jset_magic(&c->sb);
	w->data->version	= BCACHE_JSET_VERSION;
	w->data->last_seq	= last_seq(&c->journal);
	w->data->csum		= csum_set(w->data);

	for (i = 0; i < KEY_PTRS(k); i++) {
		ca = PTR_CACHE(c, k, i);
		bio = &ca->journal.bio;

		atomic_long_add(sectors, &ca->meta_sectors_written);

		bio_reset(bio);
		bio->bi_iter.bi_sector	= PTR_OFFSET(k, i);
		bio_set_dev(bio, ca->bdev);
		bio->bi_iter.bi_size	= sectors << 9;

		bio->bi_end_io	= journal_write_endio;
		bio->bi_private = w;
		bio_set_op_attrs(bio, REQ_OP_WRITE,
				 REQ_SYNC|REQ_META|REQ_PREFLUSH|REQ_FUA);
		bch_bio_map(bio, w->data);

		trace_bcache_journal_write(bio);
		bio_list_add(&list, bio);

		SET_PTR_OFFSET(k, i, PTR_OFFSET(k, i) + sectors);

		ca->journal.seq[ca->journal.cur_idx] = w->data->seq;
	}

	atomic_dec_bug(&fifo_back(&c->journal.pin));
	bch_journal_next(&c->journal);
	journal_reclaim(c);

	spin_unlock(&c->journal.lock);

	while ((bio = bio_list_pop(&list)))
		closure_bio_submit(bio, cl);

	continue_at(cl, journal_write_done, NULL);
}

static void journal_write(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, journal.io);

	spin_lock(&c->journal.lock);
	journal_write_unlocked(cl);
}

static void journal_try_write(struct cache_set *c)
	__releases(c->journal.lock)
{
	struct closure *cl = &c->journal.io;
	struct journal_write *w = c->journal.cur;

	w->need_write = true;

	if (!c->journal.io_in_flight) {
		c->journal.io_in_flight = 1;
		closure_call(cl, journal_write_unlocked, NULL, &c->cl);
	} else {
		spin_unlock(&c->journal.lock);
	}
}

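/*
 * Wait until the current journal entry has room for @nkeys more keys,
 * flushing it (or reclaiming and flushing btree nodes when the journal is
 * full) as needed. Returns with journal.lock held.
 */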
static struct journal_write *journal_wait_for_write(struct cache_set *c,
						    unsigned nkeys)
{
	size_t sectors;
	struct closure cl;
	bool wait = false;

	closure_init_stack(&cl);

	spin_lock(&c->journal.lock);

	while (1) {
		struct journal_write *w = c->journal.cur;

		sectors = __set_blocks(w->data, w->data->keys + nkeys,
				       block_bytes(c)) * c->sb.block_size;

		if (sectors <= min_t(size_t,
				     c->journal.blocks_free * c->sb.block_size,
				     PAGE_SECTORS << JSET_BITS))
			return w;

		if (wait)
			closure_wait(&c->journal.wait, &cl);

		if (!journal_full(&c->journal)) {
			if (wait)
				trace_bcache_journal_entry_full(c);

			/*
			 * XXX: If we were inserting so many keys that they
			 * won't fit in an _empty_ journal write, we'll
			 * deadlock. For now, handle this in
			 * bch_keylist_realloc() - but something to think about.
			 */
			BUG_ON(!w->data->keys);

			journal_try_write(c); /* unlocks */
		} else {
			if (wait)
				trace_bcache_journal_full(c);

			journal_reclaim(c);
			spin_unlock(&c->journal.lock);

			btree_flush_write(c);
		}

		closure_sync(&cl);
		spin_lock(&c->journal.lock);
		wait = true;
	}
}

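/*
 * Delayed work: flush the current journal entry if it has been dirtied
 * but not yet written after journal_delay_ms.
 */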
static void journal_write_work(struct work_struct *work)
{
	struct cache_set *c = container_of(to_delayed_work(work),
					   struct cache_set,
					   journal.work);
	spin_lock(&c->journal.lock);
	if (c->journal.cur->dirty)
		journal_try_write(c);
	else
		spin_unlock(&c->journal.lock);
}

/*
 * Entry point to the journalling code - bio_insert() and btree_invalidate()
 * pass bch_journal() a list of keys to be journalled, and then bch_journal()
 * hands those same keys off to btree_insert_async()
 */

atomic_t *bch_journal(struct cache_set *c,
		      struct keylist *keys,
		      struct closure *parent)
{
	struct journal_write *w;
	atomic_t *ret;

	if (!CACHE_SYNC(&c->sb))
		return NULL;

	w = journal_wait_for_write(c, bch_keylist_nkeys(keys));

	memcpy(bset_bkey_last(w->data), keys->keys, bch_keylist_bytes(keys));
	w->data->keys += bch_keylist_nkeys(keys);

	ret = &fifo_back(&c->journal.pin);
	atomic_inc(ret);

	if (parent) {
		closure_wait(&w->wait, parent);
		journal_try_write(c);
	} else if (!w->dirty) {
		w->dirty = true;
		schedule_delayed_work(&c->journal.work,
				      msecs_to_jiffies(c->journal_delay_ms));
		spin_unlock(&c->journal.lock);
	} else {
		spin_unlock(&c->journal.lock);
	}

	return ret;
}

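/*
 * Journal an empty keylist: the resulting journal entry still records the
 * current btree root and uuid/prio bucket pointers, so this is used to
 * persist metadata-only changes.
 */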
void bch_journal_meta(struct cache_set *c, struct closure *cl)
{
	struct keylist keys;
	atomic_t *ref;

	bch_keylist_init(&keys);

	ref = bch_journal(c, &keys, cl);
	if (ref)
		atomic_dec_bug(ref);
}

void bch_journal_free(struct cache_set *c)
{
	free_pages((unsigned long) c->journal.w[1].data, JSET_BITS);
	free_pages((unsigned long) c->journal.w[0].data, JSET_BITS);
	free_fifo(&c->journal.pin);
}

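/*
 * Allocate the journal pin fifo and the two journal write buffers
 * (1 << JSET_BITS pages each); returns -ENOMEM on failure.
 */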
int bch_journal_alloc(struct cache_set *c)
{
	struct journal *j = &c->journal;

	spin_lock_init(&j->lock);
	INIT_DELAYED_WORK(&j->work, journal_write_work);

	c->journal_delay_ms = 100;

	j->w[0].c = c;
	j->w[1].c = c;

	if (!(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL)) ||
	    !(j->w[0].data = (void *) __get_free_pages(GFP_KERNEL, JSET_BITS)) ||
	    !(j->w[1].data = (void *) __get_free_pages(GFP_KERNEL, JSET_BITS)))
		return -ENOMEM;

	return 0;
}