/*
 * Primary bucket allocation code
 *
 * Copyright 2012 Google, Inc.
 *
 * Allocation in bcache is done in terms of buckets:
 *
 * Each bucket has an associated 8 bit gen; this gen corresponds to the gen in
 * btree pointers - they must match for the pointer to be considered valid.
 *
 * Thus (assuming a bucket has no dirty data or metadata in it) we can reuse a
 * bucket simply by incrementing its gen.
 *
 * The gens (along with the priorities; it's really the gens that are important
 * but the code is named as if it's the priorities) are written in an arbitrary
 * list of buckets on disk, with a pointer to them in the journal header.
 *
 * When we invalidate a bucket, we have to write its new gen to disk and wait
 * for that write to complete before we use it - otherwise after a crash we
 * could have pointers that appeared to be good but pointed to data that had
 * been overwritten.
 *
 * Since the gens and priorities are all stored contiguously on disk, we can
 * batch this up: We fill up the free_inc list with freshly invalidated buckets,
 * call prio_write(), and when prio_write() finishes we pull buckets off the
 * free_inc list and optionally discard them.
 *
 * free_inc isn't the only freelist - if it was, we'd often have to sleep while
 * priorities and gens were being written before we could allocate. c->free is a
 * smaller freelist, and buckets on that list are always ready to be used.
 *
 * If we've got discards enabled, that happens when a bucket moves from the
 * free_inc list to the free list.
 *
 * There is another freelist, because sometimes we have buckets that we know
 * have nothing pointing into them - these we can reuse without waiting for
 * priorities to be rewritten. These come from freed btree nodes and buckets
 * that garbage collection discovered no longer had valid keys pointing into
 * them (because they were overwritten). That's the unused list - buckets on the
 * unused list move to the free list, optionally being discarded in the process.
 *
 * It's also important to ensure that gens don't wrap around - with respect to
 * either the oldest gen in the btree or the gen on disk. This is quite
 * difficult to do in practice, but we explicitly guard against it anyway - if
 * a bucket is in danger of wrapping around we simply skip invalidating it that
 * time around, and we garbage collect or rewrite the priorities sooner than we
 * would have otherwise.
 *
 * bch_bucket_alloc() allocates a single bucket from a specific cache.
 *
 * bch_bucket_alloc_set() allocates one or more buckets from different caches
 * out of a cache set.
 *
 * bch_allocator_thread() drives all the processes described above. It's woken
 * up from bch_bucket_alloc() and a few other places that need to make sure
 * free buckets are ready.
 *
 * invalidate_buckets_(lru|fifo)() find buckets that are available to be
 * invalidated, and then invalidate them and stick them on the free_inc list -
 * in either lru or fifo order.
 */
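
/*
 * (Worked example of the gen check described above: a pointer into a bucket
 * is considered stale once its gen no longer matches the bucket's gen - see
 * ptr_stale(), used in bch_alloc_sectors() below.)
 */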

#include "bcache.h"
#include "btree.h"

#include <linux/blkdev.h>
#include <linux/kthread.h>
#include <linux/random.h>
#include <trace/events/bcache.h>

/* Bucket heap / gen */

uint8_t bch_inc_gen(struct cache *ca, struct bucket *b)
{
	uint8_t ret = ++b->gen;

	ca->set->need_gc = max(ca->set->need_gc, bucket_gc_gen(b));
	WARN_ON_ONCE(ca->set->need_gc > BUCKET_GC_GEN_MAX);

	return ret;
}

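/*
 * Rescaling: c->rescale counts down as sectors are written to the cache; each
 * time it goes negative we reset it and decrement the prio of every unpinned,
 * non-btree bucket. Since freshly allocated buckets start at INITIAL_PRIO,
 * buckets that haven't been written to in a long time drift toward the
 * minimum prio and become preferred candidates for invalidation.
 */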
void bch_rescale_priorities(struct cache_set *c, int sectors)
{
	struct cache *ca;
	struct bucket *b;
	unsigned next = c->nbuckets * c->sb.bucket_size / 1024;
	unsigned i;
	int r;

	atomic_sub(sectors, &c->rescale);

	do {
		r = atomic_read(&c->rescale);

		if (r >= 0)
			return;
	} while (atomic_cmpxchg(&c->rescale, r, r + next) != r);

	mutex_lock(&c->bucket_lock);

	c->min_prio = USHRT_MAX;

	for_each_cache(ca, c, i)
		for_each_bucket(b, ca)
			if (b->prio &&
			    b->prio != BTREE_PRIO &&
			    !atomic_read(&b->pin)) {
				b->prio--;
				c->min_prio = min(c->min_prio, b->prio);
			}

	mutex_unlock(&c->bucket_lock);
}

/*
 * Background allocation thread: scans for buckets to be invalidated,
 * invalidates them, rewrites prios/gens (marking them as invalidated on disk),
 * then optionally issues discard commands to the newly free buckets, then puts
 * them on the various freelists.
 */

static inline bool can_inc_bucket_gen(struct bucket *b)
{
	return bucket_gc_gen(b) < BUCKET_GC_GEN_MAX;
}

bool bch_can_invalidate_bucket(struct cache *ca, struct bucket *b)
{
	BUG_ON(!ca->set->gc_mark_valid);

	return (!GC_MARK(b) ||
		GC_MARK(b) == GC_MARK_RECLAIMABLE) &&
		!atomic_read(&b->pin) &&
		can_inc_bucket_gen(b);
}

void __bch_invalidate_one_bucket(struct cache *ca, struct bucket *b)
{
	lockdep_assert_held(&ca->set->bucket_lock);
	BUG_ON(GC_MARK(b) && GC_MARK(b) != GC_MARK_RECLAIMABLE);

	if (GC_SECTORS_USED(b))
		trace_bcache_invalidate(ca, b - ca->buckets);

	bch_inc_gen(ca, b);
	b->prio = INITIAL_PRIO;
	atomic_inc(&b->pin);
}

static void bch_invalidate_one_bucket(struct cache *ca, struct bucket *b)
{
	__bch_invalidate_one_bucket(ca, b);

	fifo_push(&ca->free_inc, b - ca->buckets);
}

/*
 * Determines what order we're going to reuse buckets, smallest bucket_prio()
 * first: we also take into account the number of sectors of live data in that
 * bucket, and in order for that multiply to make sense we have to scale the
 * bucket priorities appropriately.
 *
 * Thus, we scale the bucket priorities so that the bucket with the smallest
 * prio is worth 1/8th of what INITIAL_PRIO is worth.
 */

#define bucket_prio(b)							\
({									\
	unsigned min_prio = (INITIAL_PRIO - ca->set->min_prio) / 8;	\
									\
	(b->prio - ca->set->min_prio + min_prio) * GC_SECTORS_USED(b);	\
})
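
/*
 * Worked example (illustrative numbers): with INITIAL_PRIO at 32768 and
 * ca->set->min_prio at 32000, min_prio above is (32768 - 32000) / 8 = 96, so
 * a bucket with prio 32100 holding 100 live sectors scores
 * (32100 - 32000 + 96) * 100 = 19600. Colder buckets and buckets with less
 * live data score lower, and lower scores are reused first.
 */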

#define bucket_max_cmp(l, r)	(bucket_prio(l) < bucket_prio(r))
#define bucket_min_cmp(l, r)	(bucket_prio(l) > bucket_prio(r))

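/*
 * LRU invalidation: scan all buckets, keeping the best candidates (smallest
 * bucket_prio()) in a bounded heap, then pop them smallest-first into
 * free_inc until it's full. If we run out of candidates, kick off garbage
 * collection and try again later.
 */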
static void invalidate_buckets_lru(struct cache *ca)
{
	struct bucket *b;
	ssize_t i;

	ca->heap.used = 0;

	for_each_bucket(b, ca) {
		if (!bch_can_invalidate_bucket(ca, b))
			continue;

		if (!heap_full(&ca->heap))
			heap_add(&ca->heap, b, bucket_max_cmp);
		else if (bucket_max_cmp(b, heap_peek(&ca->heap))) {
			ca->heap.data[0] = b;
			heap_sift(&ca->heap, 0, bucket_max_cmp);
		}
	}

	for (i = ca->heap.used / 2 - 1; i >= 0; --i)
		heap_sift(&ca->heap, i, bucket_min_cmp);

	while (!fifo_full(&ca->free_inc)) {
		if (!heap_pop(&ca->heap, b, bucket_min_cmp)) {
			/*
			 * We don't want to be calling invalidate_buckets()
			 * multiple times when it can't do anything
			 */
			ca->invalidate_needs_gc = 1;
			wake_up_gc(ca->set);
			return;
		}

		bch_invalidate_one_bucket(ca, b);
	}
}

static void invalidate_buckets_fifo(struct cache *ca)
{
	struct bucket *b;
	size_t checked = 0;

	while (!fifo_full(&ca->free_inc)) {
		if (ca->fifo_last_bucket < ca->sb.first_bucket ||
		    ca->fifo_last_bucket >= ca->sb.nbuckets)
			ca->fifo_last_bucket = ca->sb.first_bucket;

		b = ca->buckets + ca->fifo_last_bucket++;

		if (bch_can_invalidate_bucket(ca, b))
			bch_invalidate_one_bucket(ca, b);

		if (++checked >= ca->sb.nbuckets) {
			ca->invalidate_needs_gc = 1;
			wake_up_gc(ca->set);
			return;
		}
	}
}

static void invalidate_buckets_random(struct cache *ca)
{
	struct bucket *b;
	size_t checked = 0;

	while (!fifo_full(&ca->free_inc)) {
		size_t n;

		get_random_bytes(&n, sizeof(n));

		n %= (size_t) (ca->sb.nbuckets - ca->sb.first_bucket);
		n += ca->sb.first_bucket;

		b = ca->buckets + n;

		if (bch_can_invalidate_bucket(ca, b))
			bch_invalidate_one_bucket(ca, b);

		if (++checked >= ca->sb.nbuckets / 2) {
			ca->invalidate_needs_gc = 1;
			wake_up_gc(ca->set);
			return;
		}
	}
}

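/*
 * Fill free_inc with invalidated buckets, using whichever replacement policy
 * (lru, fifo or random) is set in this cache's superblock.
 */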
static void invalidate_buckets(struct cache *ca)
{
	BUG_ON(ca->invalidate_needs_gc);

	switch (CACHE_REPLACEMENT(&ca->sb)) {
	case CACHE_REPLACEMENT_LRU:
		invalidate_buckets_lru(ca);
		break;
	case CACHE_REPLACEMENT_FIFO:
		invalidate_buckets_fifo(ca);
		break;
	case CACHE_REPLACEMENT_RANDOM:
		invalidate_buckets_random(ca);
		break;
	}
}

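/*
 * Wait (interruptibly) until @cond is true, dropping bucket_lock across the
 * sleep. If the thread is told to stop, return 0 from the enclosing function;
 * this is only used from bch_allocator_thread().
 */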
#define allocator_wait(ca, cond)					\
do {									\
	while (1) {							\
		set_current_state(TASK_INTERRUPTIBLE);			\
		if (cond)						\
			break;						\
									\
		mutex_unlock(&(ca)->set->bucket_lock);			\
		if (kthread_should_stop())				\
			return 0;					\
									\
		schedule();						\
		mutex_lock(&(ca)->set->bucket_lock);			\
	}								\
	__set_current_state(TASK_RUNNING);				\
} while (0)

static bool bch_allocator_push(struct cache *ca, long bucket)
{
	unsigned i;

	/* Prios/gens are actually the most important reserve */
	if (fifo_push(&ca->free[RESERVE_PRIO], bucket))
		return true;

	for (i = 0; i < RESERVE_NR; i++)
		if (fifo_push(&ca->free[i], bucket))
			return true;

	return false;
}

static int bch_allocator_thread(void *arg)
{
	struct cache *ca = arg;

	mutex_lock(&ca->set->bucket_lock);

	while (1) {
		/*
		 * First, we pull buckets off of the unused and free_inc lists,
		 * possibly issue discards to them, then we add the bucket to
		 * the free list:
		 */
		while (!fifo_empty(&ca->free_inc)) {
			long bucket;

			fifo_pop(&ca->free_inc, bucket);

			if (ca->discard) {
				mutex_unlock(&ca->set->bucket_lock);
				blkdev_issue_discard(ca->bdev,
					bucket_to_sector(ca->set, bucket),
					ca->sb.bucket_size, GFP_KERNEL, 0);
				mutex_lock(&ca->set->bucket_lock);
			}

			allocator_wait(ca, bch_allocator_push(ca, bucket));
			wake_up(&ca->set->btree_cache_wait);
			wake_up(&ca->set->bucket_wait);
		}

		/*
		 * We've run out of free buckets, we need to find some buckets
		 * we can invalidate. First, invalidate them in memory and add
		 * them to the free_inc list:
		 */
retry_invalidate:
		allocator_wait(ca, ca->set->gc_mark_valid &&
			       !ca->invalidate_needs_gc);
		invalidate_buckets(ca);

		/*
		 * Now, we write their new gens to disk so we can start writing
		 * new stuff to them:
		 */
		allocator_wait(ca, !atomic_read(&ca->set->prio_blocked));
		if (CACHE_SYNC(&ca->set->sb)) {
			/*
			 * This could deadlock if an allocation with a btree
			 * node locked ever blocked - having the btree node
			 * locked would block garbage collection, but here we're
			 * waiting on garbage collection before we invalidate
			 * and free anything.
			 *
			 * But this should be safe since the btree code always
			 * uses btree_check_reserve() before allocating now, and
			 * if it fails it blocks without btree nodes locked.
			 */
			if (!fifo_full(&ca->free_inc))
				goto retry_invalidate;

			bch_prio_write(ca);
		}
	}
}

/* Allocation */

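/*
 * Allocate a single bucket from @ca: first try the RESERVE_NONE freelist,
 * then the freelist for @reserve. If both are empty, either fail (returning
 * -1) when @wait is false, or sleep on bucket_wait until the allocator
 * thread refills the freelists.
 */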
long bch_bucket_alloc(struct cache *ca, unsigned reserve, bool wait)
{
	DEFINE_WAIT(w);
	struct bucket *b;
	long r;

	/* fastpath */
	if (fifo_pop(&ca->free[RESERVE_NONE], r) ||
	    fifo_pop(&ca->free[reserve], r))
		goto out;

	if (!wait) {
		trace_bcache_alloc_fail(ca, reserve);
		return -1;
	}

	do {
		prepare_to_wait(&ca->set->bucket_wait, &w,
				TASK_UNINTERRUPTIBLE);

		mutex_unlock(&ca->set->bucket_lock);
		schedule();
		mutex_lock(&ca->set->bucket_lock);
	} while (!fifo_pop(&ca->free[RESERVE_NONE], r) &&
		 !fifo_pop(&ca->free[reserve], r));

	finish_wait(&ca->set->bucket_wait, &w);
out:
	wake_up_process(ca->alloc_thread);

	trace_bcache_alloc(ca, reserve);

	if (expensive_debug_checks(ca->set)) {
		size_t iter;
		long i;
		unsigned j;

		for (iter = 0; iter < prio_buckets(ca) * 2; iter++)
			BUG_ON(ca->prio_buckets[iter] == (uint64_t) r);

		for (j = 0; j < RESERVE_NR; j++)
			fifo_for_each(i, &ca->free[j], iter)
				BUG_ON(i == r);
		fifo_for_each(i, &ca->free_inc, iter)
			BUG_ON(i == r);
	}

	b = ca->buckets + r;

	BUG_ON(atomic_read(&b->pin) != 1);

	SET_GC_SECTORS_USED(b, ca->sb.bucket_size);

	if (reserve <= RESERVE_PRIO) {
		SET_GC_MARK(b, GC_MARK_METADATA);
		SET_GC_MOVE(b, 0);
		b->prio = BTREE_PRIO;
	} else {
		SET_GC_MARK(b, GC_MARK_RECLAIMABLE);
		SET_GC_MOVE(b, 0);
		b->prio = INITIAL_PRIO;
	}

	return r;
}

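/*
 * Clear a bucket's GC mark and live-sector count. The bucket still can't be
 * reallocated until the allocator invalidates it (incrementing its gen) and
 * the new prios/gens make it to disk.
 */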
void __bch_bucket_free(struct cache *ca, struct bucket *b)
{
	SET_GC_MARK(b, 0);
	SET_GC_SECTORS_USED(b, 0);
}

void bch_bucket_free(struct cache_set *c, struct bkey *k)
{
	unsigned i;

	for (i = 0; i < KEY_PTRS(k); i++)
		__bch_bucket_free(PTR_CACHE(c, k, i),
				  PTR_BUCKET(c, k, i));
}

int __bch_bucket_alloc_set(struct cache_set *c, unsigned reserve,
			   struct bkey *k, int n, bool wait)
{
	int i;

	lockdep_assert_held(&c->bucket_lock);
	BUG_ON(!n || n > c->caches_loaded || n > 8);

	bkey_init(k);

	/* sort by free space/prio of oldest data in caches */

	for (i = 0; i < n; i++) {
		struct cache *ca = c->cache_by_alloc[i];
		long b = bch_bucket_alloc(ca, reserve, wait);

		if (b == -1)
			goto err;

		k->ptr[i] = PTR(ca->buckets[b].gen,
				bucket_to_sector(c, b),
				ca->sb.nr_this_dev);

		SET_KEY_PTRS(k, i + 1);
	}

	return 0;
err:
	bch_bucket_free(c, k);
	bkey_put(c, k);
	return -1;
}

int bch_bucket_alloc_set(struct cache_set *c, unsigned reserve,
			 struct bkey *k, int n, bool wait)
{
	int ret;

	mutex_lock(&c->bucket_lock);
	ret = __bch_bucket_alloc_set(c, reserve, k, n, wait);
	mutex_unlock(&c->bucket_lock);
	return ret;
}

/* Sector allocator */

struct open_bucket {
	struct list_head	list;
	unsigned		last_write_point;
	unsigned		sectors_free;
	BKEY_PADDED(key);
};

/*
 * We keep multiple buckets open for writes, and try to segregate different
 * write streams for better cache utilization: first we look for a bucket where
 * the last write to it was sequential with the current write, and failing that
 * we look for a bucket that was last used by the same task.
 *
 * The idea is that if you've got multiple tasks pulling data into the cache at
 * the same time, you'll get better cache utilization if you try to segregate
 * their data and preserve locality.
 *
 * For example, say you've started Firefox at the same time you're copying a
 * bunch of files. Firefox will likely end up being fairly hot and stay in the
 * cache awhile, but the data you copied might not be; if you wrote all that
 * data to the same buckets it'd get invalidated at the same time.
 *
 * Both of those tasks will be doing fairly random IO so we can't rely on
 * detecting sequential IO to segregate their data, but going off of the task
 * should be a sane heuristic.
 */
static struct open_bucket *pick_data_bucket(struct cache_set *c,
					    const struct bkey *search,
					    unsigned write_point,
					    struct bkey *alloc)
{
	struct open_bucket *ret, *ret_task = NULL;

	list_for_each_entry_reverse(ret, &c->data_buckets, list)
		if (!bkey_cmp(&ret->key, search))
			goto found;
		else if (ret->last_write_point == write_point)
			ret_task = ret;

	ret = ret_task ?: list_first_entry(&c->data_buckets,
					   struct open_bucket, list);
found:
	if (!ret->sectors_free && KEY_PTRS(alloc)) {
		ret->sectors_free = c->sb.bucket_size;
		bkey_copy(&ret->key, alloc);
		bkey_init(alloc);
	}

	if (!ret->sectors_free)
		ret = NULL;

	return ret;
}

/*
 * Allocates some space in the cache to write to, and sets k to point to the
 * newly allocated space, and updates KEY_SIZE(k) and KEY_OFFSET(k) (to point
 * to the end of the newly allocated space).
 *
 * May allocate fewer sectors than @sectors, KEY_SIZE(k) indicates how many
 * sectors were actually allocated.
 *
 * If @wait is true (as it is for writeback), this will not fail.
 */
bool bch_alloc_sectors(struct cache_set *c, struct bkey *k, unsigned sectors,
		       unsigned write_point, unsigned write_prio, bool wait)
{
	struct open_bucket *b;
	BKEY_PADDED(key) alloc;
	unsigned i;

	/*
	 * We might have to allocate a new bucket, which we can't do with a
	 * spinlock held. So if we have to allocate, we drop the lock, allocate
	 * and then retry. KEY_PTRS() indicates whether alloc points to
	 * allocated bucket(s).
	 */

	bkey_init(&alloc.key);
	spin_lock(&c->data_bucket_lock);

	while (!(b = pick_data_bucket(c, k, write_point, &alloc.key))) {
		unsigned watermark = write_prio
			? RESERVE_MOVINGGC
			: RESERVE_NONE;

		spin_unlock(&c->data_bucket_lock);

		if (bch_bucket_alloc_set(c, watermark, &alloc.key, 1, wait))
			return false;

		spin_lock(&c->data_bucket_lock);
	}

	/*
	 * If we had to allocate, we might race and not need to allocate the
	 * second time we call pick_data_bucket(). If we allocated a bucket but
	 * didn't use it, drop the refcount bch_bucket_alloc_set() took:
	 */
	if (KEY_PTRS(&alloc.key))
		bkey_put(c, &alloc.key);

	for (i = 0; i < KEY_PTRS(&b->key); i++)
		EBUG_ON(ptr_stale(c, &b->key, i));

	/* Set up the pointer to the space we're allocating: */

	for (i = 0; i < KEY_PTRS(&b->key); i++)
		k->ptr[i] = b->key.ptr[i];

	sectors = min(sectors, b->sectors_free);

	SET_KEY_OFFSET(k, KEY_OFFSET(k) + sectors);
	SET_KEY_SIZE(k, sectors);
	SET_KEY_PTRS(k, KEY_PTRS(&b->key));

	/*
	 * Move b to the end of the lru, and keep track of what this bucket was
	 * last used for:
	 */
	list_move_tail(&b->list, &c->data_buckets);
	bkey_copy_key(&b->key, k);
	b->last_write_point = write_point;

	b->sectors_free -= sectors;

	for (i = 0; i < KEY_PTRS(&b->key); i++) {
		SET_PTR_OFFSET(&b->key, i, PTR_OFFSET(&b->key, i) + sectors);

		atomic_long_add(sectors,
				&PTR_CACHE(c, &b->key, i)->sectors_written);
	}

	if (b->sectors_free < c->sb.block_size)
		b->sectors_free = 0;

	/*
	 * k takes refcounts on the buckets it points to until it's inserted
	 * into the btree, but if we're done with this bucket we just transfer
	 * the refcount we took when the bucket was allocated.
	 */
	if (b->sectors_free)
		for (i = 0; i < KEY_PTRS(&b->key); i++)
			atomic_inc(&PTR_BUCKET(c, &b->key, i)->pin);

	spin_unlock(&c->data_bucket_lock);
	return true;
}

/* Init */

void bch_open_buckets_free(struct cache_set *c)
{
	struct open_bucket *b;

	while (!list_empty(&c->data_buckets)) {
		b = list_first_entry(&c->data_buckets,
				     struct open_bucket, list);
		list_del(&b->list);
		kfree(b);
	}
}

int bch_open_buckets_alloc(struct cache_set *c)
{
	int i;

	spin_lock_init(&c->data_bucket_lock);

	for (i = 0; i < 6; i++) {
		struct open_bucket *b = kzalloc(sizeof(*b), GFP_KERNEL);

		if (!b)
			return -ENOMEM;

		list_add(&b->list, &c->data_buckets);
	}

	return 0;
}

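/*
 * Spawn the background allocator thread for @ca and stash it in
 * ca->alloc_thread, where bch_bucket_alloc() can find it to wake it up.
 */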
int bch_cache_allocator_start(struct cache *ca)
{
	struct task_struct *k = kthread_run(bch_allocator_thread,
					    ca, "bcache_allocator");

	if (IS_ERR(k))
		return PTR_ERR(k);

	ca->alloc_thread = k;
	return 0;
}