Commit | Line | Data |
---|---|---|
cafe5635 KO |
1 | /* |
2 | * bcache setup/teardown code, and some metadata io - read a superblock and | |
3 | * figure out what to do with it. | |
4 | * | |
5 | * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com> | |
6 | * Copyright 2012 Google, Inc. | |
7 | */ | |
8 | ||
9 | #include "bcache.h" | |
10 | #include "btree.h" | |
11 | #include "debug.h" | |
12 | #include "request.h" | |
279afbad | 13 | #include "writeback.h" |
cafe5635 | 14 | |
c37511b8 | 15 | #include <linux/blkdev.h> |
cafe5635 KO |
16 | #include <linux/buffer_head.h> |
17 | #include <linux/debugfs.h> | |
18 | #include <linux/genhd.h> | |
28935ab5 | 19 | #include <linux/idr.h> |
79826c35 | 20 | #include <linux/kthread.h> |
cafe5635 KO |
21 | #include <linux/module.h> |
22 | #include <linux/random.h> | |
23 | #include <linux/reboot.h> | |
24 | #include <linux/sysfs.h> | |
25 | ||
26 | MODULE_LICENSE("GPL"); | |
27 | MODULE_AUTHOR("Kent Overstreet <kent.overstreet@gmail.com>"); | |
28 | ||
29 | static const char bcache_magic[] = { | |
30 | 0xc6, 0x85, 0x73, 0xf6, 0x4e, 0x1a, 0x45, 0xca, | |
31 | 0x82, 0x65, 0xf5, 0x7f, 0x48, 0xba, 0x6d, 0x81 | |
32 | }; | |
33 | ||
34 | static const char invalid_uuid[] = { | |
35 | 0xa0, 0x3e, 0xf8, 0xed, 0x3e, 0xe1, 0xb8, 0x78, | |
36 | 0xc8, 0x50, 0xfc, 0x5e, 0xcb, 0x16, 0xcd, 0x99 | |
37 | }; | |
38 | ||
39 | /* Default is -1; we skip past it for struct cached_dev's cache mode */ | |
40 | const char * const bch_cache_modes[] = { | |
41 | "default", | |
42 | "writethrough", | |
43 | "writeback", | |
44 | "writearound", | |
45 | "none", | |
46 | NULL | |
47 | }; | |
48 | ||
cafe5635 KO |
49 | static struct kobject *bcache_kobj; |
50 | struct mutex bch_register_lock; | |
51 | LIST_HEAD(bch_cache_sets); | |
52 | static LIST_HEAD(uncached_devices); | |
53 | ||
28935ab5 KO |
54 | static int bcache_major; |
55 | static DEFINE_IDA(bcache_minor); | |
cafe5635 KO |
56 | static wait_queue_head_t unregister_wait; |
57 | struct workqueue_struct *bcache_wq; | |
58 | ||
59 | #define BTREE_MAX_PAGES (256 * 1024 / PAGE_SIZE) | |
60 | ||
61 | static void bio_split_pool_free(struct bio_split_pool *p) | |
62 | { | |
8ef74790 KO |
63 | if (p->bio_split_hook) |
64 | mempool_destroy(p->bio_split_hook); | |
65 | ||
cafe5635 KO |
66 | if (p->bio_split) |
67 | bioset_free(p->bio_split); | |
cafe5635 KO |
68 | } |
69 | ||
70 | static int bio_split_pool_init(struct bio_split_pool *p) | |
71 | { | |
72 | p->bio_split = bioset_create(4, 0); | |
73 | if (!p->bio_split) | |
74 | return -ENOMEM; | |
75 | ||
76 | p->bio_split_hook = mempool_create_kmalloc_pool(4, | |
77 | sizeof(struct bio_split_hook)); | |
78 | if (!p->bio_split_hook) | |
79 | return -ENOMEM; | |
80 | ||
81 | return 0; | |
82 | } | |
83 | ||
84 | /* Superblock */ | |
85 | ||
86 | static const char *read_super(struct cache_sb *sb, struct block_device *bdev, | |
87 | struct page **res) | |
88 | { | |
89 | const char *err; | |
90 | struct cache_sb *s; | |
91 | struct buffer_head *bh = __bread(bdev, 1, SB_SIZE); | |
92 | unsigned i; | |
93 | ||
94 | if (!bh) | |
95 | return "IO error"; | |
96 | ||
97 | s = (struct cache_sb *) bh->b_data; | |
98 | ||
99 | sb->offset = le64_to_cpu(s->offset); | |
100 | sb->version = le64_to_cpu(s->version); | |
101 | ||
102 | memcpy(sb->magic, s->magic, 16); | |
103 | memcpy(sb->uuid, s->uuid, 16); | |
104 | memcpy(sb->set_uuid, s->set_uuid, 16); | |
105 | memcpy(sb->label, s->label, SB_LABEL_SIZE); | |
106 | ||
107 | sb->flags = le64_to_cpu(s->flags); | |
108 | sb->seq = le64_to_cpu(s->seq); | |
cafe5635 | 109 | sb->last_mount = le32_to_cpu(s->last_mount); |
cafe5635 KO |
110 | sb->first_bucket = le16_to_cpu(s->first_bucket); |
111 | sb->keys = le16_to_cpu(s->keys); | |
112 | ||
113 | for (i = 0; i < SB_JOURNAL_BUCKETS; i++) | |
114 | sb->d[i] = le64_to_cpu(s->d[i]); | |
115 | ||
116 | pr_debug("read sb version %llu, flags %llu, seq %llu, journal size %u", | |
117 | sb->version, sb->flags, sb->seq, sb->keys); | |
118 | ||
119 | err = "Not a bcache superblock"; | |
120 | if (sb->offset != SB_SECTOR) | |
121 | goto err; | |
122 | ||
123 | if (memcmp(sb->magic, bcache_magic, 16)) | |
124 | goto err; | |
125 | ||
126 | err = "Too many journal buckets"; | |
127 | if (sb->keys > SB_JOURNAL_BUCKETS) | |
128 | goto err; | |
129 | ||
130 | err = "Bad checksum"; | |
131 | if (s->csum != csum_set(s)) | |
132 | goto err; | |
133 | ||
134 | err = "Bad UUID"; | |
169ef1cf | 135 | if (bch_is_zero(sb->uuid, 16)) |
cafe5635 KO |
136 | goto err; |
137 | ||
8abb2a5d KO |
138 | sb->block_size = le16_to_cpu(s->block_size); |
139 | ||
140 | err = "Superblock block size smaller than device block size"; | |
141 | if (sb->block_size << 9 < bdev_logical_block_size(bdev)) | |
142 | goto err; | |
143 | ||
2903381f KO |
144 | switch (sb->version) { |
145 | case BCACHE_SB_VERSION_BDEV: | |
2903381f KO |
146 | sb->data_offset = BDEV_DATA_START_DEFAULT; |
147 | break; | |
148 | case BCACHE_SB_VERSION_BDEV_WITH_OFFSET: | |
2903381f KO |
149 | sb->data_offset = le64_to_cpu(s->data_offset); |
150 | ||
151 | err = "Bad data offset"; | |
152 | if (sb->data_offset < BDEV_DATA_START_DEFAULT) | |
153 | goto err; | |
cafe5635 | 154 | |
2903381f KO |
155 | break; |
156 | case BCACHE_SB_VERSION_CDEV: | |
157 | case BCACHE_SB_VERSION_CDEV_WITH_UUID: | |
158 | sb->nbuckets = le64_to_cpu(s->nbuckets); | |
159 | sb->block_size = le16_to_cpu(s->block_size); | |
160 | sb->bucket_size = le16_to_cpu(s->bucket_size); | |
cafe5635 | 161 | |
2903381f KO |
162 | sb->nr_in_set = le16_to_cpu(s->nr_in_set); |
163 | sb->nr_this_dev = le16_to_cpu(s->nr_this_dev); | |
cafe5635 | 164 | |
2903381f KO |
165 | err = "Too many buckets"; |
166 | if (sb->nbuckets > LONG_MAX) | |
167 | goto err; | |
cafe5635 | 168 | |
2903381f KO |
169 | err = "Not enough buckets"; |
170 | if (sb->nbuckets < 1 << 7) | |
171 | goto err; | |
cafe5635 | 172 | |
2903381f KO |
173 | err = "Bad block/bucket size"; |
174 | if (!is_power_of_2(sb->block_size) || | |
175 | sb->block_size > PAGE_SECTORS || | |
176 | !is_power_of_2(sb->bucket_size) || | |
177 | sb->bucket_size < PAGE_SECTORS) | |
178 | goto err; | |
cafe5635 | 179 | |
2903381f KO |
180 | err = "Invalid superblock: device too small"; |
181 | if (get_capacity(bdev->bd_disk) < sb->bucket_size * sb->nbuckets) | |
182 | goto err; | |
cafe5635 | 183 | |
2903381f KO |
184 | err = "Bad UUID"; |
185 | if (bch_is_zero(sb->set_uuid, 16)) | |
186 | goto err; | |
cafe5635 | 187 | |
2903381f KO |
188 | err = "Bad cache device number in set"; |
189 | if (!sb->nr_in_set || | |
190 | sb->nr_in_set <= sb->nr_this_dev || | |
191 | sb->nr_in_set > MAX_CACHES_PER_SET) | |
cafe5635 KO |
192 | goto err; |
193 | ||
2903381f KO |
194 | err = "Journal buckets not sequential"; |
195 | for (i = 0; i < sb->keys; i++) | |
196 | if (sb->d[i] != sb->first_bucket + i) | |
197 | goto err; | |
cafe5635 | 198 | |
2903381f KO |
199 | err = "Too many journal buckets"; |
200 | if (sb->first_bucket + sb->keys > sb->nbuckets) | |
201 | goto err; | |
202 | ||
203 | err = "Invalid superblock: first bucket comes before end of super"; | |
204 | if (sb->first_bucket * sb->bucket_size < 16) | |
205 | goto err; | |
206 | ||
207 | break; | |
208 | default: | |
209 | err = "Unsupported superblock version"; | |
cafe5635 | 210 | goto err; |
2903381f KO |
211 | } |
212 | ||
cafe5635 KO |
213 | sb->last_mount = get_seconds(); |
214 | err = NULL; | |
215 | ||
216 | get_page(bh->b_page); | |
217 | *res = bh->b_page; | |
218 | err: | |
219 | put_bh(bh); | |
220 | return err; | |
221 | } | |
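/*
 * Editor's note (illustrative, not part of the file): __bread(bdev, 1, SB_SIZE)
 * reads logical block 1 using SB_SIZE as the block size, i.e. the 4KB region
 * starting at byte offset 4096.  Assuming SB_SIZE == 4096 and SB_SECTOR == 8
 * (as defined in bcache.h), that is exactly sector 8 in 512-byte sectors:
 *
 *	1 * 4096 / 512 = 8
 *
 * which is why a superblock read from anywhere else fails the
 * "sb->offset != SB_SECTOR" check above.
 */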
222 | ||
223 | static void write_bdev_super_endio(struct bio *bio, int error) | |
224 | { | |
225 | struct cached_dev *dc = bio->bi_private; | |
226 | /* XXX: error checking */ | |
227 | ||
cb7a583e | 228 | closure_put(&dc->sb_write); |
cafe5635 KO |
229 | } |
230 | ||
231 | static void __write_super(struct cache_sb *sb, struct bio *bio) | |
232 | { | |
233 | struct cache_sb *out = page_address(bio->bi_io_vec[0].bv_page); | |
234 | unsigned i; | |
235 | ||
4f024f37 KO |
236 | bio->bi_iter.bi_sector = SB_SECTOR; |
237 | bio->bi_rw = REQ_SYNC|REQ_META; | |
238 | bio->bi_iter.bi_size = SB_SIZE; | |
169ef1cf | 239 | bch_bio_map(bio, NULL); |
cafe5635 KO |
240 | |
241 | out->offset = cpu_to_le64(sb->offset); | |
242 | out->version = cpu_to_le64(sb->version); | |
243 | ||
244 | memcpy(out->uuid, sb->uuid, 16); | |
245 | memcpy(out->set_uuid, sb->set_uuid, 16); | |
246 | memcpy(out->label, sb->label, SB_LABEL_SIZE); | |
247 | ||
248 | out->flags = cpu_to_le64(sb->flags); | |
249 | out->seq = cpu_to_le64(sb->seq); | |
250 | ||
251 | out->last_mount = cpu_to_le32(sb->last_mount); | |
252 | out->first_bucket = cpu_to_le16(sb->first_bucket); | |
253 | out->keys = cpu_to_le16(sb->keys); | |
254 | ||
255 | for (i = 0; i < sb->keys; i++) | |
256 | out->d[i] = cpu_to_le64(sb->d[i]); | |
257 | ||
258 | out->csum = csum_set(out); | |
259 | ||
260 | pr_debug("ver %llu, flags %llu, seq %llu", | |
261 | sb->version, sb->flags, sb->seq); | |
262 | ||
263 | submit_bio(REQ_WRITE, bio); | |
264 | } | |
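/*
 * Editor's note (illustrative, not part of the file): out->csum is filled in
 * last, after every field has been converted to its little-endian on-disk
 * form, because csum_set() (see bcache.h) is assumed to checksum the raw
 * on-disk bytes of the superblock starting just past the csum field itself.
 * Computing it any earlier would checksum CPU-endian values and trip the
 * "Bad checksum" test in read_super().
 */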
265 | ||
cb7a583e KO |
266 | static void bch_write_bdev_super_unlock(struct closure *cl) |
267 | { | |
268 | struct cached_dev *dc = container_of(cl, struct cached_dev, sb_write); | |
269 | ||
270 | up(&dc->sb_write_mutex); | |
271 | } | |
272 | ||
cafe5635 KO |
273 | void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent) |
274 | { | |
cb7a583e | 275 | struct closure *cl = &dc->sb_write; |
cafe5635 KO |
276 | struct bio *bio = &dc->sb_bio; |
277 | ||
cb7a583e KO |
278 | down(&dc->sb_write_mutex); |
279 | closure_init(cl, parent); | |
cafe5635 KO |
280 | |
281 | bio_reset(bio); | |
282 | bio->bi_bdev = dc->bdev; | |
283 | bio->bi_end_io = write_bdev_super_endio; | |
284 | bio->bi_private = dc; | |
285 | ||
286 | closure_get(cl); | |
287 | __write_super(&dc->sb, bio); | |
288 | ||
cb7a583e | 289 | closure_return_with_destructor(cl, bch_write_bdev_super_unlock); |
cafe5635 KO |
290 | } |
291 | ||
292 | static void write_super_endio(struct bio *bio, int error) | |
293 | { | |
294 | struct cache *ca = bio->bi_private; | |
295 | ||
296 | bch_count_io_errors(ca, error, "writing superblock"); | |
cb7a583e KO |
297 | closure_put(&ca->set->sb_write); |
298 | } | |
299 | ||
300 | static void bcache_write_super_unlock(struct closure *cl) | |
301 | { | |
302 | struct cache_set *c = container_of(cl, struct cache_set, sb_write); | |
303 | ||
304 | up(&c->sb_write_mutex); | |
cafe5635 KO |
305 | } |
306 | ||
307 | void bcache_write_super(struct cache_set *c) | |
308 | { | |
cb7a583e | 309 | struct closure *cl = &c->sb_write; |
cafe5635 KO |
310 | struct cache *ca; |
311 | unsigned i; | |
312 | ||
cb7a583e KO |
313 | down(&c->sb_write_mutex); |
314 | closure_init(cl, &c->cl); | |
cafe5635 KO |
315 | |
316 | c->sb.seq++; | |
317 | ||
318 | for_each_cache(ca, c, i) { | |
319 | struct bio *bio = &ca->sb_bio; | |
320 | ||
2903381f | 321 | ca->sb.version = BCACHE_SB_VERSION_CDEV_WITH_UUID; |
cafe5635 KO |
322 | ca->sb.seq = c->sb.seq; |
323 | ca->sb.last_mount = c->sb.last_mount; | |
324 | ||
325 | SET_CACHE_SYNC(&ca->sb, CACHE_SYNC(&c->sb)); | |
326 | ||
327 | bio_reset(bio); | |
328 | bio->bi_bdev = ca->bdev; | |
329 | bio->bi_end_io = write_super_endio; | |
330 | bio->bi_private = ca; | |
331 | ||
332 | closure_get(cl); | |
333 | __write_super(&ca->sb, bio); | |
334 | } | |
335 | ||
cb7a583e | 336 | closure_return_with_destructor(cl, bcache_write_super_unlock); |
cafe5635 KO |
337 | } |
338 | ||
339 | /* UUID io */ | |
340 | ||
341 | static void uuid_endio(struct bio *bio, int error) | |
342 | { | |
343 | struct closure *cl = bio->bi_private; | |
cb7a583e | 344 | struct cache_set *c = container_of(cl, struct cache_set, uuid_write); |
cafe5635 KO |
345 | |
346 | cache_set_err_on(error, c, "accessing uuids"); | |
347 | bch_bbio_free(bio, c); | |
348 | closure_put(cl); | |
349 | } | |
350 | ||
cb7a583e KO |
351 | static void uuid_io_unlock(struct closure *cl) |
352 | { | |
353 | struct cache_set *c = container_of(cl, struct cache_set, uuid_write); | |
354 | ||
355 | up(&c->uuid_write_mutex); | |
356 | } | |
357 | ||
cafe5635 KO |
358 | static void uuid_io(struct cache_set *c, unsigned long rw, |
359 | struct bkey *k, struct closure *parent) | |
360 | { | |
cb7a583e | 361 | struct closure *cl = &c->uuid_write; |
cafe5635 KO |
362 | struct uuid_entry *u; |
363 | unsigned i; | |
85b1492e | 364 | char buf[80]; |
cafe5635 KO |
365 | |
366 | BUG_ON(!parent); | |
cb7a583e KO |
367 | down(&c->uuid_write_mutex); |
368 | closure_init(cl, parent); | |
cafe5635 KO |
369 | |
370 | for (i = 0; i < KEY_PTRS(k); i++) { | |
371 | struct bio *bio = bch_bbio_alloc(c); | |
372 | ||
373 | bio->bi_rw = REQ_SYNC|REQ_META|rw; | |
4f024f37 | 374 | bio->bi_iter.bi_size = KEY_SIZE(k) << 9; |
cafe5635 KO |
375 | |
376 | bio->bi_end_io = uuid_endio; | |
377 | bio->bi_private = cl; | |
169ef1cf | 378 | bch_bio_map(bio, c->uuids); |
cafe5635 KO |
379 | |
380 | bch_submit_bbio(bio, c, k, i); | |
381 | ||
382 | if (!(rw & WRITE)) | |
383 | break; | |
384 | } | |
385 | ||
85b1492e KO |
386 | bch_bkey_to_text(buf, sizeof(buf), k); |
387 | pr_debug("%s UUIDs at %s", rw & REQ_WRITE ? "wrote" : "read", buf); | |
cafe5635 KO |
388 | |
389 | for (u = c->uuids; u < c->uuids + c->nr_uuids; u++) | |
169ef1cf | 390 | if (!bch_is_zero(u->uuid, 16)) |
cafe5635 KO |
391 | pr_debug("Slot %zi: %pU: %s: 1st: %u last: %u inv: %u", |
392 | u - c->uuids, u->uuid, u->label, | |
393 | u->first_reg, u->last_reg, u->invalidated); | |
394 | ||
cb7a583e | 395 | closure_return_with_destructor(cl, uuid_io_unlock); |
cafe5635 KO |
396 | } |
397 | ||
398 | static char *uuid_read(struct cache_set *c, struct jset *j, struct closure *cl) | |
399 | { | |
400 | struct bkey *k = &j->uuid_bucket; | |
401 | ||
d5cc66e9 | 402 | if (bch_btree_ptr_invalid(c, k)) |
cafe5635 KO |
403 | return "bad uuid pointer"; |
404 | ||
405 | bkey_copy(&c->uuid_bucket, k); | |
406 | uuid_io(c, READ_SYNC, k, cl); | |
407 | ||
408 | if (j->version < BCACHE_JSET_VERSION_UUIDv1) { | |
409 | struct uuid_entry_v0 *u0 = (void *) c->uuids; | |
410 | struct uuid_entry *u1 = (void *) c->uuids; | |
411 | int i; | |
412 | ||
413 | closure_sync(cl); | |
414 | ||
415 | /* | |
416 | * Since the new uuid entry is bigger than the old, we have to | |
417 | * convert starting at the highest memory address and work down | |
418 | * in order to do it in place | |
419 | */ | |
420 | ||
421 | for (i = c->nr_uuids - 1; | |
422 | i >= 0; | |
423 | --i) { | |
424 | memcpy(u1[i].uuid, u0[i].uuid, 16); | |
425 | memcpy(u1[i].label, u0[i].label, 32); | |
426 | ||
427 | u1[i].first_reg = u0[i].first_reg; | |
428 | u1[i].last_reg = u0[i].last_reg; | |
429 | u1[i].invalidated = u0[i].invalidated; | |
430 | ||
431 | u1[i].flags = 0; | |
432 | u1[i].sectors = 0; | |
433 | } | |
434 | } | |
435 | ||
436 | return NULL; | |
437 | } | |
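/*
 * Editor's sketch (illustrative only, not part of the file): why the uuid
 * conversion in uuid_read() above walks from the highest index down.  When
 * widening an array of records in place, converting from index 0 upward
 * would overwrite source entries that have not been read yet; starting at
 * the end never clobbers unread input.  Generic standalone example with
 * made-up record types:
 */
#include <stdint.h>
#include <stdio.h>

struct old_rec { uint16_t a; };			/* narrow source record */
struct new_rec { uint16_t a; uint16_t flags; };	/* wider destination record */

static void widen_in_place(void *buf, int n)
{
	struct old_rec *o = buf;
	struct new_rec *w = buf;
	int i;

	for (i = n - 1; i >= 0; --i) {		/* highest address first */
		uint16_t a = o[i].a;		/* read before it can be clobbered */
		w[i].a = a;
		w[i].flags = 0;
	}
}

int main(void)
{
	/* buffer sized for the widened records, front-loaded with old ones */
	struct new_rec buf[4];
	struct old_rec *o = (struct old_rec *) buf;
	int i;

	for (i = 0; i < 4; i++)
		o[i].a = i + 1;

	widen_in_place(buf, 4);

	for (i = 0; i < 4; i++)
		printf("%u ", buf[i].a);	/* prints: 1 2 3 4 */
	printf("\n");
	return 0;
}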
438 | ||
439 | static int __uuid_write(struct cache_set *c) | |
440 | { | |
441 | BKEY_PADDED(key) k; | |
442 | struct closure cl; | |
443 | closure_init_stack(&cl); | |
444 | ||
445 | lockdep_assert_held(&bch_register_lock); | |
446 | ||
78365411 | 447 | if (bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, true)) |
cafe5635 KO |
448 | return 1; |
449 | ||
450 | SET_KEY_SIZE(&k.key, c->sb.bucket_size); | |
451 | uuid_io(c, REQ_WRITE, &k.key, &cl); | |
452 | closure_sync(&cl); | |
453 | ||
454 | bkey_copy(&c->uuid_bucket, &k.key); | |
3a3b6a4e | 455 | bkey_put(c, &k.key); |
cafe5635 KO |
456 | return 0; |
457 | } | |
458 | ||
459 | int bch_uuid_write(struct cache_set *c) | |
460 | { | |
461 | int ret = __uuid_write(c); | |
462 | ||
463 | if (!ret) | |
464 | bch_journal_meta(c, NULL); | |
465 | ||
466 | return ret; | |
467 | } | |
468 | ||
469 | static struct uuid_entry *uuid_find(struct cache_set *c, const char *uuid) | |
470 | { | |
471 | struct uuid_entry *u; | |
472 | ||
473 | for (u = c->uuids; | |
474 | u < c->uuids + c->nr_uuids; u++) | |
475 | if (!memcmp(u->uuid, uuid, 16)) | |
476 | return u; | |
477 | ||
478 | return NULL; | |
479 | } | |
480 | ||
481 | static struct uuid_entry *uuid_find_empty(struct cache_set *c) | |
482 | { | |
483 | static const char zero_uuid[16] = "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"; | |
484 | return uuid_find(c, zero_uuid); | |
485 | } | |
486 | ||
487 | /* | |
488 | * Bucket priorities/gens: | |
489 | * | |
490 | * For each bucket, we store on disk its | |
491 | * 8 bit gen | |
492 | * 16 bit priority | |
493 | * | |
494 | * See alloc.c for an explanation of the gen. The priority is used to implement | |
495 | * lru (and in the future other) cache replacement policies; for most purposes | |
496 | * it's just an opaque integer. | |
497 | * | |
498 | * The gens and the priorities don't have a whole lot to do with each other, and | |
499 | * it's actually the gens that must be written out at specific times - it's no | |
500 | * big deal if the priorities don't get written, if we lose them we just reuse | |
501 | * buckets in suboptimal order. | |
502 | * | |
503 | * On disk they're stored in a packed array, and in as many buckets as are required | |
504 | * to fit them all. The buckets we use to store them form a list; the journal | |
505 | * header points to the first bucket, the first bucket points to the second | |
506 | * bucket, et cetera. | |
507 | * | |
508 | * This code is used by the allocation code; periodically (whenever it runs out | |
509 | * of buckets to allocate from) the allocation code will invalidate some | |
510 | * buckets, but it can't use those buckets until their new gens are safely on | |
511 | * disk. | |
512 | */ | |
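/*
 * Editor's sketch (illustrative only, not part of the file): roughly how many
 * of the packed (gen, prio) entries described above fit in one prio bucket.
 * The struct layouts below are assumptions standing in for the real
 * definitions in bcache.h; the real code uses prios_per_bucket() and
 * bucket_bytes().
 */
#include <stdint.h>
#include <stdio.h>

struct example_bucket_disk {		/* assumed: 3 packed bytes per bucket */
	uint16_t prio;
	uint8_t  gen;
} __attribute__((packed));

struct example_prio_set_hdr {		/* assumed header: csum, magic, seq, next */
	uint64_t csum, magic, seq, next_bucket;
};

int main(void)
{
	unsigned long bucket_bytes = 1024 * 512;	/* e.g. 1024-sector buckets */
	unsigned long per_bucket =
		(bucket_bytes - sizeof(struct example_prio_set_hdr)) /
		sizeof(struct example_bucket_disk);

	/* each prio bucket then points at the next via next_bucket */
	printf("prio entries per bucket: %lu\n", per_bucket);
	return 0;
}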
513 | ||
514 | static void prio_endio(struct bio *bio, int error) | |
515 | { | |
516 | struct cache *ca = bio->bi_private; | |
517 | ||
518 | cache_set_err_on(error, ca->set, "accessing priorities"); | |
519 | bch_bbio_free(bio, ca->set); | |
520 | closure_put(&ca->prio); | |
521 | } | |
522 | ||
523 | static void prio_io(struct cache *ca, uint64_t bucket, unsigned long rw) | |
524 | { | |
525 | struct closure *cl = &ca->prio; | |
526 | struct bio *bio = bch_bbio_alloc(ca->set); | |
527 | ||
528 | closure_init_stack(cl); | |
529 | ||
4f024f37 KO |
530 | bio->bi_iter.bi_sector = bucket * ca->sb.bucket_size; |
531 | bio->bi_bdev = ca->bdev; | |
532 | bio->bi_rw = REQ_SYNC|REQ_META|rw; | |
533 | bio->bi_iter.bi_size = bucket_bytes(ca); | |
cafe5635 KO |
534 | |
535 | bio->bi_end_io = prio_endio; | |
536 | bio->bi_private = ca; | |
169ef1cf | 537 | bch_bio_map(bio, ca->disk_buckets); |
cafe5635 KO |
538 | |
539 | closure_bio_submit(bio, &ca->prio, ca); | |
540 | closure_sync(cl); | |
541 | } | |
542 | ||
543 | #define buckets_free(c) "free %zu, free_inc %zu, unused %zu", \ | |
544 | fifo_used(&c->free), fifo_used(&c->free_inc), fifo_used(&c->unused) | |
545 | ||
546 | void bch_prio_write(struct cache *ca) | |
547 | { | |
548 | int i; | |
549 | struct bucket *b; | |
550 | struct closure cl; | |
551 | ||
552 | closure_init_stack(&cl); | |
553 | ||
554 | lockdep_assert_held(&ca->set->bucket_lock); | |
555 | ||
556 | for (b = ca->buckets; | |
557 | b < ca->buckets + ca->sb.nbuckets; b++) | |
558 | b->disk_gen = b->gen; | |
559 | ||
560 | ca->disk_buckets->seq++; | |
561 | ||
562 | atomic_long_add(ca->sb.bucket_size * prio_buckets(ca), | |
563 | &ca->meta_sectors_written); | |
564 | ||
78365411 KO |
565 | //pr_debug("free %zu, free_inc %zu, unused %zu", fifo_used(&ca->free), |
566 | // fifo_used(&ca->free_inc), fifo_used(&ca->unused)); | |
cafe5635 KO |
567 | |
568 | for (i = prio_buckets(ca) - 1; i >= 0; --i) { | |
569 | long bucket; | |
570 | struct prio_set *p = ca->disk_buckets; | |
b1a67b0f KO |
571 | struct bucket_disk *d = p->data; |
572 | struct bucket_disk *end = d + prios_per_bucket(ca); | |
cafe5635 KO |
573 | |
574 | for (b = ca->buckets + i * prios_per_bucket(ca); | |
575 | b < ca->buckets + ca->sb.nbuckets && d < end; | |
576 | b++, d++) { | |
577 | d->prio = cpu_to_le16(b->prio); | |
578 | d->gen = b->gen; | |
579 | } | |
580 | ||
581 | p->next_bucket = ca->prio_buckets[i + 1]; | |
81ab4190 | 582 | p->magic = pset_magic(&ca->sb); |
169ef1cf | 583 | p->csum = bch_crc64(&p->magic, bucket_bytes(ca) - 8); |
cafe5635 | 584 | |
78365411 | 585 | bucket = bch_bucket_alloc(ca, RESERVE_PRIO, true); |
cafe5635 KO |
586 | BUG_ON(bucket == -1); |
587 | ||
588 | mutex_unlock(&ca->set->bucket_lock); | |
589 | prio_io(ca, bucket, REQ_WRITE); | |
590 | mutex_lock(&ca->set->bucket_lock); | |
591 | ||
592 | ca->prio_buckets[i] = bucket; | |
593 | atomic_dec_bug(&ca->buckets[bucket].pin); | |
594 | } | |
595 | ||
596 | mutex_unlock(&ca->set->bucket_lock); | |
597 | ||
598 | bch_journal_meta(ca->set, &cl); | |
599 | closure_sync(&cl); | |
600 | ||
601 | mutex_lock(&ca->set->bucket_lock); | |
602 | ||
603 | ca->need_save_prio = 0; | |
604 | ||
605 | /* | |
606 | * We don't want the old priorities to get garbage collected until after | |
607 | * we finish writing the new ones and they're journalled. | |
608 | */ | |
609 | for (i = 0; i < prio_buckets(ca); i++) | |
610 | ca->prio_last_buckets[i] = ca->prio_buckets[i]; | |
611 | } | |
612 | ||
613 | static void prio_read(struct cache *ca, uint64_t bucket) | |
614 | { | |
615 | struct prio_set *p = ca->disk_buckets; | |
616 | struct bucket_disk *d = p->data + prios_per_bucket(ca), *end = d; | |
617 | struct bucket *b; | |
618 | unsigned bucket_nr = 0; | |
619 | ||
620 | for (b = ca->buckets; | |
621 | b < ca->buckets + ca->sb.nbuckets; | |
622 | b++, d++) { | |
623 | if (d == end) { | |
624 | ca->prio_buckets[bucket_nr] = bucket; | |
625 | ca->prio_last_buckets[bucket_nr] = bucket; | |
626 | bucket_nr++; | |
627 | ||
628 | prio_io(ca, bucket, READ_SYNC); | |
629 | ||
169ef1cf | 630 | if (p->csum != bch_crc64(&p->magic, bucket_bytes(ca) - 8)) |
cafe5635 KO |
631 | pr_warn("bad csum reading priorities"); |
632 | ||
81ab4190 | 633 | if (p->magic != pset_magic(&ca->sb)) |
cafe5635 KO |
634 | pr_warn("bad magic reading priorities"); |
635 | ||
636 | bucket = p->next_bucket; | |
637 | d = p->data; | |
638 | } | |
639 | ||
640 | b->prio = le16_to_cpu(d->prio); | |
641 | b->gen = b->disk_gen = b->last_gc = b->gc_gen = d->gen; | |
642 | } | |
643 | } | |
644 | ||
645 | /* Bcache device */ | |
646 | ||
647 | static int open_dev(struct block_device *b, fmode_t mode) | |
648 | { | |
649 | struct bcache_device *d = b->bd_disk->private_data; | |
c4d951dd | 650 | if (test_bit(BCACHE_DEV_CLOSING, &d->flags)) |
cafe5635 KO |
651 | return -ENXIO; |
652 | ||
653 | closure_get(&d->cl); | |
654 | return 0; | |
655 | } | |
656 | ||
867e1162 | 657 | static void release_dev(struct gendisk *b, fmode_t mode) |
cafe5635 KO |
658 | { |
659 | struct bcache_device *d = b->private_data; | |
660 | closure_put(&d->cl); | |
cafe5635 KO |
661 | } |
662 | ||
663 | static int ioctl_dev(struct block_device *b, fmode_t mode, | |
664 | unsigned int cmd, unsigned long arg) | |
665 | { | |
666 | struct bcache_device *d = b->bd_disk->private_data; | |
667 | return d->ioctl(d, mode, cmd, arg); | |
668 | } | |
669 | ||
670 | static const struct block_device_operations bcache_ops = { | |
671 | .open = open_dev, | |
672 | .release = release_dev, | |
673 | .ioctl = ioctl_dev, | |
674 | .owner = THIS_MODULE, | |
675 | }; | |
676 | ||
677 | void bcache_device_stop(struct bcache_device *d) | |
678 | { | |
c4d951dd | 679 | if (!test_and_set_bit(BCACHE_DEV_CLOSING, &d->flags)) |
cafe5635 KO |
680 | closure_queue(&d->cl); |
681 | } | |
682 | ||
ee668506 KO |
683 | static void bcache_device_unlink(struct bcache_device *d) |
684 | { | |
c4d951dd | 685 | lockdep_assert_held(&bch_register_lock); |
ee668506 | 686 | |
c4d951dd KO |
687 | if (d->c && !test_and_set_bit(BCACHE_DEV_UNLINK_DONE, &d->flags)) { |
688 | unsigned i; | |
689 | struct cache *ca; | |
ee668506 | 690 | |
c4d951dd KO |
691 | sysfs_remove_link(&d->c->kobj, d->name); |
692 | sysfs_remove_link(&d->kobj, "cache"); | |
693 | ||
694 | for_each_cache(ca, d->c, i) | |
695 | bd_unlink_disk_holder(ca->bdev, d->disk); | |
696 | } | |
ee668506 KO |
697 | } |
698 | ||
699 | static void bcache_device_link(struct bcache_device *d, struct cache_set *c, | |
700 | const char *name) | |
701 | { | |
702 | unsigned i; | |
703 | struct cache *ca; | |
704 | ||
705 | for_each_cache(ca, d->c, i) | |
706 | bd_link_disk_holder(ca->bdev, d->disk); | |
707 | ||
708 | snprintf(d->name, BCACHEDEVNAME_SIZE, | |
709 | "%s%u", name, d->id); | |
710 | ||
711 | WARN(sysfs_create_link(&d->kobj, &c->kobj, "cache") || | |
712 | sysfs_create_link(&c->kobj, &d->kobj, d->name), | |
713 | "Couldn't create device <-> cache set symlinks"); | |
714 | } | |
715 | ||
cafe5635 KO |
716 | static void bcache_device_detach(struct bcache_device *d) |
717 | { | |
718 | lockdep_assert_held(&bch_register_lock); | |
719 | ||
c4d951dd | 720 | if (test_bit(BCACHE_DEV_DETACHING, &d->flags)) { |
cafe5635 KO |
721 | struct uuid_entry *u = d->c->uuids + d->id; |
722 | ||
723 | SET_UUID_FLASH_ONLY(u, 0); | |
724 | memcpy(u->uuid, invalid_uuid, 16); | |
725 | u->invalidated = cpu_to_le32(get_seconds()); | |
726 | bch_uuid_write(d->c); | |
cafe5635 KO |
727 | } |
728 | ||
c4d951dd | 729 | bcache_device_unlink(d); |
ee668506 | 730 | |
cafe5635 KO |
731 | d->c->devices[d->id] = NULL; |
732 | closure_put(&d->c->caching); | |
733 | d->c = NULL; | |
734 | } | |
735 | ||
736 | static void bcache_device_attach(struct bcache_device *d, struct cache_set *c, | |
737 | unsigned id) | |
738 | { | |
739 | BUG_ON(test_bit(CACHE_SET_STOPPING, &c->flags)); | |
740 | ||
741 | d->id = id; | |
742 | d->c = c; | |
743 | c->devices[id] = d; | |
744 | ||
745 | closure_get(&c->caching); | |
746 | } | |
747 | ||
cafe5635 KO |
748 | static void bcache_device_free(struct bcache_device *d) |
749 | { | |
750 | lockdep_assert_held(&bch_register_lock); | |
751 | ||
752 | pr_info("%s stopped", d->disk->disk_name); | |
753 | ||
754 | if (d->c) | |
755 | bcache_device_detach(d); | |
f59fce84 | 756 | if (d->disk && d->disk->flags & GENHD_FL_UP) |
cafe5635 KO |
757 | del_gendisk(d->disk); |
758 | if (d->disk && d->disk->queue) | |
759 | blk_cleanup_queue(d->disk->queue); | |
28935ab5 KO |
760 | if (d->disk) { |
761 | ida_simple_remove(&bcache_minor, d->disk->first_minor); | |
cafe5635 | 762 | put_disk(d->disk); |
28935ab5 | 763 | } |
cafe5635 KO |
764 | |
765 | bio_split_pool_free(&d->bio_split_hook); | |
cafe5635 KO |
766 | if (d->bio_split) |
767 | bioset_free(d->bio_split); | |
48a915a8 KO |
768 | if (is_vmalloc_addr(d->full_dirty_stripes)) |
769 | vfree(d->full_dirty_stripes); | |
770 | else | |
771 | kfree(d->full_dirty_stripes); | |
279afbad KO |
772 | if (is_vmalloc_addr(d->stripe_sectors_dirty)) |
773 | vfree(d->stripe_sectors_dirty); | |
774 | else | |
775 | kfree(d->stripe_sectors_dirty); | |
cafe5635 KO |
776 | |
777 | closure_debug_destroy(&d->cl); | |
778 | } | |
779 | ||
279afbad KO |
780 | static int bcache_device_init(struct bcache_device *d, unsigned block_size, |
781 | sector_t sectors) | |
cafe5635 KO |
782 | { |
783 | struct request_queue *q; | |
279afbad | 784 | size_t n; |
28935ab5 | 785 | int minor; |
279afbad | 786 | |
2d679fc7 KO |
787 | if (!d->stripe_size) |
788 | d->stripe_size = 1 << 31; | |
279afbad | 789 | |
2d679fc7 | 790 | d->nr_stripes = DIV_ROUND_UP_ULL(sectors, d->stripe_size); |
279afbad | 791 | |
48a915a8 KO |
792 | if (!d->nr_stripes || |
793 | d->nr_stripes > INT_MAX || | |
794 | d->nr_stripes > SIZE_MAX / sizeof(atomic_t)) { | |
795 | pr_err("nr_stripes too large"); | |
279afbad | 796 | return -ENOMEM; |
48a915a8 | 797 | } |
279afbad KO |
798 | |
799 | n = d->nr_stripes * sizeof(atomic_t); | |
800 | d->stripe_sectors_dirty = n < PAGE_SIZE << 6 | |
801 | ? kzalloc(n, GFP_KERNEL) | |
802 | : vzalloc(n); | |
803 | if (!d->stripe_sectors_dirty) | |
804 | return -ENOMEM; | |
cafe5635 | 805 | |
48a915a8 KO |
806 | n = BITS_TO_LONGS(d->nr_stripes) * sizeof(unsigned long); |
807 | d->full_dirty_stripes = n < PAGE_SIZE << 6 | |
808 | ? kzalloc(n, GFP_KERNEL) | |
809 | : vzalloc(n); | |
810 | if (!d->full_dirty_stripes) | |
811 | return -ENOMEM; | |
812 | ||
28935ab5 KO |
813 | minor = ida_simple_get(&bcache_minor, 0, MINORMASK + 1, GFP_KERNEL); |
814 | if (minor < 0) | |
815 | return minor; | |
816 | ||
cafe5635 | 817 | if (!(d->bio_split = bioset_create(4, offsetof(struct bbio, bio))) || |
f59fce84 | 818 | bio_split_pool_init(&d->bio_split_hook) || |
28935ab5 KO |
819 | !(d->disk = alloc_disk(1))) { |
820 | ida_simple_remove(&bcache_minor, minor); | |
cafe5635 | 821 | return -ENOMEM; |
28935ab5 | 822 | } |
cafe5635 | 823 | |
279afbad | 824 | set_capacity(d->disk, sectors); |
28935ab5 | 825 | snprintf(d->disk->disk_name, DISK_NAME_LEN, "bcache%i", minor); |
cafe5635 KO |
826 | |
827 | d->disk->major = bcache_major; | |
28935ab5 | 828 | d->disk->first_minor = minor; |
cafe5635 KO |
829 | d->disk->fops = &bcache_ops; |
830 | d->disk->private_data = d; | |
831 | ||
28935ab5 KO |
832 | q = blk_alloc_queue(GFP_KERNEL); |
833 | if (!q) | |
834 | return -ENOMEM; | |
835 | ||
cafe5635 KO |
836 | blk_queue_make_request(q, NULL); |
837 | d->disk->queue = q; | |
838 | q->queuedata = d; | |
839 | q->backing_dev_info.congested_data = d; | |
840 | q->limits.max_hw_sectors = UINT_MAX; | |
841 | q->limits.max_sectors = UINT_MAX; | |
842 | q->limits.max_segment_size = UINT_MAX; | |
843 | q->limits.max_segments = BIO_MAX_PAGES; | |
844 | q->limits.max_discard_sectors = UINT_MAX; | |
845 | q->limits.io_min = block_size; | |
846 | q->limits.logical_block_size = block_size; | |
847 | q->limits.physical_block_size = block_size; | |
848 | set_bit(QUEUE_FLAG_NONROT, &d->disk->queue->queue_flags); | |
849 | set_bit(QUEUE_FLAG_DISCARD, &d->disk->queue->queue_flags); | |
850 | ||
54d12f2b KO |
851 | blk_queue_flush(q, REQ_FLUSH|REQ_FUA); |
852 | ||
cafe5635 KO |
853 | return 0; |
854 | } | |
855 | ||
856 | /* Cached device */ | |
857 | ||
858 | static void calc_cached_dev_sectors(struct cache_set *c) | |
859 | { | |
860 | uint64_t sectors = 0; | |
861 | struct cached_dev *dc; | |
862 | ||
863 | list_for_each_entry(dc, &c->cached_devs, list) | |
864 | sectors += bdev_sectors(dc->bdev); | |
865 | ||
866 | c->cached_dev_sectors = sectors; | |
867 | } | |
868 | ||
869 | void bch_cached_dev_run(struct cached_dev *dc) | |
870 | { | |
871 | struct bcache_device *d = &dc->disk; | |
ab9e1400 | 872 | char buf[SB_LABEL_SIZE + 1]; |
a25c32be GP |
873 | char *env[] = { |
874 | "DRIVER=bcache", | |
875 | kasprintf(GFP_KERNEL, "CACHED_UUID=%pU", dc->sb.uuid), | |
ab9e1400 GP |
876 | NULL, |
877 | NULL, | |
a25c32be | 878 | }; |
cafe5635 | 879 | |
ab9e1400 GP |
880 | memcpy(buf, dc->sb.label, SB_LABEL_SIZE); |
881 | buf[SB_LABEL_SIZE] = '\0'; | |
882 | env[2] = kasprintf(GFP_KERNEL, "CACHED_LABEL=%s", buf); | |
883 | ||
cafe5635 KO |
884 | if (atomic_xchg(&dc->running, 1)) |
885 | return; | |
886 | ||
887 | if (!d->c && | |
888 | BDEV_STATE(&dc->sb) != BDEV_STATE_NONE) { | |
889 | struct closure cl; | |
890 | closure_init_stack(&cl); | |
891 | ||
892 | SET_BDEV_STATE(&dc->sb, BDEV_STATE_STALE); | |
893 | bch_write_bdev_super(dc, &cl); | |
894 | closure_sync(&cl); | |
895 | } | |
896 | ||
897 | add_disk(d->disk); | |
ee668506 | 898 | bd_link_disk_holder(dc->bdev, dc->disk.disk); |
a25c32be GP |
899 | /* won't show up in the uevent file; use udevadm monitor -e instead.
900 | * Only class / kset properties are persistent */
cafe5635 | 901 | kobject_uevent_env(&disk_to_dev(d->disk)->kobj, KOBJ_CHANGE, env); |
a25c32be | 902 | kfree(env[1]); |
ab9e1400 | 903 | kfree(env[2]); |
a25c32be | 904 | |
cafe5635 KO |
905 | if (sysfs_create_link(&d->kobj, &disk_to_dev(d->disk)->kobj, "dev") || |
906 | sysfs_create_link(&disk_to_dev(d->disk)->kobj, &d->kobj, "bcache")) | |
907 | pr_debug("error creating sysfs link"); | |
908 | } | |
909 | ||
910 | static void cached_dev_detach_finish(struct work_struct *w) | |
911 | { | |
912 | struct cached_dev *dc = container_of(w, struct cached_dev, detach); | |
913 | char buf[BDEVNAME_SIZE]; | |
914 | struct closure cl; | |
915 | closure_init_stack(&cl); | |
916 | ||
c4d951dd | 917 | BUG_ON(!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags)); |
cafe5635 KO |
918 | BUG_ON(atomic_read(&dc->count)); |
919 | ||
cafe5635 KO |
920 | mutex_lock(&bch_register_lock); |
921 | ||
922 | memset(&dc->sb.set_uuid, 0, 16); | |
923 | SET_BDEV_STATE(&dc->sb, BDEV_STATE_NONE); | |
924 | ||
925 | bch_write_bdev_super(dc, &cl); | |
926 | closure_sync(&cl); | |
927 | ||
928 | bcache_device_detach(&dc->disk); | |
929 | list_move(&dc->list, &uncached_devices); | |
930 | ||
c4d951dd KO |
931 | clear_bit(BCACHE_DEV_DETACHING, &dc->disk.flags); |
932 | ||
cafe5635 KO |
933 | mutex_unlock(&bch_register_lock); |
934 | ||
935 | pr_info("Caching disabled for %s", bdevname(dc->bdev, buf)); | |
936 | ||
937 | /* Drop ref we took in cached_dev_detach() */ | |
938 | closure_put(&dc->disk.cl); | |
939 | } | |
940 | ||
941 | void bch_cached_dev_detach(struct cached_dev *dc) | |
942 | { | |
943 | lockdep_assert_held(&bch_register_lock); | |
944 | ||
c4d951dd | 945 | if (test_bit(BCACHE_DEV_CLOSING, &dc->disk.flags)) |
cafe5635 KO |
946 | return; |
947 | ||
c4d951dd | 948 | if (test_and_set_bit(BCACHE_DEV_DETACHING, &dc->disk.flags)) |
cafe5635 KO |
949 | return; |
950 | ||
951 | /* | |
952 | * Block the device from being closed and freed until we're finished | |
953 | * detaching | |
954 | */ | |
955 | closure_get(&dc->disk.cl); | |
956 | ||
957 | bch_writeback_queue(dc); | |
958 | cached_dev_put(dc); | |
959 | } | |
960 | ||
961 | int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c) | |
962 | { | |
963 | uint32_t rtime = cpu_to_le32(get_seconds()); | |
964 | struct uuid_entry *u; | |
965 | char buf[BDEVNAME_SIZE]; | |
966 | ||
967 | bdevname(dc->bdev, buf); | |
968 | ||
969 | if (memcmp(dc->sb.set_uuid, c->sb.set_uuid, 16)) | |
970 | return -ENOENT; | |
971 | ||
972 | if (dc->disk.c) { | |
973 | pr_err("Can't attach %s: already attached", buf); | |
974 | return -EINVAL; | |
975 | } | |
976 | ||
977 | if (test_bit(CACHE_SET_STOPPING, &c->flags)) { | |
978 | pr_err("Can't attach %s: shutting down", buf); | |
979 | return -EINVAL; | |
980 | } | |
981 | ||
982 | if (dc->sb.block_size < c->sb.block_size) { | |
983 | /* Will die */ | |
b1a67b0f KO |
984 | pr_err("Couldn't attach %s: block size less than set's block size", |
985 | buf); | |
cafe5635 KO |
986 | return -EINVAL; |
987 | } | |
988 | ||
989 | u = uuid_find(c, dc->sb.uuid); | |
990 | ||
991 | if (u && | |
992 | (BDEV_STATE(&dc->sb) == BDEV_STATE_STALE || | |
993 | BDEV_STATE(&dc->sb) == BDEV_STATE_NONE)) { | |
994 | memcpy(u->uuid, invalid_uuid, 16); | |
995 | u->invalidated = cpu_to_le32(get_seconds()); | |
996 | u = NULL; | |
997 | } | |
998 | ||
999 | if (!u) { | |
1000 | if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) { | |
1001 | pr_err("Couldn't find uuid for %s in set", buf); | |
1002 | return -ENOENT; | |
1003 | } | |
1004 | ||
1005 | u = uuid_find_empty(c); | |
1006 | if (!u) { | |
1007 | pr_err("Not caching %s, no room for UUID", buf); | |
1008 | return -EINVAL; | |
1009 | } | |
1010 | } | |
1011 | ||
1012 | /* Deadlocks since we're called via sysfs... | |
1013 | sysfs_remove_file(&dc->kobj, &sysfs_attach); | |
1014 | */ | |
1015 | ||
169ef1cf | 1016 | if (bch_is_zero(u->uuid, 16)) { |
cafe5635 KO |
1017 | struct closure cl; |
1018 | closure_init_stack(&cl); | |
1019 | ||
1020 | memcpy(u->uuid, dc->sb.uuid, 16); | |
1021 | memcpy(u->label, dc->sb.label, SB_LABEL_SIZE); | |
1022 | u->first_reg = u->last_reg = rtime; | |
1023 | bch_uuid_write(c); | |
1024 | ||
1025 | memcpy(dc->sb.set_uuid, c->sb.set_uuid, 16); | |
1026 | SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN); | |
1027 | ||
1028 | bch_write_bdev_super(dc, &cl); | |
1029 | closure_sync(&cl); | |
1030 | } else { | |
1031 | u->last_reg = rtime; | |
1032 | bch_uuid_write(c); | |
1033 | } | |
1034 | ||
1035 | bcache_device_attach(&dc->disk, c, u - c->uuids); | |
cafe5635 KO |
1036 | list_move(&dc->list, &c->cached_devs); |
1037 | calc_cached_dev_sectors(c); | |
1038 | ||
1039 | smp_wmb(); | |
1040 | /* | |
1041 | * dc->c must be set before dc->count != 0 - paired with the mb in | |
1042 | * cached_dev_get() | |
1043 | */ | |
1044 | atomic_set(&dc->count, 1); | |
1045 | ||
1046 | if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) { | |
444fc0b6 | 1047 | bch_sectors_dirty_init(dc); |
cafe5635 KO |
1048 | atomic_set(&dc->has_dirty, 1); |
1049 | atomic_inc(&dc->count); | |
1050 | bch_writeback_queue(dc); | |
1051 | } | |
1052 | ||
1053 | bch_cached_dev_run(dc); | |
ee668506 | 1054 | bcache_device_link(&dc->disk, c, "bdev"); |
cafe5635 KO |
1055 | |
1056 | pr_info("Caching %s as %s on set %pU", | |
1057 | bdevname(dc->bdev, buf), dc->disk.disk->disk_name, | |
1058 | dc->disk.c->sb.set_uuid); | |
1059 | return 0; | |
1060 | } | |
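/*
 * Editor's note (illustrative, not part of the file): the smp_wmb() above is
 * the writer half of a publish pattern -- dc->c and friends are fully set up
 * before dc->count becomes non-zero.  Readers are assumed to do the inverse
 * in cached_dev_get(): atomically increment the count only if it is already
 * non-zero, then issue the paired barrier before dereferencing dc->c, so a
 * reader that sees count != 0 also sees a fully initialised dc->c.
 */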
1061 | ||
1062 | void bch_cached_dev_release(struct kobject *kobj) | |
1063 | { | |
1064 | struct cached_dev *dc = container_of(kobj, struct cached_dev, | |
1065 | disk.kobj); | |
1066 | kfree(dc); | |
1067 | module_put(THIS_MODULE); | |
1068 | } | |
1069 | ||
1070 | static void cached_dev_free(struct closure *cl) | |
1071 | { | |
1072 | struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl); | |
1073 | ||
1074 | cancel_delayed_work_sync(&dc->writeback_rate_update); | |
5e6926da | 1075 | kthread_stop(dc->writeback_thread); |
cafe5635 KO |
1076 | |
1077 | mutex_lock(&bch_register_lock); | |
1078 | ||
f59fce84 KO |
1079 | if (atomic_read(&dc->running)) |
1080 | bd_unlink_disk_holder(dc->bdev, dc->disk.disk); | |
cafe5635 KO |
1081 | bcache_device_free(&dc->disk); |
1082 | list_del(&dc->list); | |
1083 | ||
1084 | mutex_unlock(&bch_register_lock); | |
1085 | ||
1086 | if (!IS_ERR_OR_NULL(dc->bdev)) { | |
f59fce84 KO |
1087 | if (dc->bdev->bd_disk) |
1088 | blk_sync_queue(bdev_get_queue(dc->bdev)); | |
1089 | ||
cafe5635 KO |
1090 | blkdev_put(dc->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); |
1091 | } | |
1092 | ||
1093 | wake_up(&unregister_wait); | |
1094 | ||
1095 | kobject_put(&dc->disk.kobj); | |
1096 | } | |
1097 | ||
1098 | static void cached_dev_flush(struct closure *cl) | |
1099 | { | |
1100 | struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl); | |
1101 | struct bcache_device *d = &dc->disk; | |
1102 | ||
c9502ea4 | 1103 | mutex_lock(&bch_register_lock); |
c4d951dd | 1104 | bcache_device_unlink(d); |
c9502ea4 KO |
1105 | mutex_unlock(&bch_register_lock); |
1106 | ||
cafe5635 KO |
1107 | bch_cache_accounting_destroy(&dc->accounting); |
1108 | kobject_del(&d->kobj); | |
1109 | ||
1110 | continue_at(cl, cached_dev_free, system_wq); | |
1111 | } | |
1112 | ||
1113 | static int cached_dev_init(struct cached_dev *dc, unsigned block_size) | |
1114 | { | |
f59fce84 | 1115 | int ret; |
cafe5635 | 1116 | struct io *io; |
f59fce84 | 1117 | struct request_queue *q = bdev_get_queue(dc->bdev); |
cafe5635 KO |
1118 | |
1119 | __module_get(THIS_MODULE); | |
1120 | INIT_LIST_HEAD(&dc->list); | |
f59fce84 KO |
1121 | closure_init(&dc->disk.cl, NULL); |
1122 | set_closure_fn(&dc->disk.cl, cached_dev_flush, system_wq); | |
cafe5635 | 1123 | kobject_init(&dc->disk.kobj, &bch_cached_dev_ktype); |
cafe5635 | 1124 | INIT_WORK(&dc->detach, cached_dev_detach_finish); |
cb7a583e | 1125 | sema_init(&dc->sb_write_mutex, 1); |
f59fce84 KO |
1126 | INIT_LIST_HEAD(&dc->io_lru); |
1127 | spin_lock_init(&dc->io_lock); | |
1128 | bch_cache_accounting_init(&dc->accounting, &dc->disk.cl); | |
cafe5635 | 1129 | |
cafe5635 KO |
1130 | dc->sequential_cutoff = 4 << 20; |
1131 | ||
cafe5635 KO |
1132 | for (io = dc->io; io < dc->io + RECENT_IO; io++) { |
1133 | list_add(&io->lru, &dc->io_lru); | |
1134 | hlist_add_head(&io->hash, dc->io_hash + RECENT_IO); | |
1135 | } | |
1136 | ||
279afbad KO |
1137 | ret = bcache_device_init(&dc->disk, block_size, |
1138 | dc->bdev->bd_part->nr_sects - dc->sb.data_offset); | |
f59fce84 KO |
1139 | if (ret) |
1140 | return ret; | |
1141 | ||
1142 | set_capacity(dc->disk.disk, | |
1143 | dc->bdev->bd_part->nr_sects - dc->sb.data_offset); | |
1144 | ||
1145 | dc->disk.disk->queue->backing_dev_info.ra_pages = | |
1146 | max(dc->disk.disk->queue->backing_dev_info.ra_pages, | |
1147 | q->backing_dev_info.ra_pages); | |
1148 | ||
1149 | bch_cached_dev_request_init(dc); | |
1150 | bch_cached_dev_writeback_init(dc); | |
cafe5635 | 1151 | return 0; |
cafe5635 KO |
1152 | } |
1153 | ||
1154 | /* Cached device - bcache superblock */ | |
1155 | ||
f59fce84 | 1156 | static void register_bdev(struct cache_sb *sb, struct page *sb_page, |
cafe5635 KO |
1157 | struct block_device *bdev, |
1158 | struct cached_dev *dc) | |
1159 | { | |
1160 | char name[BDEVNAME_SIZE]; | |
1161 | const char *err = "cannot allocate memory"; | |
cafe5635 KO |
1162 | struct cache_set *c; |
1163 | ||
cafe5635 | 1164 | memcpy(&dc->sb, sb, sizeof(struct cache_sb)); |
cafe5635 KO |
1165 | dc->bdev = bdev; |
1166 | dc->bdev->bd_holder = dc; | |
1167 | ||
f59fce84 KO |
1168 | bio_init(&dc->sb_bio); |
1169 | dc->sb_bio.bi_max_vecs = 1; | |
1170 | dc->sb_bio.bi_io_vec = dc->sb_bio.bi_inline_vecs; | |
1171 | dc->sb_bio.bi_io_vec[0].bv_page = sb_page; | |
1172 | get_page(sb_page); | |
4f0fd955 | 1173 | |
f59fce84 KO |
1174 | if (cached_dev_init(dc, sb->block_size << 9)) |
1175 | goto err; | |
cafe5635 KO |
1176 | |
1177 | err = "error creating kobject"; | |
1178 | if (kobject_add(&dc->disk.kobj, &part_to_dev(bdev->bd_part)->kobj, | |
1179 | "bcache")) | |
1180 | goto err; | |
1181 | if (bch_cache_accounting_add_kobjs(&dc->accounting, &dc->disk.kobj)) | |
1182 | goto err; | |
1183 | ||
f59fce84 KO |
1184 | pr_info("registered backing device %s", bdevname(bdev, name)); |
1185 | ||
cafe5635 KO |
1186 | list_add(&dc->list, &uncached_devices); |
1187 | list_for_each_entry(c, &bch_cache_sets, list) | |
1188 | bch_cached_dev_attach(dc, c); | |
1189 | ||
1190 | if (BDEV_STATE(&dc->sb) == BDEV_STATE_NONE || | |
1191 | BDEV_STATE(&dc->sb) == BDEV_STATE_STALE) | |
1192 | bch_cached_dev_run(dc); | |
1193 | ||
f59fce84 | 1194 | return; |
cafe5635 | 1195 | err: |
cafe5635 | 1196 | pr_notice("error opening %s: %s", bdevname(bdev, name), err); |
f59fce84 | 1197 | bcache_device_stop(&dc->disk); |
cafe5635 KO |
1198 | } |
1199 | ||
1200 | /* Flash only volumes */ | |
1201 | ||
1202 | void bch_flash_dev_release(struct kobject *kobj) | |
1203 | { | |
1204 | struct bcache_device *d = container_of(kobj, struct bcache_device, | |
1205 | kobj); | |
1206 | kfree(d); | |
1207 | } | |
1208 | ||
1209 | static void flash_dev_free(struct closure *cl) | |
1210 | { | |
1211 | struct bcache_device *d = container_of(cl, struct bcache_device, cl); | |
1212 | bcache_device_free(d); | |
1213 | kobject_put(&d->kobj); | |
1214 | } | |
1215 | ||
1216 | static void flash_dev_flush(struct closure *cl) | |
1217 | { | |
1218 | struct bcache_device *d = container_of(cl, struct bcache_device, cl); | |
1219 | ||
ee668506 | 1220 | bcache_device_unlink(d); |
cafe5635 KO |
1221 | kobject_del(&d->kobj); |
1222 | continue_at(cl, flash_dev_free, system_wq); | |
1223 | } | |
1224 | ||
1225 | static int flash_dev_run(struct cache_set *c, struct uuid_entry *u) | |
1226 | { | |
1227 | struct bcache_device *d = kzalloc(sizeof(struct bcache_device), | |
1228 | GFP_KERNEL); | |
1229 | if (!d) | |
1230 | return -ENOMEM; | |
1231 | ||
1232 | closure_init(&d->cl, NULL); | |
1233 | set_closure_fn(&d->cl, flash_dev_flush, system_wq); | |
1234 | ||
1235 | kobject_init(&d->kobj, &bch_flash_dev_ktype); | |
1236 | ||
279afbad | 1237 | if (bcache_device_init(d, block_bytes(c), u->sectors)) |
cafe5635 KO |
1238 | goto err; |
1239 | ||
1240 | bcache_device_attach(d, c, u - c->uuids); | |
cafe5635 KO |
1241 | bch_flash_dev_request_init(d); |
1242 | add_disk(d->disk); | |
1243 | ||
1244 | if (kobject_add(&d->kobj, &disk_to_dev(d->disk)->kobj, "bcache")) | |
1245 | goto err; | |
1246 | ||
1247 | bcache_device_link(d, c, "volume"); | |
1248 | ||
1249 | return 0; | |
1250 | err: | |
1251 | kobject_put(&d->kobj); | |
1252 | return -ENOMEM; | |
1253 | } | |
1254 | ||
1255 | static int flash_devs_run(struct cache_set *c) | |
1256 | { | |
1257 | int ret = 0; | |
1258 | struct uuid_entry *u; | |
1259 | ||
1260 | for (u = c->uuids; | |
1261 | u < c->uuids + c->nr_uuids && !ret; | |
1262 | u++) | |
1263 | if (UUID_FLASH_ONLY(u)) | |
1264 | ret = flash_dev_run(c, u); | |
1265 | ||
1266 | return ret; | |
1267 | } | |
1268 | ||
1269 | int bch_flash_dev_create(struct cache_set *c, uint64_t size) | |
1270 | { | |
1271 | struct uuid_entry *u; | |
1272 | ||
1273 | if (test_bit(CACHE_SET_STOPPING, &c->flags)) | |
1274 | return -EINTR; | |
1275 | ||
1276 | u = uuid_find_empty(c); | |
1277 | if (!u) { | |
1278 | pr_err("Can't create volume, no room for UUID"); | |
1279 | return -EINVAL; | |
1280 | } | |
1281 | ||
1282 | get_random_bytes(u->uuid, 16); | |
1283 | memset(u->label, 0, 32); | |
1284 | u->first_reg = u->last_reg = cpu_to_le32(get_seconds()); | |
1285 | ||
1286 | SET_UUID_FLASH_ONLY(u, 1); | |
1287 | u->sectors = size >> 9; | |
1288 | ||
1289 | bch_uuid_write(c); | |
1290 | ||
1291 | return flash_dev_run(c, u); | |
1292 | } | |
1293 | ||
1294 | /* Cache set */ | |
1295 | ||
1296 | __printf(2, 3) | |
1297 | bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...) | |
1298 | { | |
1299 | va_list args; | |
1300 | ||
77c320eb KO |
1301 | if (c->on_error != ON_ERROR_PANIC && |
1302 | test_bit(CACHE_SET_STOPPING, &c->flags)) | |
cafe5635 KO |
1303 | return false; |
1304 | ||
1305 | /* XXX: we can be called from atomic context | |
1306 | acquire_console_sem(); | |
1307 | */ | |
1308 | ||
1309 | printk(KERN_ERR "bcache: error on %pU: ", c->sb.set_uuid); | |
1310 | ||
1311 | va_start(args, fmt); | |
1312 | vprintk(fmt, args); | |
1313 | va_end(args); | |
1314 | ||
1315 | printk(", disabling caching\n"); | |
1316 | ||
77c320eb KO |
1317 | if (c->on_error == ON_ERROR_PANIC) |
1318 | panic("panic forced after error\n"); | |
1319 | ||
cafe5635 KO |
1320 | bch_cache_set_unregister(c); |
1321 | return true; | |
1322 | } | |
1323 | ||
1324 | void bch_cache_set_release(struct kobject *kobj) | |
1325 | { | |
1326 | struct cache_set *c = container_of(kobj, struct cache_set, kobj); | |
1327 | kfree(c); | |
1328 | module_put(THIS_MODULE); | |
1329 | } | |
1330 | ||
1331 | static void cache_set_free(struct closure *cl) | |
1332 | { | |
1333 | struct cache_set *c = container_of(cl, struct cache_set, cl); | |
1334 | struct cache *ca; | |
1335 | unsigned i; | |
1336 | ||
1337 | if (!IS_ERR_OR_NULL(c->debug)) | |
1338 | debugfs_remove(c->debug); | |
1339 | ||
1340 | bch_open_buckets_free(c); | |
1341 | bch_btree_cache_free(c); | |
1342 | bch_journal_free(c); | |
1343 | ||
1344 | for_each_cache(ca, c, i) | |
1345 | if (ca) | |
1346 | kobject_put(&ca->kobj); | |
1347 | ||
1348 | free_pages((unsigned long) c->uuids, ilog2(bucket_pages(c))); | |
1349 | free_pages((unsigned long) c->sort, ilog2(bucket_pages(c))); | |
1350 | ||
cafe5635 KO |
1351 | if (c->bio_split) |
1352 | bioset_free(c->bio_split); | |
57943511 KO |
1353 | if (c->fill_iter) |
1354 | mempool_destroy(c->fill_iter); | |
cafe5635 KO |
1355 | if (c->bio_meta) |
1356 | mempool_destroy(c->bio_meta); | |
1357 | if (c->search) | |
1358 | mempool_destroy(c->search); | |
1359 | kfree(c->devices); | |
1360 | ||
1361 | mutex_lock(&bch_register_lock); | |
1362 | list_del(&c->list); | |
1363 | mutex_unlock(&bch_register_lock); | |
1364 | ||
1365 | pr_info("Cache set %pU unregistered", c->sb.set_uuid); | |
1366 | wake_up(&unregister_wait); | |
1367 | ||
1368 | closure_debug_destroy(&c->cl); | |
1369 | kobject_put(&c->kobj); | |
1370 | } | |
1371 | ||
1372 | static void cache_set_flush(struct closure *cl) | |
1373 | { | |
1374 | struct cache_set *c = container_of(cl, struct cache_set, caching); | |
79826c35 | 1375 | struct cache *ca; |
cafe5635 | 1376 | struct btree *b; |
79826c35 | 1377 | unsigned i; |
cafe5635 KO |
1378 | |
1379 | bch_cache_accounting_destroy(&c->accounting); | |
1380 | ||
1381 | kobject_put(&c->internal); | |
1382 | kobject_del(&c->kobj); | |
1383 | ||
72a44517 KO |
1384 | if (c->gc_thread) |
1385 | kthread_stop(c->gc_thread); | |
1386 | ||
cafe5635 KO |
1387 | if (!IS_ERR_OR_NULL(c->root)) |
1388 | list_add(&c->root->list, &c->btree_cache); | |
1389 | ||
1390 | /* Should skip this if we're unregistering because of an error */ | |
1391 | list_for_each_entry(b, &c->btree_cache, list) | |
1392 | if (btree_node_dirty(b)) | |
57943511 | 1393 | bch_btree_node_write(b, NULL); |
cafe5635 | 1394 | |
79826c35 KO |
1395 | for_each_cache(ca, c, i) |
1396 | if (ca->alloc_thread) | |
1397 | kthread_stop(ca->alloc_thread); | |
1398 | ||
cafe5635 KO |
1399 | closure_return(cl); |
1400 | } | |
1401 | ||
1402 | static void __cache_set_unregister(struct closure *cl) | |
1403 | { | |
1404 | struct cache_set *c = container_of(cl, struct cache_set, caching); | |
5caa52af | 1405 | struct cached_dev *dc; |
cafe5635 KO |
1406 | size_t i; |
1407 | ||
1408 | mutex_lock(&bch_register_lock); | |
1409 | ||
cafe5635 | 1410 | for (i = 0; i < c->nr_uuids; i++) |
5caa52af KO |
1411 | if (c->devices[i]) { |
1412 | if (!UUID_FLASH_ONLY(&c->uuids[i]) && | |
1413 | test_bit(CACHE_SET_UNREGISTERING, &c->flags)) { | |
1414 | dc = container_of(c->devices[i], | |
1415 | struct cached_dev, disk); | |
1416 | bch_cached_dev_detach(dc); | |
1417 | } else { | |
1418 | bcache_device_stop(c->devices[i]); | |
1419 | } | |
1420 | } | |
cafe5635 KO |
1421 | |
1422 | mutex_unlock(&bch_register_lock); | |
1423 | ||
1424 | continue_at(cl, cache_set_flush, system_wq); | |
1425 | } | |
1426 | ||
1427 | void bch_cache_set_stop(struct cache_set *c) | |
1428 | { | |
1429 | if (!test_and_set_bit(CACHE_SET_STOPPING, &c->flags)) | |
1430 | closure_queue(&c->caching); | |
1431 | } | |
1432 | ||
1433 | void bch_cache_set_unregister(struct cache_set *c) | |
1434 | { | |
1435 | set_bit(CACHE_SET_UNREGISTERING, &c->flags); | |
1436 | bch_cache_set_stop(c); | |
1437 | } | |
1438 | ||
1439 | #define alloc_bucket_pages(gfp, c) \ | |
1440 | ((void *) __get_free_pages(__GFP_ZERO|gfp, ilog2(bucket_pages(c)))) | |
1441 | ||
1442 | struct cache_set *bch_cache_set_alloc(struct cache_sb *sb) | |
1443 | { | |
1444 | int iter_size; | |
1445 | struct cache_set *c = kzalloc(sizeof(struct cache_set), GFP_KERNEL); | |
1446 | if (!c) | |
1447 | return NULL; | |
1448 | ||
1449 | __module_get(THIS_MODULE); | |
1450 | closure_init(&c->cl, NULL); | |
1451 | set_closure_fn(&c->cl, cache_set_free, system_wq); | |
1452 | ||
1453 | closure_init(&c->caching, &c->cl); | |
1454 | set_closure_fn(&c->caching, __cache_set_unregister, system_wq); | |
1455 | ||
1456 | /* Maybe create continue_at_noreturn() and use it here? */ | |
1457 | closure_set_stopped(&c->cl); | |
1458 | closure_put(&c->cl); | |
1459 | ||
1460 | kobject_init(&c->kobj, &bch_cache_set_ktype); | |
1461 | kobject_init(&c->internal, &bch_cache_set_internal_ktype); | |
1462 | ||
1463 | bch_cache_accounting_init(&c->accounting, &c->cl); | |
1464 | ||
1465 | memcpy(c->sb.set_uuid, sb->set_uuid, 16); | |
1466 | c->sb.block_size = sb->block_size; | |
1467 | c->sb.bucket_size = sb->bucket_size; | |
1468 | c->sb.nr_in_set = sb->nr_in_set; | |
1469 | c->sb.last_mount = sb->last_mount; | |
1470 | c->bucket_bits = ilog2(sb->bucket_size); | |
1471 | c->block_bits = ilog2(sb->block_size); | |
1472 | c->nr_uuids = bucket_bytes(c) / sizeof(struct uuid_entry); | |
1473 | ||
1474 | c->btree_pages = c->sb.bucket_size / PAGE_SECTORS; | |
1475 | if (c->btree_pages > BTREE_MAX_PAGES) | |
1476 | c->btree_pages = max_t(int, c->btree_pages / 4, | |
1477 | BTREE_MAX_PAGES); | |
1478 | ||
6ded34d1 KO |
1479 | c->sort_crit_factor = int_sqrt(c->btree_pages); |
1480 | ||
cb7a583e | 1481 | sema_init(&c->sb_write_mutex, 1); |
e8e1d468 KO |
1482 | mutex_init(&c->bucket_lock); |
1483 | init_waitqueue_head(&c->try_wait); | |
35fcd848 | 1484 | init_waitqueue_head(&c->bucket_wait); |
cb7a583e | 1485 | sema_init(&c->uuid_write_mutex, 1); |
e8e1d468 | 1486 | mutex_init(&c->sort_lock); |
65d22e91 KO |
1487 | |
1488 | spin_lock_init(&c->sort_time.lock); | |
1489 | spin_lock_init(&c->btree_gc_time.lock); | |
1490 | spin_lock_init(&c->btree_split_time.lock); | |
1491 | spin_lock_init(&c->btree_read_time.lock); | |
1492 | spin_lock_init(&c->try_harder_time.lock); | |
e8e1d468 | 1493 | |
cafe5635 KO |
1494 | bch_moving_init_cache_set(c); |
1495 | ||
1496 | INIT_LIST_HEAD(&c->list); | |
1497 | INIT_LIST_HEAD(&c->cached_devs); | |
1498 | INIT_LIST_HEAD(&c->btree_cache); | |
1499 | INIT_LIST_HEAD(&c->btree_cache_freeable); | |
1500 | INIT_LIST_HEAD(&c->btree_cache_freed); | |
1501 | INIT_LIST_HEAD(&c->data_buckets); | |
1502 | ||
1503 | c->search = mempool_create_slab_pool(32, bch_search_cache); | |
1504 | if (!c->search) | |
1505 | goto err; | |
1506 | ||
1507 | iter_size = (sb->bucket_size / sb->block_size + 1) * | |
1508 | sizeof(struct btree_iter_set); | |
1509 | ||
1510 | if (!(c->devices = kzalloc(c->nr_uuids * sizeof(void *), GFP_KERNEL)) || | |
1511 | !(c->bio_meta = mempool_create_kmalloc_pool(2, | |
1512 | sizeof(struct bbio) + sizeof(struct bio_vec) * | |
1513 | bucket_pages(c))) || | |
57943511 | 1514 | !(c->fill_iter = mempool_create_kmalloc_pool(1, iter_size)) || |
cafe5635 | 1515 | !(c->bio_split = bioset_create(4, offsetof(struct bbio, bio))) || |
cafe5635 KO |
1516 | !(c->sort = alloc_bucket_pages(GFP_KERNEL, c)) || |
1517 | !(c->uuids = alloc_bucket_pages(GFP_KERNEL, c)) || | |
1518 | bch_journal_alloc(c) || | |
1519 | bch_btree_cache_alloc(c) || | |
1520 | bch_open_buckets_alloc(c)) | |
1521 | goto err; | |
1522 | ||
cafe5635 KO |
1523 | c->congested_read_threshold_us = 2000; |
1524 | c->congested_write_threshold_us = 20000; | |
1525 | c->error_limit = 8 << IO_ERROR_SHIFT; | |
1526 | ||
1527 | return c; | |
1528 | err: | |
1529 | bch_cache_set_unregister(c); | |
1530 | return NULL; | |
1531 | } | |
1532 | ||
1533 | static void run_cache_set(struct cache_set *c) | |
1534 | { | |
1535 | const char *err = "cannot allocate memory"; | |
1536 | struct cached_dev *dc, *t; | |
1537 | struct cache *ca; | |
c18536a7 | 1538 | struct closure cl; |
cafe5635 KO |
1539 | unsigned i; |
1540 | ||
c18536a7 | 1541 | closure_init_stack(&cl); |
cafe5635 KO |
1542 | |
1543 | for_each_cache(ca, c, i) | |
1544 | c->nbuckets += ca->sb.nbuckets; | |
1545 | ||
1546 | if (CACHE_SYNC(&c->sb)) { | |
1547 | LIST_HEAD(journal); | |
1548 | struct bkey *k; | |
1549 | struct jset *j; | |
1550 | ||
1551 | err = "cannot allocate memory for journal"; | |
c18536a7 | 1552 | if (bch_journal_read(c, &journal)) |
cafe5635 KO |
1553 | goto err; |
1554 | ||
1555 | pr_debug("btree_journal_read() done"); | |
1556 | ||
1557 | err = "no journal entries found"; | |
1558 | if (list_empty(&journal)) | |
1559 | goto err; | |
1560 | ||
1561 | j = &list_entry(journal.prev, struct journal_replay, list)->j; | |
1562 | ||
1563 | err = "IO error reading priorities"; | |
1564 | for_each_cache(ca, c, i) | |
1565 | prio_read(ca, j->prio_bucket[ca->sb.nr_this_dev]); | |
1566 | ||
1567 | /* | |
1568 | * If prio_read() fails it'll call cache_set_error and we'll | |
1569 | * tear everything down right away, but if we checked sooner we | |
1570 | * could perhaps avoid journal replay. | |
1571 | */ | |
1572 | ||
1573 | k = &j->btree_root; | |
1574 | ||
1575 | err = "bad btree root"; | |
d5cc66e9 | 1576 | if (bch_btree_ptr_invalid(c, k)) |
cafe5635 KO |
1577 | goto err; |
1578 | ||
1579 | err = "error reading btree root"; | |
e8e1d468 | 1580 | c->root = bch_btree_node_get(c, k, j->btree_level, true); |
cafe5635 KO |
1581 | if (IS_ERR_OR_NULL(c->root)) |
1582 | goto err; | |
1583 | ||
1584 | list_del_init(&c->root->list); | |
1585 | rw_unlock(true, c->root); | |
1586 | ||
c18536a7 | 1587 | err = uuid_read(c, j, &cl); |
cafe5635 KO |
1588 | if (err) |
1589 | goto err; | |
1590 | ||
1591 | err = "error in recovery"; | |
c18536a7 | 1592 | if (bch_btree_check(c)) |
cafe5635 KO |
1593 | goto err; |
1594 | ||
1595 | bch_journal_mark(c, &journal); | |
1596 | bch_btree_gc_finish(c); | |
1597 | pr_debug("btree_check() done"); | |
1598 | ||
1599 | /* | |
1600 | * bcache_journal_next() can't happen sooner, or | |
1601 | * btree_gc_finish() will give spurious errors about last_gc > | |
1602 | * gc_gen - this is a hack but oh well. | |
1603 | */ | |
1604 | bch_journal_next(&c->journal); | |
1605 | ||
119ba0f8 | 1606 | err = "error starting allocator thread"; |
cafe5635 | 1607 | for_each_cache(ca, c, i) |
119ba0f8 KO |
1608 | if (bch_cache_allocator_start(ca)) |
1609 | goto err; | |
cafe5635 KO |
1610 | |
1611 | /* | |
1612 | * First place it's safe to allocate: btree_check() and | |
1613 | * btree_gc_finish() have to run before we have buckets to | |
1614 | * allocate, and bch_bucket_alloc_set() might cause a journal | |
1615 | * entry to be written so bcache_journal_next() has to be called | |
1616 | * first. | |
1617 | * | |
1618 | * If the uuids were in the old format we have to rewrite them | |
1619 | * before the next journal entry is written: | |
1620 | */ | |
1621 | if (j->version < BCACHE_JSET_VERSION_UUID) | |
1622 | __uuid_write(c); | |
1623 | ||
c18536a7 | 1624 | bch_journal_replay(c, &journal); |
cafe5635 KO |
1625 | } else { |
1626 | pr_notice("invalidating existing data"); | |
cafe5635 KO |
1627 | |
1628 | for_each_cache(ca, c, i) { | |
1629 | unsigned j; | |
1630 | ||
1631 | ca->sb.keys = clamp_t(int, ca->sb.nbuckets >> 7, | |
1632 | 2, SB_JOURNAL_BUCKETS); | |
1633 | ||
1634 | for (j = 0; j < ca->sb.keys; j++) | |
1635 | ca->sb.d[j] = ca->sb.first_bucket + j; | |
1636 | } | |
1637 | ||
1638 | bch_btree_gc_finish(c); | |
1639 | ||
119ba0f8 | 1640 | err = "error starting allocator thread"; |
cafe5635 | 1641 | for_each_cache(ca, c, i) |
119ba0f8 KO |
1642 | if (bch_cache_allocator_start(ca)) |
1643 | goto err; | |
cafe5635 KO |
1644 | |
1645 | mutex_lock(&c->bucket_lock); | |
1646 | for_each_cache(ca, c, i) | |
1647 | bch_prio_write(ca); | |
1648 | mutex_unlock(&c->bucket_lock); | |
1649 | ||
cafe5635 KO |
1650 | err = "cannot allocate new UUID bucket"; |
1651 | if (__uuid_write(c)) | |
72a44517 | 1652 | goto err; |
cafe5635 KO |
1653 | |
1654 | err = "cannot allocate new btree root"; | |
bc9389ee | 1655 | c->root = bch_btree_node_alloc(c, 0, true); |
cafe5635 | 1656 | if (IS_ERR_OR_NULL(c->root)) |
72a44517 | 1657 | goto err; |
cafe5635 KO |
1658 | |
1659 | bkey_copy_key(&c->root->key, &MAX_KEY); | |
c18536a7 | 1660 | bch_btree_node_write(c->root, &cl); |
cafe5635 KO |
1661 | |
1662 | bch_btree_set_root(c->root); | |
1663 | rw_unlock(true, c->root); | |
1664 | ||
1665 | /* | |
1666 | * We don't want to write the first journal entry until | |
1667 | * everything is set up - fortunately journal entries won't be | |
1668 | * written until the SET_CACHE_SYNC() here: | |
1669 | */ | |
1670 | SET_CACHE_SYNC(&c->sb, true); | |
1671 | ||
1672 | bch_journal_next(&c->journal); | |
c18536a7 | 1673 | bch_journal_meta(c, &cl); |
cafe5635 KO |
1674 | } |
1675 | ||
72a44517 KO |
1676 | err = "error starting gc thread"; |
1677 | if (bch_gc_thread_start(c)) | |
1678 | goto err; | |
1679 | ||
c18536a7 | 1680 | closure_sync(&cl); |
cafe5635 KO |
1681 | c->sb.last_mount = get_seconds(); |
1682 | bcache_write_super(c); | |
1683 | ||
1684 | list_for_each_entry_safe(dc, t, &uncached_devices, list) | |
1685 | bch_cached_dev_attach(dc, c); | |
1686 | ||
1687 | flash_devs_run(c); | |
1688 | ||
1689 | return; | |
cafe5635 | 1690 | err: |
c18536a7 | 1691 | closure_sync(&cl); |
cafe5635 | 1692 | /* XXX: test this, it's broken */ |
c8694948 | 1693 | bch_cache_set_error(c, "%s", err); |
cafe5635 KO |
1694 | } |
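run_cache_set() packs the whole startup sequence into one function, so here is an editor's condensed outline of its two paths as a comment sketch; the names mirror the calls above, but this block is not part of the driver:

/*
 * Editor's outline of run_cache_set(), not driver code:
 *
 * CACHE_SYNC set (clean, existing cache set):
 *   bch_journal_read()     collect journal entries from every cache
 *   prio_read()            read bucket priorities/gens via the last jset
 *   bch_btree_node_get()   read the btree root the journal points at
 *   uuid_read(), bch_btree_check(), bch_journal_mark(), bch_btree_gc_finish()
 *   bch_journal_next()     must precede the first allocation (see comment)
 *   bch_cache_allocator_start(), __uuid_write() if needed, bch_journal_replay()
 *
 * !CACHE_SYNC set (new or unclean: invalidate existing data):
 *   pick journal buckets, bch_btree_gc_finish()
 *   bch_cache_allocator_start(), bch_prio_write(), __uuid_write()
 *   allocate a fresh btree root, SET_CACHE_SYNC(), bch_journal_meta()
 *
 * Both paths then start the gc thread, write the superblock, attach any
 * waiting backing devices and run flash-only volumes.
 */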
1695 | ||
1696 | static bool can_attach_cache(struct cache *ca, struct cache_set *c) | |
1697 | { | |
1698 | return ca->sb.block_size == c->sb.block_size && | |
9eb8ebeb | 1699 | ca->sb.bucket_size == c->sb.bucket_size && |
cafe5635 KO |
1700 | ca->sb.nr_in_set == c->sb.nr_in_set; |
1701 | } | |
1702 | ||
1703 | static const char *register_cache_set(struct cache *ca) | |
1704 | { | |
1705 | char buf[12]; | |
1706 | const char *err = "cannot allocate memory"; | |
1707 | struct cache_set *c; | |
1708 | ||
1709 | list_for_each_entry(c, &bch_cache_sets, list) | |
1710 | if (!memcmp(c->sb.set_uuid, ca->sb.set_uuid, 16)) { | |
1711 | if (c->cache[ca->sb.nr_this_dev]) | |
1712 | return "duplicate cache set member"; | |
1713 | ||
1714 | if (!can_attach_cache(ca, c)) | |
1715 | return "cache sb does not match set"; | |
1716 | ||
1717 | if (!CACHE_SYNC(&ca->sb)) | |
1718 | SET_CACHE_SYNC(&c->sb, false); | |
1719 | ||
1720 | goto found; | |
1721 | } | |
1722 | ||
1723 | c = bch_cache_set_alloc(&ca->sb); | |
1724 | if (!c) | |
1725 | return err; | |
1726 | ||
1727 | err = "error creating kobject"; | |
1728 | if (kobject_add(&c->kobj, bcache_kobj, "%pU", c->sb.set_uuid) || | |
1729 | kobject_add(&c->internal, &c->kobj, "internal")) | |
1730 | goto err; | |
1731 | ||
1732 | if (bch_cache_accounting_add_kobjs(&c->accounting, &c->kobj)) | |
1733 | goto err; | |
1734 | ||
1735 | bch_debug_init_cache_set(c); | |
1736 | ||
1737 | list_add(&c->list, &bch_cache_sets); | |
1738 | found: | |
1739 | sprintf(buf, "cache%i", ca->sb.nr_this_dev); | |
1740 | if (sysfs_create_link(&ca->kobj, &c->kobj, "set") || | |
1741 | sysfs_create_link(&c->kobj, &ca->kobj, buf)) | |
1742 | goto err; | |
1743 | ||
1744 | if (ca->sb.seq > c->sb.seq) { | |
1745 | c->sb.version = ca->sb.version; | |
1746 | memcpy(c->sb.set_uuid, ca->sb.set_uuid, 16); | |
1747 | c->sb.flags = ca->sb.flags; | |
1748 | c->sb.seq = ca->sb.seq; | |
1749 | pr_debug("set version = %llu", c->sb.version); | |
1750 | } | |
1751 | ||
1752 | ca->set = c; | |
1753 | ca->set->cache[ca->sb.nr_this_dev] = ca; | |
1754 | c->cache_by_alloc[c->caches_loaded++] = ca; | |
1755 | ||
1756 | if (c->caches_loaded == c->sb.nr_in_set) | |
1757 | run_cache_set(c); | |
1758 | ||
1759 | return NULL; | |
1760 | err: | |
1761 | bch_cache_set_unregister(c); | |
1762 | return err; | |
1763 | } | |
1764 | ||
1765 | /* Cache device */ | |
1766 | ||
1767 | void bch_cache_release(struct kobject *kobj) | |
1768 | { | |
1769 | struct cache *ca = container_of(kobj, struct cache, kobj); | |
78365411 | 1770 | unsigned i; |
cafe5635 KO |
1771 | |
1772 | if (ca->set) | |
1773 | ca->set->cache[ca->sb.nr_this_dev] = NULL; | |
1774 | ||
cafe5635 KO |
1775 | bio_split_pool_free(&ca->bio_split_hook); |
1776 | ||
cafe5635 KO |
1777 | free_pages((unsigned long) ca->disk_buckets, ilog2(bucket_pages(ca))); |
1778 | kfree(ca->prio_buckets); | |
1779 | vfree(ca->buckets); | |
1780 | ||
1781 | free_heap(&ca->heap); | |
1782 | free_fifo(&ca->unused); | |
1783 | free_fifo(&ca->free_inc); | |
78365411 KO |
1784 | |
1785 | for (i = 0; i < RESERVE_NR; i++) | |
1786 | free_fifo(&ca->free[i]); | |
cafe5635 KO |
1787 | |
1788 | if (ca->sb_bio.bi_inline_vecs[0].bv_page) | |
1789 | put_page(ca->sb_bio.bi_io_vec[0].bv_page); | |
1790 | ||
1791 | if (!IS_ERR_OR_NULL(ca->bdev)) { | |
1792 | blk_sync_queue(bdev_get_queue(ca->bdev)); | |
1793 | blkdev_put(ca->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); | |
1794 | } | |
1795 | ||
1796 | kfree(ca); | |
1797 | module_put(THIS_MODULE); | |
1798 | } | |
1799 | ||
1800 | static int cache_alloc(struct cache_sb *sb, struct cache *ca) | |
1801 | { | |
1802 | size_t free; | |
1803 | struct bucket *b; | |
1804 | ||
cafe5635 KO |
1805 | __module_get(THIS_MODULE); |
1806 | kobject_init(&ca->kobj, &bch_cache_ktype); | |
1807 | ||
cafe5635 KO |
1808 | bio_init(&ca->journal.bio); |
1809 | ca->journal.bio.bi_max_vecs = 8; | |
1810 | ca->journal.bio.bi_io_vec = ca->journal.bio.bi_inline_vecs; | |
1811 | ||
78365411 | 1812 | free = roundup_pow_of_two(ca->sb.nbuckets) >> 10; |
cafe5635 | 1813 | |
78365411 KO |
1814 | if (!init_fifo(&ca->free[RESERVE_BTREE], 8, GFP_KERNEL) || |
1815 | !init_fifo(&ca->free[RESERVE_PRIO], prio_buckets(ca), GFP_KERNEL) || | |
1816 | !init_fifo(&ca->free[RESERVE_MOVINGGC], free, GFP_KERNEL) || | |
1817 | !init_fifo(&ca->free[RESERVE_NONE], free, GFP_KERNEL) || | |
cafe5635 KO |
1818 | !init_fifo(&ca->free_inc, free << 2, GFP_KERNEL) || |
1819 | !init_fifo(&ca->unused, free << 2, GFP_KERNEL) || | |
1820 | !init_heap(&ca->heap, free << 3, GFP_KERNEL) || | |
f59fce84 | 1821 | !(ca->buckets = vzalloc(sizeof(struct bucket) * |
cafe5635 KO |
1822 | ca->sb.nbuckets)) || |
1823 | !(ca->prio_buckets = kzalloc(sizeof(uint64_t) * prio_buckets(ca) * | |
1824 | 2, GFP_KERNEL)) || | |
1825 | !(ca->disk_buckets = alloc_bucket_pages(GFP_KERNEL, ca)) || | |
cafe5635 | 1826 | bio_split_pool_init(&ca->bio_split_hook)) |
f59fce84 | 1827 | return -ENOMEM; |
cafe5635 KO |
1828 | |
1829 | ca->prio_last_buckets = ca->prio_buckets + prio_buckets(ca); | |
1830 | ||
cafe5635 KO |
1831 | for_each_bucket(b, ca) |
1832 | atomic_set(&b->pin, 0); | |
1833 | ||
1834 | if (bch_cache_allocator_init(ca)) | |
1835 | goto err; | |
1836 | ||
1837 | return 0; | |
1838 | err: | |
1839 | kobject_put(&ca->kobj); | |
1840 | return -ENOMEM; | |
1841 | } | |
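The reserve sizes in cache_alloc() all derive from free = roundup_pow_of_two(nbuckets) >> 10. A minimal userspace sketch of that arithmetic, assuming a 1 TiB cache with 512 KiB buckets (both figures are assumptions, not taken from this file):

#include <stdio.h>
#include <stdint.h>

/* same rounding the kernel helper performs, reimplemented for the sketch */
static uint64_t round_up_pow2(uint64_t n)
{
	uint64_t r = 1;

	while (r < n)
		r <<= 1;
	return r;
}

int main(void)
{
	uint64_t nbuckets = (1ULL << 40) / (512 * 1024);	/* 2097152 buckets */
	uint64_t free_bkts = round_up_pow2(nbuckets) >> 10;

	printf("movinggc/none reserves:  %llu buckets each\n",
	       (unsigned long long)free_bkts);			/* 2048 */
	printf("free_inc/unused fifos:   %llu entries each\n",
	       (unsigned long long)(free_bkts << 2));		/* 8192 */
	printf("bucket heap:             %llu entries\n",
	       (unsigned long long)(free_bkts << 3));		/* 16384 */
	return 0;
}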
1842 | ||
f59fce84 | 1843 | static void register_cache(struct cache_sb *sb, struct page *sb_page, |
cafe5635 KO |
1844 | struct block_device *bdev, struct cache *ca) |
1845 | { | |
1846 | char name[BDEVNAME_SIZE]; | |
1847 | const char *err = "cannot allocate memory"; | |
1848 | ||
f59fce84 | 1849 | memcpy(&ca->sb, sb, sizeof(struct cache_sb)); |
cafe5635 KO |
1850 | ca->bdev = bdev; |
1851 | ca->bdev->bd_holder = ca; | |
1852 | ||
f59fce84 KO |
1853 | bio_init(&ca->sb_bio); |
1854 | ca->sb_bio.bi_max_vecs = 1; | |
1855 | ca->sb_bio.bi_io_vec = ca->sb_bio.bi_inline_vecs; | |
1856 | ca->sb_bio.bi_io_vec[0].bv_page = sb_page; | |
1857 | get_page(sb_page); | |
1858 | ||
cafe5635 KO |
1859 | if (blk_queue_discard(bdev_get_queue(ca->bdev))) |
1860 | ca->discard = CACHE_DISCARD(&ca->sb); | |
1861 | ||
f59fce84 KO |
1862 | if (cache_alloc(sb, ca) != 0) |
1863 | goto err; | |
1864 | ||
cafe5635 KO |
1865 | err = "error creating kobject"; |
1866 | if (kobject_add(&ca->kobj, &part_to_dev(bdev->bd_part)->kobj, "bcache")) | |
1867 | goto err; | |
1868 | ||
1869 | err = register_cache_set(ca); | |
1870 | if (err) | |
1871 | goto err; | |
1872 | ||
1873 | pr_info("registered cache device %s", bdevname(bdev, name)); | |
f59fce84 | 1874 | return; |
cafe5635 | 1875 | err: |
f59fce84 | 1876 | pr_notice("error opening %s: %s", bdevname(bdev, name), err); |
cafe5635 | 1877 | kobject_put(&ca->kobj); |
cafe5635 KO |
1878 | } |
1879 | ||
1880 | /* Global interfaces/init */ | |
1881 | ||
1882 | static ssize_t register_bcache(struct kobject *, struct kobj_attribute *, | |
1883 | const char *, size_t); | |
1884 | ||
1885 | kobj_attribute_write(register, register_bcache); | |
1886 | kobj_attribute_write(register_quiet, register_bcache); | |
1887 | ||
a9dd53ad GP |
1888 | static bool bch_is_open_backing(struct block_device *bdev) { |
1889 | struct cache_set *c, *tc; | |
1890 | struct cached_dev *dc, *t; | |
1891 | ||
1892 | list_for_each_entry_safe(c, tc, &bch_cache_sets, list) | |
1893 | list_for_each_entry_safe(dc, t, &c->cached_devs, list) | |
1894 | if (dc->bdev == bdev) | |
1895 | return true; | |
1896 | list_for_each_entry_safe(dc, t, &uncached_devices, list) | |
1897 | if (dc->bdev == bdev) | |
1898 | return true; | |
1899 | return false; | |
1900 | } | |
1901 | ||
1902 | static bool bch_is_open_cache(struct block_device *bdev) { | |
1903 | struct cache_set *c, *tc; | |
1904 | struct cache *ca; | |
1905 | unsigned i; | |
1906 | ||
1907 | list_for_each_entry_safe(c, tc, &bch_cache_sets, list) | |
1908 | for_each_cache(ca, c, i) | |
1909 | if (ca->bdev == bdev) | |
1910 | return true; | |
1911 | return false; | |
1912 | } | |
1913 | ||
1914 | static bool bch_is_open(struct block_device *bdev) { | |
1915 | return bch_is_open_cache(bdev) || bch_is_open_backing(bdev); | |
1916 | } | |
1917 | ||
cafe5635 KO |
1918 | static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr, |
1919 | const char *buffer, size_t size) | |
1920 | { | |
1921 | ssize_t ret = size; | |
1922 | const char *err = "cannot allocate memory"; | |
1923 | char *path = NULL; | |
1924 | struct cache_sb *sb = NULL; | |
1925 | struct block_device *bdev = NULL; | |
1926 | struct page *sb_page = NULL; | |
1927 | ||
1928 | if (!try_module_get(THIS_MODULE)) | |
1929 | return -EBUSY; | |
1930 | ||
1931 | mutex_lock(&bch_register_lock); | |
1932 | ||
1933 | if (!(path = kstrndup(buffer, size, GFP_KERNEL)) || | |
1934 | !(sb = kmalloc(sizeof(struct cache_sb), GFP_KERNEL))) | |
1935 | goto err; | |
1936 | ||
1937 | err = "failed to open device"; | |
1938 | bdev = blkdev_get_by_path(strim(path), | |
1939 | FMODE_READ|FMODE_WRITE|FMODE_EXCL, | |
1940 | sb); | |
f59fce84 | 1941 | if (IS_ERR(bdev)) { |
a9dd53ad GP |
1942 | if (bdev == ERR_PTR(-EBUSY)) { |
1943 | bdev = lookup_bdev(strim(path)); | |
1944 | if (!IS_ERR(bdev) && bch_is_open(bdev)) | |
1945 | err = "device already registered"; | |
1946 | else | |
1947 | err = "device busy"; | |
1948 | } | |
cafe5635 | 1949 | goto err; |
f59fce84 KO |
1950 | } |
1951 | ||
1952 | err = "failed to set blocksize"; | |
1953 | if (set_blocksize(bdev, 4096)) | |
1954 | goto err_close; | |
cafe5635 KO |
1955 | |
1956 | err = read_super(sb, bdev, &sb_page); | |
1957 | if (err) | |
1958 | goto err_close; | |
1959 | ||
2903381f | 1960 | if (SB_IS_BDEV(sb)) { |
cafe5635 | 1961 | struct cached_dev *dc = kzalloc(sizeof(*dc), GFP_KERNEL); |
f59fce84 KO |
1962 | if (!dc) |
1963 | goto err_close; | |
cafe5635 | 1964 | |
f59fce84 | 1965 | register_bdev(sb, sb_page, bdev, dc); |
cafe5635 KO |
1966 | } else { |
1967 | struct cache *ca = kzalloc(sizeof(*ca), GFP_KERNEL); | |
f59fce84 KO |
1968 | if (!ca) |
1969 | goto err_close; | |
cafe5635 | 1970 | |
f59fce84 | 1971 | register_cache(sb, sb_page, bdev, ca); |
cafe5635 | 1972 | } |
f59fce84 KO |
1973 | out: |
1974 | if (sb_page) | |
cafe5635 | 1975 | put_page(sb_page); |
cafe5635 KO |
1976 | kfree(sb); |
1977 | kfree(path); | |
1978 | mutex_unlock(&bch_register_lock); | |
1979 | module_put(THIS_MODULE); | |
1980 | return ret; | |
f59fce84 KO |
1981 | |
1982 | err_close: | |
1983 | blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); | |
1984 | err: | |
1985 | if (attr != &ksysfs_register_quiet) | |
1986 | pr_info("error opening %s: %s", path, err); | |
1987 | ret = -EINVAL; | |
1988 | goto out; | |
cafe5635 KO |
1989 | } |
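register_bcache() is reached by writing a device path to the sysfs attribute created in bcache_init() below (/sys/fs/bcache/register). A minimal userspace sketch of that registration; the device path is an assumption:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *dev = "/dev/sdb";	/* assumed, already formatted for bcache */
	int fd = open("/sys/fs/bcache/register", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* one write; the kernel side kstrndup()s and strim()s the buffer */
	if (write(fd, dev, strlen(dev)) < 0) {
		perror("register");
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}

Writing to register_quiet behaves the same but suppresses the pr_info() on failure, per the attr check in the error path above.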
1990 | ||
1991 | static int bcache_reboot(struct notifier_block *n, unsigned long code, void *x) | |
1992 | { | |
1993 | if (code == SYS_DOWN || | |
1994 | code == SYS_HALT || | |
1995 | code == SYS_POWER_OFF) { | |
1996 | DEFINE_WAIT(wait); | |
1997 | unsigned long start = jiffies; | |
1998 | bool stopped = false; | |
1999 | ||
2000 | struct cache_set *c, *tc; | |
2001 | struct cached_dev *dc, *tdc; | |
2002 | ||
2003 | mutex_lock(&bch_register_lock); | |
2004 | ||
2005 | if (list_empty(&bch_cache_sets) && | |
2006 | list_empty(&uncached_devices)) | |
2007 | goto out; | |
2008 | ||
2009 | pr_info("Stopping all devices:"); | |
2010 | ||
2011 | list_for_each_entry_safe(c, tc, &bch_cache_sets, list) | |
2012 | bch_cache_set_stop(c); | |
2013 | ||
2014 | list_for_each_entry_safe(dc, tdc, &uncached_devices, list) | |
2015 | bcache_device_stop(&dc->disk); | |
2016 | ||
2017 | /* What's a condition variable? */ | |
2018 | while (1) { | |
2019 | long timeout = start + 2 * HZ - jiffies; | |
2020 | ||
2021 | stopped = list_empty(&bch_cache_sets) && | |
2022 | list_empty(&uncached_devices); | |
2023 | ||
2024 | if (timeout < 0 || stopped) | |
2025 | break; | |
2026 | ||
2027 | prepare_to_wait(&unregister_wait, &wait, | |
2028 | TASK_UNINTERRUPTIBLE); | |
2029 | ||
2030 | mutex_unlock(&bch_register_lock); | |
2031 | schedule_timeout(timeout); | |
2032 | mutex_lock(&bch_register_lock); | |
2033 | } | |
2034 | ||
2035 | finish_wait(&unregister_wait, &wait); | |
2036 | ||
2037 | if (stopped) | |
2038 | pr_info("All devices stopped"); | |
2039 | else | |
2040 | pr_notice("Timeout waiting for devices to be closed"); | |
2041 | out: | |
2042 | mutex_unlock(&bch_register_lock); | |
2043 | } | |
2044 | ||
2045 | return NOTIFY_DONE; | |
2046 | } | |
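The open-coded loop above (flagged by the "What's a condition variable?" comment) waits up to two seconds for all devices to stop. As an editor's sketch only, the same wait could be phrased with wait_event_timeout(), at the cost of sampling the lists without bch_register_lock held, so this illustrates the primitive rather than being a drop-in replacement:

/* Editor's sketch, not driver code; relies on this file's existing includes. */
static bool bch_all_devices_stopped(void)
{
	return list_empty(&bch_cache_sets) &&
	       list_empty(&uncached_devices);
}

	/* ...after stopping the devices and dropping bch_register_lock: */
	stopped = wait_event_timeout(unregister_wait,
				     bch_all_devices_stopped(), 2 * HZ) != 0;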
2047 | ||
2048 | static struct notifier_block reboot = { | |
2049 | .notifier_call = bcache_reboot, | |
2050 | .priority = INT_MAX, /* before any real devices */ | |
2051 | }; | |
2052 | ||
2053 | static void bcache_exit(void) | |
2054 | { | |
2055 | bch_debug_exit(); | |
cafe5635 KO |
2056 | bch_request_exit(); |
2057 | bch_btree_exit(); | |
2058 | if (bcache_kobj) | |
2059 | kobject_put(bcache_kobj); | |
2060 | if (bcache_wq) | |
2061 | destroy_workqueue(bcache_wq); | |
2062 | unregister_blkdev(bcache_major, "bcache"); | |
2063 | unregister_reboot_notifier(&reboot); | |
2064 | } | |
2065 | ||
2066 | static int __init bcache_init(void) | |
2067 | { | |
2068 | static const struct attribute *files[] = { | |
2069 | &ksysfs_register.attr, | |
2070 | &ksysfs_register_quiet.attr, | |
2071 | NULL | |
2072 | }; | |
2073 | ||
2074 | mutex_init(&bch_register_lock); | |
2075 | init_waitqueue_head(&unregister_wait); | |
2076 | register_reboot_notifier(&reboot); | |
07e86ccb | 2077 | closure_debug_init(); |
cafe5635 KO |
2078 | |
2079 | bcache_major = register_blkdev(0, "bcache"); | |
2080 | if (bcache_major < 0) | |
2081 | return bcache_major; | |
2082 | ||
2083 | if (!(bcache_wq = create_workqueue("bcache")) || | |
2084 | !(bcache_kobj = kobject_create_and_add("bcache", fs_kobj)) || | |
2085 | sysfs_create_files(bcache_kobj, files) || | |
2086 | bch_btree_init() || | |
2087 | bch_request_init() || | |
cafe5635 KO |
2088 | bch_debug_init(bcache_kobj)) |
2089 | goto err; | |
2090 | ||
2091 | return 0; | |
2092 | err: | |
2093 | bcache_exit(); | |
2094 | return -ENOMEM; | |
2095 | } | |
2096 | ||
2097 | module_exit(bcache_exit); | |
2098 | module_init(bcache_init); |