/*
 * bcache setup/teardown code, and some metadata io - read a superblock and
 * figure out what to do with it.
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */
#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "extents.h"
#include "request.h"
#include "writeback.h"

#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/debugfs.h>
#include <linux/genhd.h>
#include <linux/idr.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <linux/sysfs.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kent Overstreet <kent.overstreet@gmail.com>");
static const char bcache_magic[] = {
	0xc6, 0x85, 0x73, 0xf6, 0x4e, 0x1a, 0x45, 0xca,
	0x82, 0x65, 0xf5, 0x7f, 0x48, 0xba, 0x6d, 0x81
};

static const char invalid_uuid[] = {
	0xa0, 0x3e, 0xf8, 0xed, 0x3e, 0xe1, 0xb8, 0x78,
	0xc8, 0x50, 0xfc, 0x5e, 0xcb, 0x16, 0xcd, 0x99
};
/* Default is -1; we skip past it for struct cached_dev's cache mode */
const char * const bch_cache_modes[] = {
	"default",
	"writethrough",
	"writeback",
	"writearound",
	"none",
	NULL
};

/* Default is -1; we skip past it for stop_when_cache_set_failed */
const char * const bch_stop_on_failure_modes[] = {
	"default",
	"auto",
	"always",
	NULL
};
static struct kobject *bcache_kobj;
struct mutex bch_register_lock;
LIST_HEAD(bch_cache_sets);
static LIST_HEAD(uncached_devices);

static int bcache_major;
static DEFINE_IDA(bcache_device_idx);
static wait_queue_head_t unregister_wait;
struct workqueue_struct *bcache_wq;
#define BTREE_MAX_PAGES		(256 * 1024 / PAGE_SIZE)
/* limitation of partitions number on single bcache device */
#define BCACHE_MINORS		128
/* limitation of bcache devices number on single system */
#define BCACHE_DEVICE_IDX_MAX	((1U << MINORBITS)/BCACHE_MINORS)
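/*
 * Worked example for the two limits above (assuming the usual MINORBITS of
 * 20): each bcache device reserves BCACHE_MINORS (128) minor numbers for its
 * partitions, so the index space is (1 << 20) / 128 = 8192 devices per
 * system; a device index is turned into a starting minor number later via
 * idx_to_first_minor().
 */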
static const char *read_super(struct cache_sb *sb, struct block_device *bdev,
			      struct page **res)
{
	const char *err;
	struct cache_sb *s;
	struct buffer_head *bh = __bread(bdev, 1, SB_SIZE);
	unsigned i;

	if (!bh)
		return "IO error";

	s = (struct cache_sb *) bh->b_data;

	sb->offset		= le64_to_cpu(s->offset);
	sb->version		= le64_to_cpu(s->version);

	memcpy(sb->magic,	s->magic, 16);
	memcpy(sb->uuid,	s->uuid, 16);
	memcpy(sb->set_uuid,	s->set_uuid, 16);
	memcpy(sb->label,	s->label, SB_LABEL_SIZE);

	sb->flags		= le64_to_cpu(s->flags);
	sb->seq			= le64_to_cpu(s->seq);
	sb->last_mount		= le32_to_cpu(s->last_mount);
	sb->first_bucket	= le16_to_cpu(s->first_bucket);
	sb->keys		= le16_to_cpu(s->keys);

	for (i = 0; i < SB_JOURNAL_BUCKETS; i++)
		sb->d[i] = le64_to_cpu(s->d[i]);

	pr_debug("read sb version %llu, flags %llu, seq %llu, journal size %u",
		 sb->version, sb->flags, sb->seq, sb->keys);

	err = "Not a bcache superblock";
	if (sb->offset != SB_SECTOR)
		goto err;

	if (memcmp(sb->magic, bcache_magic, 16))
		goto err;

	err = "Too many journal buckets";
	if (sb->keys > SB_JOURNAL_BUCKETS)
		goto err;

	err = "Bad checksum";
	if (s->csum != csum_set(s))
		goto err;

	err = "Bad UUID";
	if (bch_is_zero(sb->uuid, 16))
		goto err;

	sb->block_size	= le16_to_cpu(s->block_size);

	err = "Superblock block size smaller than device block size";
	if (sb->block_size << 9 < bdev_logical_block_size(bdev))
		goto err;

	switch (sb->version) {
	case BCACHE_SB_VERSION_BDEV:
		sb->data_offset	= BDEV_DATA_START_DEFAULT;
		break;
	case BCACHE_SB_VERSION_BDEV_WITH_OFFSET:
		sb->data_offset	= le64_to_cpu(s->data_offset);

		err = "Bad data offset";
		if (sb->data_offset < BDEV_DATA_START_DEFAULT)
			goto err;

		break;
	case BCACHE_SB_VERSION_CDEV:
	case BCACHE_SB_VERSION_CDEV_WITH_UUID:
		sb->nbuckets	= le64_to_cpu(s->nbuckets);
		sb->bucket_size	= le16_to_cpu(s->bucket_size);

		sb->nr_in_set	= le16_to_cpu(s->nr_in_set);
		sb->nr_this_dev	= le16_to_cpu(s->nr_this_dev);

		err = "Too many buckets";
		if (sb->nbuckets > LONG_MAX)
			goto err;

		err = "Not enough buckets";
		if (sb->nbuckets < 1 << 7)
			goto err;

		err = "Bad block/bucket size";
		if (!is_power_of_2(sb->block_size) ||
		    sb->block_size > PAGE_SECTORS ||
		    !is_power_of_2(sb->bucket_size) ||
		    sb->bucket_size < PAGE_SECTORS)
			goto err;

		err = "Invalid superblock: device too small";
		if (get_capacity(bdev->bd_disk) < sb->bucket_size * sb->nbuckets)
			goto err;

		err = "Bad UUID";
		if (bch_is_zero(sb->set_uuid, 16))
			goto err;

		err = "Bad cache device number in set";
		if (!sb->nr_in_set ||
		    sb->nr_in_set <= sb->nr_this_dev ||
		    sb->nr_in_set > MAX_CACHES_PER_SET)
			goto err;

		err = "Journal buckets not sequential";
		for (i = 0; i < sb->keys; i++)
			if (sb->d[i] != sb->first_bucket + i)
				goto err;

		err = "Too many journal buckets";
		if (sb->first_bucket + sb->keys > sb->nbuckets)
			goto err;

		err = "Invalid superblock: first bucket comes before end of super";
		if (sb->first_bucket * sb->bucket_size < 16)
			goto err;

		break;
	default:
		err = "Unsupported superblock version";
		goto err;
	}

	sb->last_mount = get_seconds();
	err = NULL;

	get_page(bh->b_page);
	*res = bh->b_page;
err:
	put_bh(bh);
	return err;
}
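/*
 * A quick sketch of the offset check above, assuming the usual values of
 * SB_SIZE (4096 bytes) and SB_SECTOR (8): __bread(bdev, 1, SB_SIZE) reads
 * logical block 1 at a 4096-byte block size, i.e. byte offset 4096, which is
 * 512-byte sector 8 - so a valid on-disk superblock must record
 * sb->offset == SB_SECTOR.
 */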
static void write_bdev_super_endio(struct bio *bio)
{
	struct cached_dev *dc = bio->bi_private;
	/* XXX: error checking */

	closure_put(&dc->sb_write);
}
static void __write_super(struct cache_sb *sb, struct bio *bio)
{
	struct cache_sb *out = page_address(bio->bi_io_vec[0].bv_page);
	unsigned i;

	bio->bi_iter.bi_sector	= SB_SECTOR;
	bio->bi_iter.bi_size	= SB_SIZE;
	bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC|REQ_META);
	bch_bio_map(bio, NULL);

	out->offset		= cpu_to_le64(sb->offset);
	out->version		= cpu_to_le64(sb->version);

	memcpy(out->uuid,	sb->uuid, 16);
	memcpy(out->set_uuid,	sb->set_uuid, 16);
	memcpy(out->label,	sb->label, SB_LABEL_SIZE);

	out->flags		= cpu_to_le64(sb->flags);
	out->seq		= cpu_to_le64(sb->seq);

	out->last_mount		= cpu_to_le32(sb->last_mount);
	out->first_bucket	= cpu_to_le16(sb->first_bucket);
	out->keys		= cpu_to_le16(sb->keys);

	for (i = 0; i < sb->keys; i++)
		out->d[i] = cpu_to_le64(sb->d[i]);

	out->csum = csum_set(out);

	pr_debug("ver %llu, flags %llu, seq %llu",
		 sb->version, sb->flags, sb->seq);

	submit_bio(bio);
}
static void bch_write_bdev_super_unlock(struct closure *cl)
{
	struct cached_dev *dc = container_of(cl, struct cached_dev, sb_write);

	up(&dc->sb_write_mutex);
}

void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent)
{
	struct closure *cl = &dc->sb_write;
	struct bio *bio = &dc->sb_bio;

	down(&dc->sb_write_mutex);
	closure_init(cl, parent);

	bio_reset(bio);
	bio_set_dev(bio, dc->bdev);
	bio->bi_end_io	= write_bdev_super_endio;
	bio->bi_private = dc;

	closure_get(cl);
	/* I/O request sent to backing device */
	__write_super(&dc->sb, bio);

	closure_return_with_destructor(cl, bch_write_bdev_super_unlock);
}
static void write_super_endio(struct bio *bio)
{
	struct cache *ca = bio->bi_private;

	bch_count_io_errors(ca, bio->bi_status, "writing superblock");
	closure_put(&ca->set->sb_write);
}

static void bcache_write_super_unlock(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, sb_write);

	up(&c->sb_write_mutex);
}

void bcache_write_super(struct cache_set *c)
{
	struct closure *cl = &c->sb_write;
	struct cache *ca;
	unsigned i;

	down(&c->sb_write_mutex);
	closure_init(cl, &c->cl);

	c->sb.seq++;

	for_each_cache(ca, c, i) {
		struct bio *bio = &ca->sb_bio;

		ca->sb.version		= BCACHE_SB_VERSION_CDEV_WITH_UUID;
		ca->sb.seq		= c->sb.seq;
		ca->sb.last_mount	= c->sb.last_mount;

		SET_CACHE_SYNC(&ca->sb, CACHE_SYNC(&c->sb));

		bio_reset(bio);
		bio_set_dev(bio, ca->bdev);
		bio->bi_end_io	= write_super_endio;
		bio->bi_private = ca;

		closure_get(cl);
		__write_super(&ca->sb, bio);
	}

	closure_return_with_destructor(cl, bcache_write_super_unlock);
}
static void uuid_endio(struct bio *bio)
{
	struct closure *cl = bio->bi_private;
	struct cache_set *c = container_of(cl, struct cache_set, uuid_write);

	cache_set_err_on(bio->bi_status, c, "accessing uuids");
	bch_bbio_free(bio, c);
	closure_put(cl);
}

static void uuid_io_unlock(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, uuid_write);

	up(&c->uuid_write_mutex);
}

static void uuid_io(struct cache_set *c, int op, unsigned long op_flags,
		    struct bkey *k, struct closure *parent)
{
	struct closure *cl = &c->uuid_write;
	struct uuid_entry *u;
	unsigned i;
	char buf[80];

	BUG_ON(!parent);
	down(&c->uuid_write_mutex);
	closure_init(cl, parent);

	for (i = 0; i < KEY_PTRS(k); i++) {
		struct bio *bio = bch_bbio_alloc(c);

		bio->bi_opf = REQ_SYNC | REQ_META | op_flags;
		bio->bi_iter.bi_size = KEY_SIZE(k) << 9;

		bio->bi_end_io	= uuid_endio;
		bio->bi_private = cl;
		bio_set_op_attrs(bio, op, REQ_SYNC|REQ_META|op_flags);
		bch_bio_map(bio, c->uuids);

		bch_submit_bbio(bio, c, k, i);

		if (op != REQ_OP_WRITE)
			break;
	}

	bch_extent_to_text(buf, sizeof(buf), k);
	pr_debug("%s UUIDs at %s", op == REQ_OP_WRITE ? "wrote" : "read", buf);

	for (u = c->uuids; u < c->uuids + c->nr_uuids; u++)
		if (!bch_is_zero(u->uuid, 16))
			pr_debug("Slot %zi: %pU: %s: 1st: %u last: %u inv: %u",
				 u - c->uuids, u->uuid, u->label,
				 u->first_reg, u->last_reg, u->invalidated);

	closure_return_with_destructor(cl, uuid_io_unlock);
}
static char *uuid_read(struct cache_set *c, struct jset *j, struct closure *cl)
{
	struct bkey *k = &j->uuid_bucket;

	if (__bch_btree_ptr_invalid(c, k))
		return "bad uuid pointer";

	bkey_copy(&c->uuid_bucket, k);
	uuid_io(c, REQ_OP_READ, 0, k, cl);

	if (j->version < BCACHE_JSET_VERSION_UUIDv1) {
		struct uuid_entry_v0	*u0 = (void *) c->uuids;
		struct uuid_entry	*u1 = (void *) c->uuids;
		int i;

		closure_sync(cl);

		/*
		 * Since the new uuid entry is bigger than the old, we have to
		 * convert starting at the highest memory address and work down
		 * in order to do it in place (illustrated after this function).
		 */

		for (i = c->nr_uuids - 1;
		     i >= 0;
		     --i) {
			memcpy(u1[i].uuid,	u0[i].uuid, 16);
			memcpy(u1[i].label,	u0[i].label, 32);

			u1[i].first_reg		= u0[i].first_reg;
			u1[i].last_reg		= u0[i].last_reg;
			u1[i].invalidated	= u0[i].invalidated;

			u1[i].flags	= 0;
			u1[i].sectors	= 0;
		}
	}

	return NULL;
}
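/*
 * A short illustration of the in-place widening in uuid_read() above (a
 * sketch with hypothetical names, not kernel code): old and new entries share
 * one buffer and sizeof(new) > sizeof(old), so new[i] starts at or beyond
 * old[i]. Converting from the highest index down means each write of new[i]
 * only lands on old entries with index >= i, which have already been read:
 *
 *	for (i = n - 1; i >= 0; --i)
 *		new[i] = widen(old[i]);
 *
 * Converting upward instead would overwrite old[i + 1] before it was read.
 */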
static int __uuid_write(struct cache_set *c)
{
	BKEY_PADDED(key) k;
	struct closure cl;

	closure_init_stack(&cl);
	lockdep_assert_held(&bch_register_lock);

	if (bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, true))
		return 1;

	SET_KEY_SIZE(&k.key, c->sb.bucket_size);
	uuid_io(c, REQ_OP_WRITE, 0, &k.key, &cl);
	closure_sync(&cl);

	bkey_copy(&c->uuid_bucket, &k.key);
	bkey_put(c, &k.key);
	return 0;
}

int bch_uuid_write(struct cache_set *c)
{
	int ret = __uuid_write(c);

	if (!ret)
		bch_journal_meta(c, NULL);

	return ret;
}
static struct uuid_entry *uuid_find(struct cache_set *c, const char *uuid)
{
	struct uuid_entry *u;

	for (u = c->uuids;
	     u < c->uuids + c->nr_uuids; u++)
		if (!memcmp(u->uuid, uuid, 16))
			return u;

	return NULL;
}

static struct uuid_entry *uuid_find_empty(struct cache_set *c)
{
	static const char zero_uuid[16] = "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0";
	return uuid_find(c, zero_uuid);
}
/*
 * Bucket priorities/gens:
 *
 * For each bucket, we store on disk its
 *   * 8 bit gen
 *   * 16 bit priority
 *
 * See alloc.c for an explanation of the gen. The priority is used to implement
 * lru (and in the future other) cache replacement policies; for most purposes
 * it's just an opaque integer.
 *
 * The gens and the priorities don't have a whole lot to do with each other,
 * and it's actually the gens that must be written out at specific times - it's
 * no big deal if the priorities don't get written, if we lose them we just
 * reuse buckets in suboptimal order.
 *
 * On disk they're stored in a packed array, and in as many buckets as are
 * required to fit them all. The buckets we use to store them form a list; the
 * journal header points to the first bucket, the first bucket points to the
 * second, et cetera.
 *
 * This code is used by the allocation code; periodically (whenever it runs out
 * of buckets to allocate from) the allocation code will invalidate some
 * buckets, but it can't use those buckets until their new gens are safely on
 * disk.
 */
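/*
 * Rough picture of the on-disk layout described above (a sketch; the field
 * names follow struct prio_set / struct bucket_disk as used below): the
 * journal header records the first prio bucket, and each bucket holds a
 * checksum, a magic value, a next_bucket pointer and a packed array of
 * {prio, gen} pairs:
 *
 *	journal -> [prio bucket 0] -> [prio bucket 1] -> ...
 *	           csum | magic | next_bucket | {prio,gen} {prio,gen} ...
 *
 * prio_read() below walks this chain bucket by bucket until every in-memory
 * struct bucket has its prio and gen restored.
 */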
static void prio_endio(struct bio *bio)
{
	struct cache *ca = bio->bi_private;

	cache_set_err_on(bio->bi_status, ca->set, "accessing priorities");
	bch_bbio_free(bio, ca->set);
	closure_put(&ca->prio);
}

static void prio_io(struct cache *ca, uint64_t bucket, int op,
		    unsigned long op_flags)
{
	struct closure *cl = &ca->prio;
	struct bio *bio = bch_bbio_alloc(ca->set);

	closure_init_stack(cl);

	bio->bi_iter.bi_sector	= bucket * ca->sb.bucket_size;
	bio_set_dev(bio, ca->bdev);
	bio->bi_iter.bi_size	= bucket_bytes(ca);

	bio->bi_end_io	= prio_endio;
	bio->bi_private = ca;
	bio_set_op_attrs(bio, op, REQ_SYNC|REQ_META|op_flags);
	bch_bio_map(bio, ca->disk_buckets);

	closure_bio_submit(ca->set, bio, &ca->prio);
	closure_sync(cl);
}
void bch_prio_write(struct cache *ca)
{
	int i;
	struct bucket *b;
	struct closure cl;

	closure_init_stack(&cl);

	lockdep_assert_held(&ca->set->bucket_lock);

	ca->disk_buckets->seq++;

	atomic_long_add(ca->sb.bucket_size * prio_buckets(ca),
			&ca->meta_sectors_written);

	//pr_debug("free %zu, free_inc %zu, unused %zu", fifo_used(&ca->free),
	//	 fifo_used(&ca->free_inc), fifo_used(&ca->unused));

	for (i = prio_buckets(ca) - 1; i >= 0; --i) {
		long bucket;
		struct prio_set *p = ca->disk_buckets;
		struct bucket_disk *d = p->data;
		struct bucket_disk *end = d + prios_per_bucket(ca);

		for (b = ca->buckets + i * prios_per_bucket(ca);
		     b < ca->buckets + ca->sb.nbuckets && d < end;
		     b++, d++) {
			d->prio = cpu_to_le16(b->prio);
			d->gen = b->gen;
		}

		p->next_bucket	= ca->prio_buckets[i + 1];
		p->magic	= pset_magic(&ca->sb);
		p->csum		= bch_crc64(&p->magic, bucket_bytes(ca) - 8);

		bucket = bch_bucket_alloc(ca, RESERVE_PRIO, true);
		BUG_ON(bucket == -1);

		mutex_unlock(&ca->set->bucket_lock);
		prio_io(ca, bucket, REQ_OP_WRITE, 0);
		mutex_lock(&ca->set->bucket_lock);

		ca->prio_buckets[i] = bucket;
		atomic_dec_bug(&ca->buckets[bucket].pin);
	}

	mutex_unlock(&ca->set->bucket_lock);

	bch_journal_meta(ca->set, &cl);
	closure_sync(&cl);

	mutex_lock(&ca->set->bucket_lock);

	/*
	 * Don't want the old priorities to get garbage collected until after
	 * we finish writing the new ones, and they're journalled
	 */
	for (i = 0; i < prio_buckets(ca); i++) {
		if (ca->prio_last_buckets[i])
			__bch_bucket_free(ca,
				&ca->buckets[ca->prio_last_buckets[i]]);

		ca->prio_last_buckets[i] = ca->prio_buckets[i];
	}
}
static void prio_read(struct cache *ca, uint64_t bucket)
{
	struct prio_set *p = ca->disk_buckets;
	struct bucket_disk *d = p->data + prios_per_bucket(ca), *end = d;
	struct bucket *b;
	unsigned bucket_nr = 0;

	for (b = ca->buckets;
	     b < ca->buckets + ca->sb.nbuckets;
	     b++, d++) {
		if (d == end) {
			ca->prio_buckets[bucket_nr] = bucket;
			ca->prio_last_buckets[bucket_nr] = bucket;
			bucket_nr++;

			prio_io(ca, bucket, REQ_OP_READ, 0);

			if (p->csum != bch_crc64(&p->magic, bucket_bytes(ca) - 8))
				pr_warn("bad csum reading priorities");

			if (p->magic != pset_magic(&ca->sb))
				pr_warn("bad magic reading priorities");

			bucket = p->next_bucket;
			d = p->data;
		}

		b->prio = le16_to_cpu(d->prio);
		b->gen = b->last_gc = d->gen;
	}
}
static int open_dev(struct block_device *b, fmode_t mode)
{
	struct bcache_device *d = b->bd_disk->private_data;
	if (test_bit(BCACHE_DEV_CLOSING, &d->flags))
		return -ENXIO;

	closure_get(&d->cl);
	return 0;
}

static void release_dev(struct gendisk *b, fmode_t mode)
{
	struct bcache_device *d = b->private_data;
	closure_put(&d->cl);
}

static int ioctl_dev(struct block_device *b, fmode_t mode,
		     unsigned int cmd, unsigned long arg)
{
	struct bcache_device *d = b->bd_disk->private_data;

	return d->ioctl(d, mode, cmd, arg);
}

static const struct block_device_operations bcache_ops = {
	.open		= open_dev,
	.release	= release_dev,
	.ioctl		= ioctl_dev,
	.owner		= THIS_MODULE,
};
666 void bcache_device_stop(struct bcache_device
*d
)
668 if (!test_and_set_bit(BCACHE_DEV_CLOSING
, &d
->flags
))
669 closure_queue(&d
->cl
);
672 static void bcache_device_unlink(struct bcache_device
*d
)
674 lockdep_assert_held(&bch_register_lock
);
676 if (d
->c
&& !test_and_set_bit(BCACHE_DEV_UNLINK_DONE
, &d
->flags
)) {
680 sysfs_remove_link(&d
->c
->kobj
, d
->name
);
681 sysfs_remove_link(&d
->kobj
, "cache");
683 for_each_cache(ca
, d
->c
, i
)
684 bd_unlink_disk_holder(ca
->bdev
, d
->disk
);
688 static void bcache_device_link(struct bcache_device
*d
, struct cache_set
*c
,
694 for_each_cache(ca
, d
->c
, i
)
695 bd_link_disk_holder(ca
->bdev
, d
->disk
);
697 snprintf(d
->name
, BCACHEDEVNAME_SIZE
,
698 "%s%u", name
, d
->id
);
700 WARN(sysfs_create_link(&d
->kobj
, &c
->kobj
, "cache") ||
701 sysfs_create_link(&c
->kobj
, &d
->kobj
, d
->name
),
702 "Couldn't create device <-> cache set symlinks");
704 clear_bit(BCACHE_DEV_UNLINK_DONE
, &d
->flags
);
707 static void bcache_device_detach(struct bcache_device
*d
)
709 lockdep_assert_held(&bch_register_lock
);
711 if (test_bit(BCACHE_DEV_DETACHING
, &d
->flags
)) {
712 struct uuid_entry
*u
= d
->c
->uuids
+ d
->id
;
714 SET_UUID_FLASH_ONLY(u
, 0);
715 memcpy(u
->uuid
, invalid_uuid
, 16);
716 u
->invalidated
= cpu_to_le32(get_seconds());
717 bch_uuid_write(d
->c
);
720 bcache_device_unlink(d
);
722 d
->c
->devices
[d
->id
] = NULL
;
723 closure_put(&d
->c
->caching
);
727 static void bcache_device_attach(struct bcache_device
*d
, struct cache_set
*c
,
734 closure_get(&c
->caching
);
static inline int first_minor_to_idx(int first_minor)
{
	return (first_minor/BCACHE_MINORS);
}

static inline int idx_to_first_minor(int idx)
{
	return (idx * BCACHE_MINORS);
}
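/*
 * Quick round-trip example for the two helpers above (BCACHE_MINORS == 128):
 * the device registered as bcache3 gets first_minor = idx_to_first_minor(3)
 * = 384, and any of minors 384..511 maps back via first_minor_to_idx() to
 * index 3.
 */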
747 static void bcache_device_free(struct bcache_device
*d
)
749 lockdep_assert_held(&bch_register_lock
);
751 pr_info("%s stopped", d
->disk
->disk_name
);
754 bcache_device_detach(d
);
755 if (d
->disk
&& d
->disk
->flags
& GENHD_FL_UP
)
756 del_gendisk(d
->disk
);
757 if (d
->disk
&& d
->disk
->queue
)
758 blk_cleanup_queue(d
->disk
->queue
);
760 ida_simple_remove(&bcache_device_idx
,
761 first_minor_to_idx(d
->disk
->first_minor
));
766 bioset_free(d
->bio_split
);
767 kvfree(d
->full_dirty_stripes
);
768 kvfree(d
->stripe_sectors_dirty
);
770 closure_debug_destroy(&d
->cl
);
773 static int bcache_device_init(struct bcache_device
*d
, unsigned block_size
,
776 struct request_queue
*q
;
781 d
->stripe_size
= 1 << 31;
783 d
->nr_stripes
= DIV_ROUND_UP_ULL(sectors
, d
->stripe_size
);
785 if (!d
->nr_stripes
||
786 d
->nr_stripes
> INT_MAX
||
787 d
->nr_stripes
> SIZE_MAX
/ sizeof(atomic_t
)) {
788 pr_err("nr_stripes too large or invalid: %u (start sector beyond end of disk?)",
789 (unsigned)d
->nr_stripes
);
793 n
= d
->nr_stripes
* sizeof(atomic_t
);
794 d
->stripe_sectors_dirty
= kvzalloc(n
, GFP_KERNEL
);
795 if (!d
->stripe_sectors_dirty
)
798 n
= BITS_TO_LONGS(d
->nr_stripes
) * sizeof(unsigned long);
799 d
->full_dirty_stripes
= kvzalloc(n
, GFP_KERNEL
);
800 if (!d
->full_dirty_stripes
)
803 idx
= ida_simple_get(&bcache_device_idx
, 0,
804 BCACHE_DEVICE_IDX_MAX
, GFP_KERNEL
);
808 if (!(d
->bio_split
= bioset_create(4, offsetof(struct bbio
, bio
),
810 BIOSET_NEED_RESCUER
)) ||
811 !(d
->disk
= alloc_disk(BCACHE_MINORS
))) {
812 ida_simple_remove(&bcache_device_idx
, idx
);
816 set_capacity(d
->disk
, sectors
);
817 snprintf(d
->disk
->disk_name
, DISK_NAME_LEN
, "bcache%i", idx
);
819 d
->disk
->major
= bcache_major
;
820 d
->disk
->first_minor
= idx_to_first_minor(idx
);
821 d
->disk
->fops
= &bcache_ops
;
822 d
->disk
->private_data
= d
;
824 q
= blk_alloc_queue(GFP_KERNEL
);
828 blk_queue_make_request(q
, NULL
);
831 q
->backing_dev_info
->congested_data
= d
;
832 q
->limits
.max_hw_sectors
= UINT_MAX
;
833 q
->limits
.max_sectors
= UINT_MAX
;
834 q
->limits
.max_segment_size
= UINT_MAX
;
835 q
->limits
.max_segments
= BIO_MAX_PAGES
;
836 blk_queue_max_discard_sectors(q
, UINT_MAX
);
837 q
->limits
.discard_granularity
= 512;
838 q
->limits
.io_min
= block_size
;
839 q
->limits
.logical_block_size
= block_size
;
840 q
->limits
.physical_block_size
= block_size
;
841 set_bit(QUEUE_FLAG_NONROT
, &d
->disk
->queue
->queue_flags
);
842 clear_bit(QUEUE_FLAG_ADD_RANDOM
, &d
->disk
->queue
->queue_flags
);
843 set_bit(QUEUE_FLAG_DISCARD
, &d
->disk
->queue
->queue_flags
);
845 blk_queue_write_cache(q
, true, true);
852 static void calc_cached_dev_sectors(struct cache_set
*c
)
854 uint64_t sectors
= 0;
855 struct cached_dev
*dc
;
857 list_for_each_entry(dc
, &c
->cached_devs
, list
)
858 sectors
+= bdev_sectors(dc
->bdev
);
860 c
->cached_dev_sectors
= sectors
;
863 #define BACKING_DEV_OFFLINE_TIMEOUT 5
864 static int cached_dev_status_update(void *arg
)
866 struct cached_dev
*dc
= arg
;
867 struct request_queue
*q
;
	/*
	 * If this kthread is being stopped from elsewhere, quit directly.
	 * dc->io_disable might be set via the sysfs interface, so check it
	 * here too.
	 */
874 while (!kthread_should_stop() && !dc
->io_disable
) {
875 q
= bdev_get_queue(dc
->bdev
);
876 if (blk_queue_dying(q
))
877 dc
->offline_seconds
++;
879 dc
->offline_seconds
= 0;
881 if (dc
->offline_seconds
>= BACKING_DEV_OFFLINE_TIMEOUT
) {
882 pr_err("%s: device offline for %d seconds",
883 dc
->backing_dev_name
,
884 BACKING_DEV_OFFLINE_TIMEOUT
);
885 pr_err("%s: disable I/O request due to backing "
886 "device offline", dc
->disk
.name
);
887 dc
->io_disable
= true;
888 /* let others know earlier that io_disable is true */
890 bcache_device_stop(&dc
->disk
);
893 schedule_timeout_interruptible(HZ
);
896 wait_for_kthread_stop();
901 void bch_cached_dev_emit_change(struct cached_dev
*dc
)
903 struct bcache_device
*d
= &dc
->disk
;
904 char buf
[SB_LABEL_SIZE
+ 1];
907 kasprintf(GFP_KERNEL
, "CACHED_UUID=%pU", dc
->sb
.uuid
),
912 memcpy(buf
, dc
->sb
.label
, SB_LABEL_SIZE
);
913 buf
[SB_LABEL_SIZE
] = '\0';
914 env
[2] = kasprintf(GFP_KERNEL
, "CACHED_LABEL=%s", buf
);
	/*
	 * This won't show up in the uevent file; use "udevadm monitor -e"
	 * instead. Only class / kset properties are persistent.
	 */
918 kobject_uevent_env(&disk_to_dev(d
->disk
)->kobj
, KOBJ_CHANGE
, env
);
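/*
 * Example consumer of the CACHED_UUID property emitted above (an illustrative
 * udev rule, not shipped by this file; the exact rule in bcache-tools may
 * differ):
 *
 *	ACTION=="change", SUBSYSTEM=="block", ENV{CACHED_UUID}=="?*", \
 *		SYMLINK+="bcache/by-uuid/$env{CACHED_UUID}"
 */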
924 void bch_cached_dev_run(struct cached_dev
*dc
)
926 struct bcache_device
*d
= &dc
->disk
;
927 if (atomic_xchg(&dc
->running
, 1)) {
932 BDEV_STATE(&dc
->sb
) != BDEV_STATE_NONE
) {
934 closure_init_stack(&cl
);
936 SET_BDEV_STATE(&dc
->sb
, BDEV_STATE_STALE
);
937 bch_write_bdev_super(dc
, &cl
);
942 bd_link_disk_holder(dc
->bdev
, dc
->disk
.disk
);
944 /* emit change event */
945 bch_cached_dev_emit_change(dc
);
947 if (sysfs_create_link(&d
->kobj
, &disk_to_dev(d
->disk
)->kobj
, "dev") ||
948 sysfs_create_link(&disk_to_dev(d
->disk
)->kobj
, &d
->kobj
, "bcache"))
949 pr_debug("error creating sysfs link");
951 dc
->status_update_thread
= kthread_run(cached_dev_status_update
,
952 dc
, "bcache_status_update");
953 if (IS_ERR(dc
->status_update_thread
)) {
954 pr_warn("failed to create bcache_status_update kthread, "
955 "continue to run without monitoring backing "
/*
 * If BCACHE_DEV_RATE_DW_RUNNING is set, the routine of the delayed work
 * dc->writeback_rate_update is running. Wait until the routine quits
 * (BCACHE_DEV_RATE_DW_RUNNING is cleared), then continue to cancel it. If
 * BCACHE_DEV_RATE_DW_RUNNING is still set after time_out seconds, give up
 * waiting and cancel it anyway.
 */
967 static void cancel_writeback_rate_update_dwork(struct cached_dev
*dc
)
969 int time_out
= WRITEBACK_RATE_UPDATE_SECS_MAX
* HZ
;
972 if (!test_bit(BCACHE_DEV_RATE_DW_RUNNING
,
976 schedule_timeout_interruptible(1);
977 } while (time_out
> 0);
980 pr_warn("give up waiting for dc->writeback_write_update to quit");
982 cancel_delayed_work_sync(&dc
->writeback_rate_update
);
985 static void cached_dev_detach_finish(struct work_struct
*w
)
987 struct cached_dev
*dc
= container_of(w
, struct cached_dev
, detach
);
989 closure_init_stack(&cl
);
991 BUG_ON(!test_bit(BCACHE_DEV_DETACHING
, &dc
->disk
.flags
));
992 BUG_ON(refcount_read(&dc
->count
));
994 mutex_lock(&bch_register_lock
);
996 if (test_and_clear_bit(BCACHE_DEV_WB_RUNNING
, &dc
->disk
.flags
))
997 cancel_writeback_rate_update_dwork(dc
);
999 if (!IS_ERR_OR_NULL(dc
->writeback_thread
)) {
1000 kthread_stop(dc
->writeback_thread
);
1001 dc
->writeback_thread
= NULL
;
1004 memset(&dc
->sb
.set_uuid
, 0, 16);
1005 SET_BDEV_STATE(&dc
->sb
, BDEV_STATE_NONE
);
1007 bch_write_bdev_super(dc
, &cl
);
1010 bcache_device_detach(&dc
->disk
);
1011 list_move(&dc
->list
, &uncached_devices
);
1013 clear_bit(BCACHE_DEV_DETACHING
, &dc
->disk
.flags
);
1014 clear_bit(BCACHE_DEV_UNLINK_DONE
, &dc
->disk
.flags
);
1016 mutex_unlock(&bch_register_lock
);
1018 pr_info("Caching disabled for %s", dc
->backing_dev_name
);
1020 /* Drop ref we took in cached_dev_detach() */
1021 closure_put(&dc
->disk
.cl
);
1024 void bch_cached_dev_detach(struct cached_dev
*dc
)
1026 lockdep_assert_held(&bch_register_lock
);
1028 if (test_bit(BCACHE_DEV_CLOSING
, &dc
->disk
.flags
))
1031 if (test_and_set_bit(BCACHE_DEV_DETACHING
, &dc
->disk
.flags
))
1035 * Block the device from being closed and freed until we're finished
1038 closure_get(&dc
->disk
.cl
);
1040 bch_writeback_queue(dc
);
1045 int bch_cached_dev_attach(struct cached_dev
*dc
, struct cache_set
*c
,
1048 uint32_t rtime
= cpu_to_le32(get_seconds());
1049 struct uuid_entry
*u
;
1050 struct cached_dev
*exist_dc
, *t
;
1052 if ((set_uuid
&& memcmp(set_uuid
, c
->sb
.set_uuid
, 16)) ||
1053 (!set_uuid
&& memcmp(dc
->sb
.set_uuid
, c
->sb
.set_uuid
, 16)))
1057 pr_err("Can't attach %s: already attached",
1058 dc
->backing_dev_name
);
1062 if (test_bit(CACHE_SET_STOPPING
, &c
->flags
)) {
1063 pr_err("Can't attach %s: shutting down",
1064 dc
->backing_dev_name
);
1068 if (dc
->sb
.block_size
< c
->sb
.block_size
) {
1070 pr_err("Couldn't attach %s: block size less than set's block size",
1071 dc
->backing_dev_name
);
1075 /* Check whether already attached */
1076 list_for_each_entry_safe(exist_dc
, t
, &c
->cached_devs
, list
) {
1077 if (!memcmp(dc
->sb
.uuid
, exist_dc
->sb
.uuid
, 16)) {
1078 pr_err("Tried to attach %s but duplicate UUID already attached",
1079 dc
->backing_dev_name
);
1085 u
= uuid_find(c
, dc
->sb
.uuid
);
1088 (BDEV_STATE(&dc
->sb
) == BDEV_STATE_STALE
||
1089 BDEV_STATE(&dc
->sb
) == BDEV_STATE_NONE
)) {
1090 memcpy(u
->uuid
, invalid_uuid
, 16);
1091 u
->invalidated
= cpu_to_le32(get_seconds());
1096 if (BDEV_STATE(&dc
->sb
) == BDEV_STATE_DIRTY
) {
1097 pr_err("Couldn't find uuid for %s in set",
1098 dc
->backing_dev_name
);
1102 u
= uuid_find_empty(c
);
1104 pr_err("Not caching %s, no room for UUID",
1105 dc
->backing_dev_name
);
1110 /* Deadlocks since we're called via sysfs...
1111 sysfs_remove_file(&dc->kobj, &sysfs_attach);
1114 if (bch_is_zero(u
->uuid
, 16)) {
1116 closure_init_stack(&cl
);
1118 memcpy(u
->uuid
, dc
->sb
.uuid
, 16);
1119 memcpy(u
->label
, dc
->sb
.label
, SB_LABEL_SIZE
);
1120 u
->first_reg
= u
->last_reg
= rtime
;
1123 memcpy(dc
->sb
.set_uuid
, c
->sb
.set_uuid
, 16);
1124 SET_BDEV_STATE(&dc
->sb
, BDEV_STATE_CLEAN
);
1126 bch_write_bdev_super(dc
, &cl
);
1129 u
->last_reg
= rtime
;
1133 bcache_device_attach(&dc
->disk
, c
, u
- c
->uuids
);
1134 list_move(&dc
->list
, &c
->cached_devs
);
1135 calc_cached_dev_sectors(c
);
1139 * dc->c must be set before dc->count != 0 - paired with the mb in
1142 refcount_set(&dc
->count
, 1);
1144 /* Block writeback thread, but spawn it */
1145 down_write(&dc
->writeback_lock
);
1146 if (bch_cached_dev_writeback_start(dc
)) {
1147 up_write(&dc
->writeback_lock
);
1151 if (BDEV_STATE(&dc
->sb
) == BDEV_STATE_DIRTY
) {
1152 atomic_set(&dc
->has_dirty
, 1);
1153 bch_writeback_queue(dc
);
1156 bch_sectors_dirty_init(&dc
->disk
);
1158 bch_cached_dev_run(dc
);
1159 bcache_device_link(&dc
->disk
, c
, "bdev");
1161 /* Allow the writeback thread to proceed */
1162 up_write(&dc
->writeback_lock
);
1164 pr_info("Caching %s as %s on set %pU",
1165 dc
->backing_dev_name
,
1166 dc
->disk
.disk
->disk_name
,
1167 dc
->disk
.c
->sb
.set_uuid
);
1171 void bch_cached_dev_release(struct kobject
*kobj
)
1173 struct cached_dev
*dc
= container_of(kobj
, struct cached_dev
,
1176 module_put(THIS_MODULE
);
1179 static void cached_dev_free(struct closure
*cl
)
1181 struct cached_dev
*dc
= container_of(cl
, struct cached_dev
, disk
.cl
);
1183 if (test_and_clear_bit(BCACHE_DEV_WB_RUNNING
, &dc
->disk
.flags
))
1184 cancel_writeback_rate_update_dwork(dc
);
1186 if (!IS_ERR_OR_NULL(dc
->writeback_thread
))
1187 kthread_stop(dc
->writeback_thread
);
1188 if (dc
->writeback_write_wq
)
1189 destroy_workqueue(dc
->writeback_write_wq
);
1190 if (!IS_ERR_OR_NULL(dc
->status_update_thread
))
1191 kthread_stop(dc
->status_update_thread
);
1193 mutex_lock(&bch_register_lock
);
1195 if (atomic_read(&dc
->running
))
1196 bd_unlink_disk_holder(dc
->bdev
, dc
->disk
.disk
);
1197 bcache_device_free(&dc
->disk
);
1198 list_del(&dc
->list
);
1200 mutex_unlock(&bch_register_lock
);
1202 if (!IS_ERR_OR_NULL(dc
->bdev
))
1203 blkdev_put(dc
->bdev
, FMODE_READ
|FMODE_WRITE
|FMODE_EXCL
);
1205 wake_up(&unregister_wait
);
1207 kobject_put(&dc
->disk
.kobj
);
1210 static void cached_dev_flush(struct closure
*cl
)
1212 struct cached_dev
*dc
= container_of(cl
, struct cached_dev
, disk
.cl
);
1213 struct bcache_device
*d
= &dc
->disk
;
1215 mutex_lock(&bch_register_lock
);
1216 bcache_device_unlink(d
);
1217 mutex_unlock(&bch_register_lock
);
1219 bch_cache_accounting_destroy(&dc
->accounting
);
1220 kobject_del(&d
->kobj
);
1222 continue_at(cl
, cached_dev_free
, system_wq
);
1225 static int cached_dev_init(struct cached_dev
*dc
, unsigned block_size
)
1229 struct request_queue
*q
= bdev_get_queue(dc
->bdev
);
1231 __module_get(THIS_MODULE
);
1232 INIT_LIST_HEAD(&dc
->list
);
1233 closure_init(&dc
->disk
.cl
, NULL
);
1234 set_closure_fn(&dc
->disk
.cl
, cached_dev_flush
, system_wq
);
1235 kobject_init(&dc
->disk
.kobj
, &bch_cached_dev_ktype
);
1236 INIT_WORK(&dc
->detach
, cached_dev_detach_finish
);
1237 sema_init(&dc
->sb_write_mutex
, 1);
1238 INIT_LIST_HEAD(&dc
->io_lru
);
1239 spin_lock_init(&dc
->io_lock
);
1240 bch_cache_accounting_init(&dc
->accounting
, &dc
->disk
.cl
);
1242 dc
->sequential_cutoff
= 4 << 20;
1244 for (io
= dc
->io
; io
< dc
->io
+ RECENT_IO
; io
++) {
1245 list_add(&io
->lru
, &dc
->io_lru
);
1246 hlist_add_head(&io
->hash
, dc
->io_hash
+ RECENT_IO
);
1249 dc
->disk
.stripe_size
= q
->limits
.io_opt
>> 9;
1251 if (dc
->disk
.stripe_size
)
1252 dc
->partial_stripes_expensive
=
1253 q
->limits
.raid_partial_stripes_expensive
;
1255 ret
= bcache_device_init(&dc
->disk
, block_size
,
1256 dc
->bdev
->bd_part
->nr_sects
- dc
->sb
.data_offset
);
1260 dc
->disk
.disk
->queue
->backing_dev_info
->ra_pages
=
1261 max(dc
->disk
.disk
->queue
->backing_dev_info
->ra_pages
,
1262 q
->backing_dev_info
->ra_pages
);
1264 atomic_set(&dc
->io_errors
, 0);
1265 dc
->io_disable
= false;
1266 dc
->error_limit
= DEFAULT_CACHED_DEV_ERROR_LIMIT
;
1267 /* default to auto */
1268 dc
->stop_when_cache_set_failed
= BCH_CACHED_DEV_STOP_AUTO
;
1270 bch_cached_dev_request_init(dc
);
1271 bch_cached_dev_writeback_init(dc
);
1275 /* Cached device - bcache superblock */
1277 static void register_bdev(struct cache_sb
*sb
, struct page
*sb_page
,
1278 struct block_device
*bdev
,
1279 struct cached_dev
*dc
)
1281 const char *err
= "cannot allocate memory";
1282 struct cache_set
*c
;
1284 bdevname(bdev
, dc
->backing_dev_name
);
1285 memcpy(&dc
->sb
, sb
, sizeof(struct cache_sb
));
1287 dc
->bdev
->bd_holder
= dc
;
1289 bio_init(&dc
->sb_bio
, dc
->sb_bio
.bi_inline_vecs
, 1);
1290 dc
->sb_bio
.bi_io_vec
[0].bv_page
= sb_page
;
1294 if (cached_dev_init(dc
, sb
->block_size
<< 9))
1297 err
= "error creating kobject";
1298 if (kobject_add(&dc
->disk
.kobj
, &part_to_dev(bdev
->bd_part
)->kobj
,
1301 if (bch_cache_accounting_add_kobjs(&dc
->accounting
, &dc
->disk
.kobj
))
1304 pr_info("registered backing device %s", dc
->backing_dev_name
);
1306 list_add(&dc
->list
, &uncached_devices
);
1307 list_for_each_entry(c
, &bch_cache_sets
, list
)
1308 bch_cached_dev_attach(dc
, c
, NULL
);
1310 if (BDEV_STATE(&dc
->sb
) == BDEV_STATE_NONE
||
1311 BDEV_STATE(&dc
->sb
) == BDEV_STATE_STALE
)
1312 bch_cached_dev_run(dc
);
1316 pr_notice("error %s: %s", dc
->backing_dev_name
, err
);
1317 bcache_device_stop(&dc
->disk
);
1320 /* Flash only volumes */
1322 void bch_flash_dev_release(struct kobject
*kobj
)
1324 struct bcache_device
*d
= container_of(kobj
, struct bcache_device
,
1329 static void flash_dev_free(struct closure
*cl
)
1331 struct bcache_device
*d
= container_of(cl
, struct bcache_device
, cl
);
1332 mutex_lock(&bch_register_lock
);
1333 bcache_device_free(d
);
1334 mutex_unlock(&bch_register_lock
);
1335 kobject_put(&d
->kobj
);
1338 static void flash_dev_flush(struct closure
*cl
)
1340 struct bcache_device
*d
= container_of(cl
, struct bcache_device
, cl
);
1342 mutex_lock(&bch_register_lock
);
1343 bcache_device_unlink(d
);
1344 mutex_unlock(&bch_register_lock
);
1345 kobject_del(&d
->kobj
);
1346 continue_at(cl
, flash_dev_free
, system_wq
);
1349 static int flash_dev_run(struct cache_set
*c
, struct uuid_entry
*u
)
1351 struct bcache_device
*d
= kzalloc(sizeof(struct bcache_device
),
1356 closure_init(&d
->cl
, NULL
);
1357 set_closure_fn(&d
->cl
, flash_dev_flush
, system_wq
);
1359 kobject_init(&d
->kobj
, &bch_flash_dev_ktype
);
1361 if (bcache_device_init(d
, block_bytes(c
), u
->sectors
))
1364 bcache_device_attach(d
, c
, u
- c
->uuids
);
1365 bch_sectors_dirty_init(d
);
1366 bch_flash_dev_request_init(d
);
1369 if (kobject_add(&d
->kobj
, &disk_to_dev(d
->disk
)->kobj
, "bcache"))
1372 bcache_device_link(d
, c
, "volume");
1376 kobject_put(&d
->kobj
);
1380 static int flash_devs_run(struct cache_set
*c
)
1383 struct uuid_entry
*u
;
1386 u
< c
->uuids
+ c
->nr_uuids
&& !ret
;
1388 if (UUID_FLASH_ONLY(u
))
1389 ret
= flash_dev_run(c
, u
);
1394 int bch_flash_dev_create(struct cache_set
*c
, uint64_t size
)
1396 struct uuid_entry
*u
;
1398 if (test_bit(CACHE_SET_STOPPING
, &c
->flags
))
1401 if (!test_bit(CACHE_SET_RUNNING
, &c
->flags
))
1404 u
= uuid_find_empty(c
);
1406 pr_err("Can't create volume, no room for UUID");
1410 get_random_bytes(u
->uuid
, 16);
1411 memset(u
->label
, 0, 32);
1412 u
->first_reg
= u
->last_reg
= cpu_to_le32(get_seconds());
1414 SET_UUID_FLASH_ONLY(u
, 1);
1415 u
->sectors
= size
>> 9;
1419 return flash_dev_run(c
, u
);
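/*
 * bch_flash_dev_create() is reached from sysfs; a typical invocation (path
 * assumed from the usual sysfs layout, the size string is parsed by the
 * sysfs code rather than here):
 *
 *	echo 20G > /sys/fs/bcache/<cache-set-uuid>/flash_vol_create
 *
 * which grabs an empty uuid_entry, marks it UUID_FLASH_ONLY and runs the new
 * volume as the next free /dev/bcacheN.
 */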
1422 bool bch_cached_dev_error(struct cached_dev
*dc
)
1424 if (!dc
|| test_bit(BCACHE_DEV_CLOSING
, &dc
->disk
.flags
))
1427 dc
->io_disable
= true;
1428 /* make others know io_disable is true earlier */
1431 pr_err("stop %s: too many IO errors on backing device %s\n",
1432 dc
->disk
.disk
->disk_name
, dc
->backing_dev_name
);
1434 bcache_device_stop(&dc
->disk
);
1441 bool bch_cache_set_error(struct cache_set
*c
, const char *fmt
, ...)
1445 if (c
->on_error
!= ON_ERROR_PANIC
&&
1446 test_bit(CACHE_SET_STOPPING
, &c
->flags
))
1449 if (test_and_set_bit(CACHE_SET_IO_DISABLE
, &c
->flags
))
1450 pr_warn("CACHE_SET_IO_DISABLE already set");
1452 /* XXX: we can be called from atomic context
1453 acquire_console_sem();
1456 printk(KERN_ERR
"bcache: error on %pU: ", c
->sb
.set_uuid
);
1458 va_start(args
, fmt
);
1462 printk(", disabling caching\n");
1464 if (c
->on_error
== ON_ERROR_PANIC
)
1465 panic("panic forced after error\n");
1467 bch_cache_set_unregister(c
);
1471 void bch_cache_set_release(struct kobject
*kobj
)
1473 struct cache_set
*c
= container_of(kobj
, struct cache_set
, kobj
);
1475 module_put(THIS_MODULE
);
1478 static void cache_set_free(struct closure
*cl
)
1480 struct cache_set
*c
= container_of(cl
, struct cache_set
, cl
);
1484 if (!IS_ERR_OR_NULL(c
->debug
))
1485 debugfs_remove(c
->debug
);
1487 bch_open_buckets_free(c
);
1488 bch_btree_cache_free(c
);
1489 bch_journal_free(c
);
1491 mutex_lock(&bch_register_lock
);
1492 for_each_cache(ca
, c
, i
)
1495 c
->cache
[ca
->sb
.nr_this_dev
] = NULL
;
1496 kobject_put(&ca
->kobj
);
1499 bch_bset_sort_state_free(&c
->sort
);
1500 free_pages((unsigned long) c
->uuids
, ilog2(bucket_pages(c
)));
1502 if (c
->moving_gc_wq
)
1503 destroy_workqueue(c
->moving_gc_wq
);
1505 bioset_free(c
->bio_split
);
1507 mempool_destroy(c
->fill_iter
);
1509 mempool_destroy(c
->bio_meta
);
1511 mempool_destroy(c
->search
);
1515 mutex_unlock(&bch_register_lock
);
1517 pr_info("Cache set %pU unregistered", c
->sb
.set_uuid
);
1518 wake_up(&unregister_wait
);
1520 closure_debug_destroy(&c
->cl
);
1521 kobject_put(&c
->kobj
);
1524 static void cache_set_flush(struct closure
*cl
)
1526 struct cache_set
*c
= container_of(cl
, struct cache_set
, caching
);
1531 bch_cache_accounting_destroy(&c
->accounting
);
1533 kobject_put(&c
->internal
);
1534 kobject_del(&c
->kobj
);
1536 if (!IS_ERR_OR_NULL(c
->gc_thread
))
1537 kthread_stop(c
->gc_thread
);
1539 if (!IS_ERR_OR_NULL(c
->root
))
1540 list_add(&c
->root
->list
, &c
->btree_cache
);
1542 /* Should skip this if we're unregistering because of an error */
1543 list_for_each_entry(b
, &c
->btree_cache
, list
) {
1544 mutex_lock(&b
->write_lock
);
1545 if (btree_node_dirty(b
))
1546 __bch_btree_node_write(b
, NULL
);
1547 mutex_unlock(&b
->write_lock
);
1550 for_each_cache(ca
, c
, i
)
1551 if (ca
->alloc_thread
)
1552 kthread_stop(ca
->alloc_thread
);
1554 if (c
->journal
.cur
) {
1555 cancel_delayed_work_sync(&c
->journal
.work
);
1556 /* flush last journal entry if needed */
1557 c
->journal
.work
.work
.func(&c
->journal
.work
.work
);
/*
 * This function is only called when CACHE_SET_IO_DISABLE is set, which means
 * the cache set is unregistering due to too many I/O errors. In this
 * condition, the bcache device might be stopped; it depends on the
 * stop_when_cache_set_failed value and whether the broken cache has dirty
 * data:
 *
 * dc->stop_when_cache_set_failed    dc->has_dirty   stop bcache device
 *  BCH_CACHED_DEV_STOP_AUTO              0                NO
 *  BCH_CACHED_DEV_STOP_AUTO              1                YES
 *  BCH_CACHED_DEV_STOP_ALWAYS            0                YES
 *  BCH_CACHED_DEV_STOP_ALWAYS            1                YES
 *
 * The expected behavior is: if stop_when_cache_set_failed is configured to
 * "auto" via the sysfs interface, the bcache device will not be stopped when
 * the backing device is clean on the broken cache device.
 */
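/*
 * The per-device policy referenced above is tunable at runtime; a typical way
 * to force the "always" behavior (path assumed from the usual sysfs layout):
 *
 *	echo always > /sys/block/bcache0/bcache/stop_when_cache_set_failed
 *
 * with the accepted strings coming from bch_stop_on_failure_modes[] near the
 * top of this file.
 */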
1579 static void conditional_stop_bcache_device(struct cache_set
*c
,
1580 struct bcache_device
*d
,
1581 struct cached_dev
*dc
)
1583 if (dc
->stop_when_cache_set_failed
== BCH_CACHED_DEV_STOP_ALWAYS
) {
1584 pr_warn("stop_when_cache_set_failed of %s is \"always\", stop it for failed cache set %pU.",
1585 d
->disk
->disk_name
, c
->sb
.set_uuid
);
1586 bcache_device_stop(d
);
1587 } else if (atomic_read(&dc
->has_dirty
)) {
1589 * dc->stop_when_cache_set_failed == BCH_CACHED_STOP_AUTO
1590 * and dc->has_dirty == 1
1592 pr_warn("stop_when_cache_set_failed of %s is \"auto\" and cache is dirty, stop it to avoid potential data corruption.",
1593 d
->disk
->disk_name
);
		/*
		 * There may be a small window in which the cache set is
		 * already released but the bcache device is not. During
		 * that window, regular I/O requests go directly to the
		 * backing device, since no cache set is attached; in
		 * writeback mode with dirty data this can leave the
		 * backing device inconsistent. Therefore, before calling
		 * bcache_device_stop() for a broken cache device,
		 * dc->io_disable must be explicitly set to true.
		 */
1605 dc
->io_disable
= true;
1606 /* make others know io_disable is true earlier */
1608 bcache_device_stop(d
);
1611 * dc->stop_when_cache_set_failed == BCH_CACHED_STOP_AUTO
1612 * and dc->has_dirty == 0
1614 pr_warn("stop_when_cache_set_failed of %s is \"auto\" and cache is clean, keep it alive.",
1615 d
->disk
->disk_name
);
1619 static void __cache_set_unregister(struct closure
*cl
)
1621 struct cache_set
*c
= container_of(cl
, struct cache_set
, caching
);
1622 struct cached_dev
*dc
;
1623 struct bcache_device
*d
;
1626 mutex_lock(&bch_register_lock
);
1628 for (i
= 0; i
< c
->nr_uuids
; i
++) {
1633 if (!UUID_FLASH_ONLY(&c
->uuids
[i
]) &&
1634 test_bit(CACHE_SET_UNREGISTERING
, &c
->flags
)) {
1635 dc
= container_of(d
, struct cached_dev
, disk
);
1636 bch_cached_dev_detach(dc
);
1637 if (test_bit(CACHE_SET_IO_DISABLE
, &c
->flags
))
1638 conditional_stop_bcache_device(c
, d
, dc
);
1640 bcache_device_stop(d
);
1644 mutex_unlock(&bch_register_lock
);
1646 continue_at(cl
, cache_set_flush
, system_wq
);
1649 void bch_cache_set_stop(struct cache_set
*c
)
1651 if (!test_and_set_bit(CACHE_SET_STOPPING
, &c
->flags
))
1652 closure_queue(&c
->caching
);
1655 void bch_cache_set_unregister(struct cache_set
*c
)
1657 set_bit(CACHE_SET_UNREGISTERING
, &c
->flags
);
1658 bch_cache_set_stop(c
);
#define alloc_bucket_pages(gfp, c)					\
	((void *) __get_free_pages(__GFP_ZERO|gfp, ilog2(bucket_pages(c))))
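/*
 * Example of the order computed above, assuming 4 KiB pages and the common
 * 512 KiB bucket size: bucket_pages(c) = 128, ilog2(128) = 7, so
 * alloc_bucket_pages() asks for a 2^7-page (512 KiB) physically contiguous,
 * zeroed allocation - one bucket's worth of memory.
 */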
1664 struct cache_set
*bch_cache_set_alloc(struct cache_sb
*sb
)
1667 struct cache_set
*c
= kzalloc(sizeof(struct cache_set
), GFP_KERNEL
);
1671 __module_get(THIS_MODULE
);
1672 closure_init(&c
->cl
, NULL
);
1673 set_closure_fn(&c
->cl
, cache_set_free
, system_wq
);
1675 closure_init(&c
->caching
, &c
->cl
);
1676 set_closure_fn(&c
->caching
, __cache_set_unregister
, system_wq
);
1678 /* Maybe create continue_at_noreturn() and use it here? */
1679 closure_set_stopped(&c
->cl
);
1680 closure_put(&c
->cl
);
1682 kobject_init(&c
->kobj
, &bch_cache_set_ktype
);
1683 kobject_init(&c
->internal
, &bch_cache_set_internal_ktype
);
1685 bch_cache_accounting_init(&c
->accounting
, &c
->cl
);
1687 memcpy(c
->sb
.set_uuid
, sb
->set_uuid
, 16);
1688 c
->sb
.block_size
= sb
->block_size
;
1689 c
->sb
.bucket_size
= sb
->bucket_size
;
1690 c
->sb
.nr_in_set
= sb
->nr_in_set
;
1691 c
->sb
.last_mount
= sb
->last_mount
;
1692 c
->bucket_bits
= ilog2(sb
->bucket_size
);
1693 c
->block_bits
= ilog2(sb
->block_size
);
1694 c
->nr_uuids
= bucket_bytes(c
) / sizeof(struct uuid_entry
);
1696 c
->btree_pages
= bucket_pages(c
);
1697 if (c
->btree_pages
> BTREE_MAX_PAGES
)
1698 c
->btree_pages
= max_t(int, c
->btree_pages
/ 4,
1701 sema_init(&c
->sb_write_mutex
, 1);
1702 mutex_init(&c
->bucket_lock
);
1703 init_waitqueue_head(&c
->btree_cache_wait
);
1704 init_waitqueue_head(&c
->bucket_wait
);
1705 init_waitqueue_head(&c
->gc_wait
);
1706 sema_init(&c
->uuid_write_mutex
, 1);
1708 spin_lock_init(&c
->btree_gc_time
.lock
);
1709 spin_lock_init(&c
->btree_split_time
.lock
);
1710 spin_lock_init(&c
->btree_read_time
.lock
);
1712 bch_moving_init_cache_set(c
);
1714 INIT_LIST_HEAD(&c
->list
);
1715 INIT_LIST_HEAD(&c
->cached_devs
);
1716 INIT_LIST_HEAD(&c
->btree_cache
);
1717 INIT_LIST_HEAD(&c
->btree_cache_freeable
);
1718 INIT_LIST_HEAD(&c
->btree_cache_freed
);
1719 INIT_LIST_HEAD(&c
->data_buckets
);
1721 c
->search
= mempool_create_slab_pool(32, bch_search_cache
);
1725 iter_size
= (sb
->bucket_size
/ sb
->block_size
+ 1) *
1726 sizeof(struct btree_iter_set
);
1728 if (!(c
->devices
= kzalloc(c
->nr_uuids
* sizeof(void *), GFP_KERNEL
)) ||
1729 !(c
->bio_meta
= mempool_create_kmalloc_pool(2,
1730 sizeof(struct bbio
) + sizeof(struct bio_vec
) *
1731 bucket_pages(c
))) ||
1732 !(c
->fill_iter
= mempool_create_kmalloc_pool(1, iter_size
)) ||
1733 !(c
->bio_split
= bioset_create(4, offsetof(struct bbio
, bio
),
1735 BIOSET_NEED_RESCUER
)) ||
1736 !(c
->uuids
= alloc_bucket_pages(GFP_KERNEL
, c
)) ||
1737 !(c
->moving_gc_wq
= alloc_workqueue("bcache_gc",
1738 WQ_MEM_RECLAIM
, 0)) ||
1739 bch_journal_alloc(c
) ||
1740 bch_btree_cache_alloc(c
) ||
1741 bch_open_buckets_alloc(c
) ||
1742 bch_bset_sort_state_init(&c
->sort
, ilog2(c
->btree_pages
)))
1745 c
->congested_read_threshold_us
= 2000;
1746 c
->congested_write_threshold_us
= 20000;
1747 c
->error_limit
= 8 << IO_ERROR_SHIFT
;
1748 WARN_ON(test_and_clear_bit(CACHE_SET_IO_DISABLE
, &c
->flags
));
1752 bch_cache_set_unregister(c
);
1756 static int run_cache_set(struct cache_set
*c
)
1758 const char *err
= "cannot allocate memory";
1759 struct cached_dev
*dc
, *t
;
1764 struct journal_replay
*l
;
1766 closure_init_stack(&cl
);
1768 for_each_cache(ca
, c
, i
)
1769 c
->nbuckets
+= ca
->sb
.nbuckets
;
1772 if (CACHE_SYNC(&c
->sb
)) {
1777 err
= "cannot allocate memory for journal";
1778 if (bch_journal_read(c
, &journal
))
1781 pr_debug("btree_journal_read() done");
1783 err
= "no journal entries found";
1784 if (list_empty(&journal
))
1787 j
= &list_entry(journal
.prev
, struct journal_replay
, list
)->j
;
1789 err
= "IO error reading priorities";
1790 for_each_cache(ca
, c
, i
)
1791 prio_read(ca
, j
->prio_bucket
[ca
->sb
.nr_this_dev
]);
1794 * If prio_read() fails it'll call cache_set_error and we'll
1795 * tear everything down right away, but if we perhaps checked
1796 * sooner we could avoid journal replay.
1801 err
= "bad btree root";
1802 if (__bch_btree_ptr_invalid(c
, k
))
1805 err
= "error reading btree root";
1806 c
->root
= bch_btree_node_get(c
, NULL
, k
, j
->btree_level
, true, NULL
);
1807 if (IS_ERR_OR_NULL(c
->root
))
1810 list_del_init(&c
->root
->list
);
1811 rw_unlock(true, c
->root
);
1813 err
= uuid_read(c
, j
, &cl
);
1817 err
= "error in recovery";
1818 if (bch_btree_check(c
))
1821 bch_journal_mark(c
, &journal
);
1822 bch_initial_gc_finish(c
);
1823 pr_debug("btree_check() done");
1826 * bcache_journal_next() can't happen sooner, or
1827 * btree_gc_finish() will give spurious errors about last_gc >
1828 * gc_gen - this is a hack but oh well.
1830 bch_journal_next(&c
->journal
);
1832 err
= "error starting allocator thread";
1833 for_each_cache(ca
, c
, i
)
1834 if (bch_cache_allocator_start(ca
))
1838 * First place it's safe to allocate: btree_check() and
1839 * btree_gc_finish() have to run before we have buckets to
1840 * allocate, and bch_bucket_alloc_set() might cause a journal
1841 * entry to be written so bcache_journal_next() has to be called
1844 * If the uuids were in the old format we have to rewrite them
1845 * before the next journal entry is written:
1847 if (j
->version
< BCACHE_JSET_VERSION_UUID
)
1850 err
= "bcache: replay journal failed";
1851 if (bch_journal_replay(c
, &journal
))
1854 pr_notice("invalidating existing data");
1856 for_each_cache(ca
, c
, i
) {
1859 ca
->sb
.keys
= clamp_t(int, ca
->sb
.nbuckets
>> 7,
1860 2, SB_JOURNAL_BUCKETS
);
1862 for (j
= 0; j
< ca
->sb
.keys
; j
++)
1863 ca
->sb
.d
[j
] = ca
->sb
.first_bucket
+ j
;
1866 bch_initial_gc_finish(c
);
1868 err
= "error starting allocator thread";
1869 for_each_cache(ca
, c
, i
)
1870 if (bch_cache_allocator_start(ca
))
1873 mutex_lock(&c
->bucket_lock
);
1874 for_each_cache(ca
, c
, i
)
1876 mutex_unlock(&c
->bucket_lock
);
1878 err
= "cannot allocate new UUID bucket";
1879 if (__uuid_write(c
))
1882 err
= "cannot allocate new btree root";
1883 c
->root
= __bch_btree_node_alloc(c
, NULL
, 0, true, NULL
);
1884 if (IS_ERR_OR_NULL(c
->root
))
1887 mutex_lock(&c
->root
->write_lock
);
1888 bkey_copy_key(&c
->root
->key
, &MAX_KEY
);
1889 bch_btree_node_write(c
->root
, &cl
);
1890 mutex_unlock(&c
->root
->write_lock
);
1892 bch_btree_set_root(c
->root
);
1893 rw_unlock(true, c
->root
);
1896 * We don't want to write the first journal entry until
1897 * everything is set up - fortunately journal entries won't be
1898 * written until the SET_CACHE_SYNC() here:
1900 SET_CACHE_SYNC(&c
->sb
, true);
1902 bch_journal_next(&c
->journal
);
1903 bch_journal_meta(c
, &cl
);
1906 err
= "error starting gc thread";
1907 if (bch_gc_thread_start(c
))
1911 c
->sb
.last_mount
= get_seconds();
1912 bcache_write_super(c
);
1914 list_for_each_entry_safe(dc
, t
, &uncached_devices
, list
)
1915 bch_cached_dev_attach(dc
, c
, NULL
);
1919 set_bit(CACHE_SET_RUNNING
, &c
->flags
);
1922 while (!list_empty(&journal
)) {
1923 l
= list_first_entry(&journal
, struct journal_replay
, list
);
1929 /* XXX: test this, it's broken */
1930 bch_cache_set_error(c
, "%s", err
);
1935 static bool can_attach_cache(struct cache
*ca
, struct cache_set
*c
)
1937 return ca
->sb
.block_size
== c
->sb
.block_size
&&
1938 ca
->sb
.bucket_size
== c
->sb
.bucket_size
&&
1939 ca
->sb
.nr_in_set
== c
->sb
.nr_in_set
;
1942 static const char *register_cache_set(struct cache
*ca
)
1945 const char *err
= "cannot allocate memory";
1946 struct cache_set
*c
;
1948 list_for_each_entry(c
, &bch_cache_sets
, list
)
1949 if (!memcmp(c
->sb
.set_uuid
, ca
->sb
.set_uuid
, 16)) {
1950 if (c
->cache
[ca
->sb
.nr_this_dev
])
1951 return "duplicate cache set member";
1953 if (!can_attach_cache(ca
, c
))
1954 return "cache sb does not match set";
1956 if (!CACHE_SYNC(&ca
->sb
))
1957 SET_CACHE_SYNC(&c
->sb
, false);
1962 c
= bch_cache_set_alloc(&ca
->sb
);
1966 err
= "error creating kobject";
1967 if (kobject_add(&c
->kobj
, bcache_kobj
, "%pU", c
->sb
.set_uuid
) ||
1968 kobject_add(&c
->internal
, &c
->kobj
, "internal"))
1971 if (bch_cache_accounting_add_kobjs(&c
->accounting
, &c
->kobj
))
1974 bch_debug_init_cache_set(c
);
1976 list_add(&c
->list
, &bch_cache_sets
);
1978 sprintf(buf
, "cache%i", ca
->sb
.nr_this_dev
);
1979 if (sysfs_create_link(&ca
->kobj
, &c
->kobj
, "set") ||
1980 sysfs_create_link(&c
->kobj
, &ca
->kobj
, buf
))
1983 if (ca
->sb
.seq
> c
->sb
.seq
) {
1984 c
->sb
.version
= ca
->sb
.version
;
1985 memcpy(c
->sb
.set_uuid
, ca
->sb
.set_uuid
, 16);
1986 c
->sb
.flags
= ca
->sb
.flags
;
1987 c
->sb
.seq
= ca
->sb
.seq
;
1988 pr_debug("set version = %llu", c
->sb
.version
);
1991 kobject_get(&ca
->kobj
);
1993 ca
->set
->cache
[ca
->sb
.nr_this_dev
] = ca
;
1994 c
->cache_by_alloc
[c
->caches_loaded
++] = ca
;
1996 if (c
->caches_loaded
== c
->sb
.nr_in_set
) {
1997 err
= "failed to run cache set";
1998 if (run_cache_set(c
) < 0)
2004 bch_cache_set_unregister(c
);
2010 void bch_cache_release(struct kobject
*kobj
)
2012 struct cache
*ca
= container_of(kobj
, struct cache
, kobj
);
2016 BUG_ON(ca
->set
->cache
[ca
->sb
.nr_this_dev
] != ca
);
2017 ca
->set
->cache
[ca
->sb
.nr_this_dev
] = NULL
;
2020 free_pages((unsigned long) ca
->disk_buckets
, ilog2(bucket_pages(ca
)));
2021 kfree(ca
->prio_buckets
);
2024 free_heap(&ca
->heap
);
2025 free_fifo(&ca
->free_inc
);
2027 for (i
= 0; i
< RESERVE_NR
; i
++)
2028 free_fifo(&ca
->free
[i
]);
2030 if (ca
->sb_bio
.bi_inline_vecs
[0].bv_page
)
2031 put_page(ca
->sb_bio
.bi_io_vec
[0].bv_page
);
2033 if (!IS_ERR_OR_NULL(ca
->bdev
))
2034 blkdev_put(ca
->bdev
, FMODE_READ
|FMODE_WRITE
|FMODE_EXCL
);
2037 module_put(THIS_MODULE
);
2040 static int cache_alloc(struct cache
*ca
)
2043 size_t btree_buckets
;
2046 __module_get(THIS_MODULE
);
2047 kobject_init(&ca
->kobj
, &bch_cache_ktype
);
2049 bio_init(&ca
->journal
.bio
, ca
->journal
.bio
.bi_inline_vecs
, 8);
	/*
	 * When ca->sb.njournal_buckets is not zero, a journal exists and
	 * btree nodes may split during bch_journal_replay(), so buckets of
	 * the RESERVE_BTREE type are needed. In the worst case every journal
	 * bucket holds valid journal entries and all of their keys need to
	 * be replayed, so reserve as many RESERVE_BTREE buckets as there are
	 * journal buckets.
	 */
	btree_buckets = ca->sb.njournal_buckets ?: 8;
	free = roundup_pow_of_two(ca->sb.nbuckets) >> 10;
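	/*
	 * Sizing example for the two values above (numbers are only an
	 * illustration): a cache with ca->sb.nbuckets = 1,000,000 gives
	 * free = roundup_pow_of_two(1000000) >> 10 = 1048576 >> 10 = 1024
	 * buckets for each of the movinggc/none reserves (and free << 2 for
	 * free_inc), while btree_buckets simply mirrors the journal bucket
	 * count (or 8 if there is no journal yet).
	 */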
2063 if (!init_fifo(&ca
->free
[RESERVE_BTREE
], btree_buckets
, GFP_KERNEL
) ||
2064 !init_fifo_exact(&ca
->free
[RESERVE_PRIO
], prio_buckets(ca
), GFP_KERNEL
) ||
2065 !init_fifo(&ca
->free
[RESERVE_MOVINGGC
], free
, GFP_KERNEL
) ||
2066 !init_fifo(&ca
->free
[RESERVE_NONE
], free
, GFP_KERNEL
) ||
2067 !init_fifo(&ca
->free_inc
, free
<< 2, GFP_KERNEL
) ||
2068 !init_heap(&ca
->heap
, free
<< 3, GFP_KERNEL
) ||
2069 !(ca
->buckets
= vzalloc(sizeof(struct bucket
) *
2070 ca
->sb
.nbuckets
)) ||
2071 !(ca
->prio_buckets
= kzalloc(sizeof(uint64_t) * prio_buckets(ca
) *
2073 !(ca
->disk_buckets
= alloc_bucket_pages(GFP_KERNEL
, ca
)))
2076 ca
->prio_last_buckets
= ca
->prio_buckets
+ prio_buckets(ca
);
2078 for_each_bucket(b
, ca
)
2079 atomic_set(&b
->pin
, 0);
2084 static int register_cache(struct cache_sb
*sb
, struct page
*sb_page
,
2085 struct block_device
*bdev
, struct cache
*ca
)
2087 const char *err
= NULL
; /* must be set for any error case */
2090 bdevname(bdev
, ca
->cache_dev_name
);
2091 memcpy(&ca
->sb
, sb
, sizeof(struct cache_sb
));
2093 ca
->bdev
->bd_holder
= ca
;
2095 bio_init(&ca
->sb_bio
, ca
->sb_bio
.bi_inline_vecs
, 1);
2096 ca
->sb_bio
.bi_io_vec
[0].bv_page
= sb_page
;
2099 if (blk_queue_discard(bdev_get_queue(bdev
)))
2100 ca
->discard
= CACHE_DISCARD(&ca
->sb
);
2102 ret
= cache_alloc(ca
);
2104 blkdev_put(bdev
, FMODE_READ
|FMODE_WRITE
|FMODE_EXCL
);
2106 err
= "cache_alloc(): -ENOMEM";
2108 err
= "cache_alloc(): unknown error";
2112 if (kobject_add(&ca
->kobj
, &part_to_dev(bdev
->bd_part
)->kobj
, "bcache")) {
2113 err
= "error calling kobject_add";
2118 mutex_lock(&bch_register_lock
);
2119 err
= register_cache_set(ca
);
2120 mutex_unlock(&bch_register_lock
);
2127 pr_info("registered cache device %s", ca
->cache_dev_name
);
2130 kobject_put(&ca
->kobj
);
2134 pr_notice("error %s: %s", ca
->cache_dev_name
, err
);
2139 /* Global interfaces/init */
2141 static ssize_t
register_bcache(struct kobject
*, struct kobj_attribute
*,
2142 const char *, size_t);
2144 kobj_attribute_write(register, register_bcache
);
2145 kobj_attribute_write(register_quiet
, register_bcache
);
2147 static bool bch_is_open_backing(struct block_device
*bdev
) {
2148 struct cache_set
*c
, *tc
;
2149 struct cached_dev
*dc
, *t
;
2151 list_for_each_entry_safe(c
, tc
, &bch_cache_sets
, list
)
2152 list_for_each_entry_safe(dc
, t
, &c
->cached_devs
, list
)
2153 if (dc
->bdev
== bdev
)
2155 list_for_each_entry_safe(dc
, t
, &uncached_devices
, list
)
2156 if (dc
->bdev
== bdev
)
2161 static struct cached_dev
*bch_find_cached_dev(struct block_device
*bdev
) {
2162 struct cache_set
*c
, *tc
;
2163 struct cached_dev
*dc
, *t
;
2165 list_for_each_entry_safe(c
, tc
, &bch_cache_sets
, list
)
2166 list_for_each_entry_safe(dc
, t
, &c
->cached_devs
, list
)
2167 if (dc
->bdev
== bdev
)
2169 list_for_each_entry_safe(dc
, t
, &uncached_devices
, list
)
2170 if (dc
->bdev
== bdev
)
2176 static bool bch_is_open_cache(struct block_device
*bdev
) {
2177 struct cache_set
*c
, *tc
;
2181 list_for_each_entry_safe(c
, tc
, &bch_cache_sets
, list
)
2182 for_each_cache(ca
, c
, i
)
2183 if (ca
->bdev
== bdev
)
2188 static bool bch_is_open(struct block_device
*bdev
) {
2189 return bch_is_open_cache(bdev
) || bch_is_open_backing(bdev
);
2192 static ssize_t
register_bcache(struct kobject
*k
, struct kobj_attribute
*attr
,
2193 const char *buffer
, size_t size
)
2196 const char *err
= "cannot allocate memory";
2198 struct cache_sb
*sb
= NULL
;
2199 struct block_device
*bdev
= NULL
;
2200 struct page
*sb_page
= NULL
;
2201 struct cached_dev
*dc
= NULL
;
2203 if (!try_module_get(THIS_MODULE
))
2206 if (!(path
= kstrndup(buffer
, size
, GFP_KERNEL
)) ||
2207 !(sb
= kmalloc(sizeof(struct cache_sb
), GFP_KERNEL
)))
2210 err
= "failed to open device";
2211 bdev
= blkdev_get_by_path(strim(path
),
2212 FMODE_READ
|FMODE_WRITE
|FMODE_EXCL
,
2215 if (bdev
== ERR_PTR(-EBUSY
)) {
2216 bdev
= lookup_bdev(strim(path
), 0);
2217 mutex_lock(&bch_register_lock
);
2218 if (!IS_ERR(bdev
) && bch_is_open(bdev
)) {
2219 err
= "device already registered";
2220 /* emit CHANGE event for backing devices to export
2221 * CACHED_{UUID/LABEL} values to udev */
2222 if (bch_is_open_backing(bdev
)) {
2223 dc
= bch_find_cached_dev(bdev
);
2225 bch_cached_dev_emit_change(dc
);
2226 err
= "device already registered (emitting change event)";
2230 err
= "device busy";
2232 mutex_unlock(&bch_register_lock
);
2235 if (attr
== &ksysfs_register_quiet
)
2241 err
= "failed to set blocksize";
2242 if (set_blocksize(bdev
, 4096))
2245 err
= read_super(sb
, bdev
, &sb_page
);
2249 err
= "failed to register device";
2250 if (SB_IS_BDEV(sb
)) {
2251 struct cached_dev
*dc
= kzalloc(sizeof(*dc
), GFP_KERNEL
);
2255 mutex_lock(&bch_register_lock
);
2256 register_bdev(sb
, sb_page
, bdev
, dc
);
2257 mutex_unlock(&bch_register_lock
);
2259 struct cache
*ca
= kzalloc(sizeof(*ca
), GFP_KERNEL
);
2263 if (register_cache(sb
, sb_page
, bdev
, ca
) != 0)
2271 module_put(THIS_MODULE
);
2275 blkdev_put(bdev
, FMODE_READ
|FMODE_WRITE
|FMODE_EXCL
);
2277 pr_info("error %s: %s", path
, err
);
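/*
 * register_bcache() is the store handler behind /sys/fs/bcache/register (and
 * register_quiet); the normal way to bring a formatted device online is
 * simply:
 *
 *	echo /dev/sdc > /sys/fs/bcache/register
 *
 * The superblock read above then decides whether the device is a backing
 * device (register_bdev) or a cache device (register_cache).
 */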
2282 static int bcache_reboot(struct notifier_block
*n
, unsigned long code
, void *x
)
2284 if (code
== SYS_DOWN
||
2286 code
== SYS_POWER_OFF
) {
2288 unsigned long start
= jiffies
;
2289 bool stopped
= false;
2291 struct cache_set
*c
, *tc
;
2292 struct cached_dev
*dc
, *tdc
;
2294 mutex_lock(&bch_register_lock
);
2296 if (list_empty(&bch_cache_sets
) &&
2297 list_empty(&uncached_devices
))
2300 pr_info("Stopping all devices:");
2302 list_for_each_entry_safe(c
, tc
, &bch_cache_sets
, list
)
2303 bch_cache_set_stop(c
);
2305 list_for_each_entry_safe(dc
, tdc
, &uncached_devices
, list
)
2306 bcache_device_stop(&dc
->disk
);
2308 /* What's a condition variable? */
2310 long timeout
= start
+ 2 * HZ
- jiffies
;
2312 stopped
= list_empty(&bch_cache_sets
) &&
2313 list_empty(&uncached_devices
);
2315 if (timeout
< 0 || stopped
)
2318 prepare_to_wait(&unregister_wait
, &wait
,
2319 TASK_UNINTERRUPTIBLE
);
2321 mutex_unlock(&bch_register_lock
);
2322 schedule_timeout(timeout
);
2323 mutex_lock(&bch_register_lock
);
2326 finish_wait(&unregister_wait
, &wait
);
2329 pr_info("All devices stopped");
2331 pr_notice("Timeout waiting for devices to be closed");
2333 mutex_unlock(&bch_register_lock
);
2339 static struct notifier_block reboot
= {
2340 .notifier_call
= bcache_reboot
,
2341 .priority
= INT_MAX
, /* before any real devices */
2344 static void bcache_exit(void)
2349 kobject_put(bcache_kobj
);
2351 destroy_workqueue(bcache_wq
);
2353 unregister_blkdev(bcache_major
, "bcache");
2354 unregister_reboot_notifier(&reboot
);
2355 mutex_destroy(&bch_register_lock
);
2358 static int __init
bcache_init(void)
2360 static const struct attribute
*files
[] = {
2361 &ksysfs_register
.attr
,
2362 &ksysfs_register_quiet
.attr
,
2366 mutex_init(&bch_register_lock
);
2367 init_waitqueue_head(&unregister_wait
);
2368 register_reboot_notifier(&reboot
);
2369 closure_debug_init();
2371 bcache_major
= register_blkdev(0, "bcache");
2372 if (bcache_major
< 0) {
2373 unregister_reboot_notifier(&reboot
);
2374 mutex_destroy(&bch_register_lock
);
2375 return bcache_major
;
2378 if (!(bcache_wq
= alloc_workqueue("bcache", WQ_MEM_RECLAIM
, 0)) ||
2379 !(bcache_kobj
= kobject_create_and_add("bcache", fs_kobj
)) ||
2380 bch_request_init() ||
2381 bch_debug_init(bcache_kobj
) ||
2382 sysfs_create_files(bcache_kobj
, files
))
2391 module_exit(bcache_exit
);
2392 module_init(bcache_init
);