/*
 * Assorted bcache debug code
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */
12 #include <linux/console.h>
13 #include <linux/debugfs.h>
14 #include <linux/module.h>
15 #include <linux/random.h>
16 #include <linux/seq_file.h>
/* Root of the bcache debugfs hierarchy; created by bch_debug_init(). */
static struct dentry *debug;
20 const char *bch_ptr_status(struct cache_set
*c
, const struct bkey
*k
)
24 for (i
= 0; i
< KEY_PTRS(k
); i
++)
25 if (ptr_available(c
, k
, i
)) {
26 struct cache
*ca
= PTR_CACHE(c
, k
, i
);
27 size_t bucket
= PTR_BUCKET_NR(c
, k
, i
);
28 size_t r
= bucket_remainder(c
, PTR_OFFSET(k
, i
));
30 if (KEY_SIZE(k
) + r
> c
->sb
.bucket_size
)
31 return "bad, length too big";
32 if (bucket
< ca
->sb
.first_bucket
)
33 return "bad, short offset";
34 if (bucket
>= ca
->sb
.nbuckets
)
35 return "bad, offset past end of device";
36 if (ptr_stale(c
, k
, i
))
40 if (!bkey_cmp(k
, &ZERO_KEY
))
41 return "bad, null key";
43 return "bad, no pointers";
49 int bch_bkey_to_text(char *buf
, size_t size
, const struct bkey
*k
)
52 char *out
= buf
, *end
= buf
+ size
;
54 #define p(...) (out += scnprintf(out, end - out, __VA_ARGS__))
56 p("%llu:%llu len %llu -> [", KEY_INODE(k
), KEY_OFFSET(k
), KEY_SIZE(k
));
60 p("%llu:%llu gen %llu",
61 PTR_DEV(k
, i
), PTR_OFFSET(k
, i
), PTR_GEN(k
, i
));
63 if (++i
== KEY_PTRS(k
))
74 p(" cs%llu %llx", KEY_CSUM(k
), k
->ptr
[1]);
79 #ifdef CONFIG_BCACHE_DEBUG
81 static void dump_bset(struct btree
*b
, struct bset
*i
)
83 struct bkey
*k
, *next
;
87 for (k
= i
->start
; k
< end(i
); k
= next
) {
90 bch_bkey_to_text(buf
, sizeof(buf
), k
);
91 printk(KERN_ERR
"block %zu key %zi/%u: %s", index(i
, b
),
92 (uint64_t *) k
- i
->d
, i
->keys
, buf
);
94 for (j
= 0; j
< KEY_PTRS(k
); j
++) {
95 size_t n
= PTR_BUCKET_NR(b
->c
, k
, j
);
96 printk(" bucket %zu", n
);
98 if (n
>= b
->c
->sb
.first_bucket
&& n
< b
->c
->sb
.nbuckets
)
100 PTR_BUCKET(b
->c
, k
, j
)->prio
);
103 printk(" %s\n", bch_ptr_status(b
->c
, k
));
106 bkey_cmp(k
, !b
->level
? &START_KEY(next
) : next
) > 0)
107 printk(KERN_ERR
"Key skipped backwards\n");
111 static void bch_dump_bucket(struct btree
*b
)
116 for (i
= 0; i
<= b
->nsets
; i
++)
117 dump_bset(b
, b
->sets
[i
].data
);
121 void bch_btree_verify(struct btree
*b
, struct bset
*new)
123 struct btree
*v
= b
->c
->verify_data
;
125 closure_init_stack(&cl
);
130 closure_wait_event(&b
->io
.wait
, &cl
,
131 atomic_read(&b
->io
.cl
.remaining
) == -1);
133 mutex_lock(&b
->c
->verify_lock
);
135 bkey_copy(&v
->key
, &b
->key
);
139 bch_btree_node_read(v
);
140 closure_wait_event(&v
->io
.wait
, &cl
,
141 atomic_read(&b
->io
.cl
.remaining
) == -1);
143 if (new->keys
!= v
->sets
[0].data
->keys
||
145 v
->sets
[0].data
->start
,
146 (void *) end(new) - (void *) new->start
)) {
151 printk(KERN_ERR
"*** original memory node:\n");
152 for (i
= 0; i
<= b
->nsets
; i
++)
153 dump_bset(b
, b
->sets
[i
].data
);
155 printk(KERN_ERR
"*** sorted memory node:\n");
158 printk(KERN_ERR
"*** on disk node:\n");
159 dump_bset(v
, v
->sets
[0].data
);
161 for (j
= 0; j
< new->keys
; j
++)
162 if (new->d
[j
] != v
->sets
[0].data
->d
[j
])
166 panic("verify failed at %u\n", j
);
169 mutex_unlock(&b
->c
->verify_lock
);
172 void bch_data_verify(struct cached_dev
*dc
, struct bio
*bio
)
174 char name
[BDEVNAME_SIZE
];
179 check
= bio_clone(bio
, GFP_NOIO
);
183 if (bio_alloc_pages(check
, GFP_NOIO
))
186 submit_bio_wait(READ_SYNC
, check
);
188 bio_for_each_segment(bv
, bio
, i
) {
189 void *p1
= kmap_atomic(bv
->bv_page
);
190 void *p2
= page_address(check
->bi_io_vec
[i
].bv_page
);
192 cache_set_err_on(memcmp(p1
+ bv
->bv_offset
,
196 "verify failed at dev %s sector %llu",
197 bdevname(dc
->bdev
, name
),
198 (uint64_t) bio
->bi_iter
.bi_sector
);
203 bio_for_each_segment_all(bv
, check
, i
)
204 __free_page(bv
->bv_page
);
209 int __bch_count_data(struct btree
*b
)
212 struct btree_iter iter
;
216 for_each_key(b
, k
, &iter
)
221 void __bch_check_keys(struct btree
*b
, const char *fmt
, ...)
224 struct bkey
*k
, *p
= NULL
;
225 struct btree_iter iter
;
228 for_each_key(b
, k
, &iter
) {
230 err
= "Keys out of order";
231 if (p
&& bkey_cmp(&START_KEY(p
), &START_KEY(k
)) > 0)
234 if (bch_ptr_invalid(b
, k
))
237 err
= "Overlapping keys";
238 if (p
&& bkey_cmp(p
, &START_KEY(k
)) > 0)
241 if (bch_ptr_bad(b
, k
))
244 err
= "Duplicate keys";
245 if (p
&& !bkey_cmp(p
, k
))
251 err
= "Key larger than btree node key";
252 if (p
&& bkey_cmp(p
, &b
->key
) > 0)
263 panic("bcache error: %s:\n", err
);
266 void bch_btree_iter_next_check(struct btree_iter
*iter
)
268 struct bkey
*k
= iter
->data
->k
, *next
= bkey_next(k
);
270 if (next
< iter
->data
->end
&&
271 bkey_cmp(k
, iter
->b
->level
? next
: &START_KEY(next
)) > 0) {
272 bch_dump_bucket(iter
->b
);
273 panic("Key skipped backwards\n");
279 #ifdef CONFIG_DEBUG_FS
281 /* XXX: cache set refcounting */
283 struct dump_iterator
{
290 static bool dump_pred(struct keybuf
*buf
, struct bkey
*k
)
295 static ssize_t
bch_dump_read(struct file
*file
, char __user
*buf
,
296 size_t size
, loff_t
*ppos
)
298 struct dump_iterator
*i
= file
->private_data
;
303 struct keybuf_key
*w
;
304 unsigned bytes
= min(i
->bytes
, size
);
306 int err
= copy_to_user(buf
, i
->buf
, bytes
);
314 memmove(i
->buf
, i
->buf
+ bytes
, i
->bytes
);
319 w
= bch_keybuf_next_rescan(i
->c
, &i
->keys
, &MAX_KEY
, dump_pred
);
323 bch_bkey_to_text(kbuf
, sizeof(kbuf
), &w
->key
);
324 i
->bytes
= snprintf(i
->buf
, PAGE_SIZE
, "%s\n", kbuf
);
325 bch_keybuf_del(&i
->keys
, w
);
331 static int bch_dump_open(struct inode
*inode
, struct file
*file
)
333 struct cache_set
*c
= inode
->i_private
;
334 struct dump_iterator
*i
;
336 i
= kzalloc(sizeof(struct dump_iterator
), GFP_KERNEL
);
340 file
->private_data
= i
;
342 bch_keybuf_init(&i
->keys
);
343 i
->keys
.last_scanned
= KEY(0, 0, 0);
348 static int bch_dump_release(struct inode
*inode
, struct file
*file
)
350 kfree(file
->private_data
);
354 static const struct file_operations cache_set_debug_ops
= {
355 .owner
= THIS_MODULE
,
356 .open
= bch_dump_open
,
357 .read
= bch_dump_read
,
358 .release
= bch_dump_release
361 void bch_debug_init_cache_set(struct cache_set
*c
)
363 if (!IS_ERR_OR_NULL(debug
)) {
365 snprintf(name
, 50, "bcache-%pU", c
->sb
.set_uuid
);
367 c
->debug
= debugfs_create_file(name
, 0400, debug
, c
,
368 &cache_set_debug_ops
);
374 void bch_debug_exit(void)
376 if (!IS_ERR_OR_NULL(debug
))
377 debugfs_remove_recursive(debug
);
380 int __init
bch_debug_init(struct kobject
*kobj
)
384 debug
= debugfs_create_dir("bcache", NULL
);