drivers/md/bcache/sysfs.c
// SPDX-License-Identifier: GPL-2.0
/*
 * bcache sysfs interfaces
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "sysfs.h"
#include "btree.h"
#include "request.h"
#include "writeback.h"

#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/sched/clock.h>

static const char * const cache_replacement_policies[] = {
	"lru",
	"fifo",
	"random",
	NULL
};

static const char * const error_actions[] = {
	"unregister",
	"panic",
	NULL
};

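/*
 * The write_attribute()/read_attribute()/rw_attribute() macros below come
 * from this tree's sysfs.h; as I read them, each one declares a static
 * struct attribute named sysfs_<name> with the matching permission bits,
 * which is then referenced from the attribute arrays and the SHOW()/STORE()
 * handlers further down.
 */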
write_attribute(attach);
write_attribute(detach);
write_attribute(unregister);
write_attribute(stop);
write_attribute(clear_stats);
write_attribute(trigger_gc);
write_attribute(prune_cache);
write_attribute(flash_vol_create);

read_attribute(bucket_size);
read_attribute(block_size);
read_attribute(nbuckets);
read_attribute(tree_depth);
read_attribute(root_usage_percent);
read_attribute(priority_stats);
read_attribute(btree_cache_size);
read_attribute(btree_cache_max_chain);
read_attribute(cache_available_percent);
read_attribute(written);
read_attribute(btree_written);
read_attribute(metadata_written);
read_attribute(active_journal_entries);

sysfs_time_stats_attribute(btree_gc, sec, ms);
sysfs_time_stats_attribute(btree_split, sec, us);
sysfs_time_stats_attribute(btree_sort, ms, us);
sysfs_time_stats_attribute(btree_read, ms, us);

read_attribute(btree_nodes);
read_attribute(btree_used_percent);
read_attribute(average_key_size);
read_attribute(dirty_data);
read_attribute(bset_tree_stats);

read_attribute(state);
read_attribute(cache_read_races);
read_attribute(writeback_keys_done);
read_attribute(writeback_keys_failed);
read_attribute(io_errors);
read_attribute(congested);
rw_attribute(congested_read_threshold_us);
rw_attribute(congested_write_threshold_us);

rw_attribute(sequential_cutoff);
rw_attribute(data_csum);
rw_attribute(cache_mode);
rw_attribute(stop_when_cache_set_failed);
rw_attribute(writeback_metadata);
rw_attribute(writeback_running);
rw_attribute(writeback_percent);
rw_attribute(writeback_delay);
rw_attribute(writeback_rate);

rw_attribute(writeback_rate_update_seconds);
rw_attribute(writeback_rate_i_term_inverse);
rw_attribute(writeback_rate_p_term_inverse);
rw_attribute(writeback_rate_minimum);
read_attribute(writeback_rate_debug);

read_attribute(stripe_size);
read_attribute(partial_stripes_expensive);

rw_attribute(synchronous);
rw_attribute(journal_delay_ms);
rw_attribute(io_disable);
rw_attribute(discard);
rw_attribute(running);
rw_attribute(label);
rw_attribute(readahead);
rw_attribute(errors);
rw_attribute(io_error_limit);
rw_attribute(io_error_halflife);
rw_attribute(verify);
rw_attribute(bypass_torture_test);
rw_attribute(key_merging_disabled);
rw_attribute(gc_always_rewrite);
rw_attribute(expensive_debug_checks);
rw_attribute(cache_replacement_policy);
rw_attribute(btree_shrinker_disabled);
rw_attribute(copy_gc_enabled);
rw_attribute(size);

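/*
 * SHOW(), STORE(), SHOW_LOCKED() and STORE_LOCKED() are helper macros from
 * sysfs.h: SHOW(x)/STORE(x) expand to the x_show()/x_store() sysfs
 * callbacks, and the *_LOCKED variants appear to wrap the corresponding
 * __x variant with bch_register_lock held.
 */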
SHOW(__bch_cached_dev)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);
	const char *states[] = { "no cache", "clean", "dirty", "inconsistent" };

#define var(stat) (dc->stat)

	if (attr == &sysfs_cache_mode)
		return bch_snprint_string_list(buf, PAGE_SIZE,
					       bch_cache_modes + 1,
					       BDEV_CACHE_MODE(&dc->sb));

	if (attr == &sysfs_stop_when_cache_set_failed)
		return bch_snprint_string_list(buf, PAGE_SIZE,
					       bch_stop_on_failure_modes + 1,
					       dc->stop_when_cache_set_failed);

	sysfs_printf(data_csum, "%i", dc->disk.data_csum);
	var_printf(verify, "%i");
	var_printf(bypass_torture_test, "%i");
	var_printf(writeback_metadata, "%i");
	var_printf(writeback_running, "%i");
	var_print(writeback_delay);
	var_print(writeback_percent);
	sysfs_hprint(writeback_rate, dc->writeback_rate.rate << 9);

	var_print(writeback_rate_update_seconds);
	var_print(writeback_rate_i_term_inverse);
	var_print(writeback_rate_p_term_inverse);
	var_print(writeback_rate_minimum);

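	/*
	 * writeback_rate and the values reported by writeback_rate_debug are
	 * kept internally in 512-byte sectors; the << 9 shifts in this
	 * handler convert them to bytes for display.  The debug output
	 * exposes the state of the writeback rate controller (target,
	 * proportional and integral terms) maintained in writeback.c.
	 */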
	if (attr == &sysfs_writeback_rate_debug) {
		char rate[20];
		char dirty[20];
		char target[20];
		char proportional[20];
		char integral[20];
		char change[20];
		s64 next_io;

		bch_hprint(rate, dc->writeback_rate.rate << 9);
		bch_hprint(dirty, bcache_dev_sectors_dirty(&dc->disk) << 9);
		bch_hprint(target, dc->writeback_rate_target << 9);
		bch_hprint(proportional, dc->writeback_rate_proportional << 9);
		bch_hprint(integral, dc->writeback_rate_integral_scaled << 9);
		bch_hprint(change, dc->writeback_rate_change << 9);

		next_io = div64_s64(dc->writeback_rate.next - local_clock(),
				    NSEC_PER_MSEC);

		return sprintf(buf,
			       "rate:\t\t%s/sec\n"
			       "dirty:\t\t%s\n"
			       "target:\t\t%s\n"
			       "proportional:\t%s\n"
			       "integral:\t%s\n"
			       "change:\t\t%s/sec\n"
			       "next io:\t%llims\n",
			       rate, dirty, target, proportional,
			       integral, change, next_io);
	}

	sysfs_hprint(dirty_data,
		     bcache_dev_sectors_dirty(&dc->disk) << 9);

	sysfs_hprint(stripe_size, dc->disk.stripe_size << 9);
	var_printf(partial_stripes_expensive, "%u");

	var_hprint(sequential_cutoff);
	var_hprint(readahead);

	sysfs_print(running, atomic_read(&dc->running));
	sysfs_print(state, states[BDEV_STATE(&dc->sb)]);

	if (attr == &sysfs_label) {
		memcpy(buf, dc->sb.label, SB_LABEL_SIZE);
		buf[SB_LABEL_SIZE] = '\0';	/* label may not be NUL terminated */
		strcat(buf, "\n");
		return strlen(buf);
	}

#undef var
	return 0;
}
SHOW_LOCKED(bch_cached_dev)

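/*
 * These attributes are typically exercised from userspace via sysfs, e.g.
 * (paths are illustrative, for a bcache0 backing device):
 *
 *	echo writeback > /sys/block/bcache0/bcache/cache_mode
 *	echo 10 > /sys/block/bcache0/bcache/writeback_percent
 */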
STORE(__cached_dev)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);
	ssize_t v;
	struct cache_set *c;
	struct kobj_uevent_env *env;

#define d_strtoul(var)		sysfs_strtoul(var, dc->var)
#define d_strtoul_nonzero(var)	sysfs_strtoul_clamp(var, dc->var, 1, INT_MAX)
#define d_strtoi_h(var)		sysfs_hatoi(var, dc->var)

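	/*
	 * The sysfs_strtoul*()/d_strtoul*() macros (sysfs.h) each check
	 * whether attr matches the named attribute and, if so, parse buf
	 * into the given field and return early; only the attributes
	 * handled explicitly below fall through to the if () blocks.
	 */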
	sysfs_strtoul(data_csum, dc->disk.data_csum);
	d_strtoul(verify);
	d_strtoul(bypass_torture_test);
	d_strtoul(writeback_metadata);
	d_strtoul(writeback_running);
	d_strtoul(writeback_delay);

	sysfs_strtoul_clamp(writeback_percent, dc->writeback_percent, 0, 40);

	sysfs_strtoul_clamp(writeback_rate,
			    dc->writeback_rate.rate, 1, INT_MAX);

	sysfs_strtoul_clamp(writeback_rate_update_seconds,
			    dc->writeback_rate_update_seconds,
			    1, WRITEBACK_RATE_UPDATE_SECS_MAX);
	d_strtoul(writeback_rate_i_term_inverse);
	d_strtoul_nonzero(writeback_rate_p_term_inverse);
	d_strtoul_nonzero(writeback_rate_minimum);

	d_strtoi_h(sequential_cutoff);
	d_strtoi_h(readahead);

	if (attr == &sysfs_clear_stats)
		bch_cache_accounting_clear(&dc->accounting);

	if (attr == &sysfs_running &&
	    strtoul_or_return(buf))
		bch_cached_dev_run(dc);

	if (attr == &sysfs_cache_mode) {
		v = bch_read_string_list(buf, bch_cache_modes + 1);

		if (v < 0)
			return v;

		if ((unsigned) v != BDEV_CACHE_MODE(&dc->sb)) {
			SET_BDEV_CACHE_MODE(&dc->sb, v);
			bch_write_bdev_super(dc, NULL);
		}
	}

	if (attr == &sysfs_stop_when_cache_set_failed) {
		v = bch_read_string_list(buf, bch_stop_on_failure_modes + 1);

		if (v < 0)
			return v;

		dc->stop_when_cache_set_failed = v;
	}

	if (attr == &sysfs_label) {
		if (size > SB_LABEL_SIZE)
			return -EINVAL;
		memcpy(dc->sb.label, buf, size);
		if (size < SB_LABEL_SIZE)
			dc->sb.label[size] = '\0';
		if (size && dc->sb.label[size - 1] == '\n')
			dc->sb.label[size - 1] = '\0';
		bch_write_bdev_super(dc, NULL);
		if (dc->disk.c) {
			memcpy(dc->disk.c->uuids[dc->disk.id].label,
			       buf, SB_LABEL_SIZE);
			bch_uuid_write(dc->disk.c);
		}
		env = kzalloc(sizeof(struct kobj_uevent_env), GFP_KERNEL);
		if (!env)
			return -ENOMEM;
		add_uevent_var(env, "DRIVER=bcache");
		add_uevent_var(env, "CACHED_UUID=%pU", dc->sb.uuid);
		add_uevent_var(env, "CACHED_LABEL=%s", buf);
		kobject_uevent_env(&disk_to_dev(dc->disk.disk)->kobj,
				   KOBJ_CHANGE, env->envp);
		kfree(env);
	}

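	/*
	 * Attaching expects the cache set's UUID to be written to the
	 * "attach" attribute, e.g. (illustrative):
	 *
	 *	echo <cset-uuid> > /sys/block/sdb/bcache/attach
	 */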
	if (attr == &sysfs_attach) {
		uint8_t set_uuid[16];

		if (bch_parse_uuid(buf, set_uuid) < 16)
			return -EINVAL;

		v = -ENOENT;
		list_for_each_entry(c, &bch_cache_sets, list) {
			v = bch_cached_dev_attach(dc, c, set_uuid);
			if (!v)
				return size;
		}

		pr_err("Can't attach %s: cache set not found", buf);
		return v;
	}

	if (attr == &sysfs_detach && dc->disk.c)
		bch_cached_dev_detach(dc);

	if (attr == &sysfs_stop)
		bcache_device_stop(&dc->disk);

	return size;
}

STORE(bch_cached_dev)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);

	mutex_lock(&bch_register_lock);
	size = __cached_dev_store(kobj, attr, buf, size);

	if (attr == &sysfs_writeback_running)
		bch_writeback_queue(dc);

	if (attr == &sysfs_writeback_percent)
		if (!test_and_set_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags))
			schedule_delayed_work(&dc->writeback_rate_update,
					      dc->writeback_rate_update_seconds * HZ);

	mutex_unlock(&bch_register_lock);
	return size;
}

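/*
 * The attribute arrays below are consumed by the KTYPE() macro (sysfs.h),
 * which appears to generate the kobj_type, sysfs_ops and default attribute
 * wiring for each kobject that bcache exposes.
 */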
static struct attribute *bch_cached_dev_files[] = {
	&sysfs_attach,
	&sysfs_detach,
	&sysfs_stop,
#if 0
	&sysfs_data_csum,
#endif
	&sysfs_cache_mode,
	&sysfs_stop_when_cache_set_failed,
	&sysfs_writeback_metadata,
	&sysfs_writeback_running,
	&sysfs_writeback_delay,
	&sysfs_writeback_percent,
	&sysfs_writeback_rate,
	&sysfs_writeback_rate_update_seconds,
	&sysfs_writeback_rate_i_term_inverse,
	&sysfs_writeback_rate_p_term_inverse,
	&sysfs_writeback_rate_minimum,
	&sysfs_writeback_rate_debug,
	&sysfs_dirty_data,
	&sysfs_stripe_size,
	&sysfs_partial_stripes_expensive,
	&sysfs_sequential_cutoff,
	&sysfs_clear_stats,
	&sysfs_running,
	&sysfs_state,
	&sysfs_label,
	&sysfs_readahead,
#ifdef CONFIG_BCACHE_DEBUG
	&sysfs_verify,
	&sysfs_bypass_torture_test,
#endif
	NULL
};
KTYPE(bch_cached_dev);

SHOW(bch_flash_dev)
{
	struct bcache_device *d = container_of(kobj, struct bcache_device,
					       kobj);
	struct uuid_entry *u = &d->c->uuids[d->id];

	sysfs_printf(data_csum, "%i", d->data_csum);
	sysfs_hprint(size, u->sectors << 9);

	if (attr == &sysfs_label) {
		memcpy(buf, u->label, SB_LABEL_SIZE);
		buf[SB_LABEL_SIZE] = '\0';	/* label may not be NUL terminated */
		strcat(buf, "\n");
		return strlen(buf);
	}

	return 0;
}

STORE(__bch_flash_dev)
{
	struct bcache_device *d = container_of(kobj, struct bcache_device,
					       kobj);
	struct uuid_entry *u = &d->c->uuids[d->id];

	sysfs_strtoul(data_csum, d->data_csum);

	if (attr == &sysfs_size) {
		uint64_t v;
		strtoi_h_or_return(buf, v);

		u->sectors = v >> 9;
		bch_uuid_write(d->c);
		set_capacity(d->disk, u->sectors);
	}

	if (attr == &sysfs_label) {
		memcpy(u->label, buf, SB_LABEL_SIZE);
		bch_uuid_write(d->c);
	}

	if (attr == &sysfs_unregister) {
		set_bit(BCACHE_DEV_DETACHING, &d->flags);
		bcache_device_stop(d);
	}

	return size;
}
STORE_LOCKED(bch_flash_dev)

static struct attribute *bch_flash_dev_files[] = {
	&sysfs_unregister,
#if 0
	&sysfs_data_csum,
#endif
	&sysfs_label,
	&sysfs_size,
	NULL
};
KTYPE(bch_flash_dev);

struct bset_stats_op {
	struct btree_op op;
	size_t nodes;
	struct bset_stats stats;
};

static int bch_btree_bset_stats(struct btree_op *b_op, struct btree *b)
{
	struct bset_stats_op *op = container_of(b_op, struct bset_stats_op, op);

	op->nodes++;
	bch_btree_keys_stats(&b->keys, &op->stats);

	return MAP_CONTINUE;
}

static int bch_bset_print_stats(struct cache_set *c, char *buf)
{
	struct bset_stats_op op;
	int ret;

	memset(&op, 0, sizeof(op));
	bch_btree_op_init(&op.op, -1);

	ret = bch_btree_map_nodes(&op.op, c, &ZERO_KEY, bch_btree_bset_stats);
	if (ret < 0)
		return ret;

	return snprintf(buf, PAGE_SIZE,
			"btree nodes: %zu\n"
			"written sets: %zu\n"
			"unwritten sets: %zu\n"
			"written key bytes: %zu\n"
			"unwritten key bytes: %zu\n"
			"floats: %zu\n"
			"failed: %zu\n",
			op.nodes,
			op.stats.sets_written, op.stats.sets_unwritten,
			op.stats.bytes_written, op.stats.bytes_unwritten,
			op.stats.floats, op.stats.failed);
}

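/*
 * bch_root_usage() needs the root node read-locked, but the root can be
 * replaced (e.g. by a split) while we wait for the lock, so the lock_root
 * loop below retries until the node we locked is still c->root.
 */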
static unsigned bch_root_usage(struct cache_set *c)
{
	unsigned bytes = 0;
	struct bkey *k;
	struct btree *b;
	struct btree_iter iter;

	goto lock_root;

	do {
		rw_unlock(false, b);
lock_root:
		b = c->root;
		rw_lock(false, b, b->level);
	} while (b != c->root);

	for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
		bytes += bkey_bytes(k);

	rw_unlock(false, b);

	return (bytes * 100) / btree_bytes(c);
}

static size_t bch_cache_size(struct cache_set *c)
{
	size_t ret = 0;
	struct btree *b;

	mutex_lock(&c->bucket_lock);
	list_for_each_entry(b, &c->btree_cache, list)
		ret += 1 << (b->keys.page_order + PAGE_SHIFT);

	mutex_unlock(&c->bucket_lock);
	return ret;
}

static unsigned bch_cache_max_chain(struct cache_set *c)
{
	unsigned ret = 0;
	struct hlist_head *h;

	mutex_lock(&c->bucket_lock);

	for (h = c->bucket_hash;
	     h < c->bucket_hash + (1 << BUCKET_HASH_BITS);
	     h++) {
		unsigned i = 0;
		struct hlist_node *p;

		hlist_for_each(p, h)
			i++;

		ret = max(ret, i);
	}

	mutex_unlock(&c->bucket_lock);
	return ret;
}

static unsigned bch_btree_used(struct cache_set *c)
{
	return div64_u64(c->gc_stats.key_bytes * 100,
			 (c->gc_stats.nodes ?: 1) * btree_bytes(c));
}

static unsigned bch_average_key_size(struct cache_set *c)
{
	return c->gc_stats.nkeys
		? div64_u64(c->gc_stats.data, c->gc_stats.nkeys)
		: 0;
}

SHOW(__bch_cache_set)
{
	struct cache_set *c = container_of(kobj, struct cache_set, kobj);

	sysfs_print(synchronous, CACHE_SYNC(&c->sb));
	sysfs_print(journal_delay_ms, c->journal_delay_ms);
	sysfs_hprint(bucket_size, bucket_bytes(c));
	sysfs_hprint(block_size, block_bytes(c));
	sysfs_print(tree_depth, c->root->level);
	sysfs_print(root_usage_percent, bch_root_usage(c));

	sysfs_hprint(btree_cache_size, bch_cache_size(c));
	sysfs_print(btree_cache_max_chain, bch_cache_max_chain(c));
	sysfs_print(cache_available_percent, 100 - c->gc_stats.in_use);

	sysfs_print_time_stats(&c->btree_gc_time, btree_gc, sec, ms);
	sysfs_print_time_stats(&c->btree_split_time, btree_split, sec, us);
	sysfs_print_time_stats(&c->sort.time, btree_sort, ms, us);
	sysfs_print_time_stats(&c->btree_read_time, btree_read, ms, us);

	sysfs_print(btree_used_percent, bch_btree_used(c));
	sysfs_print(btree_nodes, c->gc_stats.nodes);
	sysfs_hprint(average_key_size, bch_average_key_size(c));

	sysfs_print(cache_read_races,
		    atomic_long_read(&c->cache_read_races));

	sysfs_print(writeback_keys_done,
		    atomic_long_read(&c->writeback_keys_done));
	sysfs_print(writeback_keys_failed,
		    atomic_long_read(&c->writeback_keys_failed));

	if (attr == &sysfs_errors)
		return bch_snprint_string_list(buf, PAGE_SIZE, error_actions,
					       c->on_error);

	/* See count_io_errors for why 88 */
	sysfs_print(io_error_halflife, c->error_decay * 88);
	sysfs_print(io_error_limit, c->error_limit >> IO_ERROR_SHIFT);

	sysfs_hprint(congested,
		     ((uint64_t) bch_get_congested(c)) << 9);
	sysfs_print(congested_read_threshold_us,
		    c->congested_read_threshold_us);
	sysfs_print(congested_write_threshold_us,
		    c->congested_write_threshold_us);

	sysfs_print(active_journal_entries, fifo_used(&c->journal.pin));
	sysfs_printf(verify, "%i", c->verify);
	sysfs_printf(key_merging_disabled, "%i", c->key_merging_disabled);
	sysfs_printf(expensive_debug_checks,
		     "%i", c->expensive_debug_checks);
	sysfs_printf(gc_always_rewrite, "%i", c->gc_always_rewrite);
	sysfs_printf(btree_shrinker_disabled, "%i", c->shrinker_disabled);
	sysfs_printf(copy_gc_enabled, "%i", c->copy_gc_enabled);
	sysfs_printf(io_disable, "%i",
		     test_bit(CACHE_SET_IO_DISABLE, &c->flags));

	if (attr == &sysfs_bset_tree_stats)
		return bch_bset_print_stats(c, buf);

	return 0;
}
SHOW_LOCKED(bch_cache_set)

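/*
 * Cache set attributes live under /sys/fs/bcache/<set-uuid>/, with the more
 * obscure knobs in the internal/ subdirectory, e.g. (illustrative):
 *
 *	echo 1 > /sys/fs/bcache/<set-uuid>/internal/trigger_gc
 */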
STORE(__bch_cache_set)
{
	struct cache_set *c = container_of(kobj, struct cache_set, kobj);

	if (attr == &sysfs_unregister)
		bch_cache_set_unregister(c);

	if (attr == &sysfs_stop)
		bch_cache_set_stop(c);

	if (attr == &sysfs_synchronous) {
		bool sync = strtoul_or_return(buf);

		if (sync != CACHE_SYNC(&c->sb)) {
			SET_CACHE_SYNC(&c->sb, sync);
			bcache_write_super(c);
		}
	}

	if (attr == &sysfs_flash_vol_create) {
		int r;
		uint64_t v;
		strtoi_h_or_return(buf, v);

		r = bch_flash_dev_create(c, v);
		if (r)
			return r;
	}

	if (attr == &sysfs_clear_stats) {
		atomic_long_set(&c->writeback_keys_done, 0);
		atomic_long_set(&c->writeback_keys_failed, 0);

		memset(&c->gc_stats, 0, sizeof(struct gc_stat));
		bch_cache_accounting_clear(&c->accounting);
	}

	if (attr == &sysfs_trigger_gc) {
		/*
		 * The garbage collection thread only runs when
		 * sectors_to_gc < 0; when users write to the sysfs entry
		 * trigger_gc, most of the time they want to forcibly
		 * trigger garbage collection.  Setting c->sectors_to_gc to
		 * -1 here gives gc_should_run() a chance to let the gc
		 * thread run.  "A chance" because, before gc_should_run()
		 * is reached, c->sectors_to_gc may be set to some other
		 * positive value again, so writing the trigger_gc sysfs
		 * entry does not guarantee that the gc thread takes effect.
		 */
		atomic_set(&c->sectors_to_gc, -1);
		wake_up_gc(c);
	}

	if (attr == &sysfs_prune_cache) {
		struct shrink_control sc;
		sc.gfp_mask = GFP_KERNEL;
		sc.nr_to_scan = strtoul_or_return(buf);
		c->shrink.scan_objects(&c->shrink, &sc);
	}

	sysfs_strtoul(congested_read_threshold_us,
		      c->congested_read_threshold_us);
	sysfs_strtoul(congested_write_threshold_us,
		      c->congested_write_threshold_us);

	if (attr == &sysfs_errors) {
		ssize_t v = bch_read_string_list(buf, error_actions);

		if (v < 0)
			return v;

		c->on_error = v;
	}

	if (attr == &sysfs_io_error_limit)
		c->error_limit = strtoul_or_return(buf) << IO_ERROR_SHIFT;

	/* See count_io_errors() for why 88 */
	if (attr == &sysfs_io_error_halflife)
		c->error_decay = strtoul_or_return(buf) / 88;

	if (attr == &sysfs_io_disable) {
		int v = strtoul_or_return(buf);

		if (v) {
			if (test_and_set_bit(CACHE_SET_IO_DISABLE,
					     &c->flags))
				pr_warn("CACHE_SET_IO_DISABLE already set");
		} else {
			if (!test_and_clear_bit(CACHE_SET_IO_DISABLE,
						&c->flags))
				pr_warn("CACHE_SET_IO_DISABLE already cleared");
		}
	}

	sysfs_strtoul(journal_delay_ms, c->journal_delay_ms);
	sysfs_strtoul(verify, c->verify);
	sysfs_strtoul(key_merging_disabled, c->key_merging_disabled);
	sysfs_strtoul(expensive_debug_checks, c->expensive_debug_checks);
	sysfs_strtoul(gc_always_rewrite, c->gc_always_rewrite);
	sysfs_strtoul(btree_shrinker_disabled, c->shrinker_disabled);
	sysfs_strtoul(copy_gc_enabled, c->copy_gc_enabled);

	return size;
}
STORE_LOCKED(bch_cache_set)

SHOW(bch_cache_set_internal)
{
	struct cache_set *c = container_of(kobj, struct cache_set, internal);
	return bch_cache_set_show(&c->kobj, attr, buf);
}

STORE(bch_cache_set_internal)
{
	struct cache_set *c = container_of(kobj, struct cache_set, internal);
	return bch_cache_set_store(&c->kobj, attr, buf, size);
}

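/*
 * The "internal" kobject is embedded in struct cache_set and goes away with
 * it, so its release callback intentionally has nothing to do.
 */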
static void bch_cache_set_internal_release(struct kobject *k)
{
}

static struct attribute *bch_cache_set_files[] = {
	&sysfs_unregister,
	&sysfs_stop,
	&sysfs_synchronous,
	&sysfs_journal_delay_ms,
	&sysfs_flash_vol_create,

	&sysfs_bucket_size,
	&sysfs_block_size,
	&sysfs_tree_depth,
	&sysfs_root_usage_percent,
	&sysfs_btree_cache_size,
	&sysfs_cache_available_percent,

	&sysfs_average_key_size,

	&sysfs_errors,
	&sysfs_io_error_limit,
	&sysfs_io_error_halflife,
	&sysfs_congested,
	&sysfs_congested_read_threshold_us,
	&sysfs_congested_write_threshold_us,
	&sysfs_clear_stats,
	NULL
};
KTYPE(bch_cache_set);

static struct attribute *bch_cache_set_internal_files[] = {
	&sysfs_active_journal_entries,

	sysfs_time_stats_attribute_list(btree_gc, sec, ms)
	sysfs_time_stats_attribute_list(btree_split, sec, us)
	sysfs_time_stats_attribute_list(btree_sort, ms, us)
	sysfs_time_stats_attribute_list(btree_read, ms, us)

	&sysfs_btree_nodes,
	&sysfs_btree_used_percent,
	&sysfs_btree_cache_max_chain,

	&sysfs_bset_tree_stats,
	&sysfs_cache_read_races,
	&sysfs_writeback_keys_done,
	&sysfs_writeback_keys_failed,

	&sysfs_trigger_gc,
	&sysfs_prune_cache,
#ifdef CONFIG_BCACHE_DEBUG
	&sysfs_verify,
	&sysfs_key_merging_disabled,
	&sysfs_expensive_debug_checks,
#endif
	&sysfs_gc_always_rewrite,
	&sysfs_btree_shrinker_disabled,
	&sysfs_copy_gc_enabled,
	&sysfs_io_disable,
	NULL
};
KTYPE(bch_cache_set_internal);

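/* Sorts bucket priorities in descending order (used by priority_stats). */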
static int __bch_cache_cmp(const void *l, const void *r)
{
	return *((uint16_t *)r) - *((uint16_t *)l);
}

SHOW(__bch_cache)
{
	struct cache *ca = container_of(kobj, struct cache, kobj);

	sysfs_hprint(bucket_size, bucket_bytes(ca));
	sysfs_hprint(block_size, block_bytes(ca));
	sysfs_print(nbuckets, ca->sb.nbuckets);
	sysfs_print(discard, ca->discard);
	sysfs_hprint(written, atomic_long_read(&ca->sectors_written) << 9);
	sysfs_hprint(btree_written,
		     atomic_long_read(&ca->btree_sectors_written) << 9);
	sysfs_hprint(metadata_written,
		     (atomic_long_read(&ca->meta_sectors_written) +
		      atomic_long_read(&ca->btree_sectors_written)) << 9);

	sysfs_print(io_errors,
		    atomic_read(&ca->io_errors) >> IO_ERROR_SHIFT);

	if (attr == &sysfs_cache_replacement_policy)
		return bch_snprint_string_list(buf, PAGE_SIZE,
					       cache_replacement_policies,
					       CACHE_REPLACEMENT(&ca->sb));

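	/*
	 * priority_stats: snapshot every bucket's priority, sort in
	 * descending order, strip trailing zero-priority (unused) buckets
	 * and leading BTREE_PRIO (metadata) buckets, then report the
	 * average and 31 quantiles of INITIAL_PRIO - prio for the rest.
	 */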
	if (attr == &sysfs_priority_stats) {
		struct bucket *b;
		size_t n = ca->sb.nbuckets, i;
		size_t unused = 0, available = 0, dirty = 0, meta = 0;
		uint64_t sum = 0;
		/* Compute 31 quantiles */
		uint16_t q[31], *p, *cached;
		ssize_t ret;

		cached = p = vmalloc(ca->sb.nbuckets * sizeof(uint16_t));
		if (!p)
			return -ENOMEM;

		mutex_lock(&ca->set->bucket_lock);
		for_each_bucket(b, ca) {
			if (!GC_SECTORS_USED(b))
				unused++;
			if (GC_MARK(b) == GC_MARK_RECLAIMABLE)
				available++;
			if (GC_MARK(b) == GC_MARK_DIRTY)
				dirty++;
			if (GC_MARK(b) == GC_MARK_METADATA)
				meta++;
		}

		for (i = ca->sb.first_bucket; i < n; i++)
			p[i] = ca->buckets[i].prio;
		mutex_unlock(&ca->set->bucket_lock);

		sort(p, n, sizeof(uint16_t), __bch_cache_cmp, NULL);

		while (n &&
		       !cached[n - 1])
			--n;

		unused = ca->sb.nbuckets - n;

		while (cached < p + n &&
		       *cached == BTREE_PRIO)
			cached++, n--;

		for (i = 0; i < n; i++)
			sum += INITIAL_PRIO - cached[i];

		if (n)
			do_div(sum, n);

		for (i = 0; i < ARRAY_SIZE(q); i++)
			q[i] = INITIAL_PRIO - cached[n * (i + 1) /
				(ARRAY_SIZE(q) + 1)];

		vfree(p);

		ret = scnprintf(buf, PAGE_SIZE,
				"Unused: %zu%%\n"
				"Clean: %zu%%\n"
				"Dirty: %zu%%\n"
				"Metadata: %zu%%\n"
				"Average: %llu\n"
				"Sectors per Q: %zu\n"
				"Quantiles: [",
				unused * 100 / (size_t) ca->sb.nbuckets,
				available * 100 / (size_t) ca->sb.nbuckets,
				dirty * 100 / (size_t) ca->sb.nbuckets,
				meta * 100 / (size_t) ca->sb.nbuckets, sum,
				n * ca->sb.bucket_size / (ARRAY_SIZE(q) + 1));

		for (i = 0; i < ARRAY_SIZE(q); i++)
			ret += scnprintf(buf + ret, PAGE_SIZE - ret,
					 "%u ", q[i]);
		ret--;

		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "]\n");

		return ret;
	}

	return 0;
}
SHOW_LOCKED(bch_cache)

STORE(__bch_cache)
{
	struct cache *ca = container_of(kobj, struct cache, kobj);

	if (attr == &sysfs_discard) {
		bool v = strtoul_or_return(buf);

		if (blk_queue_discard(bdev_get_queue(ca->bdev)))
			ca->discard = v;

		if (v != CACHE_DISCARD(&ca->sb)) {
			SET_CACHE_DISCARD(&ca->sb, v);
			bcache_write_super(ca->set);
		}
	}

	if (attr == &sysfs_cache_replacement_policy) {
		ssize_t v = bch_read_string_list(buf, cache_replacement_policies);

		if (v < 0)
			return v;

		if ((unsigned) v != CACHE_REPLACEMENT(&ca->sb)) {
			mutex_lock(&ca->set->bucket_lock);
			SET_CACHE_REPLACEMENT(&ca->sb, v);
			mutex_unlock(&ca->set->bucket_lock);

			bcache_write_super(ca->set);
		}
	}

	if (attr == &sysfs_clear_stats) {
		atomic_long_set(&ca->sectors_written, 0);
		atomic_long_set(&ca->btree_sectors_written, 0);
		atomic_long_set(&ca->meta_sectors_written, 0);
		atomic_set(&ca->io_count, 0);
		atomic_set(&ca->io_errors, 0);
	}

	return size;
}
STORE_LOCKED(bch_cache)

static struct attribute *bch_cache_files[] = {
	&sysfs_bucket_size,
	&sysfs_block_size,
	&sysfs_nbuckets,
	&sysfs_priority_stats,
	&sysfs_discard,
	&sysfs_written,
	&sysfs_btree_written,
	&sysfs_metadata_written,
	&sysfs_io_errors,
	&sysfs_clear_stats,
	&sysfs_cache_replacement_policy,
	NULL
};
KTYPE(bch_cache);