// SPDX-License-Identifier: GPL-2.0
/*
 * bcache sysfs interfaces
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */
#include "bcache.h"
#include "sysfs.h"
#include "btree.h"
#include "request.h"
#include "writeback.h"

#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/sched/clock.h>
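
/*
 * This file implements the sysfs interface for bcache: per-backing-device
 * attributes (visible under /sys/block/<dev>/bcache/), per-cache-set
 * attributes (under /sys/fs/bcache/<set-uuid>/ and its internal/
 * subdirectory), and per-cache-device attributes. Illustrative usage from
 * userspace, assuming a registered backing device exposed as bcache0:
 *
 *	echo writeback > /sys/block/bcache0/bcache/cache_mode
 *	cat /sys/block/bcache0/bcache/state
 */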
/* Default is -1; we skip past it for struct cached_dev's cache mode */
static const char * const bch_cache_modes[] = {
	"default",
	"writethrough",
	"writeback",
	"writearound",
	"none",
	NULL
};

/* Default is -1; we skip past it for stop_when_cache_set_failed */
static const char * const bch_stop_on_failure_modes[] = {
	"default",
	"auto",
	"always",
	NULL
};

static const char * const cache_replacement_policies[] = {
	"lru",
	"fifo",
	"random",
	NULL
};

static const char * const error_actions[] = {
	"unregister",
	"panic",
	NULL
};
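
/*
 * These NULL-terminated tables back the multiple-choice sysfs attributes:
 * reads are rendered by bch_snprint_string_list() with the current choice
 * in brackets, and writes are parsed by bch_read_string_list(). Where the
 * leading "default" entry is only a placeholder for the -1 value noted
 * above, the show/store code below passes "list + 1" to skip it.
 */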
write_attribute(attach);
write_attribute(detach);
write_attribute(unregister);
write_attribute(stop);
write_attribute(clear_stats);
write_attribute(trigger_gc);
write_attribute(prune_cache);
write_attribute(flash_vol_create);

read_attribute(bucket_size);
read_attribute(block_size);
read_attribute(nbuckets);
read_attribute(tree_depth);
read_attribute(root_usage_percent);
read_attribute(priority_stats);
read_attribute(btree_cache_size);
read_attribute(btree_cache_max_chain);
read_attribute(cache_available_percent);
read_attribute(written);
read_attribute(btree_written);
read_attribute(metadata_written);
read_attribute(active_journal_entries);
sysfs_time_stats_attribute(btree_gc, sec, ms);
sysfs_time_stats_attribute(btree_split, sec, us);
sysfs_time_stats_attribute(btree_sort, ms, us);
sysfs_time_stats_attribute(btree_read, ms, us);
read_attribute(btree_nodes);
read_attribute(btree_used_percent);
read_attribute(average_key_size);
read_attribute(dirty_data);
read_attribute(bset_tree_stats);

read_attribute(state);
read_attribute(cache_read_races);
read_attribute(reclaim);
read_attribute(flush_write);
read_attribute(retry_flush_write);
read_attribute(writeback_keys_done);
read_attribute(writeback_keys_failed);
read_attribute(io_errors);
read_attribute(congested);
rw_attribute(congested_read_threshold_us);
rw_attribute(congested_write_threshold_us);

rw_attribute(sequential_cutoff);
rw_attribute(data_csum);
rw_attribute(cache_mode);
rw_attribute(stop_when_cache_set_failed);
rw_attribute(writeback_metadata);
rw_attribute(writeback_running);
rw_attribute(writeback_percent);
rw_attribute(writeback_delay);
rw_attribute(writeback_rate);

rw_attribute(writeback_rate_update_seconds);
rw_attribute(writeback_rate_i_term_inverse);
rw_attribute(writeback_rate_p_term_inverse);
rw_attribute(writeback_rate_minimum);
read_attribute(writeback_rate_debug);

read_attribute(stripe_size);
read_attribute(partial_stripes_expensive);

rw_attribute(synchronous);
rw_attribute(journal_delay_ms);
rw_attribute(io_disable);
rw_attribute(discard);
rw_attribute(running);
rw_attribute(label);
rw_attribute(readahead);
rw_attribute(errors);
rw_attribute(io_error_limit);
rw_attribute(io_error_halflife);
rw_attribute(verify);
rw_attribute(bypass_torture_test);
rw_attribute(key_merging_disabled);
rw_attribute(gc_always_rewrite);
rw_attribute(expensive_debug_checks);
rw_attribute(cache_replacement_policy);
rw_attribute(btree_shrinker_disabled);
rw_attribute(copy_gc_enabled);
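
/*
 * read_attribute()/write_attribute()/rw_attribute() (from sysfs.h) declare
 * a static struct attribute named sysfs_<name> with the matching permission
 * bits; the SHOW()/STORE() handlers below dispatch on the attribute's
 * address, e.g. "attr == &sysfs_cache_mode".
 */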
static ssize_t bch_snprint_string_list(char *buf, size_t size,
				       const char * const list[],
				       size_t selected)
{
	char *out = buf;
	size_t i;

	for (i = 0; list[i]; i++)
		out += snprintf(out, buf + size - out,
				i == selected ? "[%s] " : "%s ", list[i]);

	out[-1] = '\n';
	return out - buf;
}
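
/*
 * Example: with list = { "lru", "fifo", "random", NULL } and selected == 1,
 * the buffer ends up as "lru [fifo] random\n" (the trailing space is
 * overwritten with the newline).
 */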
static ssize_t bch_read_string_list(const char *buf,
				    const char * const list[])
{
	size_t i;
	char *s, *d = kstrndup(buf, PAGE_SIZE - 1, GFP_KERNEL);

	if (!d)
		return -ENOMEM;

	s = strim(d);

	for (i = 0; list[i]; i++)
		if (!strcmp(list[i], s))
			break;

	kfree(d);

	if (!list[i])
		return -EINVAL;

	return i;
}
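
/*
 * Returns the index of the matching entry, so a store handler can assign
 * it directly; -EINVAL if the (whitespace-trimmed) input matches no entry.
 */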
SHOW(__bch_cached_dev)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);
	const char *states[] = { "no cache", "clean", "dirty", "inconsistent" };

#define var(stat)		(dc->stat)
	if (attr == &sysfs_cache_mode)
		return bch_snprint_string_list(buf, PAGE_SIZE,
					       bch_cache_modes + 1,
					       BDEV_CACHE_MODE(&dc->sb));

	if (attr == &sysfs_stop_when_cache_set_failed)
		return bch_snprint_string_list(buf, PAGE_SIZE,
					       bch_stop_on_failure_modes + 1,
					       dc->stop_when_cache_set_failed);
	sysfs_printf(data_csum,	"%i", dc->disk.data_csum);
	var_printf(verify,	"%i");
	var_printf(bypass_torture_test,	"%i");
	var_printf(writeback_metadata,	"%i");
	var_printf(writeback_running,	"%i");
	var_print(writeback_delay);
	var_print(writeback_percent);
	sysfs_hprint(writeback_rate,	dc->writeback_rate.rate << 9);
	sysfs_printf(io_errors,		"%i", atomic_read(&dc->io_errors));
	sysfs_printf(io_error_limit,	"%i", dc->error_limit);
	sysfs_printf(io_disable,	"%i", dc->io_disable);
	var_print(writeback_rate_update_seconds);
	var_print(writeback_rate_i_term_inverse);
	var_print(writeback_rate_p_term_inverse);
	var_print(writeback_rate_minimum);
	if (attr == &sysfs_writeback_rate_debug) {
		char rate[20];
		char dirty[20];
		char target[20];
		char proportional[20];
		char integral[20];
		char change[20];
		s64 next_io;

		bch_hprint(rate,	dc->writeback_rate.rate << 9);
		bch_hprint(dirty,	bcache_dev_sectors_dirty(&dc->disk) << 9);
		bch_hprint(target,	dc->writeback_rate_target << 9);
		bch_hprint(proportional, dc->writeback_rate_proportional << 9);
		bch_hprint(integral,	dc->writeback_rate_integral_scaled << 9);
		bch_hprint(change,	dc->writeback_rate_change << 9);

		next_io = div64_s64(dc->writeback_rate.next - local_clock(),
				    NSEC_PER_MSEC);

		return sprintf(buf,
			       "rate:\t\t%s/sec\n"
			       "dirty:\t\t%s\n"
			       "target:\t\t%s\n"
			       "proportional:\t%s\n"
			       "integral:\t%s\n"
			       "change:\t\t%s/sec\n"
			       "next io:\t%llims\n",
			       rate, dirty, target, proportional,
			       integral, change, next_io);
	}
	sysfs_hprint(dirty_data,
		     bcache_dev_sectors_dirty(&dc->disk) << 9);

	sysfs_hprint(stripe_size, ((uint64_t)dc->disk.stripe_size) << 9);
	var_printf(partial_stripes_expensive,	"%u");

	var_hprint(sequential_cutoff);
	var_hprint(readahead);

	sysfs_print(running,		atomic_read(&dc->running));
	sysfs_print(state,		states[BDEV_STATE(&dc->sb)]);
	if (attr == &sysfs_label) {
		memcpy(buf, dc->sb.label, SB_LABEL_SIZE);
		buf[SB_LABEL_SIZE] = '\0';
		strcat(buf, "\n");
		return strlen(buf);
	}

#undef var
	return 0;
}
SHOW_LOCKED(bch_cached_dev)

STORE(__cached_dev)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);
	ssize_t v;
	struct cache_set *c;
	struct kobj_uevent_env *env;

#define d_strtoul(var)		sysfs_strtoul(var, dc->var)
#define d_strtoul_nonzero(var)	sysfs_strtoul_clamp(var, dc->var, 1, INT_MAX)
#define d_strtoi_h(var)		sysfs_hatoi(var, dc->var)
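
/*
 * Shorthand for the common case where the sysfs attribute name matches the
 * struct cached_dev member name: d_strtoul(foo) parses the buffer into
 * dc->foo when attr == &sysfs_foo, and likewise for the clamped and
 * human-readable (hatoi) variants.
 */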
	sysfs_strtoul(data_csum,	dc->disk.data_csum);
	d_strtoul(verify);
	d_strtoul(bypass_torture_test);
	d_strtoul(writeback_metadata);
	d_strtoul(writeback_running);
	d_strtoul(writeback_delay);

	sysfs_strtoul_clamp(writeback_percent, dc->writeback_percent, 0, 40);
	sysfs_strtoul_clamp(writeback_rate,
			    dc->writeback_rate.rate, 1, INT_MAX);

	sysfs_strtoul_clamp(writeback_rate_update_seconds,
			    dc->writeback_rate_update_seconds,
			    1, WRITEBACK_RATE_UPDATE_SECS_MAX);
	sysfs_strtoul_clamp(writeback_rate_i_term_inverse,
			    dc->writeback_rate_i_term_inverse,
			    1, UINT_MAX);
	sysfs_strtoul_clamp(writeback_rate_p_term_inverse,
			    dc->writeback_rate_p_term_inverse,
			    1, UINT_MAX);
	d_strtoul_nonzero(writeback_rate_minimum);

	sysfs_strtoul_clamp(io_error_limit, dc->error_limit, 0, INT_MAX);
	if (attr == &sysfs_io_disable) {
		int v = strtoul_or_return(buf);

		dc->io_disable = v ? 1 : 0;
	}
	sysfs_strtoul_clamp(sequential_cutoff,
			    dc->sequential_cutoff,
			    0, UINT_MAX);
	d_strtoi_h(readahead);
	if (attr == &sysfs_clear_stats)
		bch_cache_accounting_clear(&dc->accounting);

	if (attr == &sysfs_running &&
	    strtoul_or_return(buf))
		bch_cached_dev_run(dc);
	if (attr == &sysfs_cache_mode) {
		v = bch_read_string_list(buf, bch_cache_modes + 1);
		if (v < 0)
			return v;

		if ((unsigned) v != BDEV_CACHE_MODE(&dc->sb)) {
			SET_BDEV_CACHE_MODE(&dc->sb, v);
			bch_write_bdev_super(dc, NULL);
		}
	}
	if (attr == &sysfs_stop_when_cache_set_failed) {
		v = bch_read_string_list(buf, bch_stop_on_failure_modes + 1);
		if (v < 0)
			return v;

		dc->stop_when_cache_set_failed = v;
	}
	if (attr == &sysfs_label) {
		if (size > SB_LABEL_SIZE)
			return -EINVAL;
		memcpy(dc->sb.label, buf, size);
		if (size < SB_LABEL_SIZE)
			dc->sb.label[size] = '\0';
		if (size && dc->sb.label[size - 1] == '\n')
			dc->sb.label[size - 1] = '\0';
		bch_write_bdev_super(dc, NULL);
		if (dc->disk.c) {
			memcpy(dc->disk.c->uuids[dc->disk.id].label,
			       buf, SB_LABEL_SIZE);
			bch_uuid_write(dc->disk.c);
		}
		env = kzalloc(sizeof(struct kobj_uevent_env), GFP_KERNEL);
		if (!env)
			return -ENOMEM;
		add_uevent_var(env, "DRIVER=bcache");
		add_uevent_var(env, "CACHED_UUID=%pU", dc->sb.uuid);
		add_uevent_var(env, "CACHED_LABEL=%s", buf);
		kobject_uevent_env(&disk_to_dev(dc->disk.disk)->kobj,
				   KOBJ_CHANGE, env->envp);
		kfree(env);
	}
	if (attr == &sysfs_attach) {
		uint8_t set_uuid[16];

		if (bch_parse_uuid(buf, set_uuid) < 16)
			return -EINVAL;

		v = -ENOENT;
		list_for_each_entry(c, &bch_cache_sets, list) {
			v = bch_cached_dev_attach(dc, c, set_uuid);
			if (!v)
				return size;
		}

		pr_err("Can't attach %s: cache set not found", buf);
		return v;
	}
	if (attr == &sysfs_detach && dc->disk.c)
		bch_cached_dev_detach(dc);

	if (attr == &sysfs_stop)
		bcache_device_stop(&dc->disk);

	return size;
}
STORE(bch_cached_dev)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);

	mutex_lock(&bch_register_lock);
	size = __cached_dev_store(kobj, attr, buf, size);

	if (attr == &sysfs_writeback_running)
		bch_writeback_queue(dc);
	/*
	 * Only set BCACHE_DEV_WB_RUNNING when the cached device is attached
	 * to a cache set; otherwise it doesn't make sense.
	 */
	if (attr == &sysfs_writeback_percent)
		if ((dc->disk.c != NULL) &&
		    (!test_and_set_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags)))
			schedule_delayed_work(&dc->writeback_rate_update,
					      dc->writeback_rate_update_seconds * HZ);

	mutex_unlock(&bch_register_lock);
	return size;
}
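
/*
 * STORE(bch_cached_dev) is the store handler actually wired into the ktype;
 * it takes bch_register_lock around __cached_dev_store() and then performs
 * the wakeups that need the device to stay pinned. Illustrative tuning from
 * userspace (path assumes the device is exposed as bcache0):
 *
 *	echo 10 > /sys/block/bcache0/bcache/writeback_percent
 */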
static struct attribute *bch_cached_dev_files[] = {
	&sysfs_attach,
	&sysfs_detach,
	&sysfs_stop,
#if 0
	&sysfs_data_csum,
#endif
	&sysfs_cache_mode,
	&sysfs_stop_when_cache_set_failed,
	&sysfs_writeback_metadata,
	&sysfs_writeback_running,
	&sysfs_writeback_delay,
	&sysfs_writeback_percent,
	&sysfs_writeback_rate,
	&sysfs_writeback_rate_update_seconds,
	&sysfs_writeback_rate_i_term_inverse,
	&sysfs_writeback_rate_p_term_inverse,
	&sysfs_writeback_rate_minimum,
	&sysfs_writeback_rate_debug,
	&sysfs_errors,
	&sysfs_io_error_limit,
	&sysfs_io_disable,
	&sysfs_dirty_data,
	&sysfs_stripe_size,
	&sysfs_partial_stripes_expensive,
	&sysfs_sequential_cutoff,
	&sysfs_clear_stats,
	&sysfs_running,
	&sysfs_state,
	&sysfs_label,
	&sysfs_readahead,
#ifdef CONFIG_BCACHE_DEBUG
	&sysfs_verify,
	&sysfs_bypass_torture_test,
#endif
	NULL
};
KTYPE(bch_cached_dev);
SHOW(__bch_flash_dev)
{
	struct bcache_device *d = container_of(kobj, struct bcache_device,
					       kobj);
	struct uuid_entry *u = &d->c->uuids[d->id];

	sysfs_printf(data_csum,	"%i", d->data_csum);
	sysfs_hprint(size,	u->sectors << 9);

	if (attr == &sysfs_label) {
		memcpy(buf, u->label, SB_LABEL_SIZE);
		buf[SB_LABEL_SIZE] = '\0';
		strcat(buf, "\n");
		return strlen(buf);
	}

	return 0;
}
SHOW_LOCKED(bch_flash_dev)
STORE(__bch_flash_dev)
{
	struct bcache_device *d = container_of(kobj, struct bcache_device,
					       kobj);
	struct uuid_entry *u = &d->c->uuids[d->id];

	sysfs_strtoul(data_csum, d->data_csum);

	if (attr == &sysfs_size) {
		uint64_t v;
		strtoi_h_or_return(buf, v);

		u->sectors = v >> 9;
		bch_uuid_write(d->c);
		set_capacity(d->disk, u->sectors);
	}
	if (attr == &sysfs_label) {
		memcpy(u->label, buf, SB_LABEL_SIZE);
		bch_uuid_write(d->c);
	}

	if (attr == &sysfs_unregister) {
		set_bit(BCACHE_DEV_DETACHING, &d->flags);
		bcache_device_stop(d);
	}

	return size;
}
STORE_LOCKED(bch_flash_dev)

static struct attribute *bch_flash_dev_files[] = {
	&sysfs_unregister,
#if 0
	&sysfs_data_csum,
#endif
	&sysfs_label,
	&sysfs_size,
	NULL
};
KTYPE(bch_flash_dev);
struct bset_stats_op {
	struct btree_op op;
	size_t nodes;
	struct bset_stats stats;
};

static int bch_btree_bset_stats(struct btree_op *b_op, struct btree *b)
{
	struct bset_stats_op *op = container_of(b_op, struct bset_stats_op, op);

	op->nodes++;
	bch_btree_keys_stats(&b->keys, &op->stats);

	return MAP_CONTINUE;
}
static int bch_bset_print_stats(struct cache_set *c, char *buf)
{
	struct bset_stats_op op;
	int ret;

	memset(&op, 0, sizeof(op));
	bch_btree_op_init(&op.op, -1);

	ret = bch_btree_map_nodes(&op.op, c, &ZERO_KEY, bch_btree_bset_stats);
	if (ret < 0)
		return ret;

	return snprintf(buf, PAGE_SIZE,
			"btree nodes:\t\t%zu\n"
			"written sets:\t\t%zu\n"
			"unwritten sets:\t\t%zu\n"
			"written key bytes:\t%zu\n"
			"unwritten key bytes:\t%zu\n"
			"floats:\t\t\t%zu\n"
			"failed:\t\t\t%zu\n",
			op.nodes,
			op.stats.sets_written, op.stats.sets_unwritten,
			op.stats.bytes_written, op.stats.bytes_unwritten,
			op.stats.floats, op.stats.failed);
}
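
/*
 * bch_btree_map_nodes() walks every btree node and calls
 * bch_btree_bset_stats() on each; the accumulated totals are then
 * formatted into a single page-sized buffer for the bset_tree_stats
 * attribute.
 */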
static unsigned bch_root_usage(struct cache_set *c)
{
	unsigned bytes = 0;
	struct bkey *k;
	struct btree *b;
	struct btree_iter iter;

	goto lock_root;

	do {
		rw_unlock(false, b);
lock_root:
		b = c->root;
		rw_lock(false, b, b->level);
	} while (b != c->root);

	for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
		bytes += bkey_bytes(k);

	rw_unlock(false, b);

	return (bytes * 100) / btree_bytes(c);
}
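
/*
 * The lock_root loop above re-reads c->root after taking the read lock
 * because the root may be replaced (e.g. by a split) between loading the
 * pointer and acquiring the lock; looping until the locked node is still
 * c->root guarantees a stable root to iterate.
 */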
static size_t bch_cache_size(struct cache_set *c)
{
	size_t ret = 0;
	struct btree *b;

	mutex_lock(&c->bucket_lock);
	list_for_each_entry(b, &c->btree_cache, list)
		ret += 1 << (b->keys.page_order + PAGE_SHIFT);

	mutex_unlock(&c->bucket_lock);
	return ret;
}
static unsigned bch_cache_max_chain(struct cache_set *c)
{
	unsigned ret = 0;
	struct hlist_head *h;

	mutex_lock(&c->bucket_lock);

	for (h = c->bucket_hash;
	     h < c->bucket_hash + (1 << BUCKET_HASH_BITS);
	     h++) {
		unsigned i = 0;
		struct hlist_node *p;

		hlist_for_each(p, h)
			i++;

		ret = max(ret, i);
	}

	mutex_unlock(&c->bucket_lock);
	return ret;
}
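
/*
 * bch_cache_max_chain() reports the longest collision chain in the btree
 * node hash table (1 << BUCKET_HASH_BITS heads); a long chain suggests the
 * hash is undersized for the number of cached btree nodes.
 */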
static unsigned bch_btree_used(struct cache_set *c)
{
	return div64_u64(c->gc_stats.key_bytes * 100,
			 (c->gc_stats.nodes ?: 1) * btree_bytes(c));
}

static unsigned bch_average_key_size(struct cache_set *c)
{
	return c->gc_stats.nkeys
		? div64_u64(c->gc_stats.data, c->gc_stats.nkeys)
		: 0;
}
SHOW(__bch_cache_set)
{
	struct cache_set *c = container_of(kobj, struct cache_set, kobj);

	sysfs_print(synchronous,		CACHE_SYNC(&c->sb));
	sysfs_print(journal_delay_ms,		c->journal_delay_ms);
	sysfs_hprint(bucket_size,		bucket_bytes(c));
	sysfs_hprint(block_size,		block_bytes(c));
	sysfs_print(tree_depth,			c->root->level);
	sysfs_print(root_usage_percent,		bch_root_usage(c));

	sysfs_hprint(btree_cache_size,		bch_cache_size(c));
	sysfs_print(btree_cache_max_chain,	bch_cache_max_chain(c));
	sysfs_print(cache_available_percent,	100 - c->gc_stats.in_use);

	sysfs_print_time_stats(&c->btree_gc_time,	btree_gc, sec, ms);
	sysfs_print_time_stats(&c->btree_split_time,	btree_split, sec, us);
	sysfs_print_time_stats(&c->sort.time,		btree_sort, ms, us);
	sysfs_print_time_stats(&c->btree_read_time,	btree_read, ms, us);

	sysfs_print(btree_used_percent,	bch_btree_used(c));
	sysfs_print(btree_nodes,	c->gc_stats.nodes);
	sysfs_hprint(average_key_size,	bch_average_key_size(c));
	sysfs_print(cache_read_races,
		    atomic_long_read(&c->cache_read_races));

	sysfs_print(reclaim,
		    atomic_long_read(&c->reclaim));

	sysfs_print(flush_write,
		    atomic_long_read(&c->flush_write));

	sysfs_print(retry_flush_write,
		    atomic_long_read(&c->retry_flush_write));

	sysfs_print(writeback_keys_done,
		    atomic_long_read(&c->writeback_keys_done));
	sysfs_print(writeback_keys_failed,
		    atomic_long_read(&c->writeback_keys_failed));
	if (attr == &sysfs_errors)
		return bch_snprint_string_list(buf, PAGE_SIZE, error_actions,
					       c->on_error);

	/* See count_io_errors for why 88 */
	sysfs_print(io_error_halflife,	c->error_decay * 88);
	sysfs_print(io_error_limit,	c->error_limit >> IO_ERROR_SHIFT);
	sysfs_hprint(congested,
		     ((uint64_t) bch_get_congested(c)) << 9);
	sysfs_print(congested_read_threshold_us,
		    c->congested_read_threshold_us);
	sysfs_print(congested_write_threshold_us,
		    c->congested_write_threshold_us);

	sysfs_print(active_journal_entries,	fifo_used(&c->journal.pin));
	sysfs_printf(verify,			"%i", c->verify);
	sysfs_printf(key_merging_disabled,	"%i", c->key_merging_disabled);
	sysfs_printf(expensive_debug_checks,
		     "%i", c->expensive_debug_checks);
	sysfs_printf(gc_always_rewrite,		"%i", c->gc_always_rewrite);
	sysfs_printf(btree_shrinker_disabled,	"%i", c->shrinker_disabled);
	sysfs_printf(copy_gc_enabled,		"%i", c->copy_gc_enabled);
	sysfs_printf(io_disable,		"%i",
		     test_bit(CACHE_SET_IO_DISABLE, &c->flags));
	if (attr == &sysfs_bset_tree_stats)
		return bch_bset_print_stats(c, buf);

	return 0;
}
SHOW_LOCKED(bch_cache_set)
STORE(__bch_cache_set)
{
	struct cache_set *c = container_of(kobj, struct cache_set, kobj);
	ssize_t v;
	if (attr == &sysfs_unregister)
		bch_cache_set_unregister(c);

	if (attr == &sysfs_stop)
		bch_cache_set_stop(c);

	if (attr == &sysfs_synchronous) {
		bool sync = strtoul_or_return(buf);

		if (sync != CACHE_SYNC(&c->sb)) {
			SET_CACHE_SYNC(&c->sb, sync);
			bcache_write_super(c);
		}
	}
	if (attr == &sysfs_flash_vol_create) {
		int r;
		uint64_t v;

		strtoi_h_or_return(buf, v);

		r = bch_flash_dev_create(c, v);
		if (r)
			return r;
	}
	if (attr == &sysfs_clear_stats) {
		atomic_long_set(&c->writeback_keys_done,	0);
		atomic_long_set(&c->writeback_keys_failed,	0);

		memset(&c->gc_stats, 0, sizeof(struct gc_stat));
		bch_cache_accounting_clear(&c->accounting);
	}
	if (attr == &sysfs_trigger_gc) {
		/*
		 * The garbage collection thread only runs while
		 * sectors_to_gc < 0; when users write to the trigger_gc
		 * sysfs entry they usually want to force garbage collection
		 * to run. Setting c->sectors_to_gc to -1 gives
		 * gc_should_run() a chance to let the gc thread run, but
		 * only a chance: before gc_should_run() is evaluated,
		 * c->sectors_to_gc may be reset to another positive value,
		 * so writing trigger_gc does not guarantee the gc thread
		 * actually runs.
		 */
		atomic_set(&c->sectors_to_gc, -1);
		wake_up_gc(c);
	}
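
	/*
	 * Illustrative use from userspace (the cache set UUID path is an
	 * example):
	 *
	 *	echo 1 > /sys/fs/bcache/<set-uuid>/internal/trigger_gc
	 */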
	if (attr == &sysfs_prune_cache) {
		struct shrink_control sc;

		sc.gfp_mask = GFP_KERNEL;
		sc.nr_to_scan = strtoul_or_return(buf);
		c->shrink.scan_objects(&c->shrink, &sc);
	}
	sysfs_strtoul(congested_read_threshold_us,
		      c->congested_read_threshold_us);
	sysfs_strtoul(congested_write_threshold_us,
		      c->congested_write_threshold_us);
	if (attr == &sysfs_errors) {
		ssize_t v = bch_read_string_list(buf, error_actions);

		if (v < 0)
			return v;

		c->on_error = v;
	}
	if (attr == &sysfs_io_error_limit)
		c->error_limit = strtoul_or_return(buf) << IO_ERROR_SHIFT;
	/* See count_io_errors() for why 88 */
	if (attr == &sysfs_io_error_halflife) {
		unsigned long v = 0;
		ssize_t ret;

		ret = strtoul_safe_clamp(buf, v, 0, UINT_MAX);
		if (!ret) {
			c->error_decay = v / 88;
			ret = size;
		}
		return ret;
	}
	if (attr == &sysfs_io_disable) {
		int v = strtoul_or_return(buf);

		if (v) {
			if (test_and_set_bit(CACHE_SET_IO_DISABLE,
					     &c->flags))
				pr_warn("CACHE_SET_IO_DISABLE already set");
		} else {
			if (!test_and_clear_bit(CACHE_SET_IO_DISABLE,
						&c->flags))
				pr_warn("CACHE_SET_IO_DISABLE already cleared");
		}
	}
	sysfs_strtoul(journal_delay_ms,		c->journal_delay_ms);
	sysfs_strtoul(verify,			c->verify);
	sysfs_strtoul(key_merging_disabled,	c->key_merging_disabled);
	sysfs_strtoul(expensive_debug_checks,	c->expensive_debug_checks);
	sysfs_strtoul(gc_always_rewrite,	c->gc_always_rewrite);
	sysfs_strtoul(btree_shrinker_disabled,	c->shrinker_disabled);
	sysfs_strtoul(copy_gc_enabled,		c->copy_gc_enabled);

	return size;
}
STORE_LOCKED(bch_cache_set)

SHOW(bch_cache_set_internal)
{
	struct cache_set *c = container_of(kobj, struct cache_set, internal);

	return bch_cache_set_show(&c->kobj, attr, buf);
}

STORE(bch_cache_set_internal)
{
	struct cache_set *c = container_of(kobj, struct cache_set, internal);

	return bch_cache_set_store(&c->kobj, attr, buf, size);
}
static void bch_cache_set_internal_release(struct kobject *k)
{
}
static struct attribute *bch_cache_set_files[] = {
	&sysfs_unregister,
	&sysfs_stop,
	&sysfs_synchronous,
	&sysfs_journal_delay_ms,
	&sysfs_flash_vol_create,

	&sysfs_bucket_size,
	&sysfs_block_size,
	&sysfs_tree_depth,
	&sysfs_root_usage_percent,
	&sysfs_btree_cache_size,
	&sysfs_cache_available_percent,

	&sysfs_average_key_size,

	&sysfs_errors,
	&sysfs_io_error_limit,
	&sysfs_io_error_halflife,
	&sysfs_congested,
	&sysfs_congested_read_threshold_us,
	&sysfs_congested_write_threshold_us,
	&sysfs_clear_stats,
	NULL
};
KTYPE(bch_cache_set);
static struct attribute *bch_cache_set_internal_files[] = {
	&sysfs_active_journal_entries,

	sysfs_time_stats_attribute_list(btree_gc, sec, ms)
	sysfs_time_stats_attribute_list(btree_split, sec, us)
	sysfs_time_stats_attribute_list(btree_sort, ms, us)
	sysfs_time_stats_attribute_list(btree_read, ms, us)

	&sysfs_btree_nodes,
	&sysfs_btree_used_percent,
	&sysfs_btree_cache_max_chain,

	&sysfs_bset_tree_stats,
	&sysfs_cache_read_races,
	&sysfs_reclaim,
	&sysfs_flush_write,
	&sysfs_retry_flush_write,
	&sysfs_writeback_keys_done,
	&sysfs_writeback_keys_failed,

	&sysfs_trigger_gc,
	&sysfs_prune_cache,
#ifdef CONFIG_BCACHE_DEBUG
	&sysfs_verify,
	&sysfs_key_merging_disabled,
	&sysfs_expensive_debug_checks,
#endif
	&sysfs_gc_always_rewrite,
	&sysfs_btree_shrinker_disabled,
	&sysfs_copy_gc_enabled,
	&sysfs_io_disable,
	NULL
};
KTYPE(bch_cache_set_internal);
static int __bch_cache_cmp(const void *l, const void *r)
{
	return *((uint16_t *)r) - *((uint16_t *)l);
}
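
/*
 * The comparison is (r - l), i.e. reversed, so sort() below orders the
 * bucket priorities descending.
 */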
SHOW(__bch_cache)
{
	struct cache *ca = container_of(kobj, struct cache, kobj);

	sysfs_hprint(bucket_size,	bucket_bytes(ca));
	sysfs_hprint(block_size,	block_bytes(ca));
	sysfs_print(nbuckets,		ca->sb.nbuckets);
	sysfs_print(discard,		ca->discard);
	sysfs_hprint(written, atomic_long_read(&ca->sectors_written) << 9);
	sysfs_hprint(btree_written,
		     atomic_long_read(&ca->btree_sectors_written) << 9);
	sysfs_hprint(metadata_written,
		     (atomic_long_read(&ca->meta_sectors_written) +
		      atomic_long_read(&ca->btree_sectors_written)) << 9);

	sysfs_print(io_errors,
		    atomic_read(&ca->io_errors) >> IO_ERROR_SHIFT);
	if (attr == &sysfs_cache_replacement_policy)
		return bch_snprint_string_list(buf, PAGE_SIZE,
					       cache_replacement_policies,
					       CACHE_REPLACEMENT(&ca->sb));
	if (attr == &sysfs_priority_stats) {
		struct bucket *b;
		size_t n = ca->sb.nbuckets, i;
		size_t unused = 0, available = 0, dirty = 0, meta = 0;
		uint64_t sum = 0;
		/* Compute 31 quantiles */
		uint16_t q[31], *p, *cached;
		ssize_t ret;

		cached = p = vmalloc(ca->sb.nbuckets * sizeof(uint16_t));
		if (!p)
			return -ENOMEM;

		mutex_lock(&ca->set->bucket_lock);
		for_each_bucket(b, ca) {
			if (!GC_SECTORS_USED(b))
				unused++;
			if (GC_MARK(b) == GC_MARK_RECLAIMABLE)
				available++;
			if (GC_MARK(b) == GC_MARK_DIRTY)
				dirty++;
			if (GC_MARK(b) == GC_MARK_METADATA)
				meta++;
		}

		for (i = ca->sb.first_bucket; i < n; i++)
			p[i] = ca->buckets[i].prio;
		mutex_unlock(&ca->set->bucket_lock);

		sort(p, n, sizeof(uint16_t), __bch_cache_cmp, NULL);

		while (n &&
		       !cached[n - 1])
			--n;

		unused = ca->sb.nbuckets - n;

		while (cached < p + n &&
		       *cached == BTREE_PRIO)
			cached++, n--;

		for (i = 0; i < n; i++)
			sum += INITIAL_PRIO - cached[i];

		if (n)
			do_div(sum, n);

		for (i = 0; i < ARRAY_SIZE(q); i++)
			q[i] = INITIAL_PRIO - cached[n * (i + 1) /
				(ARRAY_SIZE(q) + 1)];

		vfree(p);

		ret = scnprintf(buf, PAGE_SIZE,
				"Unused:\t\t%zu%%\n"
				"Clean:\t\t%zu%%\n"
				"Dirty:\t\t%zu%%\n"
				"Metadata:\t%zu%%\n"
				"Average:\t%llu\n"
				"Sectors per Q:\t%zu\n"
				"Quantiles:\t[",
				unused * 100 / (size_t) ca->sb.nbuckets,
				available * 100 / (size_t) ca->sb.nbuckets,
				dirty * 100 / (size_t) ca->sb.nbuckets,
				meta * 100 / (size_t) ca->sb.nbuckets, sum,
				n * ca->sb.bucket_size / (ARRAY_SIZE(q) + 1));

		for (i = 0; i < ARRAY_SIZE(q); i++)
			ret += scnprintf(buf + ret, PAGE_SIZE - ret,
					 "%u ", q[i]);
		ret--;
		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "]\n");

		return ret;
	}

	return 0;
}
SHOW_LOCKED(bch_cache)
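
/*
 * The priority_stats read above sorts the per-bucket priorities descending,
 * strips unused and btree buckets, and then samples 31 evenly spaced
 * quantiles: with n data buckets, q[i] is taken at index n*(i+1)/32, so for
 * n = 3200 the samples sit at indices 100, 200, ..., 3100.
 */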
STORE(__bch_cache)
{
	struct cache *ca = container_of(kobj, struct cache, kobj);

	if (attr == &sysfs_discard) {
		bool v = strtoul_or_return(buf);

		if (blk_queue_discard(bdev_get_queue(ca->bdev)))
			ca->discard = v;

		if (v != CACHE_DISCARD(&ca->sb)) {
			SET_CACHE_DISCARD(&ca->sb, v);
			bcache_write_super(ca->set);
		}
	}
	if (attr == &sysfs_cache_replacement_policy) {
		ssize_t v = bch_read_string_list(buf, cache_replacement_policies);

		if (v < 0)
			return v;

		if ((unsigned) v != CACHE_REPLACEMENT(&ca->sb)) {
			mutex_lock(&ca->set->bucket_lock);
			SET_CACHE_REPLACEMENT(&ca->sb, v);
			mutex_unlock(&ca->set->bucket_lock);

			bcache_write_super(ca->set);
		}
	}
== &sysfs_clear_stats
) {
1036 atomic_long_set(&ca
->sectors_written
, 0);
1037 atomic_long_set(&ca
->btree_sectors_written
, 0);
1038 atomic_long_set(&ca
->meta_sectors_written
, 0);
1039 atomic_set(&ca
->io_count
, 0);
1040 atomic_set(&ca
->io_errors
, 0);
1045 STORE_LOCKED(bch_cache
)
static struct attribute *bch_cache_files[] = {
	&sysfs_bucket_size,
	&sysfs_block_size,
	&sysfs_nbuckets,
	&sysfs_priority_stats,
	&sysfs_discard,
	&sysfs_written,
	&sysfs_btree_written,
	&sysfs_metadata_written,
	&sysfs_io_errors,
	&sysfs_clear_stats,
	&sysfs_cache_replacement_policy,