// SPDX-License-Identifier: GPL-2.0
/*
 * bcache sysfs interfaces
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "sysfs.h"
#include "btree.h"
#include "request.h"
#include "writeback.h"

#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/sched/clock.h>

/* Default is -1; we skip past it for struct cached_dev's cache mode */
static const char * const bch_cache_modes[] = {
	"writethrough",
	"writeback",
	"writearound",
	"none",
	NULL
};

/* Default is -1; we skip past it for stop_when_cache_set_failed */
static const char * const bch_stop_on_failure_modes[] = {
	"auto",
	"always",
	NULL
};

static const char * const cache_replacement_policies[] = {
	"lru",
	"fifo",
	"random",
	NULL
};

static const char * const error_actions[] = {
	"unregister",
	"panic",
	NULL
};
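
/*
 * The attribute declarations below come from sysfs.h: each macro expands
 * to a struct attribute named sysfs_<name> with the matching permission
 * bits, and the show/store handlers later dispatch on it by address
 * (attr == &sysfs_<name>).
 */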

write_attribute(attach);
write_attribute(detach);
write_attribute(unregister);
write_attribute(stop);
write_attribute(clear_stats);
write_attribute(trigger_gc);
write_attribute(prune_cache);
write_attribute(flash_vol_create);

read_attribute(bucket_size);
read_attribute(block_size);
read_attribute(nbuckets);
read_attribute(tree_depth);
read_attribute(root_usage_percent);
read_attribute(priority_stats);
read_attribute(btree_cache_size);
read_attribute(btree_cache_max_chain);
read_attribute(cache_available_percent);
read_attribute(written);
read_attribute(btree_written);
read_attribute(metadata_written);
read_attribute(active_journal_entries);

sysfs_time_stats_attribute(btree_gc,	sec, ms);
sysfs_time_stats_attribute(btree_split, sec, us);
sysfs_time_stats_attribute(btree_sort,	ms,  us);
sysfs_time_stats_attribute(btree_read,	ms,  us);

read_attribute(btree_nodes);
read_attribute(btree_used_percent);
read_attribute(average_key_size);
read_attribute(dirty_data);
read_attribute(bset_tree_stats);

read_attribute(state);
read_attribute(cache_read_races);
read_attribute(reclaim);
read_attribute(flush_write);
read_attribute(retry_flush_write);
read_attribute(writeback_keys_done);
read_attribute(writeback_keys_failed);
read_attribute(io_errors);
read_attribute(congested);
rw_attribute(congested_read_threshold_us);
rw_attribute(congested_write_threshold_us);

rw_attribute(sequential_cutoff);
rw_attribute(data_csum);
rw_attribute(cache_mode);
rw_attribute(stop_when_cache_set_failed);
rw_attribute(writeback_metadata);
rw_attribute(writeback_running);
rw_attribute(writeback_percent);
rw_attribute(writeback_delay);
rw_attribute(writeback_rate);

rw_attribute(writeback_rate_update_seconds);
rw_attribute(writeback_rate_i_term_inverse);
rw_attribute(writeback_rate_p_term_inverse);
rw_attribute(writeback_rate_minimum);
read_attribute(writeback_rate_debug);

read_attribute(stripe_size);
read_attribute(partial_stripes_expensive);

rw_attribute(synchronous);
rw_attribute(journal_delay_ms);
rw_attribute(io_disable);
rw_attribute(discard);
rw_attribute(running);
rw_attribute(label);
rw_attribute(readahead);
rw_attribute(errors);
rw_attribute(io_error_limit);
rw_attribute(io_error_halflife);
rw_attribute(verify);
rw_attribute(bypass_torture_test);
rw_attribute(key_merging_disabled);
rw_attribute(gc_always_rewrite);
rw_attribute(expensive_debug_checks);
rw_attribute(cache_replacement_policy);
rw_attribute(btree_shrinker_disabled);
rw_attribute(copy_gc_enabled);
rw_attribute(size);

static ssize_t bch_snprint_string_list(char *buf, size_t size,
				       const char * const list[],
				       size_t selected)
{
	char *out = buf;
	size_t i;

	for (i = 0; list[i]; i++)
		out += snprintf(out, buf + size - out,
				i == selected ? "[%s] " : "%s ", list[i]);

	out[-1] = '\n';
	return out - buf;
}
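
/*
 * Example: reading cache_mode through the helper above produces a single
 * line with the active entry bracketed, e.g.
 *	writethrough [writeback] writearound none
 */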

SHOW(__bch_cached_dev)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);
	const char *states[] = { "no cache", "clean", "dirty", "inconsistent" };

#define var(stat)		(dc->stat)

	if (attr == &sysfs_cache_mode)
		return bch_snprint_string_list(buf, PAGE_SIZE,
					       bch_cache_modes,
					       BDEV_CACHE_MODE(&dc->sb));

	if (attr == &sysfs_stop_when_cache_set_failed)
		return bch_snprint_string_list(buf, PAGE_SIZE,
					       bch_stop_on_failure_modes,
					       dc->stop_when_cache_set_failed);

	sysfs_printf(data_csum,		"%i", dc->disk.data_csum);
	var_printf(verify,		"%i");
	var_printf(bypass_torture_test,	"%i");
	var_printf(writeback_metadata,	"%i");
	var_printf(writeback_running,	"%i");
	var_print(writeback_delay);
	var_print(writeback_percent);
	sysfs_hprint(writeback_rate,	dc->writeback_rate.rate << 9);
	sysfs_hprint(io_errors,		atomic_read(&dc->io_errors));
	sysfs_printf(io_error_limit,	"%i", dc->error_limit);
	sysfs_printf(io_disable,	"%i", dc->io_disable);
	var_print(writeback_rate_update_seconds);
	var_print(writeback_rate_i_term_inverse);
	var_print(writeback_rate_p_term_inverse);
	var_print(writeback_rate_minimum);
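
	/*
	 * writeback_rate_debug dumps the writeback PI controller state in
	 * one read: current rate, amount of dirty data, the dirty target,
	 * the proportional and integral terms, the last rate change, and
	 * the time until the next writeback I/O. Sector counts are shifted
	 * left by 9 to print bytes.
	 */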
	if (attr == &sysfs_writeback_rate_debug) {
		char rate[20];
		char dirty[20];
		char target[20];
		char proportional[20];
		char integral[20];
		char change[20];
		s64 next_io;

		bch_hprint(rate,	dc->writeback_rate.rate << 9);
		bch_hprint(dirty,	bcache_dev_sectors_dirty(&dc->disk) << 9);
		bch_hprint(target,	dc->writeback_rate_target << 9);
		bch_hprint(proportional, dc->writeback_rate_proportional << 9);
		bch_hprint(integral,	dc->writeback_rate_integral_scaled << 9);
		bch_hprint(change,	dc->writeback_rate_change << 9);

		next_io = div64_s64(dc->writeback_rate.next - local_clock(),
				    NSEC_PER_MSEC);

		return sprintf(buf,
			       "rate:\t\t%s/sec\n"
			       "dirty:\t\t%s\n"
			       "target:\t\t%s\n"
			       "proportional:\t%s\n"
			       "integral:\t%s\n"
			       "change:\t\t%s/sec\n"
			       "next io:\t%llims\n",
			       rate, dirty, target, proportional,
			       integral, change, next_io);
	}

	sysfs_hprint(dirty_data,
		     bcache_dev_sectors_dirty(&dc->disk) << 9);

	sysfs_hprint(stripe_size,	((uint64_t)dc->disk.stripe_size) << 9);
	var_printf(partial_stripes_expensive,	"%u");

	var_hprint(sequential_cutoff);
	var_hprint(readahead);

	sysfs_print(running,		atomic_read(&dc->running));
	sysfs_print(state,		states[BDEV_STATE(&dc->sb)]);

	if (attr == &sysfs_label) {
		memcpy(buf, dc->sb.label, SB_LABEL_SIZE);
		buf[SB_LABEL_SIZE] = '\0';
		strcat(buf, "\n");
		return strlen(buf);
	}

#undef var
	return 0;
}
SHOW_LOCKED(bch_cached_dev)
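
/*
 * SHOW_LOCKED()/STORE_LOCKED() (from sysfs.h) wrap the __-prefixed handlers
 * in bch_register_lock, so the unlocked variants can assume the set of
 * registered devices stays stable while they run.
 */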

STORE(__cached_dev)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);
	ssize_t v;
	struct cache_set *c;
	struct kobj_uevent_env *env;

#define d_strtoul(var)		sysfs_strtoul(var, dc->var)
#define d_strtoul_nonzero(var)	sysfs_strtoul_clamp(var, dc->var, 1, INT_MAX)
#define d_strtoi_h(var)		sysfs_hatoi(var, dc->var)

	sysfs_strtoul(data_csum,	dc->disk.data_csum);
	d_strtoul(verify);
	d_strtoul(bypass_torture_test);
	d_strtoul(writeback_metadata);
	d_strtoul(writeback_running);
	d_strtoul(writeback_delay);

	sysfs_strtoul_clamp(writeback_percent, dc->writeback_percent, 0, 40);

	sysfs_strtoul_clamp(writeback_rate,
			    dc->writeback_rate.rate, 1, INT_MAX);

	sysfs_strtoul_clamp(writeback_rate_update_seconds,
			    dc->writeback_rate_update_seconds,
			    1, WRITEBACK_RATE_UPDATE_SECS_MAX);
	d_strtoul(writeback_rate_i_term_inverse);
	d_strtoul_nonzero(writeback_rate_p_term_inverse);

	sysfs_strtoul_clamp(io_error_limit, dc->error_limit, 0, INT_MAX);

	if (attr == &sysfs_io_disable) {
		int v = strtoul_or_return(buf);

		dc->io_disable = v ? 1 : 0;
	}

	d_strtoi_h(sequential_cutoff);
	d_strtoi_h(readahead);

	if (attr == &sysfs_clear_stats)
		bch_cache_accounting_clear(&dc->accounting);

	if (attr == &sysfs_running &&
	    strtoul_or_return(buf))
		bch_cached_dev_run(dc);

	if (attr == &sysfs_cache_mode) {
		v = __sysfs_match_string(bch_cache_modes, -1, buf);
		if (v < 0)
			return v;

		if ((unsigned int) v != BDEV_CACHE_MODE(&dc->sb)) {
			SET_BDEV_CACHE_MODE(&dc->sb, v);
			bch_write_bdev_super(dc, NULL);
		}
	}

	if (attr == &sysfs_stop_when_cache_set_failed) {
		v = __sysfs_match_string(bch_stop_on_failure_modes, -1, buf);
		if (v < 0)
			return v;

		dc->stop_when_cache_set_failed = v;
	}

	if (attr == &sysfs_label) {
		if (size > SB_LABEL_SIZE)
			return -EINVAL;
		memcpy(dc->sb.label, buf, size);
		if (size < SB_LABEL_SIZE)
			dc->sb.label[size] = '\0';
		if (size && dc->sb.label[size - 1] == '\n')
			dc->sb.label[size - 1] = '\0';
		bch_write_bdev_super(dc, NULL);
		if (dc->disk.c) {
			memcpy(dc->disk.c->uuids[dc->disk.id].label,
			       buf, SB_LABEL_SIZE);
			bch_uuid_write(dc->disk.c);
		}

		env = kzalloc(sizeof(struct kobj_uevent_env), GFP_KERNEL);
		if (!env)
			return -ENOMEM;
		add_uevent_var(env, "DRIVER=bcache");
		add_uevent_var(env, "CACHED_UUID=%pU", dc->sb.uuid);
		add_uevent_var(env, "CACHED_LABEL=%s", buf);
		kobject_uevent_env(&disk_to_dev(dc->disk.disk)->kobj,
				   KOBJ_CHANGE, env->envp);
		kfree(env);
	}

	if (attr == &sysfs_attach) {
		uint8_t set_uuid[16];

		if (bch_parse_uuid(buf, set_uuid) < 16)
			return -EINVAL;

		v = -ENOENT;
		list_for_each_entry(c, &bch_cache_sets, list) {
			v = bch_cached_dev_attach(dc, c, set_uuid);
			if (!v)
				return size;
		}

		pr_err("Can't attach %s: cache set not found", buf);
		return v;
	}

	if (attr == &sysfs_detach && dc->disk.c)
		bch_cached_dev_detach(dc);

	if (attr == &sysfs_stop)
		bcache_device_stop(&dc->disk);

	return size;
}
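
/*
 * The locked wrapper below re-checks two attributes after __cached_dev_store()
 * returns: writing writeback_running pokes the writeback thread so a newly
 * enabled writeback starts promptly, and writing writeback_percent (re)arms
 * the periodic rate-update worker if it is not already scheduled.
 */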

STORE(bch_cached_dev)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);

	mutex_lock(&bch_register_lock);
	size = __cached_dev_store(kobj, attr, buf, size);

	if (attr == &sysfs_writeback_running)
		bch_writeback_queue(dc);

	if (attr == &sysfs_writeback_percent)
		if (!test_and_set_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags))
			schedule_delayed_work(&dc->writeback_rate_update,
				      dc->writeback_rate_update_seconds * HZ);

	mutex_unlock(&bch_register_lock);
	return size;
}

static struct attribute *bch_cached_dev_files[] = {
	&sysfs_attach,
	&sysfs_detach,
	&sysfs_stop,
#if 0
	&sysfs_data_csum,
#endif
	&sysfs_cache_mode,
	&sysfs_stop_when_cache_set_failed,
	&sysfs_writeback_metadata,
	&sysfs_writeback_running,
	&sysfs_writeback_delay,
	&sysfs_writeback_percent,
	&sysfs_writeback_rate,
	&sysfs_writeback_rate_update_seconds,
	&sysfs_writeback_rate_i_term_inverse,
	&sysfs_writeback_rate_p_term_inverse,
	&sysfs_writeback_rate_debug,
	&sysfs_errors,
	&sysfs_io_error_limit,
	&sysfs_io_disable,
	&sysfs_dirty_data,
	&sysfs_stripe_size,
	&sysfs_partial_stripes_expensive,
	&sysfs_sequential_cutoff,
	&sysfs_clear_stats,
	&sysfs_running,
	&sysfs_state,
	&sysfs_label,
	&sysfs_readahead,
#ifdef CONFIG_BCACHE_DEBUG
	&sysfs_verify,
	&sysfs_bypass_torture_test,
#endif
	NULL
};
KTYPE(bch_cached_dev);
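
/*
 * Flash-only volumes are bcache devices carved directly out of cache set
 * space, with no backing device; their size and label live in the cache
 * set's uuid entry rather than in a backing device superblock.
 */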

SHOW(bch_flash_dev)
{
	struct bcache_device *d = container_of(kobj, struct bcache_device,
					       kobj);
	struct uuid_entry *u = &d->c->uuids[d->id];

	sysfs_printf(data_csum,	"%i", d->data_csum);
	sysfs_hprint(size,	u->sectors << 9);

	if (attr == &sysfs_label) {
		memcpy(buf, u->label, SB_LABEL_SIZE);
		buf[SB_LABEL_SIZE] = '\0';
		strcat(buf, "\n");
		return strlen(buf);
	}

	return 0;
}

STORE(__bch_flash_dev)
{
	struct bcache_device *d = container_of(kobj, struct bcache_device,
					       kobj);
	struct uuid_entry *u = &d->c->uuids[d->id];

	sysfs_strtoul(data_csum,	d->data_csum);

	if (attr == &sysfs_size) {
		uint64_t v;

		strtoi_h_or_return(buf, v);

		u->sectors = v >> 9;
		bch_uuid_write(d->c);
		set_capacity(d->disk, u->sectors);
	}

	if (attr == &sysfs_label) {
		memcpy(u->label, buf, SB_LABEL_SIZE);
		bch_uuid_write(d->c);
	}

	if (attr == &sysfs_unregister) {
		set_bit(BCACHE_DEV_DETACHING, &d->flags);
		bcache_device_stop(d);
	}

	return size;
}
STORE_LOCKED(bch_flash_dev)

static struct attribute *bch_flash_dev_files[] = {
	&sysfs_unregister,
#if 0
	&sysfs_data_csum,
#endif
	&sysfs_label,
	&sysfs_size,
	NULL
};
KTYPE(bch_flash_dev);

struct bset_stats_op {
	struct btree_op op;
	size_t nodes;
	struct bset_stats stats;
};

static int bch_btree_bset_stats(struct btree_op *b_op, struct btree *b)
{
	struct bset_stats_op *op = container_of(b_op, struct bset_stats_op, op);

	op->nodes++;
	bch_btree_keys_stats(&b->keys, &op->stats);

	return MAP_CONTINUE;
}

static int bch_bset_print_stats(struct cache_set *c, char *buf)
{
	struct bset_stats_op op;
	int ret;

	memset(&op, 0, sizeof(op));
	bch_btree_op_init(&op.op, -1);

	ret = bch_btree_map_nodes(&op.op, c, &ZERO_KEY, bch_btree_bset_stats);
	if (ret < 0)
		return ret;

	return snprintf(buf, PAGE_SIZE,
			"btree nodes:		%zu\n"
			"written sets:		%zu\n"
			"unwritten sets:		%zu\n"
			"written key bytes:	%zu\n"
			"unwritten key bytes:	%zu\n"
			"floats:			%zu\n"
			"failed:			%zu\n",
			op.nodes,
			op.stats.sets_written, op.stats.sets_unwritten,
			op.stats.bytes_written, op.stats.bytes_unwritten,
			op.stats.floats, op.stats.failed);
}
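
/*
 * bch_root_usage() needs a read lock on the root node, but the root may be
 * replaced (e.g. by a split) while we sleep on the lock, so the loop below
 * retries until the node it locked is still c->root.
 */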
static unsigned bch_root_usage(struct cache_set *c)
{
	unsigned bytes = 0;
	struct bkey *k;
	struct btree *b;
	struct btree_iter iter;

	goto lock_root;

	do {
		rw_unlock(false, b);
lock_root:
		b = c->root;
		rw_lock(false, b, b->level);
	} while (b != c->root);

	for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
		bytes += bkey_bytes(k);

	rw_unlock(false, b);

	return (bytes * 100) / btree_bytes(c);
}

static size_t bch_cache_size(struct cache_set *c)
{
	size_t ret = 0;
	struct btree *b;

	mutex_lock(&c->bucket_lock);
	list_for_each_entry(b, &c->btree_cache, list)
		ret += 1 << (b->keys.page_order + PAGE_SHIFT);

	mutex_unlock(&c->bucket_lock);
	return ret;
}

static unsigned bch_cache_max_chain(struct cache_set *c)
{
	unsigned ret = 0;
	struct hlist_head *h;

	mutex_lock(&c->bucket_lock);

	for (h = c->bucket_hash;
	     h < c->bucket_hash + (1 << BUCKET_HASH_BITS);
	     h++) {
		unsigned i = 0;
		struct hlist_node *p;

		hlist_for_each(p, h)
			i++;

		ret = max(ret, i);
	}

	mutex_unlock(&c->bucket_lock);
	return ret;
}

static unsigned bch_btree_used(struct cache_set *c)
{
	return div64_u64(c->gc_stats.key_bytes * 100,
			 (c->gc_stats.nodes ?: 1) * btree_bytes(c));
}

static unsigned bch_average_key_size(struct cache_set *c)
{
	return c->gc_stats.nkeys
		? div64_u64(c->gc_stats.data, c->gc_stats.nkeys)
		: 0;
}
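
/*
 * Note that most of the cache set numbers below come from gc_stats, so they
 * are only as fresh as the last garbage collection pass.
 */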

SHOW(__bch_cache_set)
{
	struct cache_set *c = container_of(kobj, struct cache_set, kobj);

	sysfs_print(synchronous,		CACHE_SYNC(&c->sb));
	sysfs_print(journal_delay_ms,		c->journal_delay_ms);
	sysfs_hprint(bucket_size,		bucket_bytes(c));
	sysfs_hprint(block_size,		block_bytes(c));
	sysfs_print(tree_depth,			c->root->level);
	sysfs_print(root_usage_percent,		bch_root_usage(c));

	sysfs_hprint(btree_cache_size,		bch_cache_size(c));
	sysfs_print(btree_cache_max_chain,	bch_cache_max_chain(c));
	sysfs_print(cache_available_percent,	100 - c->gc_stats.in_use);

	sysfs_print_time_stats(&c->btree_gc_time,	btree_gc, sec, ms);
	sysfs_print_time_stats(&c->btree_split_time,	btree_split, sec, us);
	sysfs_print_time_stats(&c->sort.time,		btree_sort, ms, us);
	sysfs_print_time_stats(&c->btree_read_time,	btree_read, ms, us);

	sysfs_print(btree_used_percent,	bch_btree_used(c));
	sysfs_print(btree_nodes,	c->gc_stats.nodes);
	sysfs_hprint(average_key_size,	bch_average_key_size(c));

	sysfs_print(cache_read_races,
		    atomic_long_read(&c->cache_read_races));

	sysfs_print(reclaim,
		    atomic_long_read(&c->reclaim));

	sysfs_print(flush_write,
		    atomic_long_read(&c->flush_write));

	sysfs_print(retry_flush_write,
		    atomic_long_read(&c->retry_flush_write));

	sysfs_print(writeback_keys_done,
		    atomic_long_read(&c->writeback_keys_done));
	sysfs_print(writeback_keys_failed,
		    atomic_long_read(&c->writeback_keys_failed));

	if (attr == &sysfs_errors)
		return bch_snprint_string_list(buf, PAGE_SIZE, error_actions,
					       c->on_error);

	/* See count_io_errors() for why 88 */
	sysfs_print(io_error_halflife,	c->error_decay * 88);
	sysfs_print(io_error_limit,	c->error_limit);

	sysfs_hprint(congested,
		     ((uint64_t) bch_get_congested(c)) << 9);
	sysfs_print(congested_read_threshold_us,
		    c->congested_read_threshold_us);
	sysfs_print(congested_write_threshold_us,
		    c->congested_write_threshold_us);

	sysfs_print(active_journal_entries,	fifo_used(&c->journal.pin));
	sysfs_printf(verify,			"%i", c->verify);
	sysfs_printf(key_merging_disabled,	"%i", c->key_merging_disabled);
	sysfs_printf(expensive_debug_checks,
		     "%i", c->expensive_debug_checks);
	sysfs_printf(gc_always_rewrite,		"%i", c->gc_always_rewrite);
	sysfs_printf(btree_shrinker_disabled,	"%i", c->shrinker_disabled);
	sysfs_printf(copy_gc_enabled,		"%i", c->copy_gc_enabled);
	sysfs_printf(io_disable,		"%i",
		     test_bit(CACHE_SET_IO_DISABLE, &c->flags));

	if (attr == &sysfs_bset_tree_stats)
		return bch_bset_print_stats(c, buf);

	return 0;
}
SHOW_LOCKED(bch_cache_set)

STORE(__bch_cache_set)
{
	struct cache_set *c = container_of(kobj, struct cache_set, kobj);
	ssize_t v;

	if (attr == &sysfs_unregister)
		bch_cache_set_unregister(c);

	if (attr == &sysfs_stop)
		bch_cache_set_stop(c);

	if (attr == &sysfs_synchronous) {
		bool sync = strtoul_or_return(buf);

		if (sync != CACHE_SYNC(&c->sb)) {
			SET_CACHE_SYNC(&c->sb, sync);
			bcache_write_super(c);
		}
	}

	if (attr == &sysfs_flash_vol_create) {
		int r;
		uint64_t v;

		strtoi_h_or_return(buf, v);

		r = bch_flash_dev_create(c, v);
		if (r)
			return r;
	}

	if (attr == &sysfs_clear_stats) {
		atomic_long_set(&c->writeback_keys_done,	0);
		atomic_long_set(&c->writeback_keys_failed,	0);

		memset(&c->gc_stats, 0, sizeof(struct gc_stat));
		bch_cache_accounting_clear(&c->accounting);
	}

	if (attr == &sysfs_trigger_gc) {
		/*
		 * The garbage collection thread only runs when sectors_to_gc
		 * is negative; when users write to the trigger_gc sysfs
		 * entry, they usually want to force garbage collection.
		 * Setting c->sectors_to_gc to -1 here lets gc_should_run()
		 * give the gc thread a chance to run. Only "a chance",
		 * because c->sectors_to_gc may still be set to another
		 * positive value before gc_should_run() is reached, so
		 * writing trigger_gc does not guarantee that the gc thread
		 * actually runs.
		 */
		atomic_set(&c->sectors_to_gc, -1);
		wake_up_gc(c);
	}

	if (attr == &sysfs_prune_cache) {
		struct shrink_control sc;

		sc.gfp_mask = GFP_KERNEL;
		sc.nr_to_scan = strtoul_or_return(buf);
		c->shrink.scan_objects(&c->shrink, &sc);
	}

	sysfs_strtoul(congested_read_threshold_us,
		      c->congested_read_threshold_us);
	sysfs_strtoul(congested_write_threshold_us,
		      c->congested_write_threshold_us);

	if (attr == &sysfs_errors) {
		v = __sysfs_match_string(error_actions, -1, buf);
		if (v < 0)
			return v;

		c->on_error = v;
	}

	if (attr == &sysfs_io_error_limit)
		c->error_limit = strtoul_or_return(buf);

	/* See count_io_errors() for why 88 */
	if (attr == &sysfs_io_error_halflife)
		c->error_decay = strtoul_or_return(buf) / 88;

	if (attr == &sysfs_io_disable) {
		v = strtoul_or_return(buf);
		if (v) {
			if (test_and_set_bit(CACHE_SET_IO_DISABLE,
					     &c->flags))
				pr_warn("CACHE_SET_IO_DISABLE already set");
		} else {
			if (!test_and_clear_bit(CACHE_SET_IO_DISABLE,
						&c->flags))
				pr_warn("CACHE_SET_IO_DISABLE already cleared");
		}
	}

	sysfs_strtoul(journal_delay_ms,		c->journal_delay_ms);
	sysfs_strtoul(verify,			c->verify);
	sysfs_strtoul(key_merging_disabled,	c->key_merging_disabled);
	sysfs_strtoul(expensive_debug_checks,	c->expensive_debug_checks);
	sysfs_strtoul(gc_always_rewrite,	c->gc_always_rewrite);
	sysfs_strtoul(btree_shrinker_disabled,	c->shrinker_disabled);
	sysfs_strtoul(copy_gc_enabled,		c->copy_gc_enabled);

	return size;
}
STORE_LOCKED(bch_cache_set)
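
/*
 * The "internal" directory is a second kobject embedded in struct cache_set;
 * its show/store simply redirect to the main cache set handlers, so both
 * sysfs directories share one implementation.
 */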

SHOW(bch_cache_set_internal)
{
	struct cache_set *c = container_of(kobj, struct cache_set, internal);
	return bch_cache_set_show(&c->kobj, attr, buf);
}

STORE(bch_cache_set_internal)
{
	struct cache_set *c = container_of(kobj, struct cache_set, internal);
	return bch_cache_set_store(&c->kobj, attr, buf, size);
}

static void bch_cache_set_internal_release(struct kobject *k)
{
}

static struct attribute *bch_cache_set_files[] = {
	&sysfs_unregister,
	&sysfs_stop,
	&sysfs_synchronous,
	&sysfs_journal_delay_ms,
	&sysfs_flash_vol_create,

	&sysfs_bucket_size,
	&sysfs_block_size,
	&sysfs_tree_depth,
	&sysfs_root_usage_percent,
	&sysfs_btree_cache_size,
	&sysfs_cache_available_percent,

	&sysfs_average_key_size,

	&sysfs_errors,
	&sysfs_io_error_limit,
	&sysfs_io_error_halflife,
	&sysfs_congested,
	&sysfs_congested_read_threshold_us,
	&sysfs_congested_write_threshold_us,
	&sysfs_clear_stats,
	NULL
};
KTYPE(bch_cache_set);

static struct attribute *bch_cache_set_internal_files[] = {
	&sysfs_active_journal_entries,

	sysfs_time_stats_attribute_list(btree_gc, sec, ms)
	sysfs_time_stats_attribute_list(btree_split, sec, us)
	sysfs_time_stats_attribute_list(btree_sort, ms, us)
	sysfs_time_stats_attribute_list(btree_read, ms, us)

	&sysfs_btree_nodes,
	&sysfs_btree_used_percent,
	&sysfs_btree_cache_max_chain,

	&sysfs_bset_tree_stats,
	&sysfs_cache_read_races,
	&sysfs_reclaim,
	&sysfs_flush_write,
	&sysfs_retry_flush_write,
	&sysfs_writeback_keys_done,
	&sysfs_writeback_keys_failed,

	&sysfs_trigger_gc,
	&sysfs_prune_cache,
#ifdef CONFIG_BCACHE_DEBUG
	&sysfs_verify,
	&sysfs_key_merging_disabled,
	&sysfs_expensive_debug_checks,
#endif
	&sysfs_gc_always_rewrite,
	&sysfs_btree_shrinker_disabled,
	&sysfs_copy_gc_enabled,
	&sysfs_io_disable,
	NULL
};
KTYPE(bch_cache_set_internal);
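
/* sort() comparison callback: sorts priorities in descending order */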
static int __bch_cache_cmp(const void *l, const void *r)
{
	return *((uint16_t *)r) - *((uint16_t *)l);
}

SHOW(__bch_cache)
{
	struct cache *ca = container_of(kobj, struct cache, kobj);

	sysfs_hprint(bucket_size,	bucket_bytes(ca));
	sysfs_hprint(block_size,	block_bytes(ca));
	sysfs_print(nbuckets,		ca->sb.nbuckets);
	sysfs_print(discard,		ca->discard);
	sysfs_hprint(written, atomic_long_read(&ca->sectors_written) << 9);
	sysfs_hprint(btree_written,
		     atomic_long_read(&ca->btree_sectors_written) << 9);
	sysfs_hprint(metadata_written,
		     (atomic_long_read(&ca->meta_sectors_written) +
		      atomic_long_read(&ca->btree_sectors_written)) << 9);

	sysfs_print(io_errors,
		    atomic_read(&ca->io_errors) >> IO_ERROR_SHIFT);

	if (attr == &sysfs_cache_replacement_policy)
		return bch_snprint_string_list(buf, PAGE_SIZE,
					       cache_replacement_policies,
					       CACHE_REPLACEMENT(&ca->sb));
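
	/*
	 * priority_stats summarizes bucket usage: every bucket is classified
	 * as unused, clean, dirty or metadata, then a snapshot of all bucket
	 * priorities is sorted descending to report the average bucket "heat"
	 * (INITIAL_PRIO - prio) and 31 quantiles of the distribution.
	 */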
	if (attr == &sysfs_priority_stats) {
		struct bucket *b;
		size_t n = ca->sb.nbuckets, i;
		size_t unused = 0, available = 0, dirty = 0, meta = 0;
		uint64_t sum = 0;
		/* Compute 31 quantiles */
		uint16_t q[31], *p, *cached;
		ssize_t ret;

		cached = p = vmalloc(array_size(sizeof(uint16_t),
						ca->sb.nbuckets));
		if (!p)
			return -ENOMEM;

		mutex_lock(&ca->set->bucket_lock);
		for_each_bucket(b, ca) {
			if (!GC_SECTORS_USED(b))
				unused++;
			if (GC_MARK(b) == GC_MARK_RECLAIMABLE)
				available++;
			if (GC_MARK(b) == GC_MARK_DIRTY)
				dirty++;
			if (GC_MARK(b) == GC_MARK_METADATA)
				meta++;
		}

		for (i = ca->sb.first_bucket; i < n; i++)
			p[i] = ca->buckets[i].prio;
		mutex_unlock(&ca->set->bucket_lock);

		sort(p, n, sizeof(uint16_t), __bch_cache_cmp, NULL);

		while (n &&
		       !cached[n - 1])
			--n;

		unused = ca->sb.nbuckets - n;

		while (cached < p + n &&
		       *cached == BTREE_PRIO)
			cached++, n--;

		for (i = 0; i < n; i++)
			sum += INITIAL_PRIO - cached[i];

		if (n)
			do_div(sum, n);

		for (i = 0; i < ARRAY_SIZE(q); i++)
			q[i] = INITIAL_PRIO - cached[n * (i + 1) /
				(ARRAY_SIZE(q) + 1)];

		vfree(p);

		ret = scnprintf(buf, PAGE_SIZE,
				"Unused:		%zu%%\n"
				"Clean:		%zu%%\n"
				"Dirty:		%zu%%\n"
				"Metadata:	%zu%%\n"
				"Average:	%llu\n"
				"Sectors per Q:	%zu\n"
				"Quantiles:	[",
				unused * 100 / (size_t) ca->sb.nbuckets,
				available * 100 / (size_t) ca->sb.nbuckets,
				dirty * 100 / (size_t) ca->sb.nbuckets,
				meta * 100 / (size_t) ca->sb.nbuckets, sum,
				n * ca->sb.bucket_size / (ARRAY_SIZE(q) + 1));

		for (i = 0; i < ARRAY_SIZE(q); i++)
			ret += scnprintf(buf + ret, PAGE_SIZE - ret,
					 "%u ", q[i]);
		ret--;

		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "]\n");

		return ret;
	}

	return 0;
}
SHOW_LOCKED(bch_cache)
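
/*
 * Note for the discard store below: ca->discard only changes when the
 * underlying queue actually supports discards, but the superblock flag is
 * updated either way so the setting persists across reboots.
 */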

STORE(__bch_cache)
{
	struct cache *ca = container_of(kobj, struct cache, kobj);
	ssize_t v;

	if (attr == &sysfs_discard) {
		bool v = strtoul_or_return(buf);

		if (blk_queue_discard(bdev_get_queue(ca->bdev)))
			ca->discard = v;

		if (v != CACHE_DISCARD(&ca->sb)) {
			SET_CACHE_DISCARD(&ca->sb, v);
			bcache_write_super(ca->set);
		}
	}

	if (attr == &sysfs_cache_replacement_policy) {
		v = __sysfs_match_string(cache_replacement_policies, -1, buf);
		if (v < 0)
			return v;

		if ((unsigned int) v != CACHE_REPLACEMENT(&ca->sb)) {
			mutex_lock(&ca->set->bucket_lock);
			SET_CACHE_REPLACEMENT(&ca->sb, v);
			mutex_unlock(&ca->set->bucket_lock);

			bcache_write_super(ca->set);
		}
	}

	if (attr == &sysfs_clear_stats) {
		atomic_long_set(&ca->sectors_written, 0);
		atomic_long_set(&ca->btree_sectors_written, 0);
		atomic_long_set(&ca->meta_sectors_written, 0);
		atomic_set(&ca->io_count, 0);
		atomic_set(&ca->io_errors, 0);
	}

	return size;
}
STORE_LOCKED(bch_cache)

static struct attribute *bch_cache_files[] = {
	&sysfs_bucket_size,
	&sysfs_block_size,
	&sysfs_nbuckets,
	&sysfs_priority_stats,
	&sysfs_discard,
	&sysfs_written,
	&sysfs_btree_written,
	&sysfs_metadata_written,
	&sysfs_io_errors,
	&sysfs_clear_stats,
	&sysfs_cache_replacement_policy,
	NULL
};
KTYPE(bch_cache);