/*
 * bcache sysfs interfaces
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */
#include "bcache.h"
#include "sysfs.h"
#include "btree.h"
#include "request.h"
#include "writeback.h"

#include <linux/blkdev.h>
#include <linux/sort.h>
static const char * const cache_replacement_policies[] = {
	"lru",
	"fifo",
	"random",
	NULL
};

static const char * const error_actions[] = {
	"unregister",
	"panic",
	NULL
};
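/*
 * These NULL-terminated string lists are shared between show and store:
 * show formats them with bch_snprint_string_list() (which brackets the
 * currently selected entry), and store parses user input against them with
 * bch_read_string_list(), whose return value is the index of the matching
 * entry, or negative if nothing matched.
 */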
write_attribute(attach);
write_attribute(detach);
write_attribute(unregister);
write_attribute(stop);
write_attribute(clear_stats);
write_attribute(trigger_gc);
write_attribute(prune_cache);
write_attribute(flash_vol_create);

read_attribute(bucket_size);
read_attribute(block_size);
read_attribute(nbuckets);
read_attribute(tree_depth);
read_attribute(root_usage_percent);
read_attribute(priority_stats);
read_attribute(btree_cache_size);
read_attribute(btree_cache_max_chain);
read_attribute(cache_available_percent);
read_attribute(written);
read_attribute(btree_written);
read_attribute(metadata_written);
read_attribute(active_journal_entries);

sysfs_time_stats_attribute(btree_gc, sec, ms);
sysfs_time_stats_attribute(btree_split, sec, us);
sysfs_time_stats_attribute(btree_sort, ms, us);
sysfs_time_stats_attribute(btree_read, ms, us);
sysfs_time_stats_attribute(try_harder, ms, us);

read_attribute(btree_nodes);
read_attribute(btree_used_percent);
read_attribute(average_key_size);
read_attribute(dirty_data);
read_attribute(bset_tree_stats);

read_attribute(state);
read_attribute(cache_read_races);
read_attribute(writeback_keys_done);
read_attribute(writeback_keys_failed);
read_attribute(io_errors);
read_attribute(congested);
rw_attribute(congested_read_threshold_us);
rw_attribute(congested_write_threshold_us);

rw_attribute(sequential_cutoff);
rw_attribute(data_csum);
rw_attribute(cache_mode);
rw_attribute(writeback_metadata);
rw_attribute(writeback_running);
rw_attribute(writeback_percent);
rw_attribute(writeback_delay);
rw_attribute(writeback_rate);

rw_attribute(writeback_rate_update_seconds);
rw_attribute(writeback_rate_d_term);
rw_attribute(writeback_rate_p_term_inverse);
read_attribute(writeback_rate_debug);

read_attribute(stripe_size);
read_attribute(partial_stripes_expensive);

rw_attribute(synchronous);
rw_attribute(journal_delay_ms);
rw_attribute(discard);
rw_attribute(running);
rw_attribute(label);
rw_attribute(readahead);
rw_attribute(errors);
rw_attribute(io_error_limit);
rw_attribute(io_error_halflife);
rw_attribute(verify);
rw_attribute(bypass_torture_test);
rw_attribute(key_merging_disabled);
rw_attribute(gc_always_rewrite);
rw_attribute(expensive_debug_checks);
rw_attribute(freelist_percent);
rw_attribute(cache_replacement_policy);
rw_attribute(btree_shrinker_disabled);
rw_attribute(copy_gc_enabled);
rw_attribute(size);
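/*
 * read_attribute()/write_attribute()/rw_attribute() come from sysfs.h and
 * declare plain struct attribute objects named sysfs_<name>; roughly (a
 * sketch, not the verbatim macros):
 *
 *	#define rw_attribute(n)						\
 *		static struct attribute sysfs_##n =			\
 *			{ .name = #n, .mode = S_IWUSR|S_IRUGO }
 *
 * Each declaration above therefore becomes one file in the owning
 * kobject's sysfs directory, with the mode implied by the macro name.
 */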
SHOW(__bch_cached_dev)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);
	const char *states[] = { "no cache", "clean", "dirty", "inconsistent" };

#define var(stat)		(dc->stat)

	if (attr == &sysfs_cache_mode)
		return bch_snprint_string_list(buf, PAGE_SIZE,
					       bch_cache_modes + 1,
					       BDEV_CACHE_MODE(&dc->sb));

	sysfs_printf(data_csum,	"%i", dc->disk.data_csum);
	var_printf(verify,	"%i");
	var_printf(bypass_torture_test,	"%i");
	var_printf(writeback_metadata,	"%i");
	var_printf(writeback_running,	"%i");
	var_print(writeback_delay);
	var_print(writeback_percent);
	sysfs_hprint(writeback_rate,	dc->writeback_rate.rate << 9);

	var_print(writeback_rate_update_seconds);
	var_print(writeback_rate_d_term);
	var_print(writeback_rate_p_term_inverse);

	if (attr == &sysfs_writeback_rate_debug) {
		char rate[20];
		char dirty[20];
		char target[20];
		char proportional[20];
		char derivative[20];
		char change[20];
		s64 next_io;

		bch_hprint(rate,	dc->writeback_rate.rate << 9);
		bch_hprint(dirty,	bcache_dev_sectors_dirty(&dc->disk) << 9);
		bch_hprint(target,	dc->writeback_rate_target << 9);
		bch_hprint(proportional, dc->writeback_rate_proportional << 9);
		bch_hprint(derivative,	dc->writeback_rate_derivative << 9);
		bch_hprint(change,	dc->writeback_rate_change << 9);

		next_io = div64_s64(dc->writeback_rate.next - local_clock(),
				    NSEC_PER_MSEC);

		return sprintf(buf,
			       "rate:\t\t%s/sec\n"
			       "dirty:\t\t%s\n"
			       "target:\t\t%s\n"
			       "proportional:\t%s\n"
			       "derivative:\t%s\n"
			       "change:\t\t%s/sec\n"
			       "next io:\t%llims\n",
			       rate, dirty, target, proportional,
			       derivative, change, next_io);
	}

	sysfs_hprint(dirty_data,
		     bcache_dev_sectors_dirty(&dc->disk) << 9);

	sysfs_hprint(stripe_size,	dc->disk.stripe_size << 9);
	var_printf(partial_stripes_expensive,	"%u");

	var_hprint(sequential_cutoff);
	var_hprint(readahead);

	sysfs_print(running,		atomic_read(&dc->running));
	sysfs_print(state,		states[BDEV_STATE(&dc->sb)]);

	if (attr == &sysfs_label) {
		/* Terminate at SB_LABEL_SIZE, not one past it: the label may
		 * use the full SB_LABEL_SIZE bytes without a NUL. */
		memcpy(buf, dc->sb.label, SB_LABEL_SIZE);
		buf[SB_LABEL_SIZE] = '\0';
		strcat(buf, "\n");
		return strlen(buf);
	}

#undef var
	return 0;
}
SHOW_LOCKED(bch_cached_dev)
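/*
 * SHOW()/STORE() (sysfs.h) expand to the show/store method signatures, and
 * SHOW_LOCKED()/STORE_LOCKED() generate wrappers that call the __-prefixed
 * variant under bch_register_lock; approximately (a sketch of the
 * expansion, not the literal macro text):
 *
 *	SHOW(bch_cached_dev)
 *	{
 *		ssize_t ret;
 *
 *		mutex_lock(&bch_register_lock);
 *		ret = __bch_cached_dev_show(kobj, attr, buf);
 *		mutex_unlock(&bch_register_lock);
 *		return ret;
 *	}
 */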
STORE(__cached_dev)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);
	unsigned v = size;
	struct cache_set *c;
	struct kobj_uevent_env *env;

#define d_strtoul(var)		sysfs_strtoul(var, dc->var)
#define d_strtoul_nonzero(var)	sysfs_strtoul_clamp(var, dc->var, 1, INT_MAX)
#define d_strtoi_h(var)		sysfs_hatoi(var, dc->var)

	sysfs_strtoul(data_csum,	dc->disk.data_csum);
	d_strtoul(verify);
	d_strtoul(bypass_torture_test);
	d_strtoul(writeback_metadata);
	d_strtoul(writeback_running);
	d_strtoul(writeback_delay);

	sysfs_strtoul_clamp(writeback_percent, dc->writeback_percent, 0, 40);

	sysfs_strtoul_clamp(writeback_rate,
			    dc->writeback_rate.rate, 1, INT_MAX);

	d_strtoul_nonzero(writeback_rate_update_seconds);
	d_strtoul(writeback_rate_d_term);
	d_strtoul_nonzero(writeback_rate_p_term_inverse);

	d_strtoi_h(sequential_cutoff);
	d_strtoi_h(readahead);

	if (attr == &sysfs_clear_stats)
		bch_cache_accounting_clear(&dc->accounting);

	if (attr == &sysfs_running &&
	    strtoul_or_return(buf))
		bch_cached_dev_run(dc);

	if (attr == &sysfs_cache_mode) {
		ssize_t v = bch_read_string_list(buf, bch_cache_modes + 1);

		if (v < 0)
			return v;

		if ((unsigned) v != BDEV_CACHE_MODE(&dc->sb)) {
			SET_BDEV_CACHE_MODE(&dc->sb, v);
			bch_write_bdev_super(dc, NULL);
		}
	}

	if (attr == &sysfs_label) {
		if (size > SB_LABEL_SIZE)
			return -EINVAL;
		memcpy(dc->sb.label, buf, size);
		if (size < SB_LABEL_SIZE)
			dc->sb.label[size] = '\0';
		if (size && dc->sb.label[size - 1] == '\n')
			dc->sb.label[size - 1] = '\0';
		bch_write_bdev_super(dc, NULL);
		if (dc->disk.c) {
			memcpy(dc->disk.c->uuids[dc->disk.id].label,
			       buf, SB_LABEL_SIZE);
			bch_uuid_write(dc->disk.c);
		}

		env = kzalloc(sizeof(struct kobj_uevent_env), GFP_KERNEL);
		if (!env)
			return -ENOMEM;
		add_uevent_var(env, "DRIVER=bcache");
		add_uevent_var(env, "CACHED_UUID=%pU", dc->sb.uuid);
		add_uevent_var(env, "CACHED_LABEL=%s", buf);
		kobject_uevent_env(&disk_to_dev(dc->disk.disk)->kobj,
				   KOBJ_CHANGE, env->envp);
		kfree(env);
	}

	if (attr == &sysfs_attach) {
		if (bch_parse_uuid(buf, dc->sb.set_uuid) < 16)
			return -EINVAL;

		list_for_each_entry(c, &bch_cache_sets, list) {
			v = bch_cached_dev_attach(dc, c);
			if (!v)
				return size;
		}

		pr_err("Can't attach %s: cache set not found", buf);
		size = v;
	}

	if (attr == &sysfs_detach && dc->disk.c)
		bch_cached_dev_detach(dc);

	if (attr == &sysfs_stop)
		bcache_device_stop(&dc->disk);

	return size;
}
STORE(bch_cached_dev)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);

	mutex_lock(&bch_register_lock);
	size = __cached_dev_store(kobj, attr, buf, size);

	if (attr == &sysfs_writeback_running)
		bch_writeback_queue(dc);

	if (attr == &sysfs_writeback_percent)
		schedule_delayed_work(&dc->writeback_rate_update,
				      dc->writeback_rate_update_seconds * HZ);

	mutex_unlock(&bch_register_lock);
	return size;
}
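/*
 * From userspace these attributes appear under the backing device's bcache
 * directory, e.g. (paths per Documentation/bcache.txt):
 *
 *	echo writeback > /sys/block/bcache0/bcache/cache_mode
 *	echo 10 > /sys/block/bcache0/bcache/writeback_percent
 */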
static struct attribute *bch_cached_dev_files[] = {
	&sysfs_attach,
	&sysfs_detach,
	&sysfs_stop,
	&sysfs_cache_mode,
	&sysfs_writeback_metadata,
	&sysfs_writeback_running,
	&sysfs_writeback_delay,
	&sysfs_writeback_percent,
	&sysfs_writeback_rate,
	&sysfs_writeback_rate_update_seconds,
	&sysfs_writeback_rate_d_term,
	&sysfs_writeback_rate_p_term_inverse,
	&sysfs_writeback_rate_debug,
	&sysfs_dirty_data,
	&sysfs_stripe_size,
	&sysfs_partial_stripes_expensive,
	&sysfs_sequential_cutoff,
	&sysfs_clear_stats,
	&sysfs_running,
	&sysfs_state,
	&sysfs_label,
	&sysfs_readahead,
#ifdef CONFIG_BCACHE_DEBUG
	&sysfs_verify,
	&sysfs_bypass_torture_test,
#endif
	NULL
};
KTYPE(bch_cached_dev);
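/*
 * KTYPE(bch_cached_dev) (sysfs.h) ties the pieces together: it defines a
 * struct kobj_type whose sysfs_ops dispatch to bch_cached_dev_show/_store
 * and whose default attributes are bch_cached_dev_files[]; a sketch of the
 * expansion, not the literal macro:
 *
 *	static const struct sysfs_ops bch_cached_dev_ops = {
 *		.show	= bch_cached_dev_show,
 *		.store	= bch_cached_dev_store,
 *	};
 *	struct kobj_type bch_cached_dev_ktype = {
 *		.release	= bch_cached_dev_release,
 *		.sysfs_ops	= &bch_cached_dev_ops,
 *		.default_attrs	= bch_cached_dev_files,
 *	};
 */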
SHOW(__bch_flash_dev)
{
	struct bcache_device *d = container_of(kobj, struct bcache_device,
					       kobj);
	struct uuid_entry *u = &d->c->uuids[d->id];

	sysfs_printf(data_csum,	"%i", d->data_csum);
	sysfs_hprint(size,	u->sectors << 9);

	if (attr == &sysfs_label) {
		/* As above: terminate at SB_LABEL_SIZE, not one past it. */
		memcpy(buf, u->label, SB_LABEL_SIZE);
		buf[SB_LABEL_SIZE] = '\0';
		strcat(buf, "\n");
		return strlen(buf);
	}

	return 0;
}
SHOW_LOCKED(bch_flash_dev)
STORE(__bch_flash_dev)
{
	struct bcache_device *d = container_of(kobj, struct bcache_device,
					       kobj);
	struct uuid_entry *u = &d->c->uuids[d->id];

	sysfs_strtoul(data_csum,	d->data_csum);

	if (attr == &sysfs_size) {
		uint64_t v;
		strtoi_h_or_return(buf, v);

		u->sectors = v >> 9;
		bch_uuid_write(d->c);
		set_capacity(d->disk, u->sectors);
	}

	if (attr == &sysfs_label) {
		memcpy(u->label, buf, SB_LABEL_SIZE);
		bch_uuid_write(d->c);
	}

	if (attr == &sysfs_unregister) {
		set_bit(BCACHE_DEV_DETACHING, &d->flags);
		bcache_device_stop(d);
	}

	return size;
}
STORE_LOCKED(bch_flash_dev)
static struct attribute *bch_flash_dev_files[] = {
	&sysfs_unregister,
	&sysfs_label,
	&sysfs_size,
	NULL
};
KTYPE(bch_flash_dev);
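/*
 * Flash-only volumes are created and resized entirely through sysfs; e.g.
 * (per Documentation/bcache.txt; the set UUID and device number here are
 * placeholders):
 *
 *	echo 1G > /sys/fs/bcache/<set-uuid>/flash_vol_create
 *	echo 2G > /sys/block/bcache1/bcache/size
 */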
SHOW(__bch_cache_set)
{
	unsigned root_usage(struct cache_set *c)
	{
		unsigned bytes = 0;
		struct bkey *k;
		struct btree *b;
		struct btree_iter iter;

		goto lock_root;

		do {
			rw_unlock(false, b);
lock_root:
			b = c->root;
			rw_lock(false, b, b->level);
		} while (b != c->root);

		for_each_key_filter(b, k, &iter, bch_ptr_bad)
			bytes += bkey_bytes(k);

		rw_unlock(false, b);

		return (bytes * 100) / btree_bytes(c);
	}

	size_t cache_size(struct cache_set *c)
	{
		size_t ret = 0;
		struct btree *b;

		mutex_lock(&c->bucket_lock);
		list_for_each_entry(b, &c->btree_cache, list)
			ret += 1 << (b->page_order + PAGE_SHIFT);

		mutex_unlock(&c->bucket_lock);
		return ret;
	}

	unsigned cache_max_chain(struct cache_set *c)
	{
		unsigned ret = 0;
		struct hlist_head *h;

		mutex_lock(&c->bucket_lock);

		for (h = c->bucket_hash;
		     h < c->bucket_hash + (1 << BUCKET_HASH_BITS);
		     h++) {
			unsigned i = 0;
			struct hlist_node *p;

			hlist_for_each(p, h)
				i++;

			ret = max(ret, i);
		}

		mutex_unlock(&c->bucket_lock);
		return ret;
	}

	unsigned btree_used(struct cache_set *c)
	{
		return div64_u64(c->gc_stats.key_bytes * 100,
				 (c->gc_stats.nodes ?: 1) * btree_bytes(c));
	}

	unsigned average_key_size(struct cache_set *c)
	{
		return c->gc_stats.nkeys
			? div64_u64(c->gc_stats.data, c->gc_stats.nkeys)
			: 0;
	}

	struct cache_set *c = container_of(kobj, struct cache_set, kobj);

	sysfs_print(synchronous,		CACHE_SYNC(&c->sb));
	sysfs_print(journal_delay_ms,		c->journal_delay_ms);
	sysfs_hprint(bucket_size,		bucket_bytes(c));
	sysfs_hprint(block_size,		block_bytes(c));
	sysfs_print(tree_depth,			c->root->level);
	sysfs_print(root_usage_percent,		root_usage(c));

	sysfs_hprint(btree_cache_size,		cache_size(c));
	sysfs_print(btree_cache_max_chain,	cache_max_chain(c));
	sysfs_print(cache_available_percent,	100 - c->gc_stats.in_use);

	sysfs_print_time_stats(&c->btree_gc_time,	btree_gc, sec, ms);
	sysfs_print_time_stats(&c->btree_split_time,	btree_split, sec, us);
	sysfs_print_time_stats(&c->sort_time,		btree_sort, ms, us);
	sysfs_print_time_stats(&c->btree_read_time,	btree_read, ms, us);
	sysfs_print_time_stats(&c->try_harder_time,	try_harder, ms, us);

	sysfs_print(btree_used_percent,	btree_used(c));
	sysfs_print(btree_nodes,	c->gc_stats.nodes);
	sysfs_hprint(average_key_size,	average_key_size(c));

	sysfs_print(cache_read_races,
		    atomic_long_read(&c->cache_read_races));

	sysfs_print(writeback_keys_done,
		    atomic_long_read(&c->writeback_keys_done));
	sysfs_print(writeback_keys_failed,
		    atomic_long_read(&c->writeback_keys_failed));

	if (attr == &sysfs_errors)
		return bch_snprint_string_list(buf, PAGE_SIZE, error_actions,
					       c->on_error);

	/* See count_io_errors for why 88 */
	sysfs_print(io_error_halflife,	c->error_decay * 88);
	sysfs_print(io_error_limit,	c->error_limit >> IO_ERROR_SHIFT);

	sysfs_hprint(congested,
		     ((uint64_t) bch_get_congested(c)) << 9);
	sysfs_print(congested_read_threshold_us,
		    c->congested_read_threshold_us);
	sysfs_print(congested_write_threshold_us,
		    c->congested_write_threshold_us);

	sysfs_print(active_journal_entries,	fifo_used(&c->journal.pin));
	sysfs_printf(verify,			"%i", c->verify);
	sysfs_printf(key_merging_disabled,	"%i", c->key_merging_disabled);
	sysfs_printf(expensive_debug_checks,
		     "%i", c->expensive_debug_checks);
	sysfs_printf(gc_always_rewrite,		"%i", c->gc_always_rewrite);
	sysfs_printf(btree_shrinker_disabled,	"%i", c->shrinker_disabled);
	sysfs_printf(copy_gc_enabled,		"%i", c->copy_gc_enabled);

	if (attr == &sysfs_bset_tree_stats)
		return bch_bset_print_stats(c, buf);

	return 0;
}
SHOW_LOCKED(bch_cache_set)
STORE(__bch_cache_set)
{
	struct cache_set *c = container_of(kobj, struct cache_set, kobj);

	if (attr == &sysfs_unregister)
		bch_cache_set_unregister(c);

	if (attr == &sysfs_stop)
		bch_cache_set_stop(c);

	if (attr == &sysfs_synchronous) {
		bool sync = strtoul_or_return(buf);

		if (sync != CACHE_SYNC(&c->sb)) {
			SET_CACHE_SYNC(&c->sb, sync);
			bcache_write_super(c);
		}
	}

	if (attr == &sysfs_flash_vol_create) {
		int r;
		uint64_t v;
		strtoi_h_or_return(buf, v);

		r = bch_flash_dev_create(c, v);
		if (r)
			return r;
	}

	if (attr == &sysfs_clear_stats) {
		atomic_long_set(&c->writeback_keys_done,	0);
		atomic_long_set(&c->writeback_keys_failed,	0);

		memset(&c->gc_stats, 0, sizeof(struct gc_stat));
		bch_cache_accounting_clear(&c->accounting);
	}

	if (attr == &sysfs_trigger_gc)
		wake_up_gc(c);

	if (attr == &sysfs_prune_cache) {
		struct shrink_control sc;
		sc.gfp_mask = GFP_KERNEL;
		sc.nr_to_scan = strtoul_or_return(buf);
		c->shrink.scan_objects(&c->shrink, &sc);
	}

	sysfs_strtoul(congested_read_threshold_us,
		      c->congested_read_threshold_us);
	sysfs_strtoul(congested_write_threshold_us,
		      c->congested_write_threshold_us);

	if (attr == &sysfs_errors) {
		ssize_t v = bch_read_string_list(buf, error_actions);

		if (v < 0)
			return v;

		c->on_error = v;
	}

	if (attr == &sysfs_io_error_limit)
		c->error_limit = strtoul_or_return(buf) << IO_ERROR_SHIFT;

	/* See count_io_errors() for why 88 */
	if (attr == &sysfs_io_error_halflife)
		c->error_decay = strtoul_or_return(buf) / 88;

	sysfs_strtoul(journal_delay_ms,		c->journal_delay_ms);
	sysfs_strtoul(verify,			c->verify);
	sysfs_strtoul(key_merging_disabled,	c->key_merging_disabled);
	sysfs_strtoul(expensive_debug_checks,	c->expensive_debug_checks);
	sysfs_strtoul(gc_always_rewrite,	c->gc_always_rewrite);
	sysfs_strtoul(btree_shrinker_disabled,	c->shrinker_disabled);
	sysfs_strtoul(copy_gc_enabled,		c->copy_gc_enabled);

	return size;
}
STORE_LOCKED(bch_cache_set)
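/*
 * Cache set attributes live under /sys/fs/bcache/<set-uuid>/; the
 * write-only hooks above act as triggers, e.g. (the set UUID is a
 * placeholder):
 *
 *	echo 1 > /sys/fs/bcache/<set-uuid>/internal/trigger_gc
 *	echo 1 > /sys/fs/bcache/<set-uuid>/clear_stats
 */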
SHOW(bch_cache_set_internal)
{
	struct cache_set *c = container_of(kobj, struct cache_set, internal);
	return bch_cache_set_show(&c->kobj, attr, buf);
}

STORE(bch_cache_set_internal)
{
	struct cache_set *c = container_of(kobj, struct cache_set, internal);
	return bch_cache_set_store(&c->kobj, attr, buf, size);
}
static void bch_cache_set_internal_release(struct kobject *k)
{
}

static struct attribute *bch_cache_set_files[] = {
	&sysfs_unregister,
	&sysfs_stop,
	&sysfs_synchronous,
	&sysfs_journal_delay_ms,
	&sysfs_flash_vol_create,

	&sysfs_bucket_size,
	&sysfs_block_size,
	&sysfs_tree_depth,
	&sysfs_root_usage_percent,
	&sysfs_btree_cache_size,
	&sysfs_cache_available_percent,

	&sysfs_average_key_size,

	&sysfs_errors,
	&sysfs_io_error_limit,
	&sysfs_io_error_halflife,
	&sysfs_congested,
	&sysfs_congested_read_threshold_us,
	&sysfs_congested_write_threshold_us,
	&sysfs_clear_stats,
	NULL
};
KTYPE(bch_cache_set);

static struct attribute *bch_cache_set_internal_files[] = {
	&sysfs_active_journal_entries,

	sysfs_time_stats_attribute_list(btree_gc, sec, ms)
	sysfs_time_stats_attribute_list(btree_split, sec, us)
	sysfs_time_stats_attribute_list(btree_sort, ms, us)
	sysfs_time_stats_attribute_list(btree_read, ms, us)
	sysfs_time_stats_attribute_list(try_harder, ms, us)

	&sysfs_btree_nodes,
	&sysfs_btree_used_percent,
	&sysfs_btree_cache_max_chain,

	&sysfs_bset_tree_stats,
	&sysfs_cache_read_races,
	&sysfs_writeback_keys_done,
	&sysfs_writeback_keys_failed,

	&sysfs_trigger_gc,
	&sysfs_prune_cache,
#ifdef CONFIG_BCACHE_DEBUG
	&sysfs_verify,
	&sysfs_key_merging_disabled,
	&sysfs_expensive_debug_checks,
#endif
	&sysfs_gc_always_rewrite,
	&sysfs_btree_shrinker_disabled,
	&sysfs_copy_gc_enabled,
	NULL
};
KTYPE(bch_cache_set_internal);
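/*
 * bch_cache_set_internal_files[] is registered on cache_set->internal, a
 * child kobject, so these attributes show up one level down in
 * /sys/fs/bcache/<set-uuid>/internal/ while sharing the show/store
 * implementations above via the container_of() redirection.
 */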
SHOW(__bch_cache)
{
	struct cache *ca = container_of(kobj, struct cache, kobj);

	sysfs_hprint(bucket_size,	bucket_bytes(ca));
	sysfs_hprint(block_size,	block_bytes(ca));
	sysfs_print(nbuckets,		ca->sb.nbuckets);
	sysfs_print(discard,		ca->discard);
	sysfs_hprint(written, atomic_long_read(&ca->sectors_written) << 9);
	sysfs_hprint(btree_written,
		     atomic_long_read(&ca->btree_sectors_written) << 9);
	sysfs_hprint(metadata_written,
		     (atomic_long_read(&ca->meta_sectors_written) +
		      atomic_long_read(&ca->btree_sectors_written)) << 9);

	sysfs_print(io_errors,
		    atomic_read(&ca->io_errors) >> IO_ERROR_SHIFT);

	sysfs_print(freelist_percent, ca->free.size * 100 /
		    ((size_t) ca->sb.nbuckets));

	if (attr == &sysfs_cache_replacement_policy)
		return bch_snprint_string_list(buf, PAGE_SIZE,
					       cache_replacement_policies,
					       CACHE_REPLACEMENT(&ca->sb));

	if (attr == &sysfs_priority_stats) {
		int cmp(const void *l, const void *r)
		{	return *((uint16_t *) r) - *((uint16_t *) l); }

		size_t n = ca->sb.nbuckets, i, unused, btree;
		uint64_t sum = 0;
		/* Compute 31 quantiles */
		uint16_t q[31], *p, *cached;
		ssize_t ret;

		cached = p = vmalloc(ca->sb.nbuckets * sizeof(uint16_t));
		if (!p)
			return -ENOMEM;

		mutex_lock(&ca->set->bucket_lock);
		for (i = ca->sb.first_bucket; i < n; i++)
			p[i] = ca->buckets[i].prio;
		mutex_unlock(&ca->set->bucket_lock);

		sort(p, n, sizeof(uint16_t), cmp, NULL);

		while (n &&
		       !cached[n - 1])
			--n;

		unused = ca->sb.nbuckets - n;

		while (cached < p + n &&
		       *cached == BTREE_PRIO)
			cached++;

		btree = cached - p;
		n -= btree;

		for (i = 0; i < n; i++)
			sum += INITIAL_PRIO - cached[i];

		if (n)
			do_div(sum, n);

		for (i = 0; i < ARRAY_SIZE(q); i++)
			q[i] = INITIAL_PRIO - cached[n * (i + 1) /
				(ARRAY_SIZE(q) + 1)];

		vfree(p);

		ret = scnprintf(buf, PAGE_SIZE,
				"Unused:\t\t%zu%%\n"
				"Metadata:\t%zu%%\n"
				"Average:\t%llu\n"
				"Sectors per Q:\t%zu\n"
				"Quantiles:\t[",
				unused * 100 / (size_t) ca->sb.nbuckets,
				btree * 100 / (size_t) ca->sb.nbuckets, sum,
				n * ca->sb.bucket_size / (ARRAY_SIZE(q) + 1));

		for (i = 0; i < ARRAY_SIZE(q); i++)
			ret += scnprintf(buf + ret, PAGE_SIZE - ret,
					 "%u ", q[i]);
		ret--;

		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "]\n");

		return ret;
	}

	return 0;
}
SHOW_LOCKED(bch_cache)
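/*
 * Example of reading the bucket priority quantiles (output shape only; the
 * numbers are invented):
 *
 *	$ cat /sys/fs/bcache/<set-uuid>/cache0/priority_stats
 *	Unused:		2%
 *	Metadata:	5%
 *	Average:	10409
 *	Sectors per Q:	64
 *	Quantiles:	[... 31 values, coldest to hottest ...]
 */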
STORE(__bch_cache)
{
	struct cache *ca = container_of(kobj, struct cache, kobj);

	if (attr == &sysfs_discard) {
		bool v = strtoul_or_return(buf);

		if (blk_queue_discard(bdev_get_queue(ca->bdev)))
			ca->discard = v;

		if (v != CACHE_DISCARD(&ca->sb)) {
			SET_CACHE_DISCARD(&ca->sb, v);
			bcache_write_super(ca->set);
		}
	}

	if (attr == &sysfs_cache_replacement_policy) {
		ssize_t v = bch_read_string_list(buf, cache_replacement_policies);

		if (v < 0)
			return v;

		if ((unsigned) v != CACHE_REPLACEMENT(&ca->sb)) {
			mutex_lock(&ca->set->bucket_lock);
			SET_CACHE_REPLACEMENT(&ca->sb, v);
			mutex_unlock(&ca->set->bucket_lock);

			bcache_write_super(ca->set);
		}
	}

	if (attr == &sysfs_freelist_percent) {
		DECLARE_FIFO(long, free);
		long i;
		size_t p = strtoul_or_return(buf);

		p = clamp_t(size_t,
			    ((size_t) ca->sb.nbuckets * p) / 100,
			    roundup_pow_of_two(ca->sb.nbuckets) >> 9,
			    ca->sb.nbuckets / 2);

		if (!init_fifo_exact(&free, p, GFP_KERNEL))
			return -ENOMEM;

		mutex_lock(&ca->set->bucket_lock);

		fifo_move(&free, &ca->free);
		fifo_swap(&free, &ca->free);

		mutex_unlock(&ca->set->bucket_lock);

		while (fifo_pop(&free, i))
			atomic_dec(&ca->buckets[i].pin);

		free_fifo(&free);
	}

	if (attr == &sysfs_clear_stats) {
		atomic_long_set(&ca->sectors_written, 0);
		atomic_long_set(&ca->btree_sectors_written, 0);
		atomic_long_set(&ca->meta_sectors_written, 0);
		atomic_set(&ca->io_count, 0);
		atomic_set(&ca->io_errors, 0);
	}

	return size;
}
STORE_LOCKED(bch_cache)
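/*
 * freelist_percent resizes ca->free in place: the new FIFO is filled from
 * the old one (fifo_move), swapped in under bucket_lock, and any buckets
 * that no longer fit are unpinned. E.g. to reserve 10% of buckets:
 *
 *	echo 10 > /sys/fs/bcache/<set-uuid>/cache0/freelist_percent
 */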
static struct attribute *bch_cache_files[] = {
	&sysfs_bucket_size,
	&sysfs_block_size,
	&sysfs_nbuckets,
	&sysfs_priority_stats,
	&sysfs_discard,
	&sysfs_written,
	&sysfs_btree_written,
	&sysfs_metadata_written,
	&sysfs_io_errors,
	&sysfs_clear_stats,
	&sysfs_freelist_percent,
	&sysfs_cache_replacement_policy,