/*
 * bcache sysfs interfaces
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "sysfs.h"
#include "btree.h"
#include "request.h"
#include "writeback.h"

#include <linux/blkdev.h>
#include <linux/sort.h>

static const char * const cache_replacement_policies[] = {
	"lru",
	"fifo",
	"random",
	NULL
};

static const char * const error_actions[] = {
	"unregister",
	"panic",
	NULL
};

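/*
 * write_attribute()/read_attribute()/rw_attribute() come from sysfs.h
 * and declare the struct attribute objects (named sysfs_<name>) that
 * the SHOW()/STORE() handlers below compare against attr.
 */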
write_attribute(attach);
write_attribute(detach);
write_attribute(unregister);
write_attribute(stop);
write_attribute(clear_stats);
write_attribute(trigger_gc);
write_attribute(prune_cache);
write_attribute(flash_vol_create);

read_attribute(bucket_size);
read_attribute(block_size);
read_attribute(nbuckets);
read_attribute(tree_depth);
read_attribute(root_usage_percent);
read_attribute(priority_stats);
read_attribute(btree_cache_size);
read_attribute(btree_cache_max_chain);
read_attribute(cache_available_percent);
read_attribute(written);
read_attribute(btree_written);
read_attribute(metadata_written);
read_attribute(active_journal_entries);

sysfs_time_stats_attribute(btree_gc, sec, ms);
sysfs_time_stats_attribute(btree_split, sec, us);
sysfs_time_stats_attribute(btree_sort, ms, us);
sysfs_time_stats_attribute(btree_read, ms, us);
sysfs_time_stats_attribute(try_harder, ms, us);

read_attribute(btree_nodes);
read_attribute(btree_used_percent);
read_attribute(average_key_size);
read_attribute(dirty_data);
read_attribute(bset_tree_stats);

read_attribute(state);
read_attribute(cache_read_races);
read_attribute(writeback_keys_done);
read_attribute(writeback_keys_failed);
read_attribute(io_errors);
read_attribute(congested);
rw_attribute(congested_read_threshold_us);
rw_attribute(congested_write_threshold_us);

rw_attribute(sequential_cutoff);
rw_attribute(data_csum);
rw_attribute(cache_mode);
rw_attribute(writeback_metadata);
rw_attribute(writeback_running);
rw_attribute(writeback_percent);
rw_attribute(writeback_delay);
rw_attribute(writeback_rate);

rw_attribute(writeback_rate_update_seconds);
rw_attribute(writeback_rate_d_term);
rw_attribute(writeback_rate_p_term_inverse);
read_attribute(writeback_rate_debug);

read_attribute(stripe_size);
read_attribute(partial_stripes_expensive);

rw_attribute(synchronous);
rw_attribute(journal_delay_ms);
rw_attribute(discard);
rw_attribute(running);
rw_attribute(label);
rw_attribute(readahead);
rw_attribute(errors);
rw_attribute(io_error_limit);
rw_attribute(io_error_halflife);
rw_attribute(verify);
rw_attribute(bypass_torture_test);
rw_attribute(key_merging_disabled);
rw_attribute(gc_always_rewrite);
rw_attribute(expensive_debug_checks);
rw_attribute(freelist_percent);
rw_attribute(cache_replacement_policy);
rw_attribute(btree_shrinker_disabled);
rw_attribute(copy_gc_enabled);
rw_attribute(size);

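/*
 * show() implementation for cached (backing) devices; these attributes
 * typically appear under /sys/block/<bdev>/bcache/ once the device is
 * registered. SHOW_LOCKED() below generates the wrapper that calls
 * this with bch_register_lock held.
 */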
SHOW(__bch_cached_dev)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);
	const char *states[] = { "no cache", "clean", "dirty", "inconsistent" };

#define var(stat)	(dc->stat)

	if (attr == &sysfs_cache_mode)
		return bch_snprint_string_list(buf, PAGE_SIZE,
					       bch_cache_modes + 1,
					       BDEV_CACHE_MODE(&dc->sb));

	sysfs_printf(data_csum, "%i", dc->disk.data_csum);
	var_printf(verify, "%i");
	var_printf(bypass_torture_test, "%i");
	var_printf(writeback_metadata, "%i");
	var_printf(writeback_running, "%i");
	var_print(writeback_delay);
	var_print(writeback_percent);
	sysfs_hprint(writeback_rate, dc->writeback_rate.rate << 9);

	var_print(writeback_rate_update_seconds);
	var_print(writeback_rate_d_term);
	var_print(writeback_rate_p_term_inverse);

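	/*
	 * writeback_rate_debug dumps the state of the proportional-
	 * derivative controller that paces writeback; rates and sizes
	 * are kept in 512-byte sectors, so << 9 converts to bytes for
	 * bch_hprint().
	 */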
	if (attr == &sysfs_writeback_rate_debug) {
		char rate[20];
		char dirty[20];
		char target[20];
		char proportional[20];
		char derivative[20];
		char change[20];
		s64 next_io;

		bch_hprint(rate, dc->writeback_rate.rate << 9);
		bch_hprint(dirty, bcache_dev_sectors_dirty(&dc->disk) << 9);
		bch_hprint(target, dc->writeback_rate_target << 9);
		bch_hprint(proportional, dc->writeback_rate_proportional << 9);
		bch_hprint(derivative, dc->writeback_rate_derivative << 9);
		bch_hprint(change, dc->writeback_rate_change << 9);

		next_io = div64_s64(dc->writeback_rate.next - local_clock(),
				    NSEC_PER_MSEC);

		return sprintf(buf,
			       "rate:\t\t%s/sec\n"
			       "dirty:\t\t%s\n"
			       "target:\t\t%s\n"
			       "proportional:\t%s\n"
			       "derivative:\t%s\n"
			       "change:\t\t%s/sec\n"
			       "next io:\t%llims\n",
			       rate, dirty, target, proportional,
			       derivative, change, next_io);
	}

	sysfs_hprint(dirty_data,
		     bcache_dev_sectors_dirty(&dc->disk) << 9);

	sysfs_hprint(stripe_size, dc->disk.stripe_size << 9);
	var_printf(partial_stripes_expensive, "%u");

	var_hprint(sequential_cutoff);
	var_hprint(readahead);

	sysfs_print(running, atomic_read(&dc->running));
	sysfs_print(state, states[BDEV_STATE(&dc->sb)]);

	if (attr == &sysfs_label) {
		memcpy(buf, dc->sb.label, SB_LABEL_SIZE);
		buf[SB_LABEL_SIZE] = '\0';
		strcat(buf, "\n");
		return strlen(buf);
	}

#undef var
	return 0;
}
SHOW_LOCKED(bch_cached_dev)

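/*
 * store() implementation for cached devices; each sysfs_strtoul()-style
 * macro matches a single attribute, parses buf into the named field,
 * and otherwise falls through.
 */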
STORE(__cached_dev)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);
	ssize_t v;
	struct cache_set *c;
	struct kobj_uevent_env *env;

#define d_strtoul(var)		sysfs_strtoul(var, dc->var)
#define d_strtoul_nonzero(var)	sysfs_strtoul_clamp(var, dc->var, 1, INT_MAX)
#define d_strtoi_h(var)		sysfs_hatoi(var, dc->var)

	sysfs_strtoul(data_csum, dc->disk.data_csum);
	d_strtoul(verify);
	d_strtoul(bypass_torture_test);
	d_strtoul(writeback_metadata);
	d_strtoul(writeback_running);
	d_strtoul(writeback_delay);

	sysfs_strtoul_clamp(writeback_percent, dc->writeback_percent, 0, 40);

	sysfs_strtoul_clamp(writeback_rate,
			    dc->writeback_rate.rate, 1, INT_MAX);

	d_strtoul_nonzero(writeback_rate_update_seconds);
	d_strtoul(writeback_rate_d_term);
	d_strtoul_nonzero(writeback_rate_p_term_inverse);

	d_strtoi_h(sequential_cutoff);
	d_strtoi_h(readahead);

	if (attr == &sysfs_clear_stats)
		bch_cache_accounting_clear(&dc->accounting);

	if (attr == &sysfs_running &&
	    strtoul_or_return(buf))
		bch_cached_dev_run(dc);

	if (attr == &sysfs_cache_mode) {
		ssize_t v = bch_read_string_list(buf, bch_cache_modes + 1);

		if (v < 0)
			return v;

		if ((unsigned) v != BDEV_CACHE_MODE(&dc->sb)) {
			SET_BDEV_CACHE_MODE(&dc->sb, v);
			bch_write_bdev_super(dc, NULL);
		}
	}

	if (attr == &sysfs_label) {
		if (size > SB_LABEL_SIZE)
			return -EINVAL;
		memcpy(dc->sb.label, buf, size);
		if (size < SB_LABEL_SIZE)
			dc->sb.label[size] = '\0';
		if (size && dc->sb.label[size - 1] == '\n')
			dc->sb.label[size - 1] = '\0';
		bch_write_bdev_super(dc, NULL);
		if (dc->disk.c) {
			memcpy(dc->disk.c->uuids[dc->disk.id].label,
			       buf, SB_LABEL_SIZE);
			bch_uuid_write(dc->disk.c);
		}
		env = kzalloc(sizeof(struct kobj_uevent_env), GFP_KERNEL);
		if (!env)
			return -ENOMEM;
		add_uevent_var(env, "DRIVER=bcache");
		add_uevent_var(env, "CACHED_UUID=%pU", dc->sb.uuid);
		add_uevent_var(env, "CACHED_LABEL=%s", buf);
		kobject_uevent_env(&disk_to_dev(dc->disk.disk)->kobj,
				   KOBJ_CHANGE, env->envp);
		kfree(env);
	}

	if (attr == &sysfs_attach) {
		if (bch_parse_uuid(buf, dc->sb.set_uuid) < 16)
			return -EINVAL;

		v = -ENOENT;
		list_for_each_entry(c, &bch_cache_sets, list) {
			v = bch_cached_dev_attach(dc, c);
			if (!v)
				return size;
		}

		pr_err("Can't attach %s: cache set not found", buf);
		return v;
	}

	if (attr == &sysfs_detach && dc->disk.c)
		bch_cached_dev_detach(dc);

	if (attr == &sysfs_stop)
		bcache_device_stop(&dc->disk);

	return size;
}

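/*
 * Locked wrapper around __cached_dev_store(); also kicks off any work
 * that the newly written settings require.
 */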
STORE(bch_cached_dev)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);

	mutex_lock(&bch_register_lock);
	size = __cached_dev_store(kobj, attr, buf, size);

	if (attr == &sysfs_writeback_running)
		bch_writeback_queue(dc);

	if (attr == &sysfs_writeback_percent)
		schedule_delayed_work(&dc->writeback_rate_update,
				      dc->writeback_rate_update_seconds * HZ);

	mutex_unlock(&bch_register_lock);
	return size;
}

static struct attribute *bch_cached_dev_files[] = {
	&sysfs_attach,
	&sysfs_detach,
	&sysfs_stop,
#if 0
	&sysfs_data_csum,
#endif
	&sysfs_cache_mode,
	&sysfs_writeback_metadata,
	&sysfs_writeback_running,
	&sysfs_writeback_delay,
	&sysfs_writeback_percent,
	&sysfs_writeback_rate,
	&sysfs_writeback_rate_update_seconds,
	&sysfs_writeback_rate_d_term,
	&sysfs_writeback_rate_p_term_inverse,
	&sysfs_writeback_rate_debug,
	&sysfs_dirty_data,
	&sysfs_stripe_size,
	&sysfs_partial_stripes_expensive,
	&sysfs_sequential_cutoff,
	&sysfs_clear_stats,
	&sysfs_running,
	&sysfs_state,
	&sysfs_label,
	&sysfs_readahead,
#ifdef CONFIG_BCACHE_DEBUG
	&sysfs_verify,
	&sysfs_bypass_torture_test,
#endif
	NULL
};
KTYPE(bch_cached_dev);

SHOW(bch_flash_dev)
{
	struct bcache_device *d = container_of(kobj, struct bcache_device,
					       kobj);
	struct uuid_entry *u = &d->c->uuids[d->id];

	sysfs_printf(data_csum, "%i", d->data_csum);
	sysfs_hprint(size, u->sectors << 9);

	if (attr == &sysfs_label) {
		memcpy(buf, u->label, SB_LABEL_SIZE);
		buf[SB_LABEL_SIZE] = '\0';
		strcat(buf, "\n");
		return strlen(buf);
	}

	return 0;
}

STORE(__bch_flash_dev)
{
	struct bcache_device *d = container_of(kobj, struct bcache_device,
					       kobj);
	struct uuid_entry *u = &d->c->uuids[d->id];

	sysfs_strtoul(data_csum, d->data_csum);

	if (attr == &sysfs_size) {
		uint64_t v;
		strtoi_h_or_return(buf, v);

		u->sectors = v >> 9;
		bch_uuid_write(d->c);
		set_capacity(d->disk, u->sectors);
	}

	if (attr == &sysfs_label) {
		memcpy(u->label, buf, SB_LABEL_SIZE);
		bch_uuid_write(d->c);
	}

	if (attr == &sysfs_unregister) {
		set_bit(BCACHE_DEV_DETACHING, &d->flags);
		bcache_device_stop(d);
	}

	return size;
}
STORE_LOCKED(bch_flash_dev)

static struct attribute *bch_flash_dev_files[] = {
	&sysfs_unregister,
#if 0
	&sysfs_data_csum,
#endif
	&sysfs_label,
	&sysfs_size,
	NULL
};
KTYPE(bch_flash_dev);

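/*
 * Cache set attributes; these typically appear under
 * /sys/fs/bcache/<set-uuid>/ once a cache set is registered. The
 * helpers below are nested functions (a GCC extension) that each
 * derive one statistic from cache set state.
 */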
SHOW(__bch_cache_set)
{
	unsigned root_usage(struct cache_set *c)
	{
		unsigned bytes = 0;
		struct bkey *k;
		struct btree *b;
		struct btree_iter iter;

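		/*
		 * Take a read lock on the root; if the root was replaced
		 * while we slept on the lock, retry until b is still the
		 * current root after locking it.
		 */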
		goto lock_root;

		do {
			rw_unlock(false, b);
lock_root:
			b = c->root;
			rw_lock(false, b, b->level);
		} while (b != c->root);

		for_each_key_filter(b, k, &iter, bch_ptr_bad)
			bytes += bkey_bytes(k);

		rw_unlock(false, b);

		return (bytes * 100) / btree_bytes(c);
	}

	size_t cache_size(struct cache_set *c)
	{
		size_t ret = 0;
		struct btree *b;

		mutex_lock(&c->bucket_lock);
		list_for_each_entry(b, &c->btree_cache, list)
			ret += 1 << (b->page_order + PAGE_SHIFT);

		mutex_unlock(&c->bucket_lock);
		return ret;
	}

	unsigned cache_max_chain(struct cache_set *c)
	{
		unsigned ret = 0;
		struct hlist_head *h;

		mutex_lock(&c->bucket_lock);

		for (h = c->bucket_hash;
		     h < c->bucket_hash + (1 << BUCKET_HASH_BITS);
		     h++) {
			unsigned i = 0;
			struct hlist_node *p;

			hlist_for_each(p, h)
				i++;

			ret = max(ret, i);
		}

		mutex_unlock(&c->bucket_lock);
		return ret;
	}

	unsigned btree_used(struct cache_set *c)
	{
		return div64_u64(c->gc_stats.key_bytes * 100,
				 (c->gc_stats.nodes ?: 1) * btree_bytes(c));
	}

	unsigned average_key_size(struct cache_set *c)
	{
		return c->gc_stats.nkeys
			? div64_u64(c->gc_stats.data, c->gc_stats.nkeys)
			: 0;
	}

	struct cache_set *c = container_of(kobj, struct cache_set, kobj);

	sysfs_print(synchronous, CACHE_SYNC(&c->sb));
	sysfs_print(journal_delay_ms, c->journal_delay_ms);
	sysfs_hprint(bucket_size, bucket_bytes(c));
	sysfs_hprint(block_size, block_bytes(c));
	sysfs_print(tree_depth, c->root->level);
	sysfs_print(root_usage_percent, root_usage(c));

	sysfs_hprint(btree_cache_size, cache_size(c));
	sysfs_print(btree_cache_max_chain, cache_max_chain(c));
	sysfs_print(cache_available_percent, 100 - c->gc_stats.in_use);

	sysfs_print_time_stats(&c->btree_gc_time, btree_gc, sec, ms);
	sysfs_print_time_stats(&c->btree_split_time, btree_split, sec, us);
	sysfs_print_time_stats(&c->sort_time, btree_sort, ms, us);
	sysfs_print_time_stats(&c->btree_read_time, btree_read, ms, us);
	sysfs_print_time_stats(&c->try_harder_time, try_harder, ms, us);

	sysfs_print(btree_used_percent, btree_used(c));
	sysfs_print(btree_nodes, c->gc_stats.nodes);
	sysfs_hprint(average_key_size, average_key_size(c));

	sysfs_print(cache_read_races,
		    atomic_long_read(&c->cache_read_races));

	sysfs_print(writeback_keys_done,
		    atomic_long_read(&c->writeback_keys_done));
	sysfs_print(writeback_keys_failed,
		    atomic_long_read(&c->writeback_keys_failed));

	if (attr == &sysfs_errors)
		return bch_snprint_string_list(buf, PAGE_SIZE, error_actions,
					       c->on_error);

	/* See count_io_errors() for why 88 */
	sysfs_print(io_error_halflife, c->error_decay * 88);
	sysfs_print(io_error_limit, c->error_limit >> IO_ERROR_SHIFT);

	sysfs_hprint(congested,
		     ((uint64_t) bch_get_congested(c)) << 9);
	sysfs_print(congested_read_threshold_us,
		    c->congested_read_threshold_us);
	sysfs_print(congested_write_threshold_us,
		    c->congested_write_threshold_us);

	sysfs_print(active_journal_entries, fifo_used(&c->journal.pin));
	sysfs_printf(verify, "%i", c->verify);
	sysfs_printf(key_merging_disabled, "%i", c->key_merging_disabled);
	sysfs_printf(expensive_debug_checks,
		     "%i", c->expensive_debug_checks);
	sysfs_printf(gc_always_rewrite, "%i", c->gc_always_rewrite);
	sysfs_printf(btree_shrinker_disabled, "%i", c->shrinker_disabled);
	sysfs_printf(copy_gc_enabled, "%i", c->copy_gc_enabled);

	if (attr == &sysfs_bset_tree_stats)
		return bch_bset_print_stats(c, buf);

	return 0;
}
SHOW_LOCKED(bch_cache_set)

STORE(__bch_cache_set)
{
	struct cache_set *c = container_of(kobj, struct cache_set, kobj);

	if (attr == &sysfs_unregister)
		bch_cache_set_unregister(c);

	if (attr == &sysfs_stop)
		bch_cache_set_stop(c);

	if (attr == &sysfs_synchronous) {
		bool sync = strtoul_or_return(buf);

		if (sync != CACHE_SYNC(&c->sb)) {
			SET_CACHE_SYNC(&c->sb, sync);
			bcache_write_super(c);
		}
	}

	if (attr == &sysfs_flash_vol_create) {
		int r;
		uint64_t v;
		strtoi_h_or_return(buf, v);

		r = bch_flash_dev_create(c, v);
		if (r)
			return r;
	}

	if (attr == &sysfs_clear_stats) {
		atomic_long_set(&c->writeback_keys_done, 0);
		atomic_long_set(&c->writeback_keys_failed, 0);

		memset(&c->gc_stats, 0, sizeof(struct gc_stat));
		bch_cache_accounting_clear(&c->accounting);
	}

	if (attr == &sysfs_trigger_gc)
		wake_up_gc(c);

	if (attr == &sysfs_prune_cache) {
		struct shrink_control sc;
		sc.gfp_mask = GFP_KERNEL;
		sc.nr_to_scan = strtoul_or_return(buf);
		c->shrink.scan_objects(&c->shrink, &sc);
	}

	sysfs_strtoul(congested_read_threshold_us,
		      c->congested_read_threshold_us);
	sysfs_strtoul(congested_write_threshold_us,
		      c->congested_write_threshold_us);

	if (attr == &sysfs_errors) {
		ssize_t v = bch_read_string_list(buf, error_actions);

		if (v < 0)
			return v;

		c->on_error = v;
	}

	if (attr == &sysfs_io_error_limit)
		c->error_limit = strtoul_or_return(buf) << IO_ERROR_SHIFT;

	/* See count_io_errors() for why 88 */
	if (attr == &sysfs_io_error_halflife)
		c->error_decay = strtoul_or_return(buf) / 88;

	sysfs_strtoul(journal_delay_ms, c->journal_delay_ms);
	sysfs_strtoul(verify, c->verify);
	sysfs_strtoul(key_merging_disabled, c->key_merging_disabled);
	sysfs_strtoul(expensive_debug_checks, c->expensive_debug_checks);
	sysfs_strtoul(gc_always_rewrite, c->gc_always_rewrite);
	sysfs_strtoul(btree_shrinker_disabled, c->shrinker_disabled);
	sysfs_strtoul(copy_gc_enabled, c->copy_gc_enabled);

	return size;
}
STORE_LOCKED(bch_cache_set)

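/*
 * The "internal" kobject is a subdirectory of the cache set's sysfs
 * directory; its show/store simply redirect to the cache set handlers.
 */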
SHOW(bch_cache_set_internal)
{
	struct cache_set *c = container_of(kobj, struct cache_set, internal);
	return bch_cache_set_show(&c->kobj, attr, buf);
}

STORE(bch_cache_set_internal)
{
	struct cache_set *c = container_of(kobj, struct cache_set, internal);
	return bch_cache_set_store(&c->kobj, attr, buf, size);
}

static void bch_cache_set_internal_release(struct kobject *k)
{
}

static struct attribute *bch_cache_set_files[] = {
	&sysfs_unregister,
	&sysfs_stop,
	&sysfs_synchronous,
	&sysfs_journal_delay_ms,
	&sysfs_flash_vol_create,

	&sysfs_bucket_size,
	&sysfs_block_size,
	&sysfs_tree_depth,
	&sysfs_root_usage_percent,
	&sysfs_btree_cache_size,
	&sysfs_cache_available_percent,

	&sysfs_average_key_size,

	&sysfs_errors,
	&sysfs_io_error_limit,
	&sysfs_io_error_halflife,
	&sysfs_congested,
	&sysfs_congested_read_threshold_us,
	&sysfs_congested_write_threshold_us,
	&sysfs_clear_stats,
	NULL
};
KTYPE(bch_cache_set);

static struct attribute *bch_cache_set_internal_files[] = {
	&sysfs_active_journal_entries,

	sysfs_time_stats_attribute_list(btree_gc, sec, ms)
	sysfs_time_stats_attribute_list(btree_split, sec, us)
	sysfs_time_stats_attribute_list(btree_sort, ms, us)
	sysfs_time_stats_attribute_list(btree_read, ms, us)
	sysfs_time_stats_attribute_list(try_harder, ms, us)

	&sysfs_btree_nodes,
	&sysfs_btree_used_percent,
	&sysfs_btree_cache_max_chain,

	&sysfs_bset_tree_stats,
	&sysfs_cache_read_races,
	&sysfs_writeback_keys_done,
	&sysfs_writeback_keys_failed,

	&sysfs_trigger_gc,
	&sysfs_prune_cache,
#ifdef CONFIG_BCACHE_DEBUG
	&sysfs_verify,
	&sysfs_key_merging_disabled,
	&sysfs_expensive_debug_checks,
#endif
	&sysfs_gc_always_rewrite,
	&sysfs_btree_shrinker_disabled,
	&sysfs_copy_gc_enabled,
	NULL
};
KTYPE(bch_cache_set_internal);

SHOW(__bch_cache)
{
	struct cache *ca = container_of(kobj, struct cache, kobj);

	sysfs_hprint(bucket_size, bucket_bytes(ca));
	sysfs_hprint(block_size, block_bytes(ca));
	sysfs_print(nbuckets, ca->sb.nbuckets);
	sysfs_print(discard, ca->discard);
	sysfs_hprint(written, atomic_long_read(&ca->sectors_written) << 9);
	sysfs_hprint(btree_written,
		     atomic_long_read(&ca->btree_sectors_written) << 9);
	sysfs_hprint(metadata_written,
		     (atomic_long_read(&ca->meta_sectors_written) +
		      atomic_long_read(&ca->btree_sectors_written)) << 9);

	sysfs_print(io_errors,
		    atomic_read(&ca->io_errors) >> IO_ERROR_SHIFT);

	sysfs_print(freelist_percent, ca->free.size * 100 /
		    ((size_t) ca->sb.nbuckets));

	if (attr == &sysfs_cache_replacement_policy)
		return bch_snprint_string_list(buf, PAGE_SIZE,
					       cache_replacement_policies,
					       CACHE_REPLACEMENT(&ca->sb));

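	/*
	 * priority_stats snapshots every bucket's priority, sorts the
	 * copy in descending order, then reports how much of the cache
	 * is unused (prio 0) or btree metadata (BTREE_PRIO), plus the
	 * average priority and 31 quantiles over the cached data.
	 */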
	if (attr == &sysfs_priority_stats) {
		int cmp(const void *l, const void *r)
		{	return *((uint16_t *) r) - *((uint16_t *) l); }

		size_t n = ca->sb.nbuckets, i, unused, btree;
		uint64_t sum = 0;
		/* Compute 31 quantiles */
		uint16_t q[31], *p, *cached;
		ssize_t ret;

		cached = p = vmalloc(ca->sb.nbuckets * sizeof(uint16_t));
		if (!p)
			return -ENOMEM;

		mutex_lock(&ca->set->bucket_lock);
		for (i = ca->sb.first_bucket; i < n; i++)
			p[i] = ca->buckets[i].prio;
		mutex_unlock(&ca->set->bucket_lock);

		sort(p, n, sizeof(uint16_t), cmp, NULL);

		while (n &&
		       !cached[n - 1])
			--n;

		unused = ca->sb.nbuckets - n;

		while (cached < p + n &&
		       *cached == BTREE_PRIO)
			cached++;

		btree = cached - p;
		n -= btree;

		for (i = 0; i < n; i++)
			sum += INITIAL_PRIO - cached[i];

		if (n)
			do_div(sum, n);

		for (i = 0; i < ARRAY_SIZE(q); i++)
			q[i] = INITIAL_PRIO - cached[n * (i + 1) /
				(ARRAY_SIZE(q) + 1)];

		vfree(p);

		ret = scnprintf(buf, PAGE_SIZE,
				"Unused:\t\t%zu%%\n"
				"Metadata:\t%zu%%\n"
				"Average:\t%llu\n"
				"Sectors per Q:\t%zu\n"
				"Quantiles:\t[",
				unused * 100 / (size_t) ca->sb.nbuckets,
				btree * 100 / (size_t) ca->sb.nbuckets, sum,
				n * ca->sb.bucket_size / (ARRAY_SIZE(q) + 1));

		for (i = 0; i < ARRAY_SIZE(q); i++)
			ret += scnprintf(buf + ret, PAGE_SIZE - ret,
					 "%u ", q[i]);
		ret--;

		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "]\n");

		return ret;
	}

	return 0;
}
SHOW_LOCKED(bch_cache)

STORE(__bch_cache)
{
	struct cache *ca = container_of(kobj, struct cache, kobj);

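	/*
	 * discard is only enabled in memory when the underlying queue
	 * actually supports it, but the on-disk flag is updated either
	 * way so the setting persists across reboots.
	 */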
	if (attr == &sysfs_discard) {
		bool v = strtoul_or_return(buf);

		if (blk_queue_discard(bdev_get_queue(ca->bdev)))
			ca->discard = v;

		if (v != CACHE_DISCARD(&ca->sb)) {
			SET_CACHE_DISCARD(&ca->sb, v);
			bcache_write_super(ca->set);
		}
	}

	if (attr == &sysfs_cache_replacement_policy) {
		ssize_t v = bch_read_string_list(buf, cache_replacement_policies);

		if (v < 0)
			return v;

		if ((unsigned) v != CACHE_REPLACEMENT(&ca->sb)) {
			mutex_lock(&ca->set->bucket_lock);
			SET_CACHE_REPLACEMENT(&ca->sb, v);
			mutex_unlock(&ca->set->bucket_lock);

			bcache_write_super(ca->set);
		}
	}

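	/*
	 * freelist_percent resizes the free-bucket FIFO: allocate a new
	 * FIFO of the requested (clamped) size, swap it in under
	 * bucket_lock, then unpin whatever buckets were left behind in
	 * the old FIFO before freeing it.
	 */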
	if (attr == &sysfs_freelist_percent) {
		DECLARE_FIFO(long, free);
		long i;
		size_t p = strtoul_or_return(buf);

		p = clamp_t(size_t,
			    ((size_t) ca->sb.nbuckets * p) / 100,
			    roundup_pow_of_two(ca->sb.nbuckets) >> 9,
			    ca->sb.nbuckets / 2);

		if (!init_fifo_exact(&free, p, GFP_KERNEL))
			return -ENOMEM;

		mutex_lock(&ca->set->bucket_lock);

		fifo_move(&free, &ca->free);
		fifo_swap(&free, &ca->free);

		mutex_unlock(&ca->set->bucket_lock);

		while (fifo_pop(&free, i))
			atomic_dec(&ca->buckets[i].pin);

		free_fifo(&free);
	}

	if (attr == &sysfs_clear_stats) {
		atomic_long_set(&ca->sectors_written, 0);
		atomic_long_set(&ca->btree_sectors_written, 0);
		atomic_long_set(&ca->meta_sectors_written, 0);
		atomic_set(&ca->io_count, 0);
		atomic_set(&ca->io_errors, 0);
	}

	return size;
}
STORE_LOCKED(bch_cache)

static struct attribute *bch_cache_files[] = {
	&sysfs_bucket_size,
	&sysfs_block_size,
	&sysfs_nbuckets,
	&sysfs_priority_stats,
	&sysfs_discard,
	&sysfs_written,
	&sysfs_btree_written,
	&sysfs_metadata_written,
	&sysfs_io_errors,
	&sysfs_clear_stats,
	&sysfs_freelist_percent,
	&sysfs_cache_replacement_policy,
	NULL
};
KTYPE(bch_cache);