4 * Copyright 2012 Google, Inc.
 * We keep absolute totals of various statistics, and additionally a set of
 * three rolling averages.
17 * Every so often, a timer goes off and rescales the rolling averages.
 * The *_RESCALE constants are how many times the timer has to go off before we
 * rescale each set of numbers; that gets us half lives of 5 minutes, one hour,
 * and one day.
22 * accounting_delay is how often the timer goes off - 22 times in 5 minutes,
23 * and accounting_weight is what we use to rescale:
25 * pow(31 / 32, 22) ~= 1/2
27 * So that we don't have to increment each set of numbers every time we (say)
28 * get a cache hit, we increment a single atomic_t in acc->collector, and when
29 * the rescale function runs it resets the atomic counter to 0 and adds its
30 * old value to each of the exported numbers.
32 * To reduce rounding error, the numbers in struct cache_stats are all
33 * stored left shifted by 16, and scaled back in the sysfs show() function.
36 static const unsigned DAY_RESCALE
= 288;
37 static const unsigned HOUR_RESCALE
= 12;
38 static const unsigned FIVE_MINUTE_RESCALE
= 1;
39 static const unsigned accounting_delay
= (HZ
* 300) / 22;
40 static const unsigned accounting_weight
= 32;
/* sysfs reading/writing */

read_attribute(cache_hits);
read_attribute(cache_misses);
read_attribute(cache_bypass_hits);
read_attribute(cache_bypass_misses);
read_attribute(cache_hit_ratio);
read_attribute(cache_readaheads);
read_attribute(cache_miss_collisions);
read_attribute(bypassed);
55 struct cache_stats
*s
=
56 container_of(kobj
, struct cache_stats
, kobj
);
57 #define var(stat) (s->stat >> 16)
58 var_print(cache_hits
);
59 var_print(cache_misses
);
60 var_print(cache_bypass_hits
);
61 var_print(cache_bypass_misses
);
63 sysfs_print(cache_hit_ratio
,
64 DIV_SAFE(var(cache_hits
) * 100,
65 var(cache_hits
) + var(cache_misses
)));
67 var_print(cache_readaheads
);
68 var_print(cache_miss_collisions
);
69 sysfs_hprint(bypassed
, var(sectors_bypassed
) << 9);
/*
 * No-op release: struct cache_stats is embedded in struct cache_accounting,
 * whose lifetime is managed separately, so there is nothing to free here.
 */
static void bch_stats_release(struct kobject *k)
{
}
83 static struct attribute
*bch_stats_files
[] = {
86 &sysfs_cache_bypass_hits
,
87 &sysfs_cache_bypass_misses
,
88 &sysfs_cache_hit_ratio
,
89 &sysfs_cache_readaheads
,
90 &sysfs_cache_miss_collisions
,
94 static KTYPE(bch_stats
);
96 static void scale_accounting(unsigned long data
);
98 void bch_cache_accounting_init(struct cache_accounting
*acc
,
99 struct closure
*parent
)
101 kobject_init(&acc
->total
.kobj
, &bch_stats_ktype
);
102 kobject_init(&acc
->five_minute
.kobj
, &bch_stats_ktype
);
103 kobject_init(&acc
->hour
.kobj
, &bch_stats_ktype
);
104 kobject_init(&acc
->day
.kobj
, &bch_stats_ktype
);
106 closure_init(&acc
->cl
, parent
);
107 init_timer(&acc
->timer
);
108 acc
->timer
.expires
= jiffies
+ accounting_delay
;
109 acc
->timer
.data
= (unsigned long) acc
;
110 acc
->timer
.function
= scale_accounting
;
111 add_timer(&acc
->timer
);
114 int bch_cache_accounting_add_kobjs(struct cache_accounting
*acc
,
115 struct kobject
*parent
)
117 int ret
= kobject_add(&acc
->total
.kobj
, parent
,
119 ret
= ret
?: kobject_add(&acc
->five_minute
.kobj
, parent
,
120 "stats_five_minute");
121 ret
= ret
?: kobject_add(&acc
->hour
.kobj
, parent
,
123 ret
= ret
?: kobject_add(&acc
->day
.kobj
, parent
,
128 void bch_cache_accounting_clear(struct cache_accounting
*acc
)
130 memset(&acc
->total
.cache_hits
,
132 sizeof(unsigned long) * 7);
135 void bch_cache_accounting_destroy(struct cache_accounting
*acc
)
137 kobject_put(&acc
->total
.kobj
);
138 kobject_put(&acc
->five_minute
.kobj
);
139 kobject_put(&acc
->hour
.kobj
);
140 kobject_put(&acc
->day
.kobj
);
142 atomic_set(&acc
->closing
, 1);
143 if (del_timer_sync(&acc
->timer
))
144 closure_return(&acc
->cl
);
149 static void scale_stat(unsigned long *stat
)
151 *stat
= ewma_add(*stat
, 0, accounting_weight
, 0);
154 static void scale_stats(struct cache_stats
*stats
, unsigned long rescale_at
)
156 if (++stats
->rescale
== rescale_at
) {
158 scale_stat(&stats
->cache_hits
);
159 scale_stat(&stats
->cache_misses
);
160 scale_stat(&stats
->cache_bypass_hits
);
161 scale_stat(&stats
->cache_bypass_misses
);
162 scale_stat(&stats
->cache_readaheads
);
163 scale_stat(&stats
->cache_miss_collisions
);
164 scale_stat(&stats
->sectors_bypassed
);
168 static void scale_accounting(unsigned long data
)
170 struct cache_accounting
*acc
= (struct cache_accounting
*) data
;
172 #define move_stat(name) do { \
173 unsigned t = atomic_xchg(&acc->collector.name, 0); \
175 acc->five_minute.name += t; \
176 acc->hour.name += t; \
177 acc->day.name += t; \
178 acc->total.name += t; \
181 move_stat(cache_hits
);
182 move_stat(cache_misses
);
183 move_stat(cache_bypass_hits
);
184 move_stat(cache_bypass_misses
);
185 move_stat(cache_readaheads
);
186 move_stat(cache_miss_collisions
);
187 move_stat(sectors_bypassed
);
189 scale_stats(&acc
->total
, 0);
190 scale_stats(&acc
->day
, DAY_RESCALE
);
191 scale_stats(&acc
->hour
, HOUR_RESCALE
);
192 scale_stats(&acc
->five_minute
, FIVE_MINUTE_RESCALE
);
194 acc
->timer
.expires
+= accounting_delay
;
196 if (!atomic_read(&acc
->closing
))
197 add_timer(&acc
->timer
);
199 closure_return(&acc
->cl
);
202 static void mark_cache_stats(struct cache_stat_collector
*stats
,
203 bool hit
, bool bypass
)
207 atomic_inc(&stats
->cache_hits
);
209 atomic_inc(&stats
->cache_misses
);
212 atomic_inc(&stats
->cache_bypass_hits
);
214 atomic_inc(&stats
->cache_bypass_misses
);
217 void bch_mark_cache_accounting(struct search
*s
, bool hit
, bool bypass
)
219 struct cached_dev
*dc
= container_of(s
->d
, struct cached_dev
, disk
);
220 mark_cache_stats(&dc
->accounting
.collector
, hit
, bypass
);
221 mark_cache_stats(&s
->op
.c
->accounting
.collector
, hit
, bypass
);
222 #ifdef CONFIG_CGROUP_BCACHE
223 mark_cache_stats(&(bch_bio_to_cgroup(s
->orig_bio
)->stats
), hit
, bypass
);
227 void bch_mark_cache_readahead(struct search
*s
)
229 struct cached_dev
*dc
= container_of(s
->d
, struct cached_dev
, disk
);
230 atomic_inc(&dc
->accounting
.collector
.cache_readaheads
);
231 atomic_inc(&s
->op
.c
->accounting
.collector
.cache_readaheads
);
234 void bch_mark_cache_miss_collision(struct search
*s
)
236 struct cached_dev
*dc
= container_of(s
->d
, struct cached_dev
, disk
);
237 atomic_inc(&dc
->accounting
.collector
.cache_miss_collisions
);
238 atomic_inc(&s
->op
.c
->accounting
.collector
.cache_miss_collisions
);
241 void bch_mark_sectors_bypassed(struct search
*s
, int sectors
)
243 struct cached_dev
*dc
= container_of(s
->d
, struct cached_dev
, disk
);
244 atomic_add(sectors
, &dc
->accounting
.collector
.sectors_bypassed
);
245 atomic_add(sectors
, &s
->op
.c
->accounting
.collector
.sectors_bypassed
);