/*
 * Block stat tracking code
 *
 * Copyright (C) 2016 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/rculist.h>
#include <linux/blk-mq.h>

#include "blk-stat.h"
#include "blk-mq.h"
#include "blk.h"

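/*
 * Incoming samples are first summed into a per-CPU batch (the batch and
 * nr_batch fields of struct blk_rq_stat) and only folded into the running
 * mean once the batch is full or about to overflow, so the per-sample cost
 * stays at a few additions and comparisons.
 */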
#define BLK_RQ_STAT_BATCH 64

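/*
 * Per-queue statistics state: the list of registered callbacks, a lock
 * serializing updates to it, and a flag that keeps accounting enabled even
 * when no callbacks are registered (see blk_stat_enable_accounting()).
 */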
struct blk_queue_stats {
        struct list_head callbacks;
        spinlock_t lock;
        bool enable_accounting;
};

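/*
 * Default bucket function: bucket a request by its data direction
 * (0 = read, 1 = write). The return type is signed so that a bucket
 * function can return a negative value to make blk_stat_add() skip a
 * request entirely.
 */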
int blk_stat_rq_ddir(const struct request *rq)
{
        return rq_data_dir(rq);
}
EXPORT_SYMBOL_GPL(blk_stat_rq_ddir);

static void blk_stat_init(struct blk_rq_stat *stat)
{
        stat->min = -1ULL;      /* all ones, so the first sample wins min() */
        stat->max = stat->nr_samples = stat->mean = 0;
        stat->batch = stat->nr_batch = 0;
}

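/*
 * Fold the pending batch into the running statistics. The new mean is the
 * sample-count-weighted average
 *
 *      mean' = (mean * nr_samples + batch) / (nr_samples + nr_batch)
 *
 * where batch is the sum of the values accumulated since the last flush.
 */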
static void blk_stat_flush_batch(struct blk_rq_stat *stat)
{
        const s32 nr_batch = READ_ONCE(stat->nr_batch);
        const s32 nr_samples = READ_ONCE(stat->nr_samples);

        if (!nr_batch)
                return;
        if (!nr_samples)
                stat->mean = div64_s64(stat->batch, nr_batch);
        else {
                stat->mean = div64_s64((stat->mean * nr_samples) +
                                        stat->batch,
                                        nr_batch + nr_samples);
        }

        stat->nr_samples += nr_batch;
        stat->nr_batch = stat->batch = 0;
}

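/*
 * Merge @src into @dst: min and max combine directly, and the two means
 * are again merged as an average weighted by their sample counts.
 */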
static void blk_stat_sum(struct blk_rq_stat *dst, struct blk_rq_stat *src)
{
        blk_stat_flush_batch(src);

        if (!src->nr_samples)
                return;

        dst->min = min(dst->min, src->min);
        dst->max = max(dst->max, src->max);

        if (!dst->nr_samples)
                dst->mean = src->mean;
        else {
                dst->mean = div64_s64((src->mean * src->nr_samples) +
                                        (dst->mean * dst->nr_samples),
                                        dst->nr_samples + src->nr_samples);
        }
        dst->nr_samples += src->nr_samples;
}

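/*
 * Account a single value. The batch is flushed first if adding @value
 * would wrap the batch sum or if the batch already holds
 * BLK_RQ_STAT_BATCH - 1 samples.
 */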
static void __blk_stat_add(struct blk_rq_stat *stat, u64 value)
{
        stat->min = min(stat->min, value);
        stat->max = max(stat->max, value);

        if (stat->batch + value < stat->batch ||
            stat->nr_batch + 1 == BLK_RQ_STAT_BATCH)
                blk_stat_flush_batch(stat);

        stat->batch += value;
        stat->nr_batch++;
}

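/*
 * Called on request completion: compute the nanoseconds between issue and
 * completion and feed the value to every active callback, using the
 * callback's bucket function to pick the per-CPU bucket. A negative bucket
 * means the callback is not interested in this request.
 */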
void blk_stat_add(struct request *rq)
{
        struct request_queue *q = rq->q;
        struct blk_stat_callback *cb;
        struct blk_rq_stat *stat;
        int bucket;
        s64 now, value;

        now = __blk_stat_time(ktime_to_ns(ktime_get()));
        if (now < blk_stat_time(&rq->issue_stat))
                return;         /* don't account a negative delta */

        value = now - blk_stat_time(&rq->issue_stat);

        blk_throtl_stat_add(rq, value);

        rcu_read_lock();
        list_for_each_entry_rcu(cb, &q->stats->callbacks, list) {
                if (blk_stat_is_active(cb)) {
                        bucket = cb->bucket_fn(rq);
                        if (bucket < 0)
                                continue;
                        stat = &this_cpu_ptr(cb->cpu_stat)[bucket];
                        __blk_stat_add(stat, value);
                }
        }
        rcu_read_unlock();
}

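/*
 * Timer callback: reset the aggregate buckets, fold each CPU's buckets
 * into them (reinitializing the per-CPU state as we go), then hand the
 * aggregated result to the user's timer_fn.
 */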
static void blk_stat_timer_fn(unsigned long data)
{
        struct blk_stat_callback *cb = (void *)data;
        unsigned int bucket;
        int cpu;

        for (bucket = 0; bucket < cb->buckets; bucket++)
                blk_stat_init(&cb->stat[bucket]);

        for_each_online_cpu(cpu) {
                struct blk_rq_stat *cpu_stat;

                cpu_stat = per_cpu_ptr(cb->cpu_stat, cpu);
                for (bucket = 0; bucket < cb->buckets; bucket++) {
                        blk_stat_sum(&cb->stat[bucket], &cpu_stat[bucket]);
                        blk_stat_init(&cpu_stat[bucket]);
                }
        }

        cb->timer_fn(cb);
}

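/**
 * blk_stat_alloc_callback() - Allocate a block statistics callback.
 * @timer_fn: Timer callback function.
 * @bucket_fn: Bucket callback function.
 * @buckets: Number of statistics buckets.
 * @data: Value for the @data field of the &struct blk_stat_callback.
 *
 * See &struct blk_stat_callback for details on the callback mechanism.
 *
 * Return: &struct blk_stat_callback on success or NULL on a failure.
 */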
struct blk_stat_callback *
blk_stat_alloc_callback(void (*timer_fn)(struct blk_stat_callback *),
                        int (*bucket_fn)(const struct request *),
                        unsigned int buckets, void *data)
{
        struct blk_stat_callback *cb;

        cb = kmalloc(sizeof(*cb), GFP_KERNEL);
        if (!cb)
                return NULL;

        cb->stat = kmalloc_array(buckets, sizeof(struct blk_rq_stat),
                                 GFP_KERNEL);
        if (!cb->stat) {
                kfree(cb);
                return NULL;
        }
        cb->cpu_stat = __alloc_percpu(buckets * sizeof(struct blk_rq_stat),
                                      __alignof__(struct blk_rq_stat));
        if (!cb->cpu_stat) {
                kfree(cb->stat);
                kfree(cb);
                return NULL;
        }

        cb->timer_fn = timer_fn;
        cb->bucket_fn = bucket_fn;
        cb->data = data;
        cb->buckets = buckets;
        setup_timer(&cb->timer, blk_stat_timer_fn, (unsigned long)cb);

        return cb;
}
EXPORT_SYMBOL_GPL(blk_stat_alloc_callback);
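
/*
 * Typical usage, as a rough sketch (my_timer_fn, use() and the 100 ms
 * window are illustrative only; in-tree users such as blk-wbt and kyber
 * follow this general pattern):
 *
 *      static void my_timer_fn(struct blk_stat_callback *cb)
 *      {
 *              if (cb->stat[READ].nr_samples)
 *                      use(cb->stat[READ].mean);       // also .min, .max
 *              blk_stat_activate_msecs(cb, 100);       // re-arm the window
 *      }
 *
 *      cb = blk_stat_alloc_callback(my_timer_fn, blk_stat_rq_ddir, 2, NULL);
 *      blk_stat_add_callback(q, cb);
 *      blk_stat_activate_msecs(cb, 100);
 *
 * blk_stat_activate_msecs() is the blk-stat.h helper that arms the
 * callback's timer; a callback only accounts requests while that timer is
 * pending.
 */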
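
/**
 * blk_stat_add_callback() - Add a block statistics callback to be run on a
 * request queue.
 * @q: The request queue.
 * @cb: The callback.
 *
 * Resets @cb's per-CPU buckets, publishes it on the queue's callback list,
 * and sets QUEUE_FLAG_STATS so that completions start being accounted.
 */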
void blk_stat_add_callback(struct request_queue *q,
                           struct blk_stat_callback *cb)
{
        unsigned int bucket;
        int cpu;

        for_each_possible_cpu(cpu) {
                struct blk_rq_stat *cpu_stat;

                cpu_stat = per_cpu_ptr(cb->cpu_stat, cpu);
                for (bucket = 0; bucket < cb->buckets; bucket++)
                        blk_stat_init(&cpu_stat[bucket]);
        }

        spin_lock(&q->stats->lock);
        list_add_tail_rcu(&cb->list, &q->stats->callbacks);
        set_bit(QUEUE_FLAG_STATS, &q->queue_flags);
        spin_unlock(&q->stats->lock);
}
EXPORT_SYMBOL_GPL(blk_stat_add_callback);

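/**
 * blk_stat_remove_callback() - Remove a block statistics callback from a
 * request queue.
 * @q: The request queue.
 * @cb: The callback.
 *
 * When this returns, the timer callback is no longer running and the
 * callback is off the queue's list, so it is safe to free @cb with
 * blk_stat_free_callback().
 */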
void blk_stat_remove_callback(struct request_queue *q,
                              struct blk_stat_callback *cb)
{
        spin_lock(&q->stats->lock);
        list_del_rcu(&cb->list);
        if (list_empty(&q->stats->callbacks) && !q->stats->enable_accounting)
                clear_bit(QUEUE_FLAG_STATS, &q->queue_flags);
        spin_unlock(&q->stats->lock);

        del_timer_sync(&cb->timer);
}
EXPORT_SYMBOL_GPL(blk_stat_remove_callback);

static void blk_stat_free_callback_rcu(struct rcu_head *head)
{
        struct blk_stat_callback *cb;

        cb = container_of(head, struct blk_stat_callback, rcu);
        free_percpu(cb->cpu_stat);
        kfree(cb->stat);
        kfree(cb);
}

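/**
 * blk_stat_free_callback() - Free a block statistics callback.
 * @cb: The callback, may be NULL.
 *
 * The callback must have already been removed from its request queue. The
 * actual freeing is deferred by an RCU grace period so that a concurrent
 * blk_stat_add() walking the callback list under rcu_read_lock() never
 * touches freed memory.
 */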
void blk_stat_free_callback(struct blk_stat_callback *cb)
{
        if (cb)
                call_rcu(&cb->rcu, blk_stat_free_callback_rcu);
}
EXPORT_SYMBOL_GPL(blk_stat_free_callback);

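/*
 * Permanently enable request statistics on @q: enable_accounting keeps
 * QUEUE_FLAG_STATS from being cleared even after the last callback is
 * removed. Meant for users (blk-throttle, for example) that consume
 * completion samples directly rather than through a callback.
 */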
void blk_stat_enable_accounting(struct request_queue *q)
{
        spin_lock(&q->stats->lock);
        q->stats->enable_accounting = true;
        set_bit(QUEUE_FLAG_STATS, &q->queue_flags);
        spin_unlock(&q->stats->lock);
}

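/*
 * Allocate the per-queue statistics state that backs q->stats; the
 * counterpart of blk_free_queue_stats() below. Returns NULL on allocation
 * failure.
 */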
struct blk_queue_stats *blk_alloc_queue_stats(void)
{
        struct blk_queue_stats *stats;

        stats = kmalloc(sizeof(*stats), GFP_KERNEL);
        if (!stats)
                return NULL;

        INIT_LIST_HEAD(&stats->callbacks);
        spin_lock_init(&stats->lock);
        stats->enable_accounting = false;

        return stats;
}

void blk_free_queue_stats(struct blk_queue_stats *stats)
{
        if (!stats)
                return;

        WARN_ON(!list_empty(&stats->callbacks));

        kfree(stats);
}