/*
 * Block stat tracking code
 *
 * Copyright (C) 2016 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/rculist.h>
#include <linux/blk-mq.h>

#include "blk-stat.h"
#include "blk-mq.h"
#include "blk.h"
#define BLK_RQ_STAT_BATCH	64
struct blk_queue_stats {
	struct list_head callbacks;
	spinlock_t lock;
	bool enable_accounting;
};
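/*
 * Reset one stat bucket. ->min starts at the largest u64 value so the
 * first recorded sample always replaces it.
 */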
static void blk_stat_init(struct blk_rq_stat *stat)
{
	stat->min = -1ULL;
	stat->max = stat->nr_samples = stat->mean = 0;
	stat->batch = stat->nr_batch = 0;
}
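/*
 * Fold the pending batch into the running mean. Samples are summed into
 * ->batch and only divided here, so the 64-bit division runs at most
 * once per BLK_RQ_STAT_BATCH samples:
 *
 *	mean' = (mean * nr_samples + batch) / (nr_samples + nr_batch)
 */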
static void blk_stat_flush_batch(struct blk_rq_stat *stat)
{
	const s32 nr_batch = READ_ONCE(stat->nr_batch);
	const s32 nr_samples = READ_ONCE(stat->nr_samples);

	if (!nr_batch)
		return;
	if (!nr_samples)
		stat->mean = div64_s64(stat->batch, nr_batch);
	else {
		stat->mean = div64_s64((stat->mean * nr_samples) +
					stat->batch,
					nr_batch + nr_samples);
	}

	stat->nr_samples += nr_batch;
	stat->nr_batch = stat->batch = 0;
}
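/*
 * Merge a per-cpu bucket into the aggregate one, weighting each mean by
 * its sample count.
 */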
static void blk_stat_sum(struct blk_rq_stat *dst, struct blk_rq_stat *src)
{
	blk_stat_flush_batch(src);

	if (!src->nr_samples)
		return;

	dst->min = min(dst->min, src->min);
	dst->max = max(dst->max, src->max);

	if (!dst->nr_samples)
		dst->mean = src->mean;
	else {
		dst->mean = div64_s64((src->mean * src->nr_samples) +
					(dst->mean * dst->nr_samples),
					dst->nr_samples + src->nr_samples);
	}

	dst->nr_samples += src->nr_samples;
}
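/*
 * Record one sample. The batch is flushed early if adding the value
 * would wrap ->batch or if this sample would fill the batch.
 */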
static void __blk_stat_add(struct blk_rq_stat *stat, u64 value)
{
	stat->min = min(stat->min, value);
	stat->max = max(stat->max, value);

	if (stat->batch + value < stat->batch ||
	    stat->nr_batch + 1 == BLK_RQ_STAT_BATCH)
		blk_stat_flush_batch(stat);

	stat->batch += value;
	stat->nr_batch++;
}
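/*
 * Per-request completion hook: compute the issue-to-completion time and
 * feed it to the matching per-cpu bucket of every active callback.
 */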
void blk_stat_add(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct blk_stat_callback *cb;
	struct blk_rq_stat *stat;
	int bucket;
	s64 now, value;

	now = __blk_stat_time(ktime_to_ns(ktime_get()));
	if (now < blk_stat_time(&rq->issue_stat))
		return;

	value = now - blk_stat_time(&rq->issue_stat);

	blk_throtl_stat_add(rq, value);

	rcu_read_lock();
	list_for_each_entry_rcu(cb, &q->stats->callbacks, list) {
		if (blk_stat_is_active(cb)) {
			bucket = cb->bucket_fn(rq);
			if (bucket < 0)
				continue;
			stat = &this_cpu_ptr(cb->cpu_stat)[bucket];
			__blk_stat_add(stat, value);
		}
	}
	rcu_read_unlock();
}
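/*
 * Timer handler: fold every CPU's buckets into cb->stat, reset the
 * per-cpu copies, then hand the aggregated window to the user's
 * timer_fn.
 */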
static void blk_stat_timer_fn(unsigned long data)
{
	struct blk_stat_callback *cb = (void *)data;
	unsigned int bucket;
	int cpu;

	for (bucket = 0; bucket < cb->buckets; bucket++)
		blk_stat_init(&cb->stat[bucket]);

	for_each_online_cpu(cpu) {
		struct blk_rq_stat *cpu_stat;

		cpu_stat = per_cpu_ptr(cb->cpu_stat, cpu);
		for (bucket = 0; bucket < cb->buckets; bucket++) {
			blk_stat_sum(&cb->stat[bucket], &cpu_stat[bucket]);
			blk_stat_init(&cpu_stat[bucket]);
		}
	}

	cb->timer_fn(cb);
}
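/**
 * blk_stat_alloc_callback() - Allocate a block statistics callback.
 * @timer_fn: Called when the callback's timer fires, with the buckets
 *	aggregated across all CPUs.
 * @bucket_fn: Maps a request to a bucket index; a negative return means
 *	the request is not accounted.
 * @buckets: Number of statistics buckets.
 * @data: Opaque value stored in the callback for the caller's use.
 *
 * Return: the new callback on success, NULL on allocation failure.
 */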
struct blk_stat_callback *
blk_stat_alloc_callback(void (*timer_fn)(struct blk_stat_callback *),
			int (*bucket_fn)(const struct request *),
			unsigned int buckets, void *data)
{
	struct blk_stat_callback *cb;

	cb = kmalloc(sizeof(*cb), GFP_KERNEL);
	if (!cb)
		return NULL;

	cb->stat = kmalloc_array(buckets, sizeof(struct blk_rq_stat),
				 GFP_KERNEL);
	if (!cb->stat) {
		kfree(cb);
		return NULL;
	}
	cb->cpu_stat = __alloc_percpu(buckets * sizeof(struct blk_rq_stat),
				      __alignof__(struct blk_rq_stat));
	if (!cb->cpu_stat) {
		kfree(cb->stat);
		kfree(cb);
		return NULL;
	}

	cb->timer_fn = timer_fn;
	cb->bucket_fn = bucket_fn;
	cb->data = data;
	cb->buckets = buckets;
	setup_timer(&cb->timer, blk_stat_timer_fn, (unsigned long)cb);

	return cb;
}
EXPORT_SYMBOL_GPL(blk_stat_alloc_callback);
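/*
 * Usage sketch with hypothetical names (a wbt-style consumer splitting
 * reads and writes, and assuming blk_stat_activate_msecs() from
 * blk-stat.h to arm the timer):
 *
 *	static int example_bucket(const struct request *rq)
 *	{
 *		return op_is_write(req_op(rq));
 *	}
 *
 *	static void example_timer(struct blk_stat_callback *cb)
 *	{
 *		pr_info("read mean %lld, write mean %lld\n",
 *			cb->stat[0].mean, cb->stat[1].mean);
 *	}
 *
 *	cb = blk_stat_alloc_callback(example_timer, example_bucket, 2, NULL);
 *	blk_stat_add_callback(q, cb);
 *	blk_stat_activate_msecs(cb, 100);
 */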
void blk_stat_add_callback(struct request_queue *q,
			   struct blk_stat_callback *cb)
{
	unsigned int bucket;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct blk_rq_stat *cpu_stat;

		cpu_stat = per_cpu_ptr(cb->cpu_stat, cpu);
		for (bucket = 0; bucket < cb->buckets; bucket++)
			blk_stat_init(&cpu_stat[bucket]);
	}

	spin_lock(&q->stats->lock);
	list_add_tail_rcu(&cb->list, &q->stats->callbacks);
	set_bit(QUEUE_FLAG_STATS, &q->queue_flags);
	spin_unlock(&q->stats->lock);
}
EXPORT_SYMBOL_GPL(blk_stat_add_callback);
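/*
 * Unlink the callback; QUEUE_FLAG_STATS is cleared once no callbacks or
 * accounting users remain. del_timer_sync() guarantees timer_fn is not
 * running when this returns.
 */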
void blk_stat_remove_callback(struct request_queue *q,
			      struct blk_stat_callback *cb)
{
	spin_lock(&q->stats->lock);
	list_del_rcu(&cb->list);
	if (list_empty(&q->stats->callbacks) && !q->stats->enable_accounting)
		clear_bit(QUEUE_FLAG_STATS, &q->queue_flags);
	spin_unlock(&q->stats->lock);

	del_timer_sync(&cb->timer);
}
EXPORT_SYMBOL_GPL(blk_stat_remove_callback);
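/*
 * RCU callback: the actual free runs after a grace period, once all
 * list_for_each_entry_rcu() walkers in blk_stat_add() are done with the
 * callback.
 */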
static void blk_stat_free_callback_rcu(struct rcu_head *head)
{
	struct blk_stat_callback *cb;

	cb = container_of(head, struct blk_stat_callback, rcu);
	free_percpu(cb->cpu_stat);
	kfree(cb->stat);
	kfree(cb);
}
void blk_stat_free_callback(struct blk_stat_callback *cb)
{
	if (cb)
		call_rcu(&cb->rcu, blk_stat_free_callback_rcu);
}
EXPORT_SYMBOL_GPL(blk_stat_free_callback);
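/*
 * Keep statistics enabled for this queue even with no callbacks
 * registered; blk_stat_remove_callback() then leaves QUEUE_FLAG_STATS
 * set.
 */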
void blk_stat_enable_accounting(struct request_queue *q)
{
	spin_lock(&q->stats->lock);
	q->stats->enable_accounting = true;
	set_bit(QUEUE_FLAG_STATS, &q->queue_flags);
	spin_unlock(&q->stats->lock);
}
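/* Allocate and initialize the per-queue callback-list container. */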
struct blk_queue_stats *blk_alloc_queue_stats(void)
{
	struct blk_queue_stats *stats;

	stats = kmalloc(sizeof(*stats), GFP_KERNEL);
	if (!stats)
		return NULL;

	INIT_LIST_HEAD(&stats->callbacks);
	spin_lock_init(&stats->lock);
	stats->enable_accounting = false;

	return stats;
}
void blk_free_queue_stats(struct blk_queue_stats *stats)
{
	if (!stats)
		return;

	WARN_ON(!list_empty(&stats->callbacks));

	kfree(stats);
}