/*
 * block/blk-stat.c - Block stat tracking code
 *
 * Copyright (C) 2016 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/blk-mq.h>

#include "blk-stat.h"
#include "blk-mq.h"

#define BLK_RQ_STAT_BATCH 64

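/*
 * Fold any batched samples into the running statistics. Samples are first
 * accumulated as a sum in ->batch/->nr_batch; here the running mean is
 * recomputed as a weighted average of the old mean and the batch, and the
 * batch is reset.
 */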
static void blk_stat_flush_batch(struct blk_rq_stat *stat)
{
	const s32 nr_batch = READ_ONCE(stat->nr_batch);
	const s32 nr_samples = READ_ONCE(stat->nr_samples);

	if (!nr_batch)
		return;
	if (!nr_samples)
		stat->mean = div64_s64(stat->batch, nr_batch);
	else {
		stat->mean = div64_s64((stat->mean * nr_samples) +
					stat->batch,
					nr_batch + nr_samples);
	}

	stat->nr_samples += nr_batch;
	stat->nr_batch = stat->batch = 0;
}

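/*
 * Merge the samples of @src into @dst: min/max are combined directly and
 * the means are merged as an average weighted by each side's sample count.
 */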
static void blk_stat_sum(struct blk_rq_stat *dst, struct blk_rq_stat *src)
{
	blk_stat_flush_batch(src);

	if (!src->nr_samples)
		return;

	dst->min = min(dst->min, src->min);
	dst->max = max(dst->max, src->max);

	if (!dst->nr_samples)
		dst->mean = src->mean;
	else {
		dst->mean = div64_s64((src->mean * src->nr_samples) +
					(dst->mean * dst->nr_samples),
					dst->nr_samples + src->nr_samples);
	}
	dst->nr_samples += src->nr_samples;
}

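/*
 * Collect the per-ctx READ/WRITE stats of a blk-mq queue into dst[READ]
 * and dst[WRITE]. Only software contexts whose stats belong to the newest
 * time window seen in a pass are summed; if a window change races with the
 * scan and nothing gets collected, the scan is retried.
 */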
static void blk_mq_stat_get(struct request_queue *q, struct blk_rq_stat *dst)
{
	struct blk_mq_hw_ctx *hctx;
	struct blk_mq_ctx *ctx;
	uint64_t latest = 0;
	int i, j, nr;

	blk_stat_init(&dst[READ]);
	blk_stat_init(&dst[WRITE]);

	nr = 0;
	do {
		uint64_t newest = 0;

		queue_for_each_hw_ctx(q, hctx, i) {
			hctx_for_each_ctx(hctx, ctx, j) {
				blk_stat_flush_batch(&ctx->stat[READ]);
				blk_stat_flush_batch(&ctx->stat[WRITE]);

				if (!ctx->stat[READ].nr_samples &&
				    !ctx->stat[WRITE].nr_samples)
					continue;
				if (ctx->stat[READ].time > newest)
					newest = ctx->stat[READ].time;
				if (ctx->stat[WRITE].time > newest)
					newest = ctx->stat[WRITE].time;
			}
		}

		/*
		 * No samples
		 */
		if (!newest)
			break;

		if (newest > latest)
			latest = newest;

		queue_for_each_hw_ctx(q, hctx, i) {
			hctx_for_each_ctx(hctx, ctx, j) {
				if (ctx->stat[READ].time == newest) {
					blk_stat_sum(&dst[READ],
							&ctx->stat[READ]);
					nr++;
				}
				if (ctx->stat[WRITE].time == newest) {
					blk_stat_sum(&dst[WRITE],
							&ctx->stat[WRITE]);
					nr++;
				}
			}
		}
		/*
		 * If we race on finding an entry, just loop back again.
		 * Should be very rare.
		 */
	} while (!nr);

	dst[READ].time = dst[WRITE].time = latest;
}

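/*
 * Fill dst[READ] and dst[WRITE] with the queue's current stats: aggregated
 * from the per-ctx stats for blk-mq queues, or copied from q->rq_stats for
 * the legacy request path.
 */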
void blk_queue_stat_get(struct request_queue *q, struct blk_rq_stat *dst)
{
	if (q->mq_ops)
		blk_mq_stat_get(q, dst);
	else {
		blk_stat_flush_batch(&q->rq_stats[READ]);
		blk_stat_flush_batch(&q->rq_stats[WRITE]);
		memcpy(&dst[READ], &q->rq_stats[READ],
				sizeof(struct blk_rq_stat));
		memcpy(&dst[WRITE], &q->rq_stats[WRITE],
				sizeof(struct blk_rq_stat));
	}
}

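/*
 * Like blk_mq_stat_get(), but aggregate only the software contexts that
 * belong to one hardware context, using the same newest-window scan and
 * retry. Note that @dst is not reinitialized here.
 */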
void blk_hctx_stat_get(struct blk_mq_hw_ctx *hctx, struct blk_rq_stat *dst)
{
	struct blk_mq_ctx *ctx;
	unsigned int i, nr;

	nr = 0;
	do {
		uint64_t newest = 0;

		hctx_for_each_ctx(hctx, ctx, i) {
			blk_stat_flush_batch(&ctx->stat[READ]);
			blk_stat_flush_batch(&ctx->stat[WRITE]);

			if (!ctx->stat[READ].nr_samples &&
			    !ctx->stat[WRITE].nr_samples)
				continue;

			if (ctx->stat[READ].time > newest)
				newest = ctx->stat[READ].time;
			if (ctx->stat[WRITE].time > newest)
				newest = ctx->stat[WRITE].time;
		}

		if (!newest)
			break;

		hctx_for_each_ctx(hctx, ctx, i) {
			if (ctx->stat[READ].time == newest) {
				blk_stat_sum(&dst[READ], &ctx->stat[READ]);
				nr++;
			}
			if (ctx->stat[WRITE].time == newest) {
				blk_stat_sum(&dst[WRITE], &ctx->stat[WRITE]);
				nr++;
			}
		}
		/*
		 * If we race on finding an entry, just loop back again.
		 * Should be very rare, as the window is only updated
		 * occasionally
		 */
	} while (!nr);
}

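/*
 * Reset all fields of @stat and record the time window containing
 * @time_now (the timestamp masked with BLK_STAT_NSEC_MASK). min starts
 * at -1ULL so the first sample always updates it.
 */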
static void __blk_stat_init(struct blk_rq_stat *stat, s64 time_now)
{
	stat->min = -1ULL;
	stat->max = stat->nr_samples = stat->mean = 0;
	stat->batch = stat->nr_batch = 0;
	stat->time = time_now & BLK_STAT_NSEC_MASK;
}

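/* Reset @stat, stamping it with the window of the current time. */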
void blk_stat_init(struct blk_rq_stat *stat)
{
	__blk_stat_init(stat, ktime_to_ns(ktime_get()));
}

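/*
 * Return true if @now still falls within the time window that @stat was
 * initialized in.
 */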
static bool __blk_stat_is_current(struct blk_rq_stat *stat, s64 now)
{
	return (now & BLK_STAT_NSEC_MASK) == (stat->time & BLK_STAT_NSEC_MASK);
}

bool blk_stat_is_current(struct blk_rq_stat *stat)
{
	return __blk_stat_is_current(stat, ktime_to_ns(ktime_get()));
}

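/*
 * Add a latency sample for @rq to @stat: the delta between now and the
 * request's issue time. The sample goes into the current batch, which is
 * flushed when its sum would overflow or it reaches BLK_RQ_STAT_BATCH
 * samples. If the time window has rolled over, the stats are reset first.
 */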
void blk_stat_add(struct blk_rq_stat *stat, struct request *rq)
{
	s64 now, value;

	now = __blk_stat_time(ktime_to_ns(ktime_get()));
	if (now < blk_stat_time(&rq->issue_stat))
		return;

	if (!__blk_stat_is_current(stat, now))
		__blk_stat_init(stat, now);

	value = now - blk_stat_time(&rq->issue_stat);
	if (value > stat->max)
		stat->max = value;
	if (value < stat->min)
		stat->min = value;

	if (stat->batch + value < stat->batch ||
	    stat->nr_batch + 1 == BLK_RQ_STAT_BATCH)
		blk_stat_flush_batch(stat);

	stat->batch += value;
	stat->nr_batch++;
}

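/*
 * Reset all READ/WRITE stats for @q: per software context for blk-mq
 * queues, or the per-queue stats for the legacy path.
 */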
void blk_stat_clear(struct request_queue *q)
{
	if (q->mq_ops) {
		struct blk_mq_hw_ctx *hctx;
		struct blk_mq_ctx *ctx;
		int i, j;

		queue_for_each_hw_ctx(q, hctx, i) {
			hctx_for_each_ctx(hctx, ctx, j) {
				blk_stat_init(&ctx->stat[READ]);
				blk_stat_init(&ctx->stat[WRITE]);
			}
		}
	} else {
		blk_stat_init(&q->rq_stats[READ]);
		blk_stat_init(&q->rq_stats[WRITE]);
	}
}

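/*
 * Stamp the issue time of a request into blk_issue_stat, keeping the
 * non-time bits covered by BLK_STAT_MASK intact.
 */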
void blk_stat_set_issue_time(struct blk_issue_stat *stat)
{
	stat->time = (stat->time & BLK_STAT_MASK) |
			(ktime_to_ns(ktime_get()) & BLK_STAT_TIME_MASK);
}

/*
 * Enable stat tracking, return whether it was already enabled
 */
bool blk_stat_enable(struct request_queue *q)
{
	if (!test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) {
		set_bit(QUEUE_FLAG_STATS, &q->queue_flags);
		return false;
	}

	return true;
}