/*
 * Source: block/blk-cgroup.h from mirror_ubuntu-bionic-kernel.git
 * (git.proxmox.com mirror), at commit "blkcg: move blkio_group_stats_cpu
 * and friends to blk-throttle.c".
 */
1 #ifndef _BLK_CGROUP_H
2 #define _BLK_CGROUP_H
3 /*
4 * Common Block IO controller cgroup interface
5 *
6 * Based on ideas and code from CFQ, CFS and BFQ:
7 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
8 *
9 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
10 * Paolo Valente <paolo.valente@unimore.it>
11 *
12 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
13 * Nauman Rafique <nauman@google.com>
14 */
15
16 #include <linux/cgroup.h>
17 #include <linux/u64_stats_sync.h>
18 #include <linux/seq_file.h>
19
/*
 * Identifiers for the registered blkio policies.  Also used to index
 * the per-policy data array in struct blkio_group (pd[BLKIO_NR_POLICIES]).
 */
enum blkio_policy_id {
	BLKIO_POLICY_PROP = 0,		/* Proportional Bandwidth division */
	BLKIO_POLICY_THROTL,		/* Throttling */

	BLKIO_NR_POLICIES,		/* number of policies — array size, not an id */
};

/* Max limits for throttle policy */
#define THROTL_IOPS_MAX		UINT_MAX
29
30 #ifdef CONFIG_BLK_CGROUP
31
/*
 * cft->private [un]packing for stat printing: a policy id is packed into
 * the high 16 bits and a byte offset (presumably into the policy's pdata —
 * confirm against blk-cgroup.c) into the low 16 bits of cftype->private.
 */
#define BLKCG_STAT_PRIV(pol, off)	(((unsigned)(pol) << 16) | (off))
#define BLKCG_STAT_POL(prv)		((unsigned)(prv) >> 16)
#define BLKCG_STAT_OFF(prv)		((unsigned)(prv) & 0xffff)
36
/*
 * Indexes into blkg_rwstat->cnt[].  Each accounted value lands in exactly
 * one of READ/WRITE and one of SYNC/ASYNC (see blkg_rwstat_add()).
 */
enum blkg_rwstat_type {
	BLKG_RWSTAT_READ,
	BLKG_RWSTAT_WRITE,
	BLKG_RWSTAT_SYNC,
	BLKG_RWSTAT_ASYNC,

	BLKG_RWSTAT_NR,				/* number of real counters */
	BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR,	/* pseudo-index meaning "sum of all" */
};
46
/* per-cgroup state of the block IO controller */
struct blkio_cgroup {
	struct cgroup_subsys_state css;	/* embedded cgroup subsys state; must be first for css<->blkcg conversion — TODO confirm */
	unsigned int weight;		/* proportional IO weight (BLKIO_WEIGHT_MIN..MAX) */
	spinlock_t lock;		/* presumably protects blkg_list — verify against blk-cgroup.c */
	struct hlist_head blkg_list;	/* blkio_groups of this cgroup, linked via blkg->blkcg_node */

	/* for policies to test whether associated blkcg has changed */
	uint64_t id;
};
56
/* a single u64 counter; syncp guards against torn 64-bit reads on 32-bit */
struct blkg_stat {
	struct u64_stats_sync syncp;
	uint64_t cnt;
};

/* per-direction/sync-ness counters, indexed by enum blkg_rwstat_type */
struct blkg_rwstat {
	struct u64_stats_sync syncp;
	uint64_t cnt[BLKG_RWSTAT_NR];
};
66
/* per-blkg configuration values as set through cgroup files */
struct blkio_group_conf {
	unsigned int weight;	/* proportional weight override for this blkg */
	u64 iops[2];		/* IO/s limits; [0]/[1] presumably READ/WRITE — TODO confirm */
	u64 bps[2];		/* bytes/s limits; same indexing assumption */
};
72
/* per-blkg per-policy data */
struct blkg_policy_data {
	/* the blkg this per-policy data belongs to */
	struct blkio_group *blkg;

	/* Configuration */
	struct blkio_group_conf conf;

	/* pol->pdata_size bytes of private data used by policy impl */
	char pdata[] __aligned(__alignof__(unsigned long long));
};
84
/* association between a blkio_cgroup and a request_queue */
struct blkio_group {
	/* Pointer to the associated request_queue */
	struct request_queue *q;
	struct list_head q_node;	/* presumably entry in q's blkg list — verify */
	struct hlist_node blkcg_node;	/* entry in blkcg->blkg_list */
	struct blkio_cgroup *blkcg;	/* owning cgroup */
	/* Store cgroup path */
	char path[128];
	/* reference count, manipulated under q->queue_lock (see blkg_get/blkg_put) */
	int refcnt;

	/* per-policy data, indexed by enum blkio_policy_id */
	struct blkg_policy_data *pd[BLKIO_NR_POLICIES];

	/* for RCU-delayed freeing after the last reference is dropped */
	struct rcu_head rcu_head;
};
100
/* per-policy lifecycle callbacks invoked by the blkcg core */
typedef void (blkio_init_group_fn)(struct blkio_group *blkg);
typedef void (blkio_exit_group_fn)(struct blkio_group *blkg);
typedef void (blkio_reset_group_stats_fn)(struct blkio_group *blkg);

struct blkio_policy_ops {
	blkio_init_group_fn *blkio_init_group_fn;	/* init policy data of a new blkg */
	blkio_exit_group_fn *blkio_exit_group_fn;	/* teardown before the blkg is freed */
	blkio_reset_group_stats_fn *blkio_reset_group_stats_fn;	/* clear policy stats */
};

/* descriptor a policy passes to blkio_policy_register() */
struct blkio_policy_type {
	struct list_head list;		/* entry in the global policy list — presumably; verify */
	struct blkio_policy_ops ops;
	enum blkio_policy_id plid;
	size_t pdata_size;		/* policy specific private data size */
	struct cftype *cftypes;		/* cgroup files for the policy */
};
118
/* request_queue init/drain/teardown hooks for the blkcg core */
extern int blkcg_init_queue(struct request_queue *q);
extern void blkcg_drain_queue(struct request_queue *q);
extern void blkcg_exit_queue(struct request_queue *q);

/* Blkio controller policy registration */
extern void blkio_policy_register(struct blkio_policy_type *);
extern void blkio_policy_unregister(struct blkio_policy_type *);
extern void blkg_destroy_all(struct request_queue *q, bool destroy_root);
extern void update_root_blkg_pd(struct request_queue *q,
				enum blkio_policy_id plid);

/*
 * Stat printing: blkcg_print_blkgs() iterates the blkgs of @blkcg and
 * calls @prfill on each; the __blkg_prfill_*() helpers format one value.
 */
void blkcg_print_blkgs(struct seq_file *sf, struct blkio_cgroup *blkcg,
		       u64 (*prfill)(struct seq_file *, struct blkg_policy_data *, int),
		       int pol, int data, bool show_total);
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
			 const struct blkg_rwstat *rwstat);
int blkcg_print_stat(struct cgroup *cgrp, struct cftype *cft,
		     struct seq_file *sf);
int blkcg_print_rwstat(struct cgroup *cgrp, struct cftype *cft,
		       struct seq_file *sf);
140
/*
 * Context carried from blkg_conf_prep() to blkg_conf_finish() while
 * applying a per-device configuration string.
 */
struct blkg_conf_ctx {
	struct gendisk *disk;		/* disk resolved from @input — presumably; verify */
	struct blkio_group *blkg;	/* blkg for the (blkcg, disk) pair */
	u64 v;				/* parsed configuration value */
};

/* always pair blkg_conf_prep() with blkg_conf_finish() to release @ctx */
int blkg_conf_prep(struct blkio_cgroup *blkcg, const char *input,
		   struct blkg_conf_ctx *ctx);
void blkg_conf_finish(struct blkg_conf_ctx *ctx);
150
151
152 /**
153 * blkg_to_pdata - get policy private data
154 * @blkg: blkg of interest
155 * @pol: policy of interest
156 *
157 * Return pointer to private data associated with the @blkg-@pol pair.
158 */
159 static inline void *blkg_to_pdata(struct blkio_group *blkg,
160 struct blkio_policy_type *pol)
161 {
162 return blkg ? blkg->pd[pol->plid]->pdata : NULL;
163 }
164
165 /**
166 * pdata_to_blkg - get blkg associated with policy private data
167 * @pdata: policy private data of interest
168 *
169 * @pdata is policy private data. Determine the blkg it's associated with.
170 */
171 static inline struct blkio_group *pdata_to_blkg(void *pdata)
172 {
173 if (pdata) {
174 struct blkg_policy_data *pd =
175 container_of(pdata, struct blkg_policy_data, pdata);
176 return pd->blkg;
177 }
178 return NULL;
179 }
180
/* return the cgroup path cached in @blkg at creation time */
static inline char *blkg_path(struct blkio_group *blkg)
{
	return blkg->path;
}
185
/**
 * blkg_get - get a blkg reference
 * @blkg: blkg to get
 *
 * The caller should be holding queue_lock and an existing reference.
 */
static inline void blkg_get(struct blkio_group *blkg)
{
	lockdep_assert_held(blkg->q->queue_lock);
	/* taking a ref on a zero-ref blkg would resurrect a dying group */
	WARN_ON_ONCE(!blkg->refcnt);
	blkg->refcnt++;
}
198
199 void __blkg_release(struct blkio_group *blkg);
200
201 /**
202 * blkg_put - put a blkg reference
203 * @blkg: blkg to put
204 *
205 * The caller should be holding queue_lock.
206 */
207 static inline void blkg_put(struct blkio_group *blkg)
208 {
209 lockdep_assert_held(blkg->q->queue_lock);
210 WARN_ON_ONCE(blkg->refcnt <= 0);
211 if (!--blkg->refcnt)
212 __blkg_release(blkg);
213 }
214
/**
 * blkg_stat_add - add a value to a blkg_stat
 * @stat: target blkg_stat
 * @val: value to add
 *
 * Add @val to @stat. The caller is responsible for synchronizing calls to
 * this function.
 */
static inline void blkg_stat_add(struct blkg_stat *stat, uint64_t val)
{
	/* begin/end bracket lets 32-bit readers detect a torn update */
	u64_stats_update_begin(&stat->syncp);
	stat->cnt += val;
	u64_stats_update_end(&stat->syncp);
}
229
/**
 * blkg_stat_read - read the current value of a blkg_stat
 * @stat: blkg_stat to read
 *
 * Read the current value of @stat. This function can be called without
 * synchronization and takes care of u64 atomicity.
 */
static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
{
	unsigned int start;
	uint64_t v;

	/* seqcount-style retry loop: re-read if a writer raced with us */
	do {
		start = u64_stats_fetch_begin(&stat->syncp);
		v = stat->cnt;
	} while (u64_stats_fetch_retry(&stat->syncp, start));

	return v;
}
249
/**
 * blkg_stat_reset - reset a blkg_stat
 * @stat: blkg_stat to reset
 *
 * NOTE(review): writes without the syncp update bracket — presumably the
 * caller excludes concurrent updaters; confirm at call sites.
 */
static inline void blkg_stat_reset(struct blkg_stat *stat)
{
	stat->cnt = 0;
}
258
259 /**
260 * blkg_rwstat_add - add a value to a blkg_rwstat
261 * @rwstat: target blkg_rwstat
262 * @rw: mask of REQ_{WRITE|SYNC}
263 * @val: value to add
264 *
265 * Add @val to @rwstat. The counters are chosen according to @rw. The
266 * caller is responsible for synchronizing calls to this function.
267 */
268 static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
269 int rw, uint64_t val)
270 {
271 u64_stats_update_begin(&rwstat->syncp);
272
273 if (rw & REQ_WRITE)
274 rwstat->cnt[BLKG_RWSTAT_WRITE] += val;
275 else
276 rwstat->cnt[BLKG_RWSTAT_READ] += val;
277 if (rw & REQ_SYNC)
278 rwstat->cnt[BLKG_RWSTAT_SYNC] += val;
279 else
280 rwstat->cnt[BLKG_RWSTAT_ASYNC] += val;
281
282 u64_stats_update_end(&rwstat->syncp);
283 }
284
285 /**
286 * blkg_rwstat_read - read the current values of a blkg_rwstat
287 * @rwstat: blkg_rwstat to read
288 *
289 * Read the current snapshot of @rwstat and return it as the return value.
290 * This function can be called without synchronization and takes care of
291 * u64 atomicity.
292 */
293 static struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat)
294 {
295 unsigned int start;
296 struct blkg_rwstat tmp;
297
298 do {
299 start = u64_stats_fetch_begin(&rwstat->syncp);
300 tmp = *rwstat;
301 } while (u64_stats_fetch_retry(&rwstat->syncp, start));
302
303 return tmp;
304 }
305
306 /**
307 * blkg_rwstat_sum - read the total count of a blkg_rwstat
308 * @rwstat: blkg_rwstat to read
309 *
310 * Return the total count of @rwstat regardless of the IO direction. This
311 * function can be called without synchronization and takes care of u64
312 * atomicity.
313 */
314 static inline uint64_t blkg_rwstat_sum(struct blkg_rwstat *rwstat)
315 {
316 struct blkg_rwstat tmp = blkg_rwstat_read(rwstat);
317
318 return tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE];
319 }
320
/**
 * blkg_rwstat_reset - reset a blkg_rwstat
 * @rwstat: blkg_rwstat to reset
 *
 * NOTE(review): clears the counters without the syncp update bracket —
 * presumably the caller excludes concurrent updaters; confirm at call sites.
 */
static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
{
	memset(rwstat->cnt, 0, sizeof(rwstat->cnt));
}
329
330 #else
331
332 struct blkio_group {
333 };
334
335 struct blkio_policy_type {
336 };
337
338 static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
339 static inline void blkcg_drain_queue(struct request_queue *q) { }
340 static inline void blkcg_exit_queue(struct request_queue *q) { }
341 static inline void blkio_policy_register(struct blkio_policy_type *blkiop) { }
342 static inline void blkio_policy_unregister(struct blkio_policy_type *blkiop) { }
343 static inline void blkg_destroy_all(struct request_queue *q,
344 bool destory_root) { }
345 static inline void update_root_blkg_pd(struct request_queue *q,
346 enum blkio_policy_id plid) { }
347
348 static inline void *blkg_to_pdata(struct blkio_group *blkg,
349 struct blkio_policy_type *pol) { return NULL; }
350 static inline struct blkio_group *pdata_to_blkg(void *pdata,
351 struct blkio_policy_type *pol) { return NULL; }
352 static inline char *blkg_path(struct blkio_group *blkg) { return NULL; }
353 static inline void blkg_get(struct blkio_group *blkg) { }
354 static inline void blkg_put(struct blkio_group *blkg) { }
355
356 #endif
357
/* valid range and default for blkio_cgroup->weight */
#define BLKIO_WEIGHT_MIN	10
#define BLKIO_WEIGHT_MAX	1000
#define BLKIO_WEIGHT_DEFAULT	500
361
362 #ifdef CONFIG_BLK_CGROUP
extern struct blkio_cgroup blkio_root_cgroup;
/* map a generic cgroup / bio to its blkio_cgroup */
extern struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup);
extern struct blkio_cgroup *bio_blkio_cgroup(struct bio *bio);
/* find the blkg for (@blkcg, @q); lookup_create may allocate one */
extern struct blkio_group *blkg_lookup(struct blkio_cgroup *blkcg,
				       struct request_queue *q);
struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg,
				       struct request_queue *q,
				       bool for_root);
371 #else
372 struct cgroup;
373 static inline struct blkio_cgroup *
374 cgroup_to_blkio_cgroup(struct cgroup *cgroup) { return NULL; }
375 static inline struct blkio_cgroup *
376 bio_blkio_cgroup(struct bio *bio) { return NULL; }
377
378 static inline struct blkio_group *blkg_lookup(struct blkio_cgroup *blkcg,
379 void *key) { return NULL; }
380 #endif
381 #endif /* _BLK_CGROUP_H */