4 * Common Block IO controller cgroup interface
6 * Based on ideas and code from CFQ, CFS and BFQ:
7 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
9 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
10 * Paolo Valente <paolo.valente@unimore.it>
12 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
13 * Nauman Rafique <nauman@google.com>
16 #include <linux/cgroup.h>
17 #include <linux/u64_stats_sync.h>
18 #include <linux/seq_file.h>
/*
 * IDs of the implemented blkio policies.  BLKIO_NR_POLICIES must stay last:
 * it sizes the per-blkg policy data array (struct blkio_group->pd[]).
 */
enum blkio_policy_id {
	BLKIO_POLICY_PROP = 0,		/* Proportional Bandwidth division */
	BLKIO_POLICY_THROTL,		/* Throttling */

	BLKIO_NR_POLICIES,
};
/* Max limits for throttle policy */
#define THROTL_IOPS_MAX		UINT_MAX	/* largest representable IOPS limit */
30 #ifdef CONFIG_BLK_CGROUP
/* cft->private [un]packing for stat printing */
/*
 * The policy id goes in the high 16 bits and the stat field offset in the
 * low 16 bits of cft->private, so @off must fit in 16 bits.
 */
#define BLKCG_STAT_PRIV(pol, off)	(((unsigned)(pol) << 16) | (off))
#define BLKCG_STAT_POL(prv)		((unsigned)(prv) >> 16)
#define BLKCG_STAT_OFF(prv)		((unsigned)(prv) & 0xffff)
/*
 * Indexes into struct blkg_rwstat->cnt[].  The four directional counters
 * are used by blkg_rwstat_add() below; BLKG_RWSTAT_NR sizes the array and
 * BLKG_RWSTAT_TOTAL is a pseudo-index used only when printing totals.
 */
enum blkg_rwstat_type {
	BLKG_RWSTAT_READ,
	BLKG_RWSTAT_WRITE,
	BLKG_RWSTAT_SYNC,
	BLKG_RWSTAT_ASYNC,

	BLKG_RWSTAT_NR,
	BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR,
};
48 struct cgroup_subsys_state css
;
51 struct hlist_head blkg_list
;
53 /* for policies to test whether associated blkcg has changed */
58 struct u64_stats_sync syncp
;
63 struct u64_stats_sync syncp
;
64 uint64_t cnt
[BLKG_RWSTAT_NR
];
67 struct blkio_group_conf
{
73 /* per-blkg per-policy data */
74 struct blkg_policy_data
{
75 /* the blkg this per-policy data belongs to */
76 struct blkio_group
*blkg
;
79 struct blkio_group_conf conf
;
81 /* pol->pdata_size bytes of private data used by policy impl */
82 char pdata
[] __aligned(__alignof__(unsigned long long));
86 /* Pointer to the associated request_queue */
87 struct request_queue
*q
;
88 struct list_head q_node
;
89 struct hlist_node blkcg_node
;
90 struct blkio_cgroup
*blkcg
;
91 /* Store cgroup path */
96 struct blkg_policy_data
*pd
[BLKIO_NR_POLICIES
];
98 struct rcu_head rcu_head
;
/* per-policy lifecycle callbacks, each invoked with the affected blkg */
typedef void (blkio_init_group_fn)(struct blkio_group *blkg);
typedef void (blkio_exit_group_fn)(struct blkio_group *blkg);
typedef void (blkio_reset_group_stats_fn)(struct blkio_group *blkg);
105 struct blkio_policy_ops
{
106 blkio_init_group_fn
*blkio_init_group_fn
;
107 blkio_exit_group_fn
*blkio_exit_group_fn
;
108 blkio_reset_group_stats_fn
*blkio_reset_group_stats_fn
;
111 struct blkio_policy_type
{
112 struct list_head list
;
113 struct blkio_policy_ops ops
;
114 enum blkio_policy_id plid
;
115 size_t pdata_size
; /* policy specific private data size */
116 struct cftype
*cftypes
; /* cgroup files for the policy */
/* queue-side lifecycle hooks of the blkcg infrastructure */
extern int blkcg_init_queue(struct request_queue *q);
extern void blkcg_drain_queue(struct request_queue *q);
extern void blkcg_exit_queue(struct request_queue *q);

/* Blkio controller policy registration */
extern void blkio_policy_register(struct blkio_policy_type *);
extern void blkio_policy_unregister(struct blkio_policy_type *);
/* destroy blkgs of @q; @destroy_root presumably selects whether the root blkg goes too — confirm */
extern void blkg_destroy_all(struct request_queue *q, bool destroy_root);
extern void update_root_blkg_pd(struct request_queue *q,
				enum blkio_policy_id plid);
/*
 * Stat printing plumbing.  blkcg_print_blkgs() calls @prfill for policy
 * @pol of @blkcg with @data, and the __blkg_prfill_*() helpers format one
 * u64 / one rwstat into @sf.  NOTE(review): per-blkg iteration semantics
 * inferred from the signatures — confirm in the implementation.
 */
void blkcg_print_blkgs(struct seq_file *sf, struct blkio_cgroup *blkcg,
		       u64 (*prfill)(struct seq_file *, struct blkg_policy_data *, int),
		       int pol, int data, bool show_total);
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
			 const struct blkg_rwstat *rwstat);
int blkcg_print_stat(struct cgroup *cgrp, struct cftype *cft,
		     struct seq_file *sf);
int blkcg_print_rwstat(struct cgroup *cgrp, struct cftype *cft,
		       struct seq_file *sf);
141 struct blkg_conf_ctx
{
142 struct gendisk
*disk
;
143 struct blkio_group
*blkg
;
/*
 * blkg_conf_prep() parses @input (presumably a "MAJ:MIN value" string —
 * confirm) and fills *@ctx; each successful call must be paired with
 * blkg_conf_finish() to release what prep acquired.
 */
int blkg_conf_prep(struct blkio_cgroup *blkcg, const char *input,
		   struct blkg_conf_ctx *ctx);
void blkg_conf_finish(struct blkg_conf_ctx *ctx);
153 * blkg_to_pdata - get policy private data
154 * @blkg: blkg of interest
155 * @pol: policy of interest
157 * Return pointer to private data associated with the @blkg-@pol pair.
159 static inline void *blkg_to_pdata(struct blkio_group
*blkg
,
160 struct blkio_policy_type
*pol
)
162 return blkg
? blkg
->pd
[pol
->plid
]->pdata
: NULL
;
166 * pdata_to_blkg - get blkg associated with policy private data
167 * @pdata: policy private data of interest
169 * @pdata is policy private data. Determine the blkg it's associated with.
171 static inline struct blkio_group
*pdata_to_blkg(void *pdata
)
174 struct blkg_policy_data
*pd
=
175 container_of(pdata
, struct blkg_policy_data
, pdata
);
181 static inline char *blkg_path(struct blkio_group
*blkg
)
187 * blkg_get - get a blkg reference
190 * The caller should be holding queue_lock and an existing reference.
192 static inline void blkg_get(struct blkio_group
*blkg
)
194 lockdep_assert_held(blkg
->q
->queue_lock
);
195 WARN_ON_ONCE(!blkg
->refcnt
);
/* slow-path destruction, invoked by blkg_put() for the last reference */
void __blkg_release(struct blkio_group *blkg);
202 * blkg_put - put a blkg reference
205 * The caller should be holding queue_lock.
207 static inline void blkg_put(struct blkio_group
*blkg
)
209 lockdep_assert_held(blkg
->q
->queue_lock
);
210 WARN_ON_ONCE(blkg
->refcnt
<= 0);
212 __blkg_release(blkg
);
216 * blkg_stat_add - add a value to a blkg_stat
217 * @stat: target blkg_stat
220 * Add @val to @stat. The caller is responsible for synchronizing calls to
223 static inline void blkg_stat_add(struct blkg_stat
*stat
, uint64_t val
)
225 u64_stats_update_begin(&stat
->syncp
);
227 u64_stats_update_end(&stat
->syncp
);
231 * blkg_stat_read - read the current value of a blkg_stat
232 * @stat: blkg_stat to read
234 * Read the current value of @stat. This function can be called without
235 * synchroniztion and takes care of u64 atomicity.
237 static inline uint64_t blkg_stat_read(struct blkg_stat
*stat
)
243 start
= u64_stats_fetch_begin(&stat
->syncp
);
245 } while (u64_stats_fetch_retry(&stat
->syncp
, start
));
251 * blkg_stat_reset - reset a blkg_stat
252 * @stat: blkg_stat to reset
254 static inline void blkg_stat_reset(struct blkg_stat
*stat
)
260 * blkg_rwstat_add - add a value to a blkg_rwstat
261 * @rwstat: target blkg_rwstat
262 * @rw: mask of REQ_{WRITE|SYNC}
265 * Add @val to @rwstat. The counters are chosen according to @rw. The
266 * caller is responsible for synchronizing calls to this function.
268 static inline void blkg_rwstat_add(struct blkg_rwstat
*rwstat
,
269 int rw
, uint64_t val
)
271 u64_stats_update_begin(&rwstat
->syncp
);
274 rwstat
->cnt
[BLKG_RWSTAT_WRITE
] += val
;
276 rwstat
->cnt
[BLKG_RWSTAT_READ
] += val
;
278 rwstat
->cnt
[BLKG_RWSTAT_SYNC
] += val
;
280 rwstat
->cnt
[BLKG_RWSTAT_ASYNC
] += val
;
282 u64_stats_update_end(&rwstat
->syncp
);
286 * blkg_rwstat_read - read the current values of a blkg_rwstat
287 * @rwstat: blkg_rwstat to read
289 * Read the current snapshot of @rwstat and return it as the return value.
290 * This function can be called without synchronization and takes care of
293 static struct blkg_rwstat
blkg_rwstat_read(struct blkg_rwstat
*rwstat
)
296 struct blkg_rwstat tmp
;
299 start
= u64_stats_fetch_begin(&rwstat
->syncp
);
301 } while (u64_stats_fetch_retry(&rwstat
->syncp
, start
));
307 * blkg_rwstat_sum - read the total count of a blkg_rwstat
308 * @rwstat: blkg_rwstat to read
310 * Return the total count of @rwstat regardless of the IO direction. This
311 * function can be called without synchronization and takes care of u64
314 static inline uint64_t blkg_rwstat_sum(struct blkg_rwstat
*rwstat
)
316 struct blkg_rwstat tmp
= blkg_rwstat_read(rwstat
);
318 return tmp
.cnt
[BLKG_RWSTAT_READ
] + tmp
.cnt
[BLKG_RWSTAT_WRITE
];
322 * blkg_rwstat_reset - reset a blkg_rwstat
323 * @rwstat: blkg_rwstat to reset
325 static inline void blkg_rwstat_reset(struct blkg_rwstat
*rwstat
)
327 memset(rwstat
->cnt
, 0, sizeof(rwstat
->cnt
));
/* !CONFIG_BLK_CGROUP: empty placeholder so the stub prototypes below compile */
struct blkio_policy_type {
};
/* no-op stub when block cgroups are compiled out; always reports success */
static inline int blkcg_init_queue(struct request_queue *q)
{
	return 0;
}
/* nothing to drain without CONFIG_BLK_CGROUP */
static inline void blkcg_drain_queue(struct request_queue *q)
{
}
/* nothing to tear down without CONFIG_BLK_CGROUP */
static inline void blkcg_exit_queue(struct request_queue *q)
{
}
/* policy registration is a no-op when the controller is compiled out */
static inline void blkio_policy_register(struct blkio_policy_type *blkiop)
{
}
/* policy unregistration is a no-op when the controller is compiled out */
static inline void blkio_policy_unregister(struct blkio_policy_type *blkiop)
{
}
/* stub: no blkgs to destroy without CONFIG_BLK_CGROUP */
static inline void blkg_destroy_all(struct request_queue *q,
				    bool destroy_root)
{
}
345 static inline void update_root_blkg_pd(struct request_queue
*q
,
346 enum blkio_policy_id plid
) { }
348 static inline void *blkg_to_pdata(struct blkio_group
*blkg
,
349 struct blkio_policy_type
*pol
) { return NULL
; }
350 static inline struct blkio_group
*pdata_to_blkg(void *pdata
,
351 struct blkio_policy_type
*pol
) { return NULL
; }
352 static inline char *blkg_path(struct blkio_group
*blkg
) { return NULL
; }
/* reference counting is a no-op without block cgroups */
static inline void blkg_get(struct blkio_group *blkg)
{
}
/* reference counting is a no-op without block cgroups */
static inline void blkg_put(struct blkio_group *blkg)
{
}
/* valid range and default value for blkio weights */
#define BLKIO_WEIGHT_MIN	10
#define BLKIO_WEIGHT_MAX	1000
#define BLKIO_WEIGHT_DEFAULT	500
362 #ifdef CONFIG_BLK_CGROUP
/* the always-present root blkio cgroup */
extern struct blkio_cgroup blkio_root_cgroup;
/* resolve the blkio cgroup owning a cgroup / a bio */
extern struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup);
extern struct blkio_cgroup *bio_blkio_cgroup(struct bio *bio);
/* find the blkg of @blkcg on @q */
extern struct blkio_group *blkg_lookup(struct blkio_cgroup *blkcg,
				       struct request_queue *q);
368 struct blkio_group
*blkg_lookup_create(struct blkio_cgroup
*blkcg
,
369 struct request_queue
*q
,
373 static inline struct blkio_cgroup
*
374 cgroup_to_blkio_cgroup(struct cgroup
*cgroup
) { return NULL
; }
375 static inline struct blkio_cgroup
*
376 bio_blkio_cgroup(struct bio
*bio
) { return NULL
; }
378 static inline struct blkio_group
*blkg_lookup(struct blkio_cgroup
*blkcg
,
379 void *key
) { return NULL
; }
381 #endif /* _BLK_CGROUP_H */