#ifndef _BLK_CGROUP_H
#define _BLK_CGROUP_H
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *	              Nauman Rafique <nauman@google.com>
 */

#include <linux/cgroup.h>
/* The policies that can own a blkio_group. */
enum blkio_policy_id {
	BLKIO_POLICY_PROP = 0,		/* Proportional Bandwidth division */
	BLKIO_POLICY_THROTL,		/* Throttling */
};
23 #if defined(CONFIG_BLK_CGROUP) || defined(CONFIG_BLK_CGROUP_MODULE)
25 #ifndef CONFIG_BLK_CGROUP
26 /* When blk-cgroup is a module, its subsys_id isn't a compile-time constant */
27 extern struct cgroup_subsys blkio_subsys
;
28 #define blkio_subsys_id blkio_subsys.subsys_id
/*
 * Per-group statistic indices.  The per-direction (two-dimensional) stats
 * come first so BLKIO_STAT_QUEUED + 1 can size blkio_group_stats.stat_arr[];
 * all the single valued stats follow it.
 *
 * NOTE(review): the enumerators without comments were reconstructed from the
 * stat helpers declared below (timeslice/dequeue/idle/empty updaters) —
 * confirm against the original header.
 */
enum stat_type {
	/*
	 * Total time spent (in ns) between request dispatch to the driver
	 * and request completion for IOs done by this cgroup.  This may not
	 * be accurate when NCQ is turned on.
	 */
	BLKIO_STAT_SERVICE_TIME = 0,
	/* Total bytes transferred */
	BLKIO_STAT_SERVICE_BYTES,
	/* Total IOs serviced, post merge */
	BLKIO_STAT_SERVICED,
	/* Total time spent waiting in scheduler queue in ns */
	BLKIO_STAT_WAIT_TIME,
	/* Number of IOs merged */
	BLKIO_STAT_MERGED,
	/* Number of IOs queued up */
	BLKIO_STAT_QUEUED,
	/* All the single valued stats go below this */
	BLKIO_STAT_TIME,
	BLKIO_STAT_SECTORS,
#ifdef CONFIG_DEBUG_BLK_CGROUP
	BLKIO_STAT_AVG_QUEUE_SIZE,
	BLKIO_STAT_IDLE_TIME,
	BLKIO_STAT_EMPTY_TIME,
	BLKIO_STAT_GROUP_WAIT_TIME,
	BLKIO_STAT_DEQUEUE
#endif
};

/* Per stat, sub type: read/write, sync/async, and the running total. */
enum stat_sub_type {
	BLKIO_STAT_READ = 0,
	BLKIO_STAT_WRITE,
	BLKIO_STAT_SYNC,
	BLKIO_STAT_ASYNC,
	BLKIO_STAT_TOTAL
};
/*
 * blkg state flags — bit positions in blkio_group_stats.flags, manipulated
 * through the BLKG_FLAG_FNS() helpers generated below.
 * NOTE(review): members reconstructed from the BLKG_FLAG_FNS(waiting/idling)
 * instantiations and the empty-time stat fields — confirm against original.
 */
enum blkg_state_flags {
	BLKG_waiting = 0,
	BLKG_idling,
	BLKG_empty,
};
/*
 * cgroup files owned by proportional weight policy.
 * NOTE(review): the members without a visible counterpart in the extracted
 * source (time, sectors, io_merged, io_queued, idle_time, dequeue) were
 * reconstructed by mirroring the stat names used elsewhere in this header —
 * confirm their presence and order against the original, since the values
 * map cgroup files to handlers.
 */
enum blkcg_file_name_prop {
	BLKIO_PROP_weight = 1,
	BLKIO_PROP_weight_device,
	BLKIO_PROP_io_service_bytes,
	BLKIO_PROP_io_serviced,
	BLKIO_PROP_time,
	BLKIO_PROP_sectors,
	BLKIO_PROP_io_service_time,
	BLKIO_PROP_io_wait_time,
	BLKIO_PROP_io_merged,
	BLKIO_PROP_io_queued,
	BLKIO_PROP_avg_queue_size,
	BLKIO_PROP_group_wait_time,
	BLKIO_PROP_idle_time,
	BLKIO_PROP_empty_time,
	BLKIO_PROP_dequeue,
};
/* cgroup files owned by throttle policy */
enum blkcg_file_name_throtl {
	BLKIO_THROTL_read_bps_device,
	BLKIO_THROTL_write_bps_device,
	BLKIO_THROTL_read_iops_device,
	BLKIO_THROTL_write_iops_device,
	BLKIO_THROTL_io_service_bytes,
	BLKIO_THROTL_io_serviced,
};
102 struct blkio_cgroup
{
103 struct cgroup_subsys_state css
;
106 struct hlist_head blkg_list
;
107 struct list_head policy_list
; /* list of blkio_policy_node */
110 struct blkio_group_stats
{
111 /* total disk time and nr sectors dispatched by this group */
114 uint64_t stat_arr
[BLKIO_STAT_QUEUED
+ 1][BLKIO_STAT_TOTAL
];
115 #ifdef CONFIG_DEBUG_BLK_CGROUP
116 /* Sum of number of IOs queued across all samples */
117 uint64_t avg_queue_size_sum
;
118 /* Count of samples taken for average */
119 uint64_t avg_queue_size_samples
;
120 /* How many times this group has been removed from service tree */
121 unsigned long dequeue
;
123 /* Total time spent waiting for it to be assigned a timeslice. */
124 uint64_t group_wait_time
;
125 uint64_t start_group_wait_time
;
127 /* Time spent idling for this blkio_group */
129 uint64_t start_idle_time
;
131 * Total time when we have requests queued and do not contain the
132 * current active queue.
135 uint64_t start_empty_time
;
141 /* An rcu protected unique identifier for the group */
143 struct hlist_node blkcg_node
;
144 unsigned short blkcg_id
;
145 /* Store cgroup path */
147 /* The device MKDEV(major, minor), this group has been created for */
149 /* policy which owns this blk group */
150 enum blkio_policy_id plid
;
152 /* Need to serialize the stats in the case of reset/update */
153 spinlock_t stats_lock
;
154 struct blkio_group_stats stats
;
157 struct blkio_policy_node
{
158 struct list_head node
;
160 /* This node belongs to max bw policy or porportional weight policy */
161 enum blkio_policy_id plid
;
162 /* cgroup file to which this rule belongs to */
168 * Rate read/write in terms of byptes per second
169 * Whether this rate represents read or write is determined
170 * by file type "fileid".
/*
 * Per-device rule lookups against a cgroup's policy_list.
 * NOTE(review): the second parameter of each getter was dropped by the
 * extraction and is reconstructed as "dev_t dev" (these are per-device
 * rules) — confirm against the original.
 */
extern unsigned int blkcg_get_weight(struct blkio_cgroup *blkcg, dev_t dev);
extern uint64_t blkcg_get_read_bps(struct blkio_cgroup *blkcg, dev_t dev);
extern uint64_t blkcg_get_write_bps(struct blkio_cgroup *blkcg, dev_t dev);
extern unsigned int blkcg_get_read_iops(struct blkio_cgroup *blkcg, dev_t dev);
extern unsigned int blkcg_get_write_iops(struct blkio_cgroup *blkcg, dev_t dev);
188 typedef void (blkio_unlink_group_fn
) (void *key
, struct blkio_group
*blkg
);
190 typedef void (blkio_update_group_weight_fn
) (void *key
,
191 struct blkio_group
*blkg
, unsigned int weight
);
192 typedef void (blkio_update_group_read_bps_fn
) (void * key
,
193 struct blkio_group
*blkg
, u64 read_bps
);
194 typedef void (blkio_update_group_write_bps_fn
) (void *key
,
195 struct blkio_group
*blkg
, u64 write_bps
);
196 typedef void (blkio_update_group_read_iops_fn
) (void *key
,
197 struct blkio_group
*blkg
, unsigned int read_iops
);
198 typedef void (blkio_update_group_write_iops_fn
) (void *key
,
199 struct blkio_group
*blkg
, unsigned int write_iops
);
201 struct blkio_policy_ops
{
202 blkio_unlink_group_fn
*blkio_unlink_group_fn
;
203 blkio_update_group_weight_fn
*blkio_update_group_weight_fn
;
204 blkio_update_group_read_bps_fn
*blkio_update_group_read_bps_fn
;
205 blkio_update_group_write_bps_fn
*blkio_update_group_write_bps_fn
;
206 blkio_update_group_read_iops_fn
*blkio_update_group_read_iops_fn
;
207 blkio_update_group_write_iops_fn
*blkio_update_group_write_iops_fn
;
210 struct blkio_policy_type
{
211 struct list_head list
;
212 struct blkio_policy_ops ops
;
213 enum blkio_policy_id plid
;
216 /* Blkio controller policy registration */
217 extern void blkio_policy_register(struct blkio_policy_type
*);
218 extern void blkio_policy_unregister(struct blkio_policy_type
*);
220 static inline char *blkg_path(struct blkio_group
*blkg
)
230 struct blkio_policy_type
{
233 static inline void blkio_policy_register(struct blkio_policy_type
*blkiop
) { }
234 static inline void blkio_policy_unregister(struct blkio_policy_type
*blkiop
) { }
236 static inline char *blkg_path(struct blkio_group
*blkg
) { return NULL
; }
/* Valid range and default for a cgroup's proportional weight. */
#define BLKIO_WEIGHT_MIN	100
#define BLKIO_WEIGHT_MAX	1000
#define BLKIO_WEIGHT_DEFAULT	500
244 #ifdef CONFIG_DEBUG_BLK_CGROUP
245 void blkiocg_update_avg_queue_size_stats(struct blkio_group
*blkg
);
246 void blkiocg_update_dequeue_stats(struct blkio_group
*blkg
,
247 unsigned long dequeue
);
248 void blkiocg_update_set_idle_time_stats(struct blkio_group
*blkg
);
249 void blkiocg_update_idle_time_stats(struct blkio_group
*blkg
);
250 void blkiocg_set_start_empty_time(struct blkio_group
*blkg
);
252 #define BLKG_FLAG_FNS(name) \
253 static inline void blkio_mark_blkg_##name( \
254 struct blkio_group_stats *stats) \
256 stats->flags |= (1 << BLKG_##name); \
258 static inline void blkio_clear_blkg_##name( \
259 struct blkio_group_stats *stats) \
261 stats->flags &= ~(1 << BLKG_##name); \
263 static inline int blkio_blkg_##name(struct blkio_group_stats *stats) \
265 return (stats->flags & (1 << BLKG_##name)) != 0; \
268 BLKG_FLAG_FNS(waiting)
269 BLKG_FLAG_FNS(idling
)
273 static inline void blkiocg_update_avg_queue_size_stats(
274 struct blkio_group
*blkg
) {}
275 static inline void blkiocg_update_dequeue_stats(struct blkio_group
*blkg
,
276 unsigned long dequeue
) {}
277 static inline void blkiocg_update_set_idle_time_stats(struct blkio_group
*blkg
)
279 static inline void blkiocg_update_idle_time_stats(struct blkio_group
*blkg
) {}
280 static inline void blkiocg_set_start_empty_time(struct blkio_group
*blkg
) {}
283 #if defined(CONFIG_BLK_CGROUP) || defined(CONFIG_BLK_CGROUP_MODULE)
284 extern struct blkio_cgroup blkio_root_cgroup
;
285 extern struct blkio_cgroup
*cgroup_to_blkio_cgroup(struct cgroup
*cgroup
);
286 extern void blkiocg_add_blkio_group(struct blkio_cgroup
*blkcg
,
287 struct blkio_group
*blkg
, void *key
, dev_t dev
,
288 enum blkio_policy_id plid
);
289 extern int blkiocg_del_blkio_group(struct blkio_group
*blkg
);
290 extern struct blkio_group
*blkiocg_lookup_group(struct blkio_cgroup
*blkcg
,
292 void blkiocg_update_timeslice_used(struct blkio_group
*blkg
,
294 void blkiocg_update_dispatch_stats(struct blkio_group
*blkg
, uint64_t bytes
,
295 bool direction
, bool sync
);
296 void blkiocg_update_completion_stats(struct blkio_group
*blkg
,
297 uint64_t start_time
, uint64_t io_start_time
, bool direction
, bool sync
);
298 void blkiocg_update_io_merged_stats(struct blkio_group
*blkg
, bool direction
,
300 void blkiocg_update_io_add_stats(struct blkio_group
*blkg
,
301 struct blkio_group
*curr_blkg
, bool direction
, bool sync
);
302 void blkiocg_update_io_remove_stats(struct blkio_group
*blkg
,
303 bool direction
, bool sync
);
306 static inline struct blkio_cgroup
*
307 cgroup_to_blkio_cgroup(struct cgroup
*cgroup
) { return NULL
; }
309 static inline void blkiocg_add_blkio_group(struct blkio_cgroup
*blkcg
,
310 struct blkio_group
*blkg
, void *key
, dev_t dev
,
311 enum blkio_policy_id plid
) {}
314 blkiocg_del_blkio_group(struct blkio_group
*blkg
) { return 0; }
316 static inline struct blkio_group
*
317 blkiocg_lookup_group(struct blkio_cgroup
*blkcg
, void *key
) { return NULL
; }
318 static inline void blkiocg_update_timeslice_used(struct blkio_group
*blkg
,
319 unsigned long time
) {}
320 static inline void blkiocg_update_dispatch_stats(struct blkio_group
*blkg
,
321 uint64_t bytes
, bool direction
, bool sync
) {}
322 static inline void blkiocg_update_completion_stats(struct blkio_group
*blkg
,
323 uint64_t start_time
, uint64_t io_start_time
, bool direction
,
325 static inline void blkiocg_update_io_merged_stats(struct blkio_group
*blkg
,
326 bool direction
, bool sync
) {}
327 static inline void blkiocg_update_io_add_stats(struct blkio_group
*blkg
,
328 struct blkio_group
*curr_blkg
, bool direction
, bool sync
) {}
329 static inline void blkiocg_update_io_remove_stats(struct blkio_group
*blkg
,
330 bool direction
, bool sync
) {}
332 #endif /* _BLK_CGROUP_H */