]>
Commit | Line | Data |
---|---|---|
31e4c28d VG |
1 | #ifndef _BLK_CGROUP_H |
2 | #define _BLK_CGROUP_H | |
3 | /* | |
4 | * Common Block IO controller cgroup interface | |
5 | * | |
6 | * Based on ideas and code from CFQ, CFS and BFQ: | |
7 | * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk> | |
8 | * | |
9 | * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it> | |
10 | * Paolo Valente <paolo.valente@unimore.it> | |
11 | * | |
12 | * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com> | |
13 | * Nauman Rafique <nauman@google.com> | |
14 | */ | |
15 | ||
16 | #include <linux/cgroup.h> | |
575969a0 | 17 | #include <linux/u64_stats_sync.h> |
31e4c28d | 18 | |
/*
 * IO control policies.  Each registered policy gets one fixed slot,
 * used to index blkg->pd[].
 */
enum blkio_policy_id {
	BLKIO_POLICY_PROP = 0,		/* Proportional Bandwidth division */
	BLKIO_POLICY_THROTL,		/* Throttling */

	BLKIO_NR_POLICIES,
};

/* Max limits for throttle policy */
#define THROTL_IOPS_MAX		UINT_MAX
32e380ae | 29 | #ifdef CONFIG_BLK_CGROUP |
2f5ea477 | 30 | |
/*
 * Per-group statistics indices.  Types below BLKIO_STAT_ARR_NR live in
 * stat_arr[] and are further broken down by enum stat_sub_type; the
 * remaining types are single u64 values.
 */
enum stat_type {
	/* Number of IOs merged */
	BLKIO_STAT_MERGED,
	/* Total time spent (in ns) between request dispatch to the driver and
	 * request completion for IOs done by this cgroup. This may not be
	 * accurate when NCQ is turned on. */
	BLKIO_STAT_SERVICE_TIME,
	/* Total time spent waiting in scheduler queue in ns */
	BLKIO_STAT_WAIT_TIME,
	/* Number of IOs queued up */
	BLKIO_STAT_QUEUED,

	/* All the single valued stats go below this */
	BLKIO_STAT_TIME,
#ifdef CONFIG_DEBUG_BLK_CGROUP
	/* Time not charged to this cgroup */
	BLKIO_STAT_UNACCOUNTED_TIME,
	BLKIO_STAT_AVG_QUEUE_SIZE,
	BLKIO_STAT_IDLE_TIME,
	BLKIO_STAT_EMPTY_TIME,
	BLKIO_STAT_GROUP_WAIT_TIME,
	BLKIO_STAT_DEQUEUE
#endif
};

/* Types lower than this live in stat_arr and have subtypes */
#define BLKIO_STAT_ARR_NR	(BLKIO_STAT_QUEUED + 1)
/* Per cpu stats; types below BLKIO_STAT_CPU_ARR_NR have subtypes */
enum stat_type_cpu {
	/* Total bytes transferred */
	BLKIO_STAT_CPU_SERVICE_BYTES,
	/* Total IOs serviced, post merge */
	BLKIO_STAT_CPU_SERVICED,

	/* All the single valued stats go below this */
	BLKIO_STAT_CPU_SECTORS,
};

/* Types lower than this live in stat_arr_cpu and have subtypes */
#define BLKIO_STAT_CPU_ARR_NR	(BLKIO_STAT_CPU_SERVICED + 1)
/* Second index of stat_arr[] / stat_arr_cpu[]: direction / sync-ness */
enum stat_sub_type {
	BLKIO_STAT_READ = 0,
	BLKIO_STAT_WRITE,
	BLKIO_STAT_SYNC,
	BLKIO_STAT_ASYNC,
	BLKIO_STAT_TOTAL
};

/* blkg state flags; bit positions within blkio_group_stats->flags */
enum blkg_state_flags {
	BLKG_waiting = 0,
	BLKG_idling,
	BLKG_empty,
};
/* cgroup files owned by proportional weight policy */
enum blkcg_file_name_prop {
	BLKIO_PROP_weight = 1,
	BLKIO_PROP_weight_device,
	BLKIO_PROP_io_service_bytes,
	BLKIO_PROP_io_serviced,
	BLKIO_PROP_time,
	BLKIO_PROP_sectors,
	BLKIO_PROP_unaccounted_time,
	BLKIO_PROP_io_service_time,
	BLKIO_PROP_io_wait_time,
	BLKIO_PROP_io_merged,
	BLKIO_PROP_io_queued,
	BLKIO_PROP_avg_queue_size,
	BLKIO_PROP_group_wait_time,
	BLKIO_PROP_idle_time,
	BLKIO_PROP_empty_time,
	BLKIO_PROP_dequeue,
};

/* cgroup files owned by throttle policy */
enum blkcg_file_name_throtl {
	BLKIO_THROTL_read_bps_device,
	BLKIO_THROTL_write_bps_device,
	BLKIO_THROTL_read_iops_device,
	BLKIO_THROTL_write_iops_device,
	BLKIO_THROTL_io_service_bytes,
	BLKIO_THROTL_io_serviced,
};
31e4c28d VG |
117 | struct blkio_cgroup { |
118 | struct cgroup_subsys_state css; | |
119 | unsigned int weight; | |
120 | spinlock_t lock; | |
121 | struct hlist_head blkg_list; | |
9a9e8a26 TH |
122 | |
123 | /* for policies to test whether associated blkcg has changed */ | |
124 | uint64_t id; | |
31e4c28d VG |
125 | }; |
126 | ||
303a3acb | 127 | struct blkio_group_stats { |
edf1b879 | 128 | struct u64_stats_sync syncp; |
303a3acb DS |
129 | /* total disk time and nr sectors dispatched by this group */ |
130 | uint64_t time; | |
c4c76a05 | 131 | uint64_t stat_arr[BLKIO_STAT_ARR_NR][BLKIO_STAT_TOTAL]; |
303a3acb | 132 | #ifdef CONFIG_DEBUG_BLK_CGROUP |
a23e6869 VG |
133 | /* Time not charged to this cgroup */ |
134 | uint64_t unaccounted_time; | |
135 | ||
cdc1184c DS |
136 | /* Sum of number of IOs queued across all samples */ |
137 | uint64_t avg_queue_size_sum; | |
138 | /* Count of samples taken for average */ | |
139 | uint64_t avg_queue_size_samples; | |
303a3acb DS |
140 | /* How many times this group has been removed from service tree */ |
141 | unsigned long dequeue; | |
812df48d DS |
142 | |
143 | /* Total time spent waiting for it to be assigned a timeslice. */ | |
144 | uint64_t group_wait_time; | |
812df48d DS |
145 | |
146 | /* Time spent idling for this blkio_group */ | |
147 | uint64_t idle_time; | |
812df48d DS |
148 | /* |
149 | * Total time when we have requests queued and do not contain the | |
150 | * current active queue. | |
151 | */ | |
152 | uint64_t empty_time; | |
997a026c TH |
153 | |
154 | /* fields after this shouldn't be cleared on stat reset */ | |
155 | uint64_t start_group_wait_time; | |
156 | uint64_t start_idle_time; | |
812df48d DS |
157 | uint64_t start_empty_time; |
158 | uint16_t flags; | |
303a3acb DS |
159 | #endif |
160 | }; | |
161 | ||
997a026c TH |
162 | #ifdef CONFIG_DEBUG_BLK_CGROUP |
163 | #define BLKG_STATS_DEBUG_CLEAR_START \ | |
164 | offsetof(struct blkio_group_stats, unaccounted_time) | |
165 | #define BLKG_STATS_DEBUG_CLEAR_SIZE \ | |
166 | (offsetof(struct blkio_group_stats, start_group_wait_time) - \ | |
167 | BLKG_STATS_DEBUG_CLEAR_START) | |
168 | #endif | |
169 | ||
5624a4e4 VG |
170 | /* Per cpu blkio group stats */ |
171 | struct blkio_group_stats_cpu { | |
172 | uint64_t sectors; | |
2aa4a152 | 173 | uint64_t stat_arr_cpu[BLKIO_STAT_CPU_ARR_NR][BLKIO_STAT_TOTAL]; |
575969a0 | 174 | struct u64_stats_sync syncp; |
5624a4e4 VG |
175 | }; |
176 | ||
e56da7e2 TH |
177 | struct blkio_group_conf { |
178 | unsigned int weight; | |
179 | unsigned int iops[2]; | |
180 | u64 bps[2]; | |
181 | }; | |
182 | ||
0381411e TH |
183 | /* per-blkg per-policy data */ |
184 | struct blkg_policy_data { | |
185 | /* the blkg this per-policy data belongs to */ | |
186 | struct blkio_group *blkg; | |
187 | ||
549d3aa8 TH |
188 | /* Configuration */ |
189 | struct blkio_group_conf conf; | |
190 | ||
191 | struct blkio_group_stats stats; | |
192 | /* Per cpu stats pointer */ | |
193 | struct blkio_group_stats_cpu __percpu *stats_cpu; | |
194 | ||
0381411e TH |
195 | /* pol->pdata_size bytes of private data used by policy impl */ |
196 | char pdata[] __aligned(__alignof__(unsigned long long)); | |
197 | }; | |
198 | ||
31e4c28d | 199 | struct blkio_group { |
c875f4d0 TH |
200 | /* Pointer to the associated request_queue */ |
201 | struct request_queue *q; | |
e8989fae | 202 | struct list_head q_node; |
31e4c28d | 203 | struct hlist_node blkcg_node; |
7ee9c562 | 204 | struct blkio_cgroup *blkcg; |
2868ef7b VG |
205 | /* Store cgroup path */ |
206 | char path[128]; | |
1adaf3dd TH |
207 | /* reference count */ |
208 | int refcnt; | |
22084190 | 209 | |
549d3aa8 | 210 | struct blkg_policy_data *pd[BLKIO_NR_POLICIES]; |
1adaf3dd | 211 | |
1cd9e039 VG |
212 | /* List of blkg waiting for per cpu stats memory to be allocated */ |
213 | struct list_head alloc_node; | |
1adaf3dd | 214 | struct rcu_head rcu_head; |
31e4c28d VG |
215 | }; |
216 | ||
0381411e | 217 | typedef void (blkio_init_group_fn)(struct blkio_group *blkg); |
ca32aefc | 218 | typedef void (blkio_update_group_weight_fn)(struct request_queue *q, |
fe071437 | 219 | struct blkio_group *blkg, unsigned int weight); |
ca32aefc | 220 | typedef void (blkio_update_group_read_bps_fn)(struct request_queue *q, |
fe071437 | 221 | struct blkio_group *blkg, u64 read_bps); |
ca32aefc | 222 | typedef void (blkio_update_group_write_bps_fn)(struct request_queue *q, |
fe071437 | 223 | struct blkio_group *blkg, u64 write_bps); |
ca32aefc | 224 | typedef void (blkio_update_group_read_iops_fn)(struct request_queue *q, |
fe071437 | 225 | struct blkio_group *blkg, unsigned int read_iops); |
ca32aefc | 226 | typedef void (blkio_update_group_write_iops_fn)(struct request_queue *q, |
fe071437 | 227 | struct blkio_group *blkg, unsigned int write_iops); |
3e252066 VG |
228 | |
229 | struct blkio_policy_ops { | |
0381411e | 230 | blkio_init_group_fn *blkio_init_group_fn; |
3e252066 | 231 | blkio_update_group_weight_fn *blkio_update_group_weight_fn; |
4c9eefa1 VG |
232 | blkio_update_group_read_bps_fn *blkio_update_group_read_bps_fn; |
233 | blkio_update_group_write_bps_fn *blkio_update_group_write_bps_fn; | |
7702e8f4 VG |
234 | blkio_update_group_read_iops_fn *blkio_update_group_read_iops_fn; |
235 | blkio_update_group_write_iops_fn *blkio_update_group_write_iops_fn; | |
3e252066 VG |
236 | }; |
237 | ||
238 | struct blkio_policy_type { | |
239 | struct list_head list; | |
240 | struct blkio_policy_ops ops; | |
062a644d | 241 | enum blkio_policy_id plid; |
0381411e | 242 | size_t pdata_size; /* policy specific private data size */ |
3e252066 VG |
243 | }; |
244 | ||
5efd6113 TH |
245 | extern int blkcg_init_queue(struct request_queue *q); |
246 | extern void blkcg_drain_queue(struct request_queue *q); | |
247 | extern void blkcg_exit_queue(struct request_queue *q); | |
248 | ||
3e252066 VG |
249 | /* Blkio controller policy registration */ |
250 | extern void blkio_policy_register(struct blkio_policy_type *); | |
251 | extern void blkio_policy_unregister(struct blkio_policy_type *); | |
e8989fae TH |
252 | extern void blkg_destroy_all(struct request_queue *q, bool destroy_root); |
253 | extern void update_root_blkg_pd(struct request_queue *q, | |
254 | enum blkio_policy_id plid); | |
3e252066 | 255 | |
0381411e TH |
256 | /** |
257 | * blkg_to_pdata - get policy private data | |
258 | * @blkg: blkg of interest | |
259 | * @pol: policy of interest | |
260 | * | |
261 | * Return pointer to private data associated with the @blkg-@pol pair. | |
262 | */ | |
263 | static inline void *blkg_to_pdata(struct blkio_group *blkg, | |
264 | struct blkio_policy_type *pol) | |
265 | { | |
549d3aa8 | 266 | return blkg ? blkg->pd[pol->plid]->pdata : NULL; |
0381411e TH |
267 | } |
268 | ||
269 | /** | |
270 | * pdata_to_blkg - get blkg associated with policy private data | |
271 | * @pdata: policy private data of interest | |
0381411e | 272 | * |
aaec55a0 | 273 | * @pdata is policy private data. Determine the blkg it's associated with. |
0381411e | 274 | */ |
aaec55a0 | 275 | static inline struct blkio_group *pdata_to_blkg(void *pdata) |
0381411e TH |
276 | { |
277 | if (pdata) { | |
278 | struct blkg_policy_data *pd = | |
279 | container_of(pdata, struct blkg_policy_data, pdata); | |
280 | return pd->blkg; | |
281 | } | |
282 | return NULL; | |
283 | } | |
284 | ||
afc24d49 VG |
285 | static inline char *blkg_path(struct blkio_group *blkg) |
286 | { | |
287 | return blkg->path; | |
288 | } | |
289 | ||
1adaf3dd TH |
290 | /** |
291 | * blkg_get - get a blkg reference | |
292 | * @blkg: blkg to get | |
293 | * | |
294 | * The caller should be holding queue_lock and an existing reference. | |
295 | */ | |
296 | static inline void blkg_get(struct blkio_group *blkg) | |
297 | { | |
298 | lockdep_assert_held(blkg->q->queue_lock); | |
299 | WARN_ON_ONCE(!blkg->refcnt); | |
300 | blkg->refcnt++; | |
301 | } | |
302 | ||
303 | void __blkg_release(struct blkio_group *blkg); | |
304 | ||
305 | /** | |
306 | * blkg_put - put a blkg reference | |
307 | * @blkg: blkg to put | |
308 | * | |
309 | * The caller should be holding queue_lock. | |
310 | */ | |
311 | static inline void blkg_put(struct blkio_group *blkg) | |
312 | { | |
313 | lockdep_assert_held(blkg->q->queue_lock); | |
314 | WARN_ON_ONCE(blkg->refcnt <= 0); | |
315 | if (!--blkg->refcnt) | |
316 | __blkg_release(blkg); | |
317 | } | |
318 | ||
2f5ea477 JA |
319 | #else |
320 | ||
321 | struct blkio_group { | |
322 | }; | |
323 | ||
3e252066 VG |
324 | struct blkio_policy_type { |
325 | }; | |
326 | ||
5efd6113 TH |
327 | static inline int blkcg_init_queue(struct request_queue *q) { return 0; } |
328 | static inline void blkcg_drain_queue(struct request_queue *q) { } | |
329 | static inline void blkcg_exit_queue(struct request_queue *q) { } | |
3e252066 VG |
330 | static inline void blkio_policy_register(struct blkio_policy_type *blkiop) { } |
331 | static inline void blkio_policy_unregister(struct blkio_policy_type *blkiop) { } | |
03aa264a | 332 | static inline void blkg_destroy_all(struct request_queue *q, |
03aa264a | 333 | bool destory_root) { } |
e8989fae TH |
334 | static inline void update_root_blkg_pd(struct request_queue *q, |
335 | enum blkio_policy_id plid) { } | |
3e252066 | 336 | |
0381411e TH |
337 | static inline void *blkg_to_pdata(struct blkio_group *blkg, |
338 | struct blkio_policy_type *pol) { return NULL; } | |
339 | static inline struct blkio_group *pdata_to_blkg(void *pdata, | |
340 | struct blkio_policy_type *pol) { return NULL; } | |
afc24d49 | 341 | static inline char *blkg_path(struct blkio_group *blkg) { return NULL; } |
1adaf3dd TH |
342 | static inline void blkg_get(struct blkio_group *blkg) { } |
343 | static inline void blkg_put(struct blkio_group *blkg) { } | |
afc24d49 | 344 | |
2f5ea477 JA |
345 | #endif |
346 | ||
/* valid range and default for blkio_cgroup->weight */
#define BLKIO_WEIGHT_MIN	10
#define BLKIO_WEIGHT_MAX	1000
#define BLKIO_WEIGHT_DEFAULT	500
#ifdef CONFIG_DEBUG_BLK_CGROUP
/* debug-only stat update hooks, implemented in blk-cgroup.c */
void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg,
					 struct blkio_policy_type *pol);
void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
				  struct blkio_policy_type *pol,
				  unsigned long dequeue);
void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg,
					struct blkio_policy_type *pol);
void blkiocg_update_idle_time_stats(struct blkio_group *blkg,
				    struct blkio_policy_type *pol);
void blkiocg_set_start_empty_time(struct blkio_group *blkg,
				  struct blkio_policy_type *pol);

/* generate mark/clear/test helpers for each BLKG_* state flag bit */
#define BLKG_FLAG_FNS(name)						\
static inline void blkio_mark_blkg_##name(				\
		struct blkio_group_stats *stats)			\
{									\
	stats->flags |= (1 << BLKG_##name);				\
}									\
static inline void blkio_clear_blkg_##name(				\
		struct blkio_group_stats *stats)			\
{									\
	stats->flags &= ~(1 << BLKG_##name);				\
}									\
static inline int blkio_blkg_##name(struct blkio_group_stats *stats)	\
{									\
	return (stats->flags & (1 << BLKG_##name)) != 0;		\
}									\

BLKG_FLAG_FNS(waiting)
BLKG_FLAG_FNS(idling)
BLKG_FLAG_FNS(empty)
#undef BLKG_FLAG_FNS
#else	/* !CONFIG_DEBUG_BLK_CGROUP */
static inline void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg,
		struct blkio_policy_type *pol) { }
static inline void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
		struct blkio_policy_type *pol, unsigned long dequeue) { }
static inline void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg,
		struct blkio_policy_type *pol) { }
static inline void blkiocg_update_idle_time_stats(struct blkio_group *blkg,
		struct blkio_policy_type *pol) { }
static inline void blkiocg_set_start_empty_time(struct blkio_group *blkg,
		struct blkio_policy_type *pol) { }
#endif	/* CONFIG_DEBUG_BLK_CGROUP */
396 | ||
32e380ae | 397 | #ifdef CONFIG_BLK_CGROUP |
31e4c28d VG |
398 | extern struct blkio_cgroup blkio_root_cgroup; |
399 | extern struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup); | |
4f85cb96 | 400 | extern struct blkio_cgroup *bio_blkio_cgroup(struct bio *bio); |
cd1604fa | 401 | extern struct blkio_group *blkg_lookup(struct blkio_cgroup *blkcg, |
e8989fae | 402 | struct request_queue *q); |
cd1604fa TH |
403 | struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg, |
404 | struct request_queue *q, | |
cd1604fa | 405 | bool for_root); |
303a3acb | 406 | void blkiocg_update_timeslice_used(struct blkio_group *blkg, |
c1768268 TH |
407 | struct blkio_policy_type *pol, |
408 | unsigned long time, | |
409 | unsigned long unaccounted_time); | |
410 | void blkiocg_update_dispatch_stats(struct blkio_group *blkg, | |
411 | struct blkio_policy_type *pol, | |
412 | uint64_t bytes, bool direction, bool sync); | |
84c124da | 413 | void blkiocg_update_completion_stats(struct blkio_group *blkg, |
c1768268 TH |
414 | struct blkio_policy_type *pol, |
415 | uint64_t start_time, | |
416 | uint64_t io_start_time, bool direction, | |
417 | bool sync); | |
418 | void blkiocg_update_io_merged_stats(struct blkio_group *blkg, | |
419 | struct blkio_policy_type *pol, | |
420 | bool direction, bool sync); | |
a11cdaa7 | 421 | void blkiocg_update_io_add_stats(struct blkio_group *blkg, |
c1768268 TH |
422 | struct blkio_policy_type *pol, |
423 | struct blkio_group *curr_blkg, bool direction, | |
424 | bool sync); | |
a11cdaa7 | 425 | void blkiocg_update_io_remove_stats(struct blkio_group *blkg, |
c1768268 TH |
426 | struct blkio_policy_type *pol, |
427 | bool direction, bool sync); | |
31e4c28d | 428 | #else |
2f5ea477 | 429 | struct cgroup; |
31e4c28d VG |
430 | static inline struct blkio_cgroup * |
431 | cgroup_to_blkio_cgroup(struct cgroup *cgroup) { return NULL; } | |
70087dc3 | 432 | static inline struct blkio_cgroup * |
4f85cb96 | 433 | bio_blkio_cgroup(struct bio *bio) { return NULL; } |
31e4c28d | 434 | |
cd1604fa TH |
435 | static inline struct blkio_group *blkg_lookup(struct blkio_cgroup *blkcg, |
436 | void *key) { return NULL; } | |
303a3acb | 437 | static inline void blkiocg_update_timeslice_used(struct blkio_group *blkg, |
c1768268 TH |
438 | struct blkio_policy_type *pol, unsigned long time, |
439 | unsigned long unaccounted_time) { } | |
84c124da | 440 | static inline void blkiocg_update_dispatch_stats(struct blkio_group *blkg, |
c1768268 TH |
441 | struct blkio_policy_type *pol, uint64_t bytes, |
442 | bool direction, bool sync) { } | |
84c124da | 443 | static inline void blkiocg_update_completion_stats(struct blkio_group *blkg, |
c1768268 TH |
444 | struct blkio_policy_type *pol, uint64_t start_time, |
445 | uint64_t io_start_time, bool direction, bool sync) { } | |
812d4026 | 446 | static inline void blkiocg_update_io_merged_stats(struct blkio_group *blkg, |
c1768268 TH |
447 | struct blkio_policy_type *pol, bool direction, |
448 | bool sync) { } | |
a11cdaa7 | 449 | static inline void blkiocg_update_io_add_stats(struct blkio_group *blkg, |
c1768268 TH |
450 | struct blkio_policy_type *pol, |
451 | struct blkio_group *curr_blkg, bool direction, | |
452 | bool sync) { } | |
a11cdaa7 | 453 | static inline void blkiocg_update_io_remove_stats(struct blkio_group *blkg, |
c1768268 TH |
454 | struct blkio_policy_type *pol, bool direction, |
455 | bool sync) { } | |
31e4c28d VG |
456 | #endif |
457 | #endif /* _BLK_CGROUP_H */ |