#ifndef _BLK_CGROUP_H
#define _BLK_CGROUP_H
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */

#include <linux/cgroup.h>
#include <linux/u64_stats_sync.h>
#include <linux/seq_file.h>
#include <linux/radix-tree.h>
#include <linux/blkdev.h>
#include <linux/atomic.h>

/* Max limits for throttle policy */
#define THROTL_IOPS_MAX		UINT_MAX

#ifdef CONFIG_BLK_CGROUP

enum blkg_rwstat_type {
	BLKG_RWSTAT_READ,
	BLKG_RWSTAT_WRITE,
	BLKG_RWSTAT_SYNC,
	BLKG_RWSTAT_ASYNC,

	BLKG_RWSTAT_NR,
	BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR,
};

struct blkcg_gq;

struct blkcg {
	struct cgroup_subsys_state	css;
	spinlock_t			lock;

	struct radix_tree_root		blkg_tree;
	struct blkcg_gq			*blkg_hint;
	struct hlist_head		blkg_list;

	struct blkcg_policy_data	*pd[BLKCG_MAX_POLS];

	struct list_head		all_blkcgs_node;
#ifdef CONFIG_CGROUP_WRITEBACK
	struct list_head		cgwb_list;
#endif
};

struct blkg_stat {
	struct u64_stats_sync		syncp;
	uint64_t			cnt;
};

struct blkg_rwstat {
	struct u64_stats_sync		syncp;
	uint64_t			cnt[BLKG_RWSTAT_NR];
};

/*
 * A blkcg_gq (blkg) is an association between a block cgroup (blkcg) and a
 * request_queue (q).  This is used by blkcg policies which need to track
 * information per blkcg-q pair.
 *
 * There can be multiple active blkcg policies and each blkg:policy pair is
 * represented by a blkg_policy_data which is allocated and freed by each
 * policy's pd_alloc/free_fn() methods.  A policy can allocate a private
 * data area by allocating a larger data structure which embeds
 * blkg_policy_data at the beginning.
 */
struct blkg_policy_data {
	/* the blkg and policy id this per-policy data belongs to */
	struct blkcg_gq			*blkg;
	int				plid;
};

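/*
 * A minimal sketch of the embedding pattern described above, using a
 * hypothetical policy "foo"; none of these names exist in the tree.  The
 * policy hands out struct foo_group from its pd_alloc_fn() and recovers
 * it from a blkg_policy_data pointer with container_of():
 *
 *	struct foo_group {
 *		struct blkg_policy_data pd;	// embedded at the beginning
 *		u64 some_private_counter;
 *	};
 *
 *	static inline struct foo_group *pd_to_fg(struct blkg_policy_data *pd)
 *	{
 *		return pd ? container_of(pd, struct foo_group, pd) : NULL;
 *	}
 */
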
/*
 * Policies that need to keep per-blkcg data which is independent from any
 * request_queue associated with it must specify the size of that data with
 * the cpd_size field of the blkcg_policy structure and embed a
 * blkcg_policy_data in it.  cpd_init_fn() is invoked to let each policy
 * handle its per-blkcg data.
 */
struct blkcg_policy_data {
	/* the policy id this per-policy data belongs to */
	int				plid;
};

/* association between a blk cgroup and a request queue */
struct blkcg_gq {
	/* Pointer to the associated request_queue */
	struct request_queue		*q;
	struct list_head		q_node;
	struct hlist_node		blkcg_node;
	struct blkcg			*blkcg;

	/*
	 * Each blkg gets congested separately and the congestion state is
	 * propagated to the matching bdi_writeback_congested.
	 */
	struct bdi_writeback_congested	*wb_congested;

	/* all non-root blkcg_gq's are guaranteed to have access to parent */
	struct blkcg_gq			*parent;

	/* request allocation list for this blkcg-q pair */
	struct request_list		rl;

	/* reference count */
	atomic_t			refcnt;

	/* is this blkg online? protected by both blkcg and q locks */
	bool				online;

	struct blkg_policy_data		*pd[BLKCG_MAX_POLS];

	struct rcu_head			rcu_head;
};

typedef void (blkcg_pol_init_cpd_fn)(const struct blkcg *blkcg);
typedef struct blkg_policy_data *(blkcg_pol_alloc_pd_fn)(gfp_t gfp, int node);
typedef void (blkcg_pol_init_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_online_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_offline_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_free_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkg_policy_data *pd);

struct blkcg_policy {
	int				plid;
	/* policy specific per-blkcg data size */
	size_t				cpd_size;
	/* cgroup files for the policy */
	struct cftype			*cftypes;

	/* operations */
	blkcg_pol_init_cpd_fn		*cpd_init_fn;
	blkcg_pol_alloc_pd_fn		*pd_alloc_fn;
	blkcg_pol_init_pd_fn		*pd_init_fn;
	blkcg_pol_online_pd_fn		*pd_online_fn;
	blkcg_pol_offline_pd_fn		*pd_offline_fn;
	blkcg_pol_free_pd_fn		*pd_free_fn;
	blkcg_pol_reset_pd_stats_fn	*pd_reset_stats_fn;
};

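/*
 * A sketch of a policy definition wiring these methods up, again using
 * the hypothetical "foo" policy from above (foo_cg_data, foo_files and
 * the foo_pd_*_fn helpers are assumed names, not kernel symbols).  Note
 * that ->plid is assigned by blkcg_policy_register(), not the policy:
 *
 *	static struct blkcg_policy blkcg_policy_foo = {
 *		.cpd_size		= sizeof(struct foo_cg_data),
 *		.cftypes		= foo_files,
 *		.pd_alloc_fn		= foo_pd_alloc_fn,
 *		.pd_init_fn		= foo_pd_init_fn,
 *		.pd_free_fn		= foo_pd_free_fn,
 *	};
 */
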
extern struct blkcg blkcg_root;
extern struct cgroup_subsys_state * const blkcg_root_css;

struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q);
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
				    struct request_queue *q);
int blkcg_init_queue(struct request_queue *q);
void blkcg_drain_queue(struct request_queue *q);
void blkcg_exit_queue(struct request_queue *q);

/* Blkio controller policy registration */
int blkcg_policy_register(struct blkcg_policy *pol);
void blkcg_policy_unregister(struct blkcg_policy *pol);
int blkcg_activate_policy(struct request_queue *q,
			  const struct blkcg_policy *pol);
void blkcg_deactivate_policy(struct request_queue *q,
			     const struct blkcg_policy *pol);

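/*
 * Typical registration flow, sketched for the hypothetical "foo" policy:
 * register once at init time, then activate on each queue the policy
 * should manage (error handling elided for brevity):
 *
 *	static int __init foo_init(void)
 *	{
 *		return blkcg_policy_register(&blkcg_policy_foo);
 *	}
 *
 *	// later, when the policy is enabled on a particular queue:
 *	ret = blkcg_activate_policy(q, &blkcg_policy_foo);
 */
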
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total);
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
			 const struct blkg_rwstat *rwstat);
u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off);
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
		       int off);

u64 blkg_stat_recursive_sum(struct blkg_policy_data *pd, int off);
struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkg_policy_data *pd,
					     int off);

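/*
 * A sketch of how a policy's cgroup file could dump per-blkg stats with
 * these helpers; foo_print_stat() and the offsetof() target are assumed
 * names for illustration only.  blkg_prfill_stat() reads the blkg_stat
 * at the given offset within the policy data:
 *
 *	static int foo_print_stat(struct seq_file *sf, void *v)
 *	{
 *		blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
 *				  blkg_prfill_stat, &blkcg_policy_foo,
 *				  offsetof(struct foo_group, stat), false);
 *		return 0;
 *	}
 */
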
struct blkg_conf_ctx {
	struct gendisk			*disk;
	struct blkcg_gq			*blkg;
	u64				v;
};

int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   const char *input, struct blkg_conf_ctx *ctx);
void blkg_conf_finish(struct blkg_conf_ctx *ctx);

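/*
 * blkg_conf_prep() and blkg_conf_finish() bracket a configuration write.
 * A sketch of a write handler (policy-specific application of the value
 * is elided; @buf is the assumed user input string):
 *
 *	ret = blkg_conf_prep(blkcg, &blkcg_policy_foo, buf, &ctx);
 *	if (ret)
 *		return ret;
 *	// ctx.blkg and ctx.v are now valid; apply ctx.v to ctx.blkg here
 *	blkg_conf_finish(&ctx);
 */
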
static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct blkcg, css) : NULL;
}

static inline struct blkcg *task_blkcg(struct task_struct *tsk)
{
	return css_to_blkcg(task_css(tsk, blkio_cgrp_id));
}

static inline struct blkcg *bio_blkcg(struct bio *bio)
{
	if (bio && bio->bi_css)
		return css_to_blkcg(bio->bi_css);
	return task_blkcg(current);
}

static inline struct cgroup_subsys_state *
task_get_blkcg_css(struct task_struct *task)
{
	return task_get_css(task, blkio_cgrp_id);
}

/**
 * blkcg_parent - get the parent of a blkcg
 * @blkcg: blkcg of interest
 *
 * Return the parent blkcg of @blkcg.  Can be called anytime.
 */
static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
{
	return css_to_blkcg(blkcg->css.parent);
}

/**
 * blkg_to_pd - get policy private data
 * @blkg: blkg of interest
 * @pol: policy of interest
 *
 * Return pointer to private data associated with the @blkg-@pol pair.
 */
static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol)
{
	return blkg ? blkg->pd[pol->plid] : NULL;
}

static inline struct blkcg_policy_data *blkcg_to_cpd(struct blkcg *blkcg,
						     struct blkcg_policy *pol)
{
	return blkcg ? blkcg->pd[pol->plid] : NULL;
}

/**
 * pd_to_blkg - get blkg associated with policy private data
 * @pd: policy private data of interest
 *
 * @pd is policy private data.  Determine the blkg it's associated with.
 */
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
{
	return pd ? pd->blkg : NULL;
}

/**
 * blkg_path - format cgroup path of blkg
 * @blkg: blkg of interest
 * @buf: target buffer
 * @buflen: target buffer length
 *
 * Format the path of the cgroup of @blkg into @buf.
 */
static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
{
	char *p;

	p = cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
	if (!p) {
		strncpy(buf, "<unavailable>", buflen);
		return -ENAMETOOLONG;
	}

	memmove(buf, p, buf + buflen - p);
	return 0;
}

/**
 * blkg_get - get a blkg reference
 * @blkg: blkg to get
 *
 * The caller should be holding an existing reference.
 */
static inline void blkg_get(struct blkcg_gq *blkg)
{
	WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
	atomic_inc(&blkg->refcnt);
}

void __blkg_release_rcu(struct rcu_head *rcu);

/**
 * blkg_put - put a blkg reference
 * @blkg: blkg to put
 */
static inline void blkg_put(struct blkcg_gq *blkg)
{
	WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
	if (atomic_dec_and_test(&blkg->refcnt))
		call_rcu(&blkg->rcu_head, __blkg_release_rcu);
}

struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg, struct request_queue *q,
			       bool update_hint);

/**
 * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Walk @d_blkg through the descendants of @p_blkg.  Must be used with RCU
 * read locked.  If called under either blkcg or queue lock, the iteration
 * is guaranteed to include all and only online blkgs.  The caller may
 * update @pos_css by calling css_rightmost_descendant() to skip a subtree.
 * @p_blkg is included in the iteration and is the first node to be visited.
 */
#define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))

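/*
 * Example shape of a pre-order walk (a sketch; update_stats() is an
 * assumed callback, not a kernel symbol):
 *
 *	struct cgroup_subsys_state *pos_css;
 *	struct blkcg_gq *d_blkg;
 *
 *	rcu_read_lock();
 *	blkg_for_each_descendant_pre(d_blkg, pos_css, blkg)
 *		update_stats(d_blkg);
 *	rcu_read_unlock();
 */
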
/**
 * blkg_for_each_descendant_post - post-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Similar to blkg_for_each_descendant_pre() but performs post-order
 * traversal instead.  Synchronization rules are the same.  @p_blkg is
 * included in the iteration and is the last node to be visited.
 */
#define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))

/**
 * blk_get_rl - get request_list to use
 * @q: request_queue of interest
 * @bio: bio which will be attached to the allocated request (may be %NULL)
 *
 * The caller wants to allocate a request from @q to use for @bio.  Find
 * the request_list to use and obtain a reference on it.  Should be called
 * under queue_lock.  This function is guaranteed to return a non-%NULL
 * request_list.
 */
static inline struct request_list *blk_get_rl(struct request_queue *q,
					      struct bio *bio)
{
	struct blkcg *blkcg;
	struct blkcg_gq *blkg;

	rcu_read_lock();

	blkcg = bio_blkcg(bio);

	/* bypass blkg lookup and use @q->root_rl directly for root */
	if (blkcg == &blkcg_root)
		goto root_rl;

	/*
	 * Try to use blkg->rl.  blkg lookup may fail under memory pressure
	 * or if either the blkcg or queue is going away.  Fall back to
	 * root_rl in such cases.
	 */
	blkg = blkg_lookup_create(blkcg, q);
	if (unlikely(IS_ERR(blkg)))
		goto root_rl;

	blkg_get(blkg);
	rcu_read_unlock();
	return &blkg->rl;
root_rl:
	rcu_read_unlock();
	return &q->root_rl;
}

/**
 * blk_put_rl - put request_list
 * @rl: request_list to put
 *
 * Put the reference acquired by blk_get_rl().  Should be called under
 * queue_lock.
 */
static inline void blk_put_rl(struct request_list *rl)
{
	if (rl->blkg->blkcg != &blkcg_root)
		blkg_put(rl->blkg);
}

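/*
 * blk_get_rl() and blk_put_rl() bracket request allocation.  A sketch of
 * the allocation side, under queue_lock (alloc_request_from() is an
 * assumed helper for illustration, not a kernel symbol):
 *
 *	rl = blk_get_rl(q, bio);
 *	rq = alloc_request_from(rl);
 *	if (!rq) {
 *		blk_put_rl(rl);
 *		return NULL;
 *	}
 *	blk_rq_set_rl(rq, rl);	// freeing path does blk_put_rl(blk_rq_rl(rq))
 */
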
/**
 * blk_rq_set_rl - associate a request with a request_list
 * @rq: request of interest
 * @rl: target request_list
 *
 * Associate @rq with @rl so that accounting and freeing can know the
 * request_list @rq came from.
 */
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl)
{
	rq->rl = rl;
}

/**
 * blk_rq_rl - return the request_list a request came from
 * @rq: request of interest
 *
 * Return the request_list @rq is allocated from.
 */
static inline struct request_list *blk_rq_rl(struct request *rq)
{
	return rq->rl;
}

struct request_list *__blk_queue_next_rl(struct request_list *rl,
					 struct request_queue *q);
/**
 * blk_queue_for_each_rl - iterate through all request_lists of a request_queue
 *
 * Should be used under queue_lock.
 */
#define blk_queue_for_each_rl(rl, q)	\
	for ((rl) = &(q)->root_rl; (rl); (rl) = __blk_queue_next_rl((rl), (q)))

static inline void blkg_stat_init(struct blkg_stat *stat)
{
	u64_stats_init(&stat->syncp);
}

/**
 * blkg_stat_add - add a value to a blkg_stat
 * @stat: target blkg_stat
 * @val: value to add
 *
 * Add @val to @stat.  The caller is responsible for synchronizing calls to
 * this function.
 */
static inline void blkg_stat_add(struct blkg_stat *stat, uint64_t val)
{
	u64_stats_update_begin(&stat->syncp);
	stat->cnt += val;
	u64_stats_update_end(&stat->syncp);
}

/**
 * blkg_stat_read - read the current value of a blkg_stat
 * @stat: blkg_stat to read
 *
 * Read the current value of @stat.  This function can be called without
 * synchronization and takes care of u64 atomicity.
 */
static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
{
	unsigned int start;
	uint64_t v;

	do {
		start = u64_stats_fetch_begin_irq(&stat->syncp);
		v = stat->cnt;
	} while (u64_stats_fetch_retry_irq(&stat->syncp, start));

	return v;
}

/**
 * blkg_stat_reset - reset a blkg_stat
 * @stat: blkg_stat to reset
 */
static inline void blkg_stat_reset(struct blkg_stat *stat)
{
	stat->cnt = 0;
}

/**
 * blkg_stat_merge - merge a blkg_stat into another
 * @to: the destination blkg_stat
 * @from: the source
 *
 * Add @from's count to @to.
 */
static inline void blkg_stat_merge(struct blkg_stat *to, struct blkg_stat *from)
{
	blkg_stat_add(to, blkg_stat_read(from));
}

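/*
 * Update/read pattern, sketched: writers serialize externally (e.g. under
 * queue_lock) and call blkg_stat_add(); readers may sample lock-free from
 * any context.  fg->serviced is a hypothetical blkg_stat member:
 *
 *	blkg_stat_add(&fg->serviced, 1);		// writer side
 *	total = blkg_stat_read(&fg->serviced);		// reader side
 */
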
static inline void blkg_rwstat_init(struct blkg_rwstat *rwstat)
{
	u64_stats_init(&rwstat->syncp);
}

/**
 * blkg_rwstat_add - add a value to a blkg_rwstat
 * @rwstat: target blkg_rwstat
 * @rw: mask of REQ_{WRITE|SYNC}
 * @val: value to add
 *
 * Add @val to @rwstat.  The counters are chosen according to @rw.  The
 * caller is responsible for synchronizing calls to this function.
 */
static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
				   int rw, uint64_t val)
{
	u64_stats_update_begin(&rwstat->syncp);

	if (rw & REQ_WRITE)
		rwstat->cnt[BLKG_RWSTAT_WRITE] += val;
	else
		rwstat->cnt[BLKG_RWSTAT_READ] += val;
	if (rw & REQ_SYNC)
		rwstat->cnt[BLKG_RWSTAT_SYNC] += val;
	else
		rwstat->cnt[BLKG_RWSTAT_ASYNC] += val;

	u64_stats_update_end(&rwstat->syncp);
}

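/*
 * Sketch of accounting a bio into a policy's rwstat, with the direction
 * and sync flags taken from the bio itself (fg->served is a hypothetical
 * blkg_rwstat member from the earlier examples):
 *
 *	blkg_rwstat_add(&fg->served, bio->bi_rw, bio->bi_iter.bi_size);
 */
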
/**
 * blkg_rwstat_read - read the current values of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Read the current snapshot of @rwstat and return it as the return value.
 * This function can be called without synchronization and takes care of
 * u64 atomicity.
 */
static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat)
{
	unsigned int start;
	struct blkg_rwstat tmp;

	do {
		start = u64_stats_fetch_begin_irq(&rwstat->syncp);
		tmp = *rwstat;
	} while (u64_stats_fetch_retry_irq(&rwstat->syncp, start));

	return tmp;
}

/**
 * blkg_rwstat_total - read the total count of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Return the total count of @rwstat regardless of the IO direction.  This
 * function can be called without synchronization and takes care of u64
 * atomicity.
 */
static inline uint64_t blkg_rwstat_total(struct blkg_rwstat *rwstat)
{
	struct blkg_rwstat tmp = blkg_rwstat_read(rwstat);

	return tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE];
}

/**
 * blkg_rwstat_reset - reset a blkg_rwstat
 * @rwstat: blkg_rwstat to reset
 */
static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
{
	memset(rwstat->cnt, 0, sizeof(rwstat->cnt));
}

/**
 * blkg_rwstat_merge - merge a blkg_rwstat into another
 * @to: the destination blkg_rwstat
 * @from: the source
 *
 * Add @from's counts to @to.
 */
static inline void blkg_rwstat_merge(struct blkg_rwstat *to,
				     struct blkg_rwstat *from)
{
	struct blkg_rwstat v = blkg_rwstat_read(from);
	int i;

	u64_stats_update_begin(&to->syncp);
	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		to->cnt[i] += v.cnt[i];
	u64_stats_update_end(&to->syncp);
}

#else	/* CONFIG_BLK_CGROUP */

struct blkcg {
};

struct blkg_policy_data {
};

struct blkcg_policy_data {
};

struct blkcg_gq {
};

struct blkcg_policy {
};

#define blkcg_root_css	((struct cgroup_subsys_state *)ERR_PTR(-EINVAL))

static inline struct cgroup_subsys_state *
task_get_blkcg_css(struct task_struct *task)
{
	return NULL;
}

#ifdef CONFIG_BLOCK

static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
					   struct request_queue *q) { return NULL; }
static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
static inline void blkcg_drain_queue(struct request_queue *q) { }
static inline void blkcg_exit_queue(struct request_queue *q) { }
static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
static inline int blkcg_activate_policy(struct request_queue *q,
					const struct blkcg_policy *pol) { return 0; }
static inline void blkcg_deactivate_policy(struct request_queue *q,
					   const struct blkcg_policy *pol) { }

static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }

static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol) { return NULL; }
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
static inline int blkg_path(struct blkcg_gq *blkg, char *buf,
			    int buflen) { return -ENAMETOOLONG; }
static inline void blkg_get(struct blkcg_gq *blkg) { }
static inline void blkg_put(struct blkcg_gq *blkg) { }

static inline struct request_list *blk_get_rl(struct request_queue *q,
					      struct bio *bio) { return &q->root_rl; }
static inline void blk_put_rl(struct request_list *rl) { }
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) { }
static inline struct request_list *blk_rq_rl(struct request *rq) { return &rq->q->root_rl; }

#define blk_queue_for_each_rl(rl, q)	\
	for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)

#endif	/* CONFIG_BLOCK */
#endif	/* CONFIG_BLK_CGROUP */
#endif	/* _BLK_CGROUP_H */