/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BLK_CGROUP_H
#define _BLK_CGROUP_H
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */

#include <linux/cgroup.h>
#include <linux/percpu.h>
#include <linux/percpu_counter.h>
#include <linux/u64_stats_sync.h>
#include <linux/seq_file.h>
#include <linux/radix-tree.h>
#include <linux/blkdev.h>
#include <linux/atomic.h>
#include <linux/kthread.h>
#include <linux/fs.h>

/* percpu_counter batch for blkg_[rw]stats, per-cpu drift doesn't matter */
#define BLKG_STAT_CPU_BATCH	(INT_MAX / 2)

/* Max limits for throttle policy */
#define THROTL_IOPS_MAX		UINT_MAX

#ifdef CONFIG_BLK_CGROUP

enum blkg_iostat_type {
	BLKG_IOSTAT_READ,
	BLKG_IOSTAT_WRITE,
	BLKG_IOSTAT_DISCARD,

	BLKG_IOSTAT_NR,
};

struct blkcg_gq;

struct blkcg {
	struct cgroup_subsys_state	css;
	spinlock_t			lock;
	refcount_t			online_pin;

	struct radix_tree_root		blkg_tree;
	struct blkcg_gq	__rcu		*blkg_hint;
	struct hlist_head		blkg_list;

	struct blkcg_policy_data	*cpd[BLKCG_MAX_POLS];

	struct list_head		all_blkcgs_node;
#ifdef CONFIG_CGROUP_WRITEBACK
	struct list_head		cgwb_list;
#endif
};

struct blkg_iostat {
	u64				bytes[BLKG_IOSTAT_NR];
	u64				ios[BLKG_IOSTAT_NR];
};

struct blkg_iostat_set {
	struct u64_stats_sync		sync;
	struct blkg_iostat		cur;
	struct blkg_iostat		last;
};

/*
 * A blkcg_gq (blkg) is an association between a block cgroup (blkcg) and a
 * request_queue (q).  This is used by blkcg policies which need to track
 * information per blkcg - q pair.
 *
 * There can be multiple active blkcg policies and each blkg:policy pair is
 * represented by a blkg_policy_data which is allocated and freed by each
 * policy's pd_alloc/free_fn() methods.  A policy can allocate a private
 * data area by allocating a larger data structure which embeds
 * blkg_policy_data at the beginning.
 */
struct blkg_policy_data {
	/* the blkg and policy id this per-policy data belongs to */
	struct blkcg_gq			*blkg;
	int				plid;
};

/*
 * Policies that need to keep per-blkcg data which is independent from any
 * request_queue associated with it should implement cpd_alloc/free_fn()
 * methods.  A policy can allocate a private data area by allocating a
 * larger data structure which embeds blkcg_policy_data at the beginning.
 * cpd_init() is invoked to let each policy handle per-blkcg data.
 */
struct blkcg_policy_data {
	/* the blkcg and policy id this per-policy data belongs to */
	struct blkcg			*blkcg;
	int				plid;
};

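/*
 * Illustrative sketch (not part of the kernel API): a policy typically
 * embeds blkg_policy_data at the start of its own per-blkg structure and
 * recovers the outer structure with container_of().  The "ioex" names
 * below are hypothetical.
 */
struct ioex_blkg_data {
	struct blkg_policy_data	pd;		/* must be the first member */
	u64			ios_issued;	/* hypothetical private stat */
};

static inline struct ioex_blkg_data *pd_to_ioex(struct blkg_policy_data *pd)
{
	return pd ? container_of(pd, struct ioex_blkg_data, pd) : NULL;
}
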
/* association between a blk cgroup and a request queue */
struct blkcg_gq {
	/* Pointer to the associated request_queue */
	struct request_queue		*q;
	struct list_head		q_node;
	struct hlist_node		blkcg_node;
	struct blkcg			*blkcg;

	/* all non-root blkcg_gq's are guaranteed to have access to parent */
	struct blkcg_gq			*parent;

	/* reference count */
	struct percpu_ref		refcnt;

	/* is this blkg online? protected by both blkcg and q locks */
	bool				online;

	struct blkg_iostat_set __percpu	*iostat_cpu;
	struct blkg_iostat_set		iostat;

	struct blkg_policy_data		*pd[BLKCG_MAX_POLS];

	spinlock_t			async_bio_lock;
	struct bio_list			async_bios;
	struct work_struct		async_bio_work;

	atomic_t			use_delay;
	atomic64_t			delay_nsec;
	atomic64_t			delay_start;
	u64				last_delay;
	int				last_use;

	struct rcu_head			rcu_head;
};

typedef struct blkcg_policy_data *(blkcg_pol_alloc_cpd_fn)(gfp_t gfp);
typedef void (blkcg_pol_init_cpd_fn)(struct blkcg_policy_data *cpd);
typedef void (blkcg_pol_free_cpd_fn)(struct blkcg_policy_data *cpd);
typedef void (blkcg_pol_bind_cpd_fn)(struct blkcg_policy_data *cpd);
typedef struct blkg_policy_data *(blkcg_pol_alloc_pd_fn)(gfp_t gfp,
				struct request_queue *q, struct blkcg *blkcg);
typedef void (blkcg_pol_init_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_online_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_offline_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_free_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkg_policy_data *pd);
typedef size_t (blkcg_pol_stat_pd_fn)(struct blkg_policy_data *pd, char *buf,
				      size_t size);

struct blkcg_policy {
	int				plid;
	/* cgroup files for the policy */
	struct cftype			*dfl_cftypes;
	struct cftype			*legacy_cftypes;

	/* operations */
	blkcg_pol_alloc_cpd_fn		*cpd_alloc_fn;
	blkcg_pol_init_cpd_fn		*cpd_init_fn;
	blkcg_pol_free_cpd_fn		*cpd_free_fn;
	blkcg_pol_bind_cpd_fn		*cpd_bind_fn;

	blkcg_pol_alloc_pd_fn		*pd_alloc_fn;
	blkcg_pol_init_pd_fn		*pd_init_fn;
	blkcg_pol_online_pd_fn		*pd_online_fn;
	blkcg_pol_offline_pd_fn		*pd_offline_fn;
	blkcg_pol_free_pd_fn		*pd_free_fn;
	blkcg_pol_reset_pd_stats_fn	*pd_reset_stats_fn;
	blkcg_pol_stat_pd_fn		*pd_stat_fn;
};

extern struct blkcg blkcg_root;
extern struct cgroup_subsys_state * const blkcg_root_css;
extern bool blkcg_debug_stats;

struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
				      struct request_queue *q, bool update_hint);
int blkcg_init_queue(struct request_queue *q);
void blkcg_exit_queue(struct request_queue *q);

/* Blkio controller policy registration */
int blkcg_policy_register(struct blkcg_policy *pol);
void blkcg_policy_unregister(struct blkcg_policy *pol);
int blkcg_activate_policy(struct request_queue *q,
			  const struct blkcg_policy *pol);
void blkcg_deactivate_policy(struct request_queue *q,
			     const struct blkcg_policy *pol);

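/*
 * Illustrative sketch of policy registration (hypothetical "ioex" policy
 * continuing the sketch above; assumes <linux/slab.h> for kzalloc_node()).
 * A real policy also fills in its cftypes and the remaining pd_*_fn /
 * cpd_*_fn callbacks; ->plid is assigned by blkcg_policy_register().
 */
static struct blkg_policy_data *ioex_pd_alloc(gfp_t gfp,
		struct request_queue *q, struct blkcg *blkcg)
{
	struct ioex_blkg_data *d = kzalloc_node(sizeof(*d), gfp, q->node);

	return d ? &d->pd : NULL;
}

static void ioex_pd_free(struct blkg_policy_data *pd)
{
	kfree(pd_to_ioex(pd));
}

static struct blkcg_policy blkcg_policy_ioex = {
	.pd_alloc_fn	= ioex_pd_alloc,
	.pd_free_fn	= ioex_pd_free,
};

/* from subsystem init: blkcg_policy_register(&blkcg_policy_ioex); */
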
const char *blkg_dev_name(struct blkcg_gq *blkg);
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total);
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);

struct blkg_conf_ctx {
	struct block_device		*bdev;
	struct blkcg_gq			*blkg;
	char				*body;
};

struct block_device *blkcg_conf_open_bdev(char **inputp);
int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   char *input, struct blkg_conf_ctx *ctx);
void blkg_conf_finish(struct blkg_conf_ctx *ctx);

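/*
 * Illustrative sketch: the usual pattern for a policy's cgroup-file write
 * handler.  @input is the "MAJ:MIN <body>" string from userspace and
 * blkcg_policy_ioex is the hypothetical policy sketched above.
 */
static inline int ioex_conf_write(struct blkcg *blkcg, char *input)
{
	struct blkg_conf_ctx ctx;
	int ret;

	ret = blkg_conf_prep(blkcg, &blkcg_policy_ioex, input, &ctx);
	if (ret)
		return ret;

	/* ctx.blkg is valid here; parse ctx.body and apply the config */

	blkg_conf_finish(&ctx);
	return 0;
}
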
/**
 * blkcg_css - find the current css
 *
 * Find the css associated with either the kthread or the current task.
 * This may return a dying css, so it is up to the caller to use tryget
 * logic to confirm it is alive and well.
 */
static inline struct cgroup_subsys_state *blkcg_css(void)
{
	struct cgroup_subsys_state *css;

	css = kthread_blkcg();
	if (css)
		return css;
	return task_css(current, io_cgrp_id);
}

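/*
 * Illustrative sketch: because blkcg_css() may return a dying css, a
 * caller that needs to hold on to the result should use tryget logic,
 * e.g. ("ioex" name is hypothetical):
 */
static inline struct cgroup_subsys_state *ioex_get_current_blkcg_css(void)
{
	struct cgroup_subsys_state *css;

	rcu_read_lock();
	css = blkcg_css();
	if (!css_tryget_online(css))
		css = NULL;	/* raced with the cgroup dying */
	rcu_read_unlock();
	return css;		/* caller does css_put() when done */
}
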
static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct blkcg, css) : NULL;
}

/**
 * __bio_blkcg - internal, inconsistent version to get blkcg
 *
 * DO NOT USE.
 * This function is inconsistent and consequently is dangerous to use.  The
 * first part of the function returns a blkcg where a reference is owned by
 * the bio.  This means it does not need to be rcu protected as it cannot go
 * away with the bio owning a reference to it.  However, the latter
 * potentially gets it from task_css().  This can race against task
 * migration and the cgroup dying.  It is also semantically different as it
 * must be called rcu protected and is susceptible to failure when trying to
 * get a reference to it.  Therefore, it is not ok to assume that *_get()
 * will always succeed on the blkcg returned here.
 */
static inline struct blkcg *__bio_blkcg(struct bio *bio)
{
	if (bio && bio->bi_blkg)
		return bio->bi_blkg->blkcg;
	return css_to_blkcg(blkcg_css());
}

/**
 * bio_blkcg - grab the blkcg associated with a bio
 * @bio: target bio
 *
 * This returns the blkcg associated with a bio, %NULL if not associated.
 * Callers are expected to either handle %NULL or know association has been
 * done prior to calling this.
 */
static inline struct blkcg *bio_blkcg(struct bio *bio)
{
	if (bio && bio->bi_blkg)
		return bio->bi_blkg->blkcg;
	return NULL;
}

static inline bool blk_cgroup_congested(void)
{
	struct cgroup_subsys_state *css;
	bool ret = false;

	rcu_read_lock();
	css = kthread_blkcg();
	if (!css)
		css = task_css(current, io_cgrp_id);
	while (css) {
		if (atomic_read(&css->cgroup->congestion_count)) {
			ret = true;
			break;
		}
		css = css->parent;
	}
	rcu_read_unlock();
	return ret;
}

/**
 * bio_issue_as_root_blkg - see if this bio needs to be issued as root blkg
 * @return: true if this bio needs to be submitted with the root blkg context.
 *
 * In order to avoid priority inversions we sometimes need to issue a bio as if
 * it were attached to the root blkg, and then backcharge to the actual owning
 * blkg.  The idea is we do bio_blkcg() to look up the actual context for the
 * bio and attach the appropriate blkg to the bio.  Then we call this helper
 * and if it is true run with the root blkg for that queue and then do any
 * backcharging to the originating cgroup once the io is complete.
 */
static inline bool bio_issue_as_root_blkg(struct bio *bio)
{
	return (bio->bi_opf & (REQ_META | REQ_SWAP)) != 0;
}

/**
 * blkcg_parent - get the parent of a blkcg
 * @blkcg: blkcg of interest
 *
 * Return the parent blkcg of @blkcg.  Can be called anytime.
 */
static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
{
	return css_to_blkcg(blkcg->css.parent);
}

/**
 * __blkg_lookup - internal version of blkg_lookup()
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 * @update_hint: whether to update lookup hint with the result or not
 *
 * This is the internal version and shouldn't be used by policy
 * implementations.  Looks up blkgs for the @blkcg - @q pair regardless of
 * @q's bypass state.  If @update_hint is %true, the caller should be
 * holding @q->queue_lock and the lookup hint is updated on success.
 */
static inline struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
					     struct request_queue *q,
					     bool update_hint)
{
	struct blkcg_gq *blkg;

	if (blkcg == &blkcg_root)
		return q->root_blkg;

	blkg = rcu_dereference(blkcg->blkg_hint);
	if (blkg && blkg->q == q)
		return blkg;

	return blkg_lookup_slowpath(blkcg, q, update_hint);
}

/**
 * blkg_lookup - lookup blkg for the specified blkcg - q pair
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair.  This function should be called
 * under RCU read lock.
 */
static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
					   struct request_queue *q)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return __blkg_lookup(blkcg, q, false);
}

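/*
 * Illustrative sketch: a typical RCU-protected lookup ("ioex" name is
 * hypothetical).  The result may only be dereferenced within the same RCU
 * critical section unless a reference is taken (see blkg_tryget() below).
 */
static inline bool ioex_blkg_is_online(struct blkcg *blkcg,
				       struct request_queue *q)
{
	struct blkcg_gq *blkg;
	bool online = false;

	rcu_read_lock();
	blkg = blkg_lookup(blkcg, q);
	if (blkg)
		online = blkg->online;
	rcu_read_unlock();
	return online;
}
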
/**
 * blk_queue_root_blkg - return blkg for the (blkcg_root, @q) pair
 * @q: request_queue of interest
 *
 * Lookup blkg for @q at the root level.  See also blkg_lookup().
 */
static inline struct blkcg_gq *blk_queue_root_blkg(struct request_queue *q)
{
	return q->root_blkg;
}

/**
 * blkg_to_pd - get policy private data
 * @blkg: blkg of interest
 * @pol: policy of interest
 *
 * Return pointer to private data associated with the @blkg-@pol pair.
 */
static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol)
{
	return blkg ? blkg->pd[pol->plid] : NULL;
}

static inline struct blkcg_policy_data *blkcg_to_cpd(struct blkcg *blkcg,
						     struct blkcg_policy *pol)
{
	return blkcg ? blkcg->cpd[pol->plid] : NULL;
}

/**
 * pd_to_blkg - get blkg associated with policy private data
 * @pd: policy private data of interest
 *
 * @pd is policy private data.  Determine the blkg it's associated with.
 */
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
{
	return pd ? pd->blkg : NULL;
}

static inline struct blkcg *cpd_to_blkcg(struct blkcg_policy_data *cpd)
{
	return cpd ? cpd->blkcg : NULL;
}

extern void blkcg_destroy_blkgs(struct blkcg *blkcg);

/**
 * blkcg_pin_online - pin online state
 * @blkcg: blkcg of interest
 *
 * While pinned, a blkcg is kept online.  This is primarily used to
 * impedance-match blkg and cgwb lifetimes so that blkg doesn't go offline
 * while an associated cgwb is still active.
 */
static inline void blkcg_pin_online(struct blkcg *blkcg)
{
	refcount_inc(&blkcg->online_pin);
}

/**
 * blkcg_unpin_online - unpin online state
 * @blkcg: blkcg of interest
 *
 * This is primarily used to impedance-match blkg and cgwb lifetimes so
 * that blkg doesn't go offline while an associated cgwb is still active.
 * When this count goes to zero, all active cgwbs have finished so the
 * blkcg can continue destruction by calling blkcg_destroy_blkgs().
 */
static inline void blkcg_unpin_online(struct blkcg *blkcg)
{
	do {
		if (!refcount_dec_and_test(&blkcg->online_pin))
			break;
		blkcg_destroy_blkgs(blkcg);
		blkcg = blkcg_parent(blkcg);
	} while (blkcg);
}

/**
 * blkg_path - format cgroup path of blkg
 * @blkg: blkg of interest
 * @buf: target buffer
 * @buflen: target buffer length
 *
 * Format the path of the cgroup of @blkg into @buf.
 */
static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
{
	return cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
}

/**
 * blkg_get - get a blkg reference
 * @blkg: blkg to get
 *
 * The caller should be holding an existing reference.
 */
static inline void blkg_get(struct blkcg_gq *blkg)
{
	percpu_ref_get(&blkg->refcnt);
}

/**
 * blkg_tryget - try and get a blkg reference
 * @blkg: blkg to get
 *
 * This is for use when doing an RCU lookup of the blkg.  We may be in the
 * midst of freeing this blkg, so we can only use it if the refcnt is not
 * zero.
 */
static inline bool blkg_tryget(struct blkcg_gq *blkg)
{
	return blkg && percpu_ref_tryget(&blkg->refcnt);
}

/**
 * blkg_put - put a blkg reference
 * @blkg: blkg to put
 */
static inline void blkg_put(struct blkcg_gq *blkg)
{
	percpu_ref_put(&blkg->refcnt);
}

/**
 * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Walk @d_blkg through the descendants of @p_blkg.  Must be used with RCU
 * read locked.  If called under either blkcg or queue lock, the iteration
 * is guaranteed to include all and only online blkgs.  The caller may
 * update @pos_css by calling css_rightmost_descendant() to skip a subtree.
 * @p_blkg is included in the iteration and the first node to be visited.
 */
#define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))

/**
 * blkg_for_each_descendant_post - post-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Similar to blkg_for_each_descendant_pre() but performs post-order
 * traversal instead.  Synchronization rules are the same.  @p_blkg is
 * included in the iteration and the last node to be visited.
 */
#define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))

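/*
 * Illustrative sketch: count @p_blkg and all of its descendants ("ioex"
 * name is hypothetical).  Must be called with the RCU read lock held;
 * hold the blkcg or queue lock as well to see exactly the online blkgs.
 */
static inline unsigned int ioex_count_blkgs(struct blkcg_gq *p_blkg)
{
	struct cgroup_subsys_state *pos_css;
	struct blkcg_gq *d_blkg;
	unsigned int nr = 0;

	blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg)
		nr++;
	return nr;
}
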
bool __blkcg_punt_bio_submit(struct bio *bio);

static inline bool blkcg_punt_bio_submit(struct bio *bio)
{
	if (bio->bi_opf & REQ_CGROUP_PUNT)
		return __blkcg_punt_bio_submit(bio);
	else
		return false;
}

static inline void blkcg_bio_issue_init(struct bio *bio)
{
	bio_issue_init(&bio->bi_issue, bio_sectors(bio));
}

static inline void blkcg_use_delay(struct blkcg_gq *blkg)
{
	if (WARN_ON_ONCE(atomic_read(&blkg->use_delay) < 0))
		return;
	if (atomic_add_return(1, &blkg->use_delay) == 1)
		atomic_inc(&blkg->blkcg->css.cgroup->congestion_count);
}

static inline int blkcg_unuse_delay(struct blkcg_gq *blkg)
{
	int old = atomic_read(&blkg->use_delay);

	if (WARN_ON_ONCE(old < 0))
		return 0;
	if (old == 0)
		return 0;

	/*
	 * We do this song and dance because we can race with somebody else
	 * adding or removing delay.  If we just did an atomic_dec we'd end up
	 * negative and we'd already be in trouble.  We need to subtract 1 and
	 * then check to see if we were the last delay so we can drop the
	 * congestion count on the cgroup.
	 */
	while (old) {
		int cur = atomic_cmpxchg(&blkg->use_delay, old, old - 1);
		if (cur == old)
			break;
		old = cur;
	}

	if (old == 0)
		return 0;
	if (old == 1)
		atomic_dec(&blkg->blkcg->css.cgroup->congestion_count);
	return 1;
}

/**
 * blkcg_set_delay - Enable allocator delay mechanism with the specified delay amount
 * @blkg: target blkg
 * @delay: delay duration in nsecs
 *
 * When enabled with this function, the delay is not decayed and must be
 * explicitly cleared with blkcg_clear_delay().  Must not be mixed with
 * blkcg_[un]use_delay() and blkcg_add_delay() usages.
 */
static inline void blkcg_set_delay(struct blkcg_gq *blkg, u64 delay)
{
	int old = atomic_read(&blkg->use_delay);

	/* We only want 1 person setting the congestion count for this blkg. */
	if (!old && atomic_cmpxchg(&blkg->use_delay, old, -1) == old)
		atomic_inc(&blkg->blkcg->css.cgroup->congestion_count);

	atomic64_set(&blkg->delay_nsec, delay);
}

/**
 * blkcg_clear_delay - Disable allocator delay mechanism
 * @blkg: target blkg
 *
 * Disable use_delay mechanism.  See blkcg_set_delay().
 */
static inline void blkcg_clear_delay(struct blkcg_gq *blkg)
{
	int old = atomic_read(&blkg->use_delay);

	/* We only want 1 person clearing the congestion count for this blkg. */
	if (old && atomic_cmpxchg(&blkg->use_delay, old, 0) == old)
		atomic_dec(&blkg->blkcg->css.cgroup->congestion_count);
}

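/*
 * Illustrative sketch: impose a fixed, non-decaying 10ms delay on a blkg
 * and clear it again later ("ioex" name is hypothetical; assumes
 * NSEC_PER_MSEC is visible).  Not to be mixed with the
 * blkcg_[un]use_delay()/blkcg_add_delay() interface.
 */
static inline void ioex_set_fixed_delay(struct blkcg_gq *blkg, bool enable)
{
	if (enable)
		blkcg_set_delay(blkg, 10 * NSEC_PER_MSEC);
	else
		blkcg_clear_delay(blkg);
}
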
void blk_cgroup_bio_start(struct bio *bio);
void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta);
void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay);
void blkcg_maybe_throttle_current(void);
#else	/* CONFIG_BLK_CGROUP */

struct blkcg {
};

struct blkg_policy_data {
};

struct blkcg_policy_data {
};

struct blkcg_gq {
};

struct blkcg_policy {
};

#define blkcg_root_css	((struct cgroup_subsys_state *)ERR_PTR(-EINVAL))

static inline void blkcg_maybe_throttle_current(void) { }
static inline bool blk_cgroup_congested(void) { return false; }

#ifdef CONFIG_BLOCK

static inline void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay) { }

static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
static inline struct blkcg_gq *blk_queue_root_blkg(struct request_queue *q)
{ return NULL; }
static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
static inline void blkcg_exit_queue(struct request_queue *q) { }
static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
static inline int blkcg_activate_policy(struct request_queue *q,
					const struct blkcg_policy *pol) { return 0; }
static inline void blkcg_deactivate_policy(struct request_queue *q,
					   const struct blkcg_policy *pol) { }

static inline struct blkcg *__bio_blkcg(struct bio *bio) { return NULL; }
static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }

static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol) { return NULL; }
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
static inline void blkg_get(struct blkcg_gq *blkg) { }
static inline void blkg_put(struct blkcg_gq *blkg) { }

static inline bool blkcg_punt_bio_submit(struct bio *bio) { return false; }
static inline void blkcg_bio_issue_init(struct bio *bio) { }
static inline void blk_cgroup_bio_start(struct bio *bio) { }

#define blk_queue_for_each_rl(rl, q)	\
	for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)

#endif	/* CONFIG_BLOCK */
#endif	/* CONFIG_BLK_CGROUP */
#endif	/* _BLK_CGROUP_H */