/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BLK_CGROUP_H
#define _BLK_CGROUP_H
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */

#include <linux/cgroup.h>
#include <linux/percpu_counter.h>
#include <linux/seq_file.h>
#include <linux/radix-tree.h>
#include <linux/blkdev.h>
#include <linux/atomic.h>
#include <linux/kthread.h>

/* percpu_counter batch for blkg_[rw]stats, per-cpu drift doesn't matter */
#define BLKG_STAT_CPU_BATCH	(INT_MAX / 2)

/* Max limits for throttle policy */
#define THROTL_IOPS_MAX		UINT_MAX

#ifdef CONFIG_BLK_CGROUP

enum blkg_rwstat_type {
	BLKG_RWSTAT_READ,
	BLKG_RWSTAT_WRITE,
	BLKG_RWSTAT_SYNC,
	BLKG_RWSTAT_ASYNC,
	BLKG_RWSTAT_DISCARD,

	BLKG_RWSTAT_NR,
	BLKG_RWSTAT_TOTAL	= BLKG_RWSTAT_NR,
};

struct blkcg {
	struct cgroup_subsys_state	css;
	spinlock_t			lock;

	struct radix_tree_root		blkg_tree;
	struct blkcg_gq	__rcu		*blkg_hint;
	struct hlist_head		blkg_list;

	struct blkcg_policy_data	*cpd[BLKCG_MAX_POLS];

	struct list_head		all_blkcgs_node;
#ifdef CONFIG_CGROUP_WRITEBACK
	struct list_head		cgwb_list;
	refcount_t			cgwb_refcnt;
#endif
};

/*
 * blkg_[rw]stat->aux_cnt is excluded for local stats but included for
 * recursive.  Used to carry stats of dead children, and, for blkg_rwstat,
 * to carry result values from read and sum operations.
 */
struct blkg_stat {
	struct percpu_counter		cpu_cnt;
	atomic64_t			aux_cnt;
};

struct blkg_rwstat {
	struct percpu_counter		cpu_cnt[BLKG_RWSTAT_NR];
	atomic64_t			aux_cnt[BLKG_RWSTAT_NR];
};

/*
 * A blkcg_gq (blkg) is an association between a block cgroup (blkcg) and a
 * request_queue (q).  This is used by blkcg policies which need to track
 * information per blkcg - q pair.
 *
 * There can be multiple active blkcg policies and each blkg:policy pair is
 * represented by a blkg_policy_data which is allocated and freed by each
 * policy's pd_alloc/free_fn() methods.  A policy can allocate private data
 * area by allocating larger data structure which embeds blkg_policy_data
 * at the beginning.
 */
struct blkg_policy_data {
	/* the blkg and policy id this per-policy data belongs to */
	struct blkcg_gq			*blkg;
	int				plid;
};
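
/*
 * Illustrative sketch (not from the original header): a policy usually
 * embeds blkg_policy_data at the start of its own per-blkg structure and
 * converts back with container_of().  "foo_grp" and "pd_to_foo" are
 * hypothetical names:
 *
 *	struct foo_grp {
 *		struct blkg_policy_data	pd;	must be the first member
 *		u64			foo_bytes;
 *	};
 *
 *	static inline struct foo_grp *pd_to_foo(struct blkg_policy_data *pd)
 *	{
 *		return pd ? container_of(pd, struct foo_grp, pd) : NULL;
 *	}
 */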

/*
 * Policies that need to keep per-blkcg data which is independent from any
 * request_queue associated to it should implement cpd_alloc/free_fn()
 * methods.  A policy can allocate private data area by allocating larger
 * data structure which embeds blkcg_policy_data at the beginning.
 * cpd_init() is invoked to let each policy handle per-blkcg data.
 */
struct blkcg_policy_data {
	/* the blkcg and policy id this per-policy data belongs to */
	struct blkcg			*blkcg;
	int				plid;
};

/* association between a blk cgroup and a request queue */
struct blkcg_gq {
	/* Pointer to the associated request_queue */
	struct request_queue		*q;
	struct list_head		q_node;
	struct hlist_node		blkcg_node;
	struct blkcg			*blkcg;

	/*
	 * Each blkg gets congested separately and the congestion state is
	 * propagated to the matching bdi_writeback_congested.
	 */
	struct bdi_writeback_congested	*wb_congested;

	/* all non-root blkcg_gq's are guaranteed to have access to parent */
	struct blkcg_gq			*parent;

	/* reference count */
	struct percpu_ref		refcnt;

	/* is this blkg online? protected by both blkcg and q locks */
	bool				online;

	struct blkg_rwstat		stat_bytes;
	struct blkg_rwstat		stat_ios;

	struct blkg_policy_data		*pd[BLKCG_MAX_POLS];

	struct rcu_head			rcu_head;

	atomic_t			use_delay;
	atomic64_t			delay_nsec;
	atomic64_t			delay_start;
};

typedef struct blkcg_policy_data *(blkcg_pol_alloc_cpd_fn)(gfp_t gfp);
typedef void (blkcg_pol_init_cpd_fn)(struct blkcg_policy_data *cpd);
typedef void (blkcg_pol_free_cpd_fn)(struct blkcg_policy_data *cpd);
typedef void (blkcg_pol_bind_cpd_fn)(struct blkcg_policy_data *cpd);
typedef struct blkg_policy_data *(blkcg_pol_alloc_pd_fn)(gfp_t gfp, int node);
typedef void (blkcg_pol_init_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_online_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_offline_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_free_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkg_policy_data *pd);
typedef size_t (blkcg_pol_stat_pd_fn)(struct blkg_policy_data *pd, char *buf,
				      size_t size);

struct blkcg_policy {
	int				plid;
	/* cgroup files for the policy */
	struct cftype			*dfl_cftypes;
	struct cftype			*legacy_cftypes;

	/* operations */
	blkcg_pol_alloc_cpd_fn		*cpd_alloc_fn;
	blkcg_pol_init_cpd_fn		*cpd_init_fn;
	blkcg_pol_free_cpd_fn		*cpd_free_fn;
	blkcg_pol_bind_cpd_fn		*cpd_bind_fn;

	blkcg_pol_alloc_pd_fn		*pd_alloc_fn;
	blkcg_pol_init_pd_fn		*pd_init_fn;
	blkcg_pol_online_pd_fn		*pd_online_fn;
	blkcg_pol_offline_pd_fn		*pd_offline_fn;
	blkcg_pol_free_pd_fn		*pd_free_fn;
	blkcg_pol_reset_pd_stats_fn	*pd_reset_stats_fn;
	blkcg_pol_stat_pd_fn		*pd_stat_fn;
};

extern struct blkcg blkcg_root;
extern struct cgroup_subsys_state * const blkcg_root_css;

struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
				      struct request_queue *q, bool update_hint);
struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
				      struct request_queue *q);
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
				    struct request_queue *q);
int blkcg_init_queue(struct request_queue *q);
void blkcg_drain_queue(struct request_queue *q);
void blkcg_exit_queue(struct request_queue *q);

/* Blkio controller policy registration */
int blkcg_policy_register(struct blkcg_policy *pol);
void blkcg_policy_unregister(struct blkcg_policy *pol);
int blkcg_activate_policy(struct request_queue *q,
			  const struct blkcg_policy *pol);
void blkcg_deactivate_policy(struct request_queue *q,
			     const struct blkcg_policy *pol);
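
/*
 * Illustrative sketch (not from the original header): a policy fills in a
 * blkcg_policy and registers it at init time.  "blkcg_policy_foo" and the
 * foo_* callbacks are hypothetical:
 *
 *	static struct blkcg_policy blkcg_policy_foo = {
 *		.dfl_cftypes	= foo_files,
 *		.pd_alloc_fn	= foo_pd_alloc,
 *		.pd_init_fn	= foo_pd_init,
 *		.pd_free_fn	= foo_pd_free,
 *	};
 *
 *	static int __init foo_init(void)
 *	{
 *		return blkcg_policy_register(&blkcg_policy_foo);
 *	}
 *
 * blkcg_activate_policy() would then be called for each request_queue that
 * should start allocating this policy's per-blkg data.
 */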

const char *blkg_dev_name(struct blkcg_gq *blkg);
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total);
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
			 const struct blkg_rwstat *rwstat);
u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off);
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
		       int off);
int blkg_print_stat_bytes(struct seq_file *sf, void *v);
int blkg_print_stat_ios(struct seq_file *sf, void *v);
int blkg_print_stat_bytes_recursive(struct seq_file *sf, void *v);
int blkg_print_stat_ios_recursive(struct seq_file *sf, void *v);

u64 blkg_stat_recursive_sum(struct blkcg_gq *blkg,
			    struct blkcg_policy *pol, int off);
struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkcg_gq *blkg,
					     struct blkcg_policy *pol, int off);

struct blkg_conf_ctx {
	struct gendisk			*disk;
	struct blkcg_gq			*blkg;
	char				*body;
};

int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   char *input, struct blkg_conf_ctx *ctx);
void blkg_conf_finish(struct blkg_conf_ctx *ctx);
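
/*
 * Illustrative sketch (hedged, not from the original header): the usual
 * pairing in a policy's cgroup file write handler.  blkg_conf_prep() is
 * expected to resolve the leading "MAJ:MIN" of @input to a blkg and return
 * with the necessary locks held; blkg_conf_finish() drops them.
 * "blkcg_policy_foo" is hypothetical and error handling is trimmed:
 *
 *	struct blkg_conf_ctx ctx;
 *	int ret;
 *
 *	ret = blkg_conf_prep(blkcg, &blkcg_policy_foo, buf, &ctx);
 *	if (ret)
 *		return ret;
 *	... parse ctx.body and update ctx.blkg's policy data ...
 *	blkg_conf_finish(&ctx);
 */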

/**
 * blkcg_css - find the current css
 *
 * Find the css associated with either the kthread or the current task.
 * This may return a dying css, so it is up to the caller to use tryget logic
 * to confirm it is alive and well.
 */
static inline struct cgroup_subsys_state *blkcg_css(void)
{
	struct cgroup_subsys_state *css;

	css = kthread_blkcg();
	if (css)
		return css;
	return task_css(current, io_cgrp_id);
}

static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct blkcg, css) : NULL;
}

/**
 * __bio_blkcg - internal, inconsistent version to get blkcg
 *
 * This function is inconsistent and consequently is dangerous to use.  The
 * first part of the function returns a blkcg where a reference is owned by the
 * bio.  This means it does not need to be rcu protected as it cannot go away
 * with the bio owning a reference to it.  However, the latter potentially gets
 * it from task_css().  This can race against task migration and the cgroup
 * dying.  It is also semantically different as it must be called rcu protected
 * and is susceptible to failure when trying to get a reference to it.
 * Therefore, it is not ok to assume that *_get() will always succeed on the
 * blkcg returned here.
 */
static inline struct blkcg *__bio_blkcg(struct bio *bio)
{
	if (bio && bio->bi_blkg)
		return bio->bi_blkg->blkcg;
	return css_to_blkcg(blkcg_css());
}

/**
 * bio_blkcg - grab the blkcg associated with a bio
 * @bio: target bio
 *
 * This returns the blkcg associated with a bio, %NULL if not associated.
 * Callers are expected to either handle %NULL or know association has been
 * done prior to calling this.
 */
static inline struct blkcg *bio_blkcg(struct bio *bio)
{
	if (bio && bio->bi_blkg)
		return bio->bi_blkg->blkcg;
	return NULL;
}

static inline bool blk_cgroup_congested(void)
{
	struct cgroup_subsys_state *css;
	bool ret = false;

	rcu_read_lock();
	css = kthread_blkcg();
	if (!css)
		css = task_css(current, io_cgrp_id);
	while (css) {
		if (atomic_read(&css->cgroup->congestion_count)) {
			ret = true;
			break;
		}
		css = css->parent;
	}
	rcu_read_unlock();
	return ret;
}

/**
 * bio_issue_as_root_blkg - see if this bio needs to be issued as root blkg
 * @return: true if this bio needs to be submitted with the root blkg context.
 *
 * In order to avoid priority inversions we sometimes need to issue a bio as if
 * it were attached to the root blkg, and then backcharge to the actual owning
 * blkg.  The idea is we do bio_blkcg() to look up the actual context for the
 * bio and attach the appropriate blkg to the bio.  Then we call this helper and
 * if it is true run with the root blkg for that queue and then do any
 * backcharging to the originating cgroup once the io is complete.
 */
static inline bool bio_issue_as_root_blkg(struct bio *bio)
{
	return (bio->bi_opf & (REQ_META | REQ_SWAP)) != 0;
}

/**
 * blkcg_parent - get the parent of a blkcg
 * @blkcg: blkcg of interest
 *
 * Return the parent blkcg of @blkcg.  Can be called anytime.
 */
static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
{
	return css_to_blkcg(blkcg->css.parent);
}

/**
 * __blkg_lookup - internal version of blkg_lookup()
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 * @update_hint: whether to update lookup hint with the result or not
 *
 * This is the internal version and shouldn't be used by policy
 * implementations.  Looks up blkgs for the @blkcg - @q pair regardless of
 * @q's bypass state.  If @update_hint is %true, the caller should be
 * holding @q->queue_lock and lookup hint is updated on success.
 */
static inline struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
					     struct request_queue *q,
					     bool update_hint)
{
	struct blkcg_gq *blkg;

	if (blkcg == &blkcg_root)
		return q->root_blkg;

	blkg = rcu_dereference(blkcg->blkg_hint);
	if (blkg && blkg->q == q)
		return blkg;

	return blkg_lookup_slowpath(blkcg, q, update_hint);
}

/**
 * blkg_lookup - lookup blkg for the specified blkcg - q pair
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair.  This function should be called
 * under RCU read lock.
 */
static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
					   struct request_queue *q)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return __blkg_lookup(blkcg, q, false);
}
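
/*
 * Illustrative sketch (not from the original header): the lookup result is
 * only stable inside an RCU read-side section, so the expected calling
 * pattern is:
 *
 *	struct blkcg_gq *blkg;
 *
 *	rcu_read_lock();
 *	blkg = blkg_lookup(css_to_blkcg(blkcg_css()), q);
 *	if (blkg)
 *		... use blkg, or take a reference to keep it afterwards ...
 *	rcu_read_unlock();
 */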

/**
 * blk_queue_root_blkg - return blkg for the (blkcg_root, @q) pair
 * @q: request_queue of interest
 *
 * Lookup blkg for @q at the root level.  See also blkg_lookup().
 */
static inline struct blkcg_gq *blk_queue_root_blkg(struct request_queue *q)
{
	return q->root_blkg;
}

/**
 * blkg_to_pd - get policy private data
 * @blkg: blkg of interest
 * @pol: policy of interest
 *
 * Return pointer to private data associated with the @blkg-@pol pair.
 */
static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol)
{
	return blkg ? blkg->pd[pol->plid] : NULL;
}

static inline struct blkcg_policy_data *blkcg_to_cpd(struct blkcg *blkcg,
						     struct blkcg_policy *pol)
{
	return blkcg ? blkcg->cpd[pol->plid] : NULL;
}

/**
 * pd_to_blkg - get blkg associated with policy private data
 * @pd: policy private data of interest
 *
 * @pd is policy private data.  Determine the blkg it's associated with.
 */
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
{
	return pd ? pd->blkg : NULL;
}

static inline struct blkcg *cpd_to_blkcg(struct blkcg_policy_data *cpd)
{
	return cpd ? cpd->blkcg : NULL;
}

extern void blkcg_destroy_blkgs(struct blkcg *blkcg);

#ifdef CONFIG_CGROUP_WRITEBACK

/**
 * blkcg_cgwb_get - get a reference for blkcg->cgwb_list
 * @blkcg: blkcg of interest
 *
 * This is used to track the number of active wb's related to a blkcg.
 */
static inline void blkcg_cgwb_get(struct blkcg *blkcg)
{
	refcount_inc(&blkcg->cgwb_refcnt);
}

/**
 * blkcg_cgwb_put - put a reference for @blkcg->cgwb_list
 * @blkcg: blkcg of interest
 *
 * This is used to track the number of active wb's related to a blkcg.
 * When this count goes to zero, all active wbs have finished so the
 * blkcg can continue destruction by calling blkcg_destroy_blkgs().
 * This work may occur in cgwb_release_workfn() on the cgwb_release
 * workqueue.
 */
static inline void blkcg_cgwb_put(struct blkcg *blkcg)
{
	if (refcount_dec_and_test(&blkcg->cgwb_refcnt))
		blkcg_destroy_blkgs(blkcg);
}

#else

static inline void blkcg_cgwb_get(struct blkcg *blkcg) { }

static inline void blkcg_cgwb_put(struct blkcg *blkcg)
{
	/* wb isn't being accounted, so trigger destruction right away */
	blkcg_destroy_blkgs(blkcg);
}

#endif
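
/*
 * Illustrative sketch (hedged, not from the original header): these are
 * expected to pair over a writeback instance's lifetime:
 *
 *	blkcg_cgwb_get(blkcg);		when a cgwb is created for @blkcg
 *	...
 *	blkcg_cgwb_put(blkcg);		on cgwb release; the final put is
 *					what lets blkcg_destroy_blkgs() run
 */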

/**
 * blkg_path - format cgroup path of blkg
 * @blkg: blkg of interest
 * @buf: target buffer
 * @buflen: target buffer length
 *
 * Format the path of the cgroup of @blkg into @buf.
 */
static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
{
	return cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
}

/**
 * blkg_get - get a blkg reference
 * @blkg: blkg to get
 *
 * The caller should be holding an existing reference.
 */
static inline void blkg_get(struct blkcg_gq *blkg)
{
	percpu_ref_get(&blkg->refcnt);
}

/**
 * blkg_tryget - try and get a blkg reference
 * @blkg: blkg to get
 *
 * This is for use when doing an RCU lookup of the blkg.  We may be in the midst
 * of freeing this blkg, so we can only use it if the refcnt is not zero.
 */
static inline bool blkg_tryget(struct blkcg_gq *blkg)
{
	return blkg && percpu_ref_tryget(&blkg->refcnt);
}

/**
 * blkg_tryget_closest - try and get a blkg ref on the closest blkg
 * @blkg: blkg to get
 *
 * This needs to be called rcu protected.  As the failure mode here is to walk
 * up the blkg tree, this ensures that the blkg->parent pointers are always
 * valid.  This returns the blkg that it ended up taking a reference on or %NULL
 * if no reference was taken.
 */
static inline struct blkcg_gq *blkg_tryget_closest(struct blkcg_gq *blkg)
{
	struct blkcg_gq *ret_blkg = NULL;

	WARN_ON_ONCE(!rcu_read_lock_held());

	while (blkg) {
		if (blkg_tryget(blkg)) {
			ret_blkg = blkg;
			break;
		}
		blkg = blkg->parent;
	}

	return ret_blkg;
}

/**
 * blkg_put - put a blkg reference
 * @blkg: blkg to put
 */
static inline void blkg_put(struct blkcg_gq *blkg)
{
	percpu_ref_put(&blkg->refcnt);
}
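
/*
 * Illustrative sketch (not from the original header): taking a reference
 * across an RCU lookup so the blkg stays usable after the RCU section ends:
 *
 *	rcu_read_lock();
 *	blkg = blkg_lookup(blkcg, q);
 *	if (blkg && !blkg_tryget(blkg))
 *		blkg = NULL;
 *	rcu_read_unlock();
 *
 *	if (blkg) {
 *		... blkg is safe to use here ...
 *		blkg_put(blkg);
 *	}
 */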

/**
 * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Walk @d_blkg through the descendants of @p_blkg.  Must be used with RCU
 * read locked.  If called under either blkcg or queue lock, the iteration
 * is guaranteed to include all and only online blkgs.  The caller may
 * update @pos_css by calling css_rightmost_descendant() to skip subtree.
 * @p_blkg is included in the iteration and the first node to be visited.
 */
#define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))

/**
 * blkg_for_each_descendant_post - post-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Similar to blkg_for_each_descendant_pre() but performs post-order
 * traversal instead.  Synchronization rules are the same.  @p_blkg is
 * included in the iteration and the last node to be visited.
 */
#define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))
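
/*
 * Illustrative sketch (not from the original header): walking every blkg
 * hanging off a queue, starting from and including the root blkg:
 *
 *	struct cgroup_subsys_state *pos_css;
 *	struct blkcg_gq *blkg;
 *
 *	rcu_read_lock();
 *	blkg_for_each_descendant_pre(blkg, pos_css, q->root_blkg) {
 *		... visit blkg ...
 *	}
 *	rcu_read_unlock();
 */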

static inline int blkg_stat_init(struct blkg_stat *stat, gfp_t gfp)
{
	int ret;

	ret = percpu_counter_init(&stat->cpu_cnt, 0, gfp);
	if (ret)
		return ret;

	atomic64_set(&stat->aux_cnt, 0);
	return 0;
}

static inline void blkg_stat_exit(struct blkg_stat *stat)
{
	percpu_counter_destroy(&stat->cpu_cnt);
}

/**
 * blkg_stat_add - add a value to a blkg_stat
 * @stat: target blkg_stat
 * @val: value to add
 *
 * Add @val to @stat.  The caller must ensure that IRQs on the same CPU
 * don't re-enter this function for the same counter.
 */
static inline void blkg_stat_add(struct blkg_stat *stat, uint64_t val)
{
	percpu_counter_add_batch(&stat->cpu_cnt, val, BLKG_STAT_CPU_BATCH);
}

/**
 * blkg_stat_read - read the current value of a blkg_stat
 * @stat: blkg_stat to read
 */
static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
{
	return percpu_counter_sum_positive(&stat->cpu_cnt);
}
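
/*
 * Illustrative sketch (not from the original header): the blkg_stat
 * lifecycle, with "nr_bytes" and "total" as hypothetical locals:
 *
 *	struct blkg_stat st;
 *
 *	if (blkg_stat_init(&st, GFP_KERNEL))
 *		return -ENOMEM;
 *	blkg_stat_add(&st, nr_bytes);		per-cpu, batched update
 *	total = blkg_stat_read(&st);		sums across all CPUs
 *	blkg_stat_exit(&st);
 */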

/**
 * blkg_stat_reset - reset a blkg_stat
 * @stat: blkg_stat to reset
 */
static inline void blkg_stat_reset(struct blkg_stat *stat)
{
	percpu_counter_set(&stat->cpu_cnt, 0);
	atomic64_set(&stat->aux_cnt, 0);
}

/**
 * blkg_stat_add_aux - add a blkg_stat into another's aux count
 * @to: the destination blkg_stat
 * @from: the source
 *
 * Add @from's count including the aux one to @to's aux count.
 */
static inline void blkg_stat_add_aux(struct blkg_stat *to,
				     struct blkg_stat *from)
{
	atomic64_add(blkg_stat_read(from) + atomic64_read(&from->aux_cnt),
		     &to->aux_cnt);
}

static inline int blkg_rwstat_init(struct blkg_rwstat *rwstat, gfp_t gfp)
{
	int i, ret;

	for (i = 0; i < BLKG_RWSTAT_NR; i++) {
		ret = percpu_counter_init(&rwstat->cpu_cnt[i], 0, gfp);
		if (ret) {
			while (--i >= 0)
				percpu_counter_destroy(&rwstat->cpu_cnt[i]);
			return ret;
		}
		atomic64_set(&rwstat->aux_cnt[i], 0);
	}
	return 0;
}

static inline void blkg_rwstat_exit(struct blkg_rwstat *rwstat)
{
	int i;

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		percpu_counter_destroy(&rwstat->cpu_cnt[i]);
}

/**
 * blkg_rwstat_add - add a value to a blkg_rwstat
 * @rwstat: target blkg_rwstat
 * @op: REQ_OP and flags
 * @val: value to add
 *
 * Add @val to @rwstat.  The counters are chosen according to @op.  The
 * caller is responsible for synchronizing calls to this function.
 */
static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
				   unsigned int op, uint64_t val)
{
	struct percpu_counter *cnt;

	if (op_is_discard(op))
		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_DISCARD];
	else if (op_is_write(op))
		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_WRITE];
	else
		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_READ];

	percpu_counter_add_batch(cnt, val, BLKG_STAT_CPU_BATCH);

	if (op_is_sync(op))
		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_SYNC];
	else
		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_ASYNC];

	percpu_counter_add_batch(cnt, val, BLKG_STAT_CPU_BATCH);
}
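
/*
 * Illustrative sketch (not from the original header): because the direction
 * counter and the sync/async counter are updated independently, a 4KiB
 * synchronous write bumps both BLKG_RWSTAT_WRITE and BLKG_RWSTAT_SYNC
 * by 4096:
 *
 *	blkg_rwstat_add(&blkg->stat_bytes, REQ_OP_WRITE | REQ_SYNC, 4096);
 */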

/**
 * blkg_rwstat_read - read the current values of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Read the current snapshot of @rwstat and return it in the aux counts.
 */
static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat)
{
	struct blkg_rwstat result;
	int i;

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		atomic64_set(&result.aux_cnt[i],
			     percpu_counter_sum_positive(&rwstat->cpu_cnt[i]));
	return result;
}

/**
 * blkg_rwstat_total - read the total count of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Return the total count of @rwstat regardless of the IO direction.  This
 * function can be called without synchronization and takes care of u64
 * atomicity.
 */
static inline uint64_t blkg_rwstat_total(struct blkg_rwstat *rwstat)
{
	struct blkg_rwstat tmp = blkg_rwstat_read(rwstat);

	return atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_READ]) +
		atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_WRITE]);
}

/**
 * blkg_rwstat_reset - reset a blkg_rwstat
 * @rwstat: blkg_rwstat to reset
 */
static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
{
	int i;

	for (i = 0; i < BLKG_RWSTAT_NR; i++) {
		percpu_counter_set(&rwstat->cpu_cnt[i], 0);
		atomic64_set(&rwstat->aux_cnt[i], 0);
	}
}

/**
 * blkg_rwstat_add_aux - add a blkg_rwstat into another's aux count
 * @to: the destination blkg_rwstat
 * @from: the source
 *
 * Add @from's count including the aux one to @to's aux count.
 */
static inline void blkg_rwstat_add_aux(struct blkg_rwstat *to,
				       struct blkg_rwstat *from)
{
	u64 sum[BLKG_RWSTAT_NR];
	int i;

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		sum[i] = percpu_counter_sum_positive(&from->cpu_cnt[i]);

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		atomic64_add(sum[i] + atomic64_read(&from->aux_cnt[i]),
			     &to->aux_cnt[i]);
}

#ifdef CONFIG_BLK_DEV_THROTTLING
extern bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
			   struct bio *bio);
#else
static inline bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
				  struct bio *bio) { return false; }
#endif

static inline void blkcg_bio_issue_init(struct bio *bio)
{
	bio_issue_init(&bio->bi_issue, bio_sectors(bio));
}

static inline bool blkcg_bio_issue_check(struct request_queue *q,
					 struct bio *bio)
{
	struct blkcg_gq *blkg;
	bool throtl = false;

	rcu_read_lock();

	if (!bio->bi_blkg) {
		char b[BDEVNAME_SIZE];

		WARN_ONCE(1,
			  "no blkg associated for bio on block-device: %s\n",
			  bio_devname(bio, b));
		bio_associate_blkg(bio);
	}

	blkg = bio->bi_blkg;

	throtl = blk_throtl_bio(q, blkg, bio);

	if (!throtl) {
		/*
		 * If the bio is flagged with BIO_QUEUE_ENTERED it means this
		 * is a split bio and we would have already accounted for the
		 * size of the bio.
		 */
		if (!bio_flagged(bio, BIO_QUEUE_ENTERED))
			blkg_rwstat_add(&blkg->stat_bytes, bio->bi_opf,
					bio->bi_iter.bi_size);
		blkg_rwstat_add(&blkg->stat_ios, bio->bi_opf, 1);
	}

	blkcg_bio_issue_init(bio);

	rcu_read_unlock();
	return !throtl;
}

static inline void blkcg_use_delay(struct blkcg_gq *blkg)
{
	if (atomic_add_return(1, &blkg->use_delay) == 1)
		atomic_inc(&blkg->blkcg->css.cgroup->congestion_count);
}

static inline int blkcg_unuse_delay(struct blkcg_gq *blkg)
{
	int old = atomic_read(&blkg->use_delay);

	if (old == 0)
		return 0;

	/*
	 * We do this song and dance because we can race with somebody else
	 * adding or removing delay.  If we just did an atomic_dec we'd end up
	 * negative and we'd already be in trouble.  We need to subtract 1 and
	 * then check to see if we were the last delay so we can drop the
	 * congestion count on the cgroup.
	 */
	while (old) {
		int cur = atomic_cmpxchg(&blkg->use_delay, old, old - 1);
		if (cur == old)
			break;
		old = cur;
	}

	if (old == 0)
		return 0;
	if (old == 1)
		atomic_dec(&blkg->blkcg->css.cgroup->congestion_count);
	return 1;
}

static inline void blkcg_clear_delay(struct blkcg_gq *blkg)
{
	int old = atomic_read(&blkg->use_delay);

	if (!old)
		return;

	/* We only want 1 person clearing the congestion count for this blkg. */
	while (old) {
		int cur = atomic_cmpxchg(&blkg->use_delay, old, 0);
		if (cur == old) {
			atomic_dec(&blkg->blkcg->css.cgroup->congestion_count);
			break;
		}
		old = cur;
	}
}

void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta);
void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay);
void blkcg_maybe_throttle_current(void);
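
/*
 * Illustrative sketch (hedged, not from the original header): a policy that
 * wants the cgroup marked congested and the issuer throttled might pair the
 * delay helpers roughly like this, with "delta" being the hypothetical
 * extra service time being charged:
 *
 *	blkcg_use_delay(blkg);
 *	blkcg_add_delay(blkg, ktime_to_ns(ktime_get()), delta);
 *	blkcg_schedule_throttle(blkg->q, false);
 *	...
 *	blkcg_unuse_delay(blkg);	once the debt is repaid
 */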

#else	/* CONFIG_BLK_CGROUP */

struct blkg_policy_data {
};

struct blkcg_policy_data {
};

struct blkcg_policy {
};

#define blkcg_root_css	((struct cgroup_subsys_state *)ERR_PTR(-EINVAL))

static inline void blkcg_maybe_throttle_current(void) { }
static inline bool blk_cgroup_congested(void) { return false; }

#ifdef CONFIG_BLOCK

static inline void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay) { }

static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
static inline struct blkcg_gq *blk_queue_root_blkg(struct request_queue *q)
{ return NULL; }
static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
static inline void blkcg_drain_queue(struct request_queue *q) { }
static inline void blkcg_exit_queue(struct request_queue *q) { }
static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
static inline int blkcg_activate_policy(struct request_queue *q,
					const struct blkcg_policy *pol) { return 0; }
static inline void blkcg_deactivate_policy(struct request_queue *q,
					   const struct blkcg_policy *pol) { }

static inline struct blkcg *__bio_blkcg(struct bio *bio) { return NULL; }
static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }

static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol) { return NULL; }
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
static inline void blkg_get(struct blkcg_gq *blkg) { }
static inline void blkg_put(struct blkcg_gq *blkg) { }

static inline void blkcg_bio_issue_init(struct bio *bio) { }
static inline bool blkcg_bio_issue_check(struct request_queue *q,
					 struct bio *bio) { return true; }

#define blk_queue_for_each_rl(rl, q)	\
	for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)

#endif	/* CONFIG_BLOCK */
#endif	/* CONFIG_BLK_CGROUP */
#endif	/* _BLK_CGROUP_H */