// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * cgroups support for the BFQ I/O scheduler.
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/cgroup.h>
#include <linux/elevator.h>
#include <linux/ktime.h>
#include <linux/rbtree.h>
#include <linux/ioprio.h>
#include <linux/sbitmap.h>
#include <linux/delay.h>

#include "bfq-iosched.h"

#ifdef CONFIG_BFQ_CGROUP_DEBUG
static int bfq_stat_init(struct bfq_stat *stat, gfp_t gfp)
{
        int ret;

        ret = percpu_counter_init(&stat->cpu_cnt, 0, gfp);
        if (ret)
                return ret;

        atomic64_set(&stat->aux_cnt, 0);
        return 0;
}

static void bfq_stat_exit(struct bfq_stat *stat)
{
        percpu_counter_destroy(&stat->cpu_cnt);
}

/**
 * bfq_stat_add - add a value to a bfq_stat
 * @stat: target bfq_stat
 * @val: value to add
 *
 * Add @val to @stat. The caller must ensure that IRQs on the same CPU
 * don't re-enter this function for the same counter.
 */
static inline void bfq_stat_add(struct bfq_stat *stat, uint64_t val)
{
        percpu_counter_add_batch(&stat->cpu_cnt, val, BLKG_STAT_CPU_BATCH);
}

/**
 * bfq_stat_read - read the current value of a bfq_stat
 * @stat: bfq_stat to read
 */
static inline uint64_t bfq_stat_read(struct bfq_stat *stat)
{
        return percpu_counter_sum_positive(&stat->cpu_cnt);
}

/**
 * bfq_stat_reset - reset a bfq_stat
 * @stat: bfq_stat to reset
 */
static inline void bfq_stat_reset(struct bfq_stat *stat)
{
        percpu_counter_set(&stat->cpu_cnt, 0);
        atomic64_set(&stat->aux_cnt, 0);
}

/**
 * bfq_stat_add_aux - add a bfq_stat into another's aux count
 * @to: the destination bfq_stat
 * @from: the source
 *
 * Add @from's count including the aux one to @to's aux count.
 */
static inline void bfq_stat_add_aux(struct bfq_stat *to,
                                    struct bfq_stat *from)
{
        atomic64_add(bfq_stat_read(from) + atomic64_read(&from->aux_cnt),
                     &to->aux_cnt);
}

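/*
 * A bfq_stat is thus a two-level counter: the percpu counter holds the
 * live value updated on the hot path, while aux_cnt accumulates the
 * totals of groups that have gone away (see bfqg_stats_xfer_dead()
 * below). Recursive sums read both, so dead descendants keep being
 * accounted for.
 */
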
/**
 * blkg_prfill_stat - prfill callback for bfq_stat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @off: offset to the bfq_stat in @pd
 *
 * prfill callback for printing a bfq_stat.
 */
static u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd,
                            int off)
{
        return __blkg_prfill_u64(sf, pd, bfq_stat_read((void *)pd + off));
}

/* bfqg stats flags */
enum bfqg_stats_flags {
        BFQG_stats_waiting = 0,
        BFQG_stats_idling,
        BFQG_stats_empty,
};

#define BFQG_FLAG_FNS(name)                                             \
static void bfqg_stats_mark_##name(struct bfqg_stats *stats)            \
{                                                                       \
        stats->flags |= (1 << BFQG_stats_##name);                       \
}                                                                       \
static void bfqg_stats_clear_##name(struct bfqg_stats *stats)           \
{                                                                       \
        stats->flags &= ~(1 << BFQG_stats_##name);                      \
}                                                                       \
static int bfqg_stats_##name(struct bfqg_stats *stats)                  \
{                                                                       \
        return (stats->flags & (1 << BFQG_stats_##name)) != 0;          \
}                                                                       \

BFQG_FLAG_FNS(waiting)
BFQG_FLAG_FNS(idling)
BFQG_FLAG_FNS(empty)
#undef BFQG_FLAG_FNS

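/*
 * For reference, each invocation above generates a mark/clear/test
 * triple; e.g. BFQG_FLAG_FNS(waiting) expands to:
 *
 *      static void bfqg_stats_mark_waiting(struct bfqg_stats *stats);
 *      static void bfqg_stats_clear_waiting(struct bfqg_stats *stats);
 *      static int bfqg_stats_waiting(struct bfqg_stats *stats);
 *
 * which set, clear and test the BFQG_stats_waiting bit in stats->flags.
 */
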
/* This should be called with the scheduler lock held. */
static void bfqg_stats_update_group_wait_time(struct bfqg_stats *stats)
{
        u64 now;

        if (!bfqg_stats_waiting(stats))
                return;

        now = ktime_get_ns();
        if (now > stats->start_group_wait_time)
                bfq_stat_add(&stats->group_wait_time,
                             now - stats->start_group_wait_time);
        bfqg_stats_clear_waiting(stats);
}

/* This should be called with the scheduler lock held. */
static void bfqg_stats_set_start_group_wait_time(struct bfq_group *bfqg,
                                                 struct bfq_group *curr_bfqg)
{
        struct bfqg_stats *stats = &bfqg->stats;

        if (bfqg_stats_waiting(stats))
                return;
        if (bfqg == curr_bfqg)
                return;
        stats->start_group_wait_time = ktime_get_ns();
        bfqg_stats_mark_waiting(stats);
}

/* This should be called with the scheduler lock held. */
static void bfqg_stats_end_empty_time(struct bfqg_stats *stats)
{
        u64 now;

        if (!bfqg_stats_empty(stats))
                return;

        now = ktime_get_ns();
        if (now > stats->start_empty_time)
                bfq_stat_add(&stats->empty_time,
                             now - stats->start_empty_time);
        bfqg_stats_clear_empty(stats);
}

void bfqg_stats_update_dequeue(struct bfq_group *bfqg)
{
        bfq_stat_add(&bfqg->stats.dequeue, 1);
}

void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg)
{
        struct bfqg_stats *stats = &bfqg->stats;

        if (blkg_rwstat_total(&stats->queued))
                return;

        /*
         * The group is already marked empty. This can happen if bfqq
         * got a new request in the parent group and moved to this group
         * while being added to the service tree. Just ignore the event
         * and move on.
         */
        if (bfqg_stats_empty(stats))
                return;

        stats->start_empty_time = ktime_get_ns();
        bfqg_stats_mark_empty(stats);
}

void bfqg_stats_update_idle_time(struct bfq_group *bfqg)
{
        struct bfqg_stats *stats = &bfqg->stats;

        if (bfqg_stats_idling(stats)) {
                u64 now = ktime_get_ns();

                if (now > stats->start_idle_time)
                        bfq_stat_add(&stats->idle_time,
                                     now - stats->start_idle_time);
                bfqg_stats_clear_idling(stats);
        }
}

void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg)
{
        struct bfqg_stats *stats = &bfqg->stats;

        stats->start_idle_time = ktime_get_ns();
        bfqg_stats_mark_idling(stats);
}

void bfqg_stats_update_avg_queue_size(struct bfq_group *bfqg)
{
        struct bfqg_stats *stats = &bfqg->stats;

        bfq_stat_add(&stats->avg_queue_size_sum,
                     blkg_rwstat_total(&stats->queued));
        bfq_stat_add(&stats->avg_queue_size_samples, 1);
        bfqg_stats_update_group_wait_time(stats);
}

void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq,
                              unsigned int op)
{
        blkg_rwstat_add(&bfqg->stats.queued, op, 1);
        bfqg_stats_end_empty_time(&bfqg->stats);
        if (!(bfqq == ((struct bfq_data *)bfqg->bfqd)->in_service_queue))
                bfqg_stats_set_start_group_wait_time(bfqg, bfqq_group(bfqq));
}

void bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op)
{
        blkg_rwstat_add(&bfqg->stats.queued, op, -1);
}

void bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op)
{
        blkg_rwstat_add(&bfqg->stats.merged, op, 1);
}

void bfqg_stats_update_completion(struct bfq_group *bfqg, u64 start_time_ns,
                                  u64 io_start_time_ns, unsigned int op)
{
        struct bfqg_stats *stats = &bfqg->stats;
        u64 now = ktime_get_ns();

        if (now > io_start_time_ns)
                blkg_rwstat_add(&stats->service_time, op,
                                now - io_start_time_ns);
        if (io_start_time_ns > start_time_ns)
                blkg_rwstat_add(&stats->wait_time, op,
                                io_start_time_ns - start_time_ns);
}

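/*
 * Timeline of the two deltas accounted just above: start_time_ns is
 * when the request entered the scheduler, io_start_time_ns is when it
 * was dispatched to the driver, and now is its completion time. Hence
 * wait_time covers the interval the request spent queued in BFQ, while
 * service_time covers the interval spent in the driver and device.
 */
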
#else /* CONFIG_BFQ_CGROUP_DEBUG */

void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq,
                              unsigned int op) { }
void bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op) { }
void bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op) { }
void bfqg_stats_update_completion(struct bfq_group *bfqg, u64 start_time_ns,
                                  u64 io_start_time_ns, unsigned int op) { }
void bfqg_stats_update_dequeue(struct bfq_group *bfqg) { }
void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg) { }
void bfqg_stats_update_idle_time(struct bfq_group *bfqg) { }
void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg) { }
void bfqg_stats_update_avg_queue_size(struct bfq_group *bfqg) { }

#endif /* CONFIG_BFQ_CGROUP_DEBUG */

#ifdef CONFIG_BFQ_GROUP_IOSCHED

/*
 * blk-cgroup policy-related handlers
 * The following functions help in converting between blk-cgroup
 * internal structures and BFQ-specific structures.
 */

static struct bfq_group *pd_to_bfqg(struct blkg_policy_data *pd)
{
        return pd ? container_of(pd, struct bfq_group, pd) : NULL;
}

struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg)
{
        return pd_to_blkg(&bfqg->pd);
}

static struct bfq_group *blkg_to_bfqg(struct blkcg_gq *blkg)
{
        return pd_to_bfqg(blkg_to_pd(blkg, &blkcg_policy_bfq));
}

/*
 * bfq_group handlers
 * The following functions help in navigating the bfq_group hierarchy
 * by allowing one to find the parent of a bfq_group or the bfq_group
 * associated with a bfq_queue.
 */

static struct bfq_group *bfqg_parent(struct bfq_group *bfqg)
{
        struct blkcg_gq *pblkg = bfqg_to_blkg(bfqg)->parent;

        return pblkg ? blkg_to_bfqg(pblkg) : NULL;
}

struct bfq_group *bfqq_group(struct bfq_queue *bfqq)
{
        struct bfq_entity *group_entity = bfqq->entity.parent;

        return group_entity ? container_of(group_entity, struct bfq_group,
                                           entity) :
                              bfqq->bfqd->root_group;
}

/*
 * The following two functions handle get and put of a bfq_group by
 * wrapping the related blk-cgroup hooks.
 */

static void bfqg_get(struct bfq_group *bfqg)
{
        bfqg->ref++;
}

static void bfqg_put(struct bfq_group *bfqg)
{
        bfqg->ref--;

        if (bfqg->ref == 0)
                kfree(bfqg);
}

static void bfqg_and_blkg_get(struct bfq_group *bfqg)
{
        /* see comments in bfq_bic_update_cgroup for why refcounting bfqg */
        bfqg_get(bfqg);

        blkg_get(bfqg_to_blkg(bfqg));
}

void bfqg_and_blkg_put(struct bfq_group *bfqg)
{
        blkg_put(bfqg_to_blkg(bfqg));

        bfqg_put(bfqg);
}

void bfqg_stats_update_legacy_io(struct request_queue *q, struct request *rq)
{
        struct bfq_group *bfqg = blkg_to_bfqg(rq->bio->bi_blkg);

        blkg_rwstat_add(&bfqg->stats.bytes, rq->cmd_flags, blk_rq_bytes(rq));
        blkg_rwstat_add(&bfqg->stats.ios, rq->cmd_flags, 1);
}

/* @stats = 0 */
static void bfqg_stats_reset(struct bfqg_stats *stats)
{
#ifdef CONFIG_BFQ_CGROUP_DEBUG
        /* queued stats shouldn't be cleared */
        blkg_rwstat_reset(&stats->merged);
        blkg_rwstat_reset(&stats->service_time);
        blkg_rwstat_reset(&stats->wait_time);
        bfq_stat_reset(&stats->time);
        bfq_stat_reset(&stats->avg_queue_size_sum);
        bfq_stat_reset(&stats->avg_queue_size_samples);
        bfq_stat_reset(&stats->dequeue);
        bfq_stat_reset(&stats->group_wait_time);
        bfq_stat_reset(&stats->idle_time);
        bfq_stat_reset(&stats->empty_time);
#endif
}

/* @to += @from */
static void bfqg_stats_add_aux(struct bfqg_stats *to, struct bfqg_stats *from)
{
        if (!to || !from)
                return;

#ifdef CONFIG_BFQ_CGROUP_DEBUG
        /* queued stats shouldn't be cleared */
        blkg_rwstat_add_aux(&to->merged, &from->merged);
        blkg_rwstat_add_aux(&to->service_time, &from->service_time);
        blkg_rwstat_add_aux(&to->wait_time, &from->wait_time);
        bfq_stat_add_aux(&to->time, &from->time);
        bfq_stat_add_aux(&to->avg_queue_size_sum, &from->avg_queue_size_sum);
        bfq_stat_add_aux(&to->avg_queue_size_samples,
                         &from->avg_queue_size_samples);
        bfq_stat_add_aux(&to->dequeue, &from->dequeue);
        bfq_stat_add_aux(&to->group_wait_time, &from->group_wait_time);
        bfq_stat_add_aux(&to->idle_time, &from->idle_time);
        bfq_stat_add_aux(&to->empty_time, &from->empty_time);
#endif
}

/*
 * Transfer @bfqg's stats to its parent's aux counts so that the ancestors'
 * recursive stats can still account for the amount used by this bfqg after
 * it's gone.
 */
static void bfqg_stats_xfer_dead(struct bfq_group *bfqg)
{
        struct bfq_group *parent;

        if (!bfqg) /* root_group */
                return;

        parent = bfqg_parent(bfqg);

        lockdep_assert_held(&bfqg_to_blkg(bfqg)->q->queue_lock);

        if (unlikely(!parent))
                return;

        bfqg_stats_add_aux(&parent->stats, &bfqg->stats);
        bfqg_stats_reset(&bfqg->stats);
}

void bfq_init_entity(struct bfq_entity *entity, struct bfq_group *bfqg)
{
        struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);

        entity->weight = entity->new_weight;
        entity->orig_weight = entity->new_weight;
        if (bfqq) {
                bfqq->ioprio = bfqq->new_ioprio;
                bfqq->ioprio_class = bfqq->new_ioprio_class;
                /*
                 * Make sure that bfqg and its associated blkg do not
                 * disappear before entity.
                 */
                bfqg_and_blkg_get(bfqg);
        }
        entity->parent = bfqg->my_entity; /* NULL for root group */
        entity->sched_data = &bfqg->sched_data;
}

static void bfqg_stats_exit(struct bfqg_stats *stats)
{
        blkg_rwstat_exit(&stats->bytes);
        blkg_rwstat_exit(&stats->ios);
#ifdef CONFIG_BFQ_CGROUP_DEBUG
        blkg_rwstat_exit(&stats->merged);
        blkg_rwstat_exit(&stats->service_time);
        blkg_rwstat_exit(&stats->wait_time);
        blkg_rwstat_exit(&stats->queued);
        bfq_stat_exit(&stats->time);
        bfq_stat_exit(&stats->avg_queue_size_sum);
        bfq_stat_exit(&stats->avg_queue_size_samples);
        bfq_stat_exit(&stats->dequeue);
        bfq_stat_exit(&stats->group_wait_time);
        bfq_stat_exit(&stats->idle_time);
        bfq_stat_exit(&stats->empty_time);
#endif
}

static int bfqg_stats_init(struct bfqg_stats *stats, gfp_t gfp)
{
        if (blkg_rwstat_init(&stats->bytes, gfp) ||
            blkg_rwstat_init(&stats->ios, gfp))
                return -ENOMEM;

#ifdef CONFIG_BFQ_CGROUP_DEBUG
        if (blkg_rwstat_init(&stats->merged, gfp) ||
            blkg_rwstat_init(&stats->service_time, gfp) ||
            blkg_rwstat_init(&stats->wait_time, gfp) ||
            blkg_rwstat_init(&stats->queued, gfp) ||
            bfq_stat_init(&stats->time, gfp) ||
            bfq_stat_init(&stats->avg_queue_size_sum, gfp) ||
            bfq_stat_init(&stats->avg_queue_size_samples, gfp) ||
            bfq_stat_init(&stats->dequeue, gfp) ||
            bfq_stat_init(&stats->group_wait_time, gfp) ||
            bfq_stat_init(&stats->idle_time, gfp) ||
            bfq_stat_init(&stats->empty_time, gfp)) {
                bfqg_stats_exit(stats);
                return -ENOMEM;
        }
#endif

        return 0;
}

static struct bfq_group_data *cpd_to_bfqgd(struct blkcg_policy_data *cpd)
{
        return cpd ? container_of(cpd, struct bfq_group_data, pd) : NULL;
}

static struct bfq_group_data *blkcg_to_bfqgd(struct blkcg *blkcg)
{
        return cpd_to_bfqgd(blkcg_to_cpd(blkcg, &blkcg_policy_bfq));
}

static struct blkcg_policy_data *bfq_cpd_alloc(gfp_t gfp)
{
        struct bfq_group_data *bgd;

        bgd = kzalloc(sizeof(*bgd), gfp);
        if (!bgd)
                return NULL;
        return &bgd->pd;
}

static void bfq_cpd_init(struct blkcg_policy_data *cpd)
{
        struct bfq_group_data *d = cpd_to_bfqgd(cpd);

        d->weight = cgroup_subsys_on_dfl(io_cgrp_subsys) ?
                CGROUP_WEIGHT_DFL : BFQ_WEIGHT_LEGACY_DFL;
}

static void bfq_cpd_free(struct blkcg_policy_data *cpd)
{
        kfree(cpd_to_bfqgd(cpd));
}

static struct blkg_policy_data *bfq_pd_alloc(gfp_t gfp, struct request_queue *q,
                                             struct blkcg *blkcg)
{
        struct bfq_group *bfqg;

        bfqg = kzalloc_node(sizeof(*bfqg), gfp, q->node);
        if (!bfqg)
                return NULL;

        if (bfqg_stats_init(&bfqg->stats, gfp)) {
                kfree(bfqg);
                return NULL;
        }

        /* see comments in bfq_bic_update_cgroup for why refcounting */
        bfqg_get(bfqg);
        return &bfqg->pd;
}

static void bfq_pd_init(struct blkg_policy_data *pd)
{
        struct blkcg_gq *blkg = pd_to_blkg(pd);
        struct bfq_group *bfqg = blkg_to_bfqg(blkg);
        struct bfq_data *bfqd = blkg->q->elevator->elevator_data;
        struct bfq_entity *entity = &bfqg->entity;
        struct bfq_group_data *d = blkcg_to_bfqgd(blkg->blkcg);

        entity->orig_weight = entity->weight = entity->new_weight = d->weight;
        entity->my_sched_data = &bfqg->sched_data;
        bfqg->my_entity = entity; /*
                                   * the root_group's will be set to NULL
                                   * in bfq_init_queue()
                                   */
        bfqg->bfqd = bfqd;
        bfqg->active_entities = 0;
        bfqg->rq_pos_tree = RB_ROOT;
}

static void bfq_pd_free(struct blkg_policy_data *pd)
{
        struct bfq_group *bfqg = pd_to_bfqg(pd);

        bfqg_stats_exit(&bfqg->stats);
        bfqg_put(bfqg);
}

static void bfq_pd_reset_stats(struct blkg_policy_data *pd)
{
        struct bfq_group *bfqg = pd_to_bfqg(pd);

        bfqg_stats_reset(&bfqg->stats);
}

static void bfq_group_set_parent(struct bfq_group *bfqg,
                                 struct bfq_group *parent)
{
        struct bfq_entity *entity;

        entity = &bfqg->entity;
        entity->parent = parent->my_entity;
        entity->sched_data = &parent->sched_data;
}

static struct bfq_group *bfq_lookup_bfqg(struct bfq_data *bfqd,
                                         struct blkcg *blkcg)
{
        struct blkcg_gq *blkg;

        blkg = blkg_lookup(blkcg, bfqd->queue);
        if (likely(blkg))
                return blkg_to_bfqg(blkg);
        return NULL;
}

struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd,
                                     struct blkcg *blkcg)
{
        struct bfq_group *bfqg, *parent;
        struct bfq_entity *entity;

        bfqg = bfq_lookup_bfqg(bfqd, blkcg);

        if (unlikely(!bfqg))
                return NULL;

        /*
         * Update chain of bfq_groups as we might be handling a leaf group
         * which, along with some of its relatives, has not been hooked yet
         * to the private hierarchy of BFQ.
         */
        entity = &bfqg->entity;
        for_each_entity(entity) {
                bfqg = container_of(entity, struct bfq_group, entity);
                if (bfqg != bfqd->root_group) {
                        parent = bfqg_parent(bfqg);
                        if (!parent)
                                parent = bfqd->root_group;
                        bfq_group_set_parent(bfqg, parent);
                }
        }

        return bfqg;
}

/**
 * bfq_bfqq_move - migrate @bfqq to @bfqg.
 * @bfqd: queue descriptor.
 * @bfqq: the queue to move.
 * @bfqg: the group to move to.
 *
 * Move @bfqq to @bfqg, deactivating it from its old group and reactivating
 * it on the new one. Avoid putting the entity on the old group idle tree.
 *
 * Must be called under the scheduler lock, to make sure that the blkg
 * owning @bfqg does not disappear (see comments in
 * bfq_bic_update_cgroup on guaranteeing the consistency of blkg
 * objects).
 */
void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
                   struct bfq_group *bfqg)
{
        struct bfq_entity *entity = &bfqq->entity;

        /* If bfqq is empty, then bfq_bfqq_expire also invokes
         * bfq_del_bfqq_busy, thereby removing bfqq and its entity
         * from data structures related to current group. Otherwise we
         * need to remove bfqq explicitly with bfq_deactivate_bfqq, as
         * we do below.
         */
        if (bfqq == bfqd->in_service_queue)
                bfq_bfqq_expire(bfqd, bfqd->in_service_queue,
                                false, BFQQE_PREEMPTED);

        if (bfq_bfqq_busy(bfqq))
                bfq_deactivate_bfqq(bfqd, bfqq, false, false);
        else if (entity->on_st)
                bfq_put_idle_entity(bfq_entity_service_tree(entity), entity);
        bfqg_and_blkg_put(bfqq_group(bfqq));

        entity->parent = bfqg->my_entity;
        entity->sched_data = &bfqg->sched_data;
        /* pin down bfqg and its associated blkg */
        bfqg_and_blkg_get(bfqg);

        if (bfq_bfqq_busy(bfqq)) {
                if (unlikely(!bfqd->nonrot_with_queueing))
                        bfq_pos_tree_add_move(bfqd, bfqq);
                bfq_activate_bfqq(bfqd, bfqq);
        }

        if (!bfqd->in_service_queue && !bfqd->rq_in_driver)
                bfq_schedule_dispatch(bfqd);
}

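/*
 * To summarize, bfq_bfqq_move performs: (1) expire bfqq if it is being
 * served, (2) detach it from the old group's busy or idle tree, (3)
 * drop the reference on the old group, (4) re-link the entity to the
 * new group, (5) take a reference on the new group and re-activate the
 * queue there if it was busy.
 */
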
/**
 * __bfq_bic_change_cgroup - move @bic to @cgroup.
 * @bfqd: the queue descriptor.
 * @bic: the bic to move.
 * @blkcg: the blk-cgroup to move to.
 *
 * Move bic to blkcg, assuming that bfqd->lock is held, which makes
 * sure that the reference to the cgroup is valid across the call (see
 * comments in bfq_bic_update_cgroup on this issue).
 *
 * NOTE: an alternative approach might have been to store the current
 * cgroup in bfqq and to get a reference to it, reducing the lookup
 * time here, at the price of slightly more complex code.
 */
static struct bfq_group *__bfq_bic_change_cgroup(struct bfq_data *bfqd,
                                                 struct bfq_io_cq *bic,
                                                 struct blkcg *blkcg)
{
        struct bfq_queue *async_bfqq = bic_to_bfqq(bic, 0);
        struct bfq_queue *sync_bfqq = bic_to_bfqq(bic, 1);
        struct bfq_group *bfqg;
        struct bfq_entity *entity;

        bfqg = bfq_find_set_group(bfqd, blkcg);

        if (unlikely(!bfqg))
                bfqg = bfqd->root_group;

        if (async_bfqq) {
                entity = &async_bfqq->entity;

                if (entity->sched_data != &bfqg->sched_data) {
                        bic_set_bfqq(bic, NULL, 0);
                        bfq_log_bfqq(bfqd, async_bfqq,
                                     "bic_change_group: %p %d",
                                     async_bfqq, async_bfqq->ref);
                        bfq_put_queue(async_bfqq);
                }
        }

        if (sync_bfqq) {
                entity = &sync_bfqq->entity;
                if (entity->sched_data != &bfqg->sched_data)
                        bfq_bfqq_move(bfqd, sync_bfqq, bfqg);
        }

        return bfqg;
}

void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio)
{
        struct bfq_data *bfqd = bic_to_bfqd(bic);
        struct bfq_group *bfqg = NULL;
        uint64_t serial_nr;

        rcu_read_lock();
        serial_nr = __bio_blkcg(bio)->css.serial_nr;

        /*
         * Check whether blkcg has changed. The condition may trigger
         * spuriously on a newly created cic but there's no harm.
         */
        if (unlikely(!bfqd) || likely(bic->blkcg_serial_nr == serial_nr))
                goto out;

        bfqg = __bfq_bic_change_cgroup(bfqd, bic, __bio_blkcg(bio));
        /*
         * Update blkg_path for bfq_log_* functions. We cache this
         * path, and update it here, for the following
         * reasons. Operations on blkg objects in blk-cgroup are
         * protected with the request_queue lock, and not with the
         * lock that protects the instances of this scheduler
         * (bfqd->lock). This exposes BFQ to the following sort of
         * race.
         *
         * The blkg_lookup performed in bfq_get_queue, protected
         * through rcu, may happen to return the address of a copy of
         * the original blkg. If this is the case, then the
         * bfqg_and_blkg_get performed in bfq_get_queue, to pin down
         * the blkg, is useless: it does not prevent blk-cgroup code
         * from destroying both the original blkg and all objects
         * directly or indirectly referred by the copy of the
         * blkg.
         *
         * On the bright side, destroy operations on a blkg invoke, as
         * a first step, hooks of the scheduler associated with the
         * blkg. And these hooks are executed with bfqd->lock held for
         * BFQ. As a consequence, for any blkg associated with the
         * request queue this instance of the scheduler is attached
         * to, we are guaranteed that such a blkg is not destroyed, and
         * that all the pointers it contains are consistent, while we
         * are holding bfqd->lock. A blkg_lookup performed with
         * bfqd->lock held then returns a fully consistent blkg, which
         * remains consistent as long as this lock is held.
         *
         * Thanks to the last fact, and to the fact that: (1) bfqg has
         * been obtained through a blkg_lookup in the above
         * assignment, and (2) bfqd->lock is being held, here we can
         * safely use the policy data for the involved blkg (i.e., the
         * field bfqg->pd) to get to the blkg associated with bfqg,
         * and then we can safely use any field of blkg. After we
         * release bfqd->lock, even just getting blkg through this
         * bfqg may cause dangling references to be traversed, as
         * bfqg->pd may not exist any more.
         *
         * In view of the above facts, here we cache, in the bfqg, any
         * blkg data we may need for this bic, and for its associated
         * bfq_queue. As of now, we need to cache only the path of the
         * blkg, which is used in the bfq_log_* functions.
         *
         * Finally, note that bfqg itself needs to be protected from
         * destruction on the blkg_free of the original blkg (which
         * invokes bfq_pd_free). We use an additional private
         * refcounter for bfqg, to let it disappear only after no
         * bfq_queue refers to it any longer.
         */
        blkg_path(bfqg_to_blkg(bfqg), bfqg->blkg_path, sizeof(bfqg->blkg_path));
        bic->blkcg_serial_nr = serial_nr;
out:
        rcu_read_unlock();
}

/**
 * bfq_flush_idle_tree - deactivate any entity on the idle tree of @st.
 * @st: the service tree being flushed.
 */
static void bfq_flush_idle_tree(struct bfq_service_tree *st)
{
        struct bfq_entity *entity = st->first_idle;

        for (; entity ; entity = st->first_idle)
                __bfq_deactivate_entity(entity, false);
}

/**
 * bfq_reparent_leaf_entity - move leaf entity to the root_group.
 * @bfqd: the device data structure with the root group.
 * @entity: the entity to move.
 */
static void bfq_reparent_leaf_entity(struct bfq_data *bfqd,
                                     struct bfq_entity *entity)
{
        struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);

        bfq_bfqq_move(bfqd, bfqq, bfqd->root_group);
}

/**
 * bfq_reparent_active_entities - move to the root group all active
 *                                entities.
 * @bfqd: the device data structure with the root group.
 * @bfqg: the group to move from.
 * @st: the service tree with the entities.
 */
static void bfq_reparent_active_entities(struct bfq_data *bfqd,
                                         struct bfq_group *bfqg,
                                         struct bfq_service_tree *st)
{
        struct rb_root *active = &st->active;
        struct bfq_entity *entity = NULL;

        if (!RB_EMPTY_ROOT(&st->active))
                entity = bfq_entity_of(rb_first(active));

        for (; entity ; entity = bfq_entity_of(rb_first(active)))
                bfq_reparent_leaf_entity(bfqd, entity);

        if (bfqg->sched_data.in_service_entity)
                bfq_reparent_leaf_entity(bfqd,
                                         bfqg->sched_data.in_service_entity);
}

/**
 * bfq_pd_offline - deactivate the entity associated with @pd,
 *                  and reparent its children entities.
 * @pd: descriptor of the policy going offline.
 *
 * blkio already grabs the queue_lock for us, so no need to use
 * RCU-based magic
 */
static void bfq_pd_offline(struct blkg_policy_data *pd)
{
        struct bfq_service_tree *st;
        struct bfq_group *bfqg = pd_to_bfqg(pd);
        struct bfq_data *bfqd = bfqg->bfqd;
        struct bfq_entity *entity = bfqg->my_entity;
        unsigned long flags;
        int i;

        spin_lock_irqsave(&bfqd->lock, flags);

        if (!entity) /* root group */
                goto put_async_queues;

        /*
         * Empty all service_trees belonging to this group before
         * deactivating the group itself.
         */
        for (i = 0; i < BFQ_IOPRIO_CLASSES; i++) {
                st = bfqg->sched_data.service_tree + i;

                /*
                 * The idle tree may still contain bfq_queues belonging
                 * to exited tasks, because they never migrated to a
                 * different cgroup from the one being destroyed now.
                 */
                bfq_flush_idle_tree(st);

                /*
                 * It may happen that some queues are still active
                 * (busy) upon group destruction (if the corresponding
                 * processes have been forced to terminate). We move
                 * all the leaf entities corresponding to these queues
                 * to the root_group.
                 * Also, it may happen that the group has an entity
                 * in service, which is disconnected from the active
                 * tree: it must be moved, too.
                 * There is no need to put the sync queues, as the
                 * scheduler has taken no reference.
                 */
                bfq_reparent_active_entities(bfqd, bfqg, st);
        }

        __bfq_deactivate_entity(entity, false);

put_async_queues:
        bfq_put_async_queues(bfqd, bfqg);

        spin_unlock_irqrestore(&bfqd->lock, flags);
        /*
         * @blkg is going offline and will be ignored by
         * blkg_[rw]stat_recursive_sum(). Transfer stats to the parent so
         * that they don't get lost. If IOs complete after this point, the
         * stats for them will be lost. Oh well...
         */
        bfqg_stats_xfer_dead(bfqg);
}

void bfq_end_wr_async(struct bfq_data *bfqd)
{
        struct blkcg_gq *blkg;

        list_for_each_entry(blkg, &bfqd->queue->blkg_list, q_node) {
                struct bfq_group *bfqg = blkg_to_bfqg(blkg);

                bfq_end_wr_async_queues(bfqd, bfqg);
        }
        bfq_end_wr_async_queues(bfqd, bfqd->root_group);
}

static int bfq_io_show_weight_legacy(struct seq_file *sf, void *v)
{
        struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
        struct bfq_group_data *bfqgd = blkcg_to_bfqgd(blkcg);
        unsigned int val = 0;

        if (bfqgd)
                val = bfqgd->weight;

        seq_printf(sf, "%u\n", val);

        return 0;
}

static u64 bfqg_prfill_weight_device(struct seq_file *sf,
                                     struct blkg_policy_data *pd, int off)
{
        struct bfq_group *bfqg = pd_to_bfqg(pd);

        if (!bfqg->entity.dev_weight)
                return 0;
        return __blkg_prfill_u64(sf, pd, bfqg->entity.dev_weight);
}

static int bfq_io_show_weight(struct seq_file *sf, void *v)
{
        struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
        struct bfq_group_data *bfqgd = blkcg_to_bfqgd(blkcg);

        seq_printf(sf, "default %u\n", bfqgd->weight);
        blkcg_print_blkgs(sf, blkcg, bfqg_prfill_weight_device,
                          &blkcg_policy_bfq, 0, false);
        return 0;
}

static void bfq_group_set_weight(struct bfq_group *bfqg, u64 weight, u64 dev_weight)
{
        weight = dev_weight ?: weight;

        bfqg->entity.dev_weight = dev_weight;
        /*
         * Setting the prio_changed flag of the entity
         * to 1 with new_weight == weight would re-set
         * the value of the weight to its ioprio mapping.
         * Set the flag only if necessary.
         */
        if ((unsigned short)weight != bfqg->entity.new_weight) {
                bfqg->entity.new_weight = (unsigned short)weight;
                /*
                 * Make sure that the above new value has been
                 * stored in bfqg->entity.new_weight before
                 * setting the prio_changed flag. In fact,
                 * this flag may be read asynchronously (in
                 * critical sections protected by a different
                 * lock than that held here), and finding this
                 * flag set may cause the execution of the code
                 * for updating parameters whose value may
                 * depend also on bfqg->entity.new_weight (in
                 * __bfq_entity_update_weight_prio).
                 * This barrier makes sure that the new value
                 * of bfqg->entity.new_weight is correctly
                 * seen in that code.
                 */
                smp_wmb();
                bfqg->entity.prio_changed = 1;
        }
}

static int bfq_io_set_weight_legacy(struct cgroup_subsys_state *css,
                                    struct cftype *cftype,
                                    u64 val)
{
        struct blkcg *blkcg = css_to_blkcg(css);
        struct bfq_group_data *bfqgd = blkcg_to_bfqgd(blkcg);
        struct blkcg_gq *blkg;
        int ret = -ERANGE;

        if (val < BFQ_MIN_WEIGHT || val > BFQ_MAX_WEIGHT)
                return ret;

        ret = 0;
        spin_lock_irq(&blkcg->lock);
        bfqgd->weight = (unsigned short)val;
        hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
                struct bfq_group *bfqg = blkg_to_bfqg(blkg);

                if (bfqg)
                        bfq_group_set_weight(bfqg, val, 0);
        }
        spin_unlock_irq(&blkcg->lock);

        return ret;
}

static ssize_t bfq_io_set_device_weight(struct kernfs_open_file *of,
                                        char *buf, size_t nbytes,
                                        loff_t off)
{
        int ret;
        struct blkg_conf_ctx ctx;
        struct blkcg *blkcg = css_to_blkcg(of_css(of));
        struct bfq_group *bfqg;
        u64 v;

        ret = blkg_conf_prep(blkcg, &blkcg_policy_bfq, buf, &ctx);
        if (ret)
                return ret;

        if (sscanf(ctx.body, "%llu", &v) == 1) {
                /* require "default" on dfl */
                ret = -ERANGE;
                if (!v)
                        goto out;
        } else if (!strcmp(strim(ctx.body), "default")) {
                v = 0;
        } else {
                ret = -EINVAL;
                goto out;
        }

        bfqg = blkg_to_bfqg(ctx.blkg);

        ret = -ERANGE;
        if (!v || (v >= BFQ_MIN_WEIGHT && v <= BFQ_MAX_WEIGHT)) {
                bfq_group_set_weight(bfqg, bfqg->entity.weight, v);
                ret = 0;
        }
out:
        blkg_conf_finish(&ctx);
        return ret ?: nbytes;
}

static ssize_t bfq_io_set_weight(struct kernfs_open_file *of,
                                 char *buf, size_t nbytes,
                                 loff_t off)
{
        char *endp;
        int ret;
        u64 v;

        buf = strim(buf);

        /* "WEIGHT" or "default WEIGHT" sets the default weight */
        v = simple_strtoull(buf, &endp, 0);
        if (*endp == '\0' || sscanf(buf, "default %llu", &v) == 1) {
                ret = bfq_io_set_weight_legacy(of_css(of), NULL, v);
                return ret ?: nbytes;
        }

        return bfq_io_set_device_weight(of, buf, nbytes, off);
}

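/*
 * Example usage from userspace (device numbers, cgroup names and mount
 * points below are illustrative):
 *
 *   cgroup v2:  echo "default 200" > /sys/fs/cgroup/grp/io.bfq.weight
 *               echo "8:0 300" > /sys/fs/cgroup/grp/io.bfq.weight
 *   cgroup v1:  echo 200 > .../blkio.bfq.weight
 *               echo "8:0 300" > .../blkio.bfq.weight_device
 *
 * A "MAJ:MIN WEIGHT" line sets a per-device weight, which overrides the
 * group-wide default for that device; "MAJ:MIN default" removes the
 * override again.
 */
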
static int bfqg_print_rwstat(struct seq_file *sf, void *v)
{
        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_rwstat,
                          &blkcg_policy_bfq, seq_cft(sf)->private, true);
        return 0;
}

static u64 bfqg_prfill_rwstat_recursive(struct seq_file *sf,
                                        struct blkg_policy_data *pd, int off)
{
        struct blkg_rwstat_sample sum;

        blkg_rwstat_recursive_sum(pd_to_blkg(pd), &blkcg_policy_bfq, off, &sum);
        return __blkg_prfill_rwstat(sf, pd, &sum);
}

static int bfqg_print_rwstat_recursive(struct seq_file *sf, void *v)
{
        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
                          bfqg_prfill_rwstat_recursive, &blkcg_policy_bfq,
                          seq_cft(sf)->private, true);
        return 0;
}

#ifdef CONFIG_BFQ_CGROUP_DEBUG
static int bfqg_print_stat(struct seq_file *sf, void *v)
{
        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_stat,
                          &blkcg_policy_bfq, seq_cft(sf)->private, false);
        return 0;
}

static u64 bfqg_prfill_stat_recursive(struct seq_file *sf,
                                      struct blkg_policy_data *pd, int off)
{
        struct blkcg_gq *blkg = pd_to_blkg(pd);
        struct blkcg_gq *pos_blkg;
        struct cgroup_subsys_state *pos_css;
        u64 sum = 0;

        lockdep_assert_held(&blkg->q->queue_lock);

        rcu_read_lock();
        blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
                struct bfq_stat *stat;

                if (!pos_blkg->online)
                        continue;

                stat = (void *)blkg_to_pd(pos_blkg, &blkcg_policy_bfq) + off;
                sum += bfq_stat_read(stat) + atomic64_read(&stat->aux_cnt);
        }
        rcu_read_unlock();

        return __blkg_prfill_u64(sf, pd, sum);
}

static int bfqg_print_stat_recursive(struct seq_file *sf, void *v)
{
        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
                          bfqg_prfill_stat_recursive, &blkcg_policy_bfq,
                          seq_cft(sf)->private, false);
        return 0;
}

static u64 bfqg_prfill_sectors(struct seq_file *sf, struct blkg_policy_data *pd,
                               int off)
{
        struct bfq_group *bfqg = blkg_to_bfqg(pd->blkg);
        u64 sum = blkg_rwstat_total(&bfqg->stats.bytes);

        return __blkg_prfill_u64(sf, pd, sum >> 9);
}

static int bfqg_print_stat_sectors(struct seq_file *sf, void *v)
{
        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
                          bfqg_prfill_sectors, &blkcg_policy_bfq, 0, false);
        return 0;
}

static u64 bfqg_prfill_sectors_recursive(struct seq_file *sf,
                                         struct blkg_policy_data *pd, int off)
{
        struct blkg_rwstat_sample tmp;

        blkg_rwstat_recursive_sum(pd->blkg, &blkcg_policy_bfq,
                        offsetof(struct bfq_group, stats.bytes), &tmp);

        return __blkg_prfill_u64(sf, pd,
                (tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE]) >> 9);
}

static int bfqg_print_stat_sectors_recursive(struct seq_file *sf, void *v)
{
        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
                          bfqg_prfill_sectors_recursive, &blkcg_policy_bfq, 0,
                          false);
        return 0;
}

static u64 bfqg_prfill_avg_queue_size(struct seq_file *sf,
                                      struct blkg_policy_data *pd, int off)
{
        struct bfq_group *bfqg = pd_to_bfqg(pd);
        u64 samples = bfq_stat_read(&bfqg->stats.avg_queue_size_samples);
        u64 v = 0;

        if (samples) {
                v = bfq_stat_read(&bfqg->stats.avg_queue_size_sum);
                v = div64_u64(v, samples);
        }
        __blkg_prfill_u64(sf, pd, v);
        return 0;
}

/* print avg_queue_size */
static int bfqg_print_avg_queue_size(struct seq_file *sf, void *v)
{
        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
                          bfqg_prfill_avg_queue_size, &blkcg_policy_bfq,
                          0, false);
        return 0;
}
#endif /* CONFIG_BFQ_CGROUP_DEBUG */

struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)
{
        int ret;

        ret = blkcg_activate_policy(bfqd->queue, &blkcg_policy_bfq);
        if (ret)
                return NULL;

        return blkg_to_bfqg(bfqd->queue->root_blkg);
}

struct blkcg_policy blkcg_policy_bfq = {
        .dfl_cftypes            = bfq_blkg_files,
        .legacy_cftypes         = bfq_blkcg_legacy_files,

        .cpd_alloc_fn           = bfq_cpd_alloc,
        .cpd_init_fn            = bfq_cpd_init,
        .cpd_bind_fn            = bfq_cpd_init,
        .cpd_free_fn            = bfq_cpd_free,

        .pd_alloc_fn            = bfq_pd_alloc,
        .pd_init_fn             = bfq_pd_init,
        .pd_offline_fn          = bfq_pd_offline,
        .pd_free_fn             = bfq_pd_free,
        .pd_reset_stats_fn      = bfq_pd_reset_stats,
};

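/*
 * Rough lifecycle of the hooks above: the cpd_* callbacks manage the
 * per-blkcg policy data (one bfq_group_data per blk-cgroup), while the
 * pd_* callbacks manage the per-(blkcg, request_queue) policy data (one
 * bfq_group per blkg). bfq_pd_offline runs when a group goes offline,
 * reparenting its entities and stats; bfq_pd_free runs when the blkg is
 * finally released. dfl_cftypes is the cgroup v2 interface,
 * legacy_cftypes the cgroup v1 (blkio) one.
 */
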
struct cftype bfq_blkcg_legacy_files[] = {
        {
                .name = "bfq.weight",
                .flags = CFTYPE_NOT_ON_ROOT,
                .seq_show = bfq_io_show_weight_legacy,
                .write_u64 = bfq_io_set_weight_legacy,
        },
        {
                .name = "bfq.weight_device",
                .flags = CFTYPE_NOT_ON_ROOT,
                .seq_show = bfq_io_show_weight,
                .write = bfq_io_set_weight,
        },

        /* statistics, covers only the tasks in the bfqg */
        {
                .name = "bfq.io_service_bytes",
                .private = offsetof(struct bfq_group, stats.bytes),
                .seq_show = bfqg_print_rwstat,
        },
        {
                .name = "bfq.io_serviced",
                .private = offsetof(struct bfq_group, stats.ios),
                .seq_show = bfqg_print_rwstat,
        },
#ifdef CONFIG_BFQ_CGROUP_DEBUG
        {
                .name = "bfq.time",
                .private = offsetof(struct bfq_group, stats.time),
                .seq_show = bfqg_print_stat,
        },
        {
                .name = "bfq.sectors",
                .seq_show = bfqg_print_stat_sectors,
        },
        {
                .name = "bfq.io_service_time",
                .private = offsetof(struct bfq_group, stats.service_time),
                .seq_show = bfqg_print_rwstat,
        },
        {
                .name = "bfq.io_wait_time",
                .private = offsetof(struct bfq_group, stats.wait_time),
                .seq_show = bfqg_print_rwstat,
        },
        {
                .name = "bfq.io_merged",
                .private = offsetof(struct bfq_group, stats.merged),
                .seq_show = bfqg_print_rwstat,
        },
        {
                .name = "bfq.io_queued",
                .private = offsetof(struct bfq_group, stats.queued),
                .seq_show = bfqg_print_rwstat,
        },
#endif /* CONFIG_BFQ_CGROUP_DEBUG */

        /* the same statistics which cover the bfqg and its descendants */
        {
                .name = "bfq.io_service_bytes_recursive",
                .private = offsetof(struct bfq_group, stats.bytes),
                .seq_show = bfqg_print_rwstat_recursive,
        },
        {
                .name = "bfq.io_serviced_recursive",
                .private = offsetof(struct bfq_group, stats.ios),
                .seq_show = bfqg_print_rwstat_recursive,
        },
#ifdef CONFIG_BFQ_CGROUP_DEBUG
        {
                .name = "bfq.time_recursive",
                .private = offsetof(struct bfq_group, stats.time),
                .seq_show = bfqg_print_stat_recursive,
        },
        {
                .name = "bfq.sectors_recursive",
                .seq_show = bfqg_print_stat_sectors_recursive,
        },
        {
                .name = "bfq.io_service_time_recursive",
                .private = offsetof(struct bfq_group, stats.service_time),
                .seq_show = bfqg_print_rwstat_recursive,
        },
        {
                .name = "bfq.io_wait_time_recursive",
                .private = offsetof(struct bfq_group, stats.wait_time),
                .seq_show = bfqg_print_rwstat_recursive,
        },
        {
                .name = "bfq.io_merged_recursive",
                .private = offsetof(struct bfq_group, stats.merged),
                .seq_show = bfqg_print_rwstat_recursive,
        },
        {
                .name = "bfq.io_queued_recursive",
                .private = offsetof(struct bfq_group, stats.queued),
                .seq_show = bfqg_print_rwstat_recursive,
        },
        {
                .name = "bfq.avg_queue_size",
                .seq_show = bfqg_print_avg_queue_size,
        },
        {
                .name = "bfq.group_wait_time",
                .private = offsetof(struct bfq_group, stats.group_wait_time),
                .seq_show = bfqg_print_stat,
        },
        {
                .name = "bfq.idle_time",
                .private = offsetof(struct bfq_group, stats.idle_time),
                .seq_show = bfqg_print_stat,
        },
        {
                .name = "bfq.empty_time",
                .private = offsetof(struct bfq_group, stats.empty_time),
                .seq_show = bfqg_print_stat,
        },
        {
                .name = "bfq.dequeue",
                .private = offsetof(struct bfq_group, stats.dequeue),
                .seq_show = bfqg_print_stat,
        },
#endif /* CONFIG_BFQ_CGROUP_DEBUG */
        { } /* terminate */
};

struct cftype bfq_blkg_files[] = {
        {
                .name = "bfq.weight",
                .flags = CFTYPE_NOT_ON_ROOT,
                .seq_show = bfq_io_show_weight,
                .write = bfq_io_set_weight,
        },
        {} /* terminate */
};

#else /* CONFIG_BFQ_GROUP_IOSCHED */

void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
                   struct bfq_group *bfqg) {}

void bfq_init_entity(struct bfq_entity *entity, struct bfq_group *bfqg)
{
        struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);

        entity->weight = entity->new_weight;
        entity->orig_weight = entity->new_weight;
        if (bfqq) {
                bfqq->ioprio = bfqq->new_ioprio;
                bfqq->ioprio_class = bfqq->new_ioprio_class;
        }
        entity->sched_data = &bfqg->sched_data;
}

void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio) {}

void bfq_end_wr_async(struct bfq_data *bfqd)
{
        bfq_end_wr_async_queues(bfqd, bfqd->root_group);
}

struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd, struct blkcg *blkcg)
{
        return bfqd->root_group;
}

struct bfq_group *bfqq_group(struct bfq_queue *bfqq)
{
        return bfqq->bfqd->root_group;
}

struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)
{
        struct bfq_group *bfqg;
        int i;

        bfqg = kmalloc_node(sizeof(*bfqg), GFP_KERNEL | __GFP_ZERO, node);
        if (!bfqg)
                return NULL;

        for (i = 0; i < BFQ_IOPRIO_CLASSES; i++)
                bfqg->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT;

        return bfqg;
}
#endif /* CONFIG_BFQ_GROUP_IOSCHED */