1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * cgroups support for the BFQ I/O scheduler.
4 */
5 #include <linux/module.h>
6 #include <linux/slab.h>
7 #include <linux/blkdev.h>
8 #include <linux/cgroup.h>
9 #include <linux/elevator.h>
10 #include <linux/ktime.h>
11 #include <linux/rbtree.h>
12 #include <linux/ioprio.h>
13 #include <linux/sbitmap.h>
14 #include <linux/delay.h>
15
16 #include "bfq-iosched.h"
17
18 #ifdef CONFIG_BFQ_CGROUP_DEBUG
19 static int bfq_stat_init(struct bfq_stat *stat, gfp_t gfp)
20 {
21 int ret;
22
23 ret = percpu_counter_init(&stat->cpu_cnt, 0, gfp);
24 if (ret)
25 return ret;
26
27 atomic64_set(&stat->aux_cnt, 0);
28 return 0;
29 }
30
31 static void bfq_stat_exit(struct bfq_stat *stat)
32 {
33 percpu_counter_destroy(&stat->cpu_cnt);
34 }
35
36 /**
37 * bfq_stat_add - add a value to a bfq_stat
38 * @stat: target bfq_stat
39 * @val: value to add
40 *
41  * Add @val to @stat. The caller must ensure that IRQs on the same CPU
42 * don't re-enter this function for the same counter.
43 */
44 static inline void bfq_stat_add(struct bfq_stat *stat, uint64_t val)
45 {
46 percpu_counter_add_batch(&stat->cpu_cnt, val, BLKG_STAT_CPU_BATCH);
47 }
48
49 /**
50 * bfq_stat_read - read the current value of a bfq_stat
51 * @stat: bfq_stat to read
52 */
53 static inline uint64_t bfq_stat_read(struct bfq_stat *stat)
54 {
55 return percpu_counter_sum_positive(&stat->cpu_cnt);
56 }
57
58 /**
59 * bfq_stat_reset - reset a bfq_stat
60 * @stat: bfq_stat to reset
61 */
62 static inline void bfq_stat_reset(struct bfq_stat *stat)
63 {
64 percpu_counter_set(&stat->cpu_cnt, 0);
65 atomic64_set(&stat->aux_cnt, 0);
66 }
67
68 /**
69 * bfq_stat_add_aux - add a bfq_stat into another's aux count
70 * @to: the destination bfq_stat
71 * @from: the source
72 *
73 * Add @from's count including the aux one to @to's aux count.
74 */
75 static inline void bfq_stat_add_aux(struct bfq_stat *to,
76 struct bfq_stat *from)
77 {
78 atomic64_add(bfq_stat_read(from) + atomic64_read(&from->aux_cnt),
79 &to->aux_cnt);
80 }
81
82 /**
83 * blkg_prfill_stat - prfill callback for bfq_stat
84 * @sf: seq_file to print to
85 * @pd: policy private data of interest
86 * @off: offset to the bfq_stat in @pd
87 *
88 * prfill callback for printing a bfq_stat.
89 */
90 static u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd,
91 int off)
92 {
93 return __blkg_prfill_u64(sf, pd, bfq_stat_read((void *)pd + off));
94 }
95
96 /* bfqg stats flags */
97 enum bfqg_stats_flags {
98 BFQG_stats_waiting = 0,
99 BFQG_stats_idling,
100 BFQG_stats_empty,
101 };
102
103 #define BFQG_FLAG_FNS(name) \
104 static void bfqg_stats_mark_##name(struct bfqg_stats *stats) \
105 { \
106 stats->flags |= (1 << BFQG_stats_##name); \
107 } \
108 static void bfqg_stats_clear_##name(struct bfqg_stats *stats) \
109 { \
110 stats->flags &= ~(1 << BFQG_stats_##name); \
111 } \
112 static int bfqg_stats_##name(struct bfqg_stats *stats) \
113 { \
114 return (stats->flags & (1 << BFQG_stats_##name)) != 0; \
115 } \
116
117 BFQG_FLAG_FNS(waiting)
118 BFQG_FLAG_FNS(idling)
119 BFQG_FLAG_FNS(empty)
120 #undef BFQG_FLAG_FNS
121
122 /* This should be called with the scheduler lock held. */
123 static void bfqg_stats_update_group_wait_time(struct bfqg_stats *stats)
124 {
125 u64 now;
126
127 if (!bfqg_stats_waiting(stats))
128 return;
129
130 now = ktime_get_ns();
131 if (now > stats->start_group_wait_time)
132 bfq_stat_add(&stats->group_wait_time,
133 now - stats->start_group_wait_time);
134 bfqg_stats_clear_waiting(stats);
135 }
136
137 /* This should be called with the scheduler lock held. */
138 static void bfqg_stats_set_start_group_wait_time(struct bfq_group *bfqg,
139 struct bfq_group *curr_bfqg)
140 {
141 struct bfqg_stats *stats = &bfqg->stats;
142
143 if (bfqg_stats_waiting(stats))
144 return;
145 if (bfqg == curr_bfqg)
146 return;
147 stats->start_group_wait_time = ktime_get_ns();
148 bfqg_stats_mark_waiting(stats);
149 }
150
151 /* This should be called with the scheduler lock held. */
152 static void bfqg_stats_end_empty_time(struct bfqg_stats *stats)
153 {
154 u64 now;
155
156 if (!bfqg_stats_empty(stats))
157 return;
158
159 now = ktime_get_ns();
160 if (now > stats->start_empty_time)
161 bfq_stat_add(&stats->empty_time,
162 now - stats->start_empty_time);
163 bfqg_stats_clear_empty(stats);
164 }
165
166 void bfqg_stats_update_dequeue(struct bfq_group *bfqg)
167 {
168 bfq_stat_add(&bfqg->stats.dequeue, 1);
169 }
170
171 void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg)
172 {
173 struct bfqg_stats *stats = &bfqg->stats;
174
175 if (blkg_rwstat_total(&stats->queued))
176 return;
177
178 /*
179 * group is already marked empty. This can happen if bfqq got new
180 * request in parent group and moved to this group while being added
181 * to service tree. Just ignore the event and move on.
182 */
183 if (bfqg_stats_empty(stats))
184 return;
185
186 stats->start_empty_time = ktime_get_ns();
187 bfqg_stats_mark_empty(stats);
188 }
189
190 void bfqg_stats_update_idle_time(struct bfq_group *bfqg)
191 {
192 struct bfqg_stats *stats = &bfqg->stats;
193
194 if (bfqg_stats_idling(stats)) {
195 u64 now = ktime_get_ns();
196
197 if (now > stats->start_idle_time)
198 bfq_stat_add(&stats->idle_time,
199 now - stats->start_idle_time);
200 bfqg_stats_clear_idling(stats);
201 }
202 }
203
204 void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg)
205 {
206 struct bfqg_stats *stats = &bfqg->stats;
207
208 stats->start_idle_time = ktime_get_ns();
209 bfqg_stats_mark_idling(stats);
210 }
211
212 void bfqg_stats_update_avg_queue_size(struct bfq_group *bfqg)
213 {
214 struct bfqg_stats *stats = &bfqg->stats;
215
216 bfq_stat_add(&stats->avg_queue_size_sum,
217 blkg_rwstat_total(&stats->queued));
218 bfq_stat_add(&stats->avg_queue_size_samples, 1);
219 bfqg_stats_update_group_wait_time(stats);
220 }
221
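/*
 * Account a request being queued on @bfqq: bump the queued counter for the
 * request's direction, close any "empty" period and, if @bfqq is not the
 * queue currently in service, start measuring group wait time for @bfqq's
 * group.
 */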
222 void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq,
223 unsigned int op)
224 {
225 blkg_rwstat_add(&bfqg->stats.queued, op, 1);
226 bfqg_stats_end_empty_time(&bfqg->stats);
227 if (!(bfqq == ((struct bfq_data *)bfqg->bfqd)->in_service_queue))
228 bfqg_stats_set_start_group_wait_time(bfqg, bfqq_group(bfqq));
229 }
230
231 void bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op)
232 {
233 blkg_rwstat_add(&bfqg->stats.queued, op, -1);
234 }
235
236 void bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op)
237 {
238 blkg_rwstat_add(&bfqg->stats.merged, op, 1);
239 }
240
241 void bfqg_stats_update_completion(struct bfq_group *bfqg, u64 start_time_ns,
242 u64 io_start_time_ns, unsigned int op)
243 {
244 struct bfqg_stats *stats = &bfqg->stats;
245 u64 now = ktime_get_ns();
246
247 if (now > io_start_time_ns)
248 blkg_rwstat_add(&stats->service_time, op,
249 now - io_start_time_ns);
250 if (io_start_time_ns > start_time_ns)
251 blkg_rwstat_add(&stats->wait_time, op,
252 io_start_time_ns - start_time_ns);
253 }
254
255 #else /* CONFIG_BFQ_CGROUP_DEBUG */
256
257 void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq,
258 unsigned int op) { }
259 void bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op) { }
260 void bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op) { }
261 void bfqg_stats_update_completion(struct bfq_group *bfqg, u64 start_time_ns,
262 u64 io_start_time_ns, unsigned int op) { }
263 void bfqg_stats_update_dequeue(struct bfq_group *bfqg) { }
264 void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg) { }
265 void bfqg_stats_update_idle_time(struct bfq_group *bfqg) { }
266 void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg) { }
267 void bfqg_stats_update_avg_queue_size(struct bfq_group *bfqg) { }
268
269 #endif /* CONFIG_BFQ_CGROUP_DEBUG */
270
271 #ifdef CONFIG_BFQ_GROUP_IOSCHED
272
273 /*
274 * blk-cgroup policy-related handlers
275 * The following functions help in converting between blk-cgroup
276 * internal structures and BFQ-specific structures.
277 */
278
279 static struct bfq_group *pd_to_bfqg(struct blkg_policy_data *pd)
280 {
281 return pd ? container_of(pd, struct bfq_group, pd) : NULL;
282 }
283
284 struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg)
285 {
286 return pd_to_blkg(&bfqg->pd);
287 }
288
289 static struct bfq_group *blkg_to_bfqg(struct blkcg_gq *blkg)
290 {
291 return pd_to_bfqg(blkg_to_pd(blkg, &blkcg_policy_bfq));
292 }
293
294 /*
295 * bfq_group handlers
296 * The following functions help in navigating the bfq_group hierarchy
297  * by allowing one to find the parent of a bfq_group or the bfq_group
298  * associated with a bfq_queue.
299 */
300
301 static struct bfq_group *bfqg_parent(struct bfq_group *bfqg)
302 {
303 struct blkcg_gq *pblkg = bfqg_to_blkg(bfqg)->parent;
304
305 return pblkg ? blkg_to_bfqg(pblkg) : NULL;
306 }
307
308 struct bfq_group *bfqq_group(struct bfq_queue *bfqq)
309 {
310 struct bfq_entity *group_entity = bfqq->entity.parent;
311
312 return group_entity ? container_of(group_entity, struct bfq_group,
313 entity) :
314 bfqq->bfqd->root_group;
315 }
316
317 /*
318 * The following two functions handle get and put of a bfq_group by
319 * wrapping the related blk-cgroup hooks.
320 */
321
322 static void bfqg_get(struct bfq_group *bfqg)
323 {
324 bfqg->ref++;
325 }
326
327 static void bfqg_put(struct bfq_group *bfqg)
328 {
329 bfqg->ref--;
330
331 if (bfqg->ref == 0)
332 kfree(bfqg);
333 }
334
335 static void bfqg_and_blkg_get(struct bfq_group *bfqg)
336 {
337 /* see comments in bfq_bic_update_cgroup for why refcounting bfqg */
338 bfqg_get(bfqg);
339
340 blkg_get(bfqg_to_blkg(bfqg));
341 }
342
343 void bfqg_and_blkg_put(struct bfq_group *bfqg)
344 {
345 blkg_put(bfqg_to_blkg(bfqg));
346
347 bfqg_put(bfqg);
348 }
349
350 /* @stats = 0 */
351 static void bfqg_stats_reset(struct bfqg_stats *stats)
352 {
353 #ifdef CONFIG_BFQ_CGROUP_DEBUG
354 /* queued stats shouldn't be cleared */
355 blkg_rwstat_reset(&stats->merged);
356 blkg_rwstat_reset(&stats->service_time);
357 blkg_rwstat_reset(&stats->wait_time);
358 bfq_stat_reset(&stats->time);
359 bfq_stat_reset(&stats->avg_queue_size_sum);
360 bfq_stat_reset(&stats->avg_queue_size_samples);
361 bfq_stat_reset(&stats->dequeue);
362 bfq_stat_reset(&stats->group_wait_time);
363 bfq_stat_reset(&stats->idle_time);
364 bfq_stat_reset(&stats->empty_time);
365 #endif
366 }
367
368 /* @to += @from */
369 static void bfqg_stats_add_aux(struct bfqg_stats *to, struct bfqg_stats *from)
370 {
371 if (!to || !from)
372 return;
373
374 #ifdef CONFIG_BFQ_CGROUP_DEBUG
375 /* queued stats shouldn't be cleared */
376 blkg_rwstat_add_aux(&to->merged, &from->merged);
377 blkg_rwstat_add_aux(&to->service_time, &from->service_time);
378 blkg_rwstat_add_aux(&to->wait_time, &from->wait_time);
379 	bfq_stat_add_aux(&to->time, &from->time);
380 bfq_stat_add_aux(&to->avg_queue_size_sum, &from->avg_queue_size_sum);
381 bfq_stat_add_aux(&to->avg_queue_size_samples,
382 &from->avg_queue_size_samples);
383 bfq_stat_add_aux(&to->dequeue, &from->dequeue);
384 bfq_stat_add_aux(&to->group_wait_time, &from->group_wait_time);
385 bfq_stat_add_aux(&to->idle_time, &from->idle_time);
386 bfq_stat_add_aux(&to->empty_time, &from->empty_time);
387 #endif
388 }
389
390 /*
391 * Transfer @bfqg's stats to its parent's aux counts so that the ancestors'
392 * recursive stats can still account for the amount used by this bfqg after
393 * it's gone.
394 */
395 static void bfqg_stats_xfer_dead(struct bfq_group *bfqg)
396 {
397 struct bfq_group *parent;
398
399 if (!bfqg) /* root_group */
400 return;
401
402 parent = bfqg_parent(bfqg);
403
404 lockdep_assert_held(&bfqg_to_blkg(bfqg)->q->queue_lock);
405
406 if (unlikely(!parent))
407 return;
408
409 bfqg_stats_add_aux(&parent->stats, &bfqg->stats);
410 bfqg_stats_reset(&bfqg->stats);
411 }
412
413 void bfq_init_entity(struct bfq_entity *entity, struct bfq_group *bfqg)
414 {
415 struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
416
417 entity->weight = entity->new_weight;
418 entity->orig_weight = entity->new_weight;
419 if (bfqq) {
420 bfqq->ioprio = bfqq->new_ioprio;
421 bfqq->ioprio_class = bfqq->new_ioprio_class;
422 /*
423 * Make sure that bfqg and its associated blkg do not
424 * disappear before entity.
425 */
426 bfqg_and_blkg_get(bfqg);
427 }
428 entity->parent = bfqg->my_entity; /* NULL for root group */
429 entity->sched_data = &bfqg->sched_data;
430 }
431
432 static void bfqg_stats_exit(struct bfqg_stats *stats)
433 {
434 #ifdef CONFIG_BFQ_CGROUP_DEBUG
435 blkg_rwstat_exit(&stats->merged);
436 blkg_rwstat_exit(&stats->service_time);
437 blkg_rwstat_exit(&stats->wait_time);
438 blkg_rwstat_exit(&stats->queued);
439 bfq_stat_exit(&stats->time);
440 bfq_stat_exit(&stats->avg_queue_size_sum);
441 bfq_stat_exit(&stats->avg_queue_size_samples);
442 bfq_stat_exit(&stats->dequeue);
443 bfq_stat_exit(&stats->group_wait_time);
444 bfq_stat_exit(&stats->idle_time);
445 bfq_stat_exit(&stats->empty_time);
446 #endif
447 }
448
449 static int bfqg_stats_init(struct bfqg_stats *stats, gfp_t gfp)
450 {
451 #ifdef CONFIG_BFQ_CGROUP_DEBUG
452 if (blkg_rwstat_init(&stats->merged, gfp) ||
453 blkg_rwstat_init(&stats->service_time, gfp) ||
454 blkg_rwstat_init(&stats->wait_time, gfp) ||
455 blkg_rwstat_init(&stats->queued, gfp) ||
456 bfq_stat_init(&stats->time, gfp) ||
457 bfq_stat_init(&stats->avg_queue_size_sum, gfp) ||
458 bfq_stat_init(&stats->avg_queue_size_samples, gfp) ||
459 bfq_stat_init(&stats->dequeue, gfp) ||
460 bfq_stat_init(&stats->group_wait_time, gfp) ||
461 bfq_stat_init(&stats->idle_time, gfp) ||
462 bfq_stat_init(&stats->empty_time, gfp)) {
463 bfqg_stats_exit(stats);
464 return -ENOMEM;
465 }
466 #endif
467
468 return 0;
469 }
470
471 static struct bfq_group_data *cpd_to_bfqgd(struct blkcg_policy_data *cpd)
472 {
473 return cpd ? container_of(cpd, struct bfq_group_data, pd) : NULL;
474 }
475
476 static struct bfq_group_data *blkcg_to_bfqgd(struct blkcg *blkcg)
477 {
478 return cpd_to_bfqgd(blkcg_to_cpd(blkcg, &blkcg_policy_bfq));
479 }
480
481 static struct blkcg_policy_data *bfq_cpd_alloc(gfp_t gfp)
482 {
483 struct bfq_group_data *bgd;
484
485 bgd = kzalloc(sizeof(*bgd), gfp);
486 if (!bgd)
487 return NULL;
488 return &bgd->pd;
489 }
490
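/*
 * Initialize the per-blkcg default weight: CGROUP_WEIGHT_DFL on the unified
 * (v2) hierarchy, BFQ_WEIGHT_LEGACY_DFL on the legacy one.
 */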
491 static void bfq_cpd_init(struct blkcg_policy_data *cpd)
492 {
493 struct bfq_group_data *d = cpd_to_bfqgd(cpd);
494
495 d->weight = cgroup_subsys_on_dfl(io_cgrp_subsys) ?
496 CGROUP_WEIGHT_DFL : BFQ_WEIGHT_LEGACY_DFL;
497 }
498
499 static void bfq_cpd_free(struct blkcg_policy_data *cpd)
500 {
501 kfree(cpd_to_bfqgd(cpd));
502 }
503
504 static struct blkg_policy_data *bfq_pd_alloc(gfp_t gfp, struct request_queue *q,
505 struct blkcg *blkcg)
506 {
507 struct bfq_group *bfqg;
508
509 bfqg = kzalloc_node(sizeof(*bfqg), gfp, q->node);
510 if (!bfqg)
511 return NULL;
512
513 if (bfqg_stats_init(&bfqg->stats, gfp)) {
514 kfree(bfqg);
515 return NULL;
516 }
517
518 /* see comments in bfq_bic_update_cgroup for why refcounting */
519 bfqg_get(bfqg);
520 return &bfqg->pd;
521 }
522
523 static void bfq_pd_init(struct blkg_policy_data *pd)
524 {
525 struct blkcg_gq *blkg = pd_to_blkg(pd);
526 struct bfq_group *bfqg = blkg_to_bfqg(blkg);
527 struct bfq_data *bfqd = blkg->q->elevator->elevator_data;
528 struct bfq_entity *entity = &bfqg->entity;
529 struct bfq_group_data *d = blkcg_to_bfqgd(blkg->blkcg);
530
531 entity->orig_weight = entity->weight = entity->new_weight = d->weight;
532 entity->my_sched_data = &bfqg->sched_data;
533 bfqg->my_entity = entity; /*
534 * the root_group's will be set to NULL
535 * in bfq_init_queue()
536 */
537 bfqg->bfqd = bfqd;
538 bfqg->active_entities = 0;
539 bfqg->rq_pos_tree = RB_ROOT;
540 }
541
542 static void bfq_pd_free(struct blkg_policy_data *pd)
543 {
544 struct bfq_group *bfqg = pd_to_bfqg(pd);
545
546 bfqg_stats_exit(&bfqg->stats);
547 bfqg_put(bfqg);
548 }
549
550 static void bfq_pd_reset_stats(struct blkg_policy_data *pd)
551 {
552 struct bfq_group *bfqg = pd_to_bfqg(pd);
553
554 bfqg_stats_reset(&bfqg->stats);
555 }
556
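/* Link @bfqg into the scheduling hierarchy as a child of @parent. */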
557 static void bfq_group_set_parent(struct bfq_group *bfqg,
558 struct bfq_group *parent)
559 {
560 struct bfq_entity *entity;
561
562 entity = &bfqg->entity;
563 entity->parent = parent->my_entity;
564 entity->sched_data = &parent->sched_data;
565 }
566
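/*
 * Return the bfq_group associated with @blkcg on @bfqd's request queue, or
 * NULL if no blkg has been created yet for that pair.
 */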
567 static struct bfq_group *bfq_lookup_bfqg(struct bfq_data *bfqd,
568 struct blkcg *blkcg)
569 {
570 struct blkcg_gq *blkg;
571
572 blkg = blkg_lookup(blkcg, bfqd->queue);
573 if (likely(blkg))
574 return blkg_to_bfqg(blkg);
575 return NULL;
576 }
577
578 struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd,
579 struct blkcg *blkcg)
580 {
581 struct bfq_group *bfqg, *parent;
582 struct bfq_entity *entity;
583
584 bfqg = bfq_lookup_bfqg(bfqd, blkcg);
585
586 if (unlikely(!bfqg))
587 return NULL;
588
589 /*
590 * Update chain of bfq_groups as we might be handling a leaf group
591 * which, along with some of its relatives, has not been hooked yet
592 * to the private hierarchy of BFQ.
593 */
594 entity = &bfqg->entity;
595 for_each_entity(entity) {
596 struct bfq_group *curr_bfqg = container_of(entity,
597 struct bfq_group, entity);
598 if (curr_bfqg != bfqd->root_group) {
599 parent = bfqg_parent(curr_bfqg);
600 if (!parent)
601 parent = bfqd->root_group;
602 bfq_group_set_parent(curr_bfqg, parent);
603 }
604 }
605
606 return bfqg;
607 }
608
609 /**
610 * bfq_bfqq_move - migrate @bfqq to @bfqg.
611 * @bfqd: queue descriptor.
612 * @bfqq: the queue to move.
613 * @bfqg: the group to move to.
614 *
615 * Move @bfqq to @bfqg, deactivating it from its old group and reactivating
616 * it on the new one. Avoid putting the entity on the old group idle tree.
617 *
618 * Must be called under the scheduler lock, to make sure that the blkg
619 * owning @bfqg does not disappear (see comments in
620 * bfq_bic_update_cgroup on guaranteeing the consistency of blkg
621 * objects).
622 */
623 void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
624 struct bfq_group *bfqg)
625 {
626 struct bfq_entity *entity = &bfqq->entity;
627
628 /*
629 	 * oom_bfqq is not allowed to move: it holds a reference to root_group
630 	 * until elevator exit.
631 */
632 if (bfqq == &bfqd->oom_bfqq)
633 return;
634 /*
635 * Get extra reference to prevent bfqq from being freed in
636 * next possible expire or deactivate.
637 */
638 bfqq->ref++;
639
640 /* If bfqq is empty, then bfq_bfqq_expire also invokes
641 * bfq_del_bfqq_busy, thereby removing bfqq and its entity
642 * from data structures related to current group. Otherwise we
643 * need to remove bfqq explicitly with bfq_deactivate_bfqq, as
644 * we do below.
645 */
646 if (bfqq == bfqd->in_service_queue)
647 bfq_bfqq_expire(bfqd, bfqd->in_service_queue,
648 false, BFQQE_PREEMPTED);
649
650 if (bfq_bfqq_busy(bfqq))
651 bfq_deactivate_bfqq(bfqd, bfqq, false, false);
652 else if (entity->on_st)
653 bfq_put_idle_entity(bfq_entity_service_tree(entity), entity);
654 bfqg_and_blkg_put(bfqq_group(bfqq));
655
656 entity->parent = bfqg->my_entity;
657 entity->sched_data = &bfqg->sched_data;
658 /* pin down bfqg and its associated blkg */
659 bfqg_and_blkg_get(bfqg);
660
661 if (bfq_bfqq_busy(bfqq)) {
662 if (unlikely(!bfqd->nonrot_with_queueing))
663 bfq_pos_tree_add_move(bfqd, bfqq);
664 bfq_activate_bfqq(bfqd, bfqq);
665 }
666
667 if (!bfqd->in_service_queue && !bfqd->rq_in_driver)
668 bfq_schedule_dispatch(bfqd);
669 /* release extra ref taken above, bfqq may happen to be freed now */
670 bfq_put_queue(bfqq);
671 }
672
673 /**
674 * __bfq_bic_change_cgroup - move @bic to @cgroup.
675 * @bfqd: the queue descriptor.
676 * @bic: the bic to move.
677 * @blkcg: the blk-cgroup to move to.
678 *
679  * Move bic to blkcg, assuming that bfqd->lock is held, which makes
680  * sure that the reference to the cgroup is valid across the call (see
681 * comments in bfq_bic_update_cgroup on this issue)
682 *
683 * NOTE: an alternative approach might have been to store the current
684 * cgroup in bfqq and getting a reference to it, reducing the lookup
685 * time here, at the price of slightly more complex code.
686 */
687 static struct bfq_group *__bfq_bic_change_cgroup(struct bfq_data *bfqd,
688 struct bfq_io_cq *bic,
689 struct blkcg *blkcg)
690 {
691 struct bfq_queue *async_bfqq = bic_to_bfqq(bic, 0);
692 struct bfq_queue *sync_bfqq = bic_to_bfqq(bic, 1);
693 struct bfq_group *bfqg;
694 struct bfq_entity *entity;
695
696 bfqg = bfq_find_set_group(bfqd, blkcg);
697
698 if (unlikely(!bfqg))
699 bfqg = bfqd->root_group;
700
701 if (async_bfqq) {
702 entity = &async_bfqq->entity;
703
704 if (entity->sched_data != &bfqg->sched_data) {
705 bic_set_bfqq(bic, NULL, 0);
706 bfq_release_process_ref(bfqd, async_bfqq);
707 }
708 }
709
710 if (sync_bfqq) {
711 entity = &sync_bfqq->entity;
712 if (entity->sched_data != &bfqg->sched_data)
713 bfq_bfqq_move(bfqd, sync_bfqq, bfqg);
714 }
715
716 return bfqg;
717 }
718
719 void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio)
720 {
721 struct bfq_data *bfqd = bic_to_bfqd(bic);
722 struct bfq_group *bfqg = NULL;
723 uint64_t serial_nr;
724
725 rcu_read_lock();
726 serial_nr = __bio_blkcg(bio)->css.serial_nr;
727
728 /*
729 * Check whether blkcg has changed. The condition may trigger
730 	 * spuriously on a newly created bic but there's no harm.
731 */
732 if (unlikely(!bfqd) || likely(bic->blkcg_serial_nr == serial_nr))
733 goto out;
734
735 bfqg = __bfq_bic_change_cgroup(bfqd, bic, __bio_blkcg(bio));
736 /*
737 * Update blkg_path for bfq_log_* functions. We cache this
738 * path, and update it here, for the following
739 * reasons. Operations on blkg objects in blk-cgroup are
740 * protected with the request_queue lock, and not with the
741 * lock that protects the instances of this scheduler
742 * (bfqd->lock). This exposes BFQ to the following sort of
743 * race.
744 *
745 * The blkg_lookup performed in bfq_get_queue, protected
746 * through rcu, may happen to return the address of a copy of
747 * the original blkg. If this is the case, then the
748 * bfqg_and_blkg_get performed in bfq_get_queue, to pin down
749 * the blkg, is useless: it does not prevent blk-cgroup code
750 * from destroying both the original blkg and all objects
751 * directly or indirectly referred by the copy of the
752 * blkg.
753 *
754 * On the bright side, destroy operations on a blkg invoke, as
755 * a first step, hooks of the scheduler associated with the
756 * blkg. And these hooks are executed with bfqd->lock held for
757 * BFQ. As a consequence, for any blkg associated with the
758 * request queue this instance of the scheduler is attached
759 * to, we are guaranteed that such a blkg is not destroyed, and
760 * that all the pointers it contains are consistent, while we
761 * are holding bfqd->lock. A blkg_lookup performed with
762 * bfqd->lock held then returns a fully consistent blkg, which
763 	 * remains consistent as long as this lock is held.
764 *
765 * Thanks to the last fact, and to the fact that: (1) bfqg has
766 * been obtained through a blkg_lookup in the above
767 * assignment, and (2) bfqd->lock is being held, here we can
768 * safely use the policy data for the involved blkg (i.e., the
769 * field bfqg->pd) to get to the blkg associated with bfqg,
770 * and then we can safely use any field of blkg. After we
771 * release bfqd->lock, even just getting blkg through this
772 * bfqg may cause dangling references to be traversed, as
773 * bfqg->pd may not exist any more.
774 *
775 * In view of the above facts, here we cache, in the bfqg, any
776 * blkg data we may need for this bic, and for its associated
777 * bfq_queue. As of now, we need to cache only the path of the
778 * blkg, which is used in the bfq_log_* functions.
779 *
780 * Finally, note that bfqg itself needs to be protected from
781 * destruction on the blkg_free of the original blkg (which
782 * invokes bfq_pd_free). We use an additional private
783 * refcounter for bfqg, to let it disappear only after no
784 * bfq_queue refers to it any longer.
785 */
786 blkg_path(bfqg_to_blkg(bfqg), bfqg->blkg_path, sizeof(bfqg->blkg_path));
787 bic->blkcg_serial_nr = serial_nr;
788 out:
789 rcu_read_unlock();
790 }
791
792 /**
793 * bfq_flush_idle_tree - deactivate any entity on the idle tree of @st.
794 * @st: the service tree being flushed.
795 */
796 static void bfq_flush_idle_tree(struct bfq_service_tree *st)
797 {
798 struct bfq_entity *entity = st->first_idle;
799
800 for (; entity ; entity = st->first_idle)
801 __bfq_deactivate_entity(entity, false);
802 }
803
804 /**
805 * bfq_reparent_leaf_entity - move leaf entity to the root_group.
806 * @bfqd: the device data structure with the root group.
807 * @entity: the entity to move, if entity is a leaf; or the parent entity
808  * of an active leaf entity to move, if entity is not a leaf.
 * @ioprio_class: I/O priority class of the active trees to descend to find the leaf.
809 */
810 static void bfq_reparent_leaf_entity(struct bfq_data *bfqd,
811 struct bfq_entity *entity,
812 int ioprio_class)
813 {
814 struct bfq_queue *bfqq;
815 struct bfq_entity *child_entity = entity;
816
817 while (child_entity->my_sched_data) { /* leaf not reached yet */
818 struct bfq_sched_data *child_sd = child_entity->my_sched_data;
819 struct bfq_service_tree *child_st = child_sd->service_tree +
820 ioprio_class;
821 struct rb_root *child_active = &child_st->active;
822
823 child_entity = bfq_entity_of(rb_first(child_active));
824
825 if (!child_entity)
826 child_entity = child_sd->in_service_entity;
827 }
828
829 bfqq = bfq_entity_to_bfqq(child_entity);
830 bfq_bfqq_move(bfqd, bfqq, bfqd->root_group);
831 }
832
833 /**
834 * bfq_reparent_active_queues - move to the root group all active queues.
835 * @bfqd: the device data structure with the root group.
836 * @bfqg: the group to move from.
837  * @st: the service tree to start the search from.
 * @ioprio_class: the I/O priority class of the entities in @st.
838 */
839 static void bfq_reparent_active_queues(struct bfq_data *bfqd,
840 struct bfq_group *bfqg,
841 struct bfq_service_tree *st,
842 int ioprio_class)
843 {
844 struct rb_root *active = &st->active;
845 struct bfq_entity *entity;
846
847 while ((entity = bfq_entity_of(rb_first(active))))
848 bfq_reparent_leaf_entity(bfqd, entity, ioprio_class);
849
850 if (bfqg->sched_data.in_service_entity)
851 bfq_reparent_leaf_entity(bfqd,
852 bfqg->sched_data.in_service_entity,
853 ioprio_class);
854 }
855
856 /**
857 * bfq_pd_offline - deactivate the entity associated with @pd,
858  * and reparent its child entities.
859 * @pd: descriptor of the policy going offline.
860 *
861 * blkio already grabs the queue_lock for us, so no need to use
862 * RCU-based magic
863 */
864 static void bfq_pd_offline(struct blkg_policy_data *pd)
865 {
866 struct bfq_service_tree *st;
867 struct bfq_group *bfqg = pd_to_bfqg(pd);
868 struct bfq_data *bfqd = bfqg->bfqd;
869 struct bfq_entity *entity = bfqg->my_entity;
870 unsigned long flags;
871 int i;
872
873 spin_lock_irqsave(&bfqd->lock, flags);
874
875 if (!entity) /* root group */
876 goto put_async_queues;
877
878 /*
879 * Empty all service_trees belonging to this group before
880 * deactivating the group itself.
881 */
882 for (i = 0; i < BFQ_IOPRIO_CLASSES; i++) {
883 st = bfqg->sched_data.service_tree + i;
884
885 /*
886 * It may happen that some queues are still active
887 * (busy) upon group destruction (if the corresponding
888 * processes have been forced to terminate). We move
889 * all the leaf entities corresponding to these queues
890 * to the root_group.
891 * Also, it may happen that the group has an entity
892 * in service, which is disconnected from the active
893 * tree: it must be moved, too.
894 * There is no need to put the sync queues, as the
895 * scheduler has taken no reference.
896 */
897 bfq_reparent_active_queues(bfqd, bfqg, st, i);
898
899 /*
900 * The idle tree may still contain bfq_queues
901 * belonging to exited task because they never
902 * migrated to a different cgroup from the one being
903 * destroyed now. In addition, even
904 * bfq_reparent_active_queues() may happen to add some
905 * entities to the idle tree. It happens if, in some
906 * of the calls to bfq_bfqq_move() performed by
907 * bfq_reparent_active_queues(), the queue to move is
908 * empty and gets expired.
909 */
910 bfq_flush_idle_tree(st);
911 }
912
913 __bfq_deactivate_entity(entity, false);
914
915 put_async_queues:
916 bfq_put_async_queues(bfqd, bfqg);
917
918 spin_unlock_irqrestore(&bfqd->lock, flags);
919 /*
920 * @blkg is going offline and will be ignored by
921 * blkg_[rw]stat_recursive_sum(). Transfer stats to the parent so
922 * that they don't get lost. If IOs complete after this point, the
923 * stats for them will be lost. Oh well...
924 */
925 bfqg_stats_xfer_dead(bfqg);
926 }
927
928 void bfq_end_wr_async(struct bfq_data *bfqd)
929 {
930 struct blkcg_gq *blkg;
931
932 list_for_each_entry(blkg, &bfqd->queue->blkg_list, q_node) {
933 struct bfq_group *bfqg = blkg_to_bfqg(blkg);
934
935 bfq_end_wr_async_queues(bfqd, bfqg);
936 }
937 bfq_end_wr_async_queues(bfqd, bfqd->root_group);
938 }
939
940 static int bfq_io_show_weight_legacy(struct seq_file *sf, void *v)
941 {
942 struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
943 struct bfq_group_data *bfqgd = blkcg_to_bfqgd(blkcg);
944 unsigned int val = 0;
945
946 if (bfqgd)
947 val = bfqgd->weight;
948
949 seq_printf(sf, "%u\n", val);
950
951 return 0;
952 }
953
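/*
 * prfill callback: print a group's per-device weight; groups without a
 * device-specific weight print nothing.
 */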
954 static u64 bfqg_prfill_weight_device(struct seq_file *sf,
955 struct blkg_policy_data *pd, int off)
956 {
957 struct bfq_group *bfqg = pd_to_bfqg(pd);
958
959 if (!bfqg->entity.dev_weight)
960 return 0;
961 return __blkg_prfill_u64(sf, pd, bfqg->entity.dev_weight);
962 }
963
964 static int bfq_io_show_weight(struct seq_file *sf, void *v)
965 {
966 struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
967 struct bfq_group_data *bfqgd = blkcg_to_bfqgd(blkcg);
968
969 seq_printf(sf, "default %u\n", bfqgd->weight);
970 blkcg_print_blkgs(sf, blkcg, bfqg_prfill_weight_device,
971 &blkcg_policy_bfq, 0, false);
972 return 0;
973 }
974
975 static void bfq_group_set_weight(struct bfq_group *bfqg, u64 weight, u64 dev_weight)
976 {
977 weight = dev_weight ?: weight;
978
979 bfqg->entity.dev_weight = dev_weight;
980 /*
981 * Setting the prio_changed flag of the entity
982 * to 1 with new_weight == weight would re-set
983 * the value of the weight to its ioprio mapping.
984 * Set the flag only if necessary.
985 */
986 if ((unsigned short)weight != bfqg->entity.new_weight) {
987 bfqg->entity.new_weight = (unsigned short)weight;
988 /*
989 * Make sure that the above new value has been
990 * stored in bfqg->entity.new_weight before
991 * setting the prio_changed flag. In fact,
992 * this flag may be read asynchronously (in
993 * critical sections protected by a different
994 * lock than that held here), and finding this
995 * flag set may cause the execution of the code
996 * for updating parameters whose value may
997 * depend also on bfqg->entity.new_weight (in
998 * __bfq_entity_update_weight_prio).
999 * This barrier makes sure that the new value
1000 * of bfqg->entity.new_weight is correctly
1001 * seen in that code.
1002 */
1003 smp_wmb();
1004 bfqg->entity.prio_changed = 1;
1005 }
1006 }
1007
1008 static int bfq_io_set_weight_legacy(struct cgroup_subsys_state *css,
1009 struct cftype *cftype,
1010 u64 val)
1011 {
1012 struct blkcg *blkcg = css_to_blkcg(css);
1013 struct bfq_group_data *bfqgd = blkcg_to_bfqgd(blkcg);
1014 struct blkcg_gq *blkg;
1015 int ret = -ERANGE;
1016
1017 if (val < BFQ_MIN_WEIGHT || val > BFQ_MAX_WEIGHT)
1018 return ret;
1019
1020 ret = 0;
1021 spin_lock_irq(&blkcg->lock);
1022 bfqgd->weight = (unsigned short)val;
1023 hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
1024 struct bfq_group *bfqg = blkg_to_bfqg(blkg);
1025
1026 if (bfqg)
1027 bfq_group_set_weight(bfqg, val, 0);
1028 }
1029 spin_unlock_irq(&blkcg->lock);
1030
1031 return ret;
1032 }
1033
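/*
 * Set a per-device weight: the input is "MAJ:MIN WEIGHT", where WEIGHT may
 * be "default" to drop the device-specific weight.
 */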
1034 static ssize_t bfq_io_set_device_weight(struct kernfs_open_file *of,
1035 char *buf, size_t nbytes,
1036 loff_t off)
1037 {
1038 int ret;
1039 struct blkg_conf_ctx ctx;
1040 struct blkcg *blkcg = css_to_blkcg(of_css(of));
1041 struct bfq_group *bfqg;
1042 u64 v;
1043
1044 ret = blkg_conf_prep(blkcg, &blkcg_policy_bfq, buf, &ctx);
1045 if (ret)
1046 return ret;
1047
1048 if (sscanf(ctx.body, "%llu", &v) == 1) {
1049 /* require "default" on dfl */
1050 ret = -ERANGE;
1051 if (!v)
1052 goto out;
1053 } else if (!strcmp(strim(ctx.body), "default")) {
1054 v = 0;
1055 } else {
1056 ret = -EINVAL;
1057 goto out;
1058 }
1059
1060 bfqg = blkg_to_bfqg(ctx.blkg);
1061
1062 ret = -ERANGE;
1063 if (!v || (v >= BFQ_MIN_WEIGHT && v <= BFQ_MAX_WEIGHT)) {
1064 bfq_group_set_weight(bfqg, bfqg->entity.weight, v);
1065 ret = 0;
1066 }
1067 out:
1068 blkg_conf_finish(&ctx);
1069 return ret ?: nbytes;
1070 }
1071
1072 static ssize_t bfq_io_set_weight(struct kernfs_open_file *of,
1073 char *buf, size_t nbytes,
1074 loff_t off)
1075 {
1076 char *endp;
1077 int ret;
1078 u64 v;
1079
1080 buf = strim(buf);
1081
1082 /* "WEIGHT" or "default WEIGHT" sets the default weight */
1083 v = simple_strtoull(buf, &endp, 0);
1084 if (*endp == '\0' || sscanf(buf, "default %llu", &v) == 1) {
1085 ret = bfq_io_set_weight_legacy(of_css(of), NULL, v);
1086 return ret ?: nbytes;
1087 }
1088
1089 return bfq_io_set_device_weight(of, buf, nbytes, off);
1090 }
1091
1092 #ifdef CONFIG_BFQ_CGROUP_DEBUG
1093 static int bfqg_print_stat(struct seq_file *sf, void *v)
1094 {
1095 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_stat,
1096 &blkcg_policy_bfq, seq_cft(sf)->private, false);
1097 return 0;
1098 }
1099
1100 static int bfqg_print_rwstat(struct seq_file *sf, void *v)
1101 {
1102 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_rwstat,
1103 &blkcg_policy_bfq, seq_cft(sf)->private, true);
1104 return 0;
1105 }
1106
1107 static u64 bfqg_prfill_stat_recursive(struct seq_file *sf,
1108 struct blkg_policy_data *pd, int off)
1109 {
1110 struct blkcg_gq *blkg = pd_to_blkg(pd);
1111 struct blkcg_gq *pos_blkg;
1112 struct cgroup_subsys_state *pos_css;
1113 u64 sum = 0;
1114
1115 lockdep_assert_held(&blkg->q->queue_lock);
1116
1117 rcu_read_lock();
1118 blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
1119 struct bfq_stat *stat;
1120
1121 if (!pos_blkg->online)
1122 continue;
1123
1124 stat = (void *)blkg_to_pd(pos_blkg, &blkcg_policy_bfq) + off;
1125 sum += bfq_stat_read(stat) + atomic64_read(&stat->aux_cnt);
1126 }
1127 rcu_read_unlock();
1128
1129 return __blkg_prfill_u64(sf, pd, sum);
1130 }
1131
1132 static u64 bfqg_prfill_rwstat_recursive(struct seq_file *sf,
1133 struct blkg_policy_data *pd, int off)
1134 {
1135 struct blkg_rwstat_sample sum;
1136
1137 blkg_rwstat_recursive_sum(pd_to_blkg(pd), &blkcg_policy_bfq, off, &sum);
1138 return __blkg_prfill_rwstat(sf, pd, &sum);
1139 }
1140
1141 static int bfqg_print_stat_recursive(struct seq_file *sf, void *v)
1142 {
1143 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1144 bfqg_prfill_stat_recursive, &blkcg_policy_bfq,
1145 seq_cft(sf)->private, false);
1146 return 0;
1147 }
1148
1149 static int bfqg_print_rwstat_recursive(struct seq_file *sf, void *v)
1150 {
1151 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1152 bfqg_prfill_rwstat_recursive, &blkcg_policy_bfq,
1153 seq_cft(sf)->private, true);
1154 return 0;
1155 }
1156
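/* Report the blkg's byte counter as a number of 512-byte sectors. */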
1157 static u64 bfqg_prfill_sectors(struct seq_file *sf, struct blkg_policy_data *pd,
1158 int off)
1159 {
1160 u64 sum = blkg_rwstat_total(&pd->blkg->stat_bytes);
1161
1162 return __blkg_prfill_u64(sf, pd, sum >> 9);
1163 }
1164
1165 static int bfqg_print_stat_sectors(struct seq_file *sf, void *v)
1166 {
1167 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1168 bfqg_prfill_sectors, &blkcg_policy_bfq, 0, false);
1169 return 0;
1170 }
1171
1172 static u64 bfqg_prfill_sectors_recursive(struct seq_file *sf,
1173 struct blkg_policy_data *pd, int off)
1174 {
1175 struct blkg_rwstat_sample tmp;
1176
1177 blkg_rwstat_recursive_sum(pd->blkg, NULL,
1178 offsetof(struct blkcg_gq, stat_bytes), &tmp);
1179
1180 return __blkg_prfill_u64(sf, pd,
1181 (tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE]) >> 9);
1182 }
1183
1184 static int bfqg_print_stat_sectors_recursive(struct seq_file *sf, void *v)
1185 {
1186 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1187 bfqg_prfill_sectors_recursive, &blkcg_policy_bfq, 0,
1188 false);
1189 return 0;
1190 }
1191
1192 static u64 bfqg_prfill_avg_queue_size(struct seq_file *sf,
1193 struct blkg_policy_data *pd, int off)
1194 {
1195 struct bfq_group *bfqg = pd_to_bfqg(pd);
1196 u64 samples = bfq_stat_read(&bfqg->stats.avg_queue_size_samples);
1197 u64 v = 0;
1198
1199 if (samples) {
1200 v = bfq_stat_read(&bfqg->stats.avg_queue_size_sum);
1201 v = div64_u64(v, samples);
1202 }
1203 __blkg_prfill_u64(sf, pd, v);
1204 return 0;
1205 }
1206
1207 /* print avg_queue_size */
1208 static int bfqg_print_avg_queue_size(struct seq_file *sf, void *v)
1209 {
1210 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1211 bfqg_prfill_avg_queue_size, &blkcg_policy_bfq,
1212 0, false);
1213 return 0;
1214 }
1215 #endif /* CONFIG_BFQ_CGROUP_DEBUG */
1216
1217 struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)
1218 {
1219 int ret;
1220
1221 ret = blkcg_activate_policy(bfqd->queue, &blkcg_policy_bfq);
1222 if (ret)
1223 return NULL;
1224
1225 return blkg_to_bfqg(bfqd->queue->root_blkg);
1226 }
1227
1228 struct blkcg_policy blkcg_policy_bfq = {
1229 .dfl_cftypes = bfq_blkg_files,
1230 .legacy_cftypes = bfq_blkcg_legacy_files,
1231
1232 .cpd_alloc_fn = bfq_cpd_alloc,
1233 .cpd_init_fn = bfq_cpd_init,
1234 .cpd_bind_fn = bfq_cpd_init,
1235 .cpd_free_fn = bfq_cpd_free,
1236
1237 .pd_alloc_fn = bfq_pd_alloc,
1238 .pd_init_fn = bfq_pd_init,
1239 .pd_offline_fn = bfq_pd_offline,
1240 .pd_free_fn = bfq_pd_free,
1241 .pd_reset_stats_fn = bfq_pd_reset_stats,
1242 };
1243
1244 struct cftype bfq_blkcg_legacy_files[] = {
1245 {
1246 .name = "bfq.weight",
1247 .flags = CFTYPE_NOT_ON_ROOT,
1248 .seq_show = bfq_io_show_weight_legacy,
1249 .write_u64 = bfq_io_set_weight_legacy,
1250 },
1251 {
1252 .name = "bfq.weight_device",
1253 .flags = CFTYPE_NOT_ON_ROOT,
1254 .seq_show = bfq_io_show_weight,
1255 .write = bfq_io_set_weight,
1256 },
1257
1258 /* statistics, covers only the tasks in the bfqg */
1259 {
1260 .name = "bfq.io_service_bytes",
1261 .private = (unsigned long)&blkcg_policy_bfq,
1262 .seq_show = blkg_print_stat_bytes,
1263 },
1264 {
1265 .name = "bfq.io_serviced",
1266 .private = (unsigned long)&blkcg_policy_bfq,
1267 .seq_show = blkg_print_stat_ios,
1268 },
1269 #ifdef CONFIG_BFQ_CGROUP_DEBUG
1270 {
1271 .name = "bfq.time",
1272 .private = offsetof(struct bfq_group, stats.time),
1273 .seq_show = bfqg_print_stat,
1274 },
1275 {
1276 .name = "bfq.sectors",
1277 .seq_show = bfqg_print_stat_sectors,
1278 },
1279 {
1280 .name = "bfq.io_service_time",
1281 .private = offsetof(struct bfq_group, stats.service_time),
1282 .seq_show = bfqg_print_rwstat,
1283 },
1284 {
1285 .name = "bfq.io_wait_time",
1286 .private = offsetof(struct bfq_group, stats.wait_time),
1287 .seq_show = bfqg_print_rwstat,
1288 },
1289 {
1290 .name = "bfq.io_merged",
1291 .private = offsetof(struct bfq_group, stats.merged),
1292 .seq_show = bfqg_print_rwstat,
1293 },
1294 {
1295 .name = "bfq.io_queued",
1296 .private = offsetof(struct bfq_group, stats.queued),
1297 .seq_show = bfqg_print_rwstat,
1298 },
1299 #endif /* CONFIG_BFQ_CGROUP_DEBUG */
1300
1301 /* the same statistics which cover the bfqg and its descendants */
1302 {
1303 .name = "bfq.io_service_bytes_recursive",
1304 .private = (unsigned long)&blkcg_policy_bfq,
1305 .seq_show = blkg_print_stat_bytes_recursive,
1306 },
1307 {
1308 .name = "bfq.io_serviced_recursive",
1309 .private = (unsigned long)&blkcg_policy_bfq,
1310 .seq_show = blkg_print_stat_ios_recursive,
1311 },
1312 #ifdef CONFIG_BFQ_CGROUP_DEBUG
1313 {
1314 .name = "bfq.time_recursive",
1315 .private = offsetof(struct bfq_group, stats.time),
1316 .seq_show = bfqg_print_stat_recursive,
1317 },
1318 {
1319 .name = "bfq.sectors_recursive",
1320 .seq_show = bfqg_print_stat_sectors_recursive,
1321 },
1322 {
1323 .name = "bfq.io_service_time_recursive",
1324 .private = offsetof(struct bfq_group, stats.service_time),
1325 .seq_show = bfqg_print_rwstat_recursive,
1326 },
1327 {
1328 .name = "bfq.io_wait_time_recursive",
1329 .private = offsetof(struct bfq_group, stats.wait_time),
1330 .seq_show = bfqg_print_rwstat_recursive,
1331 },
1332 {
1333 .name = "bfq.io_merged_recursive",
1334 .private = offsetof(struct bfq_group, stats.merged),
1335 .seq_show = bfqg_print_rwstat_recursive,
1336 },
1337 {
1338 .name = "bfq.io_queued_recursive",
1339 .private = offsetof(struct bfq_group, stats.queued),
1340 .seq_show = bfqg_print_rwstat_recursive,
1341 },
1342 {
1343 .name = "bfq.avg_queue_size",
1344 .seq_show = bfqg_print_avg_queue_size,
1345 },
1346 {
1347 .name = "bfq.group_wait_time",
1348 .private = offsetof(struct bfq_group, stats.group_wait_time),
1349 .seq_show = bfqg_print_stat,
1350 },
1351 {
1352 .name = "bfq.idle_time",
1353 .private = offsetof(struct bfq_group, stats.idle_time),
1354 .seq_show = bfqg_print_stat,
1355 },
1356 {
1357 .name = "bfq.empty_time",
1358 .private = offsetof(struct bfq_group, stats.empty_time),
1359 .seq_show = bfqg_print_stat,
1360 },
1361 {
1362 .name = "bfq.dequeue",
1363 .private = offsetof(struct bfq_group, stats.dequeue),
1364 .seq_show = bfqg_print_stat,
1365 },
1366 #endif /* CONFIG_BFQ_CGROUP_DEBUG */
1367 { } /* terminate */
1368 };
1369
1370 struct cftype bfq_blkg_files[] = {
1371 {
1372 .name = "bfq.weight",
1373 .flags = CFTYPE_NOT_ON_ROOT,
1374 .seq_show = bfq_io_show_weight,
1375 .write = bfq_io_set_weight,
1376 },
1377 {} /* terminate */
1378 };
1379
1380 #else /* CONFIG_BFQ_GROUP_IOSCHED */
1381
1382 void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
1383 struct bfq_group *bfqg) {}
1384
1385 void bfq_init_entity(struct bfq_entity *entity, struct bfq_group *bfqg)
1386 {
1387 struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
1388
1389 entity->weight = entity->new_weight;
1390 entity->orig_weight = entity->new_weight;
1391 if (bfqq) {
1392 bfqq->ioprio = bfqq->new_ioprio;
1393 bfqq->ioprio_class = bfqq->new_ioprio_class;
1394 }
1395 entity->sched_data = &bfqg->sched_data;
1396 }
1397
1398 void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio) {}
1399
1400 void bfq_end_wr_async(struct bfq_data *bfqd)
1401 {
1402 bfq_end_wr_async_queues(bfqd, bfqd->root_group);
1403 }
1404
1405 struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd, struct blkcg *blkcg)
1406 {
1407 return bfqd->root_group;
1408 }
1409
1410 struct bfq_group *bfqq_group(struct bfq_queue *bfqq)
1411 {
1412 return bfqq->bfqd->root_group;
1413 }
1414
1415 void bfqg_and_blkg_get(struct bfq_group *bfqg) {}
1416
1417 void bfqg_and_blkg_put(struct bfq_group *bfqg) {}
1418
1419 struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)
1420 {
1421 struct bfq_group *bfqg;
1422 int i;
1423
1424 bfqg = kmalloc_node(sizeof(*bfqg), GFP_KERNEL | __GFP_ZERO, node);
1425 if (!bfqg)
1426 return NULL;
1427
1428 for (i = 0; i < BFQ_IOPRIO_CLASSES; i++)
1429 bfqg->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT;
1430
1431 return bfqg;
1432 }
1433 #endif /* CONFIG_BFQ_GROUP_IOSCHED */