/*
 * cgroups support for the BFQ I/O scheduler.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/cgroup.h>
#include <linux/elevator.h>
#include <linux/ktime.h>
#include <linux/rbtree.h>
#include <linux/ioprio.h>
#include <linux/sbitmap.h>
#include <linux/delay.h>

#include "bfq-iosched.h"

#ifdef CONFIG_BFQ_GROUP_IOSCHED

/* bfqg stats flags */
enum bfqg_stats_flags {
	BFQG_stats_waiting = 0,
	BFQG_stats_idling,
	BFQG_stats_empty,
};

#define BFQG_FLAG_FNS(name)						\
static void bfqg_stats_mark_##name(struct bfqg_stats *stats)		\
{									\
	stats->flags |= (1 << BFQG_stats_##name);			\
}									\
static void bfqg_stats_clear_##name(struct bfqg_stats *stats)		\
{									\
	stats->flags &= ~(1 << BFQG_stats_##name);			\
}									\
static int bfqg_stats_##name(struct bfqg_stats *stats)			\
{									\
	return (stats->flags & (1 << BFQG_stats_##name)) != 0;		\
}									\

BFQG_FLAG_FNS(waiting)
BFQG_FLAG_FNS(idling)
BFQG_FLAG_FNS(empty)
#undef BFQG_FLAG_FNS
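
/*
 * For reference, each BFQG_FLAG_FNS(name) invocation above generates
 * three small helpers; e.g., BFQG_FLAG_FNS(waiting) expands to
 * bfqg_stats_mark_waiting(), bfqg_stats_clear_waiting() and
 * bfqg_stats_waiting(), which set, clear and test the
 * BFQG_stats_waiting bit in stats->flags, respectively.
 */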

/* This should be called with the queue_lock held. */
static void bfqg_stats_update_group_wait_time(struct bfqg_stats *stats)
{
	unsigned long long now;

	if (!bfqg_stats_waiting(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_group_wait_time))
		blkg_stat_add(&stats->group_wait_time,
			      now - stats->start_group_wait_time);
	bfqg_stats_clear_waiting(stats);
}

/* This should be called with the queue_lock held. */
static void bfqg_stats_set_start_group_wait_time(struct bfq_group *bfqg,
						 struct bfq_group *curr_bfqg)
{
	struct bfqg_stats *stats = &bfqg->stats;

	if (bfqg_stats_waiting(stats))
		return;
	if (bfqg == curr_bfqg)
		return;
	stats->start_group_wait_time = sched_clock();
	bfqg_stats_mark_waiting(stats);
}

/* This should be called with the queue_lock held. */
static void bfqg_stats_end_empty_time(struct bfqg_stats *stats)
{
	unsigned long long now;

	if (!bfqg_stats_empty(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_empty_time))
		blkg_stat_add(&stats->empty_time,
			      now - stats->start_empty_time);
	bfqg_stats_clear_empty(stats);
}

void bfqg_stats_update_dequeue(struct bfq_group *bfqg)
{
	blkg_stat_add(&bfqg->stats.dequeue, 1);
}

void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg)
{
	struct bfqg_stats *stats = &bfqg->stats;

	if (blkg_rwstat_total(&stats->queued))
		return;

	/*
	 * group is already marked empty. This can happen if bfqq got new
	 * request in parent group and moved to this group while being added
	 * to service tree. Just ignore the event and move on.
	 */
	if (bfqg_stats_empty(stats))
		return;

	stats->start_empty_time = sched_clock();
	bfqg_stats_mark_empty(stats);
}

void bfqg_stats_update_idle_time(struct bfq_group *bfqg)
{
	struct bfqg_stats *stats = &bfqg->stats;

	if (bfqg_stats_idling(stats)) {
		unsigned long long now = sched_clock();

		if (time_after64(now, stats->start_idle_time))
			blkg_stat_add(&stats->idle_time,
				      now - stats->start_idle_time);
		bfqg_stats_clear_idling(stats);
	}
}

void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg)
{
	struct bfqg_stats *stats = &bfqg->stats;

	stats->start_idle_time = sched_clock();
	bfqg_stats_mark_idling(stats);
}

void bfqg_stats_update_avg_queue_size(struct bfq_group *bfqg)
{
	struct bfqg_stats *stats = &bfqg->stats;

	blkg_stat_add(&stats->avg_queue_size_sum,
		      blkg_rwstat_total(&stats->queued));
	blkg_stat_add(&stats->avg_queue_size_samples, 1);
	bfqg_stats_update_group_wait_time(stats);
}

/*
 * blk-cgroup policy-related handlers
 * The following functions help in converting between blk-cgroup
 * internal structures and BFQ-specific structures.
 */

static struct bfq_group *pd_to_bfqg(struct blkg_policy_data *pd)
{
	return pd ? container_of(pd, struct bfq_group, pd) : NULL;
}

struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg)
{
	return pd_to_blkg(&bfqg->pd);
}

static struct bfq_group *blkg_to_bfqg(struct blkcg_gq *blkg)
{
	return pd_to_bfqg(blkg_to_pd(blkg, &blkcg_policy_bfq));
}
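
/*
 * Note: the conversions above rely on struct bfq_group embedding its
 * blkg_policy_data member 'pd', so container_of() can recover the
 * bfq_group from the policy data that blk-cgroup hands back for
 * &blkcg_policy_bfq.
 */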

/*
 * bfq_group handlers
 * The following functions help in navigating the bfq_group hierarchy
 * by allowing one to find the parent of a bfq_group or the bfq_group
 * associated with a bfq_queue.
 */

static struct bfq_group *bfqg_parent(struct bfq_group *bfqg)
{
	struct blkcg_gq *pblkg = bfqg_to_blkg(bfqg)->parent;

	return pblkg ? blkg_to_bfqg(pblkg) : NULL;
}

struct bfq_group *bfqq_group(struct bfq_queue *bfqq)
{
	struct bfq_entity *group_entity = bfqq->entity.parent;

	return group_entity ? container_of(group_entity, struct bfq_group,
					   entity) :
			      bfqq->bfqd->root_group;
}

/*
 * The following two functions handle get and put of a bfq_group by
 * wrapping the related blk-cgroup hooks.
 */

static void bfqg_get(struct bfq_group *bfqg)
{
	blkg_get(bfqg_to_blkg(bfqg));
}

void bfqg_put(struct bfq_group *bfqg)
{
	blkg_put(bfqg_to_blkg(bfqg));
}

void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq,
			      unsigned int op)
{
	blkg_rwstat_add(&bfqg->stats.queued, op, 1);
	bfqg_stats_end_empty_time(&bfqg->stats);
	if (bfqq != ((struct bfq_data *)bfqg->bfqd)->in_service_queue)
		bfqg_stats_set_start_group_wait_time(bfqg, bfqq_group(bfqq));
}

void bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op)
{
	blkg_rwstat_add(&bfqg->stats.queued, op, -1);
}

void bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op)
{
	blkg_rwstat_add(&bfqg->stats.merged, op, 1);
}

void bfqg_stats_update_completion(struct bfq_group *bfqg, uint64_t start_time,
				  uint64_t io_start_time, unsigned int op)
{
	struct bfqg_stats *stats = &bfqg->stats;
	unsigned long long now = sched_clock();

	if (time_after64(now, io_start_time))
		blkg_rwstat_add(&stats->service_time, op,
				now - io_start_time);
	if (time_after64(io_start_time, start_time))
		blkg_rwstat_add(&stats->wait_time, op,
				io_start_time - start_time);
}
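
/*
 * A worked example of the two deltas above, assuming the usual ordering
 * start_time <= io_start_time <= now (all in sched_clock() units): a
 * request that entered the scheduler at t=10, was dispatched to the
 * device at t=40 and completed at t=100 contributes 40 - 10 = 30 of
 * wait_time (time spent queued in the scheduler) and 100 - 40 = 60 of
 * service_time (time spent on the device).
 */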

/* @stats = 0 */
static void bfqg_stats_reset(struct bfqg_stats *stats)
{
	/* queued stats shouldn't be cleared */
	blkg_rwstat_reset(&stats->merged);
	blkg_rwstat_reset(&stats->service_time);
	blkg_rwstat_reset(&stats->wait_time);
	blkg_stat_reset(&stats->time);
	blkg_stat_reset(&stats->avg_queue_size_sum);
	blkg_stat_reset(&stats->avg_queue_size_samples);
	blkg_stat_reset(&stats->dequeue);
	blkg_stat_reset(&stats->group_wait_time);
	blkg_stat_reset(&stats->idle_time);
	blkg_stat_reset(&stats->empty_time);
}

/* @to += @from */
static void bfqg_stats_add_aux(struct bfqg_stats *to, struct bfqg_stats *from)
{
	if (!to || !from)
		return;

	/* queued stats shouldn't be cleared */
	blkg_rwstat_add_aux(&to->merged, &from->merged);
	blkg_rwstat_add_aux(&to->service_time, &from->service_time);
	blkg_rwstat_add_aux(&to->wait_time, &from->wait_time);
	blkg_stat_add_aux(&to->time, &from->time);
	blkg_stat_add_aux(&to->avg_queue_size_sum, &from->avg_queue_size_sum);
	blkg_stat_add_aux(&to->avg_queue_size_samples,
			  &from->avg_queue_size_samples);
	blkg_stat_add_aux(&to->dequeue, &from->dequeue);
	blkg_stat_add_aux(&to->group_wait_time, &from->group_wait_time);
	blkg_stat_add_aux(&to->idle_time, &from->idle_time);
	blkg_stat_add_aux(&to->empty_time, &from->empty_time);
}

/*
 * Transfer @bfqg's stats to its parent's aux counts so that the ancestors'
 * recursive stats can still account for the amount used by this bfqg after
 * it's gone.
 */
static void bfqg_stats_xfer_dead(struct bfq_group *bfqg)
{
	struct bfq_group *parent;

	if (!bfqg) /* root_group */
		return;

	parent = bfqg_parent(bfqg);

	lockdep_assert_held(bfqg_to_blkg(bfqg)->q->queue_lock);

	if (unlikely(!parent))
		return;

	bfqg_stats_add_aux(&parent->stats, &bfqg->stats);
	bfqg_stats_reset(&bfqg->stats);
}

void bfq_init_entity(struct bfq_entity *entity, struct bfq_group *bfqg)
{
	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);

	entity->weight = entity->new_weight;
	entity->orig_weight = entity->new_weight;
	if (bfqq) {
		bfqq->ioprio = bfqq->new_ioprio;
		bfqq->ioprio_class = bfqq->new_ioprio_class;
		bfqg_get(bfqg);
	}
	entity->parent = bfqg->my_entity; /* NULL for root group */
	entity->sched_data = &bfqg->sched_data;
}

static void bfqg_stats_exit(struct bfqg_stats *stats)
{
	blkg_rwstat_exit(&stats->merged);
	blkg_rwstat_exit(&stats->service_time);
	blkg_rwstat_exit(&stats->wait_time);
	blkg_rwstat_exit(&stats->queued);
	blkg_stat_exit(&stats->time);
	blkg_stat_exit(&stats->avg_queue_size_sum);
	blkg_stat_exit(&stats->avg_queue_size_samples);
	blkg_stat_exit(&stats->dequeue);
	blkg_stat_exit(&stats->group_wait_time);
	blkg_stat_exit(&stats->idle_time);
	blkg_stat_exit(&stats->empty_time);
}

static int bfqg_stats_init(struct bfqg_stats *stats, gfp_t gfp)
{
	if (blkg_rwstat_init(&stats->merged, gfp) ||
	    blkg_rwstat_init(&stats->service_time, gfp) ||
	    blkg_rwstat_init(&stats->wait_time, gfp) ||
	    blkg_rwstat_init(&stats->queued, gfp) ||
	    blkg_stat_init(&stats->time, gfp) ||
	    blkg_stat_init(&stats->avg_queue_size_sum, gfp) ||
	    blkg_stat_init(&stats->avg_queue_size_samples, gfp) ||
	    blkg_stat_init(&stats->dequeue, gfp) ||
	    blkg_stat_init(&stats->group_wait_time, gfp) ||
	    blkg_stat_init(&stats->idle_time, gfp) ||
	    blkg_stat_init(&stats->empty_time, gfp)) {
		bfqg_stats_exit(stats);
		return -ENOMEM;
	}

	return 0;
}

static struct bfq_group_data *cpd_to_bfqgd(struct blkcg_policy_data *cpd)
{
	return cpd ? container_of(cpd, struct bfq_group_data, pd) : NULL;
}

static struct bfq_group_data *blkcg_to_bfqgd(struct blkcg *blkcg)
{
	return cpd_to_bfqgd(blkcg_to_cpd(blkcg, &blkcg_policy_bfq));
}

struct blkcg_policy_data *bfq_cpd_alloc(gfp_t gfp)
{
	struct bfq_group_data *bgd;

	bgd = kzalloc(sizeof(*bgd), gfp);
	if (!bgd)
		return NULL;
	return &bgd->pd;
}

void bfq_cpd_init(struct blkcg_policy_data *cpd)
{
	struct bfq_group_data *d = cpd_to_bfqgd(cpd);

	d->weight = cgroup_subsys_on_dfl(io_cgrp_subsys) ?
		CGROUP_WEIGHT_DFL : BFQ_WEIGHT_LEGACY_DFL;
}
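
/*
 * Note: bfq_cpd_init() picks the default weight according to the cgroup
 * hierarchy mode: CGROUP_WEIGHT_DFL when the io controller is on the
 * unified (v2) hierarchy, BFQ_WEIGHT_LEGACY_DFL on the legacy (v1)
 * blkio hierarchy. The same function is also installed as the
 * cpd_bind_fn in blkcg_policy_bfq below, so the default is re-evaluated
 * when the controller is bound to a different hierarchy.
 */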

void bfq_cpd_free(struct blkcg_policy_data *cpd)
{
	kfree(cpd_to_bfqgd(cpd));
}

struct blkg_policy_data *bfq_pd_alloc(gfp_t gfp, int node)
{
	struct bfq_group *bfqg;

	bfqg = kzalloc_node(sizeof(*bfqg), gfp, node);
	if (!bfqg)
		return NULL;

	if (bfqg_stats_init(&bfqg->stats, gfp)) {
		kfree(bfqg);
		return NULL;
	}

	return &bfqg->pd;
}

void bfq_pd_init(struct blkg_policy_data *pd)
{
	struct blkcg_gq *blkg = pd_to_blkg(pd);
	struct bfq_group *bfqg = blkg_to_bfqg(blkg);
	struct bfq_data *bfqd = blkg->q->elevator->elevator_data;
	struct bfq_entity *entity = &bfqg->entity;
	struct bfq_group_data *d = blkcg_to_bfqgd(blkg->blkcg);

	entity->orig_weight = entity->weight = entity->new_weight = d->weight;
	entity->my_sched_data = &bfqg->sched_data;
	/* the root_group's my_entity will be set to NULL in bfq_init_queue() */
	bfqg->my_entity = entity;
	bfqg->bfqd = bfqd;
	bfqg->active_entities = 0;
	bfqg->rq_pos_tree = RB_ROOT;
}

void bfq_pd_free(struct blkg_policy_data *pd)
{
	struct bfq_group *bfqg = pd_to_bfqg(pd);

	bfqg_stats_exit(&bfqg->stats);
	kfree(bfqg);
}

void bfq_pd_reset_stats(struct blkg_policy_data *pd)
{
	struct bfq_group *bfqg = pd_to_bfqg(pd);

	bfqg_stats_reset(&bfqg->stats);
}

static void bfq_group_set_parent(struct bfq_group *bfqg,
				 struct bfq_group *parent)
{
	struct bfq_entity *entity;

	entity = &bfqg->entity;
	entity->parent = parent->my_entity;
	entity->sched_data = &parent->sched_data;
}

static struct bfq_group *bfq_lookup_bfqg(struct bfq_data *bfqd,
					 struct blkcg *blkcg)
{
	struct blkcg_gq *blkg;

	blkg = blkg_lookup(blkcg, bfqd->queue);
	if (likely(blkg))
		return blkg_to_bfqg(blkg);
	return NULL;
}

struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd,
				     struct blkcg *blkcg)
{
	struct bfq_group *bfqg, *parent;
	struct bfq_entity *entity;

	bfqg = bfq_lookup_bfqg(bfqd, blkcg);

	if (unlikely(!bfqg))
		return NULL;

	/*
	 * Update chain of bfq_groups as we might be handling a leaf group
	 * which, along with some of its relatives, has not been hooked yet
	 * to the private hierarchy of BFQ.
	 */
	entity = &bfqg->entity;
	for_each_entity(entity) {
		bfqg = container_of(entity, struct bfq_group, entity);
		if (bfqg != bfqd->root_group) {
			parent = bfqg_parent(bfqg);
			if (!parent)
				parent = bfqd->root_group;
			bfq_group_set_parent(bfqg, parent);
		}
	}

	return bfqg;
}

/**
 * bfq_bfqq_move - migrate @bfqq to @bfqg.
 * @bfqd: queue descriptor.
 * @bfqq: the queue to move.
 * @bfqg: the group to move to.
 *
 * Move @bfqq to @bfqg, deactivating it from its old group and reactivating
 * it on the new one. Avoid putting the entity on the old group idle tree.
 *
 * Must be called under the queue lock; the cgroup owning @bfqg must
 * not disappear (by now this just means that we are called under
 * rcu_read_lock()).
 */
void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
		   struct bfq_group *bfqg)
{
	struct bfq_entity *entity = &bfqq->entity;

	/*
	 * If bfqq is empty, then bfq_bfqq_expire also invokes
	 * bfq_del_bfqq_busy, thereby removing bfqq and its entity
	 * from data structures related to the current group. Otherwise we
	 * need to remove bfqq explicitly with bfq_deactivate_bfqq, as
	 * we do below.
	 */
	if (bfqq == bfqd->in_service_queue)
		bfq_bfqq_expire(bfqd, bfqd->in_service_queue,
				false, BFQQE_PREEMPTED);

	if (bfq_bfqq_busy(bfqq))
		bfq_deactivate_bfqq(bfqd, bfqq, false, false);
	else if (entity->on_st)
		bfq_put_idle_entity(bfq_entity_service_tree(entity), entity);
	bfqg_put(bfqq_group(bfqq));

	/*
	 * Here we use a reference to bfqg. We don't need a refcounter
	 * as the cgroup reference will not be dropped, so that its
	 * destroy() callback will not be invoked.
	 */
	entity->parent = bfqg->my_entity;
	entity->sched_data = &bfqg->sched_data;
	bfqg_get(bfqg);

	if (bfq_bfqq_busy(bfqq)) {
		bfq_pos_tree_add_move(bfqd, bfqq);
		bfq_activate_bfqq(bfqd, bfqq);
	}

	if (!bfqd->in_service_queue && !bfqd->rq_in_driver)
		bfq_schedule_dispatch(bfqd);
}

/**
 * __bfq_bic_change_cgroup - move @bic to @blkcg.
 * @bfqd: the queue descriptor.
 * @bic: the bic to move.
 * @blkcg: the blk-cgroup to move to.
 *
 * Move bic to blkcg, assuming that bfqd->queue is locked; the caller
 * has to make sure that the reference to cgroup is valid across the call.
 *
 * NOTE: an alternative approach might have been to store the current
 * cgroup in bfqq and get a reference to it, reducing the lookup
 * time here, at the price of slightly more complex code.
 */
static struct bfq_group *__bfq_bic_change_cgroup(struct bfq_data *bfqd,
						 struct bfq_io_cq *bic,
						 struct blkcg *blkcg)
{
	struct bfq_queue *async_bfqq = bic_to_bfqq(bic, 0);
	struct bfq_queue *sync_bfqq = bic_to_bfqq(bic, 1);
	struct bfq_group *bfqg;
	struct bfq_entity *entity;

	bfqg = bfq_find_set_group(bfqd, blkcg);

	if (unlikely(!bfqg))
		bfqg = bfqd->root_group;

	if (async_bfqq) {
		entity = &async_bfqq->entity;

		if (entity->sched_data != &bfqg->sched_data) {
			bic_set_bfqq(bic, NULL, 0);
			bfq_log_bfqq(bfqd, async_bfqq,
				     "bic_change_group: %p %d",
				     async_bfqq, async_bfqq->ref);
			bfq_put_queue(async_bfqq);
		}
	}

	if (sync_bfqq) {
		entity = &sync_bfqq->entity;
		if (entity->sched_data != &bfqg->sched_data)
			bfq_bfqq_move(bfqd, sync_bfqq, bfqg);
	}

	return bfqg;
}

void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio)
{
	struct bfq_data *bfqd = bic_to_bfqd(bic);
	struct bfq_group *bfqg = NULL;
	uint64_t serial_nr;

	rcu_read_lock();
	serial_nr = bio_blkcg(bio)->css.serial_nr;

	/*
	 * Check whether blkcg has changed. The condition may trigger
	 * spuriously on a newly created bic but there's no harm.
	 */
	if (unlikely(!bfqd) || likely(bic->blkcg_serial_nr == serial_nr))
		goto out;

	bfqg = __bfq_bic_change_cgroup(bfqd, bic, bio_blkcg(bio));
	bic->blkcg_serial_nr = serial_nr;
out:
	rcu_read_unlock();
}

/**
 * bfq_flush_idle_tree - deactivate any entity on the idle tree of @st.
 * @st: the service tree being flushed.
 */
static void bfq_flush_idle_tree(struct bfq_service_tree *st)
{
	struct bfq_entity *entity = st->first_idle;

	for (; entity ; entity = st->first_idle)
		__bfq_deactivate_entity(entity, false);
}

/**
 * bfq_reparent_leaf_entity - move leaf entity to the root_group.
 * @bfqd: the device data structure with the root group.
 * @entity: the entity to move.
 */
static void bfq_reparent_leaf_entity(struct bfq_data *bfqd,
				     struct bfq_entity *entity)
{
	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);

	bfq_bfqq_move(bfqd, bfqq, bfqd->root_group);
}

/**
 * bfq_reparent_active_entities - move all active entities to the
 * root group.
 * @bfqd: the device data structure with the root group.
 * @bfqg: the group to move from.
 * @st: the service tree with the entities.
 *
 * Needs queue_lock to be taken and reference to be valid over the call.
 */
static void bfq_reparent_active_entities(struct bfq_data *bfqd,
					 struct bfq_group *bfqg,
					 struct bfq_service_tree *st)
{
	struct rb_root *active = &st->active;
	struct bfq_entity *entity = NULL;

	if (!RB_EMPTY_ROOT(&st->active))
		entity = bfq_entity_of(rb_first(active));

	for (; entity ; entity = bfq_entity_of(rb_first(active)))
		bfq_reparent_leaf_entity(bfqd, entity);

	if (bfqg->sched_data.in_service_entity)
		bfq_reparent_leaf_entity(bfqd,
					 bfqg->sched_data.in_service_entity);
}

/**
 * bfq_pd_offline - deactivate the entity associated with @pd,
 *		    and reparent its children entities.
 * @pd: descriptor of the policy going offline.
 *
 * blkio already grabs the queue_lock for us, so no need to use
 * RCU-based magic
 */
void bfq_pd_offline(struct blkg_policy_data *pd)
{
	struct bfq_service_tree *st;
	struct bfq_group *bfqg = pd_to_bfqg(pd);
	struct bfq_data *bfqd = bfqg->bfqd;
	struct bfq_entity *entity = bfqg->my_entity;
	unsigned long flags;
	int i;

	if (!entity) /* root group */
		return;

	spin_lock_irqsave(&bfqd->lock, flags);
	/*
	 * Empty all service_trees belonging to this group before
	 * deactivating the group itself.
	 */
	for (i = 0; i < BFQ_IOPRIO_CLASSES; i++) {
		st = bfqg->sched_data.service_tree + i;

		/*
		 * The idle tree may still contain bfq_queues belonging
		 * to exited tasks because they never migrated to a different
		 * cgroup from the one being destroyed now. No one else
		 * can access them so it's safe to act without any lock.
		 */
		bfq_flush_idle_tree(st);

		/*
		 * It may happen that some queues are still active
		 * (busy) upon group destruction (if the corresponding
		 * processes have been forced to terminate). We move
		 * all the leaf entities corresponding to these queues
		 * to the root_group.
		 * Also, it may happen that the group has an entity
		 * in service, which is disconnected from the active
		 * tree: it must be moved, too.
		 * There is no need to put the sync queues, as the
		 * scheduler has taken no reference.
		 */
		bfq_reparent_active_entities(bfqd, bfqg, st);
	}

	__bfq_deactivate_entity(entity, false);
	bfq_put_async_queues(bfqd, bfqg);

	spin_unlock_irqrestore(&bfqd->lock, flags);
	/*
	 * @blkg is going offline and will be ignored by
	 * blkg_[rw]stat_recursive_sum(). Transfer stats to the parent so
	 * that they don't get lost. If IOs complete after this point, the
	 * stats for them will be lost. Oh well...
	 */
	bfqg_stats_xfer_dead(bfqg);
}

void bfq_end_wr_async(struct bfq_data *bfqd)
{
	struct blkcg_gq *blkg;

	list_for_each_entry(blkg, &bfqd->queue->blkg_list, q_node) {
		struct bfq_group *bfqg = blkg_to_bfqg(blkg);

		bfq_end_wr_async_queues(bfqd, bfqg);
	}
	bfq_end_wr_async_queues(bfqd, bfqd->root_group);
}

static int bfq_io_show_weight(struct seq_file *sf, void *v)
{
	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
	struct bfq_group_data *bfqgd = blkcg_to_bfqgd(blkcg);
	unsigned int val = 0;

	if (bfqgd)
		val = bfqgd->weight;

	seq_printf(sf, "%u\n", val);

	return 0;
}

static int bfq_io_set_weight_legacy(struct cgroup_subsys_state *css,
				    struct cftype *cftype,
				    u64 val)
{
	struct blkcg *blkcg = css_to_blkcg(css);
	struct bfq_group_data *bfqgd = blkcg_to_bfqgd(blkcg);
	struct blkcg_gq *blkg;
	int ret = -ERANGE;

	if (val < BFQ_MIN_WEIGHT || val > BFQ_MAX_WEIGHT)
		return ret;

	ret = 0;
	spin_lock_irq(&blkcg->lock);
	bfqgd->weight = (unsigned short)val;
	hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
		struct bfq_group *bfqg = blkg_to_bfqg(blkg);

		if (!bfqg)
			continue;
		/*
		 * Setting the prio_changed flag of the entity
		 * to 1 with new_weight == weight would re-set
		 * the value of the weight to its ioprio mapping.
		 * Set the flag only if necessary.
		 */
		if ((unsigned short)val != bfqg->entity.new_weight) {
			bfqg->entity.new_weight = (unsigned short)val;
			/*
			 * Make sure that the above new value has been
			 * stored in bfqg->entity.new_weight before
			 * setting the prio_changed flag. In fact,
			 * this flag may be read asynchronously (in
			 * critical sections protected by a different
			 * lock than that held here), and finding this
			 * flag set may cause the execution of the code
			 * for updating parameters whose value may
			 * depend also on bfqg->entity.new_weight (in
			 * __bfq_entity_update_weight_prio).
			 * This barrier makes sure that the new value
			 * of bfqg->entity.new_weight is correctly
			 * seen in that code.
			 */
			smp_wmb();
			bfqg->entity.prio_changed = 1;
		}
	}
	spin_unlock_irq(&blkcg->lock);

	return ret;
}

static ssize_t bfq_io_set_weight(struct kernfs_open_file *of,
				 char *buf, size_t nbytes,
				 loff_t off)
{
	u64 weight;
	/* First unsigned long found in the file is used */
	int ret = kstrtoull(strim(buf), 0, &weight);

	if (ret)
		return ret;

	return bfq_io_set_weight_legacy(of_css(of), NULL, weight);
}

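/*
 * Usage sketch (mount points are an assumption; they depend on how the
 * cgroup hierarchies are mounted on a given system): with the cftypes
 * registered below, the weight is exposed as "io.bfq.weight" on the v2
 * hierarchy and as "blkio.bfq.weight" on the legacy blkio hierarchy,
 * e.g.:
 *
 *   echo 500 > /sys/fs/cgroup/mygroup/io.bfq.weight            (cgroup v2)
 *   echo 500 > /sys/fs/cgroup/blkio/mygroup/blkio.bfq.weight   (cgroup v1)
 *
 * Values outside [BFQ_MIN_WEIGHT, BFQ_MAX_WEIGHT] are rejected with
 * -ERANGE by bfq_io_set_weight_legacy() above.
 */
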
static int bfqg_print_stat(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_stat,
			  &blkcg_policy_bfq, seq_cft(sf)->private, false);
	return 0;
}

static int bfqg_print_rwstat(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_rwstat,
			  &blkcg_policy_bfq, seq_cft(sf)->private, true);
	return 0;
}

static u64 bfqg_prfill_stat_recursive(struct seq_file *sf,
				      struct blkg_policy_data *pd, int off)
{
	u64 sum = blkg_stat_recursive_sum(pd_to_blkg(pd),
					  &blkcg_policy_bfq, off);
	return __blkg_prfill_u64(sf, pd, sum);
}

static u64 bfqg_prfill_rwstat_recursive(struct seq_file *sf,
					struct blkg_policy_data *pd, int off)
{
	struct blkg_rwstat sum = blkg_rwstat_recursive_sum(pd_to_blkg(pd),
							   &blkcg_policy_bfq,
							   off);
	return __blkg_prfill_rwstat(sf, pd, &sum);
}

static int bfqg_print_stat_recursive(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  bfqg_prfill_stat_recursive, &blkcg_policy_bfq,
			  seq_cft(sf)->private, false);
	return 0;
}

static int bfqg_print_rwstat_recursive(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  bfqg_prfill_rwstat_recursive, &blkcg_policy_bfq,
			  seq_cft(sf)->private, true);
	return 0;
}

static u64 bfqg_prfill_sectors(struct seq_file *sf, struct blkg_policy_data *pd,
			       int off)
{
	u64 sum = blkg_rwstat_total(&pd->blkg->stat_bytes);

	return __blkg_prfill_u64(sf, pd, sum >> 9);
}

static int bfqg_print_stat_sectors(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  bfqg_prfill_sectors, &blkcg_policy_bfq, 0, false);
	return 0;
}

static u64 bfqg_prfill_sectors_recursive(struct seq_file *sf,
					 struct blkg_policy_data *pd, int off)
{
	struct blkg_rwstat tmp = blkg_rwstat_recursive_sum(pd->blkg, NULL,
					offsetof(struct blkcg_gq, stat_bytes));
	u64 sum = atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_READ]) +
		atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_WRITE]);

	return __blkg_prfill_u64(sf, pd, sum >> 9);
}

static int bfqg_print_stat_sectors_recursive(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  bfqg_prfill_sectors_recursive, &blkcg_policy_bfq, 0,
			  false);
	return 0;
}

static u64 bfqg_prfill_avg_queue_size(struct seq_file *sf,
				      struct blkg_policy_data *pd, int off)
{
	struct bfq_group *bfqg = pd_to_bfqg(pd);
	u64 samples = blkg_stat_read(&bfqg->stats.avg_queue_size_samples);
	u64 v = 0;

	if (samples) {
		v = blkg_stat_read(&bfqg->stats.avg_queue_size_sum);
		v = div64_u64(v, samples);
	}
	__blkg_prfill_u64(sf, pd, v);
	return 0;
}

/* print avg_queue_size */
static int bfqg_print_avg_queue_size(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  bfqg_prfill_avg_queue_size, &blkcg_policy_bfq,
			  0, false);
	return 0;
}

struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)
{
	int ret;

	ret = blkcg_activate_policy(bfqd->queue, &blkcg_policy_bfq);
	if (ret)
		return NULL;

	return blkg_to_bfqg(bfqd->queue->root_blkg);
}

struct blkcg_policy blkcg_policy_bfq = {
	.dfl_cftypes = bfq_blkg_files,
	.legacy_cftypes = bfq_blkcg_legacy_files,

	.cpd_alloc_fn = bfq_cpd_alloc,
	.cpd_init_fn = bfq_cpd_init,
	.cpd_bind_fn = bfq_cpd_init,
	.cpd_free_fn = bfq_cpd_free,

	.pd_alloc_fn = bfq_pd_alloc,
	.pd_init_fn = bfq_pd_init,
	.pd_offline_fn = bfq_pd_offline,
	.pd_free_fn = bfq_pd_free,
	.pd_reset_stats_fn = bfq_pd_reset_stats,
};

struct cftype bfq_blkcg_legacy_files[] = {
	{
		.name = "bfq.weight",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = bfq_io_show_weight,
		.write_u64 = bfq_io_set_weight_legacy,
	},

	/* statistics, covers only the tasks in the bfqg */
	{
		.name = "bfq.time",
		.private = offsetof(struct bfq_group, stats.time),
		.seq_show = bfqg_print_stat,
	},
	{
		.name = "bfq.sectors",
		.seq_show = bfqg_print_stat_sectors,
	},
	{
		.name = "bfq.io_service_bytes",
		.private = (unsigned long)&blkcg_policy_bfq,
		.seq_show = blkg_print_stat_bytes,
	},
	{
		.name = "bfq.io_serviced",
		.private = (unsigned long)&blkcg_policy_bfq,
		.seq_show = blkg_print_stat_ios,
	},
	{
		.name = "bfq.io_service_time",
		.private = offsetof(struct bfq_group, stats.service_time),
		.seq_show = bfqg_print_rwstat,
	},
	{
		.name = "bfq.io_wait_time",
		.private = offsetof(struct bfq_group, stats.wait_time),
		.seq_show = bfqg_print_rwstat,
	},
	{
		.name = "bfq.io_merged",
		.private = offsetof(struct bfq_group, stats.merged),
		.seq_show = bfqg_print_rwstat,
	},
	{
		.name = "bfq.io_queued",
		.private = offsetof(struct bfq_group, stats.queued),
		.seq_show = bfqg_print_rwstat,
	},

	/* the same statistics which cover the bfqg and its descendants */
	{
		.name = "bfq.time_recursive",
		.private = offsetof(struct bfq_group, stats.time),
		.seq_show = bfqg_print_stat_recursive,
	},
	{
		.name = "bfq.sectors_recursive",
		.seq_show = bfqg_print_stat_sectors_recursive,
	},
	{
		.name = "bfq.io_service_bytes_recursive",
		.private = (unsigned long)&blkcg_policy_bfq,
		.seq_show = blkg_print_stat_bytes_recursive,
	},
	{
		.name = "bfq.io_serviced_recursive",
		.private = (unsigned long)&blkcg_policy_bfq,
		.seq_show = blkg_print_stat_ios_recursive,
	},
	{
		.name = "bfq.io_service_time_recursive",
		.private = offsetof(struct bfq_group, stats.service_time),
		.seq_show = bfqg_print_rwstat_recursive,
	},
	{
		.name = "bfq.io_wait_time_recursive",
		.private = offsetof(struct bfq_group, stats.wait_time),
		.seq_show = bfqg_print_rwstat_recursive,
	},
	{
		.name = "bfq.io_merged_recursive",
		.private = offsetof(struct bfq_group, stats.merged),
		.seq_show = bfqg_print_rwstat_recursive,
	},
	{
		.name = "bfq.io_queued_recursive",
		.private = offsetof(struct bfq_group, stats.queued),
		.seq_show = bfqg_print_rwstat_recursive,
	},
	{
		.name = "bfq.avg_queue_size",
		.seq_show = bfqg_print_avg_queue_size,
	},
	{
		.name = "bfq.group_wait_time",
		.private = offsetof(struct bfq_group, stats.group_wait_time),
		.seq_show = bfqg_print_stat,
	},
	{
		.name = "bfq.idle_time",
		.private = offsetof(struct bfq_group, stats.idle_time),
		.seq_show = bfqg_print_stat,
	},
	{
		.name = "bfq.empty_time",
		.private = offsetof(struct bfq_group, stats.empty_time),
		.seq_show = bfqg_print_stat,
	},
	{
		.name = "bfq.dequeue",
		.private = offsetof(struct bfq_group, stats.dequeue),
		.seq_show = bfqg_print_stat,
	},
	{ } /* terminate */
};

struct cftype bfq_blkg_files[] = {
	{
		.name = "bfq.weight",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = bfq_io_show_weight,
		.write = bfq_io_set_weight,
	},
	{} /* terminate */
};

#else	/* CONFIG_BFQ_GROUP_IOSCHED */
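
/*
 * When BFQ is built without cgroup support, the functions below provide
 * no-op or single-group fallbacks with the same signatures, so the rest
 * of bfq-iosched.c can call them unconditionally: every bfq_queue is
 * simply attached to the one global root_group.
 */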

void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq,
			      unsigned int op) { }
void bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op) { }
void bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op) { }
void bfqg_stats_update_completion(struct bfq_group *bfqg, uint64_t start_time,
				  uint64_t io_start_time, unsigned int op) { }
void bfqg_stats_update_dequeue(struct bfq_group *bfqg) { }
void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg) { }
void bfqg_stats_update_idle_time(struct bfq_group *bfqg) { }
void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg) { }
void bfqg_stats_update_avg_queue_size(struct bfq_group *bfqg) { }

void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
		   struct bfq_group *bfqg) {}

void bfq_init_entity(struct bfq_entity *entity, struct bfq_group *bfqg)
{
	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);

	entity->weight = entity->new_weight;
	entity->orig_weight = entity->new_weight;
	if (bfqq) {
		bfqq->ioprio = bfqq->new_ioprio;
		bfqq->ioprio_class = bfqq->new_ioprio_class;
	}
	entity->sched_data = &bfqg->sched_data;
}

void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio) {}

void bfq_end_wr_async(struct bfq_data *bfqd)
{
	bfq_end_wr_async_queues(bfqd, bfqd->root_group);
}

struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd, struct blkcg *blkcg)
{
	return bfqd->root_group;
}

struct bfq_group *bfqq_group(struct bfq_queue *bfqq)
{
	return bfqq->bfqd->root_group;
}

struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)
{
	struct bfq_group *bfqg;
	int i;

	bfqg = kmalloc_node(sizeof(*bfqg), GFP_KERNEL | __GFP_ZERO, node);
	if (!bfqg)
		return NULL;

	for (i = 0; i < BFQ_IOPRIO_CLASSES; i++)
		bfqg->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT;

	return bfqg;
}
#endif	/* CONFIG_BFQ_GROUP_IOSCHED */