/*
 * Hierarchical Budget Worst-case Fair Weighted Fair Queueing
 * (B-WF2Q+): hierarchical scheduling algorithm by which the BFQ I/O
 * scheduler schedules generic entities. The latter can represent
 * either single bfq queues (associated with processes) or groups of
 * bfq queues (associated with cgroups).
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include "bfq-iosched.h"

/**
 * bfq_gt - compare two timestamps.
 * @a: first ts.
 * @b: second ts.
 *
 * Return @a > @b, dealing with wrapping correctly.
 */
static int bfq_gt(u64 a, u64 b)
{
	return (s64)(a - b) > 0;
}
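
/*
 * A minimal user-space sketch (not kernel code, names hypothetical) of
 * why bfq_gt() compares through a signed difference: near the 64-bit
 * wraparound point a plain '>' on the raw values gives the wrong
 * answer, while the signed cast of the difference stays correct as
 * long as the two timestamps are less than 2^63 apart.
 */
#if 0 /* illustrative example only, never compiled */
#include <stdio.h>
#include <stdint.h>

static int demo_gt(uint64_t a, uint64_t b)
{
	return (int64_t)(a - b) > 0;
}

int main(void)
{
	uint64_t before_wrap = UINT64_MAX - 5;	/* just before wraparound */
	uint64_t after_wrap = 10;		/* just after wraparound */

	/* Raw comparison claims before_wrap is later: wrong here. */
	printf("raw:    %d\n", before_wrap > after_wrap);	  /* 1 */
	/* Signed-difference comparison: after_wrap is logically later. */
	printf("bfq_gt: %d\n", demo_gt(after_wrap, before_wrap)); /* 1 */
	return 0;
}
#endif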

static struct bfq_entity *bfq_root_active_entity(struct rb_root *tree)
{
	struct rb_node *node = tree->rb_node;

	return rb_entry(node, struct bfq_entity, rb_node);
}

static unsigned int bfq_class_idx(struct bfq_entity *entity)
{
	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);

	return bfqq ? bfqq->ioprio_class - 1 :
		BFQ_DEFAULT_GRP_CLASS - 1;
}

static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd);

static bool bfq_update_parent_budget(struct bfq_entity *next_in_service);

/**
 * bfq_update_next_in_service - update sd->next_in_service
 * @sd: sched_data for which to perform the update.
 * @new_entity: if not NULL, pointer to the entity whose activation,
 *		requeueing or repositioning triggered the invocation of
 *		this function.
 *
 * This function is called to update sd->next_in_service, which, in
 * its turn, may change as a consequence of the insertion or
 * extraction of an entity into/from one of the active trees of
 * sd. These insertions/extractions occur as a consequence of
 * activations/deactivations of entities, with some activations being
 * 'true' activations, and other activations being requeueings (i.e.,
 * implementing the second, requeueing phase of the mechanism used to
 * reposition an entity in its active tree; see comments on
 * __bfq_activate_entity and __bfq_requeue_entity for details). In
 * both the last two activation sub-cases, new_entity points to the
 * just activated or requeued entity.
 *
 * Returns true if sd->next_in_service changes in such a way that
 * entity->parent may become the next_in_service for its parent
 * entity.
 */
static bool bfq_update_next_in_service(struct bfq_sched_data *sd,
				       struct bfq_entity *new_entity)
{
	struct bfq_entity *next_in_service = sd->next_in_service;
	bool parent_sched_may_change = false;

	/*
	 * If this update is triggered by the activation, requeueing
	 * or repositioning of an entity that does not coincide with
	 * sd->next_in_service, then a full lookup in the active tree
	 * can be avoided. In fact, it is enough to check whether the
	 * just-modified entity has a higher priority than
	 * sd->next_in_service, or, even if it has the same priority
	 * as sd->next_in_service, is eligible and has a lower virtual
	 * finish time than sd->next_in_service. If this compound
	 * condition holds, then the new entity becomes the new
	 * next_in_service. Otherwise no change is needed.
	 */
	if (new_entity && new_entity != sd->next_in_service) {
		/*
		 * Flag used to decide whether to replace
		 * sd->next_in_service with new_entity. Tentatively
		 * set to true, and left as true if
		 * sd->next_in_service is NULL.
		 */
		bool replace_next = true;

		/*
		 * If there is already a next_in_service candidate
		 * entity, then compare class priorities or timestamps
		 * to decide whether to replace sd->next_in_service
		 * with new_entity.
		 */
		if (next_in_service) {
			unsigned int new_entity_class_idx =
				bfq_class_idx(new_entity);
			struct bfq_service_tree *st =
				sd->service_tree + new_entity_class_idx;

			/*
			 * For efficiency, evaluate the most likely
			 * sub-condition first.
			 */
			replace_next =
				(new_entity_class_idx ==
				 bfq_class_idx(next_in_service)
				 &&
				 !bfq_gt(new_entity->start, st->vtime)
				 &&
				 bfq_gt(next_in_service->finish,
					new_entity->finish))
				||
				new_entity_class_idx <
				bfq_class_idx(next_in_service);
		}

		if (replace_next)
			next_in_service = new_entity;
	} else /* invoked because of a deactivation: lookup needed */
		next_in_service = bfq_lookup_next_entity(sd);

	if (next_in_service) {
		parent_sched_may_change = !sd->next_in_service ||
			bfq_update_parent_budget(next_in_service);
	}

	sd->next_in_service = next_in_service;

	return parent_sched_may_change;
}
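
/*
 * A minimal sketch (not kernel code, names hypothetical) of the
 * replace_next decision above, reduced to a pure predicate on plain
 * integers: a candidate in a higher-priority class always wins; within
 * the same class it wins only if it is eligible (start not after the
 * tree's vtime, modulo wraparound) and has a smaller virtual finish
 * time. A lower class index means higher priority (RT=0, BE=1, IDLE=2).
 */
#if 0 /* illustrative example only, never compiled */
#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

static bool demo_gt(uint64_t a, uint64_t b)
{
	return (int64_t)(a - b) > 0;
}

static bool demo_replace_next(unsigned int new_class, uint64_t new_start,
			      uint64_t new_finish, unsigned int cur_class,
			      uint64_t cur_finish, uint64_t vtime)
{
	return (new_class == cur_class &&
		!demo_gt(new_start, vtime) &&	/* eligible */
		demo_gt(cur_finish, new_finish)) ||
		new_class < cur_class;		/* higher-priority class */
}

int main(void)
{
	/* Same class, eligible, smaller finish: replace. */
	printf("%d\n", demo_replace_next(1, 90, 150, 1, 200, 100)); /* 1 */
	/* A lower-priority class never replaces a higher-priority one. */
	printf("%d\n", demo_replace_next(2, 90, 150, 1, 200, 100)); /* 0 */
	return 0;
}
#endif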

#ifdef CONFIG_BFQ_GROUP_IOSCHED

struct bfq_group *bfq_bfqq_to_bfqg(struct bfq_queue *bfqq)
{
	struct bfq_entity *group_entity = bfqq->entity.parent;

	if (!group_entity)
		group_entity = &bfqq->bfqd->root_group->entity;

	return container_of(group_entity, struct bfq_group, entity);
}

/*
 * Returns true if this budget change may let next_in_service->parent
 * become the next_in_service entity for its parent entity.
 */
static bool bfq_update_parent_budget(struct bfq_entity *next_in_service)
{
	struct bfq_entity *bfqg_entity;
	struct bfq_group *bfqg;
	struct bfq_sched_data *group_sd;
	bool ret = false;

	group_sd = next_in_service->sched_data;

	bfqg = container_of(group_sd, struct bfq_group, sched_data);
	/*
	 * bfq_group's my_entity field is not NULL only if the group
	 * is not the root group. We must not touch the root entity
	 * as it must never become an in-service entity.
	 */
	bfqg_entity = bfqg->my_entity;
	if (bfqg_entity) {
		if (bfqg_entity->budget > next_in_service->budget)
			ret = true;
		bfqg_entity->budget = next_in_service->budget;
	}

	return ret;
}

/*
 * This function tells whether entity stops being a candidate for next
 * service, according to the following logic.
 *
 * This function is invoked for an entity that is about to be set in
 * service. If such an entity is a queue, then the entity is no longer
 * a candidate for next service (i.e., a candidate entity to serve
 * after the in-service entity is expired). The function then returns
 * true.
 *
 * In contrast, the entity could still be a candidate for next service
 * if it is not a queue, and has more than one child. In fact, even if
 * one of its children is about to be set in service, other children
 * may still be the next to serve. As a consequence, a non-queue
 * entity is not a candidate for next service only if it has exactly
 * one child. And only if this condition holds does the function
 * return true for a non-queue entity.
 */
static bool bfq_no_longer_next_in_service(struct bfq_entity *entity)
{
	struct bfq_group *bfqg;

	if (bfq_entity_to_bfqq(entity))
		return true;

	bfqg = container_of(entity, struct bfq_group, entity);

	if (bfqg->active_entities == 1)
		return true;

	return false;
}

#else /* CONFIG_BFQ_GROUP_IOSCHED */

struct bfq_group *bfq_bfqq_to_bfqg(struct bfq_queue *bfqq)
{
	return bfqq->bfqd->root_group;
}

static bool bfq_update_parent_budget(struct bfq_entity *next_in_service)
{
	return false;
}

static bool bfq_no_longer_next_in_service(struct bfq_entity *entity)
{
	return true;
}

#endif /* CONFIG_BFQ_GROUP_IOSCHED */

/*
 * Shift for timestamp calculations. This actually limits the maximum
 * service allowed in one timestamp delta (small shift values increase it),
 * the maximum total weight that can be used for the queues in the system
 * (big shift values increase it), and the period of virtual time
 * wraparounds.
 */
#define WFQ_SERVICE_SHIFT	22

struct bfq_queue *bfq_entity_to_bfqq(struct bfq_entity *entity)
{
	struct bfq_queue *bfqq = NULL;

	if (!entity->my_sched_data)
		bfqq = container_of(entity, struct bfq_queue, entity);

	return bfqq;
}


/**
 * bfq_delta - map service into the virtual time domain.
 * @service: amount of service.
 * @weight: scale factor (weight of an entity or weight sum).
 */
static u64 bfq_delta(unsigned long service, unsigned long weight)
{
	u64 d = (u64)service << WFQ_SERVICE_SHIFT;

	do_div(d, weight);
	return d;
}
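
/*
 * A worked example (user-space sketch, not kernel code): with
 * WFQ_SERVICE_SHIFT = 22, a service of 1000 sectors at weight 100 maps
 * to (1000 << 22) / 100 = 41943040 units of virtual time, and the same
 * service at weight 200 maps to half of that. A heavier entity thus
 * consumes virtual time more slowly, so it is scheduled more often for
 * the same advance of the tree's vtime.
 */
#if 0 /* illustrative example only, never compiled */
#include <stdio.h>
#include <stdint.h>

static uint64_t demo_delta(unsigned long service, unsigned long weight)
{
	return ((uint64_t)service << 22) / weight;
}

int main(void)
{
	printf("%llu\n", (unsigned long long)demo_delta(1000, 100)); /* 41943040 */
	printf("%llu\n", (unsigned long long)demo_delta(1000, 200)); /* 20971520 */
	return 0;
}
#endif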

/**
 * bfq_calc_finish - assign the finish time to an entity.
 * @entity: the entity to act upon.
 * @service: the service to be charged to the entity.
 */
static void bfq_calc_finish(struct bfq_entity *entity, unsigned long service)
{
	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);

	entity->finish = entity->start +
		bfq_delta(service, entity->weight);

	if (bfqq) {
		bfq_log_bfqq(bfqq->bfqd, bfqq,
			"calc_finish: serv %lu, w %d",
			service, entity->weight);
		bfq_log_bfqq(bfqq->bfqd, bfqq,
			"calc_finish: start %llu, finish %llu, delta %llu",
			entity->start, entity->finish,
			bfq_delta(service, entity->weight));
	}
}

/**
 * bfq_entity_of - get an entity from a node.
 * @node: the node field of the entity.
 *
 * Convert a node pointer to the corresponding entity. This is used only
 * to simplify the logic of some functions and not as the generic
 * conversion mechanism because, e.g., in the tree walking functions,
 * the check for a %NULL value would be redundant.
 */
struct bfq_entity *bfq_entity_of(struct rb_node *node)
{
	struct bfq_entity *entity = NULL;

	if (node)
		entity = rb_entry(node, struct bfq_entity, rb_node);

	return entity;
}

/**
 * bfq_extract - remove an entity from a tree.
 * @root: the tree root.
 * @entity: the entity to remove.
 */
static void bfq_extract(struct rb_root *root, struct bfq_entity *entity)
{
	entity->tree = NULL;
	rb_erase(&entity->rb_node, root);
}

/**
 * bfq_idle_extract - extract an entity from the idle tree.
 * @st: the service tree of the owning @entity.
 * @entity: the entity being removed.
 */
static void bfq_idle_extract(struct bfq_service_tree *st,
			     struct bfq_entity *entity)
{
	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
	struct rb_node *next;

	if (entity == st->first_idle) {
		next = rb_next(&entity->rb_node);
		st->first_idle = bfq_entity_of(next);
	}

	if (entity == st->last_idle) {
		next = rb_prev(&entity->rb_node);
		st->last_idle = bfq_entity_of(next);
	}

	bfq_extract(&st->idle, entity);

	if (bfqq)
		list_del(&bfqq->bfqq_list);
}

/**
 * bfq_insert - generic tree insertion.
 * @root: tree root.
 * @entity: entity to insert.
 *
 * This is used for the idle and the active tree, since they are both
 * ordered by finish time.
 */
static void bfq_insert(struct rb_root *root, struct bfq_entity *entity)
{
	struct bfq_entity *entry;
	struct rb_node **node = &root->rb_node;
	struct rb_node *parent = NULL;

	while (*node) {
		parent = *node;
		entry = rb_entry(parent, struct bfq_entity, rb_node);

		if (bfq_gt(entry->finish, entity->finish))
			node = &parent->rb_left;
		else
			node = &parent->rb_right;
	}

	rb_link_node(&entity->rb_node, parent, node);
	rb_insert_color(&entity->rb_node, root);

	entity->tree = root;
}

/**
 * bfq_update_min - update the min_start field of an entity.
 * @entity: the entity to update.
 * @node: one of its children.
 *
 * This function is called when @entity may store an invalid value for
 * min_start due to updates to the active tree. The function assumes
 * that the subtree rooted at @node (which may be its left or its right
 * child) has a valid min_start value.
 */
static void bfq_update_min(struct bfq_entity *entity, struct rb_node *node)
{
	struct bfq_entity *child;

	if (node) {
		child = rb_entry(node, struct bfq_entity, rb_node);
		if (bfq_gt(entity->min_start, child->min_start))
			entity->min_start = child->min_start;
	}
}

/**
 * bfq_update_active_node - recalculate min_start.
 * @node: the node to update.
 *
 * @node may have changed position or one of its children may have moved;
 * this function updates its min_start value accordingly. The left and
 * right subtrees are assumed to hold a correct min_start value.
 */
static void bfq_update_active_node(struct rb_node *node)
{
	struct bfq_entity *entity = rb_entry(node, struct bfq_entity, rb_node);

	entity->min_start = entity->start;
	bfq_update_min(entity, node->rb_right);
	bfq_update_min(entity, node->rb_left);
}

/**
 * bfq_update_active_tree - update min_start for the whole active tree.
 * @node: the starting node.
 *
 * @node must be the deepest modified node after an update. This function
 * updates its min_start using the values held by its children, assuming
 * that they did not change, and then updates all the nodes that may have
 * changed in the path to the root. The only nodes that may have changed
 * are the ones in the path or their siblings.
 */
static void bfq_update_active_tree(struct rb_node *node)
{
	struct rb_node *parent;

up:
	bfq_update_active_node(node);

	parent = rb_parent(node);
	if (!parent)
		return;

	if (node == parent->rb_left && parent->rb_right)
		bfq_update_active_node(parent->rb_right);
	else if (parent->rb_left)
		bfq_update_active_node(parent->rb_left);

	node = parent;
	goto up;
}
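
/*
 * A minimal sketch (user-space toy, names hypothetical) of the
 * augmented-tree invariant maintained above: every node caches
 * min_start = min(its own start, min_start of the left child,
 * min_start of the right child), so a search can skip any subtree
 * whose min_start already exceeds the vtime bound. The wraparound-safe
 * comparison of bfq_gt() is omitted for brevity.
 */
#if 0 /* illustrative example only, never compiled */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct demo_node {
	uint64_t start, min_start;
	struct demo_node *left, *right;
};

static void demo_update_min_start(struct demo_node *n)
{
	n->min_start = n->start;
	if (n->left && n->left->min_start < n->min_start)
		n->min_start = n->left->min_start;
	if (n->right && n->right->min_start < n->min_start)
		n->min_start = n->right->min_start;
}

int main(void)
{
	struct demo_node l = { .start = 7, .min_start = 7 };
	struct demo_node r = { .start = 12, .min_start = 12 };
	struct demo_node root = { .start = 10, .left = &l, .right = &r };

	demo_update_min_start(&root);
	printf("%llu\n", (unsigned long long)root.min_start); /* 7 */
	return 0;
}
#endif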

/**
 * bfq_active_insert - insert an entity in the active tree of its
 *                     group/device.
 * @st: the service tree of the entity.
 * @entity: the entity being inserted.
 *
 * The active tree is ordered by finish time, but an extra key is kept
 * for each node, containing the minimum value for the start times of
 * its children (and the node itself), so it's possible to search for
 * the eligible node with the lowest finish time in logarithmic time.
 */
static void bfq_active_insert(struct bfq_service_tree *st,
			      struct bfq_entity *entity)
{
	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
	struct rb_node *node = &entity->rb_node;
#ifdef CONFIG_BFQ_GROUP_IOSCHED
	struct bfq_sched_data *sd = NULL;
	struct bfq_group *bfqg = NULL;
	struct bfq_data *bfqd = NULL;
#endif

	bfq_insert(&st->active, entity);

	if (node->rb_left)
		node = node->rb_left;
	else if (node->rb_right)
		node = node->rb_right;

	bfq_update_active_tree(node);

#ifdef CONFIG_BFQ_GROUP_IOSCHED
	sd = entity->sched_data;
	bfqg = container_of(sd, struct bfq_group, sched_data);
	bfqd = (struct bfq_data *)bfqg->bfqd;
#endif
	if (bfqq)
		list_add(&bfqq->bfqq_list, &bfqq->bfqd->active_list);
#ifdef CONFIG_BFQ_GROUP_IOSCHED
	else /* bfq_group */
		bfq_weights_tree_add(bfqd, entity, &bfqd->group_weights_tree);

	if (bfqg != bfqd->root_group)
		bfqg->active_entities++;
#endif
}

/**
 * bfq_ioprio_to_weight - calc a weight from an ioprio.
 * @ioprio: the ioprio value to convert.
 */
unsigned short bfq_ioprio_to_weight(int ioprio)
{
	return (IOPRIO_BE_NR - ioprio) * BFQ_WEIGHT_CONVERSION_COEFF;
}

/**
 * bfq_weight_to_ioprio - calc an ioprio from a weight.
 * @weight: the weight value to convert.
 *
 * To preserve as much as possible the old only-ioprio user interface,
 * 0 is used as an escape ioprio value for weights (numerically) equal to
 * or larger than IOPRIO_BE_NR * BFQ_WEIGHT_CONVERSION_COEFF.
 */
static unsigned short bfq_weight_to_ioprio(int weight)
{
	return max_t(int, 0,
		     IOPRIO_BE_NR * BFQ_WEIGHT_CONVERSION_COEFF - weight);
}
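
/*
 * A worked example (sketch; the two constants below are assumptions
 * mirroring IOPRIO_BE_NR and BFQ_WEIGHT_CONVERSION_COEFF): ioprio 0
 * (highest) maps to weight 80, ioprio 7 (lowest) to weight 10, and the
 * reverse mapping returns the escape value 0 for any weight >= 80.
 */
#if 0 /* illustrative example only, never compiled */
#include <stdio.h>

#define DEMO_IOPRIO_BE_NR	8
#define DEMO_COEFF		10

static unsigned short demo_ioprio_to_weight(int ioprio)
{
	return (DEMO_IOPRIO_BE_NR - ioprio) * DEMO_COEFF;
}

static unsigned short demo_weight_to_ioprio(int weight)
{
	int ioprio = DEMO_IOPRIO_BE_NR * DEMO_COEFF - weight;

	return ioprio > 0 ? ioprio : 0;
}

int main(void)
{
	printf("%u\n", demo_ioprio_to_weight(0));	/* 80 */
	printf("%u\n", demo_ioprio_to_weight(7));	/* 10 */
	printf("%u\n", demo_weight_to_ioprio(100));	/* 0 (escape value) */
	return 0;
}
#endif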

static void bfq_get_entity(struct bfq_entity *entity)
{
	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);

	if (bfqq) {
		bfqq->ref++;
		bfq_log_bfqq(bfqq->bfqd, bfqq, "get_entity: %p %d",
			     bfqq, bfqq->ref);
	}
}

/**
 * bfq_find_deepest - find the deepest node that an extraction can modify.
 * @node: the node being removed.
 *
 * Do the first step of an extraction in an rb tree, looking for the
 * node that will replace @node, and returning the deepest node that
 * the following modifications to the tree can touch. If @node is the
 * last node in the tree return %NULL.
 */
static struct rb_node *bfq_find_deepest(struct rb_node *node)
{
	struct rb_node *deepest;

	if (!node->rb_right && !node->rb_left)
		deepest = rb_parent(node);
	else if (!node->rb_right)
		deepest = node->rb_left;
	else if (!node->rb_left)
		deepest = node->rb_right;
	else {
		deepest = rb_next(node);
		if (deepest->rb_right)
			deepest = deepest->rb_right;
		else if (rb_parent(deepest) != node)
			deepest = rb_parent(deepest);
	}

	return deepest;
}

/**
 * bfq_active_extract - remove an entity from the active tree.
 * @st: the service_tree containing the tree.
 * @entity: the entity being removed.
 */
static void bfq_active_extract(struct bfq_service_tree *st,
			       struct bfq_entity *entity)
{
	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
	struct rb_node *node;
#ifdef CONFIG_BFQ_GROUP_IOSCHED
	struct bfq_sched_data *sd = NULL;
	struct bfq_group *bfqg = NULL;
	struct bfq_data *bfqd = NULL;
#endif

	node = bfq_find_deepest(&entity->rb_node);
	bfq_extract(&st->active, entity);

	if (node)
		bfq_update_active_tree(node);

#ifdef CONFIG_BFQ_GROUP_IOSCHED
	sd = entity->sched_data;
	bfqg = container_of(sd, struct bfq_group, sched_data);
	bfqd = (struct bfq_data *)bfqg->bfqd;
#endif
	if (bfqq)
		list_del(&bfqq->bfqq_list);
#ifdef CONFIG_BFQ_GROUP_IOSCHED
	else /* bfq_group */
		bfq_weights_tree_remove(bfqd, entity,
					&bfqd->group_weights_tree);

	if (bfqg != bfqd->root_group)
		bfqg->active_entities--;
#endif
}

/**
 * bfq_idle_insert - insert an entity into the idle tree.
 * @st: the service tree containing the tree.
 * @entity: the entity to insert.
 */
static void bfq_idle_insert(struct bfq_service_tree *st,
			    struct bfq_entity *entity)
{
	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
	struct bfq_entity *first_idle = st->first_idle;
	struct bfq_entity *last_idle = st->last_idle;

	if (!first_idle || bfq_gt(first_idle->finish, entity->finish))
		st->first_idle = entity;
	if (!last_idle || bfq_gt(entity->finish, last_idle->finish))
		st->last_idle = entity;

	bfq_insert(&st->idle, entity);

	if (bfqq)
		list_add(&bfqq->bfqq_list, &bfqq->bfqd->idle_list);
}

/**
 * bfq_forget_entity - do not consider entity any longer for scheduling
 * @st: the service tree.
 * @entity: the entity being removed.
 * @is_in_service: true if entity is currently the in-service entity.
 *
 * Forget everything about @entity. In addition, if entity represents
 * a queue, and the latter is not in service, then release the service
 * reference to the queue (the one taken through bfq_get_entity). In
 * fact, in this case, there is really no more service reference to
 * the queue, as the latter is also outside any service tree. If,
 * instead, the queue is in service, then __bfq_bfqd_reset_in_service
 * will take care of putting the reference when the queue finally
 * stops being served.
 */
static void bfq_forget_entity(struct bfq_service_tree *st,
			      struct bfq_entity *entity,
			      bool is_in_service)
{
	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);

	entity->on_st = false;
	st->wsum -= entity->weight;
	if (bfqq && !is_in_service)
		bfq_put_queue(bfqq);
}

/**
 * bfq_put_idle_entity - release the idle tree ref of an entity.
 * @st: service tree for the entity.
 * @entity: the entity being released.
 */
void bfq_put_idle_entity(struct bfq_service_tree *st, struct bfq_entity *entity)
{
	bfq_idle_extract(st, entity);
	bfq_forget_entity(st, entity,
			  entity == entity->sched_data->in_service_entity);
}

/**
 * bfq_forget_idle - update the idle tree if necessary.
 * @st: the service tree to act upon.
 *
 * To preserve the global O(log N) complexity we only remove one entry here;
 * as the idle tree will not grow indefinitely this can be done safely.
 */
static void bfq_forget_idle(struct bfq_service_tree *st)
{
	struct bfq_entity *first_idle = st->first_idle;
	struct bfq_entity *last_idle = st->last_idle;

	if (RB_EMPTY_ROOT(&st->active) && last_idle &&
	    !bfq_gt(last_idle->finish, st->vtime)) {
		/*
		 * Forget the whole idle tree, increasing the vtime past
		 * the last finish time of idle entities.
		 */
		st->vtime = last_idle->finish;
	}

	if (first_idle && !bfq_gt(first_idle->finish, st->vtime))
		bfq_put_idle_entity(st, first_idle);
}

struct bfq_service_tree *bfq_entity_service_tree(struct bfq_entity *entity)
{
	struct bfq_sched_data *sched_data = entity->sched_data;
	unsigned int idx = bfq_class_idx(entity);

	return sched_data->service_tree + idx;
}

/*
 * Update weight and priority of entity. If update_class_too is true,
 * then update the ioprio_class of entity too.
 *
 * The reason why the update of ioprio_class is controlled through the
 * last parameter is as follows. Changing the ioprio class of an
 * entity implies changing the destination service trees for that
 * entity. If such a change occurred when the entity is already on one
 * of the service trees for its previous class, then the state of the
 * entity would become more complex: none of the new possible service
 * trees for the entity, according to bfq_entity_service_tree(), would
 * match any of the possible service trees on which the entity
 * is. Complex operations involving these trees, such as entity
 * activations and deactivations, should take into account this
 * additional complexity. To avoid this issue, this function is
 * invoked with update_class_too unset in the points in the code where
 * entity may happen to be on some tree.
 */
struct bfq_service_tree *
__bfq_entity_update_weight_prio(struct bfq_service_tree *old_st,
				struct bfq_entity *entity,
				bool update_class_too)
{
	struct bfq_service_tree *new_st = old_st;

	if (entity->prio_changed) {
		struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
		unsigned int prev_weight, new_weight;
		struct bfq_data *bfqd = NULL;
		struct rb_root *root;
#ifdef CONFIG_BFQ_GROUP_IOSCHED
		struct bfq_sched_data *sd;
		struct bfq_group *bfqg;
#endif

		if (bfqq)
			bfqd = bfqq->bfqd;
#ifdef CONFIG_BFQ_GROUP_IOSCHED
		else {
			sd = entity->my_sched_data;
			bfqg = container_of(sd, struct bfq_group, sched_data);
			bfqd = (struct bfq_data *)bfqg->bfqd;
		}
#endif

		old_st->wsum -= entity->weight;

		if (entity->new_weight != entity->orig_weight) {
			if (entity->new_weight < BFQ_MIN_WEIGHT ||
			    entity->new_weight > BFQ_MAX_WEIGHT) {
				pr_crit("update_weight_prio: new_weight %d\n",
					entity->new_weight);
				if (entity->new_weight < BFQ_MIN_WEIGHT)
					entity->new_weight = BFQ_MIN_WEIGHT;
				else
					entity->new_weight = BFQ_MAX_WEIGHT;
			}
			entity->orig_weight = entity->new_weight;
			if (bfqq)
				bfqq->ioprio =
				  bfq_weight_to_ioprio(entity->orig_weight);
		}

		if (bfqq && update_class_too)
			bfqq->ioprio_class = bfqq->new_ioprio_class;

		/*
		 * Reset prio_changed only if the ioprio_class change
		 * is not pending any longer.
		 */
		if (!bfqq || bfqq->ioprio_class == bfqq->new_ioprio_class)
			entity->prio_changed = 0;

		/*
		 * NOTE: here we may be changing the weight too early;
		 * this will cause unfairness. The correct approach
		 * would have required additional complexity to defer
		 * weight changes to the proper time instants (i.e.,
		 * when entity->finish <= old_st->vtime).
		 */
		new_st = bfq_entity_service_tree(entity);

		prev_weight = entity->weight;
		new_weight = entity->orig_weight *
			     (bfqq ? bfqq->wr_coeff : 1);
		/*
		 * If the weight of the entity changes, remove the entity
		 * from its old weight counter (if there is a counter
		 * associated with the entity), and add it to the counter
		 * associated with its new weight.
		 */
		if (prev_weight != new_weight) {
			root = bfqq ? &bfqd->queue_weights_tree :
				      &bfqd->group_weights_tree;
			bfq_weights_tree_remove(bfqd, entity, root);
		}
		entity->weight = new_weight;
		/*
		 * Add the entity to its weights tree only if it is
		 * not associated with a weight-raised queue.
		 */
		if (prev_weight != new_weight &&
		    (bfqq ? bfqq->wr_coeff == 1 : 1))
			/* If we get here, root has been initialized. */
			bfq_weights_tree_add(bfqd, entity, root);

		new_st->wsum += entity->weight;

		if (new_st != old_st)
			entity->start = new_st->vtime;
	}

	return new_st;
}

/**
 * bfq_bfqq_served - update the scheduler status after selection for
 *		     service.
 * @bfqq: the queue being served.
 * @served: bytes to transfer.
 *
 * NOTE: this can be optimized, as the timestamps of upper level entities
 * are synchronized every time a new bfqq is selected for service. By now,
 * we keep it to better check consistency.
 */
void bfq_bfqq_served(struct bfq_queue *bfqq, int served)
{
	struct bfq_entity *entity = &bfqq->entity;
	struct bfq_service_tree *st;

	for_each_entity(entity) {
		st = bfq_entity_service_tree(entity);

		entity->service += served;

		st->vtime += bfq_delta(served, st->wsum);
		bfq_forget_idle(st);
	}
	bfqg_stats_set_start_empty_time(bfqq_group(bfqq));
	bfq_log_bfqq(bfqq->bfqd, bfqq, "bfqq_served %d secs", served);
}
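
/*
 * A worked example (user-space sketch, hypothetical numbers) of the
 * vtime update above: with a weight sum of 300 over the tree, 600
 * sectors of service advance st->vtime by (600 << 22) / 300 units,
 * independently of which entity received the service; only the served
 * entity also accumulates those 600 sectors toward its own budget
 * consumption and finish time.
 */
#if 0 /* illustrative example only, never compiled */
#include <stdio.h>
#include <stdint.h>

static uint64_t demo_delta(unsigned long service, unsigned long wsum)
{
	return ((uint64_t)service << 22) / wsum;
}

int main(void)
{
	uint64_t vtime = 0;
	int entity_service = 0;

	entity_service += 600;		/* served entity only */
	vtime += demo_delta(600, 300);	/* whole tree */
	printf("service %d, vtime %llu\n",
	       entity_service, (unsigned long long)vtime); /* 600, 8388608 */
	return 0;
}
#endif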

/**
 * bfq_bfqq_charge_time - charge an amount of service equivalent to the length
 *			  of the time interval during which bfqq has been in
 *			  service.
 * @bfqd: the device
 * @bfqq: the queue that needs a service update.
 * @time_ms: the amount of time during which the queue has received service
 *
 * If a queue does not consume its budget fast enough, then providing
 * the queue with service fairness may impair throughput, more or less
 * severely. For this reason, queues that consume their budget slowly
 * are provided with time fairness instead of service fairness. This
 * goal is achieved through the BFQ scheduling engine, even if such an
 * engine works in the service, and not in the time domain. The trick
 * is charging these queues with an inflated amount of service, equal
 * to the amount of service that they would have received during their
 * service slot if they had been fast, i.e., if their requests had
 * been dispatched at a rate equal to the estimated peak rate.
 *
 * It is worth noting that time fairness can cause important
 * distortions in terms of bandwidth distribution, on devices with
 * internal queueing. The reason is that I/O requests dispatched
 * during the service slot of a queue may be served after that service
 * slot is finished, and may have a total processing time loosely
 * correlated with the duration of the service slot. This is
 * especially true for short service slots.
 */
void bfq_bfqq_charge_time(struct bfq_data *bfqd, struct bfq_queue *bfqq,
			  unsigned long time_ms)
{
	struct bfq_entity *entity = &bfqq->entity;
	int tot_serv_to_charge = entity->service;
	unsigned int timeout_ms = jiffies_to_msecs(bfq_timeout);

	if (time_ms > 0 && time_ms < timeout_ms)
		tot_serv_to_charge =
			(bfqd->bfq_max_budget * time_ms) / timeout_ms;

	if (tot_serv_to_charge < entity->service)
		tot_serv_to_charge = entity->service;

	/* Increase budget to avoid inconsistencies */
	if (tot_serv_to_charge > entity->budget)
		entity->budget = tot_serv_to_charge;

	bfq_bfqq_served(bfqq,
			max_t(int, 0, tot_serv_to_charge - entity->service));
}
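
/*
 * A worked example (sketch, hypothetical values): a slow queue held
 * the device for time_ms = 40 out of a budget timeout of 125 ms, with
 * bfq_max_budget = 16384 sectors, but dispatched only 1000 sectors.
 * Time fairness charges it 16384 * 40 / 125 = 5242 sectors, i.e., the
 * service it would have consumed at the estimated peak rate.
 */
#if 0 /* illustrative example only, never compiled */
#include <stdio.h>

int main(void)
{
	int max_budget = 16384;		/* assumed bfqd->bfq_max_budget */
	unsigned long time_ms = 40;	/* time actually spent in service */
	unsigned int timeout_ms = 125;	/* assumed budget timeout */
	int service = 1000;		/* service actually received */
	int charge = (int)((max_budget * time_ms) / timeout_ms);

	if (charge < service)
		charge = service;
	printf("%d\n", charge); /* 5242 */
	return 0;
}
#endif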

static void bfq_update_fin_time_enqueue(struct bfq_entity *entity,
					struct bfq_service_tree *st,
					bool backshifted)
{
	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);

	/*
	 * When this function is invoked, entity is not in any service
	 * tree, so it is safe to invoke the next function with the
	 * last parameter set (see the comments on the function).
	 */
	st = __bfq_entity_update_weight_prio(st, entity, true);
	bfq_calc_finish(entity, entity->budget);

	/*
	 * If some queues enjoy backshifting for a while, then their
	 * (virtual) finish timestamps may happen to become lower and
	 * lower than the system virtual time. In particular, if
	 * these queues often happen to be idle for short time
	 * periods, and during such time periods other queues with
	 * higher timestamps happen to be busy, then the backshifted
	 * timestamps of the former queues can become much lower than
	 * the system virtual time. In fact, to serve the queues with
	 * higher timestamps while the ones with lower timestamps are
	 * idle, the system virtual time may be pushed up to much
	 * higher values than the finish timestamps of the idle
	 * queues. As a consequence, the finish timestamps of all new
	 * or newly activated queues may end up being much larger than
	 * those of lucky queues with backshifted timestamps. The
	 * latter queues may then monopolize the device for a lot of
	 * time. This would simply break service guarantees.
	 *
	 * To reduce this problem, push up a little bit the
	 * backshifted timestamps of the queue associated with this
	 * entity (only a queue can happen to have the backshifted
	 * flag set): just enough to let the finish timestamp of the
	 * queue be equal to the current value of the system virtual
	 * time. This may introduce a little unfairness among queues
	 * with backshifted timestamps, but it does not break
	 * worst-case fairness guarantees.
	 *
	 * As a special case, if bfqq is weight-raised, push up
	 * timestamps much less, to keep very low the probability that
	 * this push up causes the backshifted finish timestamps of
	 * weight-raised queues to become higher than the backshifted
	 * finish timestamps of non weight-raised queues.
	 */
	if (backshifted && bfq_gt(st->vtime, entity->finish)) {
		unsigned long delta = st->vtime - entity->finish;

		if (bfqq)
			delta /= bfqq->wr_coeff;

		entity->start += delta;
		entity->finish += delta;
	}

	bfq_active_insert(st, entity);
}
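
/*
 * A worked example (sketch, hypothetical numbers) of the push-up
 * above: a backshifted queue has finish = 100 while st->vtime = 1000.
 * A non-weight-raised queue (wr_coeff = 1) is pushed up by the full
 * delta of 900; a weight-raised queue with wr_coeff = 30 is pushed up
 * by only 900 / 30 = 30, so its timestamps stay well below those of
 * non-raised queues.
 */
#if 0 /* illustrative example only, never compiled */
#include <stdio.h>

int main(void)
{
	unsigned long vtime = 1000, start = 50, finish = 100;
	unsigned int wr_coeff = 30;
	unsigned long delta = (vtime - finish) / wr_coeff;

	start += delta;
	finish += delta;
	printf("start %lu, finish %lu\n", start, finish); /* 80, 130 */
	return 0;
}
#endif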

/**
 * __bfq_activate_entity - handle activation of entity.
 * @entity: the entity being activated.
 * @non_blocking_wait_rq: true if entity was waiting for a request
 *
 * Called for a 'true' activation, i.e., if entity is not active and
 * one of its children receives a new request.
 *
 * Basically, this function updates the timestamps of entity and
 * inserts entity into its active tree, after possibly extracting it
 * from its idle tree.
 */
static void __bfq_activate_entity(struct bfq_entity *entity,
				  bool non_blocking_wait_rq)
{
	struct bfq_service_tree *st = bfq_entity_service_tree(entity);
	bool backshifted = false;
	unsigned long long min_vstart;

	/* See comments on bfq_bfqq_update_budg_for_activation */
	if (non_blocking_wait_rq && bfq_gt(st->vtime, entity->finish)) {
		backshifted = true;
		min_vstart = entity->finish;
	} else
		min_vstart = st->vtime;

	if (entity->tree == &st->idle) {
		/*
		 * Must be on the idle tree, bfq_idle_extract() will
		 * check for that.
		 */
		bfq_idle_extract(st, entity);
		entity->start = bfq_gt(min_vstart, entity->finish) ?
			min_vstart : entity->finish;
	} else {
		/*
		 * The finish time of the entity may be invalid, and
		 * it is in the past for sure, otherwise the queue
		 * would have been on the idle tree.
		 */
		entity->start = min_vstart;
		st->wsum += entity->weight;
		/*
		 * entity is about to be inserted into a service tree,
		 * and then set in service: get a reference to make
		 * sure entity does not disappear until it is no
		 * longer in service or scheduled for service.
		 */
		bfq_get_entity(entity);

		entity->on_st = true;
	}

	bfq_update_fin_time_enqueue(entity, st, backshifted);
}

/**
 * __bfq_requeue_entity - handle requeueing or repositioning of an entity.
 * @entity: the entity being requeued or repositioned.
 *
 * Requeueing is needed if this entity stops being served, which
 * happens if a leaf descendant entity has expired. On the other hand,
 * repositioning is needed if the next_in_service entity for the child
 * entity has changed. See the comments inside the function for
 * details.
 *
 * Basically, this function: 1) removes entity from its active tree if
 * present there, 2) updates the timestamps of entity and 3) inserts
 * entity back into its active tree (in the new, right position for
 * the new values of the timestamps).
 */
static void __bfq_requeue_entity(struct bfq_entity *entity)
{
	struct bfq_sched_data *sd = entity->sched_data;
	struct bfq_service_tree *st = bfq_entity_service_tree(entity);

	if (entity == sd->in_service_entity) {
		/*
		 * We are requeueing the current in-service entity,
		 * which may have to be done for one of the following
		 * reasons:
		 * - entity represents the in-service queue, and the
		 *   in-service queue is being requeued after an
		 *   expiration;
		 * - entity represents a group, and its budget has
		 *   changed because one of its child entities has
		 *   just been either activated or requeued for some
		 *   reason; the timestamps of the entity need then to
		 *   be updated, and the entity needs to be enqueued
		 *   or repositioned accordingly.
		 *
		 * In particular, before requeueing, the start time of
		 * the entity must be moved forward to account for the
		 * service that the entity has received while in
		 * service. This is done by the next instructions. The
		 * finish time will then be updated according to this
		 * new value of the start time, and to the budget of
		 * the entity.
		 */
		bfq_calc_finish(entity, entity->service);
		entity->start = entity->finish;
		/*
		 * In addition, if the entity had more than one child
		 * when set in service, then it was not extracted from
		 * the active tree. This implies that the position of
		 * the entity in the active tree may need to be
		 * changed now, because we have just updated the start
		 * time of the entity, and we will update its finish
		 * time in a moment (the requeueing is then, more
		 * precisely, a repositioning in this case). To
		 * implement this repositioning, we: 1) dequeue the
		 * entity here, 2) update the finish time and
		 * requeue the entity according to the new
		 * timestamps below.
		 */
		if (entity->tree)
			bfq_active_extract(st, entity);
	} else { /* The entity is already active, and not in service */
		/*
		 * In this case, this function gets called only if the
		 * next_in_service entity below this entity has
		 * changed, and this change has caused the budget of
		 * this entity to change, which, finally, implies that
		 * the finish time of this entity must be
		 * updated. Such an update may cause the scheduling,
		 * i.e., the position in the active tree, of this
		 * entity to change. We handle this change by: 1)
		 * dequeueing the entity here, 2) updating the finish
		 * time and requeueing the entity according to the new
		 * timestamps below. This is the same approach as the
		 * non-extracted-entity sub-case above.
		 */
		bfq_active_extract(st, entity);
	}

	bfq_update_fin_time_enqueue(entity, st, false);
}

static void __bfq_activate_requeue_entity(struct bfq_entity *entity,
					  struct bfq_sched_data *sd,
					  bool non_blocking_wait_rq)
{
	struct bfq_service_tree *st = bfq_entity_service_tree(entity);

	if (sd->in_service_entity == entity || entity->tree == &st->active)
		/*
		 * in service or already queued on the active tree,
		 * requeue or reposition
		 */
		__bfq_requeue_entity(entity);
	else
		/*
		 * Not in service and not queued on its active tree:
		 * the entity is idle and this is a true activation.
		 */
		__bfq_activate_entity(entity, non_blocking_wait_rq);
}

/**
 * bfq_activate_requeue_entity - activate or requeue an entity representing a
 *				 bfq_queue, and activate, requeue or reposition
 *				 all ancestors for which such an update becomes
 *				 necessary.
 * @entity: the entity to activate.
 * @non_blocking_wait_rq: true if this entity was waiting for a request
 * @requeue: true if this is a requeue, which implies that bfqq is
 *	     being expired; thus ALL its ancestors stop being served and must
 *	     therefore be requeued
 */
static void bfq_activate_requeue_entity(struct bfq_entity *entity,
					bool non_blocking_wait_rq,
					bool requeue)
{
	struct bfq_sched_data *sd;

	for_each_entity(entity) {
		sd = entity->sched_data;
		__bfq_activate_requeue_entity(entity, sd, non_blocking_wait_rq);

		if (!bfq_update_next_in_service(sd, entity) && !requeue)
			break;
	}
}

/**
 * __bfq_deactivate_entity - deactivate an entity from its service tree.
 * @entity: the entity to deactivate.
 * @ins_into_idle_tree: if false, the entity will not be put into the
 *			idle tree.
 *
 * Deactivates an entity, independently of its previous state. Must
 * be invoked only if entity is on a service tree. Extracts the entity
 * from that tree, and if necessary and allowed, puts it into the idle
 * tree.
 */
bool __bfq_deactivate_entity(struct bfq_entity *entity, bool ins_into_idle_tree)
{
	struct bfq_sched_data *sd = entity->sched_data;
	struct bfq_service_tree *st;
	bool is_in_service;

	if (!entity->on_st) /* entity never activated, or already inactive */
		return false;

	/*
	 * If we get here, then entity is active, which implies that
	 * bfq_group_set_parent has already been invoked for the group
	 * represented by entity. Therefore, the field
	 * entity->sched_data has been set, and we can safely use it.
	 */
	st = bfq_entity_service_tree(entity);
	is_in_service = entity == sd->in_service_entity;

	if (is_in_service)
		bfq_calc_finish(entity, entity->service);

	if (entity->tree == &st->active)
		bfq_active_extract(st, entity);
	else if (!is_in_service && entity->tree == &st->idle)
		bfq_idle_extract(st, entity);

	if (!ins_into_idle_tree || !bfq_gt(entity->finish, st->vtime))
		bfq_forget_entity(st, entity, is_in_service);
	else
		bfq_idle_insert(st, entity);

	return true;
}

/**
 * bfq_deactivate_entity - deactivate an entity representing a bfq_queue.
 * @entity: the entity to deactivate.
 * @ins_into_idle_tree: true if the entity can be put into the idle tree
 * @expiration: true if this deactivation is due to the expiration of the
 *		queue the entity (leaf descendant) represents
 */
static void bfq_deactivate_entity(struct bfq_entity *entity,
				  bool ins_into_idle_tree,
				  bool expiration)
{
	struct bfq_sched_data *sd;
	struct bfq_entity *parent = NULL;

	for_each_entity_safe(entity, parent) {
		sd = entity->sched_data;

		if (!__bfq_deactivate_entity(entity, ins_into_idle_tree)) {
			/*
			 * entity is not in any tree any more, so
			 * this deactivation is a no-op, and there is
			 * nothing to change for upper-level entities
			 * (in case of expiration, this can never
			 * happen).
			 */
			return;
		}

		if (sd->next_in_service == entity)
			/*
			 * entity was the next_in_service entity;
			 * then, since entity has just been
			 * deactivated, a new one must be found.
			 */
			bfq_update_next_in_service(sd, NULL);

		if (sd->next_in_service)
			/*
			 * The parent entity is still backlogged,
			 * because next_in_service is not NULL. So, no
			 * further upwards deactivation must be
			 * performed. Yet, next_in_service has
			 * changed. Then the schedule does need to be
			 * updated upwards.
			 */
			break;

		/*
		 * If we get here, then the parent is no longer
		 * backlogged and we need to propagate the
		 * deactivation upwards. Thus let the loop go on.
		 */

		/*
		 * Also let parent be queued into the idle tree on
		 * deactivation, to preserve service guarantees, and
		 * assuming that the caller of this function does not
		 * need parent entities too to be removed completely.
		 */
		ins_into_idle_tree = true;
	}

	/*
	 * If the deactivation loop is fully executed, then there are
	 * no more entities to touch and next loop is not executed at
	 * all. Otherwise, requeue remaining entities if they are
	 * about to stop receiving service, or reposition them if this
	 * is not the case.
	 */
	entity = parent;
	for_each_entity(entity) {
		/*
		 * Invoke __bfq_requeue_entity on entity, even if
		 * already active, to requeue/reposition it in the
		 * active tree (because sd->next_in_service has
		 * changed)
		 */
		__bfq_requeue_entity(entity);

		sd = entity->sched_data;
		if (!bfq_update_next_in_service(sd, entity) &&
		    !expiration)
			/*
			 * next_in_service unchanged or not causing
			 * any change in entity->parent->sd, and no
			 * requeueing needed for expiration: stop
			 * here.
			 */
			break;
	}
}

/**
 * bfq_calc_vtime_jump - compute the value to which the vtime should jump,
 *                       if needed, to have at least one entity eligible.
 * @st: the service tree to act upon.
 *
 * Assumes that st is not empty.
 */
static u64 bfq_calc_vtime_jump(struct bfq_service_tree *st)
{
	struct bfq_entity *root_entity = bfq_root_active_entity(&st->active);

	if (bfq_gt(root_entity->min_start, st->vtime))
		return root_entity->min_start;

	return st->vtime;
}

static void bfq_update_vtime(struct bfq_service_tree *st, u64 new_value)
{
	if (new_value > st->vtime) {
		st->vtime = new_value;
		bfq_forget_idle(st);
	}
}

/**
 * bfq_first_active_entity - find the eligible entity with
 *                           the smallest finish time
 * @st: the service tree to select from.
 * @vtime: the system virtual time to use as a reference for eligibility
 *
 * This function searches for the first schedulable entity, starting from
 * the root of the tree and going on the left every time on this side there
 * is a subtree with at least one eligible (start <= vtime) entity. The path
 * on the right is followed only if a) the left subtree contains no eligible
 * entities and b) no eligible entity has been found yet.
 */
static struct bfq_entity *bfq_first_active_entity(struct bfq_service_tree *st,
						  u64 vtime)
{
	struct bfq_entity *entry, *first = NULL;
	struct rb_node *node = st->active.rb_node;

	while (node) {
		entry = rb_entry(node, struct bfq_entity, rb_node);
left:
		if (!bfq_gt(entry->start, vtime))
			first = entry;

		if (node->rb_left) {
			entry = rb_entry(node->rb_left,
					 struct bfq_entity, rb_node);
			if (!bfq_gt(entry->min_start, vtime)) {
				node = node->rb_left;
				goto left;
			}
		}
		if (first)
			break;
		node = node->rb_right;
	}

	return first;
}
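
/*
 * A minimal sketch (user-space toy, names hypothetical) of the search
 * above: walk down from the root, descending left whenever the left
 * child's cached min_start proves that subtree still holds an eligible
 * entity, and remember the last eligible node seen. Because the tree
 * is ordered by finish time, that node is the eligible entity with the
 * smallest finish time. Wraparound-safe comparisons are omitted.
 */
#if 0 /* illustrative example only, never compiled */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct demo_node {
	uint64_t start, min_start, finish;
	struct demo_node *left, *right;
};

static struct demo_node *demo_first_active(struct demo_node *node,
					   uint64_t vtime)
{
	struct demo_node *first = NULL;

	while (node) {
		if (node->start <= vtime)
			first = node;	/* eligible, smallest finish so far */
		if (node->left && node->left->min_start <= vtime) {
			node = node->left;
			continue;
		}
		if (first)
			break;
		node = node->right;
	}
	return first;
}

int main(void)
{
	struct demo_node l = { .start = 3, .min_start = 3, .finish = 10 };
	struct demo_node r = { .start = 30, .min_start = 30, .finish = 40 };
	struct demo_node root = { .start = 5, .min_start = 3, .finish = 20,
				  .left = &l, .right = &r };
	struct demo_node *e = demo_first_active(&root, 5);

	printf("finish %llu\n", e ? (unsigned long long)e->finish : 0); /* 10 */
	return 0;
}
#endif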

/**
 * __bfq_lookup_next_entity - return the first eligible entity in @st.
 * @st: the service tree.
 * @in_service: whether or not there is an in-service entity for the
 *		sched_data this service tree belongs to.
 *
 * If there is no in-service entity for the sched_data st belongs to,
 * then return the entity that will be set in service if:
 * 1) the parent entity this st belongs to is set in service;
 * 2) no entity belonging to such parent entity undergoes a state change
 * that would influence the timestamps of the entity (e.g., becomes idle,
 * becomes backlogged, changes its budget, ...).
 *
 * In this first case, update the virtual time in @st too (see the
 * comments on this update inside the function).
 *
 * In contrast, if there is an in-service entity, then return the
 * entity that would be set in service if not only the above
 * conditions, but also the following one held true: the currently
 * in-service entity, on expiration,
 * 1) gets a finish time equal to the current one, or
 * 2) is not eligible any more, or
 * 3) is idle.
 */
static struct bfq_entity *
__bfq_lookup_next_entity(struct bfq_service_tree *st, bool in_service)
{
	struct bfq_entity *entity;
	u64 new_vtime;

	if (RB_EMPTY_ROOT(&st->active))
		return NULL;

	/*
	 * Get the value of the system virtual time for which at
	 * least one entity is eligible.
	 */
	new_vtime = bfq_calc_vtime_jump(st);

	/*
	 * If there is no in-service entity for the sched_data this
	 * active tree belongs to, then push the system virtual time
	 * up to the value that guarantees that at least one entity is
	 * eligible. If, instead, there is an in-service entity, then
	 * do not make any such update, because there is already an
	 * eligible entity, namely the in-service one (even if the
	 * entity is not on st, because it was extracted when set in
	 * service).
	 */
	if (!in_service)
		bfq_update_vtime(st, new_vtime);

	entity = bfq_first_active_entity(st, new_vtime);

	return entity;
}

/**
 * bfq_lookup_next_entity - return the first eligible entity in @sd.
 * @sd: the sched_data.
 *
 * This function is invoked when there has been a change in the trees
 * for sd, and we need to know what is the new next entity to serve
 * after this change.
 */
static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd)
{
	struct bfq_service_tree *st = sd->service_tree;
	struct bfq_service_tree *idle_class_st = st + (BFQ_IOPRIO_CLASSES - 1);
	struct bfq_entity *entity = NULL;
	int class_idx = 0;

	/*
	 * Choose from idle class, if needed to guarantee a minimum
	 * bandwidth to this class (and if there is some active entity
	 * in idle class). This should also mitigate
	 * priority-inversion problems in case a low priority task is
	 * holding file system resources.
	 */
	if (time_is_before_jiffies(sd->bfq_class_idle_last_service +
				   BFQ_CL_IDLE_TIMEOUT)) {
		if (!RB_EMPTY_ROOT(&idle_class_st->active))
			class_idx = BFQ_IOPRIO_CLASSES - 1;
		/* About to be served if backlogged, or not yet backlogged */
		sd->bfq_class_idle_last_service = jiffies;
	}

	/*
	 * Find the next entity to serve for the highest-priority
	 * class, unless the idle class needs to be served.
	 */
	for (; class_idx < BFQ_IOPRIO_CLASSES; class_idx++) {
		entity = __bfq_lookup_next_entity(st + class_idx,
						  sd->in_service_entity);

		if (entity)
			break;
	}

	return entity;
}

bool next_queue_may_preempt(struct bfq_data *bfqd)
{
	struct bfq_sched_data *sd = &bfqd->root_group->sched_data;

	return sd->next_in_service != sd->in_service_entity;
}

/*
 * Get next queue for service.
 */
struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd)
{
	struct bfq_entity *entity = NULL;
	struct bfq_sched_data *sd;
	struct bfq_queue *bfqq;

	if (bfqd->busy_queues == 0)
		return NULL;

	/*
	 * Traverse the path from the root to the leaf entity to
	 * serve. Set in service all the entities visited along the
	 * way.
	 */
	sd = &bfqd->root_group->sched_data;
	for (; sd ; sd = entity->my_sched_data) {
		/*
		 * WARNING. We are about to set the in-service entity
		 * to sd->next_in_service, i.e., to the (cached) value
		 * returned by bfq_lookup_next_entity(sd) the last
		 * time it was invoked, i.e., the last time when the
		 * service order in sd changed as a consequence of the
		 * activation or deactivation of an entity. In this
		 * respect, if we execute bfq_lookup_next_entity(sd)
		 * in this very moment, it may, although with low
		 * probability, yield a different entity than that
		 * pointed to by sd->next_in_service. This rare event
		 * happens in case there was no CLASS_IDLE entity to
		 * serve for sd when bfq_lookup_next_entity(sd) was
		 * invoked for the last time, while there is now one
		 * such entity.
		 *
		 * If the above event happens, then the scheduling of
		 * such entity in CLASS_IDLE is postponed until the
		 * service of the sd->next_in_service entity
		 * finishes. In fact, when the latter is expired,
		 * bfq_lookup_next_entity(sd) gets called again,
		 * exactly to update sd->next_in_service.
		 */

		/* Make next_in_service entity become in_service_entity */
		entity = sd->next_in_service;
		sd->in_service_entity = entity;

		/*
		 * Reset the accumulator of the amount of service that
		 * the entity is about to receive.
		 */
		entity->service = 0;

		/*
		 * If entity is no longer a candidate for next
		 * service, then we extract it from its active tree,
		 * for the following reason. To further boost the
		 * throughput in some special case, BFQ needs to know
		 * which is the next candidate entity to serve, while
		 * there is already an entity in service. In this
		 * respect, to make it easy to compute/update the next
		 * candidate entity to serve after the current
		 * candidate has been set in service, there is a case
		 * where it is necessary to extract the current
		 * candidate from its service tree. Such a case is
		 * when the entity just set in service cannot be also
		 * a candidate for next service. Details about when
		 * this condition holds are reported in the comments
		 * on the function bfq_no_longer_next_in_service()
		 * invoked below.
		 */
		if (bfq_no_longer_next_in_service(entity))
			bfq_active_extract(bfq_entity_service_tree(entity),
					   entity);

		/*
		 * For the same reason why we may have just extracted
		 * entity from its active tree, we may need to update
		 * next_in_service for the sched_data of entity too,
		 * regardless of whether entity has been extracted.
		 * In fact, even if entity has not been extracted, a
		 * descendant entity may get extracted. Such an event
		 * would cause a change in next_in_service for the
		 * level of the descendant entity, and thus possibly
		 * back to upper levels.
		 *
		 * We cannot perform the resulting needed update
		 * before the end of this loop, because, to know which
		 * is the correct next-to-serve candidate entity for
		 * each level, we need first to find the leaf entity
		 * to set in service. In fact, only after we know
		 * which is the next-to-serve leaf entity, we can
		 * discover whether the parent entity of the leaf
		 * entity becomes the next-to-serve, and so on.
		 */

	}

	bfqq = bfq_entity_to_bfqq(entity);

	/*
	 * We can finally update all next-to-serve entities along the
	 * path from the leaf entity just set in service to the root.
	 */
	for_each_entity(entity) {
		struct bfq_sched_data *sd = entity->sched_data;

		if (!bfq_update_next_in_service(sd, NULL))
			break;
	}

	return bfqq;
}

void __bfq_bfqd_reset_in_service(struct bfq_data *bfqd)
{
	struct bfq_queue *in_serv_bfqq = bfqd->in_service_queue;
	struct bfq_entity *in_serv_entity = &in_serv_bfqq->entity;
	struct bfq_entity *entity = in_serv_entity;

	bfq_clear_bfqq_wait_request(in_serv_bfqq);
	hrtimer_try_to_cancel(&bfqd->idle_slice_timer);
	bfqd->in_service_queue = NULL;

	/*
	 * When this function is called, all in-service entities have
	 * been properly deactivated or requeued, so we can safely
	 * execute the final step: reset in_service_entity along the
	 * path from entity to the root.
	 */
	for_each_entity(entity)
		entity->sched_data->in_service_entity = NULL;

	/*
	 * in_serv_entity is no longer in service, so, if it is in no
	 * service tree either, then release the service reference to
	 * the queue it represents (taken with bfq_get_entity).
	 */
	if (!in_serv_entity->on_st)
		bfq_put_queue(in_serv_bfqq);
}

void bfq_deactivate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
			 bool ins_into_idle_tree, bool expiration)
{
	struct bfq_entity *entity = &bfqq->entity;

	bfq_deactivate_entity(entity, ins_into_idle_tree, expiration);
}

void bfq_activate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
{
	struct bfq_entity *entity = &bfqq->entity;

	bfq_activate_requeue_entity(entity, bfq_bfqq_non_blocking_wait_rq(bfqq),
				    false);
	bfq_clear_bfqq_non_blocking_wait_rq(bfqq);
}

void bfq_requeue_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
{
	struct bfq_entity *entity = &bfqq->entity;

	bfq_activate_requeue_entity(entity, false,
				    bfqq == bfqd->in_service_queue);
}

/*
 * Called when the bfqq no longer has requests pending: remove it from
 * the service tree. As a special case, it can be invoked during an
 * expiration.
 */
void bfq_del_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq,
		       bool expiration)
{
	bfq_log_bfqq(bfqd, bfqq, "del from busy");

	bfq_clear_bfqq_busy(bfqq);

	bfqd->busy_queues--;

	if (!bfqq->dispatched)
		bfq_weights_tree_remove(bfqd, &bfqq->entity,
					&bfqd->queue_weights_tree);

	if (bfqq->wr_coeff > 1)
		bfqd->wr_busy_queues--;

	bfqg_stats_update_dequeue(bfqq_group(bfqq));

	bfq_deactivate_bfqq(bfqd, bfqq, true, expiration);
}

/*
 * Called when an inactive queue receives a new request.
 */
void bfq_add_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq)
{
	bfq_log_bfqq(bfqd, bfqq, "add to busy");

	bfq_activate_bfqq(bfqd, bfqq);

	bfq_mark_bfqq_busy(bfqq);
	bfqd->busy_queues++;

	if (!bfqq->dispatched)
		if (bfqq->wr_coeff == 1)
			bfq_weights_tree_add(bfqd, &bfqq->entity,
					     &bfqd->queue_weights_tree);

	if (bfqq->wr_coeff > 1)
		bfqd->wr_busy_queues++;
}