/*
 * Hierarchical Budget Worst-case Fair Weighted Fair Queueing
 * (B-WF2Q+): hierarchical scheduling algorithm by which the BFQ I/O
 * scheduler schedules generic entities. The latter can represent
 * either single bfq queues (associated with processes) or groups of
 * bfq queues (associated with cgroups).
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include "bfq-iosched.h"
/**
 * bfq_gt - compare two timestamps.
 * @a: first ts.
 * @b: second ts.
 *
 * Return @a > @b, dealing with wrapping correctly.
 */
static int bfq_gt(u64 a, u64 b)
{
	return (s64)(a - b) > 0;
}
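
/*
 * Illustrative note on the signed cast above (a worked example, not
 * part of the original comments): the subtraction is done in u64 and
 * only then interpreted as s64, so comparisons stay correct across
 * wraparound. For instance, bfq_gt(1, ULLONG_MAX) computes
 * 1 - ULLONG_MAX == 2 as u64, and (s64)2 > 0, so a timestamp that
 * has just wrapped is still recognized as being ahead of one sitting
 * near the wrap point.
 */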

static struct bfq_entity *bfq_root_active_entity(struct rb_root *tree)
{
	struct rb_node *node = tree->rb_node;

	return rb_entry(node, struct bfq_entity, rb_node);
}

static unsigned int bfq_class_idx(struct bfq_entity *entity)
{
	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);

	return bfqq ? bfqq->ioprio_class - 1 :
		BFQ_DEFAULT_GRP_CLASS - 1;
}

unsigned int bfq_tot_busy_queues(struct bfq_data *bfqd)
{
	return bfqd->busy_queues[0] + bfqd->busy_queues[1] +
		bfqd->busy_queues[2];
}

static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd,
						 bool expiration);

static bool bfq_update_parent_budget(struct bfq_entity *next_in_service);

/*
 * bfq_update_next_in_service - update sd->next_in_service
 * @sd: sched_data for which to perform the update.
 * @new_entity: if not NULL, pointer to the entity whose activation,
 *		requeueing or repositioning triggered the invocation of
 *		this function.
 * @expiration: if true, this function is being invoked after the
 *		expiration of the in-service entity
 *
 * This function is called to update sd->next_in_service, which, in
 * its turn, may change as a consequence of the insertion or
 * extraction of an entity into/from one of the active trees of
 * sd. These insertions/extractions occur as a consequence of
 * activations/deactivations of entities, with some activations being
 * 'true' activations, and other activations being requeueings (i.e.,
 * implementing the second, requeueing phase of the mechanism used to
 * reposition an entity in its active tree; see comments on
 * __bfq_activate_entity and __bfq_requeue_entity for details). In
 * both the last two activation sub-cases, new_entity points to the
 * just activated or requeued entity.
 *
 * Returns true if sd->next_in_service changes in such a way that
 * entity->parent may become the next_in_service for its parent
 * entity.
 */
static bool bfq_update_next_in_service(struct bfq_sched_data *sd,
				       struct bfq_entity *new_entity,
				       bool expiration)
{
	struct bfq_entity *next_in_service = sd->next_in_service;
	bool parent_sched_may_change = false;
	bool change_without_lookup = false;

	/*
	 * If this update is triggered by the activation, requeueing
	 * or repositioning of an entity that does not coincide with
	 * sd->next_in_service, then a full lookup in the active tree
	 * can be avoided. In fact, it is enough to check whether the
	 * just-modified entity has the same priority as
	 * sd->next_in_service, is eligible and has a lower virtual
	 * finish time than sd->next_in_service. If this compound
	 * condition holds, then the new entity becomes the new
	 * next_in_service. Otherwise no change is needed.
	 */
	if (new_entity && new_entity != sd->next_in_service) {
		/*
		 * Flag used to decide whether to replace
		 * sd->next_in_service with new_entity. Tentatively
		 * set to true, and left as true if
		 * sd->next_in_service is NULL.
		 */
		change_without_lookup = true;

		/*
		 * If there is already a next_in_service candidate
		 * entity, then compare timestamps to decide whether
		 * to replace sd->next_in_service with new_entity.
		 */
		if (next_in_service) {
			unsigned int new_entity_class_idx =
				bfq_class_idx(new_entity);
			struct bfq_service_tree *st =
				sd->service_tree + new_entity_class_idx;

			change_without_lookup =
				(new_entity_class_idx ==
				 bfq_class_idx(next_in_service)
				 &&
				 !bfq_gt(new_entity->start, st->vtime)
				 &&
				 bfq_gt(next_in_service->finish,
					new_entity->finish));
		}

		if (change_without_lookup)
			next_in_service = new_entity;
	}

	if (!change_without_lookup) /* lookup needed */
		next_in_service = bfq_lookup_next_entity(sd, expiration);

	if (next_in_service) {
		bool new_budget_triggers_change =
			bfq_update_parent_budget(next_in_service);

		parent_sched_may_change = !sd->next_in_service ||
			new_budget_triggers_change;
	}

	sd->next_in_service = next_in_service;

	if (!next_in_service)
		return parent_sched_may_change;

	return parent_sched_may_change;
}

#ifdef CONFIG_BFQ_GROUP_IOSCHED

struct bfq_group *bfq_bfqq_to_bfqg(struct bfq_queue *bfqq)
{
	struct bfq_entity *group_entity = bfqq->entity.parent;

	if (!group_entity)
		group_entity = &bfqq->bfqd->root_group->entity;

	return container_of(group_entity, struct bfq_group, entity);
}

/*
 * Returns true if this budget change may let next_in_service->parent
 * become the next_in_service entity for its parent entity.
 */
static bool bfq_update_parent_budget(struct bfq_entity *next_in_service)
{
	struct bfq_entity *bfqg_entity;
	struct bfq_group *bfqg;
	struct bfq_sched_data *group_sd;
	bool ret = false;

	group_sd = next_in_service->sched_data;

	bfqg = container_of(group_sd, struct bfq_group, sched_data);
	/*
	 * bfq_group's my_entity field is not NULL only if the group
	 * is not the root group. We must not touch the root entity
	 * as it must never become an in-service entity.
	 */
	bfqg_entity = bfqg->my_entity;
	if (bfqg_entity) {
		if (bfqg_entity->budget > next_in_service->budget)
			ret = true;
		bfqg_entity->budget = next_in_service->budget;
	}

	return ret;
}

/*
 * This function tells whether entity stops being a candidate for next
 * service, according to the restrictive definition of the field
 * next_in_service. In particular, this function is invoked for an
 * entity that is about to be set in service.
 *
 * If entity is a queue, then the entity is no longer a candidate for
 * next service according to that definition, because entity is
 * about to become the in-service queue. This function then returns
 * true if entity is a queue.
 *
 * In contrast, entity could still be a candidate for next service if
 * it is not a queue, and has more than one active child. In fact,
 * even if one of its children is about to be set in service, other
 * active children may still be the next to serve, for the parent
 * entity, even according to the above definition. As a consequence, a
 * non-queue entity is not a candidate for next-service only if it has
 * only one active child. And only if this condition holds, then this
 * function returns true for a non-queue entity.
 */
static bool bfq_no_longer_next_in_service(struct bfq_entity *entity)
{
	struct bfq_group *bfqg;

	if (bfq_entity_to_bfqq(entity))
		return true;

	bfqg = container_of(entity, struct bfq_group, entity);

	/*
	 * The field active_entities does not always contain the
	 * actual number of active children entities: it happens to
	 * not account for the in-service entity in case the latter is
	 * removed from its active tree (which may get done after
	 * invoking the function bfq_no_longer_next_in_service in
	 * bfq_get_next_queue). Fortunately, here, i.e., while
	 * bfq_no_longer_next_in_service is not yet completed in
	 * bfq_get_next_queue, bfq_active_extract has not yet been
	 * invoked, and thus active_entities still coincides with the
	 * actual number of active entities.
	 */
	if (bfqg->active_entities == 1)
		return true;

	return false;
}

#else /* CONFIG_BFQ_GROUP_IOSCHED */

struct bfq_group *bfq_bfqq_to_bfqg(struct bfq_queue *bfqq)
{
	return bfqq->bfqd->root_group;
}

static bool bfq_update_parent_budget(struct bfq_entity *next_in_service)
{
	return false;
}

static bool bfq_no_longer_next_in_service(struct bfq_entity *entity)
{
	return true;
}

#endif /* CONFIG_BFQ_GROUP_IOSCHED */

/*
 * Shift for timestamp calculations. This actually limits the maximum
 * service allowed in one timestamp delta (small shift values increase it),
 * the maximum total weight that can be used for the queues in the system
 * (big shift values increase it), and the period of virtual time
 * wraparounds.
 */
#define WFQ_SERVICE_SHIFT	22

struct bfq_queue *bfq_entity_to_bfqq(struct bfq_entity *entity)
{
	struct bfq_queue *bfqq = NULL;

	if (!entity->my_sched_data)
		bfqq = container_of(entity, struct bfq_queue, entity);

	return bfqq;
}

/**
 * bfq_delta - map service into the virtual time domain.
 * @service: amount of service.
 * @weight: scale factor (weight of an entity or weight sum).
 */
static u64 bfq_delta(unsigned long service, unsigned long weight)
{
	u64 d = (u64)service << WFQ_SERVICE_SHIFT;

	do_div(d, weight);
	return d;
}
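
/*
 * Worked example of the mapping above: with WFQ_SERVICE_SHIFT = 22,
 * an entity of weight 10 that receives 100 units of service advances
 * by (100 << 22) / 10 = 10 << 22 units of virtual time, while an
 * entity of weight 100 advances by only 1 << 22 for the same
 * service. Virtual time thus grows inversely with weight, which is
 * what makes the timestamps of entities with different weights
 * comparable in the fair-queueing scheduler.
 */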

/**
 * bfq_calc_finish - assign the finish time to an entity.
 * @entity: the entity to act upon.
 * @service: the service to be charged to the entity.
 */
static void bfq_calc_finish(struct bfq_entity *entity, unsigned long service)
{
	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);

	entity->finish = entity->start +
		bfq_delta(service, entity->weight);

	if (bfqq) {
		bfq_log_bfqq(bfqq->bfqd, bfqq,
			"calc_finish: serv %lu, w %d",
			service, entity->weight);
		bfq_log_bfqq(bfqq->bfqd, bfqq,
			"calc_finish: start %llu, finish %llu, delta %llu",
			entity->start, entity->finish,
			bfq_delta(service, entity->weight));
	}
}

/**
 * bfq_entity_of - get an entity from a node.
 * @node: the node field of the entity.
 *
 * Convert a node pointer to the relative entity. This is used only
 * to simplify the logic of some functions and not as the generic
 * conversion mechanism because, e.g., in the tree walking functions,
 * the check for a %NULL value would be redundant.
 */
struct bfq_entity *bfq_entity_of(struct rb_node *node)
{
	struct bfq_entity *entity = NULL;

	if (node)
		entity = rb_entry(node, struct bfq_entity, rb_node);

	return entity;
}

/**
 * bfq_extract - remove an entity from a tree.
 * @root: the tree root.
 * @entity: the entity to remove.
 */
static void bfq_extract(struct rb_root *root, struct bfq_entity *entity)
{
	entity->tree = NULL;
	rb_erase(&entity->rb_node, root);
}

/**
 * bfq_idle_extract - extract an entity from the idle tree.
 * @st: the service tree of the owning @entity.
 * @entity: the entity being removed.
 */
static void bfq_idle_extract(struct bfq_service_tree *st,
			     struct bfq_entity *entity)
{
	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
	struct rb_node *next;

	if (entity == st->first_idle) {
		next = rb_next(&entity->rb_node);
		st->first_idle = bfq_entity_of(next);
	}

	if (entity == st->last_idle) {
		next = rb_prev(&entity->rb_node);
		st->last_idle = bfq_entity_of(next);
	}

	bfq_extract(&st->idle, entity);

	if (bfqq)
		list_del(&bfqq->bfqq_list);
}

/**
 * bfq_insert - generic tree insertion.
 * @root: tree root.
 * @entity: entity to insert.
 *
 * This is used for the idle and the active tree, since they are both
 * ordered by finish time.
 */
static void bfq_insert(struct rb_root *root, struct bfq_entity *entity)
{
	struct bfq_entity *entry;
	struct rb_node **node = &root->rb_node;
	struct rb_node *parent = NULL;

	while (*node) {
		parent = *node;
		entry = rb_entry(parent, struct bfq_entity, rb_node);

		if (bfq_gt(entry->finish, entity->finish))
			node = &parent->rb_left;
		else
			node = &parent->rb_right;
	}

	rb_link_node(&entity->rb_node, parent, node);
	rb_insert_color(&entity->rb_node, root);

	entity->tree = root;
}

/**
 * bfq_update_min - update the min_start field of an entity.
 * @entity: the entity to update.
 * @node: one of its children.
 *
 * This function is called when @entity may store an invalid value for
 * min_start due to updates to the active tree. The function assumes
 * that the subtree rooted at @node (which may be its left or its right
 * child) has a valid min_start value.
 */
static void bfq_update_min(struct bfq_entity *entity, struct rb_node *node)
{
	struct bfq_entity *child;

	if (node) {
		child = rb_entry(node, struct bfq_entity, rb_node);
		if (bfq_gt(entity->min_start, child->min_start))
			entity->min_start = child->min_start;
	}
}

/**
 * bfq_update_active_node - recalculate min_start.
 * @node: the node to update.
 *
 * @node may have changed position or one of its children may have moved,
 * this function updates its min_start value. The left and right subtrees
 * are assumed to hold a correct min_start value.
 */
static void bfq_update_active_node(struct rb_node *node)
{
	struct bfq_entity *entity = rb_entry(node, struct bfq_entity, rb_node);

	entity->min_start = entity->start;
	bfq_update_min(entity, node->rb_right);
	bfq_update_min(entity, node->rb_left);
}

/**
 * bfq_update_active_tree - update min_start for the whole active tree.
 * @node: the starting node.
 *
 * @node must be the deepest modified node after an update. This function
 * updates its min_start using the values held by its children, assuming
 * that they did not change, and then updates all the nodes that may have
 * changed in the path to the root. The only nodes that may have changed
 * are the ones in the path or their siblings.
 */
static void bfq_update_active_tree(struct rb_node *node)
{
	struct rb_node *parent;

up:
	bfq_update_active_node(node);

	parent = rb_parent(node);
	if (!parent)
		return;

	if (node == parent->rb_left && parent->rb_right)
		bfq_update_active_node(parent->rb_right);
	else if (parent->rb_left)
		bfq_update_active_node(parent->rb_left);

	node = parent;
	goto up;
}

/**
 * bfq_active_insert - insert an entity in the active tree of its
 *                     group/device.
 * @st: the service tree of the entity.
 * @entity: the entity being inserted.
 *
 * The active tree is ordered by finish time, but an extra key is kept
 * per each node, containing the minimum value for the start times of
 * its children (and the node itself), so it's possible to search for
 * the eligible node with the lowest finish time in logarithmic time.
 */
static void bfq_active_insert(struct bfq_service_tree *st,
			      struct bfq_entity *entity)
{
	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
	struct rb_node *node = &entity->rb_node;
#ifdef CONFIG_BFQ_GROUP_IOSCHED
	struct bfq_sched_data *sd = NULL;
	struct bfq_group *bfqg = NULL;
	struct bfq_data *bfqd = NULL;
#endif

	bfq_insert(&st->active, entity);

	if (node->rb_left)
		node = node->rb_left;
	else if (node->rb_right)
		node = node->rb_right;

	bfq_update_active_tree(node);

#ifdef CONFIG_BFQ_GROUP_IOSCHED
	sd = entity->sched_data;
	bfqg = container_of(sd, struct bfq_group, sched_data);
	bfqd = (struct bfq_data *)bfqg->bfqd;
#endif
	if (bfqq)
		list_add(&bfqq->bfqq_list, &bfqq->bfqd->active_list);
#ifdef CONFIG_BFQ_GROUP_IOSCHED
	if (bfqg != bfqd->root_group)
		bfqg->active_entities++;
#endif
}
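
/*
 * Example of the extra key at work, with made-up timestamps: let the
 * active tree hold entities with (start, finish) = (2, 10) at the
 * root, (6, 8) as left child and (4, 12) as right child (the tree is
 * keyed on finish times, so the entity with the largest start time
 * may well sit on the left). The root's min_start is min(2, 6, 4) = 2.
 * With vtime = 3, a lookup sees that the root itself is eligible
 * (start 2 <= 3) and that the left subtree's min_start (6) exceeds
 * vtime, so no eligible entity with a smaller finish time can hide
 * there: the search stops in O(log n) without scanning either child.
 */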

/**
 * bfq_ioprio_to_weight - calc a weight from an ioprio.
 * @ioprio: the ioprio value to convert.
 */
unsigned short bfq_ioprio_to_weight(int ioprio)
{
	return (IOPRIO_BE_NR - ioprio) * BFQ_WEIGHT_CONVERSION_COEFF;
}

/**
 * bfq_weight_to_ioprio - calc an ioprio from a weight.
 * @weight: the weight value to convert.
 *
 * To preserve as much as possible the old only-ioprio user interface,
 * 0 is used as an escape ioprio value for weights (numerically) equal or
 * larger than IOPRIO_BE_NR * BFQ_WEIGHT_CONVERSION_COEFF.
 */
static unsigned short bfq_weight_to_ioprio(int weight)
{
	return max_t(int, 0,
		     IOPRIO_BE_NR - weight / BFQ_WEIGHT_CONVERSION_COEFF);
}
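
/*
 * Numeric sketch of the two mappings, assuming the usual values
 * IOPRIO_BE_NR = 8 and BFQ_WEIGHT_CONVERSION_COEFF = 10: ioprio 7
 * (lowest priority) maps to weight (8 - 7) * 10 = 10, the default
 * ioprio 4 to weight 40, and ioprio 0 to weight 80. In the opposite
 * direction, weight 40 maps back to ioprio 8 - 40/10 = 4, while any
 * weight >= 80 yields the escape ioprio value 0.
 */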

static void bfq_get_entity(struct bfq_entity *entity)
{
	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);

	if (bfqq) {
		bfqq->ref++;
		bfq_log_bfqq(bfqq->bfqd, bfqq, "get_entity: %p %d",
			     bfqq, bfqq->ref);
	}
}

/**
 * bfq_find_deepest - find the deepest node that an extraction can modify.
 * @node: the node being removed.
 *
 * Do the first step of an extraction in an rb tree, looking for the
 * node that will replace @node, and returning the deepest node that
 * the following modifications to the tree can touch. If @node is the
 * last node in the tree return %NULL.
 */
static struct rb_node *bfq_find_deepest(struct rb_node *node)
{
	struct rb_node *deepest;

	if (!node->rb_right && !node->rb_left)
		deepest = rb_parent(node);
	else if (!node->rb_right)
		deepest = node->rb_left;
	else if (!node->rb_left)
		deepest = node->rb_right;
	else {
		deepest = rb_next(node);
		if (deepest->rb_right)
			deepest = deepest->rb_right;
		else if (rb_parent(deepest) != node)
			deepest = rb_parent(deepest);
	}

	return deepest;
}

/**
 * bfq_active_extract - remove an entity from the active tree.
 * @st: the service_tree containing the tree.
 * @entity: the entity being removed.
 */
static void bfq_active_extract(struct bfq_service_tree *st,
			       struct bfq_entity *entity)
{
	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
	struct rb_node *node;
#ifdef CONFIG_BFQ_GROUP_IOSCHED
	struct bfq_sched_data *sd = NULL;
	struct bfq_group *bfqg = NULL;
	struct bfq_data *bfqd = NULL;
#endif

	node = bfq_find_deepest(&entity->rb_node);
	bfq_extract(&st->active, entity);

	if (node)
		bfq_update_active_tree(node);

#ifdef CONFIG_BFQ_GROUP_IOSCHED
	sd = entity->sched_data;
	bfqg = container_of(sd, struct bfq_group, sched_data);
	bfqd = (struct bfq_data *)bfqg->bfqd;
#endif
	if (bfqq)
		list_del(&bfqq->bfqq_list);
#ifdef CONFIG_BFQ_GROUP_IOSCHED
	if (bfqg != bfqd->root_group)
		bfqg->active_entities--;
#endif
}

/**
 * bfq_idle_insert - insert an entity into the idle tree.
 * @st: the service tree containing the tree.
 * @entity: the entity to insert.
 */
static void bfq_idle_insert(struct bfq_service_tree *st,
			    struct bfq_entity *entity)
{
	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
	struct bfq_entity *first_idle = st->first_idle;
	struct bfq_entity *last_idle = st->last_idle;

	if (!first_idle || bfq_gt(first_idle->finish, entity->finish))
		st->first_idle = entity;
	if (!last_idle || bfq_gt(entity->finish, last_idle->finish))
		st->last_idle = entity;

	bfq_insert(&st->idle, entity);

	if (bfqq)
		list_add(&bfqq->bfqq_list, &bfqq->bfqd->idle_list);
}

/**
 * bfq_forget_entity - do not consider entity any longer for scheduling
 * @st: the service tree.
 * @entity: the entity being removed.
 * @is_in_service: true if entity is currently the in-service entity.
 *
 * Forget everything about @entity. In addition, if entity represents
 * a queue, and the latter is not in service, then release the service
 * reference to the queue (the one taken through bfq_get_entity). In
 * fact, in this case, there is really no more service reference to
 * the queue, as the latter is also outside any service tree. If,
 * instead, the queue is in service, then __bfq_bfqd_reset_in_service
 * will take care of putting the reference when the queue finally
 * stops being served.
 */
static void bfq_forget_entity(struct bfq_service_tree *st,
			      struct bfq_entity *entity,
			      bool is_in_service)
{
	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);

	entity->on_st = false;
	st->wsum -= entity->weight;
	if (bfqq && !is_in_service)
		bfq_put_queue(bfqq);
}

/**
 * bfq_put_idle_entity - release the idle tree ref of an entity.
 * @st: service tree for the entity.
 * @entity: the entity being released.
 */
void bfq_put_idle_entity(struct bfq_service_tree *st, struct bfq_entity *entity)
{
	bfq_idle_extract(st, entity);
	bfq_forget_entity(st, entity,
			  entity == entity->sched_data->in_service_entity);
}

/**
 * bfq_forget_idle - update the idle tree if necessary.
 * @st: the service tree to act upon.
 *
 * To preserve the global O(log N) complexity we only remove one entry here;
 * as the idle tree will not grow indefinitely this can be done safely.
 */
static void bfq_forget_idle(struct bfq_service_tree *st)
{
	struct bfq_entity *first_idle = st->first_idle;
	struct bfq_entity *last_idle = st->last_idle;

	if (RB_EMPTY_ROOT(&st->active) && last_idle &&
	    !bfq_gt(last_idle->finish, st->vtime)) {
		/*
		 * Forget the whole idle tree, increasing the vtime past
		 * the last finish time of idle entities.
		 */
		st->vtime = last_idle->finish;
	}

	if (first_idle && !bfq_gt(first_idle->finish, st->vtime))
		bfq_put_idle_entity(st, first_idle);
}

struct bfq_service_tree *bfq_entity_service_tree(struct bfq_entity *entity)
{
	struct bfq_sched_data *sched_data = entity->sched_data;
	unsigned int idx = bfq_class_idx(entity);

	return sched_data->service_tree + idx;
}

/*
 * Update weight and priority of entity. If update_class_too is true,
 * then update the ioprio_class of entity too.
 *
 * The reason why the update of ioprio_class is controlled through the
 * last parameter is as follows. Changing the ioprio class of an
 * entity implies changing the destination service trees for that
 * entity. If such a change occurred when the entity is already on one
 * of the service trees for its previous class, then the state of the
 * entity would become more complex: none of the new possible service
 * trees for the entity, according to bfq_entity_service_tree(), would
 * match any of the possible service trees on which the entity
 * is. Complex operations involving these trees, such as entity
 * activations and deactivations, should take into account this
 * additional complexity. To avoid this issue, this function is
 * invoked with update_class_too unset in the points in the code where
 * entity may happen to be on some tree.
 */
struct bfq_service_tree *
__bfq_entity_update_weight_prio(struct bfq_service_tree *old_st,
				struct bfq_entity *entity,
				bool update_class_too)
{
	struct bfq_service_tree *new_st = old_st;

	if (entity->prio_changed) {
		struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
		unsigned int prev_weight, new_weight;
		struct bfq_data *bfqd = NULL;
		struct rb_root *root;
#ifdef CONFIG_BFQ_GROUP_IOSCHED
		struct bfq_sched_data *sd;
		struct bfq_group *bfqg;
#endif

		if (bfqq)
			bfqd = bfqq->bfqd;
#ifdef CONFIG_BFQ_GROUP_IOSCHED
		else {
			sd = entity->my_sched_data;
			bfqg = container_of(sd, struct bfq_group, sched_data);
			bfqd = (struct bfq_data *)bfqg->bfqd;
		}
#endif

		old_st->wsum -= entity->weight;

		if (entity->new_weight != entity->orig_weight) {
			if (entity->new_weight < BFQ_MIN_WEIGHT ||
			    entity->new_weight > BFQ_MAX_WEIGHT) {
				pr_crit("update_weight_prio: new_weight %d\n",
					entity->new_weight);
				if (entity->new_weight < BFQ_MIN_WEIGHT)
					entity->new_weight = BFQ_MIN_WEIGHT;
				else
					entity->new_weight = BFQ_MAX_WEIGHT;
			}
			entity->orig_weight = entity->new_weight;
			if (bfqq)
				bfqq->ioprio =
				  bfq_weight_to_ioprio(entity->orig_weight);
		}

		if (bfqq && update_class_too)
			bfqq->ioprio_class = bfqq->new_ioprio_class;

		/*
		 * Reset prio_changed only if the ioprio_class change
		 * is not pending any longer.
		 */
		if (!bfqq || bfqq->ioprio_class == bfqq->new_ioprio_class)
			entity->prio_changed = 0;

		/*
		 * NOTE: here we may be changing the weight too early,
		 * this will cause unfairness. The correct approach
		 * would have required additional complexity to defer
		 * weight changes to the proper time instants (i.e.,
		 * when entity->finish <= old_st->vtime).
		 */
		new_st = bfq_entity_service_tree(entity);

		prev_weight = entity->weight;
		new_weight = entity->orig_weight *
			     (bfqq ? bfqq->wr_coeff : 1);
		/*
		 * If the weight of the entity changes, and the entity is a
		 * queue, remove the entity from its old weight counter (if
		 * there is a counter associated with the entity).
		 */
		if (prev_weight != new_weight && bfqq) {
			root = &bfqd->queue_weights_tree;
			__bfq_weights_tree_remove(bfqd, bfqq, root);
		}
		entity->weight = new_weight;
		/*
		 * Add the entity, if it is not a weight-raised queue,
		 * to the counter associated with its new weight.
		 */
		if (prev_weight != new_weight && bfqq && bfqq->wr_coeff == 1) {
			/* If we get here, root has been initialized. */
			bfq_weights_tree_add(bfqd, bfqq, root);
		}

		new_st->wsum += entity->weight;

		if (new_st != old_st)
			entity->start = new_st->vtime;
	}

	return new_st;
}

/**
 * bfq_bfqq_served - update the scheduler status after selection for
 *		     service.
 * @bfqq: the queue being served.
 * @served: bytes to transfer.
 *
 * NOTE: this can be optimized, as the timestamps of upper level entities
 * are synchronized every time a new bfqq is selected for service. By now,
 * we keep it to better check consistency.
 */
void bfq_bfqq_served(struct bfq_queue *bfqq, int served)
{
	struct bfq_entity *entity = &bfqq->entity;
	struct bfq_service_tree *st;

	if (!bfqq->service_from_backlogged)
		bfqq->first_IO_time = jiffies;

	if (bfqq->wr_coeff > 1)
		bfqq->service_from_wr += served;

	bfqq->service_from_backlogged += served;
	for_each_entity(entity) {
		st = bfq_entity_service_tree(entity);

		entity->service += served;

		st->vtime += bfq_delta(served, st->wsum);
		bfq_forget_idle(st);
	}
	bfq_log_bfqq(bfqq->bfqd, bfqq, "bfqq_served %d secs", served);
}

/**
 * bfq_bfqq_charge_time - charge an amount of service equivalent to the length
 *			  of the time interval during which bfqq has been in
 *			  service.
 * @bfqd: the device
 * @bfqq: the queue that needs a service update.
 * @time_ms: the amount of time during which the queue has received service
 *
 * If a queue does not consume its budget fast enough, then providing
 * the queue with service fairness may impair throughput, more or less
 * severely. For this reason, queues that consume their budget slowly
 * are provided with time fairness instead of service fairness. This
 * goal is achieved through the BFQ scheduling engine, even if such an
 * engine works in the service, and not in the time domain. The trick
 * is charging these queues with an inflated amount of service, equal
 * to the amount of service that they would have received during their
 * service slot if they had been fast, i.e., if their requests had
 * been dispatched at a rate equal to the estimated peak rate.
 *
 * It is worth noting that time fairness can cause important
 * distortions in terms of bandwidth distribution, on devices with
 * internal queueing. The reason is that I/O requests dispatched
 * during the service slot of a queue may be served after that service
 * slot is finished, and may have a total processing time loosely
 * correlated with the duration of the service slot. This is
 * especially true for short service slots.
 */
void bfq_bfqq_charge_time(struct bfq_data *bfqd, struct bfq_queue *bfqq,
			  unsigned long time_ms)
{
	struct bfq_entity *entity = &bfqq->entity;
	unsigned long timeout_ms = jiffies_to_msecs(bfq_timeout);
	unsigned long bounded_time_ms = min(time_ms, timeout_ms);
	int serv_to_charge_for_time =
		(bfqd->bfq_max_budget * bounded_time_ms) / timeout_ms;
	int tot_serv_to_charge = max(serv_to_charge_for_time, entity->service);

	/* Increase budget to avoid inconsistencies */
	if (tot_serv_to_charge > entity->budget)
		entity->budget = tot_serv_to_charge;

	bfq_bfqq_served(bfqq,
			max_t(int, 0, tot_serv_to_charge - entity->service));
}
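
/*
 * Numeric sketch of the charge computation above, with made-up
 * values: assuming bfq_max_budget = 16000 and a budget timeout of
 * 125 ms, a queue that occupied the device for 25 ms is charged
 * 16000 * 25 / 125 = 3200 units of service, i.e., the service a fast
 * queue would have received in the same interval at the estimated
 * peak rate. time_ms values above the timeout are first clamped to
 * the timeout, so the charge never exceeds bfq_max_budget.
 */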

static void bfq_update_fin_time_enqueue(struct bfq_entity *entity,
					struct bfq_service_tree *st,
					bool backshifted)
{
	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);

	/*
	 * When this function is invoked, entity is not in any service
	 * tree, then it is safe to invoke next function with the last
	 * parameter set (see the comments on the function).
	 */
	st = __bfq_entity_update_weight_prio(st, entity, true);
	bfq_calc_finish(entity, entity->budget);

	/*
	 * If some queues enjoy backshifting for a while, then their
	 * (virtual) finish timestamps may happen to become lower and
	 * lower than the system virtual time. In particular, if
	 * these queues often happen to be idle for short time
	 * periods, and during such time periods other queues with
	 * higher timestamps happen to be busy, then the backshifted
	 * timestamps of the former queues can become much lower than
	 * the system virtual time. In fact, to serve the queues with
	 * higher timestamps while the ones with lower timestamps are
	 * idle, the system virtual time may be pushed-up to much
	 * higher values than the finish timestamps of the idle
	 * queues. As a consequence, the finish timestamps of all new
	 * or newly activated queues may end up being much larger than
	 * those of lucky queues with backshifted timestamps. The
	 * latter queues may then monopolize the device for a lot of
	 * time. This would simply break service guarantees.
	 *
	 * To reduce this problem, push up a little bit the
	 * backshifted timestamps of the queue associated with this
	 * entity (only a queue can happen to have the backshifted
	 * flag set): just enough to let the finish timestamp of the
	 * queue be equal to the current value of the system virtual
	 * time. This may introduce a little unfairness among queues
	 * with backshifted timestamps, but it does not break
	 * worst-case fairness guarantees.
	 *
	 * As a special case, if bfqq is weight-raised, push up
	 * timestamps much less, to keep very low the probability that
	 * this push up causes the backshifted finish timestamps of
	 * weight-raised queues to become higher than the backshifted
	 * finish timestamps of non weight-raised queues.
	 */
	if (backshifted && bfq_gt(st->vtime, entity->finish)) {
		unsigned long delta = st->vtime - entity->finish;

		if (bfqq)
			delta /= bfqq->wr_coeff;

		entity->start += delta;
		entity->finish += delta;
	}

	bfq_active_insert(st, entity);
}
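
/*
 * Example of the push-up above, with made-up numbers: if st->vtime
 * is 1000 and a backshifted queue has finish = 400, then delta is
 * 600. A non-weight-raised queue (wr_coeff == 1) has both timestamps
 * moved forward by 600, so its finish lands exactly on the system
 * virtual time; a queue being weight-raised with, say, wr_coeff == 30
 * is moved forward by only 600 / 30 = 20, preserving most of the
 * privilege its backshifted timestamps grant.
 */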

/*
 * __bfq_activate_entity - handle activation of entity.
 * @entity: the entity being activated.
 * @non_blocking_wait_rq: true if entity was waiting for a request
 *
 * Called for a 'true' activation, i.e., if entity is not active and
 * one of its children receives a new request.
 *
 * Basically, this function updates the timestamps of entity and
 * inserts entity into its active tree, after possibly extracting it
 * from its idle tree.
 */
static void __bfq_activate_entity(struct bfq_entity *entity,
				  bool non_blocking_wait_rq)
{
	struct bfq_service_tree *st = bfq_entity_service_tree(entity);
	bool backshifted = false;
	unsigned long long min_vstart;

	/* See comments on bfq_bfqq_update_budg_for_activation */
	if (non_blocking_wait_rq && bfq_gt(st->vtime, entity->finish)) {
		backshifted = true;
		min_vstart = entity->finish;
	} else
		min_vstart = st->vtime;

	if (entity->tree == &st->idle) {
		/*
		 * Must be on the idle tree, bfq_idle_extract() will
		 * check for that.
		 */
		bfq_idle_extract(st, entity);
		entity->start = bfq_gt(min_vstart, entity->finish) ?
			min_vstart : entity->finish;
	} else {
		/*
		 * The finish time of the entity may be invalid, and
		 * it is in the past for sure, otherwise the queue
		 * would have been on the idle tree.
		 */
		entity->start = min_vstart;
		st->wsum += entity->weight;
		/*
		 * entity is about to be inserted into a service tree,
		 * and then set in service: get a reference to make
		 * sure entity does not disappear until it is no
		 * longer in service or scheduled for service.
		 */
		bfq_get_entity(entity);

		entity->on_st = true;
	}

#ifdef CONFIG_BFQ_GROUP_IOSCHED
	if (!bfq_entity_to_bfqq(entity)) { /* bfq_group */
		struct bfq_group *bfqg =
			container_of(entity, struct bfq_group, entity);
		struct bfq_data *bfqd = bfqg->bfqd;

		if (!entity->in_groups_with_pending_reqs) {
			entity->in_groups_with_pending_reqs = true;
			bfqd->num_groups_with_pending_reqs++;
		}
	}
#endif

	bfq_update_fin_time_enqueue(entity, st, backshifted);
}

/*
 * __bfq_requeue_entity - handle requeueing or repositioning of an entity.
 * @entity: the entity being requeued or repositioned.
 *
 * Requeueing is needed if this entity stops being served, which
 * happens if a leaf descendant entity has expired. On the other hand,
 * repositioning is needed if the next_in_service entity for the child
 * entity has changed. See the comments inside the function for
 * details.
 *
 * Basically, this function: 1) removes entity from its active tree if
 * present there, 2) updates the timestamps of entity and 3) inserts
 * entity back into its active tree (in the new, right position for
 * the new values of the timestamps).
 */
static void __bfq_requeue_entity(struct bfq_entity *entity)
{
	struct bfq_sched_data *sd = entity->sched_data;
	struct bfq_service_tree *st = bfq_entity_service_tree(entity);

	if (entity == sd->in_service_entity) {
		/*
		 * We are requeueing the current in-service entity,
		 * which may have to be done for one of the following
		 * reasons:
		 * - entity represents the in-service queue, and the
		 *   in-service queue is being requeued after an
		 *   expiration;
		 * - entity represents a group, and its budget has
		 *   changed because one of its child entities has
		 *   just been either activated or requeued for some
		 *   reason; the timestamps of the entity need then to
		 *   be updated, and the entity needs to be enqueued
		 *   or repositioned accordingly.
		 *
		 * In particular, before requeueing, the start time of
		 * the entity must be moved forward to account for the
		 * service that the entity has received while in
		 * service. This is done by the next instructions. The
		 * finish time will then be updated according to this
		 * new value of the start time, and to the budget of
		 * the entity.
		 */
		bfq_calc_finish(entity, entity->service);
		entity->start = entity->finish;
		/*
		 * In addition, if the entity had more than one child
		 * when set in service, then it was not extracted from
		 * the active tree. This implies that the position of
		 * the entity in the active tree may need to be
		 * changed now, because we have just updated the start
		 * time of the entity, and we will update its finish
		 * time in a moment (the requeueing is then, more
		 * precisely, a repositioning in this case). To
		 * implement this repositioning, we: 1) dequeue the
		 * entity here, 2) update the finish time and requeue
		 * the entity according to the new timestamps below.
		 */
		if (entity->tree)
			bfq_active_extract(st, entity);
	} else { /* The entity is already active, and not in service */
		/*
		 * In this case, this function gets called only if the
		 * next_in_service entity below this entity has
		 * changed, and this change has caused the budget of
		 * this entity to change, which, finally implies that
		 * the finish time of this entity must be
		 * updated. Such an update may cause the scheduling,
		 * i.e., the position in the active tree, of this
		 * entity to change. We handle this change by: 1)
		 * dequeueing the entity here, 2) updating the finish
		 * time and requeueing the entity according to the new
		 * timestamps below. This is the same approach as the
		 * non-extracted-entity sub-case above.
		 */
		bfq_active_extract(st, entity);
	}

	bfq_update_fin_time_enqueue(entity, st, false);
}

static void __bfq_activate_requeue_entity(struct bfq_entity *entity,
					  struct bfq_sched_data *sd,
					  bool non_blocking_wait_rq)
{
	struct bfq_service_tree *st = bfq_entity_service_tree(entity);

	if (sd->in_service_entity == entity || entity->tree == &st->active)
		/*
		 * in service or already queued on the active tree,
		 * requeue or reposition
		 */
		__bfq_requeue_entity(entity);
	else
		/*
		 * Not in service and not queued on its active tree:
		 * the activity is idle and this is a true activation.
		 */
		__bfq_activate_entity(entity, non_blocking_wait_rq);
}

/**
 * bfq_activate_requeue_entity - activate or requeue an entity representing a
 *				 bfq_queue, and activate, requeue or reposition
 *				 all ancestors for which such an update becomes
 *				 necessary.
 * @entity: the entity to activate.
 * @non_blocking_wait_rq: true if this entity was waiting for a request
 * @requeue: true if this is a requeue, which implies that bfqq is
 *	     being expired; thus ALL its ancestors stop being served and must
 *	     therefore be requeued
 * @expiration: true if this function is being invoked in the expiration path
 *		of the in-service queue
 */
static void bfq_activate_requeue_entity(struct bfq_entity *entity,
					bool non_blocking_wait_rq,
					bool requeue, bool expiration)
{
	struct bfq_sched_data *sd;

	for_each_entity(entity) {
		sd = entity->sched_data;
		__bfq_activate_requeue_entity(entity, sd, non_blocking_wait_rq);

		if (!bfq_update_next_in_service(sd, entity, expiration) &&
		    !requeue)
			break;
	}
}

/**
 * __bfq_deactivate_entity - update sched_data and service trees for
 * entity, so as to represent entity as inactive
 * @entity: the entity being deactivated.
 * @ins_into_idle_tree: if false, the entity will not be put into the
 *			idle tree.
 *
 * If necessary and allowed, puts entity into the idle tree. NOTE:
 * entity may be on no tree if in service.
 */
bool __bfq_deactivate_entity(struct bfq_entity *entity, bool ins_into_idle_tree)
{
	struct bfq_sched_data *sd = entity->sched_data;
	struct bfq_service_tree *st;
	bool is_in_service;

	if (!entity->on_st) /* entity never activated, or already inactive */
		return false;

	/*
	 * If we get here, then entity is active, which implies that
	 * bfq_group_set_parent has already been invoked for the group
	 * represented by entity. Therefore, the field
	 * entity->sched_data has been set, and we can safely use it.
	 */
	st = bfq_entity_service_tree(entity);
	is_in_service = entity == sd->in_service_entity;

	bfq_calc_finish(entity, entity->service);

	if (is_in_service)
		sd->in_service_entity = NULL;
	else
		/*
		 * Non in-service entity: nobody will take care of
		 * resetting its service counter on expiration. Do it
		 * now.
		 */
		entity->service = 0;

	if (entity->tree == &st->active)
		bfq_active_extract(st, entity);
	else if (!is_in_service && entity->tree == &st->idle)
		bfq_idle_extract(st, entity);

	if (!ins_into_idle_tree || !bfq_gt(entity->finish, st->vtime))
		bfq_forget_entity(st, entity, is_in_service);
	else
		bfq_idle_insert(st, entity);

	return true;
}

/**
 * bfq_deactivate_entity - deactivate an entity representing a bfq_queue.
 * @entity: the entity to deactivate.
 * @ins_into_idle_tree: true if the entity can be put into the idle tree
 * @expiration: true if this function is being invoked in the expiration path
 *		of the in-service queue
 */
static void bfq_deactivate_entity(struct bfq_entity *entity,
				  bool ins_into_idle_tree,
				  bool expiration)
{
	struct bfq_sched_data *sd;
	struct bfq_entity *parent = NULL;

	for_each_entity_safe(entity, parent) {
		sd = entity->sched_data;

		if (!__bfq_deactivate_entity(entity, ins_into_idle_tree)) {
			/*
			 * entity is not in any tree any more, so
			 * this deactivation is a no-op, and there is
			 * nothing to change for upper-level entities
			 * (in case of expiration, this can never
			 * happen).
			 */
			return;
		}

		if (sd->next_in_service == entity)
			/*
			 * entity was the next_in_service entity,
			 * then, since entity has just been
			 * deactivated, a new one must be found.
			 */
			bfq_update_next_in_service(sd, NULL, expiration);

		if (sd->next_in_service || sd->in_service_entity) {
			/*
			 * The parent entity is still active, because
			 * either next_in_service or in_service_entity
			 * is not NULL. So, no further upwards
			 * deactivation must be performed. Yet,
			 * next_in_service has changed. Then the
			 * schedule does need to be updated upwards.
			 *
			 * NOTE If in_service_entity is not NULL, then
			 * next_in_service may happen to be NULL,
			 * although the parent entity is evidently
			 * active. This happens if 1) the entity
			 * pointed by in_service_entity is the only
			 * active entity in the parent entity, and 2)
			 * according to the definition of
			 * next_in_service, the in_service_entity
			 * cannot be considered as
			 * next_in_service. See the comments on the
			 * definition of next_in_service for details.
			 */
			break;
		}

		/*
		 * If we get here, then the parent is no more
		 * backlogged and we need to propagate the
		 * deactivation upwards. Thus let the loop go on.
		 */

		/*
		 * Also let parent be queued into the idle tree on
		 * deactivation, to preserve service guarantees, and
		 * assuming that who invoked this function does not
		 * need parent entities too to be removed completely.
		 */
		ins_into_idle_tree = true;
	}

	/*
	 * If the deactivation loop is fully executed, then there are
	 * no more entities to touch and next loop is not executed at
	 * all. Otherwise, requeue remaining entities if they are
	 * about to stop receiving service, or reposition them if this
	 * is not the case.
	 */
	entity = parent;
	for_each_entity(entity) {
		/*
		 * Invoke __bfq_requeue_entity on entity, even if
		 * already active, to requeue/reposition it in the
		 * active tree (because sd->next_in_service has
		 * changed)
		 */
		__bfq_requeue_entity(entity);

		sd = entity->sched_data;
		if (!bfq_update_next_in_service(sd, entity, expiration) &&
		    !expiration)
			/*
			 * next_in_service unchanged or not causing
			 * any change in entity->parent->sd, and no
			 * requeueing needed for expiration: stop
			 * here.
			 */
			break;
	}
}

/**
 * bfq_calc_vtime_jump - compute the value to which the vtime should jump,
 *                       if needed, to have at least one entity eligible.
 * @st: the service tree to act upon.
 *
 * Assumes that st is not empty.
 */
static u64 bfq_calc_vtime_jump(struct bfq_service_tree *st)
{
	struct bfq_entity *root_entity = bfq_root_active_entity(&st->active);

	if (bfq_gt(root_entity->min_start, st->vtime))
		return root_entity->min_start;

	return st->vtime;
}

static void bfq_update_vtime(struct bfq_service_tree *st, u64 new_value)
{
	if (new_value > st->vtime) {
		st->vtime = new_value;
		bfq_forget_idle(st);
	}
}

/**
 * bfq_first_active_entity - find the eligible entity with
 *                           the smallest finish time
 * @st: the service tree to select from.
 * @vtime: the system virtual time to use as a reference for eligibility
 *
 * This function searches the first schedulable entity, starting from the
 * root of the tree and going on the left every time on this side there is
 * a subtree with at least one eligible (start <= vtime) entity. The path on
 * the right is followed only if a) the left subtree contains no eligible
 * entities and b) no eligible entity has been found yet.
 */
static struct bfq_entity *bfq_first_active_entity(struct bfq_service_tree *st,
						  u64 vtime)
{
	struct bfq_entity *entry, *first = NULL;
	struct rb_node *node = st->active.rb_node;

	while (node) {
		entry = rb_entry(node, struct bfq_entity, rb_node);
left:
		if (!bfq_gt(entry->start, vtime))
			first = entry;

		if (node->rb_left) {
			entry = rb_entry(node->rb_left,
					 struct bfq_entity, rb_node);
			if (!bfq_gt(entry->min_start, vtime)) {
				node = node->rb_left;
				goto left;
			}
		}
		if (first)
			break;
		node = node->rb_right;
	}

	return first;
}
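
/*
 * Walk-through of the search above on a small tree, with made-up
 * timestamps: let the active tree contain (start, finish) = (5, 20)
 * at the root, (1, 10) as left child and (3, 30) as right child, and
 * let vtime = 4. The root is not eligible (5 > 4), but the left
 * subtree has min_start = 1 <= 4, so the walk goes left and finds
 * (1, 10), which is eligible and has the smallest finish time among
 * the eligible entities; (3, 30) is never visited.
 */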

/**
 * __bfq_lookup_next_entity - return the first eligible entity in @st.
 * @st: the service tree.
 *
 * If there is no in-service entity for the sched_data st belongs to,
 * then return the entity that will be set in service if:
 * 1) the parent entity this st belongs to is set in service;
 * 2) no entity belonging to such parent entity undergoes a state change
 * that would influence the timestamps of the entity (e.g., becomes idle,
 * becomes backlogged, changes its budget, ...).
 *
 * In this first case, update the virtual time in @st too (see the
 * comments on this update inside the function).
 *
 * In contrast, if there is an in-service entity, then return the
 * entity that would be set in service if not only the above
 * conditions, but also the next one held true: the currently
 * in-service entity, on expiration,
 * 1) gets a finish time equal to the current one, or
 * 2) is not eligible any more, or
 * 3) is idle.
 */
static struct bfq_entity *
__bfq_lookup_next_entity(struct bfq_service_tree *st, bool in_service)
{
	struct bfq_entity *entity;
	u64 new_vtime;

	if (RB_EMPTY_ROOT(&st->active))
		return NULL;

	/*
	 * Get the value of the system virtual time for which at
	 * least one entity is eligible.
	 */
	new_vtime = bfq_calc_vtime_jump(st);

	/*
	 * If there is no in-service entity for the sched_data this
	 * active tree belongs to, then push the system virtual time
	 * up to the value that guarantees that at least one entity is
	 * eligible. If, instead, there is an in-service entity, then
	 * do not make any such update, because there is already an
	 * eligible entity, namely the in-service one (even if the
	 * entity is not on st, because it was extracted when set in
	 * service).
	 */
	if (!in_service)
		bfq_update_vtime(st, new_vtime);

	entity = bfq_first_active_entity(st, new_vtime);

	return entity;
}

/**
 * bfq_lookup_next_entity - return the first eligible entity in @sd.
 * @sd: the sched_data.
 * @expiration: true if we are on the expiration path of the in-service queue
 *
 * This function is invoked when there has been a change in the trees
 * for sd, and we need to know what is the new next entity to serve
 * after this change.
 */
static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd,
						 bool expiration)
{
	struct bfq_service_tree *st = sd->service_tree;
	struct bfq_service_tree *idle_class_st = st + (BFQ_IOPRIO_CLASSES - 1);
	struct bfq_entity *entity = NULL;
	int class_idx = 0;

	/*
	 * Choose from idle class, if needed to guarantee a minimum
	 * bandwidth to this class (and if there is some active entity
	 * in idle class). This should also mitigate
	 * priority-inversion problems in case a low priority task is
	 * holding file system resources.
	 */
	if (time_is_before_jiffies(sd->bfq_class_idle_last_service +
				   BFQ_CL_IDLE_TIMEOUT)) {
		if (!RB_EMPTY_ROOT(&idle_class_st->active))
			class_idx = BFQ_IOPRIO_CLASSES - 1;
		/* About to be served if backlogged, or not yet backlogged */
		sd->bfq_class_idle_last_service = jiffies;
	}

	/*
	 * Find the next entity to serve for the highest-priority
	 * class, unless the idle class needs to be served.
	 */
	for (; class_idx < BFQ_IOPRIO_CLASSES; class_idx++) {
		/*
		 * If expiration is true, then bfq_lookup_next_entity
		 * is being invoked as a part of the expiration path
		 * of the in-service queue. In this case, even if
		 * sd->in_service_entity is not NULL,
		 * sd->in_service_entity at this point is actually not
		 * in service any more, and, if needed, has already
		 * been properly queued or requeued into the right
		 * tree. The reason why sd->in_service_entity is still
		 * not NULL here, even if expiration is true, is that
		 * sd->in_service_entity is reset as a last step in the
		 * expiration path. So, if expiration is true, tell
		 * __bfq_lookup_next_entity that there is no
		 * sd->in_service_entity.
		 */
		entity = __bfq_lookup_next_entity(st + class_idx,
						  sd->in_service_entity &&
						  !expiration);

		if (entity)
			break;
	}

	if (!entity)
		return NULL;

	return entity;
}

bool next_queue_may_preempt(struct bfq_data *bfqd)
{
	struct bfq_sched_data *sd = &bfqd->root_group->sched_data;

	return sd->next_in_service != sd->in_service_entity;
}

/*
 * Get next queue for service.
 */
struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd)
{
	struct bfq_entity *entity = NULL;
	struct bfq_sched_data *sd;
	struct bfq_queue *bfqq;

	if (bfq_tot_busy_queues(bfqd) == 0)
		return NULL;

	/*
	 * Traverse the path from the root to the leaf entity to
	 * serve. Set in service all the entities visited along the
	 * way.
	 */
	sd = &bfqd->root_group->sched_data;
	for (; sd ; sd = entity->my_sched_data) {
		/*
		 * WARNING. We are about to set the in-service entity
		 * to sd->next_in_service, i.e., to the (cached) value
		 * returned by bfq_lookup_next_entity(sd) the last
		 * time it was invoked, i.e., the last time when the
		 * service order in sd changed as a consequence of the
		 * activation or deactivation of an entity. In this
		 * respect, if we execute bfq_lookup_next_entity(sd)
		 * in this very moment, it may, although with low
		 * probability, yield a different entity than that
		 * pointed to by sd->next_in_service. This rare event
		 * happens in case there was no CLASS_IDLE entity to
		 * serve for sd when bfq_lookup_next_entity(sd) was
		 * invoked for the last time, while there is now one
		 * such entity.
		 *
		 * If the above event happens, then the scheduling of
		 * such entity in CLASS_IDLE is postponed until the
		 * service of the sd->next_in_service entity
		 * finishes. In fact, when the latter is expired,
		 * bfq_lookup_next_entity(sd) gets called again,
		 * exactly to update sd->next_in_service.
		 */

		/* Make next_in_service entity become in_service_entity */
		entity = sd->next_in_service;
		sd->in_service_entity = entity;

		/*
		 * If entity is no longer a candidate for next
		 * service, then it must be extracted from its active
		 * tree, so as to make sure that it won't be
		 * considered when computing next_in_service. See the
		 * comments on the function
		 * bfq_no_longer_next_in_service() for details.
		 */
		if (bfq_no_longer_next_in_service(entity))
			bfq_active_extract(bfq_entity_service_tree(entity),
					   entity);

		/*
		 * Even if entity is not to be extracted according to
		 * the above check, a descendant entity may get
		 * extracted in one of the next iterations of this
		 * loop. Such an event could cause a change in
		 * next_in_service for the level of the descendant
		 * entity, and thus possibly back to this level.
		 *
		 * However, we cannot perform the resulting needed
		 * update of next_in_service for this level before the
		 * end of the whole loop, because, to know which is
		 * the correct next-to-serve candidate entity for each
		 * level, we need first to find the leaf entity to set
		 * in service. In fact, only after we know which is
		 * the next-to-serve leaf entity, we can discover
		 * whether the parent entity of the leaf entity
		 * becomes the next-to-serve, and so on.
		 */
	}

	bfqq = bfq_entity_to_bfqq(entity);

	/*
	 * We can finally update all next-to-serve entities along the
	 * path from the leaf entity just set in service to the root.
	 */
	for_each_entity(entity) {
		struct bfq_sched_data *sd = entity->sched_data;

		if (!bfq_update_next_in_service(sd, NULL, false))
			break;
	}

	return bfqq;
}

void __bfq_bfqd_reset_in_service(struct bfq_data *bfqd)
{
	struct bfq_queue *in_serv_bfqq = bfqd->in_service_queue;
	struct bfq_entity *in_serv_entity = &in_serv_bfqq->entity;
	struct bfq_entity *entity = in_serv_entity;

	bfq_clear_bfqq_wait_request(in_serv_bfqq);
	hrtimer_try_to_cancel(&bfqd->idle_slice_timer);
	bfqd->in_service_queue = NULL;

	/*
	 * When this function is called, all in-service entities have
	 * been properly deactivated or requeued, so we can safely
	 * execute the final step: reset in_service_entity along the
	 * path from entity to the root.
	 */
	for_each_entity(entity)
		entity->sched_data->in_service_entity = NULL;

	/*
	 * in_serv_entity is no longer in service, so, if it is in no
	 * service tree either, then release the service reference to
	 * the queue it represents (taken with bfq_get_entity).
	 */
	if (!in_serv_entity->on_st)
		bfq_put_queue(in_serv_bfqq);
}

void bfq_deactivate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
			 bool ins_into_idle_tree, bool expiration)
{
	struct bfq_entity *entity = &bfqq->entity;

	bfq_deactivate_entity(entity, ins_into_idle_tree, expiration);
}

void bfq_activate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
{
	struct bfq_entity *entity = &bfqq->entity;

	bfq_activate_requeue_entity(entity, bfq_bfqq_non_blocking_wait_rq(bfqq),
				    false, false);
	bfq_clear_bfqq_non_blocking_wait_rq(bfqq);
}

void bfq_requeue_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
		      bool expiration)
{
	struct bfq_entity *entity = &bfqq->entity;

	bfq_activate_requeue_entity(entity, false,
				    bfqq == bfqd->in_service_queue, expiration);
}

/*
 * Called when the bfqq no longer has requests pending, remove it from
 * the service tree. As a special case, it can be invoked during an
 * expiration.
 */
void bfq_del_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq,
		       bool expiration)
{
	bfq_log_bfqq(bfqd, bfqq, "del from busy");

	bfq_clear_bfqq_busy(bfqq);

	bfqd->busy_queues[bfqq->ioprio_class - 1]--;

	if (bfqq->wr_coeff > 1)
		bfqd->wr_busy_queues--;

	bfqg_stats_update_dequeue(bfqq_group(bfqq));

	bfq_deactivate_bfqq(bfqd, bfqq, true, expiration);

	if (!bfqq->dispatched)
		bfq_weights_tree_remove(bfqd, bfqq);
}

/*
 * Called when an inactive queue receives a new request.
 */
void bfq_add_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq)
{
	bfq_log_bfqq(bfqd, bfqq, "add to busy");

	bfq_activate_bfqq(bfqd, bfqq);

	bfq_mark_bfqq_busy(bfqq);
	bfqd->busy_queues[bfqq->ioprio_class - 1]++;

	if (!bfqq->dispatched)
		if (bfqq->wr_coeff == 1)
			bfq_weights_tree_add(bfqd, bfqq,
					     &bfqd->queue_weights_tree);

	if (bfqq->wr_coeff > 1)
		bfqd->wr_busy_queues++;
}