block/bfq-wf2q.c
1/*
2 * Hierarchical Budget Worst-case Fair Weighted Fair Queueing
3 * (B-WF2Q+): hierarchical scheduling algorithm by which the BFQ I/O
4 * scheduler schedules generic entities. The latter can represent
5 * either single bfq queues (associated with processes) or groups of
6 * bfq queues (associated with cgroups).
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License as
10 * published by the Free Software Foundation; either version 2 of the
11 * License, or (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 */
18#include "bfq-iosched.h"
19
20/**
21 * bfq_gt - compare two timestamps.
22 * @a: first ts.
23 * @b: second ts.
24 *
25 * Return @a > @b, dealing with wrapping correctly.
26 */
27static int bfq_gt(u64 a, u64 b)
28{
29 return (s64)(a - b) > 0;
30}
31
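/*
 * Worked example of the wrapping comparison above (illustrative values
 * only): with a = 2 and b = ULLONG_MAX - 1, the difference a - b wraps
 * around to 4, so (s64)(a - b) == 4 > 0 and bfq_gt() correctly reports
 * that @a is logically later than @b, even though a < b numerically.
 * The comparison stays meaningful as long as the two timestamps are
 * less than 2^63 virtual-time units apart.
 */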
32static struct bfq_entity *bfq_root_active_entity(struct rb_root *tree)
33{
34 struct rb_node *node = tree->rb_node;
35
36 return rb_entry(node, struct bfq_entity, rb_node);
37}
38
39static unsigned int bfq_class_idx(struct bfq_entity *entity)
40{
41 struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
42
43 return bfqq ? bfqq->ioprio_class - 1 :
44 BFQ_DEFAULT_GRP_CLASS - 1;
45}
46
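/*
 * Illustration, assuming the usual ioprio class values (IOPRIO_CLASS_RT
 * == 1, IOPRIO_CLASS_BE == 2, IOPRIO_CLASS_IDLE == 3): RT, BE and IDLE
 * queues map to service-tree indexes 0, 1 and 2, respectively, while a
 * group entity (bfqq == NULL) falls back to the index of
 * BFQ_DEFAULT_GRP_CLASS.
 */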
47static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd,
48 bool expiration);
49
50static bool bfq_update_parent_budget(struct bfq_entity *next_in_service);
51
52/**
53 * bfq_update_next_in_service - update sd->next_in_service
54 * @sd: sched_data for which to perform the update.
55 * @new_entity: if not NULL, pointer to the entity whose activation,
56 * requeueing or repositioning triggered the invocation of
57 * this function.
58 * @expiration: if true, this function is being invoked after the
59 * expiration of the in-service entity
60 *
61 * This function is called to update sd->next_in_service, which, in
62 * its turn, may change as a consequence of the insertion or
63 * extraction of an entity into/from one of the active trees of
64 * sd. These insertions/extractions occur as a consequence of
65 * activations/deactivations of entities, with some activations being
66 * 'true' activations, and other activations being requeueings (i.e.,
67 * implementing the second, requeueing phase of the mechanism used to
68 * reposition an entity in its active tree; see comments on
69 * __bfq_activate_entity and __bfq_requeue_entity for details). In
70 * both the last two activation sub-cases, new_entity points to the
71 * just activated or requeued entity.
72 *
73 * Returns true if sd->next_in_service changes in such a way that
74 * entity->parent may become the next_in_service for its parent
75 * entity.
76 */
77static bool bfq_update_next_in_service(struct bfq_sched_data *sd,
78 struct bfq_entity *new_entity,
79 bool expiration)
80{
81 struct bfq_entity *next_in_service = sd->next_in_service;
82 bool parent_sched_may_change = false;
83 bool change_without_lookup = false;
84
85 /*
86 * If this update is triggered by the activation, requeueing
87 * or repositioning of an entity that does not coincide with
88 * sd->next_in_service, then a full lookup in the active tree
89 * can be avoided. In fact, it is enough to check whether the
90 * just-modified entity has the same priority as
91 * sd->next_in_service, is eligible and has a lower virtual
92 * finish time than sd->next_in_service. If this compound
93 * condition holds, then the new entity becomes the new
94 * next_in_service. Otherwise no change is needed.
95 */
96 if (new_entity && new_entity != sd->next_in_service) {
97 /*
98 * Flag used to decide whether to replace
99 * sd->next_in_service with new_entity. Tentatively
100 * set to true, and left as true if
101 * sd->next_in_service is NULL.
102 */
103 change_without_lookup = true;
104
105 /*
106 * If there is already a next_in_service candidate
107 * entity, then compare timestamps to decide whether
108 * to replace sd->next_in_service with new_entity.
109 */
110 if (next_in_service) {
111 unsigned int new_entity_class_idx =
112 bfq_class_idx(new_entity);
113 struct bfq_service_tree *st =
114 sd->service_tree + new_entity_class_idx;
115
116 change_without_lookup =
117 (new_entity_class_idx ==
118 bfq_class_idx(next_in_service)
119 &&
120 !bfq_gt(new_entity->start, st->vtime)
121 &&
122 bfq_gt(next_in_service->finish,
123 new_entity->finish));
124 }
125
126 if (change_without_lookup)
127 next_in_service = new_entity;
128 }
129
130 if (!change_without_lookup) /* lookup needed */
131 next_in_service = bfq_lookup_next_entity(sd, expiration);
132
133 if (next_in_service)
134 parent_sched_may_change = !sd->next_in_service ||
135 bfq_update_parent_budget(next_in_service);
136
137 sd->next_in_service = next_in_service;
138
139 if (!next_in_service)
140 return parent_sched_may_change;
141
142 return parent_sched_may_change;
143}
144
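/*
 * Numerical sketch of the shortcut above (made-up timestamps): suppose
 * sd->next_in_service has finish time 5000 and the vtime of its class
 * tree is 4000. If an entity of the same class is requeued with start
 * time 3900 (eligible, since 3900 <= 4000) and finish time 4500 < 5000,
 * then change_without_lookup holds and that entity replaces
 * sd->next_in_service without any lookup in the active tree. If any of
 * the three sub-conditions fails, bfq_lookup_next_entity() is invoked
 * instead.
 */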
145#ifdef CONFIG_BFQ_GROUP_IOSCHED
146
147struct bfq_group *bfq_bfqq_to_bfqg(struct bfq_queue *bfqq)
148{
149 struct bfq_entity *group_entity = bfqq->entity.parent;
150
151 if (!group_entity)
152 group_entity = &bfqq->bfqd->root_group->entity;
153
154 return container_of(group_entity, struct bfq_group, entity);
155}
156
157/*
158 * Returns true if this budget change may let next_in_service->parent
159 * become the next_in_service entity for its parent entity.
160 */
161static bool bfq_update_parent_budget(struct bfq_entity *next_in_service)
162{
163 struct bfq_entity *bfqg_entity;
164 struct bfq_group *bfqg;
165 struct bfq_sched_data *group_sd;
166 bool ret = false;
167
168 group_sd = next_in_service->sched_data;
169
170 bfqg = container_of(group_sd, struct bfq_group, sched_data);
171 /*
172 * bfq_group's my_entity field is not NULL only if the group
173 * is not the root group. We must not touch the root entity
174 * as it must never become an in-service entity.
175 */
176 bfqg_entity = bfqg->my_entity;
177 if (bfqg_entity) {
178 if (bfqg_entity->budget > next_in_service->budget)
179 ret = true;
180 bfqg_entity->budget = next_in_service->budget;
181 }
182
183 return ret;
184}
185
186/*
187 * This function tells whether entity stops being a candidate for next
188 * service, according to the restrictive definition of the field
189 * next_in_service. In particular, this function is invoked for an
190 * entity that is about to be set in service.
191 *
192 * If entity is a queue, then the entity is no longer a candidate for
193 * next service according to that definition, because entity is
194 * about to become the in-service queue. This function then returns
195 * true if entity is a queue.
196 *
197 * In contrast, entity could still be a candidate for next service if
198 * it is not a queue, and has more than one active child. In fact,
199 * even if one of its children is about to be set in service, other
200 * active children may still be the next to serve, for the parent
201 * entity, even according to the above definition. As a consequence, a
202 * non-queue entity is not a candidate for next-service only if it has
203 * only one active child. And only if this condition holds, then this
204 * function returns true for a non-queue entity.
205 */
206static bool bfq_no_longer_next_in_service(struct bfq_entity *entity)
207{
208 struct bfq_group *bfqg;
209
210 if (bfq_entity_to_bfqq(entity))
211 return true;
212
213 bfqg = container_of(entity, struct bfq_group, entity);
214
215 /*
216 * The field active_entities does not always contain the
217 * actual number of active children entities: it happens to
218 * not account for the in-service entity in case the latter is
219 * removed from its active tree (which may get done after
220 * invoking the function bfq_no_longer_next_in_service in
221 * bfq_get_next_queue). Fortunately, here, i.e., while
222 * bfq_no_longer_next_in_service is not yet completed in
223 * bfq_get_next_queue, bfq_active_extract has not yet been
224 * invoked, and thus active_entities still coincides with the
225 * actual number of active entities.
226 */
227 if (bfqg->active_entities == 1)
228 return true;
229
230 return false;
231}
232
233#else /* CONFIG_BFQ_GROUP_IOSCHED */
234
235struct bfq_group *bfq_bfqq_to_bfqg(struct bfq_queue *bfqq)
236{
237 return bfqq->bfqd->root_group;
238}
239
240static bool bfq_update_parent_budget(struct bfq_entity *next_in_service)
241{
242 return false;
243}
244
245static bool bfq_no_longer_next_in_service(struct bfq_entity *entity)
246{
247 return true;
248}
249
250#endif /* CONFIG_BFQ_GROUP_IOSCHED */
251
252/*
253 * Shift for timestamp calculations. This actually limits the maximum
254 * service allowed in one timestamp delta (small shift values increase it),
255 * the maximum total weight that can be used for the queues in the system
256 * (big shift values increase it), and the period of virtual time
257 * wraparounds.
258 */
259#define WFQ_SERVICE_SHIFT 22
260
261struct bfq_queue *bfq_entity_to_bfqq(struct bfq_entity *entity)
262{
263 struct bfq_queue *bfqq = NULL;
264
265 if (!entity->my_sched_data)
266 bfqq = container_of(entity, struct bfq_queue, entity);
267
268 return bfqq;
269}
270
271
272/**
273 * bfq_delta - map service into the virtual time domain.
274 * @service: amount of service.
275 * @weight: scale factor (weight of an entity or weight sum).
276 */
277static u64 bfq_delta(unsigned long service, unsigned long weight)
278{
279 u64 d = (u64)service << WFQ_SERVICE_SHIFT;
280
281 do_div(d, weight);
282 return d;
283}
284
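/*
 * Worked example (arbitrary numbers): with WFQ_SERVICE_SHIFT == 22,
 * charging 1000 sectors of service at weight 100 yields a virtual-time
 * delta of (1000 << 22) / 100 = 41943040, while the same service
 * charged at weight 200 yields half of that, 20971520. For equal
 * amounts of service, higher-weight entities thus accumulate virtual
 * time more slowly, and are therefore selected more often.
 */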
285/**
286 * bfq_calc_finish - assign the finish time to an entity.
287 * @entity: the entity to act upon.
288 * @service: the service to be charged to the entity.
289 */
290static void bfq_calc_finish(struct bfq_entity *entity, unsigned long service)
291{
292 struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
293
294 entity->finish = entity->start +
295 bfq_delta(service, entity->weight);
296
297 if (bfqq) {
298 bfq_log_bfqq(bfqq->bfqd, bfqq,
299 "calc_finish: serv %lu, w %d",
300 service, entity->weight);
301 bfq_log_bfqq(bfqq->bfqd, bfqq,
302 "calc_finish: start %llu, finish %llu, delta %llu",
303 entity->start, entity->finish,
304 bfq_delta(service, entity->weight));
305 }
306}
307
308/**
309 * bfq_entity_of - get an entity from a node.
310 * @node: the node field of the entity.
311 *
312 * Convert a node pointer to the relative entity. This is used only
313 * to simplify the logic of some functions and not as the generic
314 * conversion mechanism because, e.g., in the tree walking functions,
315 * the check for a %NULL value would be redundant.
316 */
317struct bfq_entity *bfq_entity_of(struct rb_node *node)
318{
319 struct bfq_entity *entity = NULL;
320
321 if (node)
322 entity = rb_entry(node, struct bfq_entity, rb_node);
323
324 return entity;
325}
326
327/**
328 * bfq_extract - remove an entity from a tree.
329 * @root: the tree root.
330 * @entity: the entity to remove.
331 */
332static void bfq_extract(struct rb_root *root, struct bfq_entity *entity)
333{
334 entity->tree = NULL;
335 rb_erase(&entity->rb_node, root);
336}
337
338/**
339 * bfq_idle_extract - extract an entity from the idle tree.
340 * @st: the service tree of the owning @entity.
341 * @entity: the entity being removed.
342 */
343static void bfq_idle_extract(struct bfq_service_tree *st,
344 struct bfq_entity *entity)
345{
346 struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
347 struct rb_node *next;
348
349 if (entity == st->first_idle) {
350 next = rb_next(&entity->rb_node);
351 st->first_idle = bfq_entity_of(next);
352 }
353
354 if (entity == st->last_idle) {
355 next = rb_prev(&entity->rb_node);
356 st->last_idle = bfq_entity_of(next);
357 }
358
359 bfq_extract(&st->idle, entity);
360
361 if (bfqq)
362 list_del(&bfqq->bfqq_list);
363}
364
365/**
366 * bfq_insert - generic tree insertion.
367 * @root: tree root.
368 * @entity: entity to insert.
369 *
370 * This is used for the idle and the active tree, since they are both
371 * ordered by finish time.
372 */
373static void bfq_insert(struct rb_root *root, struct bfq_entity *entity)
374{
375 struct bfq_entity *entry;
376 struct rb_node **node = &root->rb_node;
377 struct rb_node *parent = NULL;
378
379 while (*node) {
380 parent = *node;
381 entry = rb_entry(parent, struct bfq_entity, rb_node);
382
383 if (bfq_gt(entry->finish, entity->finish))
384 node = &parent->rb_left;
385 else
386 node = &parent->rb_right;
387 }
388
389 rb_link_node(&entity->rb_node, parent, node);
390 rb_insert_color(&entity->rb_node, root);
391
392 entity->tree = root;
393}
394
395/**
396 * bfq_update_min - update the min_start field of an entity.
397 * @entity: the entity to update.
398 * @node: one of its children.
399 *
400 * This function is called when @entity may store an invalid value for
401 * min_start due to updates to the active tree. The function assumes
402 * that the subtree rooted at @node (which may be its left or its right
403 * child) has a valid min_start value.
404 */
405static void bfq_update_min(struct bfq_entity *entity, struct rb_node *node)
406{
407 struct bfq_entity *child;
408
409 if (node) {
410 child = rb_entry(node, struct bfq_entity, rb_node);
411 if (bfq_gt(entity->min_start, child->min_start))
412 entity->min_start = child->min_start;
413 }
414}
415
416/**
417 * bfq_update_active_node - recalculate min_start.
418 * @node: the node to update.
419 *
420 * @node may have changed position or one of its children may have moved,
421 * this function updates its min_start value. The left and right subtrees
422 * are assumed to hold a correct min_start value.
423 */
424static void bfq_update_active_node(struct rb_node *node)
425{
426 struct bfq_entity *entity = rb_entry(node, struct bfq_entity, rb_node);
427
428 entity->min_start = entity->start;
429 bfq_update_min(entity, node->rb_right);
430 bfq_update_min(entity, node->rb_left);
431}
432
433/**
434 * bfq_update_active_tree - update min_start for the whole active tree.
435 * @node: the starting node.
436 *
437 * @node must be the deepest modified node after an update. This function
438 * updates its min_start using the values held by its children, assuming
439 * that they did not change, and then updates all the nodes that may have
440 * changed in the path to the root. The only nodes that may have changed
441 * are the ones in the path or their siblings.
442 */
443static void bfq_update_active_tree(struct rb_node *node)
444{
445 struct rb_node *parent;
446
447up:
448 bfq_update_active_node(node);
449
450 parent = rb_parent(node);
451 if (!parent)
452 return;
453
454 if (node == parent->rb_left && parent->rb_right)
455 bfq_update_active_node(parent->rb_right);
456 else if (parent->rb_left)
457 bfq_update_active_node(parent->rb_left);
458
459 node = parent;
460 goto up;
461}
462
463/**
464 * bfq_active_insert - insert an entity in the active tree of its
465 * group/device.
466 * @st: the service tree of the entity.
467 * @entity: the entity being inserted.
468 *
469 * The active tree is ordered by finish time, but an extra key is kept
470 * per each node, containing the minimum value for the start times of
471 * its children (and the node itself), so it's possible to search for
472 * the eligible node with the lowest finish time in logarithmic time.
473 */
474static void bfq_active_insert(struct bfq_service_tree *st,
475 struct bfq_entity *entity)
476{
477 struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
478 struct rb_node *node = &entity->rb_node;
479#ifdef CONFIG_BFQ_GROUP_IOSCHED
480 struct bfq_sched_data *sd = NULL;
481 struct bfq_group *bfqg = NULL;
482 struct bfq_data *bfqd = NULL;
483#endif
484
485 bfq_insert(&st->active, entity);
486
487 if (node->rb_left)
488 node = node->rb_left;
489 else if (node->rb_right)
490 node = node->rb_right;
491
492 bfq_update_active_tree(node);
493
494#ifdef CONFIG_BFQ_GROUP_IOSCHED
495 sd = entity->sched_data;
496 bfqg = container_of(sd, struct bfq_group, sched_data);
497 bfqd = (struct bfq_data *)bfqg->bfqd;
498#endif
499 if (bfqq)
500 list_add(&bfqq->bfqq_list, &bfqq->bfqd->active_list);
501#ifdef CONFIG_BFQ_GROUP_IOSCHED
502 else /* bfq_group */
503 bfq_weights_tree_add(bfqd, entity, &bfqd->group_weights_tree);
504
505 if (bfqg != bfqd->root_group)
506 bfqg->active_entities++;
507#endif
508}
509
510/**
511 * bfq_ioprio_to_weight - calc a weight from an ioprio.
512 * @ioprio: the ioprio value to convert.
513 */
514unsigned short bfq_ioprio_to_weight(int ioprio)
515{
516 return (IOPRIO_BE_NR - ioprio) * BFQ_WEIGHT_CONVERSION_COEFF;
517}
518
519/**
520 * bfq_weight_to_ioprio - calc an ioprio from a weight.
521 * @weight: the weight value to convert.
522 *
523 * To preserve as much as possible the old only-ioprio user interface,
524 * 0 is used as an escape ioprio value for weights (numerically) equal or
525 * larger than IOPRIO_BE_NR * BFQ_WEIGHT_CONVERSION_COEFF.
526 */
527static unsigned short bfq_weight_to_ioprio(int weight)
528{
529 return max_t(int, 0,
530 IOPRIO_BE_NR - weight / BFQ_WEIGHT_CONVERSION_COEFF);
531}
532
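/*
 * Example of the conversions above, assuming IOPRIO_BE_NR == 8 and
 * BFQ_WEIGHT_CONVERSION_COEFF == 10: the highest best-effort priority,
 * ioprio 0, maps to weight (8 - 0) * 10 = 80, while the lowest, ioprio
 * 7, maps to weight 10. bfq_weight_to_ioprio() goes the other way,
 * clamping the result to 0 for weights of 80 or more.
 */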
533static void bfq_get_entity(struct bfq_entity *entity)
534{
535 struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
536
537 if (bfqq) {
538 bfqq->ref++;
539 bfq_log_bfqq(bfqq->bfqd, bfqq, "get_entity: %p %d",
540 bfqq, bfqq->ref);
541 }
542}
543
544/**
545 * bfq_find_deepest - find the deepest node that an extraction can modify.
546 * @node: the node being removed.
547 *
548 * Do the first step of an extraction in an rb tree, looking for the
549 * node that will replace @node, and returning the deepest node that
550 * the following modifications to the tree can touch. If @node is the
551 * last node in the tree return %NULL.
552 */
553static struct rb_node *bfq_find_deepest(struct rb_node *node)
554{
555 struct rb_node *deepest;
556
557 if (!node->rb_right && !node->rb_left)
558 deepest = rb_parent(node);
559 else if (!node->rb_right)
560 deepest = node->rb_left;
561 else if (!node->rb_left)
562 deepest = node->rb_right;
563 else {
564 deepest = rb_next(node);
565 if (deepest->rb_right)
566 deepest = deepest->rb_right;
567 else if (rb_parent(deepest) != node)
568 deepest = rb_parent(deepest);
569 }
570
571 return deepest;
572}
573
574/**
575 * bfq_active_extract - remove an entity from the active tree.
576 * @st: the service_tree containing the tree.
577 * @entity: the entity being removed.
578 */
579static void bfq_active_extract(struct bfq_service_tree *st,
580 struct bfq_entity *entity)
581{
582 struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
583 struct rb_node *node;
584#ifdef CONFIG_BFQ_GROUP_IOSCHED
585 struct bfq_sched_data *sd = NULL;
586 struct bfq_group *bfqg = NULL;
587 struct bfq_data *bfqd = NULL;
588#endif
589
590 node = bfq_find_deepest(&entity->rb_node);
591 bfq_extract(&st->active, entity);
592
593 if (node)
594 bfq_update_active_tree(node);
595
596#ifdef CONFIG_BFQ_GROUP_IOSCHED
597 sd = entity->sched_data;
598 bfqg = container_of(sd, struct bfq_group, sched_data);
599 bfqd = (struct bfq_data *)bfqg->bfqd;
600#endif
601 if (bfqq)
602 list_del(&bfqq->bfqq_list);
603#ifdef CONFIG_BFQ_GROUP_IOSCHED
604 else /* bfq_group */
605 bfq_weights_tree_remove(bfqd, entity,
606 &bfqd->group_weights_tree);
607
608 if (bfqg != bfqd->root_group)
609 bfqg->active_entities--;
610#endif
611}
612
613/**
614 * bfq_idle_insert - insert an entity into the idle tree.
615 * @st: the service tree containing the tree.
616 * @entity: the entity to insert.
617 */
618static void bfq_idle_insert(struct bfq_service_tree *st,
619 struct bfq_entity *entity)
620{
621 struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
622 struct bfq_entity *first_idle = st->first_idle;
623 struct bfq_entity *last_idle = st->last_idle;
624
625 if (!first_idle || bfq_gt(first_idle->finish, entity->finish))
626 st->first_idle = entity;
627 if (!last_idle || bfq_gt(entity->finish, last_idle->finish))
628 st->last_idle = entity;
629
630 bfq_insert(&st->idle, entity);
631
632 if (bfqq)
633 list_add(&bfqq->bfqq_list, &bfqq->bfqd->idle_list);
634}
635
636/**
637 * bfq_forget_entity - do not consider entity any longer for scheduling
638 * @st: the service tree.
639 * @entity: the entity being removed.
640 * @is_in_service: true if entity is currently the in-service entity.
641 *
642 * Forget everything about @entity. In addition, if entity represents
643 * a queue, and the latter is not in service, then release the service
644 * reference to the queue (the one taken through bfq_get_entity). In
645 * fact, in this case, there is really no more service reference to
646 * the queue, as the latter is also outside any service tree. If,
647 * instead, the queue is in service, then __bfq_bfqd_reset_in_service
648 * will take care of putting the reference when the queue finally
649 * stops being served.
650 */
651static void bfq_forget_entity(struct bfq_service_tree *st,
652 struct bfq_entity *entity,
653 bool is_in_service)
654{
655 struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
656
657 entity->on_st = false;
658 st->wsum -= entity->weight;
659 if (bfqq && !is_in_service)
660 bfq_put_queue(bfqq);
661}
662
663/**
664 * bfq_put_idle_entity - release the idle tree ref of an entity.
665 * @st: service tree for the entity.
666 * @entity: the entity being released.
667 */
668void bfq_put_idle_entity(struct bfq_service_tree *st, struct bfq_entity *entity)
669{
670 bfq_idle_extract(st, entity);
671 bfq_forget_entity(st, entity,
672 entity == entity->sched_data->in_service_entity);
673}
674
675/**
676 * bfq_forget_idle - update the idle tree if necessary.
677 * @st: the service tree to act upon.
678 *
679 * To preserve the global O(log N) complexity we only remove one entry here;
680 * as the idle tree will not grow indefinitely this can be done safely.
681 */
682static void bfq_forget_idle(struct bfq_service_tree *st)
683{
684 struct bfq_entity *first_idle = st->first_idle;
685 struct bfq_entity *last_idle = st->last_idle;
686
687 if (RB_EMPTY_ROOT(&st->active) && last_idle &&
688 !bfq_gt(last_idle->finish, st->vtime)) {
689 /*
690 * Forget the whole idle tree, increasing the vtime past
691 * the last finish time of idle entities.
692 */
693 st->vtime = last_idle->finish;
694 }
695
696 if (first_idle && !bfq_gt(first_idle->finish, st->vtime))
697 bfq_put_idle_entity(st, first_idle);
698}
699
700struct bfq_service_tree *bfq_entity_service_tree(struct bfq_entity *entity)
701{
702 struct bfq_sched_data *sched_data = entity->sched_data;
703 unsigned int idx = bfq_class_idx(entity);
704
705 return sched_data->service_tree + idx;
706}
707
708/*
709 * Update weight and priority of entity. If update_class_too is true,
710 * then update the ioprio_class of entity too.
711 *
712 * The reason why the update of ioprio_class is controlled through the
713 * last parameter is as follows. Changing the ioprio class of an
714 * entity implies changing the destination service trees for that
715 * entity. If such a change occurred when the entity is already on one
716 * of the service trees for its previous class, then the state of the
717 * entity would become more complex: none of the new possible service
718 * trees for the entity, according to bfq_entity_service_tree(), would
719 * match any of the possible service trees on which the entity
720 * is. Complex operations involving these trees, such as entity
721 * activations and deactivations, should take into account this
722 * additional complexity. To avoid this issue, this function is
723 * invoked with update_class_too unset in the points in the code where
724 * entity may happen to be on some tree.
725 */
726struct bfq_service_tree *
727__bfq_entity_update_weight_prio(struct bfq_service_tree *old_st,
728 struct bfq_entity *entity,
729 bool update_class_too)
730{
731 struct bfq_service_tree *new_st = old_st;
732
733 if (entity->prio_changed) {
734 struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
735 unsigned int prev_weight, new_weight;
736 struct bfq_data *bfqd = NULL;
737 struct rb_root *root;
738#ifdef CONFIG_BFQ_GROUP_IOSCHED
739 struct bfq_sched_data *sd;
740 struct bfq_group *bfqg;
741#endif
742
743 if (bfqq)
744 bfqd = bfqq->bfqd;
745#ifdef CONFIG_BFQ_GROUP_IOSCHED
746 else {
747 sd = entity->my_sched_data;
748 bfqg = container_of(sd, struct bfq_group, sched_data);
749 bfqd = (struct bfq_data *)bfqg->bfqd;
750 }
751#endif
752
753 old_st->wsum -= entity->weight;
754
755 if (entity->new_weight != entity->orig_weight) {
756 if (entity->new_weight < BFQ_MIN_WEIGHT ||
757 entity->new_weight > BFQ_MAX_WEIGHT) {
758 pr_crit("update_weight_prio: new_weight %d\n",
759 entity->new_weight);
760 if (entity->new_weight < BFQ_MIN_WEIGHT)
761 entity->new_weight = BFQ_MIN_WEIGHT;
762 else
763 entity->new_weight = BFQ_MAX_WEIGHT;
764 }
765 entity->orig_weight = entity->new_weight;
766 if (bfqq)
767 bfqq->ioprio =
768 bfq_weight_to_ioprio(entity->orig_weight);
769 }
770
771 if (bfqq && update_class_too)
772 bfqq->ioprio_class = bfqq->new_ioprio_class;
773
774 /*
775 * Reset prio_changed only if the ioprio_class change
776 * is not pending any longer.
777 */
778 if (!bfqq || bfqq->ioprio_class == bfqq->new_ioprio_class)
779 entity->prio_changed = 0;
780
781 /*
782 * NOTE: here we may be changing the weight too early,
783 * this will cause unfairness. The correct approach
784 * would have required additional complexity to defer
785 * weight changes to the proper time instants (i.e.,
786 * when entity->finish <= old_st->vtime).
787 */
788 new_st = bfq_entity_service_tree(entity);
789
790 prev_weight = entity->weight;
791 new_weight = entity->orig_weight *
792 (bfqq ? bfqq->wr_coeff : 1);
793 /*
794 * If the weight of the entity changes, remove the entity
795 * from its old weight counter (if there is a counter
796 * associated with the entity), and add it to the counter
797 * associated with its new weight.
798 */
799 if (prev_weight != new_weight) {
800 root = bfqq ? &bfqd->queue_weights_tree :
801 &bfqd->group_weights_tree;
802 bfq_weights_tree_remove(bfqd, entity, root);
803 }
804 entity->weight = new_weight;
805 /*
806 * Add the entity to its weights tree only if it is
807 * not associated with a weight-raised queue.
808 */
809 if (prev_weight != new_weight &&
810 (bfqq ? bfqq->wr_coeff == 1 : 1))
811 /* If we get here, root has been initialized. */
812 bfq_weights_tree_add(bfqd, entity, root);
813
814 new_st->wsum += entity->weight;
815
816 if (new_st != old_st)
817 entity->start = new_st->vtime;
818 }
819
820 return new_st;
821}
822
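/*
 * Example of the weight update above (illustrative numbers): a queue
 * with orig_weight 100 that is currently weight-raised with
 * wr_coeff == 30 gets an effective weight of 100 * 30 = 3000 in its
 * service tree, but, as long as it is weight-raised, it is not
 * (re)inserted into bfqd->queue_weights_tree. Once weight-raising ends
 * (wr_coeff back to 1) and the change is flagged through prio_changed,
 * a later invocation of this function restores weight 100 and adds the
 * queue to the weights tree again.
 */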
823/**
824 * bfq_bfqq_served - update the scheduler status after selection for
825 * service.
826 * @bfqq: the queue being served.
827 * @served: bytes to transfer.
828 *
829 * NOTE: this can be optimized, as the timestamps of upper level entities
830 * are synchronized every time a new bfqq is selected for service. By now,
831 * we keep it to better check consistency.
832 */
833void bfq_bfqq_served(struct bfq_queue *bfqq, int served)
834{
835 struct bfq_entity *entity = &bfqq->entity;
836 struct bfq_service_tree *st;
837
838 for_each_entity(entity) {
839 st = bfq_entity_service_tree(entity);
840
841 entity->service += served;
842
843 st->vtime += bfq_delta(served, st->wsum);
844 bfq_forget_idle(st);
845 }
846 bfqg_stats_set_start_empty_time(bfqq_group(bfqq));
847 bfq_log_bfqq(bfqq->bfqd, bfqq, "bfqq_served %d secs", served);
848}
849
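/*
 * Illustration of the update above (made-up numbers): if 100 sectors
 * are served from a queue, entity->service grows by 100 at each level
 * of the hierarchy, and a service tree whose weight sum wsum is 300
 * sees its vtime advance by bfq_delta(100, 300) = (100 << 22) / 300,
 * i.e., about 1398101 virtual-time units.
 */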
850/**
851 * bfq_bfqq_charge_time - charge an amount of service equivalent to the length
852 * of the time interval during which bfqq has been in
853 * service.
854 * @bfqd: the device
855 * @bfqq: the queue that needs a service update.
856 * @time_ms: the amount of time during which the queue has received service
857 *
858 * If a queue does not consume its budget fast enough, then providing
859 * the queue with service fairness may impair throughput, more or less
860 * severely. For this reason, queues that consume their budget slowly
861 * are provided with time fairness instead of service fairness. This
862 * goal is achieved through the BFQ scheduling engine, even if such an
863 * engine works in the service, and not in the time domain. The trick
864 * is charging these queues with an inflated amount of service, equal
865 * to the amount of service that they would have received during their
866 * service slot if they had been fast, i.e., if their requests had
867 * been dispatched at a rate equal to the estimated peak rate.
868 *
869 * It is worth noting that time fairness can cause important
870 * distortions in terms of bandwidth distribution, on devices with
871 * internal queueing. The reason is that I/O requests dispatched
872 * during the service slot of a queue may be served after that service
873 * slot is finished, and may have a total processing time loosely
874 * correlated with the duration of the service slot. This is
875 * especially true for short service slots.
876 */
877void bfq_bfqq_charge_time(struct bfq_data *bfqd, struct bfq_queue *bfqq,
878 unsigned long time_ms)
879{
880 struct bfq_entity *entity = &bfqq->entity;
881 int tot_serv_to_charge = entity->service;
882 unsigned int timeout_ms = jiffies_to_msecs(bfq_timeout);
883
884 if (time_ms > 0 && time_ms < timeout_ms)
885 tot_serv_to_charge =
886 (bfqd->bfq_max_budget * time_ms) / timeout_ms;
887
888 if (tot_serv_to_charge < entity->service)
889 tot_serv_to_charge = entity->service;
890
891 /* Increase budget to avoid inconsistencies */
892 if (tot_serv_to_charge > entity->budget)
893 entity->budget = tot_serv_to_charge;
894
895 bfq_bfqq_served(bfqq,
896 max_t(int, 0, tot_serv_to_charge - entity->service));
897}
898
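/*
 * Worked example (illustrative values): assume bfq_max_budget == 16384
 * sectors and a budget timeout of 125 ms. A slow queue that held the
 * device for time_ms == 30 is charged 16384 * 30 / 125 = 3932 sectors,
 * regardless of how little it actually transferred; only the part of
 * this amount exceeding the service already accounted for is then
 * passed to bfq_bfqq_served().
 */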
899static void bfq_update_fin_time_enqueue(struct bfq_entity *entity,
900 struct bfq_service_tree *st,
901 bool backshifted)
902{
903 struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
904
905 /*
906 * When this function is invoked, entity is not in any service
907 * tree, then it is safe to invoke next function with the last
908 * parameter set (see the comments on the function).
909 */
910 st = __bfq_entity_update_weight_prio(st, entity, true);
911 bfq_calc_finish(entity, entity->budget);
912
913 /*
914 * If some queues enjoy backshifting for a while, then their
915 * (virtual) finish timestamps may happen to become lower and
916 * lower than the system virtual time. In particular, if
917 * these queues often happen to be idle for short time
918 * periods, and during such time periods other queues with
919 * higher timestamps happen to be busy, then the backshifted
920 * timestamps of the former queues can become much lower than
921 * the system virtual time. In fact, to serve the queues with
922 * higher timestamps while the ones with lower timestamps are
923 * idle, the system virtual time may be pushed-up to much
924 * higher values than the finish timestamps of the idle
925 * queues. As a consequence, the finish timestamps of all new
926 * or newly activated queues may end up being much larger than
927 * those of lucky queues with backshifted timestamps. The
928 * latter queues may then monopolize the device for a lot of
929 * time. This would simply break service guarantees.
930 *
931 * To reduce this problem, push up a little bit the
932 * backshifted timestamps of the queue associated with this
933 * entity (only a queue can happen to have the backshifted
934 * flag set): just enough to let the finish timestamp of the
935 * queue be equal to the current value of the system virtual
936 * time. This may introduce a little unfairness among queues
937 * with backshifted timestamps, but it does not break
938 * worst-case fairness guarantees.
939 *
940 * As a special case, if bfqq is weight-raised, push up
941 * timestamps much less, to keep very low the probability that
942 * this push up causes the backshifted finish timestamps of
943 * weight-raised queues to become higher than the backshifted
944 * finish timestamps of non weight-raised queues.
945 */
946 if (backshifted && bfq_gt(st->vtime, entity->finish)) {
947 unsigned long delta = st->vtime - entity->finish;
948
949 if (bfqq)
950 delta /= bfqq->wr_coeff;
951
952 entity->start += delta;
953 entity->finish += delta;
954 }
955
956 bfq_active_insert(st, entity);
957}
958
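/*
 * Example of the push-up above (made-up numbers): if st->vtime exceeds
 * a backshifted entity's finish time by 9000, a non-weight-raised queue
 * has both start and finish pushed up by the full 9000, whereas a queue
 * being weight-raised with wr_coeff == 30 is pushed up by only
 * 9000 / 30 = 300, which keeps its timestamps well below those of
 * non-weight-raised queues.
 */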
959/**
960 * __bfq_activate_entity - handle activation of entity.
961 * @entity: the entity being activated.
962 * @non_blocking_wait_rq: true if entity was waiting for a request
963 *
964 * Called for a 'true' activation, i.e., if entity is not active and
965 * one of its children receives a new request.
966 *
967 * Basically, this function updates the timestamps of entity and
968 * inserts entity into its active tree, after possibly extracting it
969 * from its idle tree.
970 */
971static void __bfq_activate_entity(struct bfq_entity *entity,
972 bool non_blocking_wait_rq)
973{
974 struct bfq_service_tree *st = bfq_entity_service_tree(entity);
975 bool backshifted = false;
976 unsigned long long min_vstart;
977
978 /* See comments on bfq_bfqq_update_budg_for_activation */
979 if (non_blocking_wait_rq && bfq_gt(st->vtime, entity->finish)) {
980 backshifted = true;
981 min_vstart = entity->finish;
982 } else
983 min_vstart = st->vtime;
984
985 if (entity->tree == &st->idle) {
986 /*
987 * Must be on the idle tree, bfq_idle_extract() will
988 * check for that.
989 */
990 bfq_idle_extract(st, entity);
991 entity->start = bfq_gt(min_vstart, entity->finish) ?
992 min_vstart : entity->finish;
993 } else {
994 /*
995 * The finish time of the entity may be invalid, and
996 * it is in the past for sure, otherwise the queue
997 * would have been on the idle tree.
998 */
999 entity->start = min_vstart;
1000 st->wsum += entity->weight;
1001 /*
1002 * entity is about to be inserted into a service tree,
1003 * and then set in service: get a reference to make
1004 * sure entity does not disappear until it is no
1005 * longer in service or scheduled for service.
1006 */
1007 bfq_get_entity(entity);
1008
1009 entity->on_st = true;
1010 }
1011
1012 bfq_update_fin_time_enqueue(entity, st, backshifted);
1013}
1014
1015/**
1016 * __bfq_requeue_entity - handle requeueing or repositioning of an entity.
1017 * @entity: the entity being requeued or repositioned.
1018 *
1019 * Requeueing is needed if this entity stops being served, which
1020 * happens if a leaf descendant entity has expired. On the other hand,
1021 * repositioning is needed if the next_in_service entity for the child
1022 * entity has changed. See the comments inside the function for
1023 * details.
1024 *
1025 * Basically, this function: 1) removes entity from its active tree if
1026 * present there, 2) updates the timestamps of entity and 3) inserts
1027 * entity back into its active tree (in the new, right position for
1028 * the new values of the timestamps).
1029 */
1030static void __bfq_requeue_entity(struct bfq_entity *entity)
1031{
1032 struct bfq_sched_data *sd = entity->sched_data;
1033 struct bfq_service_tree *st = bfq_entity_service_tree(entity);
1034
1035 if (entity == sd->in_service_entity) {
1036 /*
1037 * We are requeueing the current in-service entity,
1038 * which may have to be done for one of the following
1039 * reasons:
1040 * - entity represents the in-service queue, and the
1041 * in-service queue is being requeued after an
1042 * expiration;
1043 * - entity represents a group, and its budget has
1044 * changed because one of its child entities has
1045 * just been either activated or requeued for some
1046 * reason; the timestamps of the entity need then to
1047 * be updated, and the entity needs to be enqueued
1048 * or repositioned accordingly.
1049 *
1050 * In particular, before requeueing, the start time of
1051 * the entity must be moved forward to account for the
1052 * service that the entity has received while in
1053 * service. This is done by the next instructions. The
1054 * finish time will then be updated according to this
1055 * new value of the start time, and to the budget of
1056 * the entity.
1057 */
1058 bfq_calc_finish(entity, entity->service);
1059 entity->start = entity->finish;
1060 /*
1061 * In addition, if the entity had more than one child
1062 * when set in service, then it was not extracted from
1063 * the active tree. This implies that the position of
1064 * the entity in the active tree may need to be
1065 * changed now, because we have just updated the start
1066 * time of the entity, and we will update its finish
1067 * time in a moment (the requeueing is then, more
1068 * precisely, a repositioning in this case). To
1069 * implement this repositioning, we: 1) dequeue the
1070 * entity here, 2) update the finish time and requeue
1071 * the entity according to the new timestamps below.
1072 */
1073 if (entity->tree)
1074 bfq_active_extract(st, entity);
1075 } else { /* The entity is already active, and not in service */
1076 /*
1077 * In this case, this function gets called only if the
1078 * next_in_service entity below this entity has
1079 * changed, and this change has caused the budget of
1080 * this entity to change, which, finally implies that
1081 * the finish time of this entity must be
1082 * updated. Such an update may cause the scheduling,
1083 * i.e., the position in the active tree, of this
1084 * entity to change. We handle this change by: 1)
1085 * dequeueing the entity here, 2) updating the finish
1086 * time and requeueing the entity according to the new
1087 * timestamps below. This is the same approach as the
1088 * non-extracted-entity sub-case above.
1089 */
1090 bfq_active_extract(st, entity);
1091 }
1092
1093 bfq_update_fin_time_enqueue(entity, st, false);
1094}
1095
1096static void __bfq_activate_requeue_entity(struct bfq_entity *entity,
1097 struct bfq_sched_data *sd,
1098 bool non_blocking_wait_rq)
1099{
1100 struct bfq_service_tree *st = bfq_entity_service_tree(entity);
1101
1102 if (sd->in_service_entity == entity || entity->tree == &st->active)
1103 /*
1104 * in service or already queued on the active tree,
1105 * requeue or reposition
1106 */
1107 __bfq_requeue_entity(entity);
1108 else
1109 /*
1110 * Not in service and not queued on its active tree:
1111 * the activity is idle and this is a true activation.
1112 */
1113 __bfq_activate_entity(entity, non_blocking_wait_rq);
1114}
1115
1116
1117/**
1118 * bfq_activate_requeue_entity - activate or requeue an entity representing a
1119 * bfq_queue, and activate, requeue or reposition
1120 * all ancestors for which such an update becomes
1121 * necessary.
1122 * @entity: the entity to activate.
1123 * @non_blocking_wait_rq: true if this entity was waiting for a request
1124 * @requeue: true if this is a requeue, which implies that bfqq is
1125 * being expired; thus ALL its ancestors stop being served and must
1126 * therefore be requeued
1127 * @expiration: true if this function is being invoked in the expiration path
1128 * of the in-service queue
1129 */
1130static void bfq_activate_requeue_entity(struct bfq_entity *entity,
1131 bool non_blocking_wait_rq,
1132 bool requeue, bool expiration)
1133{
1134 struct bfq_sched_data *sd;
1135
1136 for_each_entity(entity) {
1137 sd = entity->sched_data;
1138 __bfq_activate_requeue_entity(entity, sd, non_blocking_wait_rq);
1139
1140 if (!bfq_update_next_in_service(sd, entity, expiration) &&
1141 !requeue)
1142 break;
1143 }
1144}
1145
1146/**
1147 * __bfq_deactivate_entity - deactivate an entity from its service tree.
1148 * @entity: the entity to deactivate.
1149 * @ins_into_idle_tree: if false, the entity will not be put into the
1150 * idle tree.
1151 *
1152 * Deactivates an entity, independently of its previous state. Must
1153 * be invoked only if entity is on a service tree. Extracts the entity
1154 * from that tree, and if necessary and allowed, puts it into the idle
1155 * tree.
1156 */
1157bool __bfq_deactivate_entity(struct bfq_entity *entity, bool ins_into_idle_tree)
1158{
1159 struct bfq_sched_data *sd = entity->sched_data;
1160 struct bfq_service_tree *st;
1161 bool is_in_service;
1162
1163 if (!entity->on_st) /* entity never activated, or already inactive */
1164 return false;
1165
1166 /*
1167 * If we get here, then entity is active, which implies that
1168 * bfq_group_set_parent has already been invoked for the group
1169 * represented by entity. Therefore, the field
1170 * entity->sched_data has been set, and we can safely use it.
1171 */
1172 st = bfq_entity_service_tree(entity);
1173 is_in_service = entity == sd->in_service_entity;
1174
1175 if (is_in_service) {
1176 bfq_calc_finish(entity, entity->service);
1177 sd->in_service_entity = NULL;
1178 }
1179
1180 if (entity->tree == &st->active)
1181 bfq_active_extract(st, entity);
1182 else if (!is_in_service && entity->tree == &st->idle)
1183 bfq_idle_extract(st, entity);
1184
1185 if (!ins_into_idle_tree || !bfq_gt(entity->finish, st->vtime))
1186 bfq_forget_entity(st, entity, is_in_service);
1187 else
1188 bfq_idle_insert(st, entity);
1189
1190 return true;
1191}
1192
1193/**
1194 * bfq_deactivate_entity - deactivate an entity representing a bfq_queue.
1195 * @entity: the entity to deactivate.
1196 * @ins_into_idle_tree: true if the entity can be put into the idle tree
1197 * @expiration: true if this function is being invoked in the expiration path
1198 * of the in-service queue
1199 */
1200static void bfq_deactivate_entity(struct bfq_entity *entity,
1201 bool ins_into_idle_tree,
1202 bool expiration)
1203{
1204 struct bfq_sched_data *sd;
1205 struct bfq_entity *parent = NULL;
1206
1207 for_each_entity_safe(entity, parent) {
1208 sd = entity->sched_data;
1209
1210 if (!__bfq_deactivate_entity(entity, ins_into_idle_tree)) {
1211 /*
1212 * entity is not in any tree any more, so
1213 * this deactivation is a no-op, and there is
1214 * nothing to change for upper-level entities
1215 * (in case of expiration, this can never
1216 * happen).
1217 */
1218 return;
1219 }
1220
1221 if (sd->next_in_service == entity)
1222 /*
1223 * entity was the next_in_service entity,
1224 * then, since entity has just been
1225 * deactivated, a new one must be found.
1226 */
1227 bfq_update_next_in_service(sd, NULL, expiration);
1228
1229 if (sd->next_in_service || sd->in_service_entity) {
1230 /*
1231 * The parent entity is still active, because
1232 * either next_in_service or in_service_entity
1233 * is not NULL. So, no further upwards
1234 * deactivation must be performed. Yet,
1235 * next_in_service has changed. Then the
1236 * schedule does need to be updated upwards.
1237 *
1238 * NOTE If in_service_entity is not NULL, then
1239 * next_in_service may happen to be NULL,
1240 * although the parent entity is evidently
1241 * active. This happens if 1) the entity
1242 * pointed by in_service_entity is the only
1243 * active entity in the parent entity, and 2)
1244 * according to the definition of
1245 * next_in_service, the in_service_entity
1246 * cannot be considered as
1247 * next_in_service. See the comments on the
1248 * definition of next_in_service for details.
1249 */
1250 break;
1251 }
1252
1253 /*
1254 * If we get here, then the parent is no more
1255 * backlogged and we need to propagate the
1256 * deactivation upwards. Thus let the loop go on.
1257 */
1258
1259 /*
1260 * Also let parent be queued into the idle tree on
1261 * deactivation, to preserve service guarantees, and
1262 * assuming that who invoked this function does not
1263 * need parent entities too to be removed completely.
1264 */
1265 ins_into_idle_tree = true;
1266 }
1267
1268 /*
1269 * If the deactivation loop is fully executed, then there are
1270 * no more entities to touch and next loop is not executed at
1271 * all. Otherwise, requeue remaining entities if they are
1272 * about to stop receiving service, or reposition them if this
1273 * is not the case.
1274 */
1275 entity = parent;
1276 for_each_entity(entity) {
1277 /*
1278 * Invoke __bfq_requeue_entity on entity, even if
1279 * already active, to requeue/reposition it in the
1280 * active tree (because sd->next_in_service has
1281 * changed)
1282 */
1283 __bfq_requeue_entity(entity);
1284
1285 sd = entity->sched_data;
1286 if (!bfq_update_next_in_service(sd, entity, expiration) &&
1287 !expiration)
1288 /*
1289 * next_in_service unchanged or not causing
1290 * any change in entity->parent->sd, and no
1291 * requeueing needed for expiration: stop
1292 * here.
1293 */
1294 break;
1295 }
1296}
1297
1298/**
1299 * bfq_calc_vtime_jump - compute the value to which the vtime should jump,
1300 * if needed, to have at least one entity eligible.
1301 * @st: the service tree to act upon.
1302 *
1303 * Assumes that st is not empty.
1304 */
1305static u64 bfq_calc_vtime_jump(struct bfq_service_tree *st)
1306{
1307 struct bfq_entity *root_entity = bfq_root_active_entity(&st->active);
1308
1309 if (bfq_gt(root_entity->min_start, st->vtime))
1310 return root_entity->min_start;
1311
1312 return st->vtime;
1313}
1314
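/*
 * Example (invented timestamps): if st->vtime is 5000 but the smallest
 * start time in the active tree (root_entity->min_start) is 7000, no
 * entity is eligible, and the function returns 7000, i.e., the value
 * the virtual time must jump to for at least one entity to become
 * eligible again.
 */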
1315static void bfq_update_vtime(struct bfq_service_tree *st, u64 new_value)
1316{
1317 if (new_value > st->vtime) {
1318 st->vtime = new_value;
1319 bfq_forget_idle(st);
1320 }
1321}
1322
1323/**
1324 * bfq_first_active_entity - find the eligible entity with
1325 * the smallest finish time
1326 * @st: the service tree to select from.
1327 * @vtime: the system virtual to use as a reference for eligibility
1328 *
1329 * This function searches the first schedulable entity, starting from the
1330 * root of the tree and going on the left every time on this side there is
1331 * a subtree with at least one eligible (start <= vtime) entity. The path on
1332 * the right is followed only if a) the left subtree contains no eligible
1333 * entities and b) no eligible entity has been found yet.
1334 */
1335static struct bfq_entity *bfq_first_active_entity(struct bfq_service_tree *st,
1336 u64 vtime)
1337{
1338 struct bfq_entity *entry, *first = NULL;
1339 struct rb_node *node = st->active.rb_node;
1340
1341 while (node) {
1342 entry = rb_entry(node, struct bfq_entity, rb_node);
1343left:
1344 if (!bfq_gt(entry->start, vtime))
1345 first = entry;
1346
1347 if (node->rb_left) {
1348 entry = rb_entry(node->rb_left,
1349 struct bfq_entity, rb_node);
1350 if (!bfq_gt(entry->min_start, vtime)) {
1351 node = node->rb_left;
1352 goto left;
1353 }
1354 }
1355 if (first)
1356 break;
1357 node = node->rb_right;
1358 }
1359
1360 return first;
1361}
1362
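/*
 * Example of the search above (invented timestamps): with vtime == 35
 * and three active entities A (start 10, finish 60), B (start 30,
 * finish 40) and C (start 90, finish 20), C has the smallest finish
 * time but is not eligible (90 > 35), so the function returns B, the
 * eligible entity with the smallest finish time.
 */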
1363/**
1364 * __bfq_lookup_next_entity - return the first eligible entity in @st.
1365 * @st: the service tree.
1366 *
1367 * If there is no in-service entity for the sched_data st belongs to,
1368 * then return the entity that will be set in service if:
1369 * 1) the parent entity this st belongs to is set in service;
1370 * 2) no entity belonging to such parent entity undergoes a state change
1371 * that would influence the timestamps of the entity (e.g., becomes idle,
1372 * becomes backlogged, changes its budget, ...).
1373 *
1374 * In this first case, update the virtual time in @st too (see the
1375 * comments on this update inside the function).
1376 *
1377 * In contrast, if there is an in-service entity, then return the
1378 * entity that would be set in service if not only the above
1379 * conditions, but also the next one held true: the currently
1380 * in-service entity, on expiration,
1381 * 1) gets a finish time equal to the current one, or
1382 * 2) is not eligible any more, or
1383 * 3) is idle.
1384 */
1385static struct bfq_entity *
1386__bfq_lookup_next_entity(struct bfq_service_tree *st, bool in_service)
1387{
1388 struct bfq_entity *entity;
1389 u64 new_vtime;
1390
1391 if (RB_EMPTY_ROOT(&st->active))
1392 return NULL;
1393
1394 /*
1395 * Get the value of the system virtual time for which at
1396 * least one entity is eligible.
1397 */
1398 new_vtime = bfq_calc_vtime_jump(st);
1399
1400 /*
1401 * If there is no in-service entity for the sched_data this
1402 * active tree belongs to, then push the system virtual time
1403 * up to the value that guarantees that at least one entity is
1404 * eligible. If, instead, there is an in-service entity, then
1405 * do not make any such update, because there is already an
1406 * eligible entity, namely the in-service one (even if the
1407 * entity is not on st, because it was extracted when set in
1408 * service).
1409 */
1410 if (!in_service)
1411 bfq_update_vtime(st, new_vtime);
1412
1413 entity = bfq_first_active_entity(st, new_vtime);
1414
1415 return entity;
1416}
1417
1418/**
1419 * bfq_lookup_next_entity - return the first eligible entity in @sd.
1420 * @sd: the sched_data.
1421 * @expiration: true if we are on the expiration path of the in-service queue
1422 *
1423 * This function is invoked when there has been a change in the trees
1424 * for sd, and we need to know what is the new next entity to serve
1425 * after this change.
1426 */
1427static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd,
1428 bool expiration)
1429{
1430 struct bfq_service_tree *st = sd->service_tree;
1431 struct bfq_service_tree *idle_class_st = st + (BFQ_IOPRIO_CLASSES - 1);
1432 struct bfq_entity *entity = NULL;
1433 int class_idx = 0;
1434
1435 /*
1436 * Choose from idle class, if needed to guarantee a minimum
1437 * bandwidth to this class (and if there is some active entity
1438 * in idle class). This should also mitigate
1439 * priority-inversion problems in case a low priority task is
1440 * holding file system resources.
1441 */
1442 if (time_is_before_jiffies(sd->bfq_class_idle_last_service +
1443 BFQ_CL_IDLE_TIMEOUT)) {
1444 if (!RB_EMPTY_ROOT(&idle_class_st->active))
1445 class_idx = BFQ_IOPRIO_CLASSES - 1;
1446 /* About to be served if backlogged, or not yet backlogged */
1447 sd->bfq_class_idle_last_service = jiffies;
1448 }
1449
1450 /*
1451 * Find the next entity to serve for the highest-priority
1452 * class, unless the idle class needs to be served.
1453 */
1454 for (; class_idx < BFQ_IOPRIO_CLASSES; class_idx++) {
1455 /*
1456 * If expiration is true, then bfq_lookup_next_entity
1457 * is being invoked as a part of the expiration path
1458 * of the in-service queue. In this case, even if
1459 * sd->in_service_entity is not NULL,
1460 * sd->in_service_entity at this point is actually not
1461 * in service any more, and, if needed, has already
1462 * been properly queued or requeued into the right
1463 * tree. The reason why sd->in_service_entity is still
1464 * not NULL here, even if expiration is true, is that
1465 * sd->in_service_entity is reset as a last step in the
1466 * expiration path. So, if expiration is true, tell
1467 * __bfq_lookup_next_entity that there is no
1468 * sd->in_service_entity.
1469 */
1470 entity = __bfq_lookup_next_entity(st + class_idx,
1471 sd->in_service_entity &&
1472 !expiration);
1473
1474 if (entity)
1475 break;
1476 }
1477
1478 if (!entity)
1479 return NULL;
1480
1481 return entity;
1482}
1483
1484bool next_queue_may_preempt(struct bfq_data *bfqd)
1485{
1486 struct bfq_sched_data *sd = &bfqd->root_group->sched_data;
1487
1488 return sd->next_in_service != sd->in_service_entity;
1489}
1490
1491/*
1492 * Get next queue for service.
1493 */
1494struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd)
1495{
1496 struct bfq_entity *entity = NULL;
1497 struct bfq_sched_data *sd;
1498 struct bfq_queue *bfqq;
1499
1500 if (bfqd->busy_queues == 0)
1501 return NULL;
1502
1503 /*
1504 * Traverse the path from the root to the leaf entity to
1505 * serve. Set in service all the entities visited along the
1506 * way.
1507 */
1508 sd = &bfqd->root_group->sched_data;
1509 for (; sd ; sd = entity->my_sched_data) {
1510 /*
1511 * WARNING. We are about to set the in-service entity
1512 * to sd->next_in_service, i.e., to the (cached) value
1513 * returned by bfq_lookup_next_entity(sd) the last
1514 * time it was invoked, i.e., the last time when the
1515 * service order in sd changed as a consequence of the
1516 * activation or deactivation of an entity. In this
1517 * respect, if we execute bfq_lookup_next_entity(sd)
1518 * in this very moment, it may, although with low
1519 * probability, yield a different entity than that
1520 * pointed to by sd->next_in_service. This rare event
1521 * happens in case there was no CLASS_IDLE entity to
1522 * serve for sd when bfq_lookup_next_entity(sd) was
1523 * invoked for the last time, while there is now one
1524 * such entity.
1525 *
1526 * If the above event happens, then the scheduling of
1527 * such entity in CLASS_IDLE is postponed until the
1528 * service of the sd->next_in_service entity
1529 * finishes. In fact, when the latter is expired,
1530 * bfq_lookup_next_entity(sd) gets called again,
1531 * exactly to update sd->next_in_service.
1532 */
1533
1534 /* Make next_in_service entity become in_service_entity */
1535 entity = sd->next_in_service;
1536 sd->in_service_entity = entity;
1537
1538 /*
1539 * Reset the accumulator of the amount of service that
1540 * the entity is about to receive.
1541 */
1542 entity->service = 0;
1543
1544 /*
1545 * If entity is no longer a candidate for next
1546 * service, then it must be extracted from its active
1547 * tree, so as to make sure that it won't be
1548 * considered when computing next_in_service. See the
1549 * comments on the function
1550 * bfq_no_longer_next_in_service() for details.
1551 */
1552 if (bfq_no_longer_next_in_service(entity))
1553 bfq_active_extract(bfq_entity_service_tree(entity),
1554 entity);
1555
1556 /*
1557 * Even if entity is not to be extracted according to
1558 * the above check, a descendant entity may get
1559 * extracted in one of the next iterations of this
1560 * loop. Such an event could cause a change in
1561 * next_in_service for the level of the descendant
1562 * entity, and thus possibly back to this level.
1563 *
1564 * However, we cannot perform the resulting needed
1565 * update of next_in_service for this level before the
1566 * end of the whole loop, because, to know which is
1567 * the correct next-to-serve candidate entity for each
1568 * level, we need first to find the leaf entity to set
1569 * in service. In fact, only after we know which is
1570 * the next-to-serve leaf entity, we can discover
1571 * whether the parent entity of the leaf entity
1572 * becomes the next-to-serve, and so on.
1573 */
1574 }
1575
1576 bfqq = bfq_entity_to_bfqq(entity);
1577
1578 /*
1579 * We can finally update all next-to-serve entities along the
1580 * path from the leaf entity just set in service to the root.
1581 */
1582 for_each_entity(entity) {
1583 struct bfq_sched_data *sd = entity->sched_data;
1584
1585 if (!bfq_update_next_in_service(sd, NULL, false))
1586 break;
1587 }
1588
1589 return bfqq;
1590}
1591
1592void __bfq_bfqd_reset_in_service(struct bfq_data *bfqd)
1593{
1594 struct bfq_queue *in_serv_bfqq = bfqd->in_service_queue;
1595 struct bfq_entity *in_serv_entity = &in_serv_bfqq->entity;
1596 struct bfq_entity *entity = in_serv_entity;
1597
1598 bfq_clear_bfqq_wait_request(in_serv_bfqq);
1599 hrtimer_try_to_cancel(&bfqd->idle_slice_timer);
1600 bfqd->in_service_queue = NULL;
1601
1602 /*
1603 * When this function is called, all in-service entities have
1604 * been properly deactivated or requeued, so we can safely
1605 * execute the final step: reset in_service_entity along the
1606 * path from entity to the root.
1607 */
1608 for_each_entity(entity)
1609 entity->sched_data->in_service_entity = NULL;
1610
1611 /*
1612 * in_serv_entity is no longer in service, so, if it is in no
1613 * service tree either, then release the service reference to
1614 * the queue it represents (taken with bfq_get_entity).
1615 */
1616 if (!in_serv_entity->on_st)
1617 bfq_put_queue(in_serv_bfqq);
1618}
1619
1620void bfq_deactivate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
1621 bool ins_into_idle_tree, bool expiration)
1622{
1623 struct bfq_entity *entity = &bfqq->entity;
1624
1625 bfq_deactivate_entity(entity, ins_into_idle_tree, expiration);
1626}
1627
1628void bfq_activate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
1629{
1630 struct bfq_entity *entity = &bfqq->entity;
1631
1632 bfq_activate_requeue_entity(entity, bfq_bfqq_non_blocking_wait_rq(bfqq),
1633 false, false);
1634 bfq_clear_bfqq_non_blocking_wait_rq(bfqq);
1635}
1636
1637void bfq_requeue_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
1638 bool expiration)
1639{
1640 struct bfq_entity *entity = &bfqq->entity;
1641
1642 bfq_activate_requeue_entity(entity, false,
1643 bfqq == bfqd->in_service_queue, expiration);
1644}
1645
1646/*
1647 * Called when the bfqq no longer has requests pending, remove it from
1648 * the service tree. As a special case, it can be invoked during an
1649 * expiration.
1650 */
1651void bfq_del_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq,
1652 bool expiration)
1653{
1654 bfq_log_bfqq(bfqd, bfqq, "del from busy");
1655
1656 bfq_clear_bfqq_busy(bfqq);
1657
1658 bfqd->busy_queues--;
1659
1660 if (!bfqq->dispatched)
1661 bfq_weights_tree_remove(bfqd, &bfqq->entity,
1662 &bfqd->queue_weights_tree);
1663
1664 if (bfqq->wr_coeff > 1)
1665 bfqd->wr_busy_queues--;
1666
1667 bfqg_stats_update_dequeue(bfqq_group(bfqq));
1668
1669 bfq_deactivate_bfqq(bfqd, bfqq, true, expiration);
1670}
1671
1672/*
1673 * Called when an inactive queue receives a new request.
1674 */
1675void bfq_add_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq)
1676{
1677 bfq_log_bfqq(bfqd, bfqq, "add to busy");
1678
1679 bfq_activate_bfqq(bfqd, bfqq);
1680
1681 bfq_mark_bfqq_busy(bfqq);
1682 bfqd->busy_queues++;
1683
1684 if (!bfqq->dispatched)
1685 if (bfqq->wr_coeff == 1)
1686 bfq_weights_tree_add(bfqd, &bfqq->entity,
1687 &bfqd->queue_weights_tree);
1688
1689 if (bfqq->wr_coeff > 1)
1690 bfqd->wr_busy_queues++;
1691}