#define sample_valid(samples) ((samples) > 80)
+/*
+ * Most of our rbtree usage is for sorting with min extraction, so
+ * if we cache the leftmost node we don't have to walk down the tree
+ * to find it. Idea borrowed from Ingo Molnar's CFS scheduler. We should
+ * move this into the elevator for the rq sorting as well.
+ */
+struct cfq_rb_root {
+ struct rb_root rb;
+	struct rb_node *left;		/* cached leftmost node, NULL when not cached */
+};
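+/* initializer for an empty tree with no cached leftmost node */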
+#define CFQ_RB_ROOT (struct cfq_rb_root) { RB_ROOT, NULL, }
+
/*
* Per block device queue structure
*/
/*
* rr list of queues with requests and the count of them
*/
- struct rb_root service_tree;
+ struct cfq_rb_root service_tree;
struct list_head cur_rr;
struct list_head idle_rr;
unsigned int busy_queues;
}
}
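+/*
+ * Return the cached leftmost node; if it was invalidated by an erase,
+ * recompute it with rb_first() and cache the result.
+ */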
+static struct rb_node *cfq_rb_first(struct cfq_rb_root *root)
+{
+ if (!root->left)
+ root->left = rb_first(&root->rb);
+
+ return root->left;
+}
+
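+/*
+ * Erase a node from the tree, dropping the cached leftmost pointer if
+ * it points at the node being removed. cfq_rb_first() re-derives it.
+ */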
+static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root)
+{
+ if (root->left == n)
+ root->left = NULL;
+
+ rb_erase(n, &root->rb);
+ RB_CLEAR_NODE(n);
+}
+
/*
* would be nice to take fifo expire time into account as well
*/
static void cfq_service_tree_add(struct cfq_data *cfqd,
struct cfq_queue *cfqq)
{
- struct rb_node **p = &cfqd->service_tree.rb_node;
+ struct rb_node **p = &cfqd->service_tree.rb.rb_node;
struct rb_node *parent = NULL;
- struct cfq_queue *__cfqq;
unsigned long rb_key;
+	int left = 1;			/* new node is leftmost unless we descend right */
	rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies;
	rb_key += cfqq->slice_resid;

	if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
		/*
		 * same position, nothing more to do
		 */
		if (rb_key == cfqq->rb_key)
			return;

-		rb_erase(&cfqq->rb_node, &cfqd->service_tree);
+		cfq_rb_erase(&cfqq->rb_node, &cfqd->service_tree);
	}
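+	/*
+	 * Walk down to the insertion point. Once we descend right, the
+	 * new node can no longer be the leftmost one.
+	 */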
while (*p) {
+ struct cfq_queue *__cfqq;
+
parent = *p;
__cfqq = rb_entry(parent, struct cfq_queue, rb_node);
if (rb_key < __cfqq->rb_key)
p = &(*p)->rb_left;
- else
+ else {
p = &(*p)->rb_right;
+ left = 0;
+ }
}
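+	/* we only went left, so the new node is the cached leftmost */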
+ if (left)
+ cfqd->service_tree.left = &cfqq->rb_node;
+
cfqq->rb_key = rb_key;
rb_link_node(&cfqq->rb_node, parent, p);
- rb_insert_color(&cfqq->rb_node, &cfqd->service_tree);
+ rb_insert_color(&cfqq->rb_node, &cfqd->service_tree.rb);
}
static void cfq_resort_rr_list(struct cfq_queue *cfqq, int preempted)
cfq_clear_cfqq_on_rr(cfqq);
list_del_init(&cfqq->cfq_list);
- if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
- rb_erase(&cfqq->rb_node, &cfqd->service_tree);
- RB_CLEAR_NODE(&cfqq->rb_node);
- }
+ if (!RB_EMPTY_NODE(&cfqq->rb_node))
+ cfq_rb_erase(&cfqq->rb_node, &cfqd->service_tree);
BUG_ON(!cfqd->busy_queues);
cfqd->busy_queues--;
	if (!list_empty(&cfqd->cur_rr)) {
		/*
		 * if current list is non-empty, grab first entry.
		 */
		cfqq = list_entry_cfqq(cfqd->cur_rr.next);
- } else if (!RB_EMPTY_ROOT(&cfqd->service_tree)) {
- struct rb_node *n = rb_first(&cfqd->service_tree);
+ } else if (!RB_EMPTY_ROOT(&cfqd->service_tree.rb)) {
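+		/* take the cached leftmost queue, i.e. the smallest rb_key */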
+ struct rb_node *n = cfq_rb_first(&cfqd->service_tree);
cfqq = rb_entry(n, struct cfq_queue, rb_node);
} else if (!list_empty(&cfqd->idle_rr)) {
int dispatched = 0;
struct rb_node *n;
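+	/* drain the service tree in rb_key order via the cached leftmost */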
- while ((n = rb_first(&cfqd->service_tree)) != NULL) {
+ while ((n = cfq_rb_first(&cfqd->service_tree)) != NULL) {
struct cfq_queue *cfqq = rb_entry(n, struct cfq_queue, rb_node);
dispatched += __cfq_forced_dispatch_cfqq(cfqq);
memset(cfqd, 0, sizeof(*cfqd));
- cfqd->service_tree = RB_ROOT;
+ cfqd->service_tree = CFQ_RB_ROOT;
INIT_LIST_HEAD(&cfqd->cur_rr);
INIT_LIST_HEAD(&cfqd->idle_rr);
INIT_LIST_HEAD(&cfqd->cic_list);