static int cfq_slice_async = HZ / 25;
static const int cfq_slice_async_rq = 2;
static int cfq_slice_idle = HZ / 125;
+static int cfq_rt_idle_only = 1;
static int cfq_group_idle = HZ / 125;
static const int cfq_target_latency = HZ * 3/10; /* 300 ms */
static const int cfq_hist_divisor = 4;
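
/*
 * The jiffies-based defaults above scale with CONFIG_HZ, so the wall-clock
 * values depend on the kernel config.  A minimal userspace sketch, assuming
 * HZ=1000, of what they work out to in milliseconds (cfq_rt_idle_only is a
 * plain boolean, not a time value):
 */
#include <stdio.h>

#define HZ 1000	/* assumed; matches CONFIG_HZ=1000 kernels */

int main(void)
{
	printf("slice_async    = %3d jiffies (%3d ms)\n", HZ / 25, HZ / 25 * 1000 / HZ);
	printf("slice_idle     = %3d jiffies (%3d ms)\n", HZ / 125, HZ / 125 * 1000 / HZ);
	printf("group_idle     = %3d jiffies (%3d ms)\n", HZ / 125, HZ / 125 * 1000 / HZ);
	printf("target_latency = %3d jiffies (%3d ms)\n", HZ * 3 / 10, HZ * 3 / 10 * 1000 / HZ);
	return 0;
}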
unsigned int cfq_slice[2];
unsigned int cfq_slice_async_rq;
unsigned int cfq_slice_idle;
+ unsigned int cfq_rt_idle_only;
unsigned int cfq_group_idle;
unsigned int cfq_latency;
unsigned int cfq_target_latency;
static void
cfq_update_group_weight(struct cfq_group *cfqg)
{
- BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
-
if (cfqg->new_weight) {
cfqg->weight = cfqg->new_weight;
cfqg->new_weight = 0;
}
+}
+
+static void
+cfq_update_group_leaf_weight(struct cfq_group *cfqg)
+{
+ BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
	if (cfqg->new_leaf_weight) {
		cfqg->leaf_weight = cfqg->new_leaf_weight;
		cfqg->new_leaf_weight = 0;
	}
}
/* add to the service tree */
BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
- cfq_update_group_weight(cfqg);
+ cfq_update_group_leaf_weight(cfqg);
__cfq_group_service_tree_add(st, cfqg);
	/*
	 * activate @cfqg and calculate the portion of the vfraction
	 * @cfqg is entitled to
	 */
while ((parent = cfqg_parent(pos))) {
if (propagate) {
+ cfq_update_group_weight(pos);
propagate = !parent->nr_active++;
parent->children_weight += pos->weight;
}
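
/*
 * Why the split and the new call site: cfq_update_group_weight() may now run
 * on ancestors inside the propagation loop, where the group can already be on
 * a service tree, so the off-tree BUG_ON stays only in the leaf-weight path.
 * Applying a pending new_weight *before* it is folded into the parent's
 * children_weight keeps the amount added at enqueue equal to the amount
 * removed at dequeue.  A minimal self-contained sketch of that invariant
 * (toy types, not the kernel's):
 */
#include <assert.h>
#include <stdio.h>

struct group {
	unsigned int weight, new_weight;	/* new_weight: pending update */
	unsigned int children_weight;		/* sum over active children */
};

static void update_weight(struct group *g)
{
	if (g->new_weight) {
		g->weight = g->new_weight;
		g->new_weight = 0;
	}
}

int main(void)
{
	struct group parent = { 0 };
	struct group child = { .weight = 100, .new_weight = 500 };

	update_weight(&child);			/* settle the pending change first */
	parent.children_weight += child.weight;	/* adds 500, not the stale 100 */

	parent.children_weight -= child.weight;	/* dequeue removes the same 500 */
	assert(parent.children_weight == 0);	/* balanced: no stale remainder */
	printf("children_weight balanced\n");
	return 0;
}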
if (samples) {
v = blkg_stat_read(&cfqg->stats.avg_queue_size_sum);
- do_div(v, samples);
+ v = div64_u64(v, samples);
}
__blkg_prfill_u64(sf, pd, v);
return 0;
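
/*
 * do_div(n, base) takes a 32-bit divisor and modifies n in place, but
 * 'samples' here is read from a 64-bit blkg stat, so the divisor itself can
 * exceed 32 bits; div64_u64() performs a full u64/u64 divide.  A userspace
 * sketch of the failure mode (div64_u64_sketch is a stand-in, not the kernel
 * helper; on 64-bit userspace a plain '/' suffices):
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t div64_u64_sketch(uint64_t dividend, uint64_t divisor)
{
	return dividend / divisor;	/* kernel helper also works on 32-bit arches */
}

int main(void)
{
	uint64_t sum = 10ULL << 32;	/* 64-bit running sum */
	uint64_t samples = 5ULL << 32;	/* truncating this to u32 would yield 0 */

	printf("avg = %llu\n", (unsigned long long)div64_u64_sketch(sum, samples));
	return 0;
}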
if (wl_class == IDLE_WORKLOAD)
return false;
+ if (cfqd->cfq_rt_idle_only && wl_class != RT_WORKLOAD)
+ return false;
+
	/* We do idle for queues that were marked with the idle window flag. */
if (cfq_cfqq_idle_window(cfqq) &&
	    !(blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag))
		return true;
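
/*
 * With the tunable set, idling is now reserved for realtime queues.  A
 * self-contained sketch of the decision order above (simplified stand-in
 * types, not the kernel's):
 */
#include <assert.h>
#include <stdbool.h>

enum wl_class { RT_WORKLOAD, BE_WORKLOAD, IDLE_WORKLOAD };

static bool should_idle(enum wl_class wl, bool rt_idle_only, bool idle_window)
{
	if (wl == IDLE_WORKLOAD)		/* never idle for idle-class queues */
		return false;
	if (rt_idle_only && wl != RT_WORKLOAD)	/* new: only RT queues may idle */
		return false;
	return idle_window;			/* existing idle-window test */
}

int main(void)
{
	assert(!should_idle(BE_WORKLOAD, true, true));	/* BE skipped when rt-only */
	assert(should_idle(RT_WORKLOAD, true, true));	/* RT still idles */
	return 0;
}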
if (time_before(jiffies, rq_fifo_time(rq)))
rq = NULL;
- cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq);
return rq;
}
{
unsigned int max_dispatch;
+ if (cfq_cfqq_must_dispatch(cfqq))
+ return true;
+
/*
* Drain async requests before we start sync IO
*/
BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
+ rq = cfq_check_fifo(cfqq);
+ if (rq)
+ cfq_mark_cfqq_must_dispatch(cfqq);
+
if (!cfq_may_dispatch(cfqd, cfqq))
return false;
/*
* follow expired path, else get first next available
*/
- rq = cfq_check_fifo(cfqq);
if (!rq)
rq = cfqq->next_rq;
+ else
+ cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq);
	/*
	 * insert request into driver dispatch list
	 */
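
/*
 * The point of the reordering: the fifo is inspected and the queue marked
 * must-dispatch *before* cfq_may_dispatch() can throttle it, so a queue
 * holding an expired (typically async) request can no longer be starved by
 * sync traffic; the same flag is honoured by the preemption check further
 * down.  A minimal sketch of the shape (toy types, not the kernel's):
 */
#include <stdbool.h>
#include <stdio.h>

struct request { long fifo_deadline; };

struct queue {
	struct request *fifo_head;	/* oldest queued request, if any */
	struct request *next_rq;	/* position-sorted next candidate */
	bool must_dispatch;		/* set once the fifo has expired */
};

static struct request *check_fifo(struct queue *q, long now)
{
	if (q->fifo_head && q->fifo_head->fifo_deadline <= now)
		return q->fifo_head;
	return NULL;
}

static struct request *select_request(struct queue *q, long now, bool may_dispatch)
{
	struct request *rq = check_fifo(q, now);

	if (rq)
		q->must_dispatch = true;	/* expired fifo: dispatch regardless */
	if (!may_dispatch && !q->must_dispatch)
		return NULL;			/* throttled and nothing expired */
	return rq ? rq : q->next_rq;		/* expired path, else next available */
}

int main(void)
{
	struct request r = { .fifo_deadline = 0 };
	struct queue q = { .fifo_head = &r, .next_rq = &r };

	/* throttled, but the fifo expired at t=0: still dispatched */
	printf("%s\n", select_request(&q, 10, false) ? "dispatched" : "starved");
	return 0;
}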
cfq_mark_cfqq_prio_changed(cfqq);
if (is_sync) {
- if (!cfq_class_idle(cfqq))
+		if (!cfq_class_idle(cfqq) &&
+		    (!cfqd->cfq_rt_idle_only || cfq_class_rt(cfqq)))
cfq_mark_cfqq_idle_window(cfqq);
cfq_mark_cfqq_sync(cfqq);
}
blkcg = bio_blkcg(bio);
cfqg = cfq_lookup_create_cfqg(cfqd, blkcg);
+ if (!cfqg) {
+ cfqq = &cfqd->oom_cfqq;
+ goto out;
+ }
+
cfqq = cic_to_cfqq(cic, is_sync);
} else
cfqq = &cfqd->oom_cfqq;
}
-
+out:
if (new_cfqq)
kmem_cache_free(cfq_pool, new_cfqq);
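
/*
 * The new !cfqg branch reuses the same fallback as the allocation-failure
 * path below it: oom_cfqq is embedded in cfq_data, so the lookup can degrade
 * gracefully instead of dereferencing a NULL group.  A sketch of the pattern
 * (toy names, not the kernel's):
 */
#include <stdio.h>
#include <stdlib.h>

struct toy_queue { const char *name; };

static struct toy_queue oom_q = { "oom" };	/* embedded, always valid */

static struct toy_queue *get_queue(int fail)
{
	struct toy_queue *q = fail ? NULL : malloc(sizeof(*q));

	if (!q)
		return &oom_q;	/* callers never see NULL */
	q->name = "dynamic";
	return q;
}

int main(void)
{
	printf("%s\n", get_queue(1)->name);	/* prints "oom" */
	return 0;
}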
cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
struct bio *bio, gfp_t gfp_mask)
{
- const int ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio);
- const int ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
+ int ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio);
+ int ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
struct cfq_queue **async_cfqq = NULL;
struct cfq_queue *cfqq = NULL;
if (!is_sync) {
+ if (!ioprio_valid(cic->ioprio)) {
+ struct task_struct *tsk = current;
+ ioprio = task_nice_ioprio(tsk);
+ ioprio_class = task_nice_ioclass(tsk);
+ }
async_cfqq = cfq_async_queue_prio(cfqd, ioprio_class, ioprio);
cfqq = *async_cfqq;
}
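
/*
 * When a task never called ioprio_set(), cic->ioprio is invalid and the
 * effective class/level are derived from scheduling state instead.  The
 * level mapping in task_nice_ioprio() compresses nice -20..19 onto the
 * eight I/O priority levels; a userspace sketch of that formula:
 */
#include <stdio.h>

static int nice_to_ioprio_level(int nice)
{
	return (nice + 20) / 5;		/* -20..19 -> 0..7 */
}

int main(void)
{
	for (int nice = -20; nice <= 19; nice += 13)
		printf("nice %3d -> ioprio level %d\n", nice, nice_to_ioprio_level(nice));
	return 0;
}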
if (!cfq_cfqq_sync(cfqq) || cfq_class_idle(cfqq))
return;
+ if (cfqd->cfq_rt_idle_only && !cfq_class_rt(cfqq))
+ return;
+
enable_idle = old_idle = cfq_cfqq_idle_window(cfqq);
	if (cfqq->queued[0] + cfqq->queued[1] >= 4)
		cfq_mark_cfqq_deep(cfqq);
* if the new request is sync, but the currently running queue is
* not, let the sync request have priority.
*/
- if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
+ if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq) && !cfq_cfqq_must_dispatch(cfqq))
return true;
	if (new_cfqq->cfqg != cfqq->cfqg)
		return false;
kfree(cfqd);
}
-static int cfq_init_queue(struct request_queue *q)
+static int cfq_init_queue(struct request_queue *q, struct elevator_type *e)
{
struct cfq_data *cfqd;
struct blkcg_gq *blkg __maybe_unused;
int i, ret;
+ struct elevator_queue *eq;
+
+ eq = elevator_alloc(q, e);
+ if (!eq)
+ return -ENOMEM;
cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
- if (!cfqd)
+ if (!cfqd) {
+ kobject_put(&eq->kobj);
return -ENOMEM;
+ }
+ eq->elevator_data = cfqd;
cfqd->queue = q;
- q->elevator->elevator_data = cfqd;
+ spin_lock_irq(q->queue_lock);
+ q->elevator = eq;
+ spin_unlock_irq(q->queue_lock);
/* Init root service tree */
cfqd->grp_service_tree = CFQ_RB_ROOT;
cfqd->cfq_target_latency = cfq_target_latency;
cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
cfqd->cfq_slice_idle = cfq_slice_idle;
+ cfqd->cfq_rt_idle_only = cfq_rt_idle_only;
cfqd->cfq_group_idle = cfq_group_idle;
cfqd->cfq_latency = 1;
cfqd->hw_tag = -1;
out_free:
kfree(cfqd);
+ kobject_put(&eq->kobj);
return ret;
}
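
/*
 * The reshuffle closes an init/switch race: the elevator queue is built and
 * wired up completely in private, then published in q->elevator inside one
 * short critical section, and every error path drops the kobject reference
 * it took.  A generic userspace sketch of allocate-init-publish-under-lock
 * (toy types, not the block layer's):
 */
#include <pthread.h>
#include <stdlib.h>

struct elv { void *data; };

struct toy_q {
	pthread_mutex_t lock;
	struct elv *elevator;	/* readers see NULL or a fully built elv */
};

static int init_queue(struct toy_q *q)
{
	struct elv *e = calloc(1, sizeof(*e));

	if (!e)
		return -1;
	e->data = calloc(1, 64);	/* stand-in for cfqd */
	if (!e->data) {
		free(e);		/* error path: tear down in reverse */
		return -1;
	}

	pthread_mutex_lock(&q->lock);
	q->elevator = e;		/* publish only when complete */
	pthread_mutex_unlock(&q->lock);
	return 0;
}

int main(void)
{
	struct toy_q q = { .lock = PTHREAD_MUTEX_INITIALIZER };

	return init_queue(&q);
}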
SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
+SHOW_FUNCTION(cfq_rt_idle_only_show, cfqd->cfq_rt_idle_only, 0);
SHOW_FUNCTION(cfq_group_idle_show, cfqd->cfq_group_idle, 1);
SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1,
UINT_MAX, 0);
STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
+STORE_FUNCTION(cfq_rt_idle_only_store, &cfqd->cfq_rt_idle_only, 0, 1, 0);
STORE_FUNCTION(cfq_group_idle_store, &cfqd->cfq_group_idle, 0, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
CFQ_ATTR(slice_async),
CFQ_ATTR(slice_async_rq),
CFQ_ATTR(slice_idle),
+ CFQ_ATTR(rt_idle_only),
CFQ_ATTR(group_idle),
CFQ_ATTR(low_latency),
CFQ_ATTR(target_latency),
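
/*
 * Each CFQ_ATTR(name) wires a name_show/name_store pair generated by the
 * SHOW_FUNCTION/STORE_FUNCTION macros above into sysfs.  For rt_idle_only
 * the store side parses the value, clamps it to the 0..1 bounds given in
 * its STORE_FUNCTION line, and stores it without jiffies conversion (the
 * trailing 0 argument).  A userspace model of that clamp (not the macro
 * expansion itself):
 */
#include <stdio.h>

static unsigned int clamp_store(unsigned int data, unsigned int min,
				unsigned int max)
{
	if (data < min)
		data = min;
	else if (data > max)
		data = max;
	return data;		/* value that actually lands in cfqd */
}

int main(void)
{
	/* "echo 7 > .../iosched/rt_idle_only" still stores 1 */
	printf("stored = %u\n", clamp_store(7, 0, 1));
	return 0;
}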