sched: remove PREEMPT_RESTRICT
author Ingo Molnar <mingo@elte.hu>
Fri, 9 Nov 2007 21:39:39 +0000 (22:39 +0100)
committer Ingo Molnar <mingo@elte.hu>
Fri, 9 Nov 2007 21:39:39 +0000 (22:39 +0100)
remove PREEMPT_RESTRICT. (this is a separate commit so that any
regression related to the removal itself is bisectable)

Signed-off-by: Ingo Molnar <mingo@elte.hu>
include/linux/sched.h
kernel/sched.c
kernel/sched_fair.c

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 951759e30c09d909411f4c8a25515747b8932e97..93fd30d6dac4d723ef01c53cc96a117e34550a6b 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -863,7 +863,6 @@ struct sched_entity {
        struct load_weight      load;           /* for load-balancing */
        struct rb_node          run_node;
        unsigned int            on_rq;
-       int                     peer_preempt;
 
        u64                     exec_start;
        u64                     sum_exec_runtime;
diff --git a/kernel/sched.c b/kernel/sched.c
index 4b23dfb4c80f5d045f062a1e57519ce8a633b74d..2a107e4ad5eda381358d78f318562bcf472c848c 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -460,7 +460,6 @@ enum {
        SCHED_FEAT_TREE_AVG             = 4,
        SCHED_FEAT_APPROX_AVG           = 8,
        SCHED_FEAT_WAKEUP_PREEMPT       = 16,
-       SCHED_FEAT_PREEMPT_RESTRICT     = 32,
 };
 
 const_debug unsigned int sysctl_sched_features =
@@ -468,8 +467,7 @@ const_debug unsigned int sysctl_sched_features =
                SCHED_FEAT_START_DEBIT          * 1 |
                SCHED_FEAT_TREE_AVG             * 0 |
                SCHED_FEAT_APPROX_AVG           * 0 |
-               SCHED_FEAT_WAKEUP_PREEMPT       * 1 |
-               SCHED_FEAT_PREEMPT_RESTRICT     * 0;
+               SCHED_FEAT_WAKEUP_PREEMPT       * 1;
 
 #define sched_feat(x) (sysctl_sched_features & SCHED_FEAT_##x)
 
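The sched_feat() macro above gates each scheduler heuristic on a bit in
sysctl_sched_features, so removing a feature means deleting both its enum
bit and its "* 0"/"* 1" term from the default mask. Below is a minimal
standalone sketch of that idiom in plain userspace C; the names mirror the
kernel's, but this is an illustration, not kernel code:

#include <stdio.h>

enum {
	SCHED_FEAT_START_DEBIT		= 2,
	SCHED_FEAT_TREE_AVG		= 4,
	SCHED_FEAT_APPROX_AVG		= 8,
	SCHED_FEAT_WAKEUP_PREEMPT	= 16,
};

/* the "bit * 0|1" idiom: the multiplier is the compile-time default */
static unsigned int sysctl_sched_features =
		SCHED_FEAT_START_DEBIT		* 1 |
		SCHED_FEAT_TREE_AVG		* 0 |
		SCHED_FEAT_APPROX_AVG		* 0 |
		SCHED_FEAT_WAKEUP_PREEMPT	* 1;

/* token pasting turns sched_feat(TREE_AVG) into a mask test */
#define sched_feat(x) (sysctl_sched_features & SCHED_FEAT_##x)

int main(void)
{
	printf("WAKEUP_PREEMPT: %d\n", !!sched_feat(WAKEUP_PREEMPT));	/* 1 */
	printf("TREE_AVG:       %d\n", !!sched_feat(TREE_AVG));		/* 0 */
	return 0;
}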
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 7264814ba62a1d316e0a200f4cda679e15aac7e3..fbcb426029d0f43bc1cd6e9ebcba637e37f11029 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -546,7 +546,6 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
 
        update_stats_dequeue(cfs_rq, se);
        if (sleep) {
-               se->peer_preempt = 0;
 #ifdef CONFIG_SCHEDSTATS
                if (entity_is_task(se)) {
                        struct task_struct *tsk = task_of(se);
@@ -574,10 +573,8 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
 
        ideal_runtime = sched_slice(cfs_rq, curr);
        delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
-       if (delta_exec > ideal_runtime ||
-                       (sched_feat(PREEMPT_RESTRICT) && curr->peer_preempt))
+       if (delta_exec > ideal_runtime)
                resched_task(rq_of(cfs_rq)->curr);
-       curr->peer_preempt = 0;
 }
 
 static void
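Net effect on the tick path: with peer_preempt gone, check_preempt_tick()
reduces to a pure slice-overrun test. A runnable userspace analogue of the
simplified check follows; sched_slice() is stubbed with a fixed value, and
all names and numbers here are illustrative stand-ins, not kernel code:

#include <stdio.h>

typedef unsigned long long u64;

struct sched_entity {
	u64 sum_exec_runtime;		/* total runtime accrued, in ns */
	u64 prev_sum_exec_runtime;	/* runtime when last put on the CPU */
};

/* stand-in for sched_slice(): a fixed 4ms ideal slice */
static u64 sched_slice(void)
{
	return 4000000ULL;
}

static int check_preempt_tick(const struct sched_entity *curr)
{
	u64 ideal_runtime = sched_slice();
	u64 delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;

	return delta_exec > ideal_runtime;	/* 1 == would resched_task() */
}

int main(void)
{
	struct sched_entity curr = {
		.sum_exec_runtime	= 9000000,	/* 9ms total */
		.prev_sum_exec_runtime	= 4000000,	/* picked at 4ms */
	};

	/* ran 5ms of a 4ms slice -> preempt */
	printf("resched: %d\n", check_preempt_tick(&curr));
	return 0;
}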
@@ -867,9 +864,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p)
                        gran = calc_delta_fair(gran, &se->load);
 
                if (delta > gran) {
-                       int now = !sched_feat(PREEMPT_RESTRICT);
-
-                       if (now || p->prio < curr->prio || !se->peer_preempt++)
+                       if (p->prio < curr->prio)
                                resched_task(curr);
                }
        }
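One detail worth noting: SCHED_FEAT_PREEMPT_RESTRICT defaulted to off (the
"* 0" term removed in the sched.c hunk above), so in the old code "now" was
always 1 and the compound condition short-circuited as soon as delta > gran.
A tiny demo of that collapse, in illustrative userspace C:

#include <stdio.h>

#define SCHED_FEAT_PREEMPT_RESTRICT	32

int main(void)
{
	/* the default mask contributed SCHED_FEAT_PREEMPT_RESTRICT * 0 */
	unsigned int sysctl_sched_features = SCHED_FEAT_PREEMPT_RESTRICT * 0;

	int now = !(sysctl_sched_features & SCHED_FEAT_PREEMPT_RESTRICT);

	/* old condition: now || p->prio < curr->prio || !se->peer_preempt++ */
	printf("now = %d (short-circuits the old wakeup check)\n", now);
	return 0;
}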
@@ -1083,7 +1078,6 @@ static void task_new_fair(struct rq *rq, struct task_struct *p)
                swap(curr->vruntime, se->vruntime);
        }
 
-       se->peer_preempt = 0;
        enqueue_task_fair(rq, p, 0);
        resched_task(rq->curr);
 }