sched: Remove __ARCH_WANT_INTERRUPTS_ON_CTXSW
Author:     Peter Zijlstra <a.p.zijlstra@chello.nl>
AuthorDate: Wed, 12 Sep 2012 09:22:00 +0000 (11:22 +0200)
Commit:     Ingo Molnar <mingo@kernel.org>
CommitDate: Thu, 13 Sep 2012 14:52:04 +0000 (16:52 +0200)
Now that the last architecture to use this has stopped doing so (ARM,
thanks Catalin!), we can remove this complexity from the scheduler
core.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Link: http://lkml.kernel.org/n/tip-g9p2a1w81xxbrze25v9zpzbf@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Documentation/scheduler/sched-arch.txt
include/linux/sched.h
kernel/fork.c
kernel/sched/core.c
kernel/sched/rt.c
kernel/sched/sched.h

diff --git a/Documentation/scheduler/sched-arch.txt b/Documentation/scheduler/sched-arch.txt
index 28aa1075e291c40e832db64a113e8328ff6b2a06..b1b8587b86f0cc8153c2cedda5a3308f980828aa 100644
--- a/Documentation/scheduler/sched-arch.txt
+++ b/Documentation/scheduler/sched-arch.txt
@@ -17,16 +17,6 @@ you must `#define __ARCH_WANT_UNLOCKED_CTXSW` in a header file
 Unlocked context switches introduce only a very minor performance
 penalty to the core scheduler implementation in the CONFIG_SMP case.
 
-2. Interrupt status
-By default, the switch_to arch function is called with interrupts
-disabled. Interrupts may be enabled over the call if it is likely to
-introduce a significant interrupt latency by adding the line
-`#define __ARCH_WANT_INTERRUPTS_ON_CTXSW` in the same place as for
-unlocked context switches. This define also implies
-`__ARCH_WANT_UNLOCKED_CTXSW`. See arch/arm/include/asm/system.h for an
-example.
-
-
 CPU idle
 ========
 Your cpu_idle routines need to obey the following rules:
diff --git a/include/linux/sched.h b/include/linux/sched.h
index f3eebc121ebc1cbc8c6b4eb175186bf7033e2126..60e5e38eee2a3eac108daf733e3011e046c35d06 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -678,11 +678,6 @@ struct signal_struct {
                                         * (notably. ptrace) */
 };
 
-/* Context switch must be unlocked if interrupts are to be enabled */
-#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
-# define __ARCH_WANT_UNLOCKED_CTXSW
-#endif
-
 /*
  * Bits in flags field of signal_struct.
  */
diff --git a/kernel/fork.c b/kernel/fork.c
index 2c8857e12855393759562b3c6eeec2d23de6f080..743d48f4d7111effe81ff3172d25a82c90141755 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1280,11 +1280,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 #endif
 #ifdef CONFIG_TRACE_IRQFLAGS
        p->irq_events = 0;
-#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
-       p->hardirqs_enabled = 1;
-#else
        p->hardirqs_enabled = 0;
-#endif
        p->hardirq_enable_ip = 0;
        p->hardirq_enable_event = 0;
        p->hardirq_disable_ip = _THIS_IP_;
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index c46a011ce5db89c76a0d466a0f7a965204a86aec..8b51b2d9b1fda1bfebf50f9ebde60d6c831cbb42 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1361,25 +1361,6 @@ static void ttwu_queue_remote(struct task_struct *p, int cpu)
                smp_send_reschedule(cpu);
 }
 
-#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
-static int ttwu_activate_remote(struct task_struct *p, int wake_flags)
-{
-       struct rq *rq;
-       int ret = 0;
-
-       rq = __task_rq_lock(p);
-       if (p->on_cpu) {
-               ttwu_activate(rq, p, ENQUEUE_WAKEUP);
-               ttwu_do_wakeup(rq, p, wake_flags);
-               ret = 1;
-       }
-       __task_rq_unlock(rq);
-
-       return ret;
-
-}
-#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
-
 bool cpus_share_cache(int this_cpu, int that_cpu)
 {
        return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
@@ -1440,21 +1421,8 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
         * If the owning (remote) cpu is still in the middle of schedule() with
         * this task as prev, wait until its done referencing the task.
         */
-       while (p->on_cpu) {
-#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
-               /*
-                * In case the architecture enables interrupts in
-                * context_switch(), we cannot busy wait, since that
-                * would lead to deadlocks when an interrupt hits and
-                * tries to wake up @prev. So bail and do a complete
-                * remote wakeup.
-                */
-               if (ttwu_activate_remote(p, wake_flags))
-                       goto stat;
-#else
+       while (p->on_cpu)
                cpu_relax();
-#endif
-       }
        /*
         * Pairs with the smp_wmb() in finish_lock_switch().
         */
@@ -1798,13 +1766,7 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
        prev_state = prev->state;
        account_switch_vtime(prev);
        finish_arch_switch(prev);
-#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
-       local_irq_disable();
-#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
        perf_event_task_sched_in(prev, current);
-#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
-       local_irq_enable();
-#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
        finish_lock_switch(rq, prev);
        finish_arch_post_lock_switch();
 
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index e0b7ba9c040f74b22bb63e0d957b672dac4adce0..418feb01344edb7e59f11643e153e2f1866cc4ed 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1632,11 +1632,6 @@ static int push_rt_task(struct rq *rq)
        if (!next_task)
                return 0;
 
-#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
-       if (unlikely(task_running(rq, next_task)))
-               return 0;
-#endif
-
 retry:
        if (unlikely(next_task == rq->curr)) {
                WARN_ON(1);
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 09871698e80c26d7a7b9c41739198957916bf574..7a7db09cfabc18af1b35dd2bee57bf60d4103cf4 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -737,11 +737,7 @@ static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
         */
        next->on_cpu = 1;
 #endif
-#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
-       raw_spin_unlock_irq(&rq->lock);
-#else
        raw_spin_unlock(&rq->lock);
-#endif
 }
 
 static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
@@ -755,9 +751,7 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
        smp_wmb();
        prev->on_cpu = 0;
 #endif
-#ifndef __ARCH_WANT_INTERRUPTS_ON_CTXSW
        local_irq_enable();
-#endif
 }
 #endif /* __ARCH_WANT_UNLOCKED_CTXSW */
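
Note: with __ARCH_WANT_INTERRUPTS_ON_CTXSW gone, the only context-switch variant
an architecture can still opt into is the unlocked one kept in
Documentation/scheduler/sched-arch.txt above. A minimal sketch of that opt-in is
shown below; the header path is illustrative only (the removed doc text pointed at
arch/arm/include/asm/system.h as an example) and is not part of this patch:

    /* arch/<your-arch>/include/asm/system.h -- illustrative sketch, not from this patch */

    /*
     * Ask the scheduler core to drop rq->lock across switch_to().
     * Interrupts stay disabled over the switch; the interrupts-on
     * variant (__ARCH_WANT_INTERRUPTS_ON_CTXSW) no longer exists.
     */
    #define __ARCH_WANT_UNLOCKED_CTXSW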