kernel/rcutree.c
index 8154a4a3491c0226e825fac340c602be0e422d24..89419ff92e996c1e52fade38475ba14604a8f7bd 100644
@@ -36,7 +36,7 @@
 #include <linux/interrupt.h>
 #include <linux/sched.h>
 #include <linux/nmi.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
 #include <linux/bitops.h>
 #include <linux/module.h>
 #include <linux/completion.h>
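The include change is part of the move to the architecture-independent <linux/atomic.h> wrapper. It also matters locally: if I read the generic header of this era correctly, it is what supplies the atomic_or() fallback that the wakemask changes below rely on.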
@@ -95,7 +95,6 @@ static DEFINE_PER_CPU(struct task_struct *, rcu_cpu_kthread_task);
 DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
 DEFINE_PER_CPU(int, rcu_cpu_kthread_cpu);
 DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
-static DEFINE_PER_CPU(wait_queue_head_t, rcu_cpu_wq);
 DEFINE_PER_CPU(char, rcu_cpu_has_work);
 static char rcu_kthreads_spawnable;
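The per-CPU rcu_cpu_wq waitqueue removed above sets the theme for the rest of the patch: instead of sleeping on waitqueues, the RCU kthreads are now woken directly with wake_up_process(). For that to be safe, the sleep side must set the task state before re-checking its wakeup condition, so that a wakeup arriving between the check and schedule() is not lost; see the rcu_wait() sketch after the rcu_cpu_kthread() hunk below.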
 
@@ -1476,7 +1475,7 @@ static void invoke_rcu_cpu_kthread(void)
                local_irq_restore(flags);
                return;
        }
-       wake_up(&__get_cpu_var(rcu_cpu_wq));
+       wake_up_process(__this_cpu_read(rcu_cpu_kthread_task));
        local_irq_restore(flags);
 }
 
@@ -1526,13 +1525,10 @@ static void rcu_cpu_kthread_setrt(int cpu, int to_rt)
  */
 static void rcu_cpu_kthread_timer(unsigned long arg)
 {
-       unsigned long flags;
        struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, arg);
        struct rcu_node *rnp = rdp->mynode;
 
-       raw_spin_lock_irqsave(&rnp->lock, flags);
-       rnp->wakemask |= rdp->grpmask;
-       raw_spin_unlock_irqrestore(&rnp->lock, flags);
+       atomic_or(rdp->grpmask, &rnp->wakemask);
        invoke_rcu_node_kthread(rnp);
 }
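This hunk only compiles if rnp->wakemask has changed type, from a lock-protected unsigned long to an atomic_t. That change lives in rcutree.h, which is not shown on this page; presumably it has roughly this shape (field comment guessed):

-	unsigned long wakemask;	/* CPUs whose kthread needs to be awakened. */
+	atomic_t wakemask;	/* CPUs whose kthread needs to be awakened. */

With the bit set via atomic_or(), the timer handler no longer needs to take rnp->lock at all.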
 
@@ -1599,14 +1595,12 @@ static int rcu_cpu_kthread(void *arg)
        unsigned long flags;
        int spincnt = 0;
        unsigned int *statusp = &per_cpu(rcu_cpu_kthread_status, cpu);
-       wait_queue_head_t *wqp = &per_cpu(rcu_cpu_wq, cpu);
        char work;
        char *workp = &per_cpu(rcu_cpu_has_work, cpu);
 
        for (;;) {
                *statusp = RCU_KTHREAD_WAITING;
-               wait_event_interruptible(*wqp,
-                                        *workp != 0 || kthread_should_stop());
+               rcu_wait(*workp != 0 || kthread_should_stop());
                local_bh_disable();
                if (rcu_cpu_kthread_should_stop(cpu)) {
                        local_bh_enable();
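rcu_wait() is new with this patch but defined out of view (in rcutree.h in the upstream version of this change). It is presumably the classic open-coded wait loop, along these lines:

#define rcu_wait(cond)							\
do {									\
	for (;;) {							\
		set_current_state(TASK_INTERRUPTIBLE);			\
		if (cond)						\
			break;						\
		schedule();						\
	}								\
	__set_current_state(TASK_RUNNING);				\
} while (0)

Because TASK_INTERRUPTIBLE is set before (cond) is evaluated, a wake_up_process() that arrives between the test and schedule() simply makes schedule() return immediately, so the direct wakeups elsewhere in this patch cannot be lost.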
@@ -1657,7 +1651,6 @@ static int __cpuinit rcu_spawn_one_cpu_kthread(int cpu)
        per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
        WARN_ON_ONCE(per_cpu(rcu_cpu_kthread_task, cpu) != NULL);
        per_cpu(rcu_cpu_kthread_task, cpu) = t;
-       wake_up_process(t);
        sp.sched_priority = RCU_KTHREAD_PRIO;
        sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
        return 0;
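The wake_up_process(t) deleted here is deferred rather than dropped: freshly created kthreads now stay unwoken until rcu_spawn_kthreads() or rcu_online_kthreads() pokes them, once the CPU is fully up (see the comment added further down).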
@@ -1680,11 +1673,10 @@ static int rcu_node_kthread(void *arg)
 
        for (;;) {
                rnp->node_kthread_status = RCU_KTHREAD_WAITING;
-               wait_event_interruptible(rnp->node_wq, rnp->wakemask != 0);
+               rcu_wait(atomic_read(&rnp->wakemask) != 0);
                rnp->node_kthread_status = RCU_KTHREAD_RUNNING;
                raw_spin_lock_irqsave(&rnp->lock, flags);
-               mask = rnp->wakemask;
-               rnp->wakemask = 0;
+               mask = atomic_xchg(&rnp->wakemask, 0);
                rcu_initiate_boost(rnp, flags); /* releases rnp->lock. */
                for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1) {
                        if ((mask & 0x1) == 0)
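The atomic_xchg() collapses the old read-then-zero pair into one snapshot-and-clear. That is needed because rcu_cpu_kthread_timer() now sets bits with atomic_or() without holding rnp->lock, so a non-atomic read followed by a store of zero could silently discard a bit set in between, losing a wakeup request.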
@@ -1765,13 +1757,14 @@ static int __cpuinit rcu_spawn_one_node_kthread(struct rcu_state *rsp,
                raw_spin_lock_irqsave(&rnp->lock, flags);
                rnp->node_kthread_task = t;
                raw_spin_unlock_irqrestore(&rnp->lock, flags);
-               wake_up_process(t);
                sp.sched_priority = 99;
                sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
        }
        return rcu_spawn_one_boost_kthread(rsp, rnp, rnp_index);
 }
 
+static void rcu_wake_one_boost_kthread(struct rcu_node *rnp);
+
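rcu_wake_one_boost_kthread() is only declared here; its definition is out of view (in rcutree_plugin.h upstream). Presumably it is just the boost-kthread analogue of the wakeups above, with an empty stub when CONFIG_RCU_BOOST is off:

static void __cpuinit rcu_wake_one_boost_kthread(struct rcu_node *rnp)
{
	if (rnp->boost_kthread_task)
		wake_up_process(rnp->boost_kthread_task);
}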
 /*
  * Spawn all kthreads -- called as soon as the scheduler is running.
  */
@@ -1779,24 +1772,31 @@ static int __init rcu_spawn_kthreads(void)
 {
        int cpu;
        struct rcu_node *rnp;
+       struct task_struct *t;
 
        rcu_kthreads_spawnable = 1;
        for_each_possible_cpu(cpu) {
-               init_waitqueue_head(&per_cpu(rcu_cpu_wq, cpu));
                per_cpu(rcu_cpu_has_work, cpu) = 0;
-               if (cpu_online(cpu))
+               if (cpu_online(cpu)) {
                        (void)rcu_spawn_one_cpu_kthread(cpu);
+                       t = per_cpu(rcu_cpu_kthread_task, cpu);
+                       if (t)
+                               wake_up_process(t);
+               }
        }
        rnp = rcu_get_root(rcu_state);
-       init_waitqueue_head(&rnp->node_wq);
-       rcu_init_boost_waitqueue(rnp);
        (void)rcu_spawn_one_node_kthread(rcu_state, rnp);
-       if (NUM_RCU_NODES > 1)
+       if (rnp->node_kthread_task)
+               wake_up_process(rnp->node_kthread_task);
+       if (NUM_RCU_NODES > 1) {
                rcu_for_each_leaf_node(rcu_state, rnp) {
-                       init_waitqueue_head(&rnp->node_wq);
-                       rcu_init_boost_waitqueue(rnp);
                        (void)rcu_spawn_one_node_kthread(rcu_state, rnp);
+                       t = rnp->node_kthread_task;
+                       if (t)
+                               wake_up_process(t);
+                       rcu_wake_one_boost_kthread(rnp);
                }
+       }
        return 0;
 }
 early_initcall(rcu_spawn_kthreads);
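The ordering in rcu_spawn_kthreads() is deliberate: at early_initcall() time the scheduler is running and the boot CPU is fully online, so kthreads for already-online CPUs can be spawned and woken immediately. CPUs brought up later get their kthreads spawned at CPU_UP_PREPARE time but woken only from rcu_online_kthreads() at CPU_ONLINE time, for the hung-task-watchdog reason documented below.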
@@ -2200,14 +2200,14 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
        raw_spin_unlock_irqrestore(&rsp->onofflock, flags);
 }
 
-static void __cpuinit rcu_online_cpu(int cpu)
+static void __cpuinit rcu_prepare_cpu(int cpu)
 {
        rcu_init_percpu_data(cpu, &rcu_sched_state, 0);
        rcu_init_percpu_data(cpu, &rcu_bh_state, 0);
        rcu_preempt_init_percpu_data(cpu);
 }
 
-static void __cpuinit rcu_online_kthreads(int cpu)
+static void __cpuinit rcu_prepare_kthreads(int cpu)
 {
        struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
        struct rcu_node *rnp = rdp->mynode;
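These two are pure renames: both functions run from the CPU_UP_PREPARE notifier, before the incoming CPU is online, so "prepare" matches the notifier phase. The rename also frees the rcu_online_kthreads name for the new CPU_ONLINE-time hook added next.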
@@ -2220,6 +2220,31 @@ static void __cpuinit rcu_online_kthreads(int cpu)
        }
 }
 
+/*
+ * kthread_create() creates threads in TASK_UNINTERRUPTIBLE state,
+ * but the RCU threads are woken on demand, and if demand is low it
+ * could be a while before they run, triggering the hung task watchdog.
+ *
+ * In order to avoid this, poke all tasks once the CPU is fully
+ * up and running.
+ */
+static void __cpuinit rcu_online_kthreads(int cpu)
+{
+       struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
+       struct rcu_node *rnp = rdp->mynode;
+       struct task_struct *t;
+
+       t = per_cpu(rcu_cpu_kthread_task, cpu);
+       if (t)
+               wake_up_process(t);
+
+       t = rnp->node_kthread_task;
+       if (t)
+               wake_up_process(t);
+
+       rcu_wake_one_boost_kthread(rnp);
+}
+
 /*
  * Handle CPU online/offline notification events.
  */
@@ -2233,10 +2258,11 @@ static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
        switch (action) {
        case CPU_UP_PREPARE:
        case CPU_UP_PREPARE_FROZEN:
-               rcu_online_cpu(cpu);
-               rcu_online_kthreads(cpu);
+               rcu_prepare_cpu(cpu);
+               rcu_prepare_kthreads(cpu);
                break;
        case CPU_ONLINE:
+               rcu_online_kthreads(cpu);
        case CPU_DOWN_FAILED:
                rcu_node_kthread_setaffinity(rnp, -1);
                rcu_cpu_kthread_setrt(cpu, 1);
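The absence of a break after rcu_online_kthreads(cpu) is intentional: CPU_ONLINE falls through into the CPU_DOWN_FAILED handling so that both paths reset node kthread affinity and restore SCHED_FIFO priority; CPU_ONLINE merely wakes the newly spawned kthreads first.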