Merge tag 'v3.10.69' into update
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index b0a8703a25ecdca15ee675696cf527ce9158d411..7d850f9a0c2b72465354d7cf32ee07ce066e462a 100644 (file)
@@ -39,6 +39,7 @@
 #include <asm/atomic.h>
 #include <asm/cacheflush.h>
 #include <asm/cputype.h>
+#include <asm/cpu_ops.h>
 #include <asm/mmu_context.h>
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
 #include <asm/tlbflush.h>
 #include <asm/ptrace.h>
 
+#include <linux/mt_sched_mon.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/ipi.h>
+
 /*
  * as from 2.5, kernels no longer have an init_tasks structure
  * so we need some other way of telling a new secondary core
  * where to place its SVC stack
  */
 struct secondary_data secondary_data;
-volatile unsigned long secondary_holding_pen_release = INVALID_HWID;
 
 enum ipi_msg_type {
        IPI_RESCHEDULE,
@@ -63,61 +68,16 @@ enum ipi_msg_type {
        IPI_CPU_STOP,
 };
 
-static DEFINE_RAW_SPINLOCK(boot_lock);
-
-/*
- * Write secondary_holding_pen_release in a way that is guaranteed to be
- * visible to all observers, irrespective of whether they're taking part
- * in coherency or not.  This is necessary for the hotplug code to work
- * reliably.
- */
-static void __cpuinit write_pen_release(u64 val)
-{
-       void *start = (void *)&secondary_holding_pen_release;
-       unsigned long size = sizeof(secondary_holding_pen_release);
-
-       secondary_holding_pen_release = val;
-       __flush_dcache_area(start, size);
-}
-
 /*
  * Boot a secondary CPU, and assign it the specified idle task.
  * This also gives us the initial stack to use for this CPU.
  */
 static int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
 {
-       unsigned long timeout;
-
-       /*
-        * Set synchronisation state between this boot processor
-        * and the secondary one
-        */
-       raw_spin_lock(&boot_lock);
-
-       /*
-        * Update the pen release flag.
-        */
-       write_pen_release(cpu_logical_map(cpu));
-
-       /*
-        * Send an event, causing the secondaries to read pen_release.
-        */
-       sev();
-
-       timeout = jiffies + (1 * HZ);
-       while (time_before(jiffies, timeout)) {
-               if (secondary_holding_pen_release == INVALID_HWID)
-                       break;
-               udelay(10);
-       }
+       if (cpu_ops[cpu]->cpu_boot)
+               return cpu_ops[cpu]->cpu_boot(cpu);
 
-       /*
-        * Now the secondary core is starting up let it run its
-        * calibrations, then wait for it to finish
-        */
-       raw_spin_unlock(&boot_lock);
-
-       return secondary_holding_pen_release != INVALID_HWID ? -ENOSYS : 0;
+       return -EOPNOTSUPP;
 }
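
boot_secondary() now just dispatches to the per-CPU enable-method backend. For reference, a minimal sketch of the cpu_operations structure these calls assume, modelled on asm/cpu_ops.h in this kernel series (field order and the exact CONFIG guards here are assumptions):

struct cpu_operations {
	const char	*name;
	int		(*cpu_init)(struct device_node *, unsigned int);	/* parse the DT node */
	int		(*cpu_prepare)(unsigned int);				/* early per-cpu setup */
	int		(*cpu_boot)(unsigned int);				/* kick the CPU into the kernel */
	void		(*cpu_postboot)(void);					/* runs on the new CPU itself */
#ifdef CONFIG_HOTPLUG_CPU
	int		(*cpu_disable)(unsigned int cpu);
	void		(*cpu_die)(unsigned int cpu);
	int		(*cpu_kill)(unsigned int cpu);
#endif
};

extern const struct cpu_operations *cpu_ops[NR_CPUS];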
 
 static DECLARE_COMPLETION(cpu_running);
@@ -158,6 +118,11 @@ int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
        return ret;
 }
 
+static void __cpuinit smp_store_cpu_info(unsigned int cpuid)
+{
+       store_cpu_topology(cpuid);
+}
+
 /*
  * This is the secondary CPU boot entry.  We're using this CPU's
  * idle thread stack, but a set of temporary page tables.
@@ -177,6 +142,8 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
        current->active_mm = mm;
        cpumask_set_cpu(cpu, mm_cpumask(mm));
 
+       set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
+
        /*
         * TTBR0 is only used for the identity mapping at this stage. Make it
         * point to zero page to avoid speculatively fetching new entries.
@@ -187,17 +154,8 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
        preempt_disable();
        trace_hardirqs_off();
 
-       /*
-        * Let the primary processor know we're out of the
-        * pen, then head off into the C entry point
-        */
-       write_pen_release(INVALID_HWID);
-
-       /*
-        * Synchronise with the boot thread.
-        */
-       raw_spin_lock(&boot_lock);
-       raw_spin_unlock(&boot_lock);
+       if (cpu_ops[cpu]->cpu_postboot)
+               cpu_ops[cpu]->cpu_postboot();
 
        /*
         * Log the CPU info before it is marked online and might get read.
@@ -212,11 +170,14 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
        set_cpu_online(cpu, true);
        complete(&cpu_running);
 
+       smp_store_cpu_info(cpu);
+
        /*
         * Enable GIC and timers.
         */
        notify_cpu_starting(cpu);
 
+       local_dbg_enable();
        local_irq_enable();
        local_fiq_enable();
 
@@ -226,41 +187,136 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
        cpu_startup_entry(CPUHP_ONLINE);
 }
 
-void __init smp_cpus_done(unsigned int max_cpus)
+#ifdef CONFIG_HOTPLUG_CPU
+static int op_cpu_disable(unsigned int cpu)
 {
-       unsigned long bogosum = loops_per_jiffy * num_online_cpus();
+       /*
+        * If we don't have a cpu_die method, abort before we reach the point
+        * of no return. CPU0 may not have a cpu_ops, so test for it.
+        */
+       if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_die)
+               return -EOPNOTSUPP;
 
-       pr_info("SMP: Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
-               num_online_cpus(), bogosum / (500000/HZ),
-               (bogosum / (5000/HZ)) % 100);
+       /*
+        * We may need to abort a hot unplug for some other mechanism-specific
+        * reason.
+        */
+       if (cpu_ops[cpu]->cpu_disable)
+               return cpu_ops[cpu]->cpu_disable(cpu);
+
+       return 0;
 }
 
-void __init smp_prepare_boot_cpu(void)
+/*
+ * __cpu_disable runs on the processor to be shutdown.
+ */
+int __cpu_disable(void)
 {
-}
+       unsigned int cpu = smp_processor_id();
+       int ret;
 
-static void (*smp_cross_call)(const struct cpumask *, unsigned int);
+       ret = op_cpu_disable(cpu);
+       if (ret)
+               return ret;
 
-static const struct smp_enable_ops *enable_ops[] __initconst = {
-       &smp_spin_table_ops,
-       &smp_psci_ops,
-       NULL,
-};
+       /*
+        * Take this CPU offline.  Once we clear this, we can't return,
+        * and we must not schedule until we're ready to give up the cpu.
+        */
+       set_cpu_online(cpu, false);
 
-static const struct smp_enable_ops *smp_enable_ops[NR_CPUS];
+       /*
+        * OK - migrate IRQs away from this CPU
+        */
+       migrate_irqs();
+
+       /*
+        * Remove this CPU from the vm mask set of all processes.
+        */
+       clear_tasks_mm_cpumask(cpu);
 
-static const struct smp_enable_ops * __init smp_get_enable_ops(const char *name)
+       return 0;
+}
+
+static int op_cpu_kill(unsigned int cpu)
 {
-       const struct smp_enable_ops **ops = enable_ops;
+       /*
+        * If we have no means of synchronising with the dying CPU, then assume
+        * that it is really dead. We can only wait for an arbitrary length of
+        * time and hope that it's dead, so let's skip the wait and just hope.
+        */
+       if (!cpu_ops[cpu]->cpu_kill)
+               return 1;
 
-       while (*ops) {
-               if (!strcmp(name, (*ops)->name))
-                       return *ops;
+       return cpu_ops[cpu]->cpu_kill(cpu);
+}
+
+static DECLARE_COMPLETION(cpu_died);
 
-               ops++;
+/*
+ * called on the thread which is asking for a CPU to be shut down -
+ * waits until shutdown has completed, or the wait times out.
+ */
+void __cpu_die(unsigned int cpu)
+{
+       if (!wait_for_completion_timeout(&cpu_died, msecs_to_jiffies(5000))) {
+               pr_crit("CPU%u: cpu didn't die\n", cpu);
+               return;
        }
+       pr_notice("CPU%u: shutdown\n", cpu);
+
+       /*
+        * Now that the dying CPU is beyond the point of no return w.r.t.
+        * in-kernel synchronisation, try to get the firmware to help us to
+        * verify that it has really left the kernel before we consider
+        * clobbering anything it might still be using.
+        */
+       if (!op_cpu_kill(cpu))
+               pr_warn("CPU%d may not have shut down cleanly\n", cpu);
+}
+
+/*
+ * Called from the idle thread for the CPU which has been shut down.
+ *
+ * Note that we disable IRQs here, but do not re-enable them
+ * before returning to the caller. This is also the behaviour
+ * of the other hotplug-cpu capable cores, so presumably coming
+ * out of idle fixes this.
+ */
+void cpu_die(void)
+{
+       unsigned int cpu = smp_processor_id();
+
+       idle_task_exit();
+
+       local_irq_disable();
+
+       /* Tell __cpu_die() that this CPU is now safe to dispose of */
+       complete(&cpu_died);
+
+       /*
+        * Actually shut down the CPU. This must never fail. The specific hotplug
+        * mechanism must perform all required cache maintenance to ensure that
+        * no dirty lines are lost in the process of shutting down the CPU.
+        */
+       cpu_ops[cpu]->cpu_die(cpu);
 
-       return NULL;
+       BUG();
+}
+#endif
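
To make the hotplug flow above concrete: __cpu_disable() runs on the dying CPU and may veto the unplug, cpu_die() posts the cpu_died completion and hands control to the backend, and __cpu_die()/op_cpu_kill() run on the requesting CPU to confirm the death. A deliberately simplified, hypothetical backend is sketched below; the my_mech_* names are invented for illustration, and real backends (spin-table, PSCI) look different:

/* Hypothetical enable-method backend, for illustration only. */
static int my_mech_cpu_disable(unsigned int cpu)
{
	/* Veto hot-unplug for CPUs this mechanism cannot take down. */
	return 0;
}

static void my_mech_cpu_die(unsigned int cpu)
{
	/*
	 * Called from cpu_die() with IRQs off, after cpu_died has been
	 * completed. Must never return; park the core or power it off.
	 */
	while (1)
		asm volatile("wfi");
}

static int my_mech_cpu_kill(unsigned int cpu)
{
	/*
	 * Called from __cpu_die() on the requesting CPU. Return non-zero
	 * only once the dying CPU is known to have left the kernel.
	 */
	return 1;
}

static const struct cpu_operations my_mech_ops = {
	.name		= "my-mech",
	/* .cpu_init, .cpu_prepare and .cpu_boot omitted for brevity */
	.cpu_disable	= my_mech_cpu_disable,
	.cpu_die	= my_mech_cpu_die,
	.cpu_kill	= my_mech_cpu_kill,
};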
+
+void __init smp_cpus_done(unsigned int max_cpus)
+{
+       unsigned long bogosum = loops_per_jiffy * num_online_cpus();
+
+       pr_info("SMP: Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
+               num_online_cpus(), bogosum / (500000/HZ),
+               (bogosum / (5000/HZ)) % 100);
+}
+
+void __init smp_prepare_boot_cpu(void)
+{
+       set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
 }
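
Both smp_prepare_boot_cpu() and secondary_start_kernel() seed the per-CPU offset early so that this_cpu accessors work before the scheduler is up. On arm64, set_my_cpu_offset() is expected to reduce to a single system-register write; a rough sketch of the asm/percpu.h helper (the exact clobber list is an assumption):

static inline void set_my_cpu_offset(unsigned long off)
{
	/* Cache the per-cpu offset in TPIDR_EL1 so later reads avoid memory. */
	asm volatile("msr tpidr_el1, %0" :: "r" (off) : "memory");
}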
 
 /*
@@ -270,9 +326,8 @@ static const struct smp_enable_ops * __init smp_get_enable_ops(const char *name)
  */
 void __init smp_init_cpus(void)
 {
-       const char *enable_method;
        struct device_node *dn = NULL;
-       int i, cpu = 1;
+       unsigned int i, cpu = 1;
        bool bootcpu_valid = false;
 
        while ((dn = of_find_node_by_type(dn, "cpu"))) {
@@ -341,25 +396,10 @@ void __init smp_init_cpus(void)
                if (cpu >= NR_CPUS)
                        goto next;
 
-               /*
-                * We currently support only the "spin-table" enable-method.
-                */
-               enable_method = of_get_property(dn, "enable-method", NULL);
-               if (!enable_method) {
-                       pr_err("%s: missing enable-method property\n",
-                               dn->full_name);
+               if (cpu_read_ops(dn, cpu) != 0)
                        goto next;
-               }
-
-               smp_enable_ops[cpu] = smp_get_enable_ops(enable_method);
-
-               if (!smp_enable_ops[cpu]) {
-                       pr_err("%s: invalid enable-method property: %s\n",
-                              dn->full_name, enable_method);
-                       goto next;
-               }
 
-               if (smp_enable_ops[cpu]->init_cpu(dn, cpu))
+               if (cpu_ops[cpu]->cpu_init(dn, cpu))
                        goto next;
 
                pr_debug("cpu logical map 0x%llx\n", hwid);
@@ -389,8 +429,13 @@ next:
 
 void __init smp_prepare_cpus(unsigned int max_cpus)
 {
-       int cpu, err;
-       unsigned int ncores = num_possible_cpus();
+       int err;
+       unsigned int cpu, ncores = num_possible_cpus();
+
+       init_cpu_topology();
+
+       smp_store_cpu_info(smp_processor_id());
 
        /*
         * are we trying to boot more cores than exist?
@@ -417,10 +462,10 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
                if (cpu == smp_processor_id())
                        continue;
 
-               if (!smp_enable_ops[cpu])
+               if (!cpu_ops[cpu])
                        continue;
 
-               err = smp_enable_ops[cpu]->prepare_cpu(cpu);
+               err = cpu_ops[cpu]->cpu_prepare(cpu);
                if (err)
                        continue;
 
@@ -429,36 +474,33 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
        }
 }
 
+static void (*__smp_cross_call)(const struct cpumask *, unsigned int);
 
 void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
 {
-       smp_cross_call = fn;
+       __smp_cross_call = fn;
 }
 
-void arch_send_call_function_ipi_mask(const struct cpumask *mask)
-{
-       smp_cross_call(mask, IPI_CALL_FUNC);
-}
-
-void arch_send_call_function_single_ipi(int cpu)
-{
-       smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
-}
-
-static const char *ipi_types[NR_IPI] = {
-#define S(x,s) [x - IPI_RESCHEDULE] = s
+static const char *ipi_types[NR_IPI] __tracepoint_string = {
+#define S(x,s) [x] = s
        S(IPI_RESCHEDULE, "Rescheduling interrupts"),
        S(IPI_CALL_FUNC, "Function call interrupts"),
        S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"),
        S(IPI_CPU_STOP, "CPU stop interrupts"),
 };
 
+static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
+{
+       trace_ipi_raise(target, ipi_types[ipinr]);
+       __smp_cross_call(target, ipinr);
+}
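
trace_ipi_raise(), trace_ipi_entry() and trace_ipi_exit() come from trace/events/ipi.h, which is included at the top of this file with CREATE_TRACE_POINTS defined so the tracepoints are emitted in this translation unit; the __tracepoint_string annotation on ipi_types lets the reason strings be resolved from trace data. A stripped-down sketch of such a tracepoint declaration (the in-tree header also records the target cpumask, so the details here are assumptions):

/* Illustrative only: same call signature as trace_ipi_raise() above. */
TRACE_EVENT(ipi_raise,

	TP_PROTO(const struct cpumask *mask, const char *reason),

	TP_ARGS(mask, reason),

	TP_STRUCT__entry(
		__string(reason, reason)
	),

	TP_fast_assign(
		__assign_str(reason, reason);
	),

	TP_printk("reason=%s", __get_str(reason))
);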
+
 void show_ipi_list(struct seq_file *p, int prec)
 {
        unsigned int cpu, i;
 
        for (i = 0; i < NR_IPI; i++) {
-               seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i + IPI_RESCHEDULE,
+               seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i,
                           prec >= 4 ? " " : "");
                for_each_present_cpu(cpu)
                        seq_printf(p, "%10u ",
@@ -478,6 +520,16 @@ u64 smp_irq_stat_cpu(unsigned int cpu)
        return sum;
 }
 
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
+{
+       smp_cross_call(mask, IPI_CALL_FUNC);
+}
+
+void arch_send_call_function_single_ipi(int cpu)
+{
+       smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
+}
+
 static DEFINE_RAW_SPINLOCK(stop_lock);
 
 /*
@@ -510,8 +562,10 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
        unsigned int cpu = smp_processor_id();
        struct pt_regs *old_regs = set_irq_regs(regs);
 
-       if (ipinr >= IPI_RESCHEDULE && ipinr < IPI_RESCHEDULE + NR_IPI)
-               __inc_irq_stat(cpu, ipi_irqs[ipinr - IPI_RESCHEDULE]);
+       if ((unsigned)ipinr < NR_IPI) {
+               trace_ipi_entry(ipi_types[ipinr]);
+               __inc_irq_stat(cpu, ipi_irqs[ipinr]);
+       }
 
        switch (ipinr) {
        case IPI_RESCHEDULE:
@@ -520,19 +574,25 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
 
        case IPI_CALL_FUNC:
                irq_enter();
+               mt_trace_ISR_start(ipinr);
                generic_smp_call_function_interrupt();
+               mt_trace_ISR_end(ipinr);
                irq_exit();
                break;
 
        case IPI_CALL_FUNC_SINGLE:
                irq_enter();
+               mt_trace_ISR_start(ipinr);
                generic_smp_call_function_single_interrupt();
+               mt_trace_ISR_end(ipinr);
                irq_exit();
                break;
 
        case IPI_CPU_STOP:
                irq_enter();
+               mt_trace_ISR_start(ipinr);
                ipi_cpu_stop(cpu);
+               mt_trace_ISR_end(ipinr);
                irq_exit();
                break;
 
@@ -540,6 +600,9 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
                pr_crit("CPU%u: Unknown IPI message 0x%x\n", cpu, ipinr);
                break;
        }
+
+       if ((unsigned)ipinr < NR_IPI)
+               trace_ipi_exit(ipi_types[ipinr]);
        set_irq_regs(old_regs);
 }