import PULS_20160108
[GitHub/mt8127/android_kernel_alcatel_ttab.git]

diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 5919eb451bb9840590091c3d26a922bd65b1d005..5c5842d690b393123dd5aab69527d74f56da6c04 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
 #include <asm/smp_plat.h>
 #include <asm/virt.h>
 #include <asm/mach/arch.h>
+#include <linux/mt_sched_mon.h>
+/*******************************************************************************
+* 20131225 marc.huang                                                          *
+* CPU Hotplug debug                                                            *
+*******************************************************************************/
+#include <linux/mtk_ram_console.h>
+/******************************************************************************/
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/ipi.h>
 
 /*
  * as from 2.5, kernels no longer have an init_tasks structure
@@ -66,6 +76,7 @@ enum ipi_msg_type {
        IPI_CALL_FUNC,
        IPI_CALL_FUNC_SINGLE,
        IPI_CPU_STOP,
+       IPI_CPU_BACKTRACE,
 };
 
 static DECLARE_COMPLETION(cpu_running);
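
Note: the new IPI_CPU_BACKTRACE entry is mirrored further down in this patch by a matching string in the ipi_types[] table, which is indexed by this enum through designated initializers, so each message type needs exactly one S() line. A minimal standalone sketch of that pattern (the real NR_IPI is defined elsewhere in the tree; the sketch folds it into the enum for brevity):

    enum ipi_msg_type { IPI_WAKEUP, IPI_TIMER, NR_IPI };

    #define S(x, s) [x] = s
    static const char *ipi_types[NR_IPI] = {
            S(IPI_WAKEUP, "CPU wakeup interrupts"),
            S(IPI_TIMER,  "Timer broadcast interrupts"),
    };
    #undef S
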
@@ -233,10 +244,13 @@ void __cpuinit __cpu_die(unsigned int cpu)
 void __ref cpu_die(void)
 {
        unsigned int cpu = smp_processor_id();
+       aee_rr_rec_hoplug(cpu, 51, 0);
 
        idle_task_exit();
+       aee_rr_rec_hoplug(cpu, 52, 0);
 
        local_irq_disable();
+       aee_rr_rec_hoplug(cpu, 53, 0);
 
        /*
         * Flush the data out of the L1 cache for this CPU.  This must be
@@ -245,6 +259,7 @@ void __ref cpu_die(void)
         * *this* CPU and power down its cache.
         */
        flush_cache_louis();
+       aee_rr_rec_hoplug(cpu, 54, 0);
 
        /*
         * Tell __cpu_die() that this CPU is now safe to dispose of.  Once
@@ -252,6 +267,7 @@ void __ref cpu_die(void)
         * from this CPU and its cache by platform_cpu_kill().
         */
        complete(&cpu_died);
+       aee_rr_rec_hoplug(cpu, 55, 0);
 
        /*
         * Ensure that the cache lines associated with that completion are
@@ -260,6 +276,7 @@ void __ref cpu_die(void)
         * CPU waiting for this one.
         */
        flush_cache_louis();
+       aee_rr_rec_hoplug(cpu, 56, 0);
 
        /*
         * The actual CPU shutdown procedure is at least platform (if not
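
Note: the aee_rr_rec_hoplug() calls sprinkled through cpu_die() above (and through secondary_start_kernel() below) drop numbered breadcrumbs into MediaTek's RAM console (mtk_ram_console.h): if a CPU wedges during hotplug, the last step number recorded for it survives the watchdog reset and localizes the hang. The real recorder lives in MTK's AEE driver and is not part of this diff; a hypothetical minimal sketch of the idea, assuming a per-CPU byte in reset-surviving memory:

    /* Hypothetical sketch: the real aee_rr_rec_hoplug() is in MediaTek's
     * AEE/ram_console driver, which this diff does not show. */
    #define MAX_CPUS 8

    /* assumption: this array sits in RAM that a watchdog reset preserves */
    static volatile unsigned char hotplug_step[MAX_CPUS];

    static void record_hotplug_step(unsigned int cpu, unsigned char step)
    {
            if (cpu < MAX_CPUS)
                    hotplug_step[cpu] = step;  /* post-mortem tools read this back */
    }
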
@@ -312,7 +329,8 @@ static void percpu_timer_setup(void);
 asmlinkage void __cpuinit secondary_start_kernel(void)
 {
        struct mm_struct *mm = &init_mm;
-       unsigned int cpu;
+       unsigned int cpu = 0;
+       aee_rr_rec_hoplug(cpu, 1, 0);
 
        /*
         * The identity mapping is uncached (strongly ordered), so
@@ -322,34 +340,44 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
        local_flush_bp_all();
        enter_lazy_tlb(mm, current);
        local_flush_tlb_all();
+       aee_rr_rec_hoplug(cpu, 2, 0);
 
        /*
         * All kernel threads share the same mm context; grab a
         * reference and switch to it.
         */
        cpu = smp_processor_id();
+       aee_rr_rec_hoplug(cpu, 3, 0);
        atomic_inc(&mm->mm_count);
        current->active_mm = mm;
        cpumask_set_cpu(cpu, mm_cpumask(mm));
+       aee_rr_rec_hoplug(cpu, 4, 0);
 
        cpu_init();
+       aee_rr_rec_hoplug(cpu, 5, 0);
 
        printk("CPU%u: Booted secondary processor\n", cpu);
 
        preempt_disable();
+       aee_rr_rec_hoplug(cpu, 6, 0);
        trace_hardirqs_off();
+       aee_rr_rec_hoplug(cpu, 7, 0);
 
        /*
         * Give the platform a chance to do its own initialisation.
         */
        if (smp_ops.smp_secondary_init)
                smp_ops.smp_secondary_init(cpu);
+       aee_rr_rec_hoplug(cpu, 8, 0);
 
        notify_cpu_starting(cpu);
+       aee_rr_rec_hoplug(cpu, 9, 0);
 
        calibrate_delay();
+       aee_rr_rec_hoplug(cpu, 10, 0);
 
        smp_store_cpu_info(cpu);
+       aee_rr_rec_hoplug(cpu, 11, 0);
 
        /*
         * OK, now it's safe to let the boot CPU continue.  Wait for
@@ -357,20 +385,26 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
         * before we continue - which happens after __cpu_up returns.
         */
        set_cpu_online(cpu, true);
+       aee_rr_rec_hoplug(cpu, 12, 0);
        complete(&cpu_running);
+       aee_rr_rec_hoplug(cpu, 13, 0);
 
        /*
         * Setup the percpu timer for this CPU.
         */
        percpu_timer_setup();
+       aee_rr_rec_hoplug(cpu, 14, 0);
 
        local_irq_enable();
+       aee_rr_rec_hoplug(cpu, 15, 0);
        local_fiq_enable();
+       aee_rr_rec_hoplug(cpu, 16, 0);
 
        /*
         * OK, it's off to the idle thread for us
         */
        cpu_startup_entry(CPUHP_ONLINE);
+       aee_rr_rec_hoplug(cpu, 17, 0);
 }
 
 void __init smp_cpus_done(unsigned int max_cpus)
@@ -432,30 +466,15 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
        }
 }
 
-static void (*smp_cross_call)(const struct cpumask *, unsigned int);
+static void (*__smp_cross_call)(const struct cpumask *, unsigned int);
 
 void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
 {
-       if (!smp_cross_call)
-               smp_cross_call = fn;
-}
-
-void arch_send_call_function_ipi_mask(const struct cpumask *mask)
-{
-       smp_cross_call(mask, IPI_CALL_FUNC);
-}
-
-void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
-{
-       smp_cross_call(mask, IPI_WAKEUP);
-}
-
-void arch_send_call_function_single_ipi(int cpu)
-{
-       smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
+       if (!__smp_cross_call)
+               __smp_cross_call = fn;
 }
 
-static const char *ipi_types[NR_IPI] = {
+static const char *ipi_types[NR_IPI] __tracepoint_string = {
 #define S(x,s) [x] = s
        S(IPI_WAKEUP, "CPU wakeup interrupts"),
        S(IPI_TIMER, "Timer broadcast interrupts"),
@@ -463,8 +482,15 @@ static const char *ipi_types[NR_IPI] = {
        S(IPI_CALL_FUNC, "Function call interrupts"),
        S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"),
        S(IPI_CPU_STOP, "CPU stop interrupts"),
+       S(IPI_CPU_BACKTRACE, "CPU backtrace"),
 };
 
+static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
+{
+       trace_ipi_raise(target, ipi_types[ipinr]);
+       __smp_cross_call(target, ipinr);
+}
+
 void show_ipi_list(struct seq_file *p, int prec)
 {
        unsigned int cpu, i;
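
Note: this hunk is the standard pattern for adding a tracepoint without touching call sites: the platform hook is renamed to __smp_cross_call, and a static smp_cross_call() wrapper fires trace_ipi_raise() before delegating, while the CREATE_TRACE_POINTS define at the top of the file makes this translation unit emit the tracepoint definitions from trace/events/ipi.h. A standalone sketch of the wrap-a-hook shape, with a printf standing in for the tracepoint (names here are illustrative, not from the tree):

    #include <stdio.h>

    static void (*__send_hook)(int msg);        /* platform-registered hook */

    static void send_with_trace(int msg)        /* what callers keep invoking */
    {
            printf("trace: raising msg %d\n", msg);  /* tracepoint stand-in */
            if (__send_hook)
                    __send_hook(msg);
    }
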
@@ -496,6 +522,21 @@ u64 smp_irq_stat_cpu(unsigned int cpu)
  */
 static DEFINE_PER_CPU(struct clock_event_device, percpu_clockevent);
 
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
+{
+       smp_cross_call(mask, IPI_CALL_FUNC);
+}
+
+void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
+{
+       smp_cross_call(mask, IPI_WAKEUP);
+}
+
+void arch_send_call_function_single_ipi(int cpu)
+{
+       smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
+}
+
 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
 void tick_broadcast(const struct cpumask *mask)
 {
@@ -588,6 +629,58 @@ static void ipi_cpu_stop(unsigned int cpu)
                cpu_relax();
 }
 
+static cpumask_t backtrace_mask;
+static DEFINE_RAW_SPINLOCK(backtrace_lock);
+
+/* "in progress" flag of arch_trigger_all_cpu_backtrace */
+static unsigned long backtrace_flag;
+
+void smp_send_all_cpu_backtrace(void)
+{
+       unsigned int this_cpu = smp_processor_id();
+       int i;
+
+       if (test_and_set_bit(0, &backtrace_flag))
+               /*
+                * A backtrace is already in progress (backtrace_flag is
+                * set), so don't print a duplicate CPU dump.
+                */
+               return;
+
+       cpumask_copy(&backtrace_mask, cpu_online_mask);
+       cpu_clear(this_cpu, backtrace_mask);
+
+       pr_info("Backtrace for cpu %d (current):\n", this_cpu);
+       dump_stack();
+
+       pr_info("\nsending IPI to all other CPUs:\n");
+       smp_cross_call(&backtrace_mask, IPI_CPU_BACKTRACE);
+
+       /* Wait for up to 10 seconds for all other CPUs to do the backtrace */
+       for (i = 0; i < 10 * 1000; i++) {
+               if (cpumask_empty(&backtrace_mask))
+                       break;
+               mdelay(1);
+       }
+
+       clear_bit(0, &backtrace_flag);
+       smp_mb__after_clear_bit();
+}
+
+/*
+ * ipi_cpu_backtrace - handle IPI from smp_send_all_cpu_backtrace()
+ */
+static void ipi_cpu_backtrace(unsigned int cpu, struct pt_regs *regs)
+{
+       if (cpu_isset(cpu, backtrace_mask)) {
+               raw_spin_lock(&backtrace_lock);
+               pr_warning("IPI backtrace for cpu %d\n", cpu);
+               show_regs(regs);
+               raw_spin_unlock(&backtrace_lock);
+               cpu_clear(cpu, backtrace_mask);
+       }
+}
+
 /*
  * Main handler for inter-processor interrupts
  */
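
Note: smp_send_all_cpu_backtrace() is a simple mask-and-poll handshake: the initiator claims backtrace_flag so concurrent requests are dropped, dumps its own stack, IPIs every other online CPU, then polls for up to 10 seconds (10000 iterations of mdelay(1)) while each target prints its registers under backtrace_lock (so output does not interleave) and clears its bit from backtrace_mask. A userspace analogy of the same handshake, a sketch only, using C11 atomics and pthreads in place of cpumasks and IPIs:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    #define NWORKERS 4

    static atomic_uint pending_mask;        /* bit i set: worker i owes a dump */
    static pthread_mutex_t dump_lock = PTHREAD_MUTEX_INITIALIZER;

    static void *worker(void *arg)
    {
            unsigned int id = (unsigned int)(uintptr_t)arg;

            while (!(atomic_load(&pending_mask) & (1u << id)))
                    ;                       /* spin until "IPI'd" */

            pthread_mutex_lock(&dump_lock); /* plays the role of backtrace_lock */
            printf("backtrace for worker %u\n", id);
            pthread_mutex_unlock(&dump_lock);

            atomic_fetch_and(&pending_mask, ~(1u << id));  /* ack: clear our bit */
            return NULL;
    }

    int main(void)
    {
            pthread_t t[NWORKERS];
            unsigned int i;
            int ms;

            for (i = 0; i < NWORKERS; i++)
                    pthread_create(&t[i], NULL, worker, (void *)(uintptr_t)i);

            atomic_store(&pending_mask, (1u << NWORKERS) - 1);  /* "send IPIs" */

            for (ms = 0; ms < 10000 && atomic_load(&pending_mask); ms++)
                    usleep(1000);           /* poll, like the mdelay(1) loop */

            for (i = 0; i < NWORKERS; i++)
                    pthread_join(t[i], NULL);
            return 0;
    }
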
@@ -601,17 +694,23 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
        unsigned int cpu = smp_processor_id();
        struct pt_regs *old_regs = set_irq_regs(regs);
 
-       if (ipinr < NR_IPI)
+       if ((unsigned)ipinr < NR_IPI) {
+               trace_ipi_entry(ipi_types[ipinr]);
                __inc_irq_stat(cpu, ipi_irqs[ipinr]);
+       }
 
        switch (ipinr) {
        case IPI_WAKEUP:
+               mt_trace_ISR_start(ipinr);
+               mt_trace_ISR_end(ipinr);
                break;
 
 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
        case IPI_TIMER:
                irq_enter();
+               mt_trace_ISR_start(ipinr);
                tick_receive_broadcast();
+               mt_trace_ISR_end(ipinr);
                irq_exit();
                break;
 #endif
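
Note: the mt_trace_ISR_start()/mt_trace_ISR_end() pairs added around each IPI body come from MediaTek's scheduler monitor (mt_sched_mon.h); the usual purpose of such bracketing is to timestamp handler entry and flag handlers that overrun a latency budget. The actual implementation is not in this diff; a hypothetical sketch of what such a monitor measures:

    #include <stdio.h>
    #include <time.h>

    static struct timespec isr_enter_ts;

    static void isr_mon_start(void)
    {
            clock_gettime(CLOCK_MONOTONIC, &isr_enter_ts);
    }

    static void isr_mon_end(int irq, long warn_ns)
    {
            struct timespec now;
            long ns;

            clock_gettime(CLOCK_MONOTONIC, &now);
            ns = (now.tv_sec - isr_enter_ts.tv_sec) * 1000000000L
               + (now.tv_nsec - isr_enter_ts.tv_nsec);
            if (ns > warn_ns)
                    printf("irq %d ran %ld ns (over budget)\n", irq, ns);
    }
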
@@ -622,27 +721,44 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
 
        case IPI_CALL_FUNC:
                irq_enter();
+               mt_trace_ISR_start(ipinr);
                generic_smp_call_function_interrupt();
+               mt_trace_ISR_end(ipinr);
                irq_exit();
                break;
 
        case IPI_CALL_FUNC_SINGLE:
                irq_enter();
+               mt_trace_ISR_start(ipinr);
                generic_smp_call_function_single_interrupt();
+               mt_trace_ISR_end(ipinr);
                irq_exit();
                break;
 
        case IPI_CPU_STOP:
                irq_enter();
+               mt_trace_ISR_start(ipinr);
                ipi_cpu_stop(cpu);
+               mt_trace_ISR_end(ipinr);
                irq_exit();
                break;
 
+       case IPI_CPU_BACKTRACE:
+               mt_trace_ISR_start(ipinr);
+               ipi_cpu_backtrace(cpu, regs);
+               mt_trace_ISR_end(ipinr);
+               break;
+
        default:
+               mt_trace_ISR_start(ipinr);
                printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%x\n",
                       cpu, ipinr);
+               mt_trace_ISR_end(ipinr);
                break;
        }
+
+       if ((unsigned)ipinr < NR_IPI)
+               trace_ipi_exit(ipi_types[ipinr]);
        set_irq_regs(old_regs);
 }