#include <asm/smp_plat.h>
#include <asm/virt.h>
#include <asm/mach/arch.h>
+#include <linux/mt_sched_mon.h>
+/*******************************************************************************
+* 20131225 marc.huang *
+* CPU Hotplug debug *
+*******************************************************************************/
+#include <linux/mtk_ram_console.h>
+/******************************************************************************/
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/ipi.h>
/*
* as from 2.5, kernels no longer have an init_tasks structure
IPI_CALL_FUNC,
IPI_CALL_FUNC_SINGLE,
IPI_CPU_STOP,
+ IPI_CPU_BACKTRACE,
};
static DECLARE_COMPLETION(cpu_running);
void __ref cpu_die(void)
{
unsigned int cpu = smp_processor_id();
+ aee_rr_rec_hoplug(cpu, 51, 0);
idle_task_exit();
+ aee_rr_rec_hoplug(cpu, 52, 0);
local_irq_disable();
+ aee_rr_rec_hoplug(cpu, 53, 0);
/*
* Flush the data out of the L1 cache for this CPU. This must be
* *this* CPU and power down its cache.
*/
flush_cache_louis();
+ aee_rr_rec_hoplug(cpu, 54, 0);
/*
* Tell __cpu_die() that this CPU is now safe to dispose of. Once
* this returns, power and/or clocks can be removed at any point
* from this CPU and its cache by platform_cpu_kill().
*/
- RCU_NONIDLE(complete(&cpu_died));
+ complete(&cpu_died);
+ aee_rr_rec_hoplug(cpu, 55, 0);
/*
* Ensure that the cache lines associated with that completion are
* CPU waiting for this one.
*/
flush_cache_louis();
+ aee_rr_rec_hoplug(cpu, 56, 0);
/*
* The actual CPU shutdown procedure is at least platform (if not
asmlinkage void __cpuinit secondary_start_kernel(void)
{
struct mm_struct *mm = &init_mm;
- unsigned int cpu;
+ unsigned int cpu = 0;
+ aee_rr_rec_hoplug(cpu, 1, 0);
/*
* The identity mapping is uncached (strongly ordered), so
local_flush_bp_all();
enter_lazy_tlb(mm, current);
local_flush_tlb_all();
+ aee_rr_rec_hoplug(cpu, 2, 0);
/*
* All kernel threads share the same mm context; grab a
* reference and switch to it.
*/
cpu = smp_processor_id();
+ aee_rr_rec_hoplug(cpu, 3, 0);
atomic_inc(&mm->mm_count);
current->active_mm = mm;
cpumask_set_cpu(cpu, mm_cpumask(mm));
+ aee_rr_rec_hoplug(cpu, 4, 0);
cpu_init();
+ aee_rr_rec_hoplug(cpu, 5, 0);
printk("CPU%u: Booted secondary processor\n", cpu);
preempt_disable();
+ aee_rr_rec_hoplug(cpu, 6, 0);
trace_hardirqs_off();
+ aee_rr_rec_hoplug(cpu, 7, 0);
/*
* Give the platform a chance to do its own initialisation.
*/
if (smp_ops.smp_secondary_init)
smp_ops.smp_secondary_init(cpu);
+ aee_rr_rec_hoplug(cpu, 8, 0);
notify_cpu_starting(cpu);
+ aee_rr_rec_hoplug(cpu, 9, 0);
calibrate_delay();
+ aee_rr_rec_hoplug(cpu, 10, 0);
smp_store_cpu_info(cpu);
+ aee_rr_rec_hoplug(cpu, 11, 0);
/*
* OK, now it's safe to let the boot CPU continue. Wait for
* before we continue - which happens after __cpu_up returns.
*/
set_cpu_online(cpu, true);
+ aee_rr_rec_hoplug(cpu, 12, 0);
complete(&cpu_running);
+ aee_rr_rec_hoplug(cpu, 13, 0);
/*
* Setup the percpu timer for this CPU.
*/
percpu_timer_setup();
+ aee_rr_rec_hoplug(cpu, 14, 0);
local_irq_enable();
+ aee_rr_rec_hoplug(cpu, 15, 0);
local_fiq_enable();
+ aee_rr_rec_hoplug(cpu, 16, 0);
/*
* OK, it's off to the idle thread for us
*/
cpu_startup_entry(CPUHP_ONLINE);
+ aee_rr_rec_hoplug(cpu, 17, 0);
}
void __init smp_cpus_done(unsigned int max_cpus)
}
}
-static void (*smp_cross_call)(const struct cpumask *, unsigned int);
+static void (*__smp_cross_call)(const struct cpumask *, unsigned int);
void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
{
- if (!smp_cross_call)
- smp_cross_call = fn;
+ if (!__smp_cross_call)
+ __smp_cross_call = fn;
}
-void arch_send_call_function_ipi_mask(const struct cpumask *mask)
-{
- smp_cross_call(mask, IPI_CALL_FUNC);
-}
-
-void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
-{
- smp_cross_call(mask, IPI_WAKEUP);
-}
-
-void arch_send_call_function_single_ipi(int cpu)
-{
- smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
-}
-
-static const char *ipi_types[NR_IPI] = {
+static const char *ipi_types[NR_IPI] __tracepoint_string = {
#define S(x,s) [x] = s
S(IPI_WAKEUP, "CPU wakeup interrupts"),
S(IPI_TIMER, "Timer broadcast interrupts"),
S(IPI_CALL_FUNC, "Function call interrupts"),
S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"),
S(IPI_CPU_STOP, "CPU stop interrupts"),
+ S(IPI_CPU_BACKTRACE, "CPU backtrace"),
};
+static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
+{
+ trace_ipi_raise(target, ipi_types[ipinr]);
+ __smp_cross_call(target, ipinr);
+}
+
void show_ipi_list(struct seq_file *p, int prec)
{
unsigned int cpu, i;
*/
static DEFINE_PER_CPU(struct clock_event_device, percpu_clockevent);
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
+{
+ smp_cross_call(mask, IPI_CALL_FUNC);
+}
+
+void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
+{
+ smp_cross_call(mask, IPI_WAKEUP);
+}
+
+void arch_send_call_function_single_ipi(int cpu)
+{
+ smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
+}
+
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
cpu_relax();
}
+static cpumask_t backtrace_mask;
+static DEFINE_RAW_SPINLOCK(backtrace_lock);
+
+/* "in progress" flag of smp_send_all_cpu_backtrace() */
+static unsigned long backtrace_flag;
+
+void smp_send_all_cpu_backtrace(void)
+{
+ unsigned int this_cpu = smp_processor_id();
+ int i;
+
+ if (test_and_set_bit(0, &backtrace_flag))
+ /*
+ * If an smp_send_all_cpu_backtrace() is already in progress
+ * (backtrace_flag bit set), don't emit duplicate CPU dump info.
+ */
+ return;
+
+ cpumask_copy(&backtrace_mask, cpu_online_mask);
+ cpu_clear(this_cpu, backtrace_mask);
+
+ pr_info("Backtrace for cpu %d (current):\n", this_cpu);
+ dump_stack();
+
+ pr_info("\nsending IPI to all other CPUs:\n");
+ smp_cross_call(&backtrace_mask, IPI_CPU_BACKTRACE);
+
+ /* Wait for up to 10 seconds for all other CPUs to do the backtrace */
+ for (i = 0; i < 10 * 1000; i++) {
+ if (cpumask_empty(&backtrace_mask))
+ break;
+ mdelay(1);
+ }
+
+ clear_bit(0, &backtrace_flag);
+ smp_mb__after_clear_bit();
+}
+
+/*
+ * ipi_cpu_backtrace - handle IPI from smp_send_all_cpu_backtrace()
+ */
+static void ipi_cpu_backtrace(unsigned int cpu, struct pt_regs *regs)
+{
+ if (cpu_isset(cpu, backtrace_mask)) {
+ raw_spin_lock(&backtrace_lock);
+ pr_warning("IPI backtrace for cpu %d\n", cpu);
+ show_regs(regs);
+ raw_spin_unlock(&backtrace_lock);
+ cpu_clear(cpu, backtrace_mask);
+ }
+}
+
/*
* Main handler for inter-processor interrupts
*/
unsigned int cpu = smp_processor_id();
struct pt_regs *old_regs = set_irq_regs(regs);
- if (ipinr < NR_IPI)
+ if ((unsigned)ipinr < NR_IPI) {
+ trace_ipi_entry(ipi_types[ipinr]);
__inc_irq_stat(cpu, ipi_irqs[ipinr]);
+ }
switch (ipinr) {
case IPI_WAKEUP:
+ mt_trace_ISR_start(ipinr);
+ mt_trace_ISR_end(ipinr);
break;
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
case IPI_TIMER:
irq_enter();
+ mt_trace_ISR_start(ipinr);
tick_receive_broadcast();
+ mt_trace_ISR_end(ipinr);
irq_exit();
break;
#endif
case IPI_CALL_FUNC:
irq_enter();
+ mt_trace_ISR_start(ipinr);
generic_smp_call_function_interrupt();
+ mt_trace_ISR_end(ipinr);
irq_exit();
break;
case IPI_CALL_FUNC_SINGLE:
irq_enter();
+ mt_trace_ISR_start(ipinr);
generic_smp_call_function_single_interrupt();
+ mt_trace_ISR_end(ipinr);
irq_exit();
break;
case IPI_CPU_STOP:
irq_enter();
+ mt_trace_ISR_start(ipinr);
ipi_cpu_stop(cpu);
+ mt_trace_ISR_end(ipinr);
irq_exit();
break;
+ case IPI_CPU_BACKTRACE:
+ mt_trace_ISR_start(ipinr);
+ ipi_cpu_backtrace(cpu, regs);
+ mt_trace_ISR_end(ipinr);
+ break;
+
default:
+ mt_trace_ISR_start(ipinr);
printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%x\n",
cpu, ipinr);
+ mt_trace_ISR_end(ipinr);
break;
}
+
+ if ((unsigned)ipinr < NR_IPI)
+ trace_ipi_exit(ipi_types[ipinr]);
set_irq_regs(old_regs);
}
smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}
-#ifdef CONFIG_HOTPLUG_CPU
-static void smp_kill_cpus(cpumask_t *mask)
-{
- unsigned int cpu;
- for_each_cpu(cpu, mask)
- platform_cpu_kill(cpu);
-}
-#else
-static void smp_kill_cpus(cpumask_t *mask) { }
-#endif
-
void smp_send_stop(void)
{
unsigned long timeout;
if (num_online_cpus() > 1)
pr_warning("SMP: failed to stop secondary CPUs\n");
-
- smp_kill_cpus(&mask);
}
/*