#include <asm/tlbflush.h>
#include <asm/ptrace.h>
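+/*
+ * CREATE_TRACE_POINTS must be defined before including the trace header
+ * so that the IPI tracepoint definitions are emitted in this file.
+ */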
+#define CREATE_TRACE_POINTS
+#include <trace/events/ipi.h>
+
/*
* as from 2.5, kernels no longer have an init_tasks structure
* so we need some other way of telling a new secondary core
set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
}
-static void (*smp_cross_call)(const struct cpumask *, unsigned int);
-
/*
* Enumerate the possible CPU set from the device tree and build the
* cpu logical map array containing MPIDR values related to logical
}
}
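+/*
+ * Raw IPI cross-call hook registered by the interrupt controller driver
+ * via set_smp_cross_call(); IPIs are sent through the smp_cross_call()
+ * wrapper below so that they are traced.
+ */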
+static void (*__smp_cross_call)(const struct cpumask *, unsigned int);
void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
{
- smp_cross_call = fn;
+ __smp_cross_call = fn;
}
-void arch_send_call_function_ipi_mask(const struct cpumask *mask)
-{
- smp_cross_call(mask, IPI_CALL_FUNC);
-}
-
-void arch_send_call_function_single_ipi(int cpu)
-{
- smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
-}
-
-#ifdef CONFIG_IRQ_WORK
-void arch_irq_work_raise(void)
-{
- if (smp_cross_call)
- smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
-}
-#endif
-
-static const char *ipi_types[NR_IPI] = {
-#define S(x,s) [x - IPI_RESCHEDULE] = s
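+/*
+ * Annotated with __tracepoint_string so that tracing tools can map the
+ * string addresses recorded in the trace buffer back to these names.
+ */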
+static const char *ipi_types[NR_IPI] __tracepoint_string = {
+#define S(x,s) [x] = s
S(IPI_RESCHEDULE, "Rescheduling interrupts"),
S(IPI_CALL_FUNC, "Function call interrupts"),
S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"),
S(IPI_IRQ_WORK, "IRQ work interrupts"),
};
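+/*
+ * Emit the ipi_raise tracepoint for the target mask, then hand the IPI
+ * to the registered cross-call hook.
+ */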
+static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
+{
+ trace_ipi_raise(target, ipi_types[ipinr]);
+ __smp_cross_call(target, ipinr);
+}
+
void show_ipi_list(struct seq_file *p, int prec)
{
unsigned int cpu, i;
for (i = 0; i < NR_IPI; i++) {
- seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i + IPI_RESCHEDULE,
+ seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i,
prec >= 4 ? " " : "");
for_each_online_cpu(cpu)
seq_printf(p, "%10u ",
return sum;
}
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
+{
+ smp_cross_call(mask, IPI_CALL_FUNC);
+}
+
+void arch_send_call_function_single_ipi(int cpu)
+{
+ smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
+}
+
+#ifdef CONFIG_IRQ_WORK
+void arch_irq_work_raise(void)
+{
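+ /* IRQ work can be raised before a cross-call hook has been registered. */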
+ if (__smp_cross_call)
+ smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
+}
+#endif
+
static DEFINE_RAW_SPINLOCK(stop_lock);
/*
unsigned int cpu = smp_processor_id();
struct pt_regs *old_regs = set_irq_regs(regs);
- if (ipinr >= IPI_RESCHEDULE && ipinr < IPI_RESCHEDULE + NR_IPI)
- __inc_irq_stat(cpu, ipi_irqs[ipinr - IPI_RESCHEDULE]);
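+ /* Trace and account only IPI numbers this kernel knows about. */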
+ if ((unsigned int)ipinr < NR_IPI) {
+ trace_ipi_entry(ipi_types[ipinr]);
+ __inc_irq_stat(cpu, ipi_irqs[ipinr]);
+ }
switch (ipinr) {
case IPI_RESCHEDULE:
pr_crit("CPU%u: Unknown IPI message 0x%x\n", cpu, ipinr);
break;
}
+
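+ /* Matching exit event for the ipi_entry trace above. */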
+ if ((unsigned int)ipinr < NR_IPI)
+ trace_ipi_exit(ipi_types[ipinr]);
set_irq_regs(old_regs);
}