}
static inline void
-default_send_IPI_mask_sequence(const struct cpumask *mask, int vector)
+default_send_IPI_mask_sequence_phys(const struct cpumask *mask, int vector)
{
unsigned long query_cpu;
unsigned long flags;
}
static inline void
-default_send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
+default_send_IPI_mask_allbutself_phys(const struct cpumask *mask, int vector)
{
unsigned int this_cpu = smp_processor_id();
unsigned int query_cpu;
local_irq_restore(flags);
}
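
The loop bodies of the renamed _phys variants are elided by the diff context
above. For reference, a sketch of what they do (an assumption based on the
surrounding declarations, not lines touched by this patch): unicast to each
target CPU's physical APIC ID, passing APIC_DEST_PHYSICAL as the destination
mode.

	static inline void
	default_send_IPI_mask_sequence_phys(const struct cpumask *mask,
					    int vector)
	{
		unsigned long query_cpu;
		unsigned long flags;

		/* Unicast to each target CPU's physical APIC ID. */
		local_irq_save(flags);
		for_each_cpu(query_cpu, mask)
			__default_send_IPI_dest_field(
				per_cpu(x86_cpu_to_apicid, query_cpu),
				vector, APIC_DEST_PHYSICAL);
		local_irq_restore(flags);
	}
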
+#include <asm/genapic.h>
+
+static inline void
+default_send_IPI_mask_sequence_logical(const struct cpumask *mask, int vector)
+{
+ unsigned long flags;
+ unsigned int query_cpu;
+
+	/*
+	 * Hack. The clustered APIC addressing mode doesn't allow us to send
+	 * to an arbitrary mask, so we do a unicast to each CPU instead. This
+	 * should be modified to do one message per cluster ID - mbligh
+	 */
+
+ local_irq_save(flags);
+ for_each_cpu(query_cpu, mask)
+ __default_send_IPI_dest_field(
+ apic->cpu_to_logical_apicid(query_cpu), vector,
+ apic->dest_logical);
+ local_irq_restore(flags);
+}
+
+static inline void
+default_send_IPI_mask_allbutself_logical(const struct cpumask *mask, int vector)
+{
+ unsigned long flags;
+ unsigned int query_cpu;
+ unsigned int this_cpu = smp_processor_id();
+
+ /* See Hack comment above */
+
+ local_irq_save(flags);
+ for_each_cpu(query_cpu, mask) {
+ if (query_cpu == this_cpu)
+ continue;
+ __default_send_IPI_dest_field(
+ apic->cpu_to_logical_apicid(query_cpu), vector,
+ apic->dest_logical);
+ }
+ local_irq_restore(flags);
+}
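
The _logical helpers above pass apic->dest_logical straight through to
__default_send_IPI_dest_field(), which now takes the destination mode as a
third parameter instead of hard-coding logical mode. A sketch of how that
helper would read in <asm/ipi.h> after this change (an assumption mirroring
the two-argument body removed from ipi.c further down):

	static inline void
	__default_send_IPI_dest_field(unsigned long mask, int vector,
				      unsigned int dest)
	{
		unsigned long cfg;

		/* Wait for the ICR to go idle; NMI needs the safe variant. */
		if (unlikely(vector == NMI_VECTOR))
			safe_apic_wait_icr_idle();
		else
			apic_wait_icr_idle();

		/* Program the target APIC ID into ICR2. */
		cfg = __prepare_ICR2(mask);
		apic_write(APIC_ICR2, cfg);

		/*
		 * Send the IPI. The write to APIC_ICR fires this off; dest
		 * is ORed into the ICR by __prepare_ICR() instead of a
		 * hard-coded apic->dest_logical.
		 */
		cfg = __prepare_ICR(0, vector, dest);
		apic_write(APIC_ICR, cfg);
	}
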
/* Avoid include hell */
#define NMI_VECTOR 0x02
-void default_send_IPI_mask_bitmask(const struct cpumask *mask, int vector);
-void default_send_IPI_mask_allbutself(const struct cpumask *mask, int vector);
-
extern int no_broadcast;
-#ifdef CONFIG_X86_64
-#include <asm/genapic.h>
-#else
-static inline void default_send_IPI_mask(const struct cpumask *mask, int vector)
+#ifndef CONFIG_X86_64
+/*
+ * This is only used on smaller machines: flat logical destination
+ * mode has just 8 bits, one per CPU.
+ */
+static inline void
+default_send_IPI_mask_bitmask_logical(const struct cpumask *cpumask, int vector)
+{
+ unsigned long mask = cpumask_bits(cpumask)[0];
+ unsigned long flags;
+
+ local_irq_save(flags);
+ WARN_ON(mask & ~cpumask_bits(cpu_online_mask)[0]);
+ __default_send_IPI_dest_field(mask, vector, apic->dest_logical);
+ local_irq_restore(flags);
+}
+
+static inline void
+default_send_IPI_mask_logical(const struct cpumask *mask, int vector)
{
- default_send_IPI_mask_bitmask(mask, vector);
+ default_send_IPI_mask_bitmask_logical(mask, vector);
}
-void default_send_IPI_mask_allbutself(const struct cpumask *mask, int vector);
#endif
static inline void __default_local_send_IPI_allbutself(int vector)
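
For context, a sketch of the __default_local_send_IPI_allbutself() body (an
assumption; this patch only shows its signature): the no_broadcast flag
declared above gates whether the ALLBUT shorthand may be used, and NMIs
always take the mask-based path.

	static inline void __default_local_send_IPI_allbutself(int vector)
	{
		if (no_broadcast || vector == NMI_VECTOR)
			apic->send_IPI_mask_allbutself(cpu_online_mask,
						       vector);
		else
			__default_send_IPI_shortcut(APIC_DEST_ALLBUT, vector,
						    apic->dest_logical);
	}
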
#include <asm/genapic.h>
#include <asm/fixmap.h>
#include <asm/apicdef.h>
+#include <asm/ipi.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/dmi.h>
return cpuid_apic >> index_msb;
}
-void default_send_IPI_mask_sequence(const struct cpumask *mask, int vector);
-void default_send_IPI_mask_allbutself(const struct cpumask *mask, int vector);
-
static inline void bigsmp_send_IPI_mask(const struct cpumask *mask, int vector)
{
- default_send_IPI_mask_sequence(mask, vector);
+ default_send_IPI_mask_sequence_phys(mask, vector);
}
static inline void bigsmp_send_IPI_allbutself(int vector)
{
- default_send_IPI_mask_allbutself(cpu_online_mask, vector);
+ default_send_IPI_mask_allbutself_phys(cpu_online_mask, vector);
}
static inline void bigsmp_send_IPI_all(int vector)
static void es7000_send_IPI_mask(const struct cpumask *mask, int vector)
{
- default_send_IPI_mask_sequence(mask, vector);
+ default_send_IPI_mask_sequence_phys(mask, vector);
}
static void es7000_send_IPI_allbutself(int vector)
{
- default_send_IPI_mask_allbutself(cpu_online_mask, vector);
+ default_send_IPI_mask_allbutself_phys(cpu_online_mask, vector);
}
static void es7000_send_IPI_all(int vector)
static void physflat_send_IPI_mask(const struct cpumask *cpumask, int vector)
{
- default_send_IPI_mask_sequence(cpumask, vector);
+ default_send_IPI_mask_sequence_phys(cpumask, vector);
}
static void physflat_send_IPI_mask_allbutself(const struct cpumask *cpumask,
int vector)
{
- default_send_IPI_mask_allbutself(cpumask, vector);
+ default_send_IPI_mask_allbutself_phys(cpumask, vector);
}
static void physflat_send_IPI_allbutself(int vector)
{
- default_send_IPI_mask_allbutself(cpu_online_mask, vector);
+ default_send_IPI_mask_allbutself_phys(cpu_online_mask, vector);
}
static void physflat_send_IPI_all(int vector)
#include <asm/mmu_context.h>
#include <asm/apic.h>
#include <asm/proto.h>
+#include <asm/ipi.h>
#ifdef CONFIG_X86_32
-#include <asm/genapic.h>
-
-/*
- * the following functions deal with sending IPIs between CPUs.
- *
- * We use 'broadcast', CPU->CPU IPIs and self-IPIs too.
- */
-
-static inline int __prepare_ICR(unsigned int shortcut, int vector)
-{
- unsigned int icr = shortcut | apic->dest_logical;
-
- switch (vector) {
- default:
- icr |= APIC_DM_FIXED | vector;
- break;
- case NMI_VECTOR:
- icr |= APIC_DM_NMI;
- break;
- }
- return icr;
-}
-
-static inline int __prepare_ICR2(unsigned int mask)
-{
- return SET_APIC_DEST_FIELD(mask);
-}
-
-void __default_send_IPI_shortcut(unsigned int shortcut, int vector)
-{
- /*
- * Subtle. In the case of the 'never do double writes' workaround
- * we have to lock out interrupts to be safe. As we don't care
- * of the value read we use an atomic rmw access to avoid costly
- * cli/sti. Otherwise we use an even cheaper single atomic write
- * to the APIC.
- */
- unsigned int cfg;
-
- /*
- * Wait for idle.
- */
- apic_wait_icr_idle();
-
- /*
- * No need to touch the target chip field
- */
- cfg = __prepare_ICR(shortcut, vector);
-
- /*
- * Send the IPI. The write to APIC_ICR fires this off.
- */
- apic_write(APIC_ICR, cfg);
-}
void default_send_IPI_self(int vector)
{
- __default_send_IPI_shortcut(APIC_DEST_SELF, vector);
-}
-
-/*
- * This is used to send an IPI with no shorthand notation (the destination is
- * specified in bits 56 to 63 of the ICR).
- */
-static inline void __default_send_IPI_dest_field(unsigned long mask, int vector)
-{
- unsigned long cfg;
-
- /*
- * Wait for idle.
- */
- if (unlikely(vector == NMI_VECTOR))
- safe_apic_wait_icr_idle();
- else
- apic_wait_icr_idle();
-
- /*
- * prepare target chip field
- */
- cfg = __prepare_ICR2(mask);
- apic_write(APIC_ICR2, cfg);
-
- /*
- * program the ICR
- */
- cfg = __prepare_ICR(0, vector);
-
- /*
- * Send the IPI. The write to APIC_ICR fires this off.
- */
- apic_write(APIC_ICR, cfg);
-}
-
-/*
- * This is only used on smaller machines.
- */
-void default_send_IPI_mask_bitmask(const struct cpumask *cpumask, int vector)
-{
- unsigned long mask = cpumask_bits(cpumask)[0];
- unsigned long flags;
-
- local_irq_save(flags);
- WARN_ON(mask & ~cpumask_bits(cpu_online_mask)[0]);
- __default_send_IPI_dest_field(mask, vector);
- local_irq_restore(flags);
-}
-
-void default_send_IPI_mask_sequence(const struct cpumask *mask, int vector)
-{
- unsigned long flags;
- unsigned int query_cpu;
-
- /*
- * Hack. The clustered APIC addressing mode doesn't allow us to send
- * to an arbitrary mask, so I do a unicasts to each CPU instead. This
- * should be modified to do 1 message per cluster ID - mbligh
- */
-
- local_irq_save(flags);
- for_each_cpu(query_cpu, mask)
- __default_send_IPI_dest_field(apic->cpu_to_logical_apicid(query_cpu), vector);
- local_irq_restore(flags);
-}
-
-void default_send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
-{
- unsigned long flags;
- unsigned int query_cpu;
- unsigned int this_cpu = smp_processor_id();
-
- /* See Hack comment above */
-
- local_irq_save(flags);
- for_each_cpu(query_cpu, mask) {
- if (query_cpu == this_cpu)
- continue;
- __default_send_IPI_dest_field(
- apic->cpu_to_logical_apicid(query_cpu), vector);
- }
- local_irq_restore(flags);
+ __default_send_IPI_shortcut(APIC_DEST_SELF, vector, apic->dest_logical);
}
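
The shortcut helper likewise moves into <asm/ipi.h> and grows a destination
mode argument, which is what the three-parameter call in
default_send_IPI_self() above targets. A sketch of the new forms (an
assumption mirroring the removed two-argument bodies):

	static inline unsigned int
	__prepare_ICR(unsigned int shortcut, int vector, unsigned int dest)
	{
		unsigned int icr = shortcut | dest; /* was: apic->dest_logical */

		switch (vector) {
		default:
			icr |= APIC_DM_FIXED | vector;
			break;
		case NMI_VECTOR:
			icr |= APIC_DM_NMI;
			break;
		}
		return icr;
	}

	static inline void
	__default_send_IPI_shortcut(unsigned int shortcut, int vector,
				    unsigned int dest)
	{
		unsigned int cfg;

		/* Wait for idle; no need to touch the target chip field. */
		apic_wait_icr_idle();
		cfg = __prepare_ICR(shortcut, vector, dest);

		/* Send the IPI. The write to APIC_ICR fires this off. */
		apic_write(APIC_ICR, cfg);
	}
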
/* must come after the send_IPI functions above for inlining */
#include <asm/genapic.h>
#include <asm/fixmap.h>
#include <asm/apicdef.h>
+#include <asm/ipi.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/init.h>
return (x >> 24) & 0x0F;
}
-void default_send_IPI_mask_sequence(const struct cpumask *mask, int vector);
-void default_send_IPI_mask_allbutself(const struct cpumask *mask, int vector);
-
static inline void numaq_send_IPI_mask(const struct cpumask *mask, int vector)
{
- default_send_IPI_mask_sequence(mask, vector);
+ default_send_IPI_mask_sequence_logical(mask, vector);
}
static inline void numaq_send_IPI_allbutself(int vector)
{
- default_send_IPI_mask_allbutself(cpu_online_mask, vector);
+ default_send_IPI_mask_allbutself_logical(cpu_online_mask, vector);
}
static inline void numaq_send_IPI_all(int vector)
.cpu_mask_to_apicid = default_cpu_mask_to_apicid,
.cpu_mask_to_apicid_and = default_cpu_mask_to_apicid_and,
- .send_IPI_mask = default_send_IPI_mask,
- .send_IPI_mask_allbutself = default_send_IPI_mask_allbutself,
+ .send_IPI_mask = default_send_IPI_mask_logical,
+ .send_IPI_mask_allbutself = default_send_IPI_mask_allbutself_logical,
.send_IPI_allbutself = default_send_IPI_allbutself,
.send_IPI_all = default_send_IPI_all,
.send_IPI_self = NULL,
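
As a usage note (illustrative; not part of this hunk): generic code reaches
these methods only through the apic driver structure, so the _logical rename
stays invisible to callers such as the reschedule IPI path in
arch/x86/kernel/smp.c:

	static void native_smp_send_reschedule(int cpu)
	{
		if (unlikely(cpu_is_offline(cpu))) {
			WARN_ON(1);
			return;
		}
		/* Resolves to default_send_IPI_mask_logical() here. */
		apic->send_IPI_mask(cpumask_of(cpu), RESCHEDULE_VECTOR);
	}
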
#include <asm/genapic.h>
#include <asm/fixmap.h>
#include <asm/apicdef.h>
+#include <asm/ipi.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/init.h>
return (x >> 24) & 0xFF;
}
-void default_send_IPI_mask_sequence(const cpumask_t *mask, int vector);
-void default_send_IPI_mask_allbutself(const cpumask_t *mask, int vector);
-
static inline void summit_send_IPI_mask(const cpumask_t *mask, int vector)
{
- default_send_IPI_mask_sequence(mask, vector);
+ default_send_IPI_mask_sequence_logical(mask, vector);
}
static inline void summit_send_IPI_allbutself(int vector)