unsigned long (*set_apic_id)(unsigned int id);
unsigned long apic_id_mask;
- int (*cpu_mask_to_apicid)(const struct cpumask *cpumask,
- unsigned int *apicid);
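+	/*
+	 * The sole remaining mask-to-APIC-ID operation; callers that
+	 * used to pass a single mask now pass it as both arguments.
+	 */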
int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask,
const struct cpumask *andmask,
unsigned int *apicid);
#endif
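+/*
+ * Merged replacement for the old single-mask flat helper: AND both
+ * source masks with the online map, restricted to the flat mode's
+ * eight possible CPUs (APIC_ALL_CPUS) in the first bitmap word.
+ */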
static inline int
-__flat_cpu_mask_to_apicid(unsigned long cpu_mask, unsigned int *apicid)
+flat_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+ const struct cpumask *andmask,
+ unsigned int *apicid)
{
- cpu_mask = cpu_mask & APIC_ALL_CPUS & cpumask_bits(cpu_online_mask)[0];
+ unsigned long cpu_mask = cpumask_bits(cpumask)[0] &
+ cpumask_bits(andmask)[0] &
+ cpumask_bits(cpu_online_mask)[0] &
+ APIC_ALL_CPUS;
+
if (likely(cpu_mask)) {
*apicid = (unsigned int)cpu_mask;
return 0;
	} else {
		return -EINVAL;
	}
}
-static inline int
-flat_cpu_mask_to_apicid(const struct cpumask *cpumask,
- unsigned int *apicid)
-{
- return __flat_cpu_mask_to_apicid(cpumask_bits(cpumask)[0], apicid);
-}
-
-static inline int
-flat_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
- const struct cpumask *andmask,
- unsigned int *apicid)
-{
- unsigned long mask1 = cpumask_bits(cpumask)[0];
- unsigned long mask2 = cpumask_bits(andmask)[0];
- return __flat_cpu_mask_to_apicid(mask1 & mask2, apicid);
-}
-
-extern int
-default_cpu_mask_to_apicid(const struct cpumask *cpumask,
- unsigned int *apicid);
-
extern int
default_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
const struct cpumask *andmask,
apic_write(APIC_LDR, val);
}
-static inline int __default_cpu_to_apicid(int cpu, unsigned int *apicid)
-{
- if (likely((unsigned int)cpu < nr_cpu_ids)) {
- *apicid = per_cpu(x86_cpu_to_apicid, cpu);
- return 0;
- } else {
- return -EINVAL;
- }
-}
-
-int default_cpu_mask_to_apicid(const struct cpumask *cpumask,
- unsigned int *apicid)
-{
- int cpu = cpumask_first_and(cpumask, cpu_online_mask);
- return __default_cpu_to_apicid(cpu, apicid);
-}
-
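+/*
+ * Open-codes the removed __default_cpu_to_apicid() helper: report the
+ * physical APIC ID of the selected CPU, or -EINVAL if none was found.
+ */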
int default_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
const struct cpumask *andmask,
unsigned int *apicid)
break;
}
- return __default_cpu_to_apicid(cpu, apicid);
+ if (likely((unsigned int)cpu < nr_cpu_ids)) {
+ *apicid = per_cpu(x86_cpu_to_apicid, cpu);
+ return 0;
+ } else {
+ return -EINVAL;
+ }
}
/*
.set_apic_id = set_apic_id,
.apic_id_mask = 0xFFu << 24,
- .cpu_mask_to_apicid = flat_cpu_mask_to_apicid,
.cpu_mask_to_apicid_and = flat_cpu_mask_to_apicid_and,
.send_IPI_mask = flat_send_IPI_mask,
.set_apic_id = set_apic_id,
.apic_id_mask = 0xFFu << 24,
- .cpu_mask_to_apicid = default_cpu_mask_to_apicid,
.cpu_mask_to_apicid_and = default_cpu_mask_to_apicid_and,
.send_IPI_mask = physflat_send_IPI_mask,
.set_apic_id = NULL,
.apic_id_mask = 0x0F << 24,
- .cpu_mask_to_apicid = flat_cpu_mask_to_apicid,
.cpu_mask_to_apicid_and = flat_cpu_mask_to_apicid_and,
.send_IPI_mask = noop_send_IPI_mask,
.set_apic_id = set_apic_id,
.apic_id_mask = 0xffU << 24,
- .cpu_mask_to_apicid = default_cpu_mask_to_apicid,
.cpu_mask_to_apicid_and = default_cpu_mask_to_apicid_and,
.send_IPI_mask = numachip_send_IPI_mask,
.set_apic_id = NULL,
.apic_id_mask = 0xFF << 24,
- .cpu_mask_to_apicid = default_cpu_mask_to_apicid,
.cpu_mask_to_apicid_and = default_cpu_mask_to_apicid_and,
.send_IPI_mask = bigsmp_send_IPI_mask,
return 1;
}
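+/*
+ * No longer installed in the apic driver table; the only remaining
+ * caller is es7000_cpu_mask_to_apicid_and(), hence static inline.
+ */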
-static int
+static inline int
es7000_cpu_mask_to_apicid(const struct cpumask *cpumask, unsigned int *dest_id)
{
unsigned int round = 0;
.set_apic_id = NULL,
.apic_id_mask = 0xFF << 24,
- .cpu_mask_to_apicid = es7000_cpu_mask_to_apicid,
.cpu_mask_to_apicid_and = es7000_cpu_mask_to_apicid_and,
.send_IPI_mask = es7000_send_IPI_mask,
.set_apic_id = NULL,
.apic_id_mask = 0xFF << 24,
- .cpu_mask_to_apicid = es7000_cpu_mask_to_apicid,
.cpu_mask_to_apicid_and = es7000_cpu_mask_to_apicid_and,
.send_IPI_mask = es7000_send_IPI_mask,
* We use logical delivery to get the timer IRQ
* to the first CPU.
*/
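+	/*
+	 * target_cpus() is passed as both arguments: ANDing a mask with
+	 * itself is a no-op, so the old single-mask semantics are kept.
+	 */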
- if (unlikely(apic->cpu_mask_to_apicid(apic->target_cpus(), &dest)))
+ if (unlikely(apic->cpu_mask_to_apicid_and(apic->target_cpus(),
+ apic->target_cpus(), &dest)))
dest = BAD_APICID;
entry.dest_mode = apic->irq_dest_mode;
* We use physical apicids here, not logical, so just return the default
* physical broadcast to stop people from breaking us
*/
-static int
-numaq_cpu_mask_to_apicid(const struct cpumask *cpumask, unsigned int *apicid)
-{
- *apicid = 0x0F;
- return 0;
-}
-
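+/*
+ * The _and variant below also returns the 0x0F physical broadcast,
+ * so nothing is lost by dropping the single-mask wrapper.
+ */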
static int
numaq_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
const struct cpumask *andmask,
.set_apic_id = NULL,
.apic_id_mask = 0x0F << 24,
- .cpu_mask_to_apicid = numaq_cpu_mask_to_apicid,
.cpu_mask_to_apicid_and = numaq_cpu_mask_to_apicid_and,
.send_IPI_mask = numaq_send_IPI_mask,
.set_apic_id = NULL,
.apic_id_mask = 0x0F << 24,
- .cpu_mask_to_apicid = flat_cpu_mask_to_apicid,
.cpu_mask_to_apicid_and = flat_cpu_mask_to_apicid_and,
.send_IPI_mask = default_send_IPI_mask_logical,
return 1;
}
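+/*
+ * Only summit_cpu_mask_to_apicid_and() calls this now that the table
+ * entry is gone, so let the compiler inline it.
+ */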
-static int
+static inline int
summit_cpu_mask_to_apicid(const struct cpumask *cpumask, unsigned int *dest_id)
{
unsigned int round = 0;
.set_apic_id = NULL,
.apic_id_mask = 0xFF << 24,
- .cpu_mask_to_apicid = summit_cpu_mask_to_apicid,
.cpu_mask_to_apicid_and = summit_cpu_mask_to_apicid_and,
.send_IPI_mask = summit_send_IPI_mask,
__x2apic_send_IPI_mask(cpu_online_mask, vector, APIC_DEST_ALLINC);
}
-static int
-x2apic_cpu_mask_to_apicid(const struct cpumask *cpumask, unsigned int *apicid)
-{
- int cpu = cpumask_first_and(cpumask, cpu_online_mask);
- int i;
-
- if (cpu >= nr_cpu_ids)
- return -EINVAL;
-
- *apicid = 0;
- for_each_cpu_and(i, cpumask, per_cpu(cpus_in_cluster, cpu))
- *apicid |= per_cpu(x86_cpu_to_logical_apicid, i);
-
- return 0;
-}
-
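+/*
+ * As in the removed single-mask version: OR together the logical APIC
+ * IDs of the cluster members, here limited to CPUs in both masks.
+ */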
static int
x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
const struct cpumask *andmask,
.set_apic_id = x2apic_set_apic_id,
.apic_id_mask = 0xFFFFFFFFu,
- .cpu_mask_to_apicid = x2apic_cpu_mask_to_apicid,
.cpu_mask_to_apicid_and = x2apic_cpu_mask_to_apicid_and,
.send_IPI_mask = x2apic_send_IPI_mask,
.set_apic_id = x2apic_set_apic_id,
.apic_id_mask = 0xFFFFFFFFu,
- .cpu_mask_to_apicid = default_cpu_mask_to_apicid,
.cpu_mask_to_apicid_and = default_cpu_mask_to_apicid_and,
.send_IPI_mask = x2apic_send_IPI_mask,
{
}
-static inline int __uv_cpu_to_apicid(int cpu, unsigned int *apicid)
-{
- if (likely((unsigned int)cpu < nr_cpu_ids)) {
- *apicid = per_cpu(x86_cpu_to_apicid, cpu) | uv_apicid_hibits;
- return 0;
- } else {
- return -EINVAL;
- }
-}
-
-static int
-uv_cpu_mask_to_apicid(const struct cpumask *cpumask, unsigned int *apicid)
-{
- /*
- * We're using fixed IRQ delivery, can only return one phys APIC ID.
- * May as well be the first.
- */
- int cpu = cpumask_first_and(cpumask, cpu_online_mask);
- return __uv_cpu_to_apicid(cpu, apicid);
-}
-
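+/*
+ * Fixed IRQ delivery can target only one physical APIC ID; pick the
+ * first eligible CPU and OR in uv_apicid_hibits, as before.
+ */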
static int
uv_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
const struct cpumask *andmask,
break;
}
- return __uv_cpu_to_apicid(cpu, apicid);
+ if (likely((unsigned int)cpu < nr_cpu_ids)) {
+ *apicid = per_cpu(x86_cpu_to_apicid, cpu) | uv_apicid_hibits;
+ return 0;
+ } else {
+ return -EINVAL;
+ }
}
static unsigned int x2apic_get_apic_id(unsigned long x)
.set_apic_id = set_apic_id,
.apic_id_mask = 0xFFFFFFFFu,
- .cpu_mask_to_apicid = uv_cpu_mask_to_apicid,
.cpu_mask_to_apicid_and = uv_cpu_mask_to_apicid_and,
.send_IPI_mask = uv_send_IPI_mask,
if (err != 0)
return err;
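+	/* eligible_cpu serves as both masks; the AND leaves it unchanged. */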
- err = apic->cpu_mask_to_apicid(eligible_cpu, &dest);
+ err = apic->cpu_mask_to_apicid_and(eligible_cpu, eligible_cpu, &dest);
if (err != 0)
return err;