}
-/* As we are using single CPU as destination, pick only one CPU here */
+/* As we are using a single CPU as destination, pick only one CPU here */
-static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
+static inline unsigned int bigsmp_cpu_mask_to_apicid(const cpumask_t *cpumask)
{
- int cpu;
- int apicid;
-
- cpu = first_cpu(*cpumask);
- apicid = bigsmp_cpu_to_logical_apicid(cpu);
- return apicid;
+ return bigsmp_cpu_to_logical_apicid(first_cpu(*cpumask));
}
-static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask,
- const struct cpumask *andmask)
+static inline unsigned int
+bigsmp_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+ const struct cpumask *andmask)
{
int cpu;
/*
 * We're using fixed IRQ delivery, can only return one phys APIC ID.
* May as well be the first.
*/
- for_each_cpu_and(cpu, cpumask, andmask)
+ for_each_cpu_and(cpu, cpumask, andmask) {
if (cpumask_test_cpu(cpu, cpu_online_mask))
break;
+ }
if (cpu < nr_cpu_ids)
return bigsmp_cpu_to_logical_apicid(cpu);
}
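Every ->cpu_mask_to_apicid_and() variant touched by this patch has the same shape: walk the intersection of the two masks and return the first CPU that is also online. A minimal standalone sketch of that shared pattern, assuming only the kernel's cpumask API (the helper name first_online_cpu_and() is invented for illustration, not part of this patch):

static int first_online_cpu_and(const struct cpumask *a,
				const struct cpumask *b)
{
	int cpu;

	/* Scan CPUs set in both masks, stop at the first online one. */
	for_each_cpu_and(cpu, a, b) {
		if (cpumask_test_cpu(cpu, cpu_online_mask))
			return cpu;
	}

	/* Callers treat anything >= nr_cpu_ids as "no CPU found". */
	return nr_cpu_ids;
}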
static inline unsigned int
-cpu_mask_to_apicid_cluster(const struct cpumask *cpumask)
+es7000_cpu_mask_to_apicid_cluster(const struct cpumask *cpumask)
{
- int num_bits_set;
int cpus_found = 0;
- int cpu;
+ int num_bits_set;
int apicid;
+ int cpu;
num_bits_set = cpumask_weight(cpumask);
/* Return id to all */
/*
 * The cpus in the mask must all be on the apic cluster. If they are
 * not on the same apicid cluster, return the default value of
 * target_cpus():
 */
cpu = cpumask_first(cpumask);
apicid = es7000_cpu_to_logical_apicid(cpu);
+
while (cpus_found < num_bits_set) {
if (cpumask_test_cpu(cpu, cpumask)) {
int new_apicid = es7000_cpu_to_logical_apicid(cpu);
+
if (apicid_cluster(apicid) !=
- apicid_cluster(new_apicid)){
+ apicid_cluster(new_apicid)) {
- printk ("%s: Not a valid mask!\n", __func__);
+ printk("%s: Not a valid mask!\n", __func__);
+
return 0xFF;
}
apicid = new_apicid;
return apicid;
}
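The cluster walk above relies on apicid_cluster() to compare the cluster bits of two logical APIC IDs; two CPUs may share one fixed-delivery destination only if those bits match. A rough illustration of the idea (the 0xF0 mask mirrors the clustered-xAPIC upper-nibble encoding, but treat the names and mask here as assumptions rather than a quote of the es7000 headers):

/* Hypothetical stand-in for apicid_cluster(): upper nibble = cluster. */
#define EXAMPLE_APIC_CLUSTER_MASK	0xF0

static inline int example_apicid_cluster(int logical_apicid)
{
	return logical_apicid & EXAMPLE_APIC_CLUSTER_MASK;
}

/*
 * example_apicid_cluster(0x31) == example_apicid_cluster(0x32): same
 * cluster, so the mask is valid; 0x31 vs. 0x41 would hit the
 * "Not a valid mask!" path above.
 */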
-static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
+static inline unsigned int es7000_cpu_mask_to_apicid(const cpumask_t *cpumask)
{
- int num_bits_set;
int cpus_found = 0;
- int cpu;
+ int num_bits_set;
int apicid;
+ int cpu;
num_bits_set = cpus_weight(*cpumask);
/* Return id to all */
while (cpus_found < num_bits_set) {
if (cpu_isset(cpu, *cpumask)) {
int new_apicid = es7000_cpu_to_logical_apicid(cpu);
+
if (apicid_cluster(apicid) !=
- apicid_cluster(new_apicid)){
+ apicid_cluster(new_apicid)) {
- printk ("%s: Not a valid mask!\n", __func__);
+ printk("%s: Not a valid mask!\n", __func__);
+
return es7000_cpu_to_logical_apicid(0);
}
apicid = new_apicid;
}
-static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *inmask,
- const struct cpumask *andmask)
+static inline unsigned int
+es7000_cpu_mask_to_apicid_and(const struct cpumask *inmask,
+ const struct cpumask *andmask)
{
int apicid = es7000_cpu_to_logical_apicid(0);
cpumask_var_t cpumask;
cpumask_and(cpumask, inmask, andmask);
cpumask_and(cpumask, cpumask, cpu_online_mask);
- apicid = cpu_mask_to_apicid(cpumask);
+ apicid = es7000_cpu_mask_to_apicid(cpumask);
free_cpumask_var(cpumask);
+
return apicid;
}
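The hunk above elides the allocation of the temporary cpumask. A sketch of the full shape of such a helper, reconstructing the elided alloc_cpumask_var() step under the assumption of a GFP_ATOMIC allocation (function name invented for illustration):

static unsigned int
example_cpu_mask_to_apicid_and(const struct cpumask *inmask,
			       const struct cpumask *andmask)
{
	unsigned int apicid = es7000_cpu_to_logical_apicid(0);
	cpumask_var_t tmp;

	/* If the scratch mask can't be allocated, fall back safely. */
	if (!alloc_cpumask_var(&tmp, GFP_ATOMIC))
		return apicid;

	cpumask_and(tmp, inmask, andmask);
	cpumask_and(tmp, tmp, cpu_online_mask);
	apicid = es7000_cpu_mask_to_apicid(tmp);

	free_cpumask_var(tmp);
	return apicid;
}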
#ifdef CONFIG_X86_64
#include <asm/genapic.h>
-#define cpu_mask_to_apicid (apic->cpu_mask_to_apicid)
-#define cpu_mask_to_apicid_and (apic->cpu_mask_to_apicid_and)
#define read_apic_id() (apic->get_apic_id(apic_read(APIC_ID)))
#define send_IPI_self (apic->send_IPI_self)
#define wakeup_secondary_cpu (apic->wakeup_cpu)
return physid_isset(read_apic_id(), phys_cpu_present_map);
}
-static inline unsigned int cpu_mask_to_apicid(const struct cpumask *cpumask)
+static inline unsigned int
+default_cpu_mask_to_apicid(const struct cpumask *cpumask)
{
return cpumask_bits(cpumask)[0];
}
-static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask,
- const struct cpumask *andmask)
+static inline unsigned int
+default_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+ const struct cpumask *andmask)
{
unsigned long mask1 = cpumask_bits(cpumask)[0];
unsigned long mask2 = cpumask_bits(andmask)[0];
#include <asm/genapic.h>
-#define cpu_mask_to_apicid (apic->cpu_mask_to_apicid)
-#define cpu_mask_to_apicid_and (apic->cpu_mask_to_apicid_and)
#define wakeup_secondary_cpu (apic->wakeup_cpu)
extern void generic_bigsmp_probe(void);
* We use physical apicids here, not logical, so just return the default
* physical broadcast to stop people from breaking us
*/
-static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
+static inline unsigned int numaq_cpu_mask_to_apicid(const cpumask_t *cpumask)
{
- return (int) 0xF;
+ return 0x0F;
}
-static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask,
- const struct cpumask *andmask)
+static inline unsigned int
+numaq_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+ const struct cpumask *andmask)
{
- return (int) 0xF;
+ return 0x0F;
}
-/* No NUMA-Q box has a HT CPU, but it can't hurt to use the default code. */
+/* No NUMA-Q box has an HT CPU, but it can't hurt to use the default code. */
return 1;
}
-static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
+static inline unsigned int summit_cpu_mask_to_apicid(const cpumask_t *cpumask)
{
- int num_bits_set;
int cpus_found = 0;
- int cpu;
+ int num_bits_set;
int apicid;
+ int cpu;
num_bits_set = cpus_weight(*cpumask);
/* Return id to all */
if (num_bits_set >= nr_cpu_ids)
- return (int) 0xFF;
+ return 0xFF;
/*
- * The cpus in the mask must all be on the apic cluster. If are not
- * on the same apicid cluster return default value of target_cpus():
+ * The cpus in the mask must all be on the apic cluster. If they are
+ * not on the same apicid cluster, return the default value of
+ * target_cpus():
*/
cpu = first_cpu(*cpumask);
apicid = summit_cpu_to_logical_apicid(cpu);
+
while (cpus_found < num_bits_set) {
if (cpu_isset(cpu, *cpumask)) {
int new_apicid = summit_cpu_to_logical_apicid(cpu);
+
if (apicid_cluster(apicid) !=
- apicid_cluster(new_apicid)){
+ apicid_cluster(new_apicid)) {
- printk ("%s: Not a valid mask!\n", __func__);
+ printk("%s: Not a valid mask!\n", __func__);
+
return 0xFF;
}
- apicid = apicid | new_apicid;
+ apicid |= new_apicid;
return apicid;
}
-static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *inmask,
- const struct cpumask *andmask)
+static inline unsigned int
+summit_cpu_mask_to_apicid_and(const struct cpumask *inmask,
+ const struct cpumask *andmask)
{
int apicid = summit_cpu_to_logical_apicid(0);
cpumask_var_t cpumask;
cpumask_and(cpumask, inmask, andmask);
cpumask_and(cpumask, cpumask, cpu_online_mask);
- apicid = cpu_mask_to_apicid(cpumask);
+ apicid = summit_cpu_mask_to_apicid(cpumask);
free_cpumask_var(cpumask);
+
return apicid;
}
/*
 * We're using fixed IRQ delivery, can only return one phys APIC ID.
* May as well be the first.
*/
- for_each_cpu_and(cpu, cpumask, andmask)
+ for_each_cpu_and(cpu, cpumask, andmask) {
if (cpumask_test_cpu(cpu, cpu_online_mask))
break;
+ }
if (cpu < nr_cpu_ids)
return per_cpu(x86_cpu_to_apicid, cpu);
+
return BAD_APICID;
}
static unsigned int x2apic_cpu_mask_to_apicid(const struct cpumask *cpumask)
{
- int cpu;
-
/*
* We're using fixed IRQ delivery, can only return one logical APIC ID.
* May as well be the first.
*/
- cpu = cpumask_first(cpumask);
+ int cpu = cpumask_first(cpumask);
+
if ((unsigned)cpu < nr_cpu_ids)
return per_cpu(x86_cpu_to_logical_apicid, cpu);
else
return BAD_APICID;
}
-static unsigned int x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
- const struct cpumask *andmask)
+static unsigned int
+x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+ const struct cpumask *andmask)
{
int cpu;
/*
 * We're using fixed IRQ delivery, can only return one logical APIC ID.
* May as well be the first.
*/
- for_each_cpu_and(cpu, cpumask, andmask)
+ for_each_cpu_and(cpu, cpumask, andmask) {
if (cpumask_test_cpu(cpu, cpu_online_mask))
break;
+ }
+
if (cpu < nr_cpu_ids)
return per_cpu(x86_cpu_to_logical_apicid, cpu);
+
return BAD_APICID;
}
static unsigned int x2apic_cpu_mask_to_apicid(const struct cpumask *cpumask)
{
- int cpu;
-
/*
* We're using fixed IRQ delivery, can only return one phys APIC ID.
* May as well be the first.
*/
- cpu = cpumask_first(cpumask);
+ int cpu = cpumask_first(cpumask);
+
if ((unsigned)cpu < nr_cpu_ids)
return per_cpu(x86_cpu_to_apicid, cpu);
else
return BAD_APICID;
}
-static unsigned int x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
- const struct cpumask *andmask)
+static unsigned int
+x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+ const struct cpumask *andmask)
{
int cpu;
/*
 * We're using fixed IRQ delivery, can only return one phys APIC ID.
* May as well be the first.
*/
- for_each_cpu_and(cpu, cpumask, andmask)
+ for_each_cpu_and(cpu, cpumask, andmask) {
if (cpumask_test_cpu(cpu, cpu_online_mask))
break;
+ }
+
if (cpu < nr_cpu_ids)
return per_cpu(x86_cpu_to_apicid, cpu);
+
return BAD_APICID;
}
static unsigned int uv_cpu_mask_to_apicid(const struct cpumask *cpumask)
{
- int cpu;
-
/*
* We're using fixed IRQ delivery, can only return one phys APIC ID.
* May as well be the first.
*/
- cpu = cpumask_first(cpumask);
+ int cpu = cpumask_first(cpumask);
+
if ((unsigned)cpu < nr_cpu_ids)
return per_cpu(x86_cpu_to_apicid, cpu);
else
return BAD_APICID;
}
-static unsigned int uv_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
- const struct cpumask *andmask)
+static unsigned int
+uv_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+ const struct cpumask *andmask)
{
int cpu;
/*
 * We're using fixed IRQ delivery, can only return one phys APIC ID.
* May as well be the first.
*/
- for_each_cpu_and(cpu, cpumask, andmask)
+ for_each_cpu_and(cpu, cpumask, andmask) {
if (cpumask_test_cpu(cpu, cpu_online_mask))
break;
+ }
if (cpu < nr_cpu_ids)
return per_cpu(x86_cpu_to_apicid, cpu);
+
return BAD_APICID;
}
assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask);
/*
- * Either sets desc->affinity to a valid value, and returns cpu_mask_to_apicid
- * of that, or returns BAD_APICID and leaves desc->affinity untouched.
+ * Either sets desc->affinity to a valid value, and returns
+ * ->cpu_mask_to_apicid of that, or returns BAD_APICID and
+ * leaves desc->affinity untouched.
*/
static unsigned int
set_desc_affinity(struct irq_desc *desc, const struct cpumask *mask)
cpumask_and(desc->affinity, cfg->domain, mask);
set_extra_move_desc(desc, mask);
- return cpu_mask_to_apicid_and(desc->affinity, cpu_online_mask);
+
+ return apic->cpu_mask_to_apicid_and(desc->affinity, cpu_online_mask);
}
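Callers are expected to honor the contract spelled out in the comment above: program a destination only when the return value is not BAD_APICID, since desc->affinity is left untouched on failure. A hypothetical call site (the helper name program_irq_dest() is invented for illustration):

unsigned int dest;

dest = set_desc_affinity(desc, mask);
if (dest == BAD_APICID)
	return;			/* affinity unchanged, nothing to do */

/* Write the new destination into the MSI message / IO-APIC RTE. */
program_irq_dest(desc, dest);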
static void
if (assign_irq_vector(irq, cfg, apic->target_cpus()))
return;
- dest = cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus());
+ dest = apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus());
- apic_printk(APIC_VERBOSE,KERN_DEBUG
+ apic_printk(APIC_VERBOSE, KERN_DEBUG
"IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> "
*/
entry.dest_mode = apic->irq_dest_mode;
entry.mask = 1; /* mask IRQ now */
- entry.dest = cpu_mask_to_apicid(apic->target_cpus());
+ entry.dest = apic->cpu_mask_to_apicid(apic->target_cpus());
entry.delivery_mode = apic->irq_delivery_mode;
entry.polarity = 0;
entry.trigger = 0;
set_extra_move_desc(desc, mask);
- dest = cpu_mask_to_apicid_and(cfg->domain, mask);
+ dest = apic->cpu_mask_to_apicid_and(cfg->domain, mask);
modify_ioapic_rte = desc->status & IRQ_LEVEL;
if (modify_ioapic_rte) {
if (err)
return err;
- dest = cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus());
+ dest = apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus());
#ifdef CONFIG_INTR_REMAP
if (irq_remapped(irq)) {
struct ht_irq_msg msg;
unsigned dest;
- dest = cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus());
+ dest = apic->cpu_mask_to_apicid_and(cfg->domain,
+ apic->target_cpus());
msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest);
entry->polarity = 0;
entry->trigger = 0;
entry->mask = 0;
- entry->dest = cpu_mask_to_apicid(eligible_cpu);
+ entry->dest = apic->cpu_mask_to_apicid(eligible_cpu);
mmr_pnode = uv_blade_to_pnode(mmr_blade);
uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);
.set_apic_id = NULL,
.apic_id_mask = 0xFF << 24,
- .cpu_mask_to_apicid = cpu_mask_to_apicid,
- .cpu_mask_to_apicid_and = cpu_mask_to_apicid_and,
+ .cpu_mask_to_apicid = bigsmp_cpu_mask_to_apicid,
+ .cpu_mask_to_apicid_and = bigsmp_cpu_mask_to_apicid_and,
.send_IPI_mask = send_IPI_mask,
.send_IPI_mask_allbutself = NULL,
.set_apic_id = NULL,
.apic_id_mask = 0x0F << 24,
- .cpu_mask_to_apicid = cpu_mask_to_apicid,
- .cpu_mask_to_apicid_and = cpu_mask_to_apicid_and,
+ .cpu_mask_to_apicid = default_cpu_mask_to_apicid,
+ .cpu_mask_to_apicid_and = default_cpu_mask_to_apicid_and,
.send_IPI_mask = send_IPI_mask,
.send_IPI_mask_allbutself = NULL,
apic->init_apic_ldr = es7000_init_apic_ldr_cluster;
- apic->cpu_mask_to_apicid = cpu_mask_to_apicid_cluster;
+ apic->cpu_mask_to_apicid = es7000_cpu_mask_to_apicid_cluster;
}
static int probe_es7000(void)
.set_apic_id = NULL,
.apic_id_mask = 0xFF << 24,
- .cpu_mask_to_apicid = cpu_mask_to_apicid,
- .cpu_mask_to_apicid_and = cpu_mask_to_apicid_and,
+ .cpu_mask_to_apicid = es7000_cpu_mask_to_apicid,
+ .cpu_mask_to_apicid_and = es7000_cpu_mask_to_apicid_and,
.send_IPI_mask = send_IPI_mask,
.send_IPI_mask_allbutself = NULL,
.set_apic_id = NULL,
.apic_id_mask = 0x0F << 24,
- .cpu_mask_to_apicid = cpu_mask_to_apicid,
- .cpu_mask_to_apicid_and = cpu_mask_to_apicid_and,
+ .cpu_mask_to_apicid = numaq_cpu_mask_to_apicid,
+ .cpu_mask_to_apicid_and = numaq_cpu_mask_to_apicid_and,
.send_IPI_mask = send_IPI_mask,
.send_IPI_mask_allbutself = NULL,
.set_apic_id = NULL,
.apic_id_mask = 0xFF << 24,
- .cpu_mask_to_apicid = cpu_mask_to_apicid,
- .cpu_mask_to_apicid_and = cpu_mask_to_apicid_and,
+ .cpu_mask_to_apicid = summit_cpu_mask_to_apicid,
+ .cpu_mask_to_apicid_and = summit_cpu_mask_to_apicid_and,
.send_IPI_mask = send_IPI_mask,
.send_IPI_mask_allbutself = NULL,