irq: update all arches for new irq_desc
author    Mike Travis <travis@sgi.com>
Mon, 12 Jan 2009 23:27:13 +0000 (15:27 -0800)
committer Mike Travis <travis@sgi.com>
Mon, 12 Jan 2009 23:27:13 +0000 (15:27 -0800)
Impact: cleanup, update to new cpumask API

irq_desc.affinity and irq_desc.pending_mask are now cpumask_var_t's,
so they should be accessed through the new cpumask API (cpumask_copy(),
cpumask_setall(), cpumask_first(), etc.) instead of by direct struct
assignment.
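
For reference, a minimal sketch of the conversion pattern (not part of
the patch itself; the function below is a hypothetical example, though
the field name matches struct irq_desc):

    #include <linux/cpumask.h>
    #include <linux/irq.h>

    /* Hypothetical helper illustrating the old vs. new style of
     * writing irq_desc.affinity now that it is a cpumask_var_t.
     */
    static void example_set_affinity(struct irq_desc *desc, int cpu)
    {
            /* Old style (struct assignment; only valid while
             * affinity was an embedded cpumask_t):
             *
             *      desc->affinity = cpumask_of_cpu(cpu);
             */

            /* New style: cpumask_copy() works for both flavors of
             * cpumask_var_t -- an embedded one-element array when
             * CONFIG_CPUMASK_OFFSTACK=n, or a pointer to separately
             * allocated storage when CONFIG_CPUMASK_OFFSTACK=y.
             */
            cpumask_copy(desc->affinity, cpumask_of(cpu));
    }

With CONFIG_CPUMASK_OFFSTACK=y the storage behind a cpumask_var_t must
be allocated (e.g. with alloc_cpumask_var()), which is why the
statically defined bad_irq_desc on ARM and Blackfin gains an #error
for that configuration below.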

Signed-off-by: Mike Travis <travis@sgi.com>
17 files changed:
arch/alpha/kernel/irq.c
arch/arm/kernel/irq.c
arch/arm/oprofile/op_model_mpcore.c
arch/blackfin/kernel/irqchip.c
arch/ia64/kernel/iosapic.c
arch/ia64/kernel/irq.c
arch/ia64/kernel/msi_ia64.c
arch/ia64/sn/kernel/msi_sn.c
arch/mips/include/asm/irq.h
arch/mips/kernel/irq-gic.c
arch/mips/kernel/smtc.c
arch/mips/mti-malta/malta-smtc.c
arch/parisc/kernel/irq.c
arch/powerpc/kernel/irq.c
arch/powerpc/platforms/pseries/xics.c
arch/powerpc/sysdev/mpic.c
arch/sparc/kernel/irq_64.c

diff --git a/arch/alpha/kernel/irq.c b/arch/alpha/kernel/irq.c
index 703731accda6d515e6879afe276e68818eaddd4b..7bc7489223f3e03b77aec838838202e8aed3f3b4 100644
@@ -55,7 +55,7 @@ int irq_select_affinity(unsigned int irq)
                cpu = (cpu < (NR_CPUS-1) ? cpu + 1 : 0);
        last_cpu = cpu;
 
-       irq_desc[irq].affinity = cpumask_of_cpu(cpu);
+       cpumask_copy(irq_desc[irq].affinity, cpumask_of(cpu));
        irq_desc[irq].chip->set_affinity(irq, cpumask_of(cpu));
        return 0;
 }
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index 7141cee1fab71e8b131d03e0d8e6dbfc043dbb85..4bb723eadad13c73dc5da7ba61ef2e2367fbc9ba 100644
@@ -104,6 +104,11 @@ static struct irq_desc bad_irq_desc = {
        .lock = SPIN_LOCK_UNLOCKED
 };
 
+#ifdef CONFIG_CPUMASK_OFFSTACK
+/* We are not allocating bad_irq_desc.affinity or .pending_mask */
+#error "ARM architecture does not support CONFIG_CPUMASK_OFFSTACK."
+#endif
+
 /*
  * do_IRQ handles all hardware IRQ's.  Decoded IRQs should not
  * come via this function.  Instead, they should provide their
@@ -161,7 +166,7 @@ void __init init_IRQ(void)
                irq_desc[irq].status |= IRQ_NOREQUEST | IRQ_NOPROBE;
 
 #ifdef CONFIG_SMP
-       bad_irq_desc.affinity = CPU_MASK_ALL;
+       cpumask_setall(bad_irq_desc.affinity);
        bad_irq_desc.cpu = smp_processor_id();
 #endif
        init_arch_irq();
@@ -191,15 +196,16 @@ void migrate_irqs(void)
                struct irq_desc *desc = irq_desc + i;
 
                if (desc->cpu == cpu) {
-                       unsigned int newcpu = any_online_cpu(desc->affinity);
-
-                       if (newcpu == NR_CPUS) {
+                       unsigned int newcpu = cpumask_any_and(desc->affinity,
+                                                             cpu_online_mask);
+                       if (newcpu >= nr_cpu_ids) {
                                if (printk_ratelimit())
                                        printk(KERN_INFO "IRQ%u no longer affine to CPU%u\n",
                                               i, cpu);
 
-                               cpus_setall(desc->affinity);
-                               newcpu = any_online_cpu(desc->affinity);
+                               cpumask_setall(desc->affinity);
+                               newcpu = cpumask_any_and(desc->affinity,
+                                                        cpu_online_mask);
                        }
 
                        route_irq(desc, i, newcpu);
diff --git a/arch/arm/oprofile/op_model_mpcore.c b/arch/arm/oprofile/op_model_mpcore.c
index 6d6bd5899240862771935c47f32007fb452aec48..853d42bb8682c83b517bf656ae922d4ce5cd3b63 100644
@@ -263,7 +263,7 @@ static void em_route_irq(int irq, unsigned int cpu)
        const struct cpumask *mask = cpumask_of(cpu);
 
        spin_lock_irq(&desc->lock);
-       desc->affinity = *mask;
+       cpumask_copy(desc->affinity, mask);
        desc->chip->set_affinity(irq, mask);
        spin_unlock_irq(&desc->lock);
 }
diff --git a/arch/blackfin/kernel/irqchip.c b/arch/blackfin/kernel/irqchip.c
index ab8209cbbad0368f1ebc2bc252c7532478ec6b5a..5780d6df154250112a128d19ffd0bbb06298064a 100644
@@ -69,6 +69,11 @@ static struct irq_desc bad_irq_desc = {
 #endif
 };
 
+#ifdef CONFIG_CPUMASK_OFFSTACK
+/* We are not allocating a variable-sized bad_irq_desc.affinity */
+#error "Blackfin architecture does not support CONFIG_CPUMASK_OFFSTACK."
+#endif
+
 int show_interrupts(struct seq_file *p, void *v)
 {
        int i = *(loff_t *) v, j;
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index 5cfd3d91001aed8043764c88778d2df3e91681cf..006ad366a4548e57c2cca94683cef12872a76fc7 100644
@@ -880,7 +880,7 @@ iosapic_unregister_intr (unsigned int gsi)
        if (iosapic_intr_info[irq].count == 0) {
 #ifdef CONFIG_SMP
                /* Clear affinity */
-               cpus_setall(idesc->affinity);
+               cpumask_setall(idesc->affinity);
 #endif
                /* Clear the interrupt information */
                iosapic_intr_info[irq].dest = 0;
diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c
index a58f64ca9f0e9931476ffd4f8460f95ec4200fad..226233a6fa19a2d3d8eb9f9ab5684dfda50b2c9d 100644
@@ -103,7 +103,7 @@ static char irq_redir [NR_IRQS]; // = { [0 ... NR_IRQS-1] = 1 };
 void set_irq_affinity_info (unsigned int irq, int hwid, int redir)
 {
        if (irq < NR_IRQS) {
-               cpumask_copy(&irq_desc[irq].affinity,
+               cpumask_copy(irq_desc[irq].affinity,
                             cpumask_of(cpu_logical_id(hwid)));
                irq_redir[irq] = (char) (redir & 0xff);
        }
@@ -148,7 +148,7 @@ static void migrate_irqs(void)
                if (desc->status == IRQ_PER_CPU)
                        continue;
 
-               if (cpumask_any_and(&irq_desc[irq].affinity, cpu_online_mask)
+               if (cpumask_any_and(irq_desc[irq].affinity, cpu_online_mask)
                    >= nr_cpu_ids) {
                        /*
                         * Save it for phase 2 processing
diff --git a/arch/ia64/kernel/msi_ia64.c b/arch/ia64/kernel/msi_ia64.c
index 890339339035b74814721c799a024fa6d184438a..dcb6b7c51ea7eb8dc3c8514bf24ef38ee18a6fe2 100644
@@ -75,7 +75,7 @@ static void ia64_set_msi_irq_affinity(unsigned int irq,
        msg.data = data;
 
        write_msi_msg(irq, &msg);
-       irq_desc[irq].affinity = cpumask_of_cpu(cpu);
+       cpumask_copy(irq_desc[irq].affinity, cpumask_of(cpu));
 }
 #endif /* CONFIG_SMP */
 
@@ -187,7 +187,7 @@ static void dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
        msg.address_lo |= MSI_ADDR_DESTID_CPU(cpu_physical_id(cpu));
 
        dmar_msi_write(irq, &msg);
-       irq_desc[irq].affinity = *mask;
+       cpumask_copy(irq_desc[irq].affinity, mask);
 }
 #endif /* CONFIG_SMP */
 
diff --git a/arch/ia64/sn/kernel/msi_sn.c b/arch/ia64/sn/kernel/msi_sn.c
index ca553b0429ce34cfb00398701d32ab73c942eeaf..81e428943d7374d01e0aef8530e39c7ecd892b30 100644
@@ -205,7 +205,7 @@ static void sn_set_msi_irq_affinity(unsigned int irq,
        msg.address_lo = (u32)(bus_addr & 0x00000000ffffffff);
 
        write_msi_msg(irq, &msg);
-       irq_desc[irq].affinity = *cpu_mask;
+       cpumask_copy(irq_desc[irq].affinity, cpu_mask);
 }
 #endif /* CONFIG_SMP */
 
diff --git a/arch/mips/include/asm/irq.h b/arch/mips/include/asm/irq.h
index abc62aa744ace852888367ffce294fea809095f0..3214ade02d105988937576c8425f444b08783b0e 100644
@@ -66,7 +66,7 @@ extern void smtc_forward_irq(unsigned int irq);
  */
 #define IRQ_AFFINITY_HOOK(irq)                                         \
 do {                                                                   \
-    if (!cpu_isset(smp_processor_id(), irq_desc[irq].affinity)) {      \
+    if (!cpumask_test_cpu(smp_processor_id(), irq_desc[irq].affinity)) {\
        smtc_forward_irq(irq);                                          \
        irq_exit();                                                     \
        return;                                                         \
diff --git a/arch/mips/kernel/irq-gic.c b/arch/mips/kernel/irq-gic.c
index 494a49a317e9b78fb137714423a64ef0661a411f..87deb8f6c45885e5c356df3ac4ddf1e58a6ac81c 100644
@@ -187,7 +187,7 @@ static void gic_set_affinity(unsigned int irq, const struct cpumask *cpumask)
                set_bit(irq, pcpu_masks[first_cpu(tmp)].pcpu_mask);
 
        }
-       irq_desc[irq].affinity = *cpumask;
+       cpumask_copy(irq_desc[irq].affinity, cpumask);
        spin_unlock_irqrestore(&gic_lock, flags);
 
 }
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index b6cca01ff82b309e803105c1230957e943c87df8..d2c1ab12425ad0b8e55ade490a7813319081071c 100644
@@ -686,7 +686,7 @@ void smtc_forward_irq(unsigned int irq)
         * and efficiency, we just pick the easiest one to find.
         */
 
-       target = first_cpu(irq_desc[irq].affinity);
+       target = cpumask_first(irq_desc[irq].affinity);
 
        /*
         * We depend on the platform code to have correctly processed
diff --git a/arch/mips/mti-malta/malta-smtc.c b/arch/mips/mti-malta/malta-smtc.c
index aabd7274507b875e2726c246240d9e465a27bc9b..5ba31888fefbbecacd123e7033ef8ddcb6c30944 100644
@@ -116,7 +116,7 @@ struct plat_smp_ops msmtc_smp_ops = {
 
 void plat_set_irq_affinity(unsigned int irq, const struct cpumask *affinity)
 {
-       cpumask_t tmask = *affinity;
+       cpumask_t tmask;
        int cpu = 0;
        void smtc_set_irq_affinity(unsigned int irq, cpumask_t aff);
 
@@ -139,11 +139,12 @@ void plat_set_irq_affinity(unsigned int irq, const struct cpumask *affinity)
         * be made to forward to an offline "CPU".
         */
 
+       cpumask_copy(&tmask, affinity);
        for_each_cpu(cpu, affinity) {
                if ((cpu_data[cpu].vpe_id != 0) || !cpu_online(cpu))
                        cpu_clear(cpu, tmask);
        }
-       irq_desc[irq].affinity = tmask;
+       cpumask_copy(irq_desc[irq].affinity, &tmask);
 
        if (cpus_empty(tmask))
                /*
diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
index ac2c822928c75e3ade5d9e6d278c08804703c3d8..49482806863fa522d71e6e37d90a5c470e1e4023 100644
@@ -120,7 +120,7 @@ int cpu_check_affinity(unsigned int irq, cpumask_t *dest)
        if (CHECK_IRQ_PER_CPU(irq)) {
                /* Bad linux design decision.  The mask has already
                 * been set; we must reset it */
-               irq_desc[irq].affinity = CPU_MASK_ALL;
+               cpumask_setall(irq_desc[irq].affinity);
                return -EINVAL;
        }
 
@@ -136,7 +136,7 @@ static void cpu_set_affinity_irq(unsigned int irq, const struct cpumask *dest)
        if (cpu_check_affinity(irq, dest))
                return;
 
-       irq_desc[irq].affinity = *dest;
+       cpumask_copy(irq_desc[irq].affinity, dest);
 }
 #endif
 
@@ -295,7 +295,7 @@ int txn_alloc_irq(unsigned int bits_wide)
 unsigned long txn_affinity_addr(unsigned int irq, int cpu)
 {
 #ifdef CONFIG_SMP
-       irq_desc[irq].affinity = cpumask_of_cpu(cpu);
+       cpumask_copy(irq_desc[irq].affinity, cpumask_of(cpu));
 #endif
 
        return per_cpu(cpu_data, cpu).txn_addr;
@@ -352,7 +352,7 @@ void do_cpu_irq_mask(struct pt_regs *regs)
        irq = eirr_to_irq(eirr_val);
 
 #ifdef CONFIG_SMP
-       dest = irq_desc[irq].affinity;
+       cpumask_copy(&dest, irq_desc[irq].affinity);
        if (CHECK_IRQ_PER_CPU(irq_desc[irq].status) &&
            !cpu_isset(smp_processor_id(), dest)) {
                int cpu = first_cpu(dest);
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 23b8b5e36f98b9afb5cf970e20b4a77ab5785627..ad1e5ac721d86f557bac20079b407c8929459273 100644
@@ -231,7 +231,7 @@ void fixup_irqs(cpumask_t map)
                if (irq_desc[irq].status & IRQ_PER_CPU)
                        continue;
 
-               cpus_and(mask, irq_desc[irq].affinity, map);
+               cpumask_and(&mask, irq_desc[irq].affinity, &map);
                if (any_online_cpu(mask) == NR_CPUS) {
                        printk("Breaking affinity for irq %i\n", irq);
                        mask = map;
diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c
index 84e058f1e1cc805516157d9a2c503168149e2084..80b513449f4c44d3c98915efe233c01caa382f4d 100644
@@ -153,9 +153,10 @@ static int get_irq_server(unsigned int virq, unsigned int strict_check)
 {
        int server;
        /* For the moment only implement delivery to all cpus or one cpu */
-       cpumask_t cpumask = irq_desc[virq].affinity;
+       cpumask_t cpumask;
        cpumask_t tmp = CPU_MASK_NONE;
 
+       cpumask_copy(&cpumask, irq_desc[virq].affinity);
        if (!distribute_irqs)
                return default_server;
 
@@ -869,7 +870,7 @@ void xics_migrate_irqs_away(void)
                       virq, cpu);
 
                /* Reset affinity to all cpus */
-               irq_desc[virq].affinity = CPU_MASK_ALL;
+               cpumask_setall(irq_desc[virq].affinity);
                desc->chip->set_affinity(virq, cpu_all_mask);
 unlock:
                spin_unlock_irqrestore(&desc->lock, flags);
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index 3e0d89dcdba2a5b4950a63a9cd79534f8e3702cf..0afd21f9a2221f6b27826f13dac546e98dd7f6b9 100644
@@ -566,9 +566,10 @@ static void __init mpic_scan_ht_pics(struct mpic *mpic)
 #ifdef CONFIG_SMP
 static int irq_choose_cpu(unsigned int virt_irq)
 {
-       cpumask_t mask = irq_desc[virt_irq].affinity;
+       cpumask_t mask;
        int cpuid;
 
+       cpumask_copy(&mask, irq_desc[virt_irq].affinity);
        if (cpus_equal(mask, CPU_MASK_ALL)) {
                static int irq_rover;
                static DEFINE_SPINLOCK(irq_rover_lock);
diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
index cab8e02868716d691a38b9bad239ec754dd39134..4ac5c651e00dc832cbc22ee68081795572fff369 100644
@@ -247,9 +247,10 @@ struct irq_handler_data {
 #ifdef CONFIG_SMP
 static int irq_choose_cpu(unsigned int virt_irq)
 {
-       cpumask_t mask = irq_desc[virt_irq].affinity;
+       cpumask_t mask;
        int cpuid;
 
+       cpumask_copy(&mask, irq_desc[virt_irq].affinity);
        if (cpus_equal(mask, CPU_MASK_ALL)) {
                static int irq_rover;
                static DEFINE_SPINLOCK(irq_rover_lock);
@@ -854,7 +855,7 @@ void fixup_irqs(void)
                    !(irq_desc[irq].status & IRQ_PER_CPU)) {
                        if (irq_desc[irq].chip->set_affinity)
                                irq_desc[irq].chip->set_affinity(irq,
-                                       &irq_desc[irq].affinity);
+                                       irq_desc[irq].affinity);
                }
                spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
        }