x86/irq: Avoid memory allocation in __assign_irq_vector()
author Jiang Liu <jiang.liu@linux.intel.com>
Tue, 14 Apr 2015 02:30:10 +0000 (10:30 +0800)
committer Thomas Gleixner <tglx@linutronix.de>
Fri, 24 Apr 2015 13:36:55 +0000 (15:36 +0200)
Function __assign_irq_vector() is protected by vector_lock, so use
a global temporary cpu_mask to avoid allocating/freeing cpu_mask.

Signed-off-by: Jiang Liu <jiang.liu@linux.intel.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: David Cohen <david.a.cohen@linux.intel.com>
Cc: Sander Eikelenboom <linux@eikelenboom.it>
Cc: David Vrabel <david.vrabel@citrix.com>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Joerg Roedel <joro@8bytes.org>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Bjorn Helgaas <bhelgaas@google.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Rafael J. Wysocki <rjw@rjwysocki.net>
Cc: Randy Dunlap <rdunlap@infradead.org>
Cc: Yinghai Lu <yinghai@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dimitri Sivanich <sivanich@sgi.com>
Link: http://lkml.kernel.org/r/1428978610-28986-34-git-send-email-jiang.liu@linux.intel.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
arch/x86/kernel/apic/vector.c

index ad786f8a7cc7bcf828bcaa26fb072373617893ae..1c7dd42b98c1fbcea693f5e3b2a0235cf73e32a9 100644 (file)
@@ -30,6 +30,7 @@ struct apic_chip_data {
 
 struct irq_domain *x86_vector_domain;
 static DEFINE_RAW_SPINLOCK(vector_lock);
+static cpumask_var_t vector_cpumask;
 static struct irq_chip lapic_controller;
 #ifdef CONFIG_X86_IO_APIC
 static struct apic_chip_data *legacy_irq_data[NR_IRQS_LEGACY];
@@ -116,14 +117,10 @@ static int __assign_irq_vector(int irq, struct apic_chip_data *d,
        static int current_vector = FIRST_EXTERNAL_VECTOR + VECTOR_OFFSET_START;
        static int current_offset = VECTOR_OFFSET_START % 16;
        int cpu, err;
-       cpumask_var_t tmp_mask;
 
        if (d->move_in_progress)
                return -EBUSY;
 
-       if (!alloc_cpumask_var(&tmp_mask, GFP_ATOMIC))
-               return -ENOMEM;
-
        /* Only try and allocate irqs on cpus that are present */
        err = -ENOSPC;
        cpumask_clear(d->old_domain);
@@ -131,21 +128,22 @@ static int __assign_irq_vector(int irq, struct apic_chip_data *d,
        while (cpu < nr_cpu_ids) {
                int new_cpu, vector, offset;
 
-               apic->vector_allocation_domain(cpu, tmp_mask, mask);
+               apic->vector_allocation_domain(cpu, vector_cpumask, mask);
 
-               if (cpumask_subset(tmp_mask, d->domain)) {
+               if (cpumask_subset(vector_cpumask, d->domain)) {
                        err = 0;
-                       if (cpumask_equal(tmp_mask, d->domain))
+                       if (cpumask_equal(vector_cpumask, d->domain))
                                break;
                        /*
                         * New cpumask using the vector is a proper subset of
                         * the current in use mask. So cleanup the vector
                         * allocation for the members that are not used anymore.
                         */
-                       cpumask_andnot(d->old_domain, d->domain, tmp_mask);
+                       cpumask_andnot(d->old_domain, d->domain,
+                                      vector_cpumask);
                        d->move_in_progress =
                           cpumask_intersects(d->old_domain, cpu_online_mask);
-                       cpumask_and(d->domain, d->domain, tmp_mask);
+                       cpumask_and(d->domain, d->domain, vector_cpumask);
                        break;
                }
 
@@ -159,16 +157,18 @@ next:
                }
 
                if (unlikely(current_vector == vector)) {
-                       cpumask_or(d->old_domain, d->old_domain, tmp_mask);
-                       cpumask_andnot(tmp_mask, mask, d->old_domain);
-                       cpu = cpumask_first_and(tmp_mask, cpu_online_mask);
+                       cpumask_or(d->old_domain, d->old_domain,
+                                  vector_cpumask);
+                       cpumask_andnot(vector_cpumask, mask, d->old_domain);
+                       cpu = cpumask_first_and(vector_cpumask,
+                                               cpu_online_mask);
                        continue;
                }
 
                if (test_bit(vector, used_vectors))
                        goto next;
 
-               for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask) {
+               for_each_cpu_and(new_cpu, vector_cpumask, cpu_online_mask) {
                        if (per_cpu(vector_irq, new_cpu)[vector] >
                            VECTOR_UNDEFINED)
                                goto next;
@@ -181,14 +181,13 @@ next:
                        d->move_in_progress =
                           cpumask_intersects(d->old_domain, cpu_online_mask);
                }
-               for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask)
+               for_each_cpu_and(new_cpu, vector_cpumask, cpu_online_mask)
                        per_cpu(vector_irq, new_cpu)[vector] = irq;
                d->cfg.vector = vector;
-               cpumask_copy(d->domain, tmp_mask);
+               cpumask_copy(d->domain, vector_cpumask);
                err = 0;
                break;
        }
-       free_cpumask_var(tmp_mask);
 
        if (!err) {
                /* cache destination APIC IDs into cfg->dest_apicid */
@@ -397,6 +396,8 @@ int __init arch_early_irq_init(void)
        arch_init_msi_domain(x86_vector_domain);
        arch_init_htirq_domain(x86_vector_domain);
 
+       BUG_ON(!alloc_cpumask_var(&vector_cpumask, GFP_KERNEL));
+
        return arch_early_ioapic_init();
 }