x86/apic: Move online masking to core code
author Thomas Gleixner <tglx@linutronix.de>
Mon, 19 Jun 2017 23:37:42 +0000 (01:37 +0200)
committer Thomas Gleixner <tglx@linutronix.de>
Thu, 22 Jun 2017 16:21:21 +0000 (18:21 +0200)
All implementations of apic->cpu_mask_to_apicid_and() mask out the offline
cpus. The callsite already has a mask available, which has the offline CPUs
removed. Use that and remove the extra bits.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Marc Zyngier <marc.zyngier@arm.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Keith Busch <keith.busch@intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Christoph Hellwig <hch@lst.de>
Link: http://lkml.kernel.org/r/20170619235446.560868224@linutronix.de
arch/x86/kernel/apic/apic.c
arch/x86/kernel/apic/vector.c
arch/x86/kernel/apic/x2apic_cluster.c

index e9b322f050512d9155a28ce8435b0dcaea3d3650..8a0bde3fc488d0c3e8020facb3a5bedaed8398e6 100644 (file)
@@ -2205,19 +2205,12 @@ int default_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
                                   const struct cpumask *andmask,
                                   unsigned int *apicid)
 {
-       unsigned int cpu;
+       unsigned int cpu = cpumask_first_and(cpumask, andmask);
 
-       for_each_cpu_and(cpu, cpumask, andmask) {
-               if (cpumask_test_cpu(cpu, cpu_online_mask))
-                       break;
-       }
-
-       if (likely(cpu < nr_cpu_ids)) {
-               *apicid = per_cpu(x86_cpu_to_apicid, cpu);
-               return 0;
-       }
-
-       return -EINVAL;
+       if (cpu >= nr_cpu_ids)
+               return -EINVAL;
+       *apicid = per_cpu(x86_cpu_to_apicid, cpu);
+       return 0;
 }
 
 int flat_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
@@ -2226,14 +2219,12 @@ int flat_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
 {
        unsigned long cpu_mask = cpumask_bits(cpumask)[0] &
                                 cpumask_bits(andmask)[0] &
-                                cpumask_bits(cpu_online_mask)[0] &
                                 APIC_ALL_CPUS;
 
-       if (likely(cpu_mask)) {
-               *apicid = (unsigned int)cpu_mask;
-               return 0;
-       }
-       return -EINVAL;
+       if (!cpu_mask)
+               return -EINVAL;
+       *apicid = (unsigned int)cpu_mask;
+       return 0;
 }
 
 /*
index 47c5d019fb7ed797f366ed441f8260327da3bc8d..0f94ddbb6beb0364d6f630c2a5e81813753400c0 100644 (file)
@@ -221,8 +221,11 @@ success:
         * Cache destination APIC IDs into cfg->dest_apicid. This cannot fail
         * as we already established, that mask & d->domain & cpu_online_mask
         * is not empty.
+        *
+        * vector_searchmask is a subset of d->domain and has the offline
+        * cpus masked out.
         */
-       BUG_ON(apic->cpu_mask_to_apicid_and(mask, d->domain,
+       BUG_ON(apic->cpu_mask_to_apicid_and(mask, vector_searchmask,
                                            &d->cfg.dest_apicid));
        return 0;
 }
index 5a35f208ed95909d339db672cb493445c24a1a81..d73baa8c1a1718e411dcf6646cea8c386d015b26 100644 (file)
@@ -108,31 +108,24 @@ x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
                              const struct cpumask *andmask,
                              unsigned int *apicid)
 {
+       unsigned int cpu;
        u32 dest = 0;
        u16 cluster;
-       int i;
-
-       for_each_cpu_and(i, cpumask, andmask) {
-               if (!cpumask_test_cpu(i, cpu_online_mask))
-                       continue;
-               dest = per_cpu(x86_cpu_to_logical_apicid, i);
-               cluster = x2apic_cluster(i);
-               break;
-       }
 
-       if (!dest)
+       cpu = cpumask_first_and(cpumask, andmask);
+       if (cpu >= nr_cpu_ids)
                return -EINVAL;
 
-       for_each_cpu_and(i, cpumask, andmask) {
-               if (!cpumask_test_cpu(i, cpu_online_mask))
-                       continue;
-               if (cluster != x2apic_cluster(i))
+       dest = per_cpu(x86_cpu_to_logical_apicid, cpu);
+       cluster = x2apic_cluster(cpu);
+
+       for_each_cpu_and(cpu, cpumask, andmask) {
+               if (cluster != x2apic_cluster(cpu))
                        continue;
-               dest |= per_cpu(x86_cpu_to_logical_apicid, i);
+               dest |= per_cpu(x86_cpu_to_logical_apicid, cpu);
        }
 
        *apicid = dest;
-
        return 0;
 }