treewide: make "nr_cpu_ids" unsigned
authorAlexey Dobriyan <adobriyan@gmail.com>
Fri, 8 Sep 2017 23:14:18 +0000 (16:14 -0700)
committerLinus Torvalds <torvalds@linux-foundation.org>
Sat, 9 Sep 2017 01:26:48 +0000 (18:26 -0700)
First, number of CPUs can't be negative number.

Second, different signedness leads to suboptimal code in the following
cases:

1)
kmalloc(nr_cpu_ids * sizeof(X));

"int" has to be sign extended to size_t.

2)
while (*pos < nr_cpu_ids)   /* where pos is a loff_t pointer */

MOVSXD is 1 byte longer than the same MOV.

Other cases exist as well. Basically compiler is told that nr_cpu_ids
can't be negative which can't be deduced if it is "int".

Code savings on allyesconfig kernel: -3KB

add/remove: 0/0 grow/shrink: 25/264 up/down: 261/-3631 (-3370)
function                                     old     new   delta
coretemp_cpu_online                          450     512     +62
rcu_init_one                                1234    1272     +38
pci_device_probe                             374     399     +25

...

pgdat_reclaimable_pages                      628     556     -72
select_fallback_rq                           446     369     -77
task_numa_find_cpu                          1923    1807    -116

Link: http://lkml.kernel.org/r/20170819114959.GA30580@avx2
Signed-off-by: Alexey Dobriyan <adobriyan@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
17 files changed:
arch/arm64/kernel/smp.c
arch/powerpc/kernel/paca.c
arch/powerpc/kernel/setup-common.c
arch/powerpc/sysdev/xive/native.c
arch/tile/kernel/setup.c
arch/x86/kernel/apic/apic.c
arch/x86/kernel/setup_percpu.c
arch/x86/kernel/smpboot.c
drivers/base/cpu.c
drivers/scsi/scsi_debug.c
include/linux/cpumask.h
kernel/rcu/tree.c
kernel/rcu/tree_plugin.h
kernel/sched/topology.c
kernel/smp.c
kernel/trace/trace_functions_graph.c
mm/slub.c

index ffe089942ac4d6e995e67c32fecbfcba27576949..9f7195a5773ee66138bc5170508f8c251f501e47 100644 (file)
@@ -690,7 +690,7 @@ void __init smp_init_cpus(void)
                                      acpi_parse_gic_cpu_interface, 0);
 
        if (cpu_count > nr_cpu_ids)
-               pr_warn("Number of cores (%d) exceeds configured maximum of %d - clipping\n",
+               pr_warn("Number of cores (%d) exceeds configured maximum of %u - clipping\n",
                        cpu_count, nr_cpu_ids);
 
        if (!bootcpu_valid) {
index 70f073d6c3b27c5cac8f8c8492da879f51f80622..2ff2b8a19f712d8b14c958b86ca947bdc50169cb 100644 (file)
@@ -224,7 +224,7 @@ void __init allocate_pacas(void)
        paca = __va(memblock_alloc_base(paca_size, PAGE_SIZE, limit));
        memset(paca, 0, paca_size);
 
-       printk(KERN_DEBUG "Allocated %u bytes for %d pacas at %p\n",
+       printk(KERN_DEBUG "Allocated %u bytes for %u pacas at %p\n",
                paca_size, nr_cpu_ids, paca);
 
        allocate_lppacas(nr_cpu_ids, limit);
index 7de73589d8e24274391de8b60b15bdaa4d703de5..0ac741fae90ea5c1cffc227323fe40cbb5319188 100644 (file)
@@ -551,7 +551,7 @@ void __init smp_setup_cpu_maps(void)
                if (maxcpus > nr_cpu_ids) {
                        printk(KERN_WARNING
                               "Partition configured for %d cpus, "
-                              "operating system maximum is %d.\n",
+                              "operating system maximum is %u.\n",
                               maxcpus, nr_cpu_ids);
                        maxcpus = nr_cpu_ids;
                } else
index 44f3a25ca630a61df697639155fce0214c09a8d3..ebc244b08d6748512c19199446d25f7ac49fca9b 100644 (file)
@@ -511,13 +511,13 @@ static bool xive_parse_provisioning(struct device_node *np)
 static void xive_native_setup_pools(void)
 {
        /* Allocate a pool big enough */
-       pr_debug("XIVE: Allocating VP block for pool size %d\n", nr_cpu_ids);
+       pr_debug("XIVE: Allocating VP block for pool size %u\n", nr_cpu_ids);
 
        xive_pool_vps = xive_native_alloc_vp_block(nr_cpu_ids);
        if (WARN_ON(xive_pool_vps == XIVE_INVALID_VP))
                pr_err("XIVE: Failed to allocate pool VP, KVM might not function\n");
 
-       pr_debug("XIVE: Pool VPs allocated at 0x%x for %d max CPUs\n",
+       pr_debug("XIVE: Pool VPs allocated at 0x%x for %u max CPUs\n",
                 xive_pool_vps, nr_cpu_ids);
 }
 
index 443a70bccc1c862d945d3a84945c6771033b74c7..6becb96c60a03c5515cb443b5cdc7c683e259433 100644 (file)
@@ -1200,7 +1200,7 @@ static void __init validate_hv(void)
         * We use a struct cpumask for this, so it must be big enough.
         */
        if ((smp_height * smp_width) > nr_cpu_ids)
-               early_panic("Hypervisor %d x %d grid too big for Linux NR_CPUS %d\n",
+               early_panic("Hypervisor %d x %d grid too big for Linux NR_CPUS %u\n",
                            smp_height, smp_width, nr_cpu_ids);
 #endif
 
index 7834f73efbf1ec0871d25a6a01b35bb6b5221527..8315e2f517a7ef609e8d43b924a2734f7320e9bb 100644 (file)
@@ -2097,7 +2097,7 @@ static int allocate_logical_cpuid(int apicid)
 
        /* Allocate a new cpuid. */
        if (nr_logical_cpuids >= nr_cpu_ids) {
-               WARN_ONCE(1, "APIC: NR_CPUS/possible_cpus limit of %i reached. "
+               WARN_ONCE(1, "APIC: NR_CPUS/possible_cpus limit of %u reached. "
                             "Processor %d/0x%x and the rest are ignored.\n",
                             nr_cpu_ids, nr_logical_cpuids, apicid);
                return -EINVAL;
index 6e8fcb6f7e1e43b2d401380794245d43735a502b..28dafed6c68279f46963f89c4a8ce4b01d092c57 100644 (file)
@@ -168,7 +168,7 @@ void __init setup_per_cpu_areas(void)
        unsigned long delta;
        int rc;
 
-       pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n",
+       pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%u nr_node_ids:%d\n",
                NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);
 
        /*
index 54b9e89d4d6be3844b8b3c310433467d0e5f1481..cd6622c3204e6b4c5c159a659b0b23f4728da34a 100644 (file)
@@ -1461,7 +1461,7 @@ __init void prefill_possible_map(void)
 
        /* nr_cpu_ids could be reduced via nr_cpus= */
        if (possible > nr_cpu_ids) {
-               pr_warn("%d Processors exceeds NR_CPUS limit of %d\n",
+               pr_warn("%d Processors exceeds NR_CPUS limit of %u\n",
                        possible, nr_cpu_ids);
                possible = nr_cpu_ids;
        }
index 2c3b359b3536a15cdb21fbc60af80644368c655d..321cd7b4d817fd6ffd9323362041c7d4cb29b7e9 100644 (file)
@@ -256,9 +256,9 @@ static ssize_t print_cpus_offline(struct device *dev,
                        buf[n++] = ',';
 
                if (nr_cpu_ids == total_cpus-1)
-                       n += snprintf(&buf[n], len - n, "%d", nr_cpu_ids);
+                       n += snprintf(&buf[n], len - n, "%u", nr_cpu_ids);
                else
-                       n += snprintf(&buf[n], len - n, "%d-%d",
+                       n += snprintf(&buf[n], len - n, "%u-%d",
                                                      nr_cpu_ids, total_cpus-1);
        }
 
index 77a0335eb757de1c3678e267020f55f893f4964c..09ba494f88967351d84b6ab2ed781b89cc848d18 100644 (file)
@@ -5465,7 +5465,7 @@ static int sdebug_driver_probe(struct device * dev)
                return error;
        }
        if (submit_queues > nr_cpu_ids) {
-               pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%d\n",
+               pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%u\n",
                        my_name, submit_queues, nr_cpu_ids);
                submit_queues = nr_cpu_ids;
        }
index 4bf4479a3a800c435ccec039f7e5da4089910f87..68c5a8290275a17b4d1466b847ab476c0a370389 100644 (file)
@@ -32,15 +32,15 @@ typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;
 #define cpumask_pr_args(maskp)         nr_cpu_ids, cpumask_bits(maskp)
 
 #if NR_CPUS == 1
-#define nr_cpu_ids             1
+#define nr_cpu_ids             1U
 #else
-extern int nr_cpu_ids;
+extern unsigned int nr_cpu_ids;
 #endif
 
 #ifdef CONFIG_CPUMASK_OFFSTACK
 /* Assuming NR_CPUS is huge, a runtime limit is more efficient.  Also,
  * not all bits may be allocated. */
-#define nr_cpumask_bits        ((unsigned int)nr_cpu_ids)
+#define nr_cpumask_bits        nr_cpu_ids
 #else
 #define nr_cpumask_bits        ((unsigned int)NR_CPUS)
 #endif
index 84fe96641b2e05f8c126d9716685a6a237d704fe..1250e4bd4b85e690c7db2b061a87599e8651a606 100644 (file)
@@ -4091,7 +4091,7 @@ static void __init rcu_init_geometry(void)
        if (rcu_fanout_leaf == RCU_FANOUT_LEAF &&
            nr_cpu_ids == NR_CPUS)
                return;
-       pr_info("RCU: Adjusting geometry for rcu_fanout_leaf=%d, nr_cpu_ids=%d\n",
+       pr_info("RCU: Adjusting geometry for rcu_fanout_leaf=%d, nr_cpu_ids=%u\n",
                rcu_fanout_leaf, nr_cpu_ids);
 
        /*
index 55bde94b95728bae7eb1e9b4ead94e18dfa0e4e8..e012b9be777e3ba00ccf2fe4d05df78b78abd8f5 100644 (file)
@@ -89,7 +89,7 @@ static void __init rcu_bootup_announce_oddness(void)
        if (rcu_fanout_leaf != RCU_FANOUT_LEAF)
                pr_info("\tBoot-time adjustment of leaf fanout to %d.\n", rcu_fanout_leaf);
        if (nr_cpu_ids != NR_CPUS)
-               pr_info("\tRCU restricting CPUs from NR_CPUS=%d to nr_cpu_ids=%d.\n", NR_CPUS, nr_cpu_ids);
+               pr_info("\tRCU restricting CPUs from NR_CPUS=%d to nr_cpu_ids=%u.\n", NR_CPUS, nr_cpu_ids);
 #ifdef CONFIG_RCU_BOOST
        pr_info("\tRCU priority boosting: priority %d delay %d ms.\n", kthread_prio, CONFIG_RCU_BOOST_DELAY);
 #endif
index 6f7b43982f735d340948c6d2c1be41460ea72806..5d0062cc10cb8f99fffa3b8c1cc548f4329c46ed 100644 (file)
@@ -473,7 +473,7 @@ static int __init isolated_cpu_setup(char *str)
        alloc_bootmem_cpumask_var(&cpu_isolated_map);
        ret = cpulist_parse(str, cpu_isolated_map);
        if (ret) {
-               pr_err("sched: Error, all isolcpus= values must be between 0 and %d\n", nr_cpu_ids);
+               pr_err("sched: Error, all isolcpus= values must be between 0 and %u\n", nr_cpu_ids);
                return 0;
        }
        return 1;
index 81cfca9b4cc3b37557b9e2c09602f3baf1f297a8..c94dd85c8d41798443a7080af55c24fe508815b3 100644 (file)
@@ -550,7 +550,7 @@ static int __init maxcpus(char *str)
 early_param("maxcpus", maxcpus);
 
 /* Setup number of possible processor ids */
-int nr_cpu_ids __read_mostly = NR_CPUS;
+unsigned int nr_cpu_ids __read_mostly = NR_CPUS;
 EXPORT_SYMBOL(nr_cpu_ids);
 
 /* An arch may set nr_cpu_ids earlier if needed, so this would be redundant */
index d56123cdcc89027ee5841cf6573de474b38ebb39..b8f1f54731af0ab1190783291ffa1ca9aaf33feb 100644 (file)
@@ -1543,7 +1543,7 @@ fs_initcall(init_graph_tracefs);
 
 static __init int init_graph_trace(void)
 {
-       max_bytes_for_cpu = snprintf(NULL, 0, "%d", nr_cpu_ids - 1);
+       max_bytes_for_cpu = snprintf(NULL, 0, "%u", nr_cpu_ids - 1);
 
        if (!register_trace_event(&graph_trace_entry_event)) {
                pr_warn("Warning: could not register graph trace events\n");
index ddb04576b342a17f2b6c93c3434ca59aef76082c..d39a5d3834b31c0ab53d8d808c1224dbb2076a2a 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -4232,7 +4232,7 @@ void __init kmem_cache_init(void)
        cpuhp_setup_state_nocalls(CPUHP_SLUB_DEAD, "slub:dead", NULL,
                                  slub_cpu_dead);
 
-       pr_info("SLUB: HWalign=%d, Order=%d-%d, MinObjects=%d, CPUs=%d, Nodes=%d\n",
+       pr_info("SLUB: HWalign=%d, Order=%d-%d, MinObjects=%d, CPUs=%u, Nodes=%d\n",
                cache_line_size(),
                slub_min_order, slub_max_order, slub_min_objects,
                nr_cpu_ids, nr_node_ids);