sfc: Set default parallelism to per-core by default
Author: Ben Hutchings <bhutchings@solarflare.com>
Tue, 20 Dec 2011 01:08:05 +0000 (01:08 +0000)
Committer: Ben Hutchings <bhutchings@solarflare.com>
Mon, 9 Jan 2012 17:08:18 +0000 (17:08 +0000)
The previous default of per-package can be more CPU-efficient, but
users generally seem to prefer per-core.  It should also allow
accelerated RFS to direct packets more precisely, if IRQ affinity
is properly spread out.

Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
drivers/net/ethernet/sfc/efx.c

index 5fcc42f7d865cd824f174f2e1acfba64c19ccc18..d7301d2e81a740252a1ee68e54330d7ba22cb442 100644 (file)
@@ -162,7 +162,7 @@ static unsigned int interrupt_mode;
  * interrupt handling.
  *
  * Cards without MSI-X will only target one CPU via legacy or MSI interrupt.
- * The default (0) means to assign an interrupt to each package (level II cache)
+ * The default (0) means to assign an interrupt to each core.
  */
 static unsigned int rss_cpus;
 module_param(rss_cpus, uint, 0444);
@@ -1148,14 +1148,14 @@ static void efx_fini_io(struct efx_nic *efx)
 
 static int efx_wanted_parallelism(void)
 {
-       cpumask_var_t core_mask;
+       cpumask_var_t thread_mask;
        int count;
        int cpu;
 
        if (rss_cpus)
                return rss_cpus;
 
-       if (unlikely(!zalloc_cpumask_var(&core_mask, GFP_KERNEL))) {
+       if (unlikely(!zalloc_cpumask_var(&thread_mask, GFP_KERNEL))) {
                printk(KERN_WARNING
                       "sfc: RSS disabled due to allocation failure\n");
                return 1;
@@ -1163,14 +1163,14 @@ static int efx_wanted_parallelism(void)
 
        count = 0;
        for_each_online_cpu(cpu) {
-               if (!cpumask_test_cpu(cpu, core_mask)) {
+               if (!cpumask_test_cpu(cpu, thread_mask)) {
                        ++count;
-                       cpumask_or(core_mask, core_mask,
-                                  topology_core_cpumask(cpu));
+                       cpumask_or(thread_mask, thread_mask,
+                                  topology_thread_cpumask(cpu));
                }
        }
 
-       free_cpumask_var(core_mask);
+       free_cpumask_var(thread_mask);
        return count;
 }