x86: avoid back-to-back on_each_cpu in cpa_flush_array
authorPallipadi, Venkatesh <venkatesh.pallipadi@intel.com>
Tue, 26 May 2009 17:33:35 +0000 (10:33 -0700)
committerH. Peter Anvin <hpa@zytor.com>
Tue, 26 May 2009 20:12:12 +0000 (13:12 -0700)
Clean up cpa_flush_array() to avoid back-to-back on_each_cpu() calls.

[ Impact: optimizes fix 0af48f42df15b97080b450d24219dd95db7b929a ]

Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
arch/x86/mm/pageattr.c

index 0f9052bcec4b7572cb8519f7972294fa716ac51a..e17efed088c54a7b546b7b76c073ab55231d425e 100644 (file)
@@ -204,30 +204,19 @@ static void cpa_flush_range(unsigned long start, int numpages, int cache)
        }
 }
 
-static void wbinvd_local(void *unused)
-{
-       wbinvd();
-}
-
 static void cpa_flush_array(unsigned long *start, int numpages, int cache,
                            int in_flags, struct page **pages)
 {
        unsigned int i, level;
+       unsigned long do_wbinvd = cache && numpages >= 1024; /* 4M threshold */
 
        BUG_ON(irqs_disabled());
 
-       on_each_cpu(__cpa_flush_range, NULL, 1);
+       on_each_cpu(__cpa_flush_all, (void *) do_wbinvd, 1);
 
-       if (!cache)
+       if (!cache || do_wbinvd)
                return;
 
-       /* 4M threshold */
-       if (numpages >= 1024) {
-               if (boot_cpu_data.x86 >= 4)
-                       on_each_cpu(wbinvd_local, NULL, 1);
-
-               return;
-       }
        /*
         * We only need to flush on one CPU,
         * clflush is a MESI-coherent instruction that