x86/cpufeature: Speed up cpu_feature_enabled()
author Borislav Petkov <bp@suse.de>
Mon, 15 Feb 2016 23:19:18 +0000 (00:19 +0100)
committer Ingo Molnar <mingo@kernel.org>
Tue, 16 Feb 2016 07:45:15 +0000 (08:45 +0100)
When GCC cannot do constant folding for this macro, it falls back to
cpu_has(). But static_cpu_has() is optimal and it works at all times
now. So use it instead and speed up the fallback case.

Before, we had this:

  mov    0x99d674(%rip),%rdx        # ffffffff81b0d9f4 <boot_cpu_data+0x34>
  shr    $0x2e,%rdx
  and    $0x1,%edx
  jne    ffffffff811704e9 <do_munmap+0x3f9>

After alternatives patching, it turns into:

  jmp    0xffffffff81170390
  nopl   (%rax)
  ...
  callq  ffffffff81056e00 <mpx_notify_unmap>
ffffffff81170390: mov    0x170(%r12),%rdi
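
For reference, static_cpu_has() gets its speed from asm goto plus the
alternatives framework: the compiler emits an unconditional jump whose
site is recorded for apply_alternatives(), which rewrites it at boot
once the CPU's feature bits are known. Below is a condensed sketch of
that pattern, not the kernel's exact implementation; the
.altinstructions bookkeeping that ties the jump site to 'bit' is
elided:

  /*
   * Condensed sketch of the asm-goto pattern behind static_cpu_has().
   * At boot, apply_alternatives() either patches the jump out (feature
   * present) or leaves it targeting t_no (feature absent), so the hot
   * path keeps no memory load, shift or test.
   */
  static __always_inline bool _static_cpu_has_sketch(u16 bit)
  {
  	asm_volatile_goto("1: jmp %l[t_no]\n"
  			  /* .altinstructions record for site 1b elided */
  			  : : "i" (bit) : : t_no);
  	return true;
  t_no:
  	return false;
  }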

Signed-off-by: Borislav Petkov <bp@suse.de>
Cc: Joerg Roedel <joro@8bytes.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/1455578358-28347-1-git-send-email-bp@alien8.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
arch/x86/include/asm/cpufeature.h

index 9fba7a5dd24afd39cf6e60a61ba06b73d92e470a..68e4e8258b84126d59094ea1023568a8272dcdbd 100644
@@ -88,8 +88,7 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
  * is not relevant.
  */
 #define cpu_feature_enabled(bit)       \
-       (__builtin_constant_p(bit) && DISABLED_MASK_BIT_SET(bit) ? 0 :  \
-        cpu_has(&boot_cpu_data, bit))
+       (__builtin_constant_p(bit) && DISABLED_MASK_BIT_SET(bit) ? 0 : static_cpu_has(bit))
 
 #define boot_cpu_has(bit)      cpu_has(&boot_cpu_data, bit)
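
As a usage sketch (hypothetical caller, not part of this patch): with
the new definition, a feature bit in DISABLED_MASK folds to 0 at
compile time and the branch vanishes entirely; otherwise the test
compiles to the boot-patched jump shown in the disassembly above.

  #include <asm/cpufeature.h>

  /*
   * Hypothetical example. If X86_FEATURE_MPX is disabled in this
   * build's DISABLED_MASK, this returns a compile-time 0 and callers'
   * branches are eliminated; otherwise it becomes the static_cpu_has()
   * jump patched at boot.
   */
  static inline bool example_mpx_active(void)
  {
  	return cpu_feature_enabled(X86_FEATURE_MPX);
  }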