#define X86_FEATURE_USE_IBPB ( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */
#define X86_FEATURE_USE_IBRS_FW ( 7*32+22) /* "" Use IBRS during runtime firmware calls */
#define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE ( 7*32+23) /* "" Disable Speculative Store Bypass. */
-#define X86_FEATURE_AMD_RDS (7*32+24) /* "" AMD RDS implementation */
+#define X86_FEATURE_AMD_SSBD ( 7*32+24) /* "" AMD SSBD implementation */
/* Virtualization flags: Linux defined, word 8 */
#define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */
#define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */
#define X86_FEATURE_INTEL_STIBP (18*32+27) /* "" Single Thread Indirect Branch Predictors */
#define X86_FEATURE_ARCH_CAPABILITIES (18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */
-#define X86_FEATURE_RDS (18*32+31) /* Reduced Data Speculation */
+#define X86_FEATURE_SSBD (18*32+31) /* Speculative Store Bypass Disable */
/*
* BUG word(s)
#define MSR_IA32_SPEC_CTRL 0x00000048 /* Speculation Control */
#define SPEC_CTRL_IBRS (1 << 0) /* Indirect Branch Restricted Speculation */
#define SPEC_CTRL_STIBP (1 << 1) /* Single Thread Indirect Branch Predictors */
-#define SPEC_CTRL_RDS_SHIFT 2 /* Reduced Data Speculation bit */
-#define SPEC_CTRL_RDS (1 << SPEC_CTRL_RDS_SHIFT) /* Reduced Data Speculation */
+#define SPEC_CTRL_SSBD_SHIFT 2 /* Speculative Store Bypass Disable bit */
+#define SPEC_CTRL_SSBD (1 << SPEC_CTRL_SSBD_SHIFT) /* Speculative Store Bypass Disable */
#define MSR_IA32_PRED_CMD 0x00000049 /* Prediction Command */
#define PRED_CMD_IBPB (1 << 0) /* Indirect Branch Prediction Barrier */
#define MSR_IA32_ARCH_CAPABILITIES 0x0000010a
#define ARCH_CAP_RDCL_NO (1 << 0) /* Not susceptible to Meltdown */
#define ARCH_CAP_IBRS_ALL (1 << 1) /* Enhanced IBRS support */
-#define ARCH_CAP_RDS_NO (1 << 4) /*
+#define ARCH_CAP_SSBD_NO (1 << 4) /*
* Not susceptible to Speculative Store Bypass
- * attack, so no Reduced Data Speculation control
- * required.
+ * attack, so no Speculative Store Bypass
+ * control required.
*/
#define MSR_IA32_BBL_CR_CTL 0x00000119
/* AMD specific Speculative Store Bypass MSR data */
extern u64 x86_amd_ls_cfg_base;
-extern u64 x86_amd_ls_cfg_rds_mask;
+extern u64 x86_amd_ls_cfg_ssbd_mask;
/* The Intel SPEC CTRL MSR base value cache */
extern u64 x86_spec_ctrl_base;
-static inline u64 rds_tif_to_spec_ctrl(u64 tifn)
+static inline u64 ssbd_tif_to_spec_ctrl(u64 tifn)
{
- BUILD_BUG_ON(TIF_RDS < SPEC_CTRL_RDS_SHIFT);
- return (tifn & _TIF_RDS) >> (TIF_RDS - SPEC_CTRL_RDS_SHIFT);
+ BUILD_BUG_ON(TIF_SSBD < SPEC_CTRL_SSBD_SHIFT);
+ return (tifn & _TIF_SSBD) >> (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT);
}
-static inline u64 rds_tif_to_amd_ls_cfg(u64 tifn)
+static inline u64 ssbd_tif_to_amd_ls_cfg(u64 tifn)
{
- return (tifn & _TIF_RDS) ? x86_amd_ls_cfg_rds_mask : 0ULL;
+ return (tifn & _TIF_SSBD) ? x86_amd_ls_cfg_ssbd_mask : 0ULL;
}
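
For illustration only (not part of the patch): a standalone, user-space sketch of the bit translation done by ssbd_tif_to_spec_ctrl() above, assuming the values used in this series (TIF_SSBD == 5, SPEC_CTRL_SSBD_SHIFT == 2). Shifting by the difference of the two bit positions turns the per-task flag bit into the SSBD bit of MSR_IA32_SPEC_CTRL; the AMD helper maps the same TIF bit to the vendor-specific x86_amd_ls_cfg_ssbd_mask instead, since AMD parts use MSR_AMD64_LS_CFG.

#include <stdint.h>
#include <stdio.h>

/* Mirrors of the kernel defines, for this sketch only. */
#define SK_TIF_SSBD              5
#define SK__TIF_SSBD             (1ULL << SK_TIF_SSBD)
#define SK_SPEC_CTRL_SSBD_SHIFT  2
#define SK_SPEC_CTRL_SSBD        (1ULL << SK_SPEC_CTRL_SSBD_SHIFT)

static uint64_t sk_ssbd_tif_to_spec_ctrl(uint64_t tifn)
{
	/* Bit 5 of the flags word moves down to bit 2 of the MSR value. */
	return (tifn & SK__TIF_SSBD) >> (SK_TIF_SSBD - SK_SPEC_CTRL_SSBD_SHIFT);
}

int main(void)
{
	printf("%#llx\n", (unsigned long long)sk_ssbd_tif_to_spec_ctrl(SK__TIF_SSBD)); /* 0x4 == SPEC_CTRL_SSBD */
	printf("%#llx\n", (unsigned long long)sk_ssbd_tif_to_spec_ctrl(0));            /* 0x0 */
	return 0;
}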
extern void speculative_store_bypass_update(void);
#define TIF_SIGPENDING 2 /* signal pending */
#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
#define TIF_SINGLESTEP 4 /* reenable singlestep on user return*/
-#define TIF_RDS 5 /* Reduced data speculation */
+#define TIF_SSBD 5 /* Speculative store bypass disable */
#define TIF_SYSCALL_EMU 6 /* syscall emulation active */
#define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
#define TIF_SECCOMP 8 /* secure computing */
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
-#define _TIF_RDS (1 << TIF_RDS)
+#define _TIF_SSBD (1 << TIF_SSBD)
#define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU)
#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
#define _TIF_SECCOMP (1 << TIF_SECCOMP)
/* flags to check in __switch_to() */
#define _TIF_WORK_CTXSW \
- (_TIF_IO_BITMAP|_TIF_NOCPUID|_TIF_NOTSC|_TIF_BLOCKSTEP|_TIF_RDS)
+ (_TIF_IO_BITMAP|_TIF_NOCPUID|_TIF_NOTSC|_TIF_BLOCKSTEP|_TIF_SSBD)
#define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY)
#define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
}
/*
* Try to cache the base value so further operations can
- * avoid RMW. If that faults, do not enable RDS.
+ * avoid RMW. If that faults, do not enable SSBD.
*/
if (!rdmsrl_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) {
- setup_force_cpu_cap(X86_FEATURE_RDS);
- setup_force_cpu_cap(X86_FEATURE_AMD_RDS);
- x86_amd_ls_cfg_rds_mask = 1ULL << bit;
+ setup_force_cpu_cap(X86_FEATURE_SSBD);
+ setup_force_cpu_cap(X86_FEATURE_AMD_SSBD);
+ x86_amd_ls_cfg_ssbd_mask = 1ULL << bit;
}
}
}
if (!cpu_has(c, X86_FEATURE_XENPV))
set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
- if (boot_cpu_has(X86_FEATURE_AMD_RDS)) {
- set_cpu_cap(c, X86_FEATURE_RDS);
- set_cpu_cap(c, X86_FEATURE_AMD_RDS);
+ if (boot_cpu_has(X86_FEATURE_AMD_SSBD)) {
+ set_cpu_cap(c, X86_FEATURE_SSBD);
+ set_cpu_cap(c, X86_FEATURE_AMD_SSBD);
}
}
/*
* AMD specific MSR info for Speculative Store Bypass control.
- * x86_amd_ls_cfg_rds_mask is initialized in identify_boot_cpu().
+ * x86_amd_ls_cfg_ssbd_mask is initialized in identify_boot_cpu().
*/
u64 __ro_after_init x86_amd_ls_cfg_base;
-u64 __ro_after_init x86_amd_ls_cfg_rds_mask;
+u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask;
void __init check_bugs(void)
{
u64 msrval = x86_spec_ctrl_base;
if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
- msrval |= rds_tif_to_spec_ctrl(current_thread_info()->flags);
+ msrval |= ssbd_tif_to_spec_ctrl(current_thread_info()->flags);
return msrval;
}
EXPORT_SYMBOL_GPL(x86_spec_ctrl_get_default);
return;
if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
- host |= rds_tif_to_spec_ctrl(current_thread_info()->flags);
+ host |= ssbd_tif_to_spec_ctrl(current_thread_info()->flags);
if (host != guest_spec_ctrl)
wrmsrl(MSR_IA32_SPEC_CTRL, guest_spec_ctrl);
return;
if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
- host |= rds_tif_to_spec_ctrl(current_thread_info()->flags);
+ host |= ssbd_tif_to_spec_ctrl(current_thread_info()->flags);
if (host != guest_spec_ctrl)
wrmsrl(MSR_IA32_SPEC_CTRL, host);
}
EXPORT_SYMBOL_GPL(x86_spec_ctrl_restore_host);
-static void x86_amd_rds_enable(void)
+static void x86_amd_ssb_disable(void)
{
- u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_rds_mask;
+ u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;
- if (boot_cpu_has(X86_FEATURE_AMD_RDS))
+ if (boot_cpu_has(X86_FEATURE_AMD_SSBD))
wrmsrl(MSR_AMD64_LS_CFG, msrval);
}
enum ssb_mitigation mode = SPEC_STORE_BYPASS_NONE;
enum ssb_mitigation_cmd cmd;
- if (!boot_cpu_has(X86_FEATURE_RDS))
+ if (!boot_cpu_has(X86_FEATURE_SSBD))
return mode;
cmd = ssb_parse_cmdline();
/*
* We have three CPU feature flags that are in play here:
* - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
- * - X86_FEATURE_RDS - CPU is able to turn off speculative store bypass
+ * - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass
* - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation
*/
if (mode == SPEC_STORE_BYPASS_DISABLE) {
*/
switch (boot_cpu_data.x86_vendor) {
case X86_VENDOR_INTEL:
- x86_spec_ctrl_base |= SPEC_CTRL_RDS;
- x86_spec_ctrl_mask &= ~SPEC_CTRL_RDS;
- x86_spec_ctrl_set(SPEC_CTRL_RDS);
+ x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
+ x86_spec_ctrl_mask &= ~SPEC_CTRL_SSBD;
+ x86_spec_ctrl_set(SPEC_CTRL_SSBD);
break;
case X86_VENDOR_AMD:
- x86_amd_rds_enable();
+ x86_amd_ssb_disable();
break;
}
}
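
For illustration only (not part of the patch), and simplified to plain booleans: a sketch of the check order visible above. Without X86_FEATURE_SSBD there is nothing the kernel can do, and only the SPEC_STORE_BYPASS_DISABLE mode ends up engaging the vendor-specific control, via SPEC_CTRL on Intel and LS_CFG on AMD.

#include <stdbool.h>
#include <stdio.h>

/* Sketch only: stand-ins for X86_FEATURE_SSBD, the selected mitigation
 * mode and the vendor split shown in the hunk above. */
static void sk_ssb_select_and_apply(bool feature_ssbd, bool mode_disable,
				    bool vendor_is_intel)
{
	if (!feature_ssbd)
		return;		/* CPU cannot turn the optimization off */
	if (!mode_disable)
		return;		/* mitigation not selected */
	if (vendor_is_intel)
		puts("set SPEC_CTRL_SSBD in MSR_IA32_SPEC_CTRL");
	else
		puts("apply the SSBD mask via MSR_AMD64_LS_CFG");
}

int main(void)
{
	sk_ssb_select_and_apply(true, true, true);	/* Intel path */
	sk_ssb_select_and_apply(true, true, false);	/* AMD path */
	sk_ssb_select_and_apply(false, true, true);	/* no SSBD: nothing */
	return 0;
}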
if (task_spec_ssb_force_disable(task))
return -EPERM;
task_clear_spec_ssb_disable(task);
- update = test_and_clear_tsk_thread_flag(task, TIF_RDS);
+ update = test_and_clear_tsk_thread_flag(task, TIF_SSBD);
break;
case PR_SPEC_DISABLE:
task_set_spec_ssb_disable(task);
- update = !test_and_set_tsk_thread_flag(task, TIF_RDS);
+ update = !test_and_set_tsk_thread_flag(task, TIF_SSBD);
break;
case PR_SPEC_FORCE_DISABLE:
task_set_spec_ssb_disable(task);
task_set_spec_ssb_force_disable(task);
- update = !test_and_set_tsk_thread_flag(task, TIF_RDS);
+ update = !test_and_set_tsk_thread_flag(task, TIF_SSBD);
break;
default:
return -ERANGE;
x86_spec_ctrl_set(x86_spec_ctrl_base & ~x86_spec_ctrl_mask);
if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
- x86_amd_rds_enable();
+ x86_amd_ssb_disable();
}
#ifdef CONFIG_SYSFS
rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
if (!x86_match_cpu(cpu_no_spec_store_bypass) &&
- !(ia32_cap & ARCH_CAP_RDS_NO))
+ !(ia32_cap & ARCH_CAP_SSBD_NO))
setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
if (x86_match_cpu(cpu_no_speculation))
setup_clear_cpu_cap(X86_FEATURE_STIBP);
setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL);
setup_clear_cpu_cap(X86_FEATURE_INTEL_STIBP);
- setup_clear_cpu_cap(X86_FEATURE_RDS);
+ setup_clear_cpu_cap(X86_FEATURE_SSBD);
}
/*
{
u64 msr;
- if (static_cpu_has(X86_FEATURE_AMD_RDS)) {
- msr = x86_amd_ls_cfg_base | rds_tif_to_amd_ls_cfg(tifn);
+ if (static_cpu_has(X86_FEATURE_AMD_SSBD)) {
+ msr = x86_amd_ls_cfg_base | ssbd_tif_to_amd_ls_cfg(tifn);
wrmsrl(MSR_AMD64_LS_CFG, msr);
} else {
- msr = x86_spec_ctrl_base | rds_tif_to_spec_ctrl(tifn);
+ msr = x86_spec_ctrl_base | ssbd_tif_to_spec_ctrl(tifn);
wrmsrl(MSR_IA32_SPEC_CTRL, msr);
}
}
if ((tifp ^ tifn) & _TIF_NOCPUID)
set_cpuid_faulting(!!(tifn & _TIF_NOCPUID));
- if ((tifp ^ tifn) & _TIF_RDS)
+ if ((tifp ^ tifn) & _TIF_SSBD)
__speculative_store_bypass_update(tifn);
}
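
For illustration only (not part of the patch): the (tifp ^ tifn) & _TIF_SSBD test above uses the usual flag-change idiom; XOR leaves a bit set exactly where the previous and next task disagree, so the MSR is rewritten only on a real change. A minimal user-space sketch, with the MSR write replaced by a printout:

#include <stdint.h>
#include <stdio.h>

#define SK__TIF_SSBD (1ULL << 5)	/* mirrors _TIF_SSBD for this sketch */

static void sk_update(uint64_t tifn)
{
	/* Stands in for __speculative_store_bypass_update(tifn). */
	printf("rewrite MSR, SSBD for next task: %d\n", !!(tifn & SK__TIF_SSBD));
}

static void sk_switch_to_xtra(uint64_t tifp, uint64_t tifn)
{
	if ((tifp ^ tifn) & SK__TIF_SSBD)	/* bit differs between prev and next */
		sk_update(tifn);
}

int main(void)
{
	sk_switch_to_xtra(0, SK__TIF_SSBD);		/* flag gained: update */
	sk_switch_to_xtra(SK__TIF_SSBD, SK__TIF_SSBD);	/* unchanged: skip */
	return 0;
}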
/* cpuid 7.0.edx*/
const u32 kvm_cpuid_7_0_edx_x86_features =
- F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) | F(RDS) |
+ F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) | F(SSBD) |
F(ARCH_CAPABILITIES);
/* all calls to cpuid_count() should be made on the same cpu */
if (!msr_info->host_initiated &&
!guest_cpuid_has(vcpu, X86_FEATURE_IBRS) &&
!guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) &&
- !guest_cpuid_has(vcpu, X86_FEATURE_RDS))
+ !guest_cpuid_has(vcpu, X86_FEATURE_SSBD))
return 1;
msr_info->data = to_vmx(vcpu)->spec_ctrl;
if (!msr_info->host_initiated &&
!guest_cpuid_has(vcpu, X86_FEATURE_IBRS) &&
!guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) &&
- !guest_cpuid_has(vcpu, X86_FEATURE_RDS))
+ !guest_cpuid_has(vcpu, X86_FEATURE_SSBD))
return 1;
/* The STIBP bit doesn't fault even if it's not advertised */
- if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_RDS))
+ if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_SSBD))
return 1;
vmx->spec_ctrl = data;
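
For illustration only (not part of the patch): the WRMSR path above accepts only the IBRS, STIBP and SSBD bits; any other bit in the guest-supplied value makes the handler return 1, i.e. the write is rejected. A standalone sketch of that reserved-bit check, using the bit values from this series:

#include <stdint.h>

#define SK_SPEC_CTRL_IBRS	(1ULL << 0)
#define SK_SPEC_CTRL_STIBP	(1ULL << 1)
#define SK_SPEC_CTRL_SSBD	(1ULL << 2)

/* Mirrors the "return 1" above: nonzero when any bit outside the three
 * known SPEC_CTRL bits is set in the value the guest tries to write. */
static int sk_spec_ctrl_write_rejected(uint64_t data)
{
	return (data & ~(SK_SPEC_CTRL_IBRS | SK_SPEC_CTRL_STIBP |
			 SK_SPEC_CTRL_SSBD)) != 0;
}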