Commit 999b295a authored by Thomas Gleixner's avatar Thomas Gleixner Committed by Greg Kroah-Hartman

x86/speculation: Rename SSBD update functions

commit 26c4d75b upstream

During context switch, the SSBD bit in SPEC_CTRL MSR is updated according
to changes of the TIF_SSBD flag in the current and next running task.

Currently, only the bit controlling speculative store bypass disable in
SPEC_CTRL MSR is updated and the related update functions all have
"speculative_store" or "ssb" in their names.

For enhanced mitigation control other bits in SPEC_CTRL MSR need to be
updated as well, which makes the SSB names inadequate.

Rename the "speculative_store*" functions to a more generic name. No
functional change.
Signed-off-by: Tim Chen <>
Signed-off-by: Thomas Gleixner <>
Reviewed-by: Ingo Molnar <>
Cc: Peter Zijlstra <>
Cc: Andy Lutomirski <>
Cc: Linus Torvalds <>
Cc: Jiri Kosina <>
Cc: Tom Lendacky <>
Cc: Josh Poimboeuf <>
Cc: Andrea Arcangeli <>
Cc: David Woodhouse <>
Cc: Andi Kleen <>
Cc: Dave Hansen <>
Cc: Casey Schaufler <>
Cc: Asit Mallick <>
Cc: Arjan van de Ven <>
Cc: Jon Masters <>
Cc: Waiman Long <>
Cc: Greg KH <>
Cc: Dave Stewart <>
Cc: Kees Cook <>
Link: [lore/lkml URL lost in page extraction]
Signed-off-by: Greg Kroah-Hartman <>
parent aca2ddbc
......@@ -70,11 +70,11 @@ extern void speculative_store_bypass_ht_init(void);
static inline void speculative_store_bypass_ht_init(void) { }
extern void speculative_store_bypass_update(unsigned long tif);
extern void speculation_ctrl_update(unsigned long tif);
static inline void speculative_store_bypass_update_current(void)
static inline void speculation_ctrl_update_current(void)
......@@ -199,7 +199,7 @@ x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) :
......@@ -629,7 +629,7 @@ static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
* mitigation until it is next scheduled.
if (task == current && update)
return 0;
......@@ -398,27 +398,27 @@ static __always_inline void amd_set_ssb_virt_state(unsigned long tifn)
wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, ssbd_tif_to_spec_ctrl(tifn));
static __always_inline void intel_set_ssb_state(unsigned long tifn)
static __always_inline void spec_ctrl_update_msr(unsigned long tifn)
u64 msr = x86_spec_ctrl_base | ssbd_tif_to_spec_ctrl(tifn);
wrmsrl(MSR_IA32_SPEC_CTRL, msr);
static __always_inline void __speculative_store_bypass_update(unsigned long tifn)
static __always_inline void __speculation_ctrl_update(unsigned long tifn)
if (static_cpu_has(X86_FEATURE_VIRT_SSBD))
else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD))
void speculative_store_bypass_update(unsigned long tif)
void speculation_ctrl_update(unsigned long tif)
......@@ -455,7 +455,7 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
set_cpuid_faulting(!!(tifn & _TIF_NOCPUID));
if ((tifp ^ tifn) & _TIF_SSBD)
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment