x86/lib: Add WBNOINVD helper functions

In line with WBINVD usage, add WBNOINVD helper functions.  Explicitly fall
back to WBINVD (via alternative()) if WBNOINVD isn't supported even though
the instruction itself is backwards compatible (WBNOINVD is WBINVD with an
ignored REP prefix), so that disabling X86_FEATURE_WBNOINVD behaves as one
would expect, e.g. in case there's a hardware issue that affects WBNOINVD.

Opportunistically, add comments explaining the architectural behavior of
WBINVD and WBNOINVD, and provide hints and pointers to uarch-specific
behavior.

Note, alternative() ensures compatibility with early boot code as needed.

  [ bp: Massage, fix typos, make export _GPL. ]

Signed-off-by: Kevin Loughlin <kevinloughlin@google.com>
Co-developed-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Reviewed-by: Tom Lendacky <thomas.lendacky@amd.com>
Reviewed-by: Kai Huang <kai.huang@intel.com>
Acked-by: Ingo Molnar <mingo@kernel.org>
Link: https://lore.kernel.org/20250522233733.3176144-4-seanjc@google.com
This commit is contained in:
Kevin Loughlin
2025-05-22 16:37:27 -07:00
committed by Borislav Petkov (AMD)
parent e638081751
commit 07f99c3fbe
3 changed files with 45 additions and 1 deletion

View File

@@ -113,6 +113,7 @@ void native_play_dead(void);
void play_dead_common(void);
void wbinvd_on_cpu(int cpu);
void wbinvd_on_all_cpus(void);
void wbnoinvd_on_all_cpus(void);
void smp_kick_mwait_play_dead(void);
void __noreturn mwait_play_dead(unsigned int eax_hint);
@@ -153,6 +154,11 @@ static inline void wbinvd_on_all_cpus(void)
wbinvd();
}
/*
 * Write back (without invalidating) all caches reachable from this CPU.
 * Mirrors the wbinvd_on_all_cpus() stub above: just executes WBNOINVD
 * locally.  NOTE(review): presumably the !CONFIG_SMP fallback, where the
 * local CPU is the only CPU — confirm against the surrounding #ifdef.
 */
static inline void wbnoinvd_on_all_cpus(void)
{
wbnoinvd();
}
static inline struct cpumask *cpu_llc_shared_mask(int cpu)
{
return (struct cpumask *)cpumask_of(0);

View File

@@ -104,9 +104,36 @@ static inline void wrpkru(u32 pkru)
}
#endif
/*
* Write back all modified lines in all levels of cache associated with this
* logical processor to main memory, and then invalidate all caches. Depending
* on the micro-architecture, WBINVD (and WBNOINVD below) may or may not affect
* lower level caches associated with another logical processor that shares any
* level of this processor's cache hierarchy.
*/
static __always_inline void wbinvd(void)
{
	/*
	 * Single WBINVD instruction; the flattened diff rendered both the
	 * removed and the added spelling of this line, which would execute
	 * WBINVD twice — keep only the (reformatted) new line.  The "memory"
	 * clobber keeps the compiler from caching/reordering memory accesses
	 * across the cache writeback+invalidate.
	 */
	asm volatile("wbinvd" : : : "memory");
}
/*
 * Instruction encoding provided for binutils backwards compatibility:
 * 0xF3 (REP prefix) + 0x0F 0x09 (WBINVD opcode) is the WBNOINVD encoding,
 * for assemblers that don't know the WBNOINVD mnemonic.
 */
#define ASM_WBNOINVD _ASM_BYTES(0xf3,0x0f,0x09)
/*
 * Write back all modified lines in all levels of cache associated with this
 * logical processor to main memory, but do NOT explicitly invalidate caches,
 * i.e. leave all/most cache lines in the hierarchy in non-modified state.
 */
static __always_inline void wbnoinvd(void)
{
/*
 * Explicitly encode WBINVD if X86_FEATURE_WBNOINVD is unavailable even
 * though WBNOINVD is backwards compatible (it's simply WBINVD with an
 * ignored REP prefix), to guarantee that WBNOINVD isn't used if it
 * needs to be avoided for any reason. For all supported usage in the
 * kernel, WBINVD is functionally a superset of WBNOINVD.
 */
alternative("wbinvd", ASM_WBNOINVD, X86_FEATURE_WBNOINVD);
}
static inline unsigned long __read_cr4(void)

View File

@@ -19,3 +19,14 @@ void wbinvd_on_all_cpus(void)
on_each_cpu(__wbinvd, NULL, 1);
}
EXPORT_SYMBOL(wbinvd_on_all_cpus);
/*
 * SMP cross-call trampoline for wbnoinvd(): on_each_cpu() handlers take a
 * void * argument, which is unused here.
 */
static void __wbnoinvd(void *dummy)
{
wbnoinvd();
}
/*
 * Execute WBNOINVD on every online CPU, via IPI on the remote ones, and
 * wait (third argument == 1) for all of them to finish before returning.
 * Exported _GPL, matching the non-GPL export of wbinvd_on_all_cpus() above
 * per the commit note ("make export _GPL").
 */
void wbnoinvd_on_all_cpus(void)
{
on_each_cpu(__wbnoinvd, NULL, 1);
}
EXPORT_SYMBOL_GPL(wbnoinvd_on_all_cpus);