mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2026-02-13 20:15:20 -05:00
x86/cpufeature: Replace X86_FEATURE_SYSENTER32 with X86_FEATURE_SYSFAST32
In most cases, the use of "fast 32-bit system call" depends either on X86_FEATURE_SEP or X86_FEATURE_SYSENTER32 || X86_FEATURE_SYSCALL32. However, nearly all the logic for both is identical. Define X86_FEATURE_SYSFAST32 which indicates that *either* SYSENTER32 or SYSCALL32 should be used, for either 32- or 64-bit kernels. This defaults to SYSENTER; use SYSCALL if the SYSCALL32 bit is also set. As this removes ALL existing uses of X86_FEATURE_SYSENTER32, which is a kernel-only synthetic feature bit, simply remove it and replace it with X86_FEATURE_SYSFAST32. This leaves an unused alternative for a true 32-bit kernel, but that should really not matter in any way. The clearing of X86_FEATURE_SYSCALL32 can be removed once the patches for automatically clearing disabled features has been merged. Signed-off-by: H. Peter Anvin (Intel) <hpa@zytor.com> Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com> Link: https://patch.msgid.link/20251216212606.1325678-10-hpa@zytor.com
This commit is contained in:
Committed by: Dave Hansen
Parent: a0636d4c3a
Commit: f49ecf5e11
@@ -56,6 +56,10 @@ config X86_REQUIRED_FEATURE_MOVBE
 	def_bool y
 	depends on MATOM
 
+config X86_REQUIRED_FEATURE_SYSFAST32
+	def_bool y
+	depends on X86_64 && !X86_FRED
+
 config X86_REQUIRED_FEATURE_CPUID
 	def_bool y
 	depends on X86_64
@@ -120,6 +124,10 @@ config X86_DISABLED_FEATURE_CENTAUR_MCR
 	def_bool y
 	depends on X86_64
 
+config X86_DISABLED_FEATURE_SYSCALL32
+	def_bool y
+	depends on !X86_64
+
 config X86_DISABLED_FEATURE_PCID
 	def_bool y
 	depends on !X86_64
@@ -52,13 +52,9 @@ __kernel_vsyscall:
 #define SYSENTER_SEQUENCE "movl %esp, %ebp; sysenter"
 #define SYSCALL_SEQUENCE "movl %ecx, %ebp; syscall"
 
-#ifdef BUILD_VDSO32_64
 /* If SYSENTER (Intel) or SYSCALL32 (AMD) is available, use it. */
-ALTERNATIVE_2 "", SYSENTER_SEQUENCE, X86_FEATURE_SYSENTER32, \
-	      SYSCALL_SEQUENCE, X86_FEATURE_SYSCALL32
-#else
-ALTERNATIVE "", SYSENTER_SEQUENCE, X86_FEATURE_SEP
-#endif
+ALTERNATIVE_2 "", SYSENTER_SEQUENCE, X86_FEATURE_SYSFAST32, \
+	      SYSCALL_SEQUENCE, X86_FEATURE_SYSCALL32
 
 /* Enter using int $0x80 */
 int $0x80
@@ -84,7 +84,7 @@
 #define X86_FEATURE_PEBS		( 3*32+12) /* "pebs" Precise-Event Based Sampling */
 #define X86_FEATURE_BTS			( 3*32+13) /* "bts" Branch Trace Store */
 #define X86_FEATURE_SYSCALL32		( 3*32+14) /* syscall in IA32 userspace */
-#define X86_FEATURE_SYSENTER32		( 3*32+15) /* sysenter in IA32 userspace */
+#define X86_FEATURE_SYSFAST32		( 3*32+15) /* sysenter/syscall in IA32 userspace */
 #define X86_FEATURE_REP_GOOD		( 3*32+16) /* "rep_good" REP microcode works well */
 #define X86_FEATURE_AMD_LBR_V2		( 3*32+17) /* "amd_lbr_v2" AMD Last Branch Record Extension Version 2 */
 #define X86_FEATURE_CLEAR_CPU_BUF	( 3*32+18) /* Clear CPU buffers using VERW */
@@ -102,9 +102,6 @@ static void early_init_centaur(struct cpuinfo_x86 *c)
 	    (c->x86 >= 7))
 		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
 
-#ifdef CONFIG_X86_64
-	set_cpu_cap(c, X86_FEATURE_SYSENTER32);
-#endif
 	if (c->x86_power & (1 << 8)) {
 		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
 		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
@@ -1068,6 +1068,9 @@ void get_cpu_cap(struct cpuinfo_x86 *c)
 	init_scattered_cpuid_features(c);
 	init_speculation_control(c);
 
+	if (IS_ENABLED(CONFIG_X86_64) || cpu_has(c, X86_FEATURE_SEP))
+		set_cpu_cap(c, X86_FEATURE_SYSFAST32);
+
 	/*
 	 * Clear/Set all flags overridden by options, after probe.
 	 * This needs to happen each time we re-probe, which may happen
@@ -1813,6 +1816,11 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
 	 * that it can't be enabled in 32-bit mode.
 	 */
 	setup_clear_cpu_cap(X86_FEATURE_PCID);
+
+	/*
+	 * Never use SYSCALL on a 32-bit kernel
+	 */
+	setup_clear_cpu_cap(X86_FEATURE_SYSCALL32);
 #endif
 
 	/*
@@ -236,9 +236,7 @@ static void early_init_intel(struct cpuinfo_x86 *c)
 		clear_cpu_cap(c, X86_FEATURE_PSE);
 	}
 
-#ifdef CONFIG_X86_64
-	set_cpu_cap(c, X86_FEATURE_SYSENTER32);
-#else
+#ifndef CONFIG_X86_64
 	/* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */
 	if (c->x86 == 15 && c->x86_cache_alignment == 64)
 		c->x86_cache_alignment = 128;
@@ -59,9 +59,7 @@ static void early_init_zhaoxin(struct cpuinfo_x86 *c)
 {
 	if (c->x86 >= 0x6)
 		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
-#ifdef CONFIG_X86_64
-	set_cpu_cap(c, X86_FEATURE_SYSENTER32);
-#endif
 
 	if (c->x86_power & (1 << 8)) {
 		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
 		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
@@ -68,7 +68,7 @@ void cpu_init_fred_exceptions(void)
 	idt_invalidate();
 
 	/* Use int $0x80 for 32-bit system calls in FRED mode */
-	setup_clear_cpu_cap(X86_FEATURE_SYSENTER32);
+	setup_clear_cpu_cap(X86_FEATURE_SYSFAST32);
 	setup_clear_cpu_cap(X86_FEATURE_SYSCALL32);
 }
@@ -990,13 +990,6 @@ static int register_callback(unsigned type, const void *func)
 	return HYPERVISOR_callback_op(CALLBACKOP_register, &callback);
 }
 
-void xen_enable_sysenter(void)
-{
-	if (cpu_feature_enabled(X86_FEATURE_SYSENTER32) &&
-	    register_callback(CALLBACKTYPE_sysenter, xen_entry_SYSENTER_compat))
-		setup_clear_cpu_cap(X86_FEATURE_SYSENTER32);
-}
-
 void xen_enable_syscall(void)
 {
 	int ret;
@@ -1008,11 +1001,27 @@ void xen_enable_syscall(void)
 		   mechanism for syscalls. */
 	}
 
-	if (cpu_feature_enabled(X86_FEATURE_SYSCALL32) &&
-	    register_callback(CALLBACKTYPE_syscall32, xen_entry_SYSCALL_compat))
-		setup_clear_cpu_cap(X86_FEATURE_SYSCALL32);
+	if (!cpu_feature_enabled(X86_FEATURE_SYSFAST32))
+		return;
+
+	if (cpu_feature_enabled(X86_FEATURE_SYSCALL32)) {
+		/* Use SYSCALL32 */
+		ret = register_callback(CALLBACKTYPE_syscall32,
+					xen_entry_SYSCALL_compat);
+	} else {
+		/* Use SYSENTER32 */
+		ret = register_callback(CALLBACKTYPE_sysenter,
+					xen_entry_SYSENTER_compat);
+	}
+
+	if (ret) {
+		setup_clear_cpu_cap(X86_FEATURE_SYSCALL32);
+		setup_clear_cpu_cap(X86_FEATURE_SYSFAST32);
+	}
 }
 
 static void __init xen_pvmmu_arch_setup(void)
 {
 	HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_writable_pagetables);
@@ -1022,7 +1031,6 @@ static void __init xen_pvmmu_arch_setup(void)
 	    register_callback(CALLBACKTYPE_failsafe, xen_failsafe_callback))
 		BUG();
 
-	xen_enable_sysenter();
 	xen_enable_syscall();
 }
@@ -65,10 +65,9 @@ static void cpu_bringup(void)
 	touch_softlockup_watchdog();
 
 	/* PVH runs in ring 0 and allows us to do native syscalls. Yay! */
-	if (!xen_feature(XENFEAT_supervisor_mode_kernel)) {
-		xen_enable_sysenter();
+	if (!xen_feature(XENFEAT_supervisor_mode_kernel))
 		xen_enable_syscall();
-	}
 
 	cpu = smp_processor_id();
 	identify_secondary_cpu(cpu);
 	set_cpu_sibling_map(cpu);
@@ -60,7 +60,6 @@ phys_addr_t __init xen_find_free_area(phys_addr_t size);
 char * __init xen_memory_setup(void);
 void __init xen_arch_setup(void);
 void xen_banner(void);
-void xen_enable_sysenter(void);
 void xen_enable_syscall(void);
 void xen_vcpu_restore(void);
Reference in New Issue
Block a user