Merge tag 'kvm-x86-selftests-6.13' of https://github.com/kvm-x86/linux into HEAD
KVM selftests changes for 6.13: Enable XFAM-based features by default for all selftests VMs, which will allow removing the "no AVX" restriction.
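
In practice this means selftests vCPUs now start with CR4.OSXSAVE set and XCR0 programmed to the full KVM-supported feature mask, so individual tests no longer need to enable XSAVE state themselves. A minimal sketch of what guest code can now assume, using the existing selftests helpers get_cr4(), xgetbv() and this_cpu_supported_xcr0(); the assertions are illustrative, not part of the merged diff:

    #include "kvm_util.h"
    #include "processor.h"

    /* Illustrative guest: relies on the new selftests defaults, not new API. */
    static void guest_code(void)
    {
    	/* vcpu_init_sregs() sets CR4.OSXSAVE before the guest first runs... */
    	GUEST_ASSERT(get_cr4() & X86_CR4_OSXSAVE);

    	/* ...and vcpu_init_xcrs() programs XCR0 to the full supported mask. */
    	GUEST_ASSERT(xgetbv(0) == this_cpu_supported_xcr0());

    	GUEST_DONE();
    }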
@@ -20,7 +20,6 @@
 #define SLEEPING_THREAD_NUM (1 << 4)
 #define FORK_NUM (1ULL << 9)
 #define DELAY_US_MAX 2000
-#define GUEST_CODE_PIO_PORT 4
 
 sem_t *sem;
@@ -1049,6 +1049,11 @@ static inline void vcpu_set_cpuid(struct kvm_vcpu *vcpu)
 	vcpu_ioctl(vcpu, KVM_GET_CPUID2, vcpu->cpuid);
 }
 
+static inline void vcpu_get_cpuid(struct kvm_vcpu *vcpu)
+{
+	vcpu_ioctl(vcpu, KVM_GET_CPUID2, vcpu->cpuid);
+}
+
 void vcpu_set_cpuid_property(struct kvm_vcpu *vcpu,
 			     struct kvm_x86_cpu_property property,
 			     uint32_t value);
@@ -506,6 +506,8 @@ static void vcpu_init_sregs(struct kvm_vm *vm, struct kvm_vcpu *vcpu)
 
 	sregs.cr0 = X86_CR0_PE | X86_CR0_NE | X86_CR0_PG;
 	sregs.cr4 |= X86_CR4_PAE | X86_CR4_OSFXSR;
+	if (kvm_cpu_has(X86_FEATURE_XSAVE))
+		sregs.cr4 |= X86_CR4_OSXSAVE;
 	sregs.efer |= (EFER_LME | EFER_LMA | EFER_NX);
 
 	kvm_seg_set_unusable(&sregs.ldt);
@@ -519,6 +521,20 @@ static void vcpu_init_sregs(struct kvm_vm *vm, struct kvm_vcpu *vcpu)
 	vcpu_sregs_set(vcpu, &sregs);
 }
 
+static void vcpu_init_xcrs(struct kvm_vm *vm, struct kvm_vcpu *vcpu)
+{
+	struct kvm_xcrs xcrs = {
+		.nr_xcrs = 1,
+		.xcrs[0].xcr = 0,
+		.xcrs[0].value = kvm_cpu_supported_xcr0(),
+	};
+
+	if (!kvm_cpu_has(X86_FEATURE_XSAVE))
+		return;
+
+	vcpu_xcrs_set(vcpu, &xcrs);
+}
+
 static void set_idt_entry(struct kvm_vm *vm, int vector, unsigned long addr,
 			  int dpl, unsigned short selector)
 {
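
For reference, vcpu_xcrs_set() is the selftests wrapper around the KVM_SET_XCRS vCPU ioctl. A hand-rolled equivalent against a raw vCPU fd might look like the sketch below (the set_xcr0() helper name is hypothetical; error handling is minimal):

    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Hypothetical raw-ioctl equivalent of vcpu_xcrs_set(), XCR0 only. */
    static void set_xcr0(int vcpu_fd, __u64 value)
    {
    	struct kvm_xcrs xcrs = {
    		.nr_xcrs = 1,
    		.xcrs[0].xcr = 0,	/* index 0 == XCR0 */
    		.xcrs[0].value = value,
    	};

    	/* Requires KVM_CAP_XCRS; KVM rejects bits it doesn't support. */
    	if (ioctl(vcpu_fd, KVM_SET_XCRS, &xcrs))
    		perror("KVM_SET_XCRS");
    }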
@@ -675,6 +691,7 @@ struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
 	vcpu = __vm_vcpu_add(vm, vcpu_id);
 	vcpu_init_cpuid(vcpu, kvm_get_supported_cpuid());
 	vcpu_init_sregs(vm, vcpu);
+	vcpu_init_xcrs(vm, vcpu);
 
 	/* Setup guest general purpose registers */
 	vcpu_regs_get(vcpu, &regs);
@@ -686,6 +703,13 @@ struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
 	mp_state.mp_state = 0;
 	vcpu_mp_state_set(vcpu, &mp_state);
 
+	/*
+	 * Refresh CPUID after setting SREGS and XCR0, so that KVM's "runtime"
+	 * updates to guest CPUID, e.g. for OSXSAVE and XSAVE state size, are
+	 * reflected into selftests' vCPU CPUID cache, i.e. so that the cache
+	 * is consistent with vCPU state.
+	 */
+	vcpu_get_cpuid(vcpu);
+
 	return vcpu;
 }
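
The refresh closes a subtle gap: KVM updates "runtime" CPUID bits (e.g. OSXSAVE in CPUID.0x1.ECX, and the XSAVE size fields in CPUID.0xD) as a side effect of KVM_SET_SREGS and KVM_SET_XCRS, so a cache filled before those ioctls would disagree with what the guest actually sees. A hedged fragment of the kind of check this enables, assuming the existing cache-backed vcpu_cpuid_has() helper keeps its current signature:

    	struct kvm_vcpu *vcpu = vm_vcpu_add(vm, 0, guest_code);

    	/*
    	 * Because vm_arch_vcpu_add() re-reads CPUID after SREGS/XCR0 are
    	 * set, the cached view reflects KVM's runtime update of
    	 * CPUID.0x1.ECX.OSXSAVE; without the refresh this could fail.
    	 */
    	TEST_ASSERT(vcpu_cpuid_has(vcpu, X86_FEATURE_OSXSAVE),
    		    "Cached CPUID should show OSXSAVE after vCPU init");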
@@ -86,6 +86,8 @@ static inline void __xsavec(struct xstate *xstate, uint64_t rfbm)
 
 static void check_xtile_info(void)
 {
+	GUEST_ASSERT((xgetbv(0) & XFEATURE_MASK_XTILE) == XFEATURE_MASK_XTILE);
+
 	GUEST_ASSERT(this_cpu_has_p(X86_PROPERTY_XSTATE_MAX_SIZE_XCR0));
 	GUEST_ASSERT(this_cpu_property(X86_PROPERTY_XSTATE_MAX_SIZE_XCR0) <= XSAVE_SIZE);
@@ -122,29 +124,12 @@ static void set_tilecfg(struct tile_config *cfg)
 	}
 }
 
-static void init_regs(void)
-{
-	uint64_t cr4, xcr0;
-
-	GUEST_ASSERT(this_cpu_has(X86_FEATURE_XSAVE));
-
-	/* turn on CR4.OSXSAVE */
-	cr4 = get_cr4();
-	cr4 |= X86_CR4_OSXSAVE;
-	set_cr4(cr4);
-	GUEST_ASSERT(this_cpu_has(X86_FEATURE_OSXSAVE));
-
-	xcr0 = xgetbv(0);
-	xcr0 |= XFEATURE_MASK_XTILE;
-	xsetbv(0x0, xcr0);
-	GUEST_ASSERT((xgetbv(0) & XFEATURE_MASK_XTILE) == XFEATURE_MASK_XTILE);
-}
-
 static void __attribute__((__flatten__)) guest_code(struct tile_config *amx_cfg,
 						    struct tile_data *tiledata,
 						    struct xstate *xstate)
 {
-	init_regs();
 	GUEST_ASSERT(this_cpu_has(X86_FEATURE_XSAVE) &&
 		     this_cpu_has(X86_FEATURE_OSXSAVE));
 	check_xtile_info();
 	GUEST_SYNC(1);
@@ -12,17 +12,16 @@
 #include "kvm_util.h"
 #include "processor.h"
 
-/* CPUIDs known to differ */
-struct {
-	u32 function;
-	u32 index;
-} mangled_cpuids[] = {
-	/*
-	 * These entries depend on the vCPU's XCR0 register and IA32_XSS MSR,
-	 * which are not controlled for by this test.
-	 */
-	{.function = 0xd, .index = 0},
-	{.function = 0xd, .index = 1},
+struct cpuid_mask {
+	union {
+		struct {
+			u32 eax;
+			u32 ebx;
+			u32 ecx;
+			u32 edx;
+		};
+		u32 regs[4];
+	};
 };
 
 static void test_guest_cpuids(struct kvm_cpuid2 *guest_cpuid)
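
The anonymous union gives two views of the same 16 bytes: by register name (eax..edx) or by index (regs[0..3]), which is what lets the masking code in the next hunk clear a feature bit via X86_FEATURE_*.reg without switching over register names. A tiny standalone illustration of the aliasing, using the struct defined above (the values are arbitrary; bit 27 of ECX happens to be OSXSAVE):

    #include <assert.h>
    #include <string.h>

    static void demo_cpuid_mask_aliasing(void)
    {
    	struct cpuid_mask mask;

    	memset(&mask, 0xff, sizeof(mask));	/* start with "compare all bits" */
    	mask.regs[2] &= ~(1u << 27);		/* regs[2] aliases mask.ecx ... */
    	assert(mask.ecx == ~(1u << 27));	/* ... so the named view agrees */
    	assert(mask.eax == 0xffffffff);		/* other registers untouched */
    }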
@@ -56,17 +55,29 @@ static void guest_main(struct kvm_cpuid2 *guest_cpuid)
 	GUEST_DONE();
 }
 
-static bool is_cpuid_mangled(const struct kvm_cpuid_entry2 *entrie)
+static struct cpuid_mask get_const_cpuid_mask(const struct kvm_cpuid_entry2 *entry)
 {
-	int i;
+	struct cpuid_mask mask;
 
-	for (i = 0; i < ARRAY_SIZE(mangled_cpuids); i++) {
-		if (mangled_cpuids[i].function == entrie->function &&
-		    mangled_cpuids[i].index == entrie->index)
-			return true;
+	memset(&mask, 0xff, sizeof(mask));
+
+	switch (entry->function) {
+	case 0x1:
+		mask.regs[X86_FEATURE_OSXSAVE.reg] &= ~BIT(X86_FEATURE_OSXSAVE.bit);
+		break;
+	case 0x7:
+		mask.regs[X86_FEATURE_OSPKE.reg] &= ~BIT(X86_FEATURE_OSPKE.bit);
+		break;
+	case 0xd:
+		/*
+		 * CPUID.0xD.{0,1}.EBX enumerate XSAVE size based on the current
+		 * XCR0 and IA32_XSS MSR values.
+		 */
+		if (entry->index < 2)
+			mask.ebx = 0;
+		break;
 	}
 
-	return false;
+	return mask;
 }
 
 static void compare_cpuids(const struct kvm_cpuid2 *cpuid1,
@@ -79,6 +90,8 @@ static void compare_cpuids(const struct kvm_cpuid2 *cpuid1,
 		    "CPUID nent mismatch: %d vs. %d", cpuid1->nent, cpuid2->nent);
 
 	for (i = 0; i < cpuid1->nent; i++) {
+		struct cpuid_mask mask;
+
 		e1 = &cpuid1->entries[i];
 		e2 = &cpuid2->entries[i];
@@ -88,15 +101,19 @@
 			    i, e1->function, e1->index, e1->flags,
 			    e2->function, e2->index, e2->flags);
 
-		if (is_cpuid_mangled(e1))
-			continue;
+		/* Mask off dynamic bits, e.g. OSXSAVE, when comparing entries. */
+		mask = get_const_cpuid_mask(e1);
 
-		TEST_ASSERT(e1->eax == e2->eax && e1->ebx == e2->ebx &&
-			    e1->ecx == e2->ecx && e1->edx == e2->edx,
+		TEST_ASSERT((e1->eax & mask.eax) == (e2->eax & mask.eax) &&
+			    (e1->ebx & mask.ebx) == (e2->ebx & mask.ebx) &&
+			    (e1->ecx & mask.ecx) == (e2->ecx & mask.ecx) &&
+			    (e1->edx & mask.edx) == (e2->edx & mask.edx),
 			    "CPUID 0x%x.%x differ: 0x%x:0x%x:0x%x:0x%x vs 0x%x:0x%x:0x%x:0x%x",
 			    e1->function, e1->index,
-			    e1->eax, e1->ebx, e1->ecx, e1->edx,
-			    e2->eax, e2->ebx, e2->ecx, e2->edx);
+			    e1->eax & mask.eax, e1->ebx & mask.ebx,
+			    e1->ecx & mask.ecx, e1->edx & mask.edx,
+			    e2->eax & mask.eax, e2->ebx & mask.ebx,
+			    e2->ecx & mask.ecx, e2->edx & mask.edx);
 	}
 }
@@ -19,30 +19,42 @@
 #include "kvm_util.h"
 #include "processor.h"
 
-static inline bool cr4_cpuid_is_sync(void)
-{
-	uint64_t cr4 = get_cr4();
-
-	return (this_cpu_has(X86_FEATURE_OSXSAVE) == !!(cr4 & X86_CR4_OSXSAVE));
-}
+#define MAGIC_HYPERCALL_PORT 0x80
 
 static void guest_code(void)
 {
-	uint64_t cr4;
+	u32 regs[4] = {
+		[KVM_CPUID_EAX] = X86_FEATURE_OSXSAVE.function,
+		[KVM_CPUID_ECX] = X86_FEATURE_OSXSAVE.index,
+	};
 
-	/* turn on CR4.OSXSAVE */
-	cr4 = get_cr4();
-	cr4 |= X86_CR4_OSXSAVE;
-	set_cr4(cr4);
+	/* CR4.OSXSAVE should be enabled by default (for selftests vCPUs). */
+	GUEST_ASSERT(get_cr4() & X86_CR4_OSXSAVE);
 
 	/* verify CR4.OSXSAVE == CPUID.OSXSAVE */
-	GUEST_ASSERT(cr4_cpuid_is_sync());
+	GUEST_ASSERT(this_cpu_has(X86_FEATURE_OSXSAVE));
 
-	/* notify hypervisor to change CR4 */
-	GUEST_SYNC(0);
+	/*
+	 * Notify hypervisor to clear CR4.OSXSAVE, do CPUID and save output,
+	 * and then restore CR4.  Do this all in assembly to ensure no AVX
+	 * instructions are executed while OSXSAVE=0.
+	 */
+	asm volatile (
+		"out %%al, $" __stringify(MAGIC_HYPERCALL_PORT) "\n\t"
+		"cpuid\n\t"
+		"mov %%rdi, %%cr4\n\t"
+		: "+a" (regs[KVM_CPUID_EAX]),
+		  "=b" (regs[KVM_CPUID_EBX]),
+		  "+c" (regs[KVM_CPUID_ECX]),
+		  "=d" (regs[KVM_CPUID_EDX])
+		: "D" (get_cr4())
+	);
 
-	/* check again */
-	GUEST_ASSERT(cr4_cpuid_is_sync());
+	/* Verify KVM cleared OSXSAVE in CPUID when it was cleared in CR4. */
+	GUEST_ASSERT(!(regs[X86_FEATURE_OSXSAVE.reg] & BIT(X86_FEATURE_OSXSAVE.bit)));
+
+	/* Verify restoring CR4 also restored OSXSAVE in CPUID. */
+	GUEST_ASSERT(this_cpu_has(X86_FEATURE_OSXSAVE));
 
 	GUEST_DONE();
 }
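
The single asm block exists because any compiler-scheduled AVX instruction between the OUT (after which the host clears CR4.OSXSAVE) and the final write to CR4 would raise #UD. As an annotation of the operand constraints above (commentary, not code from the merge):

    /*
     * "+a" regs[KVM_CPUID_EAX]  in/out: CPUID leaf in, EAX result out
     * "=b" regs[KVM_CPUID_EBX]  out:    EBX result
     * "+c" regs[KVM_CPUID_ECX]  in/out: CPUID subleaf in, ECX result out
     * "=d" regs[KVM_CPUID_EDX]  out:    EDX result
     * "D"  get_cr4()            in:     saved CR4, restored by "mov %rdi, %cr4"
     */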
@@ -62,13 +74,16 @@ int main(int argc, char *argv[])
 		vcpu_run(vcpu);
 		TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
 
-		switch (get_ucall(vcpu, &uc)) {
-		case UCALL_SYNC:
+		if (vcpu->run->io.port == MAGIC_HYPERCALL_PORT &&
+		    vcpu->run->io.direction == KVM_EXIT_IO_OUT) {
 			/* emulate hypervisor clearing CR4.OSXSAVE */
 			vcpu_sregs_get(vcpu, &sregs);
 			sregs.cr4 &= ~X86_CR4_OSXSAVE;
 			vcpu_sregs_set(vcpu, &sregs);
-			break;
+			continue;
 		}
 
+		switch (get_ucall(vcpu, &uc)) {
 		case UCALL_ABORT:
 			REPORT_GUEST_ASSERT(uc);
 			break;
@@ -166,7 +166,7 @@ int main(void)
 	/* Test single step */
 	target_rip = CAST_TO_RIP(ss_start);
 	target_dr6 = 0xffff4ff0ULL;
-	for (i = 0; i < (sizeof(ss_size) / sizeof(ss_size[0])); i++) {
+	for (i = 0; i < ARRAY_SIZE(ss_size); i++) {
 		target_rip += ss_size[i];
 		memset(&debug, 0, sizeof(debug));
 		debug.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP |
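
ARRAY_SIZE is the stock tools/include helper that the open-coded expression duplicated; modulo the type-safety checking some kernel variants add, it reduces to:

    #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))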
@@ -41,8 +41,8 @@ static void guest_sev_code(void)
 /* Stash state passed via VMSA before any compiled code runs. */
 extern void guest_code_xsave(void);
 asm("guest_code_xsave:\n"
-    "mov $-1, %eax\n"
-    "mov $-1, %edx\n"
+    "mov $" __stringify(XFEATURE_MASK_X87_AVX) ", %eax\n"
+    "xor %edx, %edx\n"
     "xsave (%rdi)\n"
    "jmp guest_sev_es_code");
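
XSAVE takes a requested-feature bitmap in EDX:EAX, which the CPU intersects with XCR0. Loading -1 into both registers asked to save every enabled component, whereas the fix requests exactly the x87/SSE/AVX state the test's buffer expects. A C-level sketch of the same operation, under the assumption that XFEATURE_MASK_X87_AVX covers bits 0-2 as in the selftests headers (the helper name is hypothetical):

    #include <stdint.h>

    /* Mirrors the selftests constant: x87 (bit 0) | SSE (bit 1) | AVX (bit 2). */
    #define XFEATURE_MASK_X87_AVX	0x7ull

    /* Hypothetical helper mirroring the fixed asm: save only x87/SSE/AVX. */
    static inline void xsave_x87_avx(void *buf)
    {
    	uint64_t rfbm = XFEATURE_MASK_X87_AVX;

    	asm volatile("xsave (%%rdi)"
    		     : /* no register outputs; the XSAVE area is written */
    		     : "D" (buf), "a" ((uint32_t)rfbm), "d" ((uint32_t)(rfbm >> 32))
    		     : "memory");
    }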
@@ -70,12 +70,6 @@ static void test_sync_vmsa(uint32_t policy)
 
 	double x87val = M_PI;
 	struct kvm_xsave __attribute__((aligned(64))) xsave = { 0 };
-	struct kvm_sregs sregs;
-	struct kvm_xcrs xcrs = {
-		.nr_xcrs = 1,
-		.xcrs[0].xcr = 0,
-		.xcrs[0].value = XFEATURE_MASK_X87_AVX,
-	};
 
 	vm = vm_sev_create_with_one_vcpu(KVM_X86_SEV_ES_VM, guest_code_xsave, &vcpu);
 	gva = vm_vaddr_alloc_shared(vm, PAGE_SIZE, KVM_UTIL_MIN_VADDR,
@@ -84,11 +78,6 @@ static void test_sync_vmsa(uint32_t policy)
 
 	vcpu_args_set(vcpu, 1, gva);
 
-	vcpu_sregs_get(vcpu, &sregs);
-	sregs.cr4 |= X86_CR4_OSFXSR | X86_CR4_OSXSAVE;
-	vcpu_sregs_set(vcpu, &sregs);
-
-	vcpu_xcrs_set(vcpu, &xcrs);
 	asm("fninit\n"
 	    "vpcmpeqb %%ymm4, %%ymm4, %%ymm4\n"
 	    "fldl %3\n"
@@ -192,6 +181,8 @@ static void test_sev_es_shutdown(void)
 
 int main(int argc, char *argv[])
 {
+	const u64 xf_mask = XFEATURE_MASK_X87_AVX;
+
 	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_SEV));
 
 	test_sev(guest_sev_code, SEV_POLICY_NO_DBG);
@@ -204,7 +195,7 @@ int main(int argc, char *argv[])
 	test_sev_es_shutdown();
 
 	if (kvm_has_cap(KVM_CAP_XCRS) &&
-	    (xgetbv(0) & XFEATURE_MASK_X87_AVX) == XFEATURE_MASK_X87_AVX) {
+	    (xgetbv(0) & kvm_cpu_supported_xcr0() & xf_mask) == xf_mask) {
 		test_sync_vmsa(0);
 		test_sync_vmsa(SEV_POLICY_NO_DBG);
 	}
@@ -145,11 +145,6 @@ static void __attribute__((__flatten__)) guest_code(void *arg)
 
 	memset(buffer, 0xcc, sizeof(buffer));
 
-	set_cr4(get_cr4() | X86_CR4_OSXSAVE);
-	GUEST_ASSERT(this_cpu_has(X86_FEATURE_OSXSAVE));
-
-	xsetbv(0, xgetbv(0) | supported_xcr0);
-
 	/*
 	 * Modify state for all supported xfeatures to take them out of
 	 * their "init" state, i.e. to make them show up in XSTATE_BV.
@@ -48,16 +48,16 @@ do { \
 
 static void guest_code(void)
 {
-	uint64_t xcr0_reset;
+	uint64_t initial_xcr0;
 	uint64_t supported_xcr0;
 	int i, vector;
 
-	set_cr4(get_cr4() | X86_CR4_OSXSAVE);
-
-	xcr0_reset = xgetbv(0);
+	initial_xcr0 = xgetbv(0);
 	supported_xcr0 = this_cpu_supported_xcr0();
 
-	GUEST_ASSERT(xcr0_reset == XFEATURE_MASK_FP);
+	GUEST_ASSERT(initial_xcr0 == supported_xcr0);
 
 	/* Check AVX */
 	ASSERT_XFEATURE_DEPENDENCIES(supported_xcr0,
@@ -79,6 +79,11 @@ static void guest_code(void)
 	ASSERT_ALL_OR_NONE_XFEATURE(supported_xcr0,
 				    XFEATURE_MASK_XTILE);
 
+	vector = xsetbv_safe(0, XFEATURE_MASK_FP);
+	__GUEST_ASSERT(!vector,
+		       "Expected success on XSETBV(FP), got vector '0x%x'",
+		       vector);
+
 	vector = xsetbv_safe(0, supported_xcr0);
 	__GUEST_ASSERT(!vector,
 		       "Expected success on XSETBV(0x%lx), got vector '0x%x'",
 		       supported_xcr0, vector);